source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
array-stride.c | #include <assert.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#define ITERS (1ul<<5)
#define SIZE (1ul<<31)
#define NSEC2SEC 1000000000ul
typedef unsigned long ul;
typedef struct { ul pad[8]; } cl;
static ul barrier;
static ul accum_bw;
static ul nthreads;
static inline
// Entry half of a reusable two-phase spin barrier. Each thread decrements the
// shared counter (initialized to nthreads in main) and then spins until every
// thread has arrived and the counter reaches 0. Must be paired with a later
// depart_wait() before the barrier can be reused.
void arrive_wait() {
__atomic_sub_fetch(&barrier, 1, __ATOMIC_SEQ_CST);
while (__atomic_load_n(&barrier, __ATOMIC_SEQ_CST) > 0)
; // busy-wait; SEQ_CST load keeps the spin from being hoisted/reordered
}
static inline
// Exit half of the spin barrier: each thread increments the counter back and
// spins until it returns to nthreads, i.e. until all threads have departed.
// Restores the counter so the arrive_wait()/depart_wait() pair is reusable.
void depart_wait() {
__atomic_add_fetch(&barrier, 1, __ATOMIC_SEQ_CST);
while (__atomic_load_n(&barrier, __ATOMIC_SEQ_CST) < nthreads)
; // busy-wait until the last thread has re-incremented the counter
}
static inline
// Current monotonic time as a single nanosecond count.
ul now() {
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return ts.tv_nsec + NSEC2SEC*ts.tv_sec;
}
// Per-thread bandwidth kernel: allocate and touch a SIZE-byte buffer, then
// stream-read the first word of every 64-byte cache line ITERS times.
// Threads synchronize at arrive_wait()/depart_wait() so all of them time the
// same interval; each thread's bandwidth is summed into accum_bw.
void work() {
ul *array = malloc(SIZE);
assert(array);
memset(array, 0xdb, SIZE); // fault in all pages before timing
volatile cl *p = (cl*) &array[0]; // volatile so the loads are not elided
arrive_wait(); // start all threads together
ul start = now();
// TODO update this to use SIMD
for (ul n = 0; n < ITERS; n++) {
// SIZE>>6 cache lines total; 8-way manual unroll, one load per line
for (ul i = 0; i < (SIZE>>6); i+=(1ul<<3)) {
p[i+0].pad[0];
p[i+1].pad[0];
p[i+2].pad[0];
p[i+3].pad[0];
p[i+4].pad[0];
p[i+5].pad[0];
p[i+6].pad[0];
p[i+7].pad[0];
}
}
depart_wait(); // wait for the slowest thread before stopping the clock
ul dur = now()-start;
float sec = (float)dur / NSEC2SEC;
ul ops = ITERS * (SIZE>>6); // each is 64B
ul bw = (ops<<6)/sec; // bytes per second for this thread
__atomic_add_fetch(&accum_bw, bw, __ATOMIC_SEQ_CST);
free(array); // BUGFIX: buffer was previously leaked
}
// Read the thread count from the OMP_NUM_THREADS environment variable.
// Exits with an error message if the variable is unset or not a positive
// integer; otherwise returns the parsed value.
long env_threads() {
const char *env = getenv("OMP_NUM_THREADS");
if (env == NULL) {
fprintf(stderr, "Specify OMP_NUM_THREADS\n");
exit(EXIT_FAILURE);
}
long n = strtol(env, NULL, 10);
if (n <= 0) {
fprintf(stderr, "Invalid OMP_NUM_THREADS: %ld\n", n);
exit(EXIT_FAILURE);
}
return n;
}
// Driver: read the thread count, arm the spin barrier, run one work() per
// OpenMP thread, and report the summed bandwidth in GiB/s.
int main() {
nthreads = env_threads();
printf("using %lu threads\n", nthreads);
__atomic_store_n(&accum_bw, 0ul, __ATOMIC_SEQ_CST);
__atomic_store_n(&barrier, nthreads, __ATOMIC_SEQ_CST);
#pragma omp parallel
{
work();
}
ul total_bw = __atomic_load_n(&accum_bw, __ATOMIC_SEQ_CST);
printf("bandwidth: %.2lf GBps\n", total_bw/(float)(1ul<<30));
return 0;
}
|
H2Pack_matvec_periodic.c | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#include <omp.h>
#include "H2Pack_config.h"
#include "H2Pack_typedef.h"
#include "H2Pack_aux_structs.h"
#include "H2Pack_matvec.h"
#include "H2Pack_matvec_periodic.h"
#include "H2Pack_utils.h"
#include "utils.h"
// Extend the number of points to a multiple of SIMD_LEN and perform an n-body matvec
// Input parameters:
// coord0 : Matrix, size dim-by-ld0, coordinates of the 1st point set
// ld0 : Leading dimension of coord0, should be >= n0
// n0 : Number of points in coord0 (each column in coord0 is a coordinate)
// coord1 : Matrix, size dim-by-ld1, coordinates of the 2nd point set
// ld1 : Leading dimension of coord1, should be >= n1
// n1 : Number of points in coord1 (each column in coord0 is a coordinate)
// x_in : Matrix, size >= krnl_dim * n1, will be left multiplied by kernel_matrix(coord0, coord1)
// ldi, ldo : Leading dimensions of x_in and x_out, ldi >= n1, ldo >= n0
// xpt_dim : Dimension of extended point coordinate
// krnl_dim : Dimension of tensor kernel's return
// workbuf : H2P_dense_mat data structure for allocating working buffer
// krnl_param : Pointer to kernel function parameter array
// krnl_mv : Pointer to kernel matrix matvec function
// Output parameter:
// x_out : Matrix, size >= krnl_dim * n0, x_out += kernel_matrix(coord0, coord1) * x_in
// Note:
// For x_{in,out}, they are not stored as the original (n{0,1} * krnl_dim)-by-1 column vector,
// which can be viewed as n{0,1}-by-krnl_dim matrices. Instead, they are stored as krnl_dim-by-n{0,1}
// matrices so the krnl_mv can vectorize the load and store.
// Pad both point sets (and the input/output vectors) up to a multiple of
// SIMD_LEN inside workbuf, run the vectorized n-body matvec krnl_mv on the
// padded copies, then accumulate the first n0 entries of each output row
// back into x_out. See the comment block above for the argument contract.
void H2P_ext_krnl_mv(
const DTYPE *coord0, const int ld0, const int n0,
const DTYPE *coord1, const int ld1, const int n1,
const DTYPE *x_in, const int ldi, DTYPE * __restrict x_out, const int ldo,
const int xpt_dim, const int krnl_dim, H2P_dense_mat_p workbuf,
const void *krnl_param, kernel_mv_fptr krnl_mv
)
{
// Round both point counts up to multiples of SIMD_LEN
int n0_ext = (n0 + SIMD_LEN - 1) / SIMD_LEN * SIMD_LEN;
int n1_ext = (n1 + SIMD_LEN - 1) / SIMD_LEN * SIMD_LEN;
int n01_ext = n0_ext + n1_ext;
int buf_size = (xpt_dim + krnl_dim) * n01_ext;
H2P_dense_mat_resize(workbuf, 1, buf_size);
// Partition workbuf into [trg_coord | src_coord | x_in_ | x_out_]
DTYPE *trg_coord = workbuf->data;
DTYPE *src_coord = trg_coord + xpt_dim * n0_ext;
DTYPE *x_in_ = src_coord + xpt_dim * n1_ext;
DTYPE *x_out_ = x_in_ + n1_ext * krnl_dim;
// Copy coordinates and pad the extended part
for (int i = 0; i < xpt_dim; i++)
{
const DTYPE *c0_src = coord0 + i * ld0;
const DTYPE *c1_src = coord1 + i * ld1;
DTYPE *c0_dst = trg_coord + i * n0_ext;
DTYPE *c1_dst = src_coord + i * n1_ext;
memcpy(c0_dst, c0_src, sizeof(DTYPE) * n0);
memcpy(c1_dst, c1_src, sizeof(DTYPE) * n1);
// Use an extremely large coordinate so the inverse distance of these
// extra points to original points are numerically zero
// NOTE(review): 1e100 overflows to +inf if DTYPE is single precision — confirm
for (int j = n0; j < n0_ext; j++) c0_dst[j] = 1e100;
for (int j = n1; j < n1_ext; j++) c1_dst[j] = 1e100;
}
// Copy input vector and initialize output vector
// Must set the last n{0,1}_ext - n{0,1} elements in each row to 0,
// otherwise tensor kernel results might be incorrect
for (int i = 0; i < krnl_dim; i++)
{
const DTYPE *src = x_in + i * ldi;
DTYPE *dst = x_in_ + i * n1_ext;
memcpy(dst, src, sizeof(DTYPE) * n1);
for (int j = n1; j < n1_ext; j++) dst[j] = 0;
}
memset(x_out_, 0, sizeof(DTYPE) * n0_ext * krnl_dim);
// Do the n-body bi-matvec
krnl_mv(
trg_coord, n0_ext, n0_ext,
src_coord, n1_ext, n1_ext,
krnl_param, x_in_, x_out_
);
// Add results back to original output vectors (drop the padded tail)
for (int i = 0; i < krnl_dim; i++)
{
DTYPE *dst = x_out + i * ldo;
DTYPE *src = x_out_ + i * n0_ext;
#pragma omp simd
for (int j = 0; j < n0; j++) dst[j] += src[j];
}
}
// H2 matvec intermediate multiplication, calculate B_{ij} * (U_j^T * x_j)
// Need to calculate all B_{ij} matrices before using it
// Intermediate multiplication y1_i += B_{ij} * y0_j for the periodic case.
// B blocks are generated just-in-time: for each admissible pair the source
// coordinates are copied, shifted by the pair's periodic lattice shift, and
// either fed to the vectorized kernel matvec (krnl_mv) or evaluated into a
// dense B_{ij} followed by a GEMV (krnl_eval path).
void H2P_matvec_periodic_intmd_mult_JIT(H2Pack_p h2pack, const DTYPE *x, DTYPE *y)
{
int pt_dim = h2pack->pt_dim;
int xpt_dim = h2pack->xpt_dim;
int krnl_dim = h2pack->krnl_dim;
int n_point = h2pack->n_point;
int n_node = h2pack->n_node;
int n_thread = h2pack->n_thread;
int *node_level = h2pack->node_level;
int *pt_cluster = h2pack->pt_cluster;
int *mat_cluster = h2pack->mat_cluster;
int *B_nrow = h2pack->B_nrow;
int *B_ncol = h2pack->B_ncol;
int *B_p2i_rowptr = h2pack->B_p2i_rowptr;
int *B_p2i_colidx = h2pack->B_p2i_colidx;
int *B_p2i_val = h2pack->B_p2i_val;
DTYPE *coord = h2pack->coord;
DTYPE *per_adm_shifts = h2pack->per_adm_shifts;
void *krnl_param = h2pack->krnl_param;
H2P_dense_mat_p *y0 = h2pack->y0;
H2P_dense_mat_p *J_coord = h2pack->J_coord;
kernel_eval_fptr krnl_eval = h2pack->krnl_eval;
kernel_mv_fptr krnl_mv = h2pack->krnl_mv;
H2P_thread_buf_p *thread_buf = h2pack->tb;
H2P_matvec_init_y1(h2pack);
H2P_dense_mat_p *y1 = h2pack->y1;
#pragma omp parallel num_threads(n_thread)
{
int tid = omp_get_thread_num();
// NOTE: Bij and workbuf alias the same per-thread buffer (mat0). This is
// safe because Bij is used only on the krnl_eval path (krnl_mv == NULL)
// and workbuf only on the krnl_mv path — the two are mutually exclusive.
H2P_dense_mat_p Bij = thread_buf[tid]->mat0;
H2P_dense_mat_p workbuf = thread_buf[tid]->mat0;
H2P_dense_mat_p coord1_s = thread_buf[tid]->mat1;
DTYPE shift[8] = {0, 0, 0, 0, 0, 0, 0, 0};
thread_buf[tid]->timer = -get_wtime_sec();
// Zero all y1 accumulators before use
// NOTE(review): this pass zeroes only ncol elements per node while the loop
// below zeroes nrow*ncol again — the two memsets look redundant; confirm
#pragma omp for schedule(static)
for (int i = 0; i < n_node; i++)
{
if (y1[i]->ld == 0) continue;
memset(y1[i]->data, 0, sizeof(DTYPE) * y1[i]->ncol);
}
#pragma omp for schedule(dynamic)
for (int node0 = 0; node0 < n_node; node0++)
{
int level0 = node_level[node0];
H2P_dense_mat_p y1_0 = y1[node0];
memset(y1_0->data, 0, sizeof(DTYPE) * y1_0->nrow * y1_0->ncol);
// Walk all admissible pairs (node0, node1) in CSR order
for (int i = B_p2i_rowptr[node0]; i < B_p2i_rowptr[node0 + 1]; i++)
{
int node1 = B_p2i_colidx[i];
int pair_idx = B_p2i_val[i] - 1; // stored 1-based; 0 means "no pair"
int level1 = node_level[node1];
// Periodic lattice shift for this admissible pair
DTYPE *per_adm_shift_i = per_adm_shifts + pair_idx * pt_dim;
for (int k = 0; k < pt_dim; k++) shift[k] = per_adm_shift_i[k];
int Bij_nrow = B_nrow[pair_idx];
int Bij_ncol = B_ncol[pair_idx];
int node0_npt = Bij_nrow / krnl_dim;
int node1_npt = Bij_ncol / krnl_dim;
if (krnl_mv == NULL) H2P_dense_mat_resize(Bij, Bij_nrow, Bij_ncol);
// (1) Two nodes are of the same level, compress on both sides
if (level0 == level1)
{
H2P_dense_mat_copy(J_coord[node1], coord1_s);
H2P_shift_coord(coord1_s, shift, 1.0);
if (krnl_mv != NULL)
{
H2P_ext_krnl_mv(
J_coord[node0]->data, J_coord[node0]->ld, J_coord[node0]->ncol,
coord1_s->data, coord1_s->ld, coord1_s->ncol,
y0[node1]->data, node1_npt, y1[node0]->data, node0_npt,
xpt_dim, krnl_dim, workbuf, krnl_param, krnl_mv
);
} else {
krnl_eval(
J_coord[node0]->data, J_coord[node0]->ld, J_coord[node0]->ncol,
coord1_s->data, coord1_s->ld, coord1_s->ncol,
krnl_param, Bij->data, Bij->ld
);
CBLAS_GEMV(
CblasRowMajor, CblasNoTrans, Bij_nrow, Bij_ncol,
1.0, Bij->data, Bij->ld, y0[node1]->data, 1, 1.0, y1[node0]->data, 1
);
}
} // End of "if (level0 == level1)"
// (2) node1 is a leaf node and its level is higher than node0's level,
// only compressed on node0's side, node1's side don't need the
// downward sweep and can directly accumulate result to output vector
if (level0 > level1)
{
int pt_s1 = pt_cluster[node1 * 2];
int node1_npt = pt_cluster[node1 * 2 + 1] - pt_s1 + 1;
int vec_s1 = mat_cluster[node1 * 2];
H2P_dense_mat_resize(coord1_s, xpt_dim, node1_npt);
copy_matrix_block(sizeof(DTYPE), xpt_dim, node1_npt, coord + pt_s1, n_point, coord1_s->data, coord1_s->ld);
H2P_shift_coord(coord1_s, shift, 1.0);
if (krnl_mv != NULL)
{
// krnl_mv path indexes x by point offset (krnl_dim-major layout)
const DTYPE *x_spos = x + pt_s1;
H2P_ext_krnl_mv(
J_coord[node0]->data, J_coord[node0]->ld, J_coord[node0]->ncol,
coord1_s->data, coord1_s->ld, coord1_s->ncol,
x_spos, n_point, y1[node0]->data, node0_npt,
xpt_dim, krnl_dim, workbuf, krnl_param, krnl_mv
);
} else {
// krnl_eval path indexes x by matrix-row offset
const DTYPE *x_spos = x + vec_s1;
krnl_eval(
J_coord[node0]->data, J_coord[node0]->ld, J_coord[node0]->ncol,
coord1_s->data, coord1_s->ld, coord1_s->ncol,
krnl_param, Bij->data, Bij->ld
);
CBLAS_GEMV(
CblasRowMajor, CblasNoTrans, Bij_nrow, Bij_ncol,
1.0, Bij->data, Bij->ld, x_spos, 1, 1.0, y1[node0]->data, 1
);
}
} // End of "if (level0 > level1)"
// (3) node0 is a leaf node and its level is higher than node1's level,
// only compressed on node1's side, node0's side don't need the
// downward sweep and can directly accumulate result to output vector
if (level0 < level1)
{
int pt_s0 = pt_cluster[node0 * 2];
int node0_npt = pt_cluster[node0 * 2 + 1] - pt_s0 + 1;
int vec_s0 = mat_cluster[node0 * 2];
H2P_dense_mat_copy(J_coord[node1], coord1_s);
H2P_shift_coord(coord1_s, shift, 1.0);
if (krnl_mv != NULL)
{
DTYPE *y_spos = y + pt_s0;
H2P_ext_krnl_mv(
coord + pt_s0, n_point, node0_npt,
coord1_s->data, coord1_s->ld, coord1_s->ncol,
y0[node1]->data, node1_npt, y_spos, n_point,
xpt_dim, krnl_dim, workbuf, krnl_param, krnl_mv
);
} else {
DTYPE *y_spos = y + vec_s0;
krnl_eval(
coord + pt_s0, n_point, node0_npt,
coord1_s->data, coord1_s->ld, coord1_s->ncol,
krnl_param, Bij->data, Bij->ld
);
CBLAS_GEMV(
CblasRowMajor, CblasNoTrans, Bij_nrow, Bij_ncol,
1.0, Bij->data, Bij->ld, y0[node1]->data, 1, 1.0, y_spos, 1
);
}
} // End of "if (level0 < level1)"
} // End of node1 loop
} // End of node0 loop
thread_buf[tid]->timer += get_wtime_sec();
} // End of "#pragma omp parallel"
if (h2pack->print_timers == 1)
{
double max_t = 0.0, avg_t = 0.0, min_t = 19241112.0;
for (int i = 0; i < n_thread; i++)
{
double thread_i_timer = thread_buf[i]->timer;
avg_t += thread_i_timer;
max_t = MAX(max_t, thread_i_timer);
min_t = MIN(min_t, thread_i_timer);
}
avg_t /= (double) n_thread;
INFO_PRINTF("Matvec intermediate multiplication: min/avg/max thread wall-time = %.3lf, %.3lf, %.3lf (s)\n", min_t, avg_t, max_t);
}
}
// H2 matvec dense multiplication, calculate D_{ij} * x_j
// Need to calculate all D_{ij} matrices before using it
// Dense (near-field) multiplication y_i += D_{ij} * x_j for the periodic case.
// D blocks are generated just-in-time: inadmissible pairs beyond the first
// n_leaf_node diagonal (i,i) pairs carry a periodic lattice shift that is
// applied to the source coordinates before kernel evaluation.
void H2P_matvec_periodic_dense_mult_JIT(H2Pack_p h2pack, const DTYPE *x, DTYPE *y)
{
int pt_dim = h2pack->pt_dim;
int xpt_dim = h2pack->xpt_dim;
int krnl_dim = h2pack->krnl_dim;
int n_point = h2pack->n_point;
int n_node = h2pack->n_node;
int n_leaf_node = h2pack->n_leaf_node;
int n_thread = h2pack->n_thread;
int *pt_cluster = h2pack->pt_cluster;
int *mat_cluster = h2pack->mat_cluster;
int *D_nrow = h2pack->D_nrow;
int *D_ncol = h2pack->D_ncol;
int *D_p2i_rowptr = h2pack->D_p2i_rowptr;
int *D_p2i_colidx = h2pack->D_p2i_colidx;
int *D_p2i_val = h2pack->D_p2i_val;
DTYPE *coord = h2pack->coord;
DTYPE *per_inadm_shifts = h2pack->per_inadm_shifts;
void *krnl_param = h2pack->krnl_param;
kernel_eval_fptr krnl_eval = h2pack->krnl_eval;
kernel_mv_fptr krnl_mv = h2pack->krnl_mv;
H2P_thread_buf_p *thread_buf = h2pack->tb;
#pragma omp parallel num_threads(n_thread)
{
int tid = omp_get_thread_num();
// NOTE: Dij and workbuf alias the same per-thread buffer (mat0); they are
// used on mutually exclusive paths (krnl_eval vs. krnl_mv), so no conflict.
H2P_dense_mat_p Dij = thread_buf[tid]->mat0;
H2P_dense_mat_p workbuf = thread_buf[tid]->mat0;
H2P_dense_mat_p coord1_s = thread_buf[tid]->mat1;
DTYPE shift[8] = {0, 0, 0, 0, 0, 0, 0, 0};
thread_buf[tid]->timer = -get_wtime_sec();
#pragma omp for schedule(dynamic)
for (int node0 = 0; node0 < n_node; node0++)
{
int pt_s0 = pt_cluster[2 * node0];
int node0_npt = pt_cluster[2 * node0 + 1] - pt_s0 + 1;
// krnl_mv path uses point offsets; krnl_eval path uses matrix-row offsets
int y_offset = (krnl_mv == NULL) ? mat_cluster[node0 * 2] : pt_cluster[node0 * 2];
DTYPE *y_spos = y + y_offset;
// Walk all inadmissible pairs (node0, node1) in CSR order
for (int i = D_p2i_rowptr[node0]; i < D_p2i_rowptr[node0 + 1]; i++)
{
int node1 = D_p2i_colidx[i];
int pair_idx = D_p2i_val[i] - 1; // stored 1-based; 0 means "no pair"
int pt_s1 = pt_cluster[2 * node1];
int node1_npt = pt_cluster[2 * node1 + 1] - pt_s1 + 1;
int x_offset = (krnl_mv == NULL) ? mat_cluster[node1 * 2] : pt_cluster[node1 * 2];
const DTYPE *x_spos = x + x_offset;
int Dij_nrow = D_nrow[pair_idx];
int Dij_ncol = D_ncol[pair_idx];
if (krnl_mv == NULL) H2P_dense_mat_resize(Dij, Dij_nrow, Dij_ncol);
if (pair_idx < n_leaf_node)
{
// (i, i) pair, no shift
for (int k = 0; k < pt_dim; k++) shift[k] = 0.0;
} else {
// The (pair_idx - n_leaf_node)-th inadmissible pair, need shifting
DTYPE *per_inadm_shift_i = per_inadm_shifts + (pair_idx - n_leaf_node) * pt_dim;
for (int k = 0; k < pt_dim; k++) shift[k] = per_inadm_shift_i[k];
}
// Copy node1's coordinates and apply the periodic shift
H2P_dense_mat_resize(coord1_s, xpt_dim, node1_npt);
copy_matrix_block(sizeof(DTYPE), xpt_dim, node1_npt, coord + pt_s1, n_point, coord1_s->data, coord1_s->ld);
H2P_shift_coord(coord1_s, shift, 1.0);
if (krnl_mv != NULL)
{
H2P_ext_krnl_mv(
coord + pt_s0, n_point, node0_npt,
coord1_s->data, coord1_s->ld, coord1_s->ncol,
x_spos, n_point, y_spos, n_point,
xpt_dim, krnl_dim, workbuf, krnl_param, krnl_mv
);
} else {
krnl_eval(
coord + pt_s0, n_point, node0_npt,
coord1_s->data, coord1_s->ld, coord1_s->ncol,
krnl_param, Dij->data, Dij->ld
);
CBLAS_GEMV(
CblasRowMajor, CblasNoTrans, Dij_nrow, Dij_ncol,
1.0, Dij->data, Dij->ld, x_spos, 1, 1.0, y_spos, 1
);
}
} // End of i loop
} // End of node0 loop
thread_buf[tid]->timer += get_wtime_sec();
} // End of "pragma omp parallel"
if (h2pack->print_timers == 1)
{
double max_t = 0.0, avg_t = 0.0, min_t = 19241112.0;
for (int i = 0; i < n_thread; i++)
{
double thread_i_timer = thread_buf[i]->timer;
avg_t += thread_i_timer;
max_t = MAX(max_t, thread_i_timer);
min_t = MIN(min_t, thread_i_timer);
}
avg_t /= (double) n_thread;
INFO_PRINTF("Matvec dense multiplication: min/avg/max thread wall-time = %.3lf, %.3lf, %.3lf (s)\n", min_t, avg_t, max_t);
}
}
// H2 representation multiplies a column vector
// Full periodic H2 matvec y = K * x:
//   permute x -> forward transform -> intermediate (B) multiply + periodic
//   root-block correction -> backward transform -> dense (D) multiply ->
//   un-permute y. Only the just-in-time (BD_JIT == 1) path is implemented.
// When a vectorized kernel matvec exists and krnl_dim > 1, x/y are kept in
// transposed (krnl_dim-by-n_point) layout (need_trans) for the JIT kernels.
void H2P_matvec_periodic(H2Pack_p h2pack, const DTYPE *x, DTYPE *y)
{
double st, et;
int krnl_mat_size = h2pack->krnl_mat_size;
int n_thread = h2pack->n_thread;
int BD_JIT = h2pack->BD_JIT;
int krnl_dim = h2pack->krnl_dim;
int n_point = h2pack->n_point;
int need_trans = ((h2pack->krnl_mv != NULL) && (BD_JIT == 1) && (krnl_dim > 1));
DTYPE *xT = h2pack->xT;
DTYPE *yT = h2pack->yT;
DTYPE *pmt_x = h2pack->pmt_x;
DTYPE *pmt_y = h2pack->pmt_y;
double *timers = h2pack->timers;
size_t *mat_size = h2pack->mat_size;
H2P_thread_buf_p *thread_buf = h2pack->tb;
// JIT kernels read/write the transposed buffers when need_trans is set
DTYPE *x_ = need_trans ? xT : pmt_x;
DTYPE *y_ = need_trans ? yT : pmt_y;
if (BD_JIT != 1)
{
ERROR_PRINTF("Only support BD_JIT=1 in this function for the moment.\n");
return;
}
// 1. Forward permute the input vector
st = get_wtime_sec();
H2P_permute_vector_forward(h2pack, x, pmt_x);
et = get_wtime_sec();
timers[MV_VOP_TIMER_IDX] += et - st;
mat_size[MV_VOP_SIZE_IDX] += 2 * krnl_mat_size;
// 2. Reset y result to 0 and transpose x if necessary
st = get_wtime_sec();
#pragma omp parallel for simd
for (int i = 0; i < krnl_mat_size; i++)
{
pmt_y[i] = 0.0;
yT[i] = 0.0;
}
mat_size[MV_VOP_SIZE_IDX] += 2 * krnl_mat_size;
if (need_trans)
{
H2P_transpose_dmat(n_thread, n_point, krnl_dim, pmt_x, krnl_dim, xT, n_point);
mat_size[MV_VOP_SIZE_IDX] += 2 * krnl_mat_size;
}
et = get_wtime_sec();
timers[MV_VOP_TIMER_IDX] += et - st;
// 3. Forward transformation, calculate U_j^T * x_j
st = get_wtime_sec();
H2P_matvec_fwd_transform(h2pack, pmt_x);
et = get_wtime_sec();
timers[MV_FWD_TIMER_IDX] += et - st;
// 4. Intermediate multiplication, calculate B_{ij} * (U_j^T * x_j)
st = get_wtime_sec();
if (BD_JIT == 1)
{
if (need_trans) H2P_transpose_y0_from_krnldim(h2pack);
H2P_matvec_periodic_intmd_mult_JIT(h2pack, x_, y_);
if (need_trans) H2P_transpose_y1_to_krnldim(h2pack);
} else {
// "Lost butterfly"
ERROR_PRINTF("Only support BD_JIT=1 in this function for the moment.\n");
return;
}
// Multiply the periodic block for root node
// y1{root} = y1{root} + O * y0{root}; % y1{root} should be empty
int root_idx = h2pack->root_idx;
int root_J_npt = h2pack->J[root_idx]->length;
int per_blk_size = root_J_npt * krnl_dim;
H2P_dense_mat_p y0_root = h2pack->y0[root_idx];
H2P_dense_mat_p y1_root = h2pack->y1[root_idx];
H2P_dense_mat_resize(y1_root, 1, per_blk_size);
if (need_trans)
{
// Bring y0{root} back to point-major layout before applying per_blk
H2P_dense_mat_p y0_root_tmp = thread_buf[0]->mat0;
H2P_dense_mat_resize(y0_root_tmp, root_J_npt, krnl_dim);
H2P_transpose_dmat(1, krnl_dim, root_J_npt, y0_root->data, root_J_npt, y0_root_tmp->data, krnl_dim);
memcpy(y0_root->data, y0_root_tmp->data, sizeof(DTYPE) * per_blk_size);
}
CBLAS_GEMV(
CblasRowMajor, CblasNoTrans, per_blk_size, per_blk_size,
1.0, h2pack->per_blk, per_blk_size, y0_root->data, 1, 0.0, y1_root->data, 1
);
et = get_wtime_sec();
timers[MV_MID_TIMER_IDX] += et - st;
// 5. Backward transformation, calculate U_i * (B_{ij} * (U_j^T * x_j))
st = get_wtime_sec();
H2P_matvec_bwd_transform(h2pack, pmt_x, pmt_y);
et = get_wtime_sec();
timers[MV_BWD_TIMER_IDX] += et - st;
// 6. Dense multiplication, calculate D_i * x_i
st = get_wtime_sec();
if (BD_JIT == 1)
{
H2P_matvec_periodic_dense_mult_JIT(h2pack, x_, y_);
} else {
// "Lost butterfly"
ERROR_PRINTF("Only support BD_JIT=1 in this function for the moment.\n");
return;
}
et = get_wtime_sec();
timers[MV_DEN_TIMER_IDX] += et - st;
// 7. Sum yT partial results into y if needed
st = get_wtime_sec();
// We use xT here to hold the transpose of yT
if (need_trans)
{
H2P_transpose_dmat(n_thread, krnl_dim, n_point, yT, n_point, xT, krnl_dim);
#pragma omp parallel for simd
for (int i = 0; i < krnl_mat_size; i++) pmt_y[i] += xT[i];
mat_size[MV_VOP_SIZE_IDX] += 4 * krnl_mat_size;
}
et = get_wtime_sec();
timers[MV_VOP_TIMER_IDX] += et - st;
// 8. Backward permute the output vector
st = get_wtime_sec();
H2P_permute_vector_backward(h2pack, pmt_y, y);
et = get_wtime_sec();
timers[MV_VOP_TIMER_IDX] += et - st;
mat_size[MV_VOP_SIZE_IDX] += 2 * krnl_mat_size;
h2pack->n_matvec++;
}
|
par_cheby.c | /******************************************************************************
*
* Chebyshev setup and solve
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "_hypre_parcsr_mv.h"
#include "float.h"
/******************************************************************************
Chebyshev relaxation
Can specify order 1-4 (this is the order of the resid polynomial)- here we
explicitly code the coefficients (instead of
iteratively determining)
variant 0: standard chebyshev
this is rlx 11 if scale = 0, and 16 if scale == 1
variant 1: modified cheby: T(t)* f(t) where f(t) = (1-b/t)
this is rlx 15 if scale = 0, and 17 if scale == 1
ratio indicates the percentage of the whole spectrum to use (so .5
means half, and .1 means 10percent)
*******************************************************************************/
// Precompute the Chebyshev relaxation coefficients (and, if scale != 0, the
// diagonal scaling vector 1/sqrt(diag(A))). See the comment block above for
// the variant/fraction semantics. Outputs are allocated here; the caller
// owns and must free *coefs_ptr and *ds_ptr.
HYPRE_Int hypre_ParCSRRelax_Cheby_Setup(hypre_ParCSRMatrix *A, /* matrix to relax with */
HYPRE_Real max_eig,
HYPRE_Real min_eig,
HYPRE_Real fraction,
HYPRE_Int order, /* polynomial order */
HYPRE_Int scale, /* scale by diagonal?*/
HYPRE_Int variant,
HYPRE_Real **coefs_ptr,
HYPRE_Real **ds_ptr) /* output: 1/sqrt(diag) scaling, NULL unless scale */
{
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Real theta, delta;
HYPRE_Real den;
HYPRE_Real upper_bound, lower_bound;
HYPRE_Int j;
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Real *coefs = NULL;
HYPRE_Int cheby_order;
HYPRE_Real *ds_data = NULL;
HYPRE_Real diag;
/* u = u + p(A)r */
// Clamp supported polynomial orders to [1, 4]
if (order > 4)
order = 4;
if (order < 1)
order = 1;
coefs = hypre_CTAlloc(HYPRE_Real, order+1, HYPRE_MEMORY_HOST);
/* we are using the order of p(A) */
cheby_order = order -1;
/* make sure we are large enough - Adams et al. 2003 */
upper_bound = max_eig * 1.1;
/* lower_bound = max_eig/fraction; */
lower_bound = (upper_bound - min_eig)* fraction + min_eig;
/* theta and delta: center and half-width of the target interval */
theta = (upper_bound + lower_bound)/2;
delta = (upper_bound - lower_bound)/2;
if (variant == 1 )
{
switch ( cheby_order ) /* these are the corresponding cheby polynomials: u = u_o + s(A)r_0 - so order is
one less than resid poly: r(t) = 1 - t*s(t) */
{
case 0:
coefs[0] = 1.0/theta;
break;
case 1: /* (del - t + 2*th)/(th^2 + del*th) */
den = (theta*theta + delta*theta);
coefs[0] = (delta + 2*theta)/den;
coefs[1] = -1.0/den;
break;
case 2: /* (4*del*th - del^2 - t*(2*del + 6*th) + 2*t^2 + 6*th^2)/(2*del*th^2 - del^2*th - del^3 + 2*th^3)*/
den = 2*delta*theta*theta - delta*delta*theta - pow(delta,3) + 2*pow(theta,3);
coefs[0] = (4*delta*theta - pow(delta,2) + 6*pow(theta,2))/den;
coefs[1] = -(2*delta + 6*theta)/den;
coefs[2] = 2/den;
break;
case 3: /* -(6*del^2*th - 12*del*th^2 - t^2*(4*del + 16*th) + t*(12*del*th - 3*del^2 + 24*th^2) + 3*del^3 + 4*t^3 - 16*th^3)/(4*del*th^3 - 3*del^2*th^2 - 3*del^3*th + 4*th^4)*/
den = - (4*delta*pow(theta,3) - 3*pow(delta,2)*pow(theta,2) - 3*pow(delta,3)*theta + 4*pow(theta,4) );
coefs[0] = (6*pow(delta,2)*theta - 12*delta*pow(theta,2) + 3*pow(delta,3) - 16*pow(theta,3) )/den;
coefs[1] = (12*delta*theta - 3*pow(delta,2) + 24*pow(theta,2))/den;
coefs[2] = -( 4*delta + 16*theta)/den;
coefs[3] = 4/den;
break;
}
}
else /* standard chebyshev */
{
switch ( cheby_order ) /* these are the corresponding cheby polynomials: u = u_o + s(A)r_0 - so order is
one less than resid poly: r(t) = 1 - t*s(t) */
{
case 0:
coefs[0] = 1.0/theta;
break;
case 1: /* ( 2*t - 4*th)/(del^2 - 2*th^2) */
den = delta*delta - 2*theta*theta;
coefs[0] = -4*theta/den;
coefs[1] = 2/den;
break;
case 2: /* (3*del^2 - 4*t^2 + 12*t*th - 12*th^2)/(3*del^2*th - 4*th^3)*/
den = 3*(delta*delta)*theta - 4*(theta*theta*theta);
coefs[0] = (3*delta*delta - 12 *theta*theta)/den;
coefs[1] = 12*theta/den;
coefs[2] = -4/den;
break;
case 3: /*(t*(8*del^2 - 48*th^2) - 16*del^2*th + 32*t^2*th - 8*t^3 + 32*th^3)/(del^4 - 8*del^2*th^2 + 8*th^4)*/
den = pow(delta,4) - 8*delta*delta*theta*theta + 8*pow(theta,4);
coefs[0] = (32*pow(theta,3)- 16*delta*delta*theta)/den;
coefs[1] = (8*delta*delta - 48*theta*theta)/den;
coefs[2] = 32*theta/den;
coefs[3] = -8/den;
break;
}
}
*coefs_ptr = coefs;
if (scale)
{
/*grab 1/sqrt(diagonal) */
// A_diag_i[j] indexes the first entry of row j; CSR diagonal-first storage
// is assumed here (hypre convention) — the diagonal must be positive
ds_data = hypre_CTAlloc(HYPRE_Real, num_rows, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j,diag) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_rows; j++)
{
diag = A_diag_data[A_diag_i[j]];
ds_data[j] = 1/sqrt(diag);
}
}/* end of scaling code */
*ds_ptr = ds_data;
return hypre_error_flag;
}
// Apply one pass of Chebyshev relaxation: u += p(A) * (f - A*u), using the
// coefficients produced by hypre_ParCSRRelax_Cheby_Setup. If scale != 0 the
// iteration runs on the symmetrically scaled operator D^(-1/2) A D^(-1/2)
// with ds_data = 1/sqrt(diag(A)). v and r are caller-provided scratch
// vectors; `variant` only affects Setup (the coefficients), not this routine.
HYPRE_Int hypre_ParCSRRelax_Cheby_Solve(hypre_ParCSRMatrix *A, /* matrix to relax with */
                                        hypre_ParVector *f, /* right-hand side */
                                        HYPRE_Real *ds_data,
                                        HYPRE_Real *coefs,
                                        HYPRE_Int order, /* polynomial order */
                                        HYPRE_Int scale, /* scale by diagonal?*/
                                        HYPRE_Int variant,
                                        hypre_ParVector *u, /* initial/updated approximation */
                                        hypre_ParVector *v /* temporary vector */,
                                        hypre_ParVector *r /*another temp vector */ )
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
   HYPRE_Real *f_data = hypre_VectorData(hypre_ParVectorLocalVector(f));
   HYPRE_Real *v_data = hypre_VectorData(hypre_ParVectorLocalVector(v));
   HYPRE_Real *r_data = hypre_VectorData(hypre_ParVectorLocalVector(r));
   HYPRE_Int i, j;
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Real mult;
   HYPRE_Real *orig_u;
   HYPRE_Int cheby_order;
   HYPRE_Real *tmp_data;
   hypre_ParVector *tmp_vec;
   /* u = u + p(A)r */
   /* clamp supported polynomial orders to [1, 4], matching Setup */
   if (order > 4)
      order = 4;
   if (order < 1)
      order = 1;
   /* we are using the order of p(A) */
   cheby_order = order - 1;
   /* saved copy of the incoming u so the polynomial update can be added on */
   orig_u = hypre_CTAlloc(HYPRE_Real, num_rows, HYPRE_MEMORY_HOST);
   if (!scale)
   {
      /* get residual: r = f - A*u */
      hypre_ParVectorCopy(f, r);
      hypre_ParCSRMatrixMatvec(-1.0, A, u, 1.0, r);
      /* save u and seed the Horner recurrence: u = coefs[k]*r */
      /* (pragma added for consistency with the other per-row loops here) */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_rows; i++)
      {
         orig_u[i] = u_data[i];
         u_data[i] = r_data[i] * coefs[cheby_order];
      }
      /* Horner evaluation of p(A)r: u = coefs[i]*r + A*u */
      for (i = cheby_order - 1; i >= 0; i--)
      {
         hypre_ParCSRMatrixMatvec(1.0, A, u, 0.0, v);
         mult = coefs[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
         for (j = 0; j < num_rows; j++)
         {
            u_data[j] = mult * r_data[j] + v_data[j];
         }
      }
      /* u = u_orig + p(A)r */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_rows; i++)
      {
         u_data[i] = orig_u[i] + u_data[i];
      }
   }
   else /* scaling! */
   {
      /*grab 1/sqrt(diagonal) */
      tmp_vec = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                      hypre_ParCSRMatrixGlobalNumRows(A),
                                      hypre_ParCSRMatrixRowStarts(A));
      hypre_ParVectorInitialize(tmp_vec);
      hypre_ParVectorSetPartitioningOwner(tmp_vec, 0);
      tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(tmp_vec));
      /* get ds_data and get scaled residual: r = D^(-1/2)f -
       * D^(-1/2)A*u (tmp holds -A*u) */
      hypre_ParCSRMatrixMatvec(-1.0, A, u, 0.0, tmp_vec);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < num_rows; j++)
      {
         r_data[j] = ds_data[j] * (f_data[j] + tmp_data[j]);
      }
      /* save original u, then start
         the iteration by multiplying r by the cheby coef.*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < num_rows; j++)
      {
         orig_u[j] = u_data[j]; /* orig, unscaled u */
         u_data[j] = r_data[j] * coefs[cheby_order];
      }
      /* now do the other coefficients */
      for (i = cheby_order - 1; i >= 0; i--)
      {
         /* v = D^(-1/2)AD^(-1/2)u */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
         for (j = 0; j < num_rows; j++)
         {
            tmp_data[j] = ds_data[j] * u_data[j];
         }
         hypre_ParCSRMatrixMatvec(1.0, A, tmp_vec, 0.0, v);
         /* u_new = coef*r + v*/
         mult = coefs[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
         for (j = 0; j < num_rows; j++)
         {
            u_data[j] = mult * r_data[j] + ds_data[j] * v_data[j];
         }
      } /* end of cheby_order loop */
      /* now we have to scale u_data before adding it to u_orig*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < num_rows; j++)
      {
         u_data[j] = orig_u[j] + ds_data[j] * u_data[j];
      }
      hypre_ParVectorDestroy(tmp_vec);
   }/* end of scaling code */
   hypre_TFree(orig_u, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}
|
problem.p6.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
void evaluateBeta(double x, double y, double z, double *B, double *Bx, double *By, double *Bz){
double Bmin = 1.0;
double Bmax = 10.0;
double c2 = (Bmax-Bmin)/2; // coefficients to affect this transition
double c1 = (Bmax+Bmin)/2;
double c3 = 10.0; // how sharply (B)eta transitions
double xcenter = 0.50;
double ycenter = 0.50;
double zcenter = 0.50;
// calculate distance from center of the domain (0.5,0.5,0.5)
double r2 = pow((x-xcenter),2) + pow((y-ycenter),2) + pow((z-zcenter),2);
double r2x = 2.0*(x-xcenter);
double r2y = 2.0*(y-ycenter);
double r2z = 2.0*(z-zcenter);
//double r2xx = 2.0;
//double r2yy = 2.0;
//double r2zz = 2.0;
double r = pow(r2,0.5);
double rx = 0.5*r2x*pow(r2,-0.5);
double ry = 0.5*r2y*pow(r2,-0.5);
double rz = 0.5*r2z*pow(r2,-0.5);
//double rxx = 0.5*r2xx*pow(r2,-0.5) - 0.25*r2x*r2x*pow(r2,-1.5);
//double ryy = 0.5*r2yy*pow(r2,-0.5) - 0.25*r2y*r2y*pow(r2,-1.5);
//double rzz = 0.5*r2zz*pow(r2,-0.5) - 0.25*r2z*r2z*pow(r2,-1.5);
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
*B = c1+c2*tanh( c3*(r-0.25) );
*Bx = c2*c3*rx*(1-pow(tanh( c3*(r-0.25) ),2));
*By = c2*c3*ry*(1-pow(tanh( c3*(r-0.25) ),2));
*Bz = c2*c3*rz*(1-pow(tanh( c3*(r-0.25) ),2));
}
//------------------------------------------------------------------------------------------------------------------------------
void evaluateU(double x, double y, double z, double *U, double *Ux, double *Uy, double *Uz, double *Uxx, double *Uyy, double *Uzz, int isPeriodic){
// should be continuous in u, u', u'', u''', and u'''' to guarantee high order and periodic boundaries
// v(w) = ???
// u(x,y,z) = v(x)v(y)v(z)
// If Periodic, then the integral of the RHS should sum to zero.
// Setting shift=1.0 should ensure that the integrals of X, Y, or Z should sum to zero...
// That should(?) make the integrals of u,ux,uy,uz,uxx,uyy,uzz sum to zero and thus make the integral of f sum to zero
// If dirichlet, then w(0)=w(1) = 0.0
// Setting shift to 0 should ensure that U(x,y,z) = 0 on boundary
// u = ax^6 + bx^5 + cx^4 + dx^3 + ex^2 + fx + g
// ux = 6ax^5 + 5bx^4 + 4cx^3 + 3dx^2 + 2ex + f
// uxx = 30ax^4 + 20bx^3 + 12cx^2 + 6dx + 2e
// a = 42.0
// b = -126.0
// c = 105.0
// d = 0.0
// e = -21.0
// f = 0.0
// g = 1.0
double shift = 0.0;if(isPeriodic)shift= 1.0/21.0;
double X = 2.0*pow(x,6) - 6.0*pow(x,5) + 5.0*pow(x,4) - 1.0*pow(x,2) + shift;
double Y = 2.0*pow(y,6) - 6.0*pow(y,5) + 5.0*pow(y,4) - 1.0*pow(y,2) + shift;
double Z = 2.0*pow(z,6) - 6.0*pow(z,5) + 5.0*pow(z,4) - 1.0*pow(z,2) + shift;
double Xx = 12.0*pow(x,5) - 30.0*pow(x,4) + 20.0*pow(x,3) - 2.0*x;
double Yy = 12.0*pow(y,5) - 30.0*pow(y,4) + 20.0*pow(y,3) - 2.0*y;
double Zz = 12.0*pow(z,5) - 30.0*pow(z,4) + 20.0*pow(z,3) - 2.0*z;
double Xxx = 60.0*pow(x,4) - 120.0*pow(x,3) + 60.0*pow(x,2) - 2.0;
double Yyy = 60.0*pow(y,4) - 120.0*pow(y,3) + 60.0*pow(y,2) - 2.0;
double Zzz = 60.0*pow(z,4) - 120.0*pow(z,3) + 60.0*pow(z,2) - 2.0;
*U = X * Y * Z;
*Ux = Xx * Y * Z;
*Uy = X * Yy * Z;
*Uz = X * Y * Zz;
*Uxx = Xxx * Y * Z;
*Uyy = X * Yyy * Z;
*Uzz = X * Y * Zzz;
}
//------------------------------------------------------------------------------------------------------------------------------
// Fill every box of the level with the problem data for a*alpha*u - b*div(beta*grad(u)) = f:
// face-centered beta_i/beta_j/beta_k, cell-centered alpha, and the right-hand
// side F derived analytically from the manufactured solution evaluateU().
// The loops run one cell past each box dimension to cover the high faces.
void initialize_problem(level_type * level, double hLevel, double a, double b){
  level->h = hLevel;
  int box;
  for(box=0;box<level->num_my_boxes;box++){
    const int jStride = level->my_boxes[box].jStride;
    const int kStride = level->my_boxes[box].kStride;
    const int ghosts  = level->my_boxes[box].ghosts;
    const int dim_i   = level->my_boxes[box].dim;
    const int dim_j   = level->my_boxes[box].dim;
    const int dim_k   = level->my_boxes[box].dim;
    const int low_i   = level->my_boxes[box].low.i;
    const int low_j   = level->my_boxes[box].low.j;
    const int low_k   = level->my_boxes[box].low.k;
    int i,j,k;
    #ifdef _OPENMP
    #pragma omp parallel for private(k,j,i) collapse(3)
    #endif
    for(k=0;k<=dim_k;k++){ // <= to include the high face in each direction
    for(j=0;j<=dim_j;j++){
    for(i=0;i<=dim_i;i++){
      const double x = hLevel*( (double)(i+low_i) + 0.5 ); // +0.5 -> cell center
      const double y = hLevel*( (double)(j+low_j) + 0.5 );
      const double z = hLevel*( (double)(k+low_k) + 0.5 );
      const int ijk = (i+ghosts) + (j+ghosts)*jStride + (k+ghosts)*kStride;
      // constant-coefficient defaults; overwritten below when variable-coefficient
      double A  = 1.0;
      double B  = 1.0, Bx = 0.0, By = 0.0, Bz = 0.0;
      double Bi = 1.0, Bj = 1.0, Bk = 1.0;
      double U,Ux,Uy,Uz,Uxx,Uyy,Uzz;
      #ifdef STENCIL_VARIABLE_COEFFICIENT // variable coefficient problem...
      evaluateBeta(x-hLevel*0.5,y           ,z           ,&Bi,&Bx,&By,&Bz); // face-centered beta_i
      evaluateBeta(x           ,y-hLevel*0.5,z           ,&Bj,&Bx,&By,&Bz); // face-centered beta_j
      evaluateBeta(x           ,y           ,z-hLevel*0.5,&Bk,&Bx,&By,&Bz); // face-centered beta_k
      evaluateBeta(x           ,y           ,z           ,&B ,&Bx,&By,&Bz); // cell-centered beta
      #endif
      evaluateU(x,y,z,&U,&Ux,&Uy,&Uz,&Uxx,&Uyy,&Uzz, (level->boundary_condition.type == BC_PERIODIC) );
      const double F = a*A*U - b*( (Bx*Ux + By*Uy + Bz*Uz) + B*(Uxx + Uyy + Uzz) );
      level->my_boxes[box].vectors[VECTOR_BETA_I][ijk] = Bi;
      level->my_boxes[box].vectors[VECTOR_BETA_J][ijk] = Bj;
      level->my_boxes[box].vectors[VECTOR_BETA_K][ijk] = Bk;
      #ifdef VECTOR_ALPHA
      level->my_boxes[box].vectors[VECTOR_ALPHA ][ijk] = A;
      #endif
      //level->my_boxes[box].vectors[VECTOR_UTRUE ][ijk] = U; // obviated by Richardson analysis
      level->my_boxes[box].vectors[VECTOR_F     ][ijk] = F;
    }}}
  }
}
//------------------------------------------------------------------------------------------------------------------------------
|
scale.c | // Modified from the coop package. Copyright (c) 2015-2017 Drew Schmidt
#include "safeomp.h"
#include <Rdefines.h>
#include <stdbool.h>
#include "Rfloat.h"
#include "unroll.h"
// Center and scale column j of the m-by-n column-major matrix x in place.
// On return *colmean holds the column mean and *colvar the column standard
// deviation (sqrt of the (m-1)-denominator variance).
static inline void centerscalevec(const float_len_t j, const float_len_t m, float *restrict x, float *restrict colmean, float *restrict colvar)
{
  const float_len_t mj = m*j;
  const float norm = 1. / ((float) m-1);
  float mean = 0;
  float var = 0;

  // One-pass (Welford-style) accumulation of mean and sum of squared
  // deviations; this recurrence is inherently sequential, so no SIMD here.
  for (float_len_t i=0; i<m; i++)
  {
    const float delta = x[i + mj] - mean;
    mean += delta/((float) i+1);
    var += delta * (x[i + mj] - mean);
  }

  *colmean = mean;
  *colvar = sqrt(var * norm);

  // Remove mean and variance
  SAFE_FOR_SIMD
  for (float_len_t i=0; i<m; i++)
    x[i + mj] = (x[i + mj] - *colmean) / *colvar;
}
// Center column j of the m-by-n column-major matrix x in place and return
// the removed column mean.
static inline float centervec(const float_len_t j, const float_len_t m, float *x)
{
  const float_len_t mj = m*j;
  const float inv_m = 1. / ((float) m);
  float mean = 0;

  // Accumulate x[i]/m rather than sum-then-divide (keeps the running sum small)
  SAFE_FOR_SIMD
  for (float_len_t i=0; i<m; i++)
    mean += x[i + mj] * inv_m;

  // Subtract the mean from every element of the column
  SAFE_FOR_SIMD
  for (float_len_t i=0; i<m; i++)
    x[i + mj] -= mean;

  return mean;
}
// Scale column j of the m-by-n column-major matrix x in place by its
// root-mean-square (with m-1 denominator, computed about zero — no
// centering) and return the divisor.
static inline float scalevec(const float_len_t j, const float_len_t m, float *x)
{
  const float_len_t mj = m*j;
  const float inv_df = 1./((float) m-1);
  float rms = 0;

  // Accumulate sum(x^2)/(m-1), then take the square root
  SAFE_FOR_SIMD
  for (float_len_t i=0; i<m; i++)
  {
    const float xi = x[i + mj];
    rms += xi*xi*inv_df;
  }
  rms = sqrt(rms);

  // Divide the column through by the RMS value
  SAFE_FOR_SIMD
  for (float_len_t i=0; i<m; i++)
    x[i + mj] /= rms;

  return rms;
}
// Center and/or scale the columns of the m-by-n column-major matrix x in
// place.  Column means are written to colmeans (when centerx) and column
// standard deviations / RMS values to colvars (when scalex); the caller
// supplies those buffers (they may be NULL when the corresponding flag is
// false).  Returns 0.
static inline int scaler(const bool centerx, const bool scalex, const float_len_t m, const float_len_t n, float *restrict x, float *restrict colmeans, float *restrict colvars)
{
  if (m == 0 || n == 0)
    return 0;
  
  // Doing both at once, if needed, is more performant
  if (centerx && scalex)
  {
    #pragma omp parallel for shared(x) if (m*n > OMP_MIN_SIZE)
    for (float_len_t j=0; j<n; j++)
    {
      // BUGFIX: colmean/colvar must be declared inside the parallel loop so
      // each OpenMP thread gets a private copy.  Declaring them before the
      // pragma made them shared, and concurrent writes through &colmean and
      // &colvar raced, corrupting both the returned statistics and the
      // scaled matrix.
      float colmean;
      float colvar;
      centerscalevec(j, m, x, &colmean, &colvar);
      colmeans[j] = colmean;
      colvars[j] = colvar;
    }
  }
  else if (centerx)
  {
    #pragma omp parallel for shared(x) if (m*n > OMP_MIN_SIZE)
    for (float_len_t j=0; j<n; j++)
      colmeans[j] = centervec(j, m, x);
  }
  else if (scalex) // RMSE
  {
    #pragma omp parallel for shared(x) if (m*n > OMP_MIN_SIZE)
    for (float_len_t j=0; j<n; j++)
      colvars[j] = scalevec(j, m, x);
  }
  
  return 0;
}
// .Call entry point: return a centered/scaled copy of the float32 matrix x.
// center_ and scale_ are R logicals (passed as INTSXP).  The result is a
// "float32" S4 object; when centering/scaling is requested, the column
// means / scale factors are attached as "scaled:center" / "scaled:scale"
// attributes (also float32 vectors), mirroring base R's scale().
SEXP R_scale_spm(SEXP x, SEXP center_, SEXP scale_)
{
  SEXP ret;
  SEXP ret_s4_class, cm_s4_class, cv_s4_class;
  SEXP ret_s4, cm_s4, cv_s4;
  SEXP cm, cv;
  const float_len_t m = NROWS(x);
  const float_len_t n = NCOLS(x);
  const bool center = INTEGER(center_)[0];
  const bool scale = INTEGER(scale_)[0];
  int ptct = 0;  // running count of PROTECTed SEXPs, released in one UNPROTECT
  float *colmeans, *colvars;
  // Work on a copy so the caller's matrix is left untouched
  PROTECT(ret = newmat(m, n));
  ptct++;
  memcpy(DATA(ret), DATA(x), (size_t)m*n*sizeof(float));
  if (center)
  {
    PROTECT(cm = newvec(n));
    ptct++;
    colmeans = DATA(cm);
  }
  else
  {
    // NULL buffers are fine: scaler() never touches colmeans unless centering
    cm = NULL;
    colmeans = NULL;
  }
  if (scale)
  {
    PROTECT(cv = newvec(n));
    ptct++;
    colvars = DATA(cv);
  }
  else
  {
    cv = NULL;
    colvars = NULL;
  }
  // Center/scale in place on the copy; fills colmeans/colvars as requested
  scaler(center, scale, m, n, DATA(ret), colmeans, colvars);
  // Wrap the raw data in a float32 S4 object
  PROTECT(ret_s4_class = MAKE_CLASS("float32"));
  PROTECT(ret_s4 = NEW_OBJECT(ret_s4_class));
  ptct += 2;
  SET_SLOT(ret_s4, install("Data"), ret);
  if (center)
  {
    PROTECT(cm_s4_class = MAKE_CLASS("float32"));
    PROTECT(cm_s4 = NEW_OBJECT(cm_s4_class));
    ptct += 2;
    SET_SLOT(cm_s4, install("Data"), cm);
    setAttrib(ret_s4, install("scaled:center"), cm_s4);
  }
  if (scale)
  {
    PROTECT(cv_s4_class = MAKE_CLASS("float32"));
    PROTECT(cv_s4 = NEW_OBJECT(cv_s4_class));
    ptct += 2;
    SET_SLOT(cv_s4, install("Data"), cv);
    setAttrib(ret_s4, install("scaled:scale"), cv_s4);
  }
  UNPROTECT(ptct);
  return ret_s4;
}
|
effect.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE FFFFF FFFFF EEEEE CCCC TTTTT %
% E F F E C T %
% EEE FFF FFF EEE C T %
% E F F E C T %
% EEEEE F F EEEEE CCCC T %
% %
% %
% MagickCore Image Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/constitute.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/montage.h"
#include "MagickCore/morphology.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/shear.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/threshold.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveBlurImage() adaptively blurs the image by blurring less
% intensely near image edges and more intensely far from edges. We blur the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveBlurImage() selects a suitable radius for you.
%
% The format of the AdaptiveBlurImage method is:
%
% Image *AdaptiveBlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Adaptively blur the image: every pixel is convolved with one of a set of
  Gaussian kernels whose width is chosen per pixel from an edge-strength
  image, so strong edges receive little blur and flat regions the full blur.
*/
MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define AdaptiveBlurImageTag "Convolve/Image"
#define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *blur_view,
    *edge_view,
    *image_view;

  double
    normalize,
    **kernel;

  Image
    *blur_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    A (near) zero sigma is a no-op: return the unmodified clone.
  */
  if (fabs(sigma) < MagickEpsilon)
    return(blur_image);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, blur, and level again.
    The resulting edge image drives the per-pixel kernel selection below.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image,exception);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image,exception);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.  Only the
    even-indexed slots are populated: kernel[i] is a (width-i) x (width-i)
    Gaussian whose taps sum to 1.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) memset(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory(
      (size_t) (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    /*
      Fold any normalization residue into the center tap so the kernel sums
      to exactly 1.
    */
    kernel[i][(k-1)/2]+=(double) (1.0-normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  if (i < (ssize_t) width)
    {
      /*
        A kernel allocation failed: release whatever was acquired so far.
      */
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively blur image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,blur_image->rows,1)
#endif
  for (y=0; y < (ssize_t) blur_image->rows; y++)
  {
    const Quantum
      *magick_restrict r;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) blur_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      ssize_t
        i;

      ssize_t
        center,
        j;

      /*
        Map edge strength to a kernel index: strong edges give a large j
        (narrow kernel), flat regions a small j (wide kernel).  j is clamped
        to [0,width] and forced even to match the allocated kernel slots.
      */
      j=CastDoubleToLong(ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5));
      if (j < 0)
        j=0;
      else
        if (j > (ssize_t) width)
          j=(ssize_t) width;
      if ((j & 0x01) != 0)
        j--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
        (ssize_t) ((width-j)/2L),width-j,width-j,exception);
      if (p == (const Quantum *) NULL)
        break;
      /*
        Offset of the central pixel within the (width-j) x (width-j) tile.
      */
      center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
        GetPixelChannels(image)*((width-j)/2);
      for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        const double
          *magick_restrict k;

        const Quantum
          *magick_restrict pixels;

        ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            /*
              Channel is copy-only: pass the center pixel through unchanged.
            */
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        k=kernel[j];
        pixels=p;
        pixel=0.0;
        gamma=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (v=0; v < (ssize_t) (width-j); v++)
            {
              for (u=0; u < (ssize_t) (width-j); u++)
              {
                pixel+=(*k)*pixels[i];
                gamma+=(*k);
                k++;
                pixels+=GetPixelChannels(image);
              }
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each tap by its alpha so transparent pixels
          do not bleed color into the result.
        */
        for (v=0; v < (ssize_t) (width-j); v++)
        {
          for (u=0; u < (ssize_t) (width-j); u++)
          {
            alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
            pixel+=(*k)*alpha*pixels[i];
            gamma+=(*k)*alpha;
            k++;
            pixels+=GetPixelChannels(image);
          }
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(blur_image);
      r+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          Atomically bump the shared progress counter before reporting.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e S h a r p e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveSharpenImage() adaptively sharpens the image by sharpening more
% intensely near image edges and less intensely far from edges. We sharpen the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you.
%
% The format of the AdaptiveSharpenImage method is:
%
% Image *AdaptiveSharpenImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Adaptively sharpen the image: every pixel is convolved with one of a set of
  sharpening kernels whose width is chosen per pixel from an edge-strength
  image, so edges are sharpened more intensely than flat regions.
*/
MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define AdaptiveSharpenImageTag "Convolve/Image"
#define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *sharp_view,
    *edge_view,
    *image_view;

  double
    normalize,
    **kernel;

  Image
    *sharp_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sharp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sharp_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    A (near) zero sigma is a no-op: return the unmodified clone.
  */
  if (fabs(sigma) < MagickEpsilon)
    return(sharp_image);
  if (SetImageStorageClass(sharp_image,DirectClass,exception) == MagickFalse)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, sharp, and level again.
    The resulting edge image drives the per-pixel kernel selection below.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image,exception);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image,exception);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.  Only the
    even-indexed slots are populated: kernel[i] is a (width-i) x (width-i)
    sharpening kernel (negated Gaussian surround with a positive center).
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) memset(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
      (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    /*
      Set the center tap to -2*normalize (normalize is negative here) so the
      kernel's taps sum to a positive -normalize, yielding sharpening.
    */
    kernel[i][(k-1)/2]=(double) ((-2.0)*normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  if (i < (ssize_t) width)
    {
      /*
        A kernel allocation failed: release whatever was acquired so far.
      */
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively sharpen image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  sharp_view=AcquireAuthenticCacheView(sharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sharp_image,sharp_image->rows,1)
#endif
  for (y=0; y < (ssize_t) sharp_image->rows; y++)
  {
    const Quantum
      *magick_restrict r;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1,
      exception);
    if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) sharp_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      ssize_t
        i;

      ssize_t
        center,
        j;

      /*
        Map edge strength to a kernel index: j is clamped to [0,width] and
        forced even to match the allocated kernel slots.
      */
      j=CastDoubleToLong(ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5));
      if (j < 0)
        j=0;
      else
        if (j > (ssize_t) width)
          j=(ssize_t) width;
      if ((j & 0x01) != 0)
        j--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
        (ssize_t) ((width-j)/2L),width-j,width-j,exception);
      if (p == (const Quantum *) NULL)
        break;
      /*
        Offset of the central pixel within the (width-j) x (width-j) tile.
      */
      center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
        GetPixelChannels(image)*((width-j)/2);
      for (i=0; i < (ssize_t) GetPixelChannels(sharp_image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          sharp_traits,
          traits;

        const double
          *magick_restrict k;

        const Quantum
          *magick_restrict pixels;

        ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        sharp_traits=GetPixelChannelTraits(sharp_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (sharp_traits == UndefinedPixelTrait))
          continue;
        if ((sharp_traits & CopyPixelTrait) != 0)
          {
            /*
              Channel is copy-only: pass the center pixel through unchanged.
            */
            SetPixelChannel(sharp_image,channel,p[center+i],q);
            continue;
          }
        k=kernel[j];
        pixels=p;
        pixel=0.0;
        gamma=0.0;
        if ((sharp_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (v=0; v < (ssize_t) (width-j); v++)
            {
              for (u=0; u < (ssize_t) (width-j); u++)
              {
                pixel+=(*k)*pixels[i];
                gamma+=(*k);
                k++;
                pixels+=GetPixelChannels(image);
              }
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each tap by its alpha so transparent pixels
          do not bleed color into the result.
        */
        for (v=0; v < (ssize_t) (width-j); v++)
        {
          for (u=0; u < (ssize_t) (width-j); u++)
          {
            alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
            pixel+=(*k)*alpha*pixels[i];
            gamma+=(*k)*alpha;
            k++;
            pixels+=GetPixelChannels(image);
          }
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(sharp_image);
      r+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          Atomically bump the shared progress counter before reporting.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sharp_image->type=image->type;
  sharp_view=DestroyCacheView(sharp_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    sharp_image=DestroyImage(sharp_image);
  return(sharp_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlurImage() blurs an image. We convolve the image with a Gaussian operator
% of the given radius and standard deviation (sigma). For reasonable results,
% the radius should be larger than sigma. Use a radius of 0 and BlurImage()
% selects a suitable radius for you.
%
% The format of the BlurImage method is:
%
% Image *BlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Blur the image with a Gaussian operator of the given radius and standard
  deviation.  Tries the OpenCL-accelerated path first (when built with
  support), otherwise performs a separable convolution: one horizontal blur
  pass plus one rotated 90 degrees.
*/
MagickExport Image *BlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    kernel_geometry[MagickPathExtent];

  Image
    *result;

  KernelInfo
    *info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Accelerated path; a NULL result falls through to the CPU implementation.
  */
  result=AccelerateBlurImage(image,radius,sigma,exception);
  if (result != (Image *) NULL)
    return(result);
#endif
  (void) FormatLocaleString(kernel_geometry,MagickPathExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  info=AcquireKernelInfo(kernel_geometry,exception);
  if (info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  result=ConvolveImage(image,info,exception);
  info=DestroyKernelInfo(info);
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B i l a t e r a l B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BilateralBlurImage() is a non-linear, edge-preserving, and noise-reducing
% smoothing filter for images. It replaces the intensity of each pixel with
% a weighted average of intensity values from nearby pixels. This weight is
% based on a Gaussian distribution. The weights depend not only on Euclidean
% distance of pixels, but also on the radiometric differences (e.g., range
% differences, such as color intensity, depth distance, etc.). This preserves
% sharp edges.
%
% The format of the BilateralBlurImage method is:
%
% Image *BilateralBlurImage(const Image *image,const size_t width,
% const size_t height,const double intensity_sigma,
% const double spatial_sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the width of the neighborhood in pixels.
%
% o height: the height of the neighborhood in pixels.
%
% o intensity_sigma: sigma in the intensity space. A larger value means
% that farther colors within the pixel neighborhood (see spatial_sigma)
% will be mixed together, resulting in larger areas of semi-equal color.
%
% o spatial_sigma: sigma in the coordinate space. A larger value means that
% farther pixels influence each other as long as their colors are close
% enough (see intensity_sigma ). When the neigborhood diameter is greater
% than zero, it specifies the neighborhood size regardless of
% spatial_sigma. Otherwise, the neigborhood diameter is proportional to
% spatial_sigma.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double BlurDistance(const ssize_t x,const ssize_t y,
const ssize_t u,const ssize_t v)
{
return(sqrt(((double) x-u)*((double) x-u)+((double) y-v)*((double) y-v)));
}
/*
  Normalized 1-D Gaussian weight for offset x with the given sigma; the
  reciprocals are computed via PerceptibleReciprocal() to avoid dividing by
  a vanishing sigma.
*/
static inline double BlurGaussian(const double x,const double sigma)
{
  const double gauss=exp(-((double) x*x)*PerceptibleReciprocal(2.0*sigma*
    sigma));
  return(gauss*PerceptibleReciprocal(Magick2PI*sigma*sigma));
}
/*
  Release a bilateral-blur weight set: entries 0..number_threads inclusive
  (the extra slot is the shared spatial Gaussian table) and then the pointer
  array itself.  Returns NULL for convenient assignment.
*/
static double **DestroyBilateralThreadSet(const ssize_t number_threads,
  double **weights)
{
  ssize_t
    n;

  assert(weights != (double **) NULL);
  for (n=0; n <= (ssize_t) number_threads; n++)
  {
    if (weights[n] != (double *) NULL)
      weights[n]=(double *) RelinquishMagickMemory(weights[n]);
  }
  weights=(double **) RelinquishMagickMemory(weights);
  return(weights);
}
/*
  Allocate number_threads+1 buffers of width*height doubles: one scratch
  weight buffer per worker thread plus one extra slot used for the shared
  spatial Gaussian table.  Returns NULL on allocation failure; any partially
  acquired buffers are released.
*/
static double **AcquireBilateralThreadSet(const size_t number_threads,
  const size_t width,const size_t height)
{
  double
    **weights;

  ssize_t
    i;

  weights=(double **) AcquireQuantumMemory(number_threads+1,sizeof(*weights));
  if (weights == (double **) NULL)
    return((double **) NULL);
  /*
    BUGFIX: zero all number_threads+1 pointer slots.  Previously only
    number_threads slots were cleared, so if a later buffer allocation
    failed, DestroyBilateralThreadSet() (which walks i <= number_threads)
    could free an uninitialized pointer.
  */
  (void) memset(weights,0,(number_threads+1)*sizeof(*weights));
  for (i=0; i <= (ssize_t) number_threads; i++)
  {
    weights[i]=(double *) AcquireQuantumMemory(width,height*sizeof(**weights));
    if (weights[i] == (double *) NULL)
      return(DestroyBilateralThreadSet(number_threads,weights));
  }
  return(weights);
}
MagickExport Image *BilateralBlurImage(const Image *image,const size_t width,
const size_t height,const double intensity_sigma,const double spatial_sigma,
ExceptionInfo *exception)
{
#define MaxIntensity (255)
#define BilateralBlurImageTag "Blur/Image"
CacheView
*blur_view,
*image_view;
double
intensity_gaussian[2*(MaxIntensity+1)],
*spatial_gaussian,
**weights;
Image
*blur_image;
MagickBooleanType
status;
MagickOffsetType
progress;
OffsetInfo
mid;
ssize_t
u;
ssize_t
n,
number_threads,
v;
ssize_t
i,
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
blur_image=CloneImage(image,0,0,MagickTrue,exception);
if (blur_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
{
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
weights=AcquireBilateralThreadSet(number_threads,width,height);
if (weights == (double **) NULL)
{
blur_image=DestroyImage(blur_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
for (i=(-MaxIntensity); i < MaxIntensity; i++)
intensity_gaussian[i+MaxIntensity]=BlurGaussian((double) i,intensity_sigma);
spatial_gaussian=weights[number_threads];
n=0;
mid.x=(ssize_t) (width/2L);
mid.y=(ssize_t) (height/2L);
for (v=0; v < (ssize_t) height; v++)
for (u=0; u < (ssize_t) width; u++)
spatial_gaussian[n++]=BlurGaussian(BlurDistance(0,0,u-mid.x,v-mid.y),
spatial_sigma);
/*
Bilateral blur image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,blur_image,blur_image->rows,1)
#endif
for (y=0; y < (ssize_t) blur_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) blur_image->columns; x++)
{
double
gamma,
pixel;
const Quantum
*magick_restrict p,
*magick_restrict r;
ssize_t
i,
u;
ssize_t
n,
v;
/*
Tonal weighting preserves edges while smoothing in the flat regions.
*/
p=GetCacheViewVirtualPixels(image_view,x-mid.x,y-mid.y,width,height,
exception);
if (p == (const Quantum *) NULL)
break;
p+=(ssize_t) GetPixelChannels(image)*width*mid.y+GetPixelChannels(image)*
mid.x;
n=0;
for (v=0; v < (ssize_t) height; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
double
intensity;
r=p+(ssize_t) GetPixelChannels(image)*(ssize_t) width*(mid.y-v)+
GetPixelChannels(image)*(mid.x-u);
intensity=ScaleQuantumToChar(GetPixelIntensity(image,r))-
(double) ScaleQuantumToChar(GetPixelIntensity(image,p));
if ((intensity >= -MaxIntensity) && (intensity <= MaxIntensity))
weights[id][n]=intensity_gaussian[(ssize_t) intensity+MaxIntensity]*
spatial_gaussian[n];
else
weights[id][n]=BlurGaussian(intensity,intensity_sigma)*
BlurGaussian(BlurDistance(x,y,x+u-mid.x,y+v-mid.y),spatial_sigma);
n++;
}
}
for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++)
{
PixelChannel
channel;
PixelTrait
blur_traits,
traits;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
blur_traits=GetPixelChannelTraits(blur_image,channel);
if ((traits == UndefinedPixelTrait) ||
(blur_traits == UndefinedPixelTrait))
continue;
if ((blur_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(blur_image,channel,p[i],q);
continue;
}
pixel=0.0;
gamma=0.0;
n=0;
if ((blur_traits & BlendPixelTrait) == 0)
{
/*
No alpha blending.
*/
for (v=0; v < (ssize_t) height; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
r=p+(ssize_t) GetPixelChannels(image)*width*(mid.y-v)+
GetPixelChannels(image)*(mid.x-u);
pixel+=weights[id][n]*r[i];
gamma+=weights[id][n];
n++;
}
}
SetPixelChannel(blur_image,channel,ClampToQuantum(
PerceptibleReciprocal(gamma)*pixel),q);
continue;
}
/*
Alpha blending.
*/
for (v=0; v < (ssize_t) height; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
double
alpha,
beta;
r=p+(ssize_t) GetPixelChannels(image)*width*(mid.y-v)+
GetPixelChannels(image)*(mid.x-u);
alpha=(double) (QuantumScale*GetPixelAlpha(image,p));
beta=(double) (QuantumScale*GetPixelAlpha(image,r));
pixel+=weights[id][n]*r[i];
gamma+=weights[id][n]*alpha*beta;
n++;
}
}
SetPixelChannel(blur_image,channel,ClampToQuantum(
PerceptibleReciprocal(gamma)*pixel),q);
}
q+=GetPixelChannels(blur_image);
}
if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,BilateralBlurImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
blur_image->type=image->type;
blur_view=DestroyCacheView(blur_view);
image_view=DestroyCacheView(image_view);
weights=DestroyBilateralThreadSet(number_threads,weights);
if (status == MagickFalse)
blur_image=DestroyImage(blur_image);
return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n v o l v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvolveImage() applies a custom convolution kernel to the image.
%
% The format of the ConvolveImage method is:
%
% Image *ConvolveImage(const Image *image,const KernelInfo *kernel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o kernel: the filtering kernel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConvolveImage(const Image *image,
  const KernelInfo *kernel_info,ExceptionInfo *exception)
{
  /*
    Apply a custom convolution kernel to the image.  When OpenCL support is
    compiled in, try the accelerated path first and fall back to the generic
    morphology machinery otherwise.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  {
    Image
      *convolve_image;

    convolve_image=AccelerateConvolveImage(image,kernel_info,exception);
    if (convolve_image != (Image *) NULL)
      return(convolve_image);
  }
#endif
  return(MorphologyImage(image,ConvolveMorphology,1,kernel_info,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s p e c k l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DespeckleImage() reduces the speckle noise in an image while preserving the
% edges of the original image. A speckle removing filter uses a complementary
% hulling technique (raising pixels that are darker than their surrounding
% neighbors, then complementarily lowering pixels that are brighter than their
% surrounding neighbors) to reduce the speckle index of that image (reference
% Crimmins speckle removal).
%
% The format of the DespeckleImage method is:
%
% Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Hull() performs one hulling step of the Crimmins speckle-removal filter.
  f and g are channel buffers of size (columns+2) x (rows+2): the image data
  occupies the interior, surrounded by a one-pixel zero border so neighbor
  accesses never leave the buffer.  (x_offset,y_offset) selects the neighbor
  direction; polarity > 0 raises pixels that are darker than that neighbor,
  polarity <= 0 lowers pixels that are brighter.  The first pass writes its
  intermediate result into g; the second pass (which also consults the
  opposite-direction neighbor) writes the final result back into f.
*/
static void Hull(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g)
{
  Quantum
    *p,   /* source buffer, advanced past the top padding row */
    *q,   /* destination buffer, advanced past the top padding row */
    *r,   /* neighbor in the (x_offset,y_offset) direction */
    *s;   /* neighbor in the opposite direction (second pass only) */

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(f != (Quantum *) NULL);
  assert(g != (Quantum *) NULL);
  /* Skip the first padded row; r points at the directional neighbor. */
  p=f+(columns+2);
  q=g+(columns+2);
  r=p+(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickRealType
      v;

    ssize_t
      i,
      x;

    /*
      Index of the first interior pixel of row y within the padded buffer:
      y*(columns+2)+1 == (2*y+1)+y*columns.
    */
    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        /* Nudge the pixel up one char-quantum if the neighbor exceeds it
           by at least two char-quanta. */
        if ((MagickRealType) r[i] >= (v+ScaleCharToQuantum(2)))
          v+=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] <= (v-ScaleCharToQuantum(2)))
          v-=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
  }
  /*
    Second (complementary) pass: read the intermediate result from g, compare
    against both the forward (r) and backward (s) neighbors, and write the
    final values back into f.
  */
  p=f+(columns+2);
  q=g+(columns+2);
  r=q+(y_offset*((ssize_t) columns+2)+x_offset);
  s=q-(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    ssize_t
      i,
      x;

    MagickRealType
      v;

    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        /* Only raise when the backward neighbor is clearly brighter AND the
           forward neighbor is brighter at all: preserves edges. */
        if (((MagickRealType) s[i] >= (v+ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] > v))
          v+=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] <= (v-ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] < v))
          v-=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
  }
}
/*
  DespeckleImage() reduces speckle noise while preserving edges, one channel
  at a time: each channel is copied into a zero-padded (columns+2) x (rows+2)
  scratch buffer, run through eight Hull() passes (four neighbor directions,
  each applied with raising and lowering polarity), then copied back into the
  output image.
*/
MagickExport Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
{
#define DespeckleImageTag "Despeckle/Image"

  CacheView
    *despeckle_view,
    *image_view;

  Image
    *despeckle_image;

  MagickBooleanType
    status;

  MemoryInfo
    *buffer_info,
    *pixel_info;

  Quantum
    *magick_restrict buffer,
    *magick_restrict pixels;

  ssize_t
    i;

  size_t
    length;

  /* The four hulling directions: E, S, SE, SW (applied both ways). */
  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};

  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  despeckle_image=AccelerateDespeckleImage(image,exception);
  if (despeckle_image != (Image *) NULL)
    return(despeckle_image);
#endif
  despeckle_image=CloneImage(image,0,0,MagickTrue,exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(despeckle_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffer.
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
  buffer_info=AcquireVirtualMemory(length,sizeof(*buffer));
  if ((pixel_info == (MemoryInfo *) NULL) ||
      (buffer_info == (MemoryInfo *) NULL))
    {
      /* Release whichever allocation succeeded before throwing. */
      if (buffer_info != (MemoryInfo *) NULL)
        buffer_info=RelinquishVirtualMemory(buffer_info);
      if (pixel_info != (MemoryInfo *) NULL)
        pixel_info=RelinquishVirtualMemory(pixel_info);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info);
  buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info);
  /*
    Reduce speckle in the image.
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel
      channel;

    PixelTrait
      despeckle_traits,
      traits;

    ssize_t
      k,
      x;

    ssize_t
      j,
      y;

    if (status == MagickFalse)
      continue;
    channel=GetPixelChannelChannel(image,i);
    traits=GetPixelChannelTraits(image,channel);
    despeckle_traits=GetPixelChannelTraits(despeckle_image,channel);
    if ((traits == UndefinedPixelTrait) ||
        (despeckle_traits == UndefinedPixelTrait))
      continue;
    if ((despeckle_traits & CopyPixelTrait) != 0)
      continue;
    (void) memset(pixels,0,length*sizeof(*pixels));
    /* j indexes the padded buffer; start past the first padded row. */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const Quantum
        *magick_restrict p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;  /* skip left border pixel */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[j++]=p[i];
        p+=GetPixelChannels(image);
      }
      j++;  /* skip right border pixel */
    }
    (void) memset(buffer,0,length*sizeof(*buffer));
    /* Complementary hulling: raise then lower in each direction pair. */
    for (k=0; k < 4; k++)
    {
      Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer);
      Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer);
    }
    /* Copy the filtered channel back, again skipping the padding. */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      Quantum
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns,
        1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelChannel(despeckle_image,channel,pixels[j++],q);
        q+=GetPixelChannels(despeckle_image);
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffer_info=RelinquishVirtualMemory(buffer_info);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E d g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EdgeImage() finds edges in an image. Radius defines the radius of the
% convolution filter. Use a radius of 0 and EdgeImage() selects a suitable
% radius for you.
%
% The format of the EdgeImage method is:
%
% Image *EdgeImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  EdgeImage() detects edges by convolving with a width x width
  Laplacian-style kernel: every element is -1 except the center, which is
  width*height-1 so the kernel sums to zero (flat regions map to black).
*/
MagickExport Image *EdgeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  ssize_t
    i;

  size_t
    width;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,0.5);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /* Clear the acquired kernel and rebuild it by hand below. */
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (kernel_info->width-1)/2;
  kernel_info->y=(ssize_t) (kernel_info->height-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]=(-1.0);
  /* After the loop i == width*height, so i/2 is the center element. */
  kernel_info->values[i/2]=(double) kernel_info->width*kernel_info->height-1.0;
  edge_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(edge_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E m b o s s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EmbossImage() returns a grayscale image with a three-dimensional effect.
% We convolve the image with a Gaussian operator of the given radius and
% standard deviation (sigma). For reasonable results, radius should be
% larger than sigma. Use a radius of 0 and Emboss() selects a suitable
% radius for you.
%
% The format of the EmbossImage method is:
%
% Image *EmbossImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  EmbossImage() builds a signed Gaussian kernel that is zeroed everywhere
  except along one diagonal (tracked by k, which starts at the center column
  and decrements per row), normalizes it, convolves, and finally equalizes
  the result to produce the three-dimensional relief effect.
*/
MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *emboss_image;

  KernelInfo
    *kernel_info;

  ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->width*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  j=(ssize_t) (kernel_info->width-1)/2;  /* kernel half-width */
  k=j;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      /* Signed Gaussian: negative in the "shadow" quadrant, positive
         otherwise; amplitude 8 before normalization. */
      kernel_info->values[i]=(MagickRealType) (((u < 0) || (v < 0) ? -8.0 :
        8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
        (2.0*MagickPI*MagickSigma*MagickSigma));
      /* Keep only the element on the diagonal tracked by k; zero the rest. */
      if (u != k)
        kernel_info->values[i]=0.0;
      i++;
    }
    k--;
  }
  /* Normalize so the kernel's weights sum to (approximately) one. */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  emboss_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  /* Stretch the tonal range of the embossed result. */
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImage(emboss_image,exception);
  return(emboss_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a u s s i a n B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussianBlurImage() blurs an image. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, the radius should be larger than sigma. Use a
% radius of 0 and GaussianBlurImage() selects a suitable radius for you.
%
% The format of the GaussianBlurImage method is:
%
% Image *GaussianBlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GaussianBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    kernel_geometry[MagickPathExtent];

  Image
    *convolved_image;

  KernelInfo
    *gaussian_kernel;

  /*
    Blur by convolving with a "gaussian:<radius>x<sigma>" kernel.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) FormatLocaleString(kernel_geometry,MagickPathExtent,
    "gaussian:%.20gx%.20g",radius,sigma);
  gaussian_kernel=AcquireKernelInfo(kernel_geometry,exception);
  if (gaussian_kernel == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  convolved_image=ConvolveImage(image,gaussian_kernel,exception);
  gaussian_kernel=DestroyKernelInfo(gaussian_kernel);
  return(convolved_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% K u w a h a r a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% KuwaharaImage() is an edge preserving noise reduction filter.
%
% The format of the KuwaharaImage method is:
%
% Image *KuwaharaImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the square window radius.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Rec. 709 luma of a mean-pixel vector; channel positions are resolved
  through the image's channel map.
*/
static inline MagickRealType GetMeanLuma(const Image *magick_restrict image,
  const double *magick_restrict pixel)
{
  MagickRealType
    luma;

  luma=0.212656f*pixel[image->channel_map[RedPixelChannel].offset];
  luma+=0.715158f*pixel[image->channel_map[GreenPixelChannel].offset];
  luma+=0.072186f*pixel[image->channel_map[BluePixelChannel].offset];
  return(luma);
}
/*
  KuwaharaImage() implements the edge-preserving Kuwahara filter: the image
  is first Gaussian-blurred, then for each pixel the four width x width
  quadrants around it are examined and the output pixel is interpolated from
  the center of the quadrant with the smallest luma variance.
*/
MagickExport Image *KuwaharaImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define KuwaharaImageTag "Kuwahara/Image"

  CacheView
    *image_view,
    *kuwahara_view;

  Image
    *gaussian_image,
    *kuwahara_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    width;

  ssize_t
    y;

  /*
    Initialize Kuwahara image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Quadrant side length: floor(radius)+1. */
  width=(size_t) radius+1;
  gaussian_image=BlurImage(image,radius,sigma,exception);
  if (gaussian_image == (Image *) NULL)
    return((Image *) NULL);
  kuwahara_image=CloneImage(image,0,0,MagickTrue,exception);
  if (kuwahara_image == (Image *) NULL)
    {
      gaussian_image=DestroyImage(gaussian_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(kuwahara_image,DirectClass,exception) == MagickFalse)
    {
      gaussian_image=DestroyImage(gaussian_image);
      kuwahara_image=DestroyImage(kuwahara_image);
      return((Image *) NULL);
    }
  /*
    Edge preserving noise reduction filter.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(gaussian_image,exception);
  kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,kuwahara_image,gaussian_image->rows,1)
#endif
  for (y=0; y < (ssize_t) gaussian_image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) gaussian_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      double
        min_variance;

      RectangleInfo
        quadrant,
        target;

      size_t
        i;

      min_variance=MagickMaximumValue;
      SetGeometry(gaussian_image,&target);
      quadrant.width=width;
      quadrant.height=width;
      /* Evaluate the four quadrants that touch pixel (x,y). */
      for (i=0; i < 4; i++)
      {
        const Quantum
          *magick_restrict k;

        double
          mean[MaxPixelChannels],
          variance;

        ssize_t
          n;

        ssize_t
          j;

        quadrant.x=x;
        quadrant.y=y;
        switch (i)
        {
          case 0:
          {
            /* upper-left quadrant */
            quadrant.x=x-(ssize_t) (width-1);
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 1:
          {
            /* upper-right quadrant */
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 2:
          {
            /* lower-left quadrant */
            quadrant.x=x-(ssize_t) (width-1);
            break;
          }
          case 3:
          default:
            /* lower-right quadrant: (x,y) unchanged */
            break;
        }
        p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y,
          quadrant.width,quadrant.height,exception);
        if (p == (const Quantum *) NULL)
          break;
        /* Per-channel mean over the quadrant. */
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]=0.0;
        k=p;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
            mean[j]+=(double) k[j];
          k+=GetPixelChannels(gaussian_image);
        }
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]/=(double) (width*width);
        /* Luma variance of the quadrant about its mean. */
        k=p;
        variance=0.0;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          double
            luma;

          luma=GetPixelLuma(gaussian_image,k);
          variance+=(luma-GetMeanLuma(gaussian_image,mean))*
            (luma-GetMeanLuma(gaussian_image,mean));
          k+=GetPixelChannels(gaussian_image);
        }
        if (variance < min_variance)
          {
            min_variance=variance;
            target=quadrant;
          }
      }
      /* i < 4 means a pixel fetch failed and the loop broke early. */
      if (i < 4)
        {
          status=MagickFalse;
          break;
        }
      /* Interpolate the blurred image at the winning quadrant's center. */
      status=InterpolatePixelChannels(gaussian_image,image_view,kuwahara_image,
        UndefinedInterpolatePixel,(double) target.x+target.width/2.0,(double)
        target.y+target.height/2.0,q,exception);
      if (status == MagickFalse)
        break;
      q+=GetPixelChannels(kuwahara_image);
    }
    if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,KuwaharaImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  kuwahara_view=DestroyCacheView(kuwahara_view);
  image_view=DestroyCacheView(image_view);
  gaussian_image=DestroyImage(gaussian_image);
  if (status == MagickFalse)
    kuwahara_image=DestroyImage(kuwahara_image);
  return(kuwahara_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L o c a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LocalContrastImage() attempts to increase the appearance of large-scale
% light-dark transitions. Local contrast enhancement works similarly to
% sharpening with an unsharp mask, however the mask is instead created using
% an image with a greater blur distance.
%
% The format of the LocalContrastImage method is:
%
% Image *LocalContrastImage(const Image *image, const double radius,
% const double strength,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian blur, in percentage with 100%
% resulting in a blur radius of 20% of largest dimension.
%
% o strength: the strength of the blur mask in percentage.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  LocalContrastImage() enhances large-scale light/dark transitions, like an
  unsharp mask with a very wide blur.  It runs two separable weighted-blur
  passes over the image's luma: a vertical pass into a padded intermediate
  float buffer (with mirrored horizontal padding), then a horizontal pass
  whose result is compared against the source luma to derive a per-pixel
  gain applied to the RGB channels.
*/
MagickExport Image *LocalContrastImage(const Image *image,const double radius,
  const double strength,ExceptionInfo *exception)
{
#define LocalContrastImageTag "LocalContrast/Image"

  CacheView
    *image_view,
    *contrast_view;

  float
    *interImage,   /* rows x (columns+2*width) vertically-blurred luma */
    *scanline,     /* per-thread scratch scanlines */
    totalWeight;   /* blur normalization: (width+1)^2 */

  Image
    *contrast_image;

  MagickBooleanType
    status;

  MemoryInfo
    *scanline_info,
    *interImage_info;

  ssize_t
    scanLineSize,
    width;         /* blur half-width: 0.2% of largest dimension per 100% radius */

  /*
    Initialize contrast image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception);
  if (contrast_image != (Image *) NULL)
    return(contrast_image);
#endif
  contrast_image=CloneImage(image,0,0,MagickTrue,exception);
  if (contrast_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(contrast_image,DirectClass,exception) == MagickFalse)
    {
      contrast_image=DestroyImage(contrast_image);
      return((Image *) NULL);
    }
  image_view=AcquireVirtualCacheView(image,exception);
  contrast_view=AcquireAuthenticCacheView(contrast_image,exception);
  scanLineSize=(ssize_t) MagickMax(image->columns,image->rows);
  width=(ssize_t) scanLineSize*0.002f*fabs(radius);
  scanLineSize+=(2*width);
  /* One scanline of scratch per OpenMP thread. */
  scanline_info=AcquireVirtualMemory((size_t) GetOpenMPMaximumThreads()*
    scanLineSize,sizeof(*scanline));
  if (scanline_info == (MemoryInfo *) NULL)
    {
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  scanline=(float *) GetVirtualMemoryBlob(scanline_info);
  /*
    Create intermediate buffer.
  */
  interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)),
    sizeof(*interImage));
  if (interImage_info == (MemoryInfo *) NULL)
    {
      scanline_info=RelinquishVirtualMemory(scanline_info);
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  interImage=(float *) GetVirtualMemoryBlob(interImage_info);
  totalWeight=(float) ((width+1)*(width+1));
  /*
    Vertical pass.
  */
  status=MagickTrue;
  {
    ssize_t
      x;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->columns,1)
#endif
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *out,
        *pix,
        *pixels;

      ssize_t
        y;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      /* This thread's private scanline. */
      pixels=scanline;
      pixels+=id*scanLineSize;
      pix=pixels;
      /* Fetch the column with `width` rows of virtual padding on each end. */
      p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width),
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (y=0; y < (ssize_t) image->rows+(2*width); y++)
      {
        *pix++=(float)GetPixelLuma(image,p);
        p+=image->number_channels;
      }
      out=interImage+x+width;
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        float
          sum,
          weight;

        /* Triangular weighting: ramp up to the center, then back down. */
        weight=1.0f;
        sum=0;
        pix=pixels+y;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* write to output */
        *out=sum/totalWeight;
        /* mirror into padding */
        if (x <= width && x != 0)
          *(out-(x*2))=*out;
        if ((x > (ssize_t) image->columns-width-2) &&
            (x != (ssize_t) image->columns-1))
          *(out+((image->columns-x-1)*2))=*out;
        out+=image->columns+(width*2);
      }
    }
  }
  /*
    Horizontal pass.
  */
  {
    ssize_t
      y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *pix,
        *pixels;

      Quantum
        *magick_restrict q;

      ssize_t
        x;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      pixels=scanline;
      pixels+=id*scanLineSize;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1,
        exception);
      if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      /* Pull this row (already horizontally padded) from the vertical pass. */
      memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+
        (2*width))*sizeof(float));
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        float
          mult,
          srcVal,
          sum,
          weight;

        PixelTrait
          traits;

        weight=1.0f;
        sum=0;
        pix=pixels+x;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* Apply and write */
        srcVal=(float) GetPixelLuma(image,p);
        /* Unsharp-style gain: boost the difference between the source luma
           and its wide blur by `strength` percent. */
        mult=(srcVal-(sum/totalWeight))*(strength/100.0f);
        mult=(srcVal+mult)/srcVal;
        traits=GetPixelChannelTraits(image,RedPixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelRed(contrast_image,ClampToQuantum((MagickRealType)
            GetPixelRed(image,p)*mult),q);
        traits=GetPixelChannelTraits(image,GreenPixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelGreen(contrast_image,ClampToQuantum((MagickRealType)
            GetPixelGreen(image,p)*mult),q);
        traits=GetPixelChannelTraits(image,BluePixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelBlue(contrast_image,ClampToQuantum((MagickRealType)
            GetPixelBlue(image,p)*mult),q);
        p+=image->number_channels;
        q+=contrast_image->number_channels;
      }
      if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse)
        status=MagickFalse;
    }
  }
  scanline_info=RelinquishVirtualMemory(scanline_info);
  interImage_info=RelinquishVirtualMemory(interImage_info);
  contrast_view=DestroyCacheView(contrast_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    contrast_image=DestroyImage(contrast_image);
  return(contrast_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o t i o n B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MotionBlurImage() simulates motion blur. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and MotionBlurImage() selects a suitable radius for you.
% Angle gives the angle of the blurring motion.
%
% Andrew Protano contributed this effect.
%
% The format of the MotionBlurImage method is:
%
% Image *MotionBlurImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting
% the center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: Apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Build a one-dimensional, one-sided Gaussian convolution kernel of the
  requested width, normalized so its taps sum to one.  Returns NULL when the
  aligned allocation fails; the caller owns the returned memory.
*/
static MagickRealType *GetMotionBlurKernel(const size_t width,
  const double sigma)
{
  MagickRealType
    *kernel,
    sum;

  ssize_t
    u;

  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,sizeof(*kernel)));
  if (kernel == (MagickRealType *) NULL)
    return(kernel);
  /* Evaluate the Gaussian at offsets 0..width-1 and accumulate its mass. */
  sum=0.0;
  for (u=0; u < (ssize_t) width; u++)
  {
    kernel[u]=(MagickRealType) (exp((-((double) u*u)/(double) (2.0*MagickSigma*
      MagickSigma)))/(MagickSQ2PI*MagickSigma));
    sum+=kernel[u];
  }
  /* Normalize so the taps sum to one. */
  for (u=0; u < (ssize_t) width; u++)
    kernel[u]/=sum;
  return(kernel);
}
MagickExport Image *MotionBlurImage(const Image *image,const double radius,
const double sigma,const double angle,ExceptionInfo *exception)
{
#define BlurImageTag "Blur/Image"
CacheView
*blur_view,
*image_view,
*motion_view;
Image
*blur_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickRealType
*kernel;
OffsetInfo
*offset;
PointInfo
point;
ssize_t
i;
size_t
width;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
/*
Build a 1-D Gaussian kernel and, for each tap, an (x,y) pixel offset
lying along the motion vector defined by `angle`.
*/
width=GetOptimalKernelWidth1D(radius,sigma);
kernel=GetMotionBlurKernel(width,sigma);
if (kernel == (MagickRealType *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset));
if (offset == (OffsetInfo *) NULL)
{
kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
point.x=(double) width*sin(DegreesToRadians(angle));
point.y=(double) width*cos(DegreesToRadians(angle));
/*
Each tap i is placed i steps along the unit direction vector
(point.y,point.x)/hypot(point.x,point.y), rounded to integral offsets.
*/
for (i=0; i < (ssize_t) width; i++)
{
offset[i].x=CastDoubleToLong(ceil((double) (i*point.y)/
hypot(point.x,point.y)-0.5));
offset[i].y=CastDoubleToLong(ceil((double) (i*point.x)/
hypot(point.x,point.y)-0.5));
}
/*
Motion blur image.
*/
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/* Try the OpenCL-accelerated path first; fall through on failure. */
blur_image=AccelerateMotionBlurImage(image,kernel,width,offset,exception);
if (blur_image != (Image *) NULL)
{
kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
offset=(OffsetInfo *) RelinquishMagickMemory(offset);
return(blur_image);
}
#endif
blur_image=CloneImage(image,0,0,MagickTrue,exception);
if (blur_image == (Image *) NULL)
{
kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
offset=(OffsetInfo *) RelinquishMagickMemory(offset);
return((Image *) NULL);
}
if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
{
kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
offset=(OffsetInfo *) RelinquishMagickMemory(offset);
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
motion_view=AcquireVirtualCacheView(image,exception);
blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,blur_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
/* A prior row's failure aborts remaining rows (cannot break in OpenMP). */
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
alpha,
gamma,
pixel;
PixelChannel
channel;
PixelTrait
blur_traits,
traits;
const Quantum
*magick_restrict r;
MagickRealType
*magick_restrict k;
ssize_t
j;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
blur_traits=GetPixelChannelTraits(blur_image,channel);
if ((traits == UndefinedPixelTrait) ||
(blur_traits == UndefinedPixelTrait))
continue;
if ((blur_traits & CopyPixelTrait) != 0)
{
/* Channel is copy-only (e.g. index): pass the source value through. */
SetPixelChannel(blur_image,channel,p[i],q);
continue;
}
k=kernel;
pixel=0.0;
if ((blur_traits & BlendPixelTrait) == 0)
{
/*
Non-blended channel: plain weighted sum of the taps along the
motion vector, one virtual-pixel fetch per tap.
*/
for (j=0; j < (ssize_t) width; j++)
{
r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+
offset[j].y,1,1,exception);
if (r == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel+=(*k)*r[i];
k++;
}
SetPixelChannel(blur_image,channel,ClampToQuantum(pixel),q);
continue;
}
/*
Blended channel: weight each tap by its alpha and renormalize by
the accumulated alpha (gamma) so transparent taps don't darken.
*/
alpha=0.0;
gamma=0.0;
for (j=0; j < (ssize_t) width; j++)
{
r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+offset[j].y,1,
1,exception);
if (r == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
alpha=(double) (QuantumScale*GetPixelAlpha(image,r));
pixel+=(*k)*alpha*r[i];
gamma+=(*k)*alpha;
k++;
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(blur_image);
}
if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
/* The atomic protects only the increment; the read below is benign. */
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,BlurImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
blur_view=DestroyCacheView(blur_view);
motion_view=DestroyCacheView(motion_view);
image_view=DestroyCacheView(image_view);
kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
offset=(OffsetInfo *) RelinquishMagickMemory(offset);
if (status == MagickFalse)
blur_image=DestroyImage(blur_image);
return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r e v i e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PreviewImage() tiles 9 thumbnails of the specified image with an image
% processing operation applied with varying parameters. This may be helpful
% in pin-pointing an appropriate parameter for a particular image processing
% operation.
%
% The format of the PreviewImage method is:
%
% Image *PreviewImage(const Image *image,const PreviewType preview,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o preview: the image processing operation.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PreviewImage(const Image *image,const PreviewType preview,
ExceptionInfo *exception)
{
#define NumberTiles 9
#define PreviewImageTag "Preview/Image"
#define DefaultPreviewGeometry "204x204+10+10"
char
factor[MagickPathExtent],
label[MagickPathExtent];
double
degrees,
gamma,
percentage,
radius,
sigma,
threshold;
Image
*images,
*montage_image,
*preview_image,
*thumbnail;
ImageInfo
*preview_info;
MagickBooleanType
proceed;
MontageInfo
*montage_info;
QuantizeInfo
quantize_info;
RectangleInfo
geometry;
ssize_t
i,
x;
size_t
colors;
ssize_t
y;
/*
Open output image file.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/* Seed the parameter sweep; each tile advances one or more of these. */
colors=2;
degrees=0.0;
gamma=(-0.2f);
preview_info=AcquireImageInfo();
SetGeometry(image,&geometry);
(void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y,
&geometry.width,&geometry.height);
images=NewImageList();
percentage=12.5;
GetQuantizeInfo(&quantize_info);
radius=0.0;
sigma=1.0;
threshold=0.0;
x=0;
y=0;
for (i=0; i < NumberTiles; i++)
{
thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception);
if (thumbnail == (Image *) NULL)
break;
(void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL,
(void *) NULL);
(void) SetImageProperty(thumbnail,"label",DefaultTileLabel,exception);
if (i == (NumberTiles/2))
{
/* The center tile is the unmodified thumbnail with a gray matte. */
(void) QueryColorCompliance("#dfdfdf",AllCompliance,
&thumbnail->matte_color,exception);
AppendImageToList(&images,thumbnail);
continue;
}
switch (preview)
{
case RotatePreview:
{
degrees+=45.0;
preview_image=RotateImage(thumbnail,degrees,exception);
(void) FormatLocaleString(label,MagickPathExtent,"rotate %g",degrees);
break;
}
case ShearPreview:
{
degrees+=5.0;
preview_image=ShearImage(thumbnail,degrees,degrees,exception);
(void) FormatLocaleString(label,MagickPathExtent,"shear %gx%g",degrees,
2.0*degrees);
break;
}
case RollPreview:
{
x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles;
y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles;
preview_image=RollImage(thumbnail,x,y,exception);
(void) FormatLocaleString(label,MagickPathExtent,"roll %+.20gx%+.20g",
(double) x,(double) y);
break;
}
case HuePreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
(void) FormatLocaleString(factor,MagickPathExtent,"100,100,%g",2.0*
percentage);
(void) ModulateImage(preview_image,factor,exception);
(void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
break;
}
case SaturationPreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
(void) FormatLocaleString(factor,MagickPathExtent,"100,%g",2.0*
percentage);
(void) ModulateImage(preview_image,factor,exception);
(void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
break;
}
case BrightnessPreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
(void) FormatLocaleString(factor,MagickPathExtent,"%g",2.0*percentage);
(void) ModulateImage(preview_image,factor,exception);
(void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
break;
}
case GammaPreview:
default:
{
/* NOTE: default is deliberately fused with GammaPreview mid-switch. */
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
gamma+=0.4f;
(void) GammaImage(preview_image,gamma,exception);
(void) FormatLocaleString(label,MagickPathExtent,"gamma %g",gamma);
break;
}
case SpiffPreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image != (Image *) NULL)
for (x=0; x < i; x++)
(void) ContrastImage(preview_image,MagickTrue,exception);
(void) FormatLocaleString(label,MagickPathExtent,"contrast (%.20g)",
(double) i+1);
break;
}
case DullPreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
for (x=0; x < i; x++)
(void) ContrastImage(preview_image,MagickFalse,exception);
(void) FormatLocaleString(label,MagickPathExtent,"+contrast (%.20g)",
(double) i+1);
break;
}
case GrayscalePreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
colors<<=1;
quantize_info.number_colors=colors;
quantize_info.colorspace=GRAYColorspace;
(void) QuantizeImage(&quantize_info,preview_image,exception);
(void) FormatLocaleString(label,MagickPathExtent,
"-colorspace gray -colors %.20g",(double) colors);
break;
}
case QuantizePreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
colors<<=1;
quantize_info.number_colors=colors;
(void) QuantizeImage(&quantize_info,preview_image,exception);
(void) FormatLocaleString(label,MagickPathExtent,"colors %.20g",
(double) colors);
break;
}
case DespecklePreview:
{
/* Apply DespeckleImage i+1 times, chaining through the thumbnail. */
for (x=0; x < (i-1); x++)
{
preview_image=DespeckleImage(thumbnail,exception);
if (preview_image == (Image *) NULL)
break;
thumbnail=DestroyImage(thumbnail);
thumbnail=preview_image;
}
preview_image=DespeckleImage(thumbnail,exception);
if (preview_image == (Image *) NULL)
break;
(void) FormatLocaleString(label,MagickPathExtent,"despeckle (%.20g)",
(double) i+1);
break;
}
case ReduceNoisePreview:
{
preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t)
radius,(size_t) radius,exception);
(void) FormatLocaleString(label,MagickPathExtent,"noise %g",radius);
break;
}
case AddNoisePreview:
{
/*
NOTE(review): case 4 is absent, so tile 4 (and >6) hits default,
which overwrites thumbnail->magick with "NULL" — verify intended.
*/
switch ((int) i)
{
case 0:
{
(void) CopyMagickString(factor,"uniform",MagickPathExtent);
break;
}
case 1:
{
(void) CopyMagickString(factor,"gaussian",MagickPathExtent);
break;
}
case 2:
{
(void) CopyMagickString(factor,"multiplicative",MagickPathExtent);
break;
}
case 3:
{
(void) CopyMagickString(factor,"impulse",MagickPathExtent);
break;
}
case 5:
{
(void) CopyMagickString(factor,"laplacian",MagickPathExtent);
break;
}
case 6:
{
(void) CopyMagickString(factor,"Poisson",MagickPathExtent);
break;
}
default:
{
(void) CopyMagickString(thumbnail->magick,"NULL",MagickPathExtent);
break;
}
}
preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i,
(size_t) i,exception);
(void) FormatLocaleString(label,MagickPathExtent,"+noise %s",factor);
break;
}
case SharpenPreview:
{
preview_image=SharpenImage(thumbnail,radius,sigma,exception);
(void) FormatLocaleString(label,MagickPathExtent,"sharpen %gx%g",
radius,sigma);
break;
}
case BlurPreview:
{
preview_image=BlurImage(thumbnail,radius,sigma,exception);
(void) FormatLocaleString(label,MagickPathExtent,"blur %gx%g",radius,
sigma);
break;
}
case ThresholdPreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
/*
NOTE(review): BilevelImage is applied to `thumbnail`, not the
clone in `preview_image` — looks like a bug; confirm upstream.
*/
(void) BilevelImage(thumbnail,(double) (percentage*((double)
QuantumRange+1.0))/100.0,exception);
(void) FormatLocaleString(label,MagickPathExtent,"threshold %g",
(double) (percentage*((double) QuantumRange+1.0))/100.0);
break;
}
case EdgeDetectPreview:
{
preview_image=EdgeImage(thumbnail,radius,exception);
(void) FormatLocaleString(label,MagickPathExtent,"edge %g",radius);
break;
}
case SpreadPreview:
{
preview_image=SpreadImage(thumbnail,image->interpolate,radius,
exception);
(void) FormatLocaleString(label,MagickPathExtent,"spread %g",
radius+0.5);
break;
}
case SolarizePreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
(void) SolarizeImage(preview_image,(double) QuantumRange*percentage/
100.0,exception);
(void) FormatLocaleString(label,MagickPathExtent,"solarize %g",
(QuantumRange*percentage)/100.0);
break;
}
case ShadePreview:
{
degrees+=10.0;
preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees,
exception);
(void) FormatLocaleString(label,MagickPathExtent,"shade %gx%g",degrees,
degrees);
break;
}
case RaisePreview:
{
RectangleInfo
raise;
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
raise.width=(size_t) (2*i+2);
raise.height=(size_t) (2*i+2);
raise.x=(i-1)/2;
raise.y=(i-1)/2;
(void) RaiseImage(preview_image,&raise,MagickTrue,exception);
(void) FormatLocaleString(label,MagickPathExtent,
"raise %.20gx%.20g%+.20g%+.20g",(double) raise.width,(double)
raise.height,(double) raise.x,(double) raise.y);
break;
}
case SegmentPreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
threshold+=0.4f;
(void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold,
threshold,exception);
(void) FormatLocaleString(label,MagickPathExtent,"segment %gx%g",
threshold,threshold);
break;
}
case SwirlPreview:
{
preview_image=SwirlImage(thumbnail,degrees,image->interpolate,
exception);
(void) FormatLocaleString(label,MagickPathExtent,"swirl %g",degrees);
degrees+=45.0;
break;
}
case ImplodePreview:
{
degrees+=0.1f;
preview_image=ImplodeImage(thumbnail,degrees,image->interpolate,
exception);
(void) FormatLocaleString(label,MagickPathExtent,"implode %g",degrees);
break;
}
case WavePreview:
{
degrees+=5.0f;
preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees,
image->interpolate,exception);
(void) FormatLocaleString(label,MagickPathExtent,"wave %gx%g",0.5*
degrees,2.0*degrees);
break;
}
case OilPaintPreview:
{
/* NOTE(review): label says "charcoal" — likely copy-paste; verify. */
preview_image=OilPaintImage(thumbnail,(double) radius,(double) sigma,
exception);
(void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",
radius,sigma);
break;
}
case CharcoalDrawingPreview:
{
preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma,
exception);
(void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",
radius,sigma);
break;
}
case JPEGPreview:
{
char
filename[MagickPathExtent];
int
file;
MagickBooleanType
status;
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
preview_info->quality=(size_t) percentage;
(void) FormatLocaleString(factor,MagickPathExtent,"%.20g",(double)
preview_info->quality);
/* Round-trip through a temporary JPEG file to show quality loss. */
file=AcquireUniqueFileResource(filename);
if (file != -1)
file=close(file)-1;
(void) FormatLocaleString(preview_image->filename,MagickPathExtent,
"jpeg:%s",filename);
status=WriteImage(preview_info,preview_image,exception);
if (status != MagickFalse)
{
Image
*quality_image;
(void) CopyMagickString(preview_info->filename,
preview_image->filename,MagickPathExtent);
quality_image=ReadImage(preview_info,exception);
if (quality_image != (Image *) NULL)
{
preview_image=DestroyImage(preview_image);
preview_image=quality_image;
}
}
(void) RelinquishUniqueFileResource(preview_image->filename);
/* Label with the compressed size in MB/KB/bytes as appropriate. */
if ((GetBlobSize(preview_image)/1024) >= 1024)
(void) FormatLocaleString(label,MagickPathExtent,"quality %s\n%gmb ",
factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/
1024.0/1024.0);
else
if (GetBlobSize(preview_image) >= 1024)
(void) FormatLocaleString(label,MagickPathExtent,
"quality %s\n%gkb ",factor,(double) ((MagickOffsetType)
GetBlobSize(preview_image))/1024.0);
else
(void) FormatLocaleString(label,MagickPathExtent,
"quality %s\n%.20gb ",factor,(double) ((MagickOffsetType)
GetBlobSize(thumbnail)));
break;
}
}
thumbnail=DestroyImage(thumbnail);
/* Advance the swept parameters for the next tile. */
percentage+=12.5;
radius+=0.5;
sigma+=0.25;
if (preview_image == (Image *) NULL)
break;
preview_image->alpha_trait=UndefinedPixelTrait;
(void) DeleteImageProperty(preview_image,"label");
(void) SetImageProperty(preview_image,"label",label,exception);
AppendImageToList(&images,preview_image);
proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i,
NumberTiles);
if (proceed == MagickFalse)
break;
}
if (images == (Image *) NULL)
{
preview_info=DestroyImageInfo(preview_info);
return((Image *) NULL);
}
/*
Create the montage.
*/
montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL);
(void) CopyMagickString(montage_info->filename,image->filename,
MagickPathExtent);
montage_info->shadow=MagickTrue;
(void) CloneString(&montage_info->tile,"3x3");
(void) CloneString(&montage_info->geometry,DefaultPreviewGeometry);
(void) CloneString(&montage_info->frame,DefaultTileFrame);
montage_image=MontageImages(images,montage_info,exception);
montage_info=DestroyMontageInfo(montage_info);
images=DestroyImageList(images);
if (montage_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
if (montage_image->montage != (char *) NULL)
{
/*
Free image directory.
*/
montage_image->montage=(char *) RelinquishMagickMemory(
montage_image->montage);
if (image->directory != (char *) NULL)
montage_image->directory=(char *) RelinquishMagickMemory(
montage_image->directory);
}
preview_info=DestroyImageInfo(preview_info);
return(montage_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t i o n a l B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotationalBlurImage() applies a radial blur to the image.
%
% Andrew Protano contributed this effect.
%
% The format of the RotationalBlurImage method is:
%
% Image *RotationalBlurImage(const Image *image,const double angle,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o angle: the angle of the radial blur.
%
% o blur: the blur.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotationalBlurImage(const Image *image,const double angle,
ExceptionInfo *exception)
{
CacheView
*blur_view,
*image_view,
*radial_view;
double
blur_radius,
*cos_theta,
offset,
*sin_theta,
theta;
Image
*blur_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PointInfo
blur_center;
ssize_t
i;
size_t
n;
ssize_t
y;
/*
Allocate blur image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/* Try the OpenCL-accelerated path first; fall through on failure. */
blur_image=AccelerateRotationalBlurImage(image,angle,exception);
if (blur_image != (Image *) NULL)
return(blur_image);
#endif
blur_image=CloneImage(image,0,0,MagickTrue,exception);
if (blur_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
{
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
/*
Tabulate n sample angles spanning `angle`, centered on zero, so each
output pixel averages source samples along an arc about the center.
*/
blur_center.x=(double) (image->columns-1)/2.0;
blur_center.y=(double) (image->rows-1)/2.0;
blur_radius=hypot(blur_center.x,blur_center.y);
n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL);
theta=DegreesToRadians(angle)/(double) (n-1);
cos_theta=(double *) AcquireQuantumMemory((size_t) n,
sizeof(*cos_theta));
sin_theta=(double *) AcquireQuantumMemory((size_t) n,
sizeof(*sin_theta));
if ((cos_theta == (double *) NULL) ||
(sin_theta == (double *) NULL))
{
if (cos_theta != (double *) NULL)
cos_theta=(double *) RelinquishMagickMemory(cos_theta);
if (sin_theta != (double *) NULL)
sin_theta=(double *) RelinquishMagickMemory(sin_theta);
blur_image=DestroyImage(blur_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
offset=theta*(double) (n-1)/2.0;
for (i=0; i < (ssize_t) n; i++)
{
cos_theta[i]=cos((double) (theta*i-offset));
sin_theta[i]=sin((double) (theta*i-offset));
}
/*
Radial blur image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
radial_view=AcquireVirtualCacheView(image,exception);
blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,blur_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
/* A prior row's failure aborts remaining rows (cannot break in OpenMP). */
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
radius;
PointInfo
center;
ssize_t
i;
size_t
step;
center.x=(double) x-blur_center.x;
center.y=(double) y-blur_center.y;
radius=hypot((double) center.x,center.y);
/*
Sample fewer arc positions near the center (larger step): arc
length shrinks with radius, so dense sampling there is wasted.
*/
if (radius == 0)
step=1;
else
{
step=(size_t) (blur_radius/radius);
if (step == 0)
step=1;
else
if (step >= n)
step=n-1;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
gamma,
pixel;
PixelChannel
channel;
PixelTrait
blur_traits,
traits;
const Quantum
*magick_restrict r;
ssize_t
j;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
blur_traits=GetPixelChannelTraits(blur_image,channel);
if ((traits == UndefinedPixelTrait) ||
(blur_traits == UndefinedPixelTrait))
continue;
if ((blur_traits & CopyPixelTrait) != 0)
{
/* Channel is copy-only: pass the source value through. */
SetPixelChannel(blur_image,channel,p[i],q);
continue;
}
gamma=0.0;
pixel=0.0;
if ((GetPixelChannelTraits(image,AlphaPixelChannel) == UndefinedPixelTrait) ||
(channel == AlphaPixelChannel))
{
/*
No alpha channel (or this IS the alpha channel): simple average
of the rotated samples; gamma counts the samples taken.
*/
for (j=0; j < (ssize_t) n; j+=(ssize_t) step)
{
r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+
center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t)
(blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5),
1,1,exception);
if (r == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel+=r[i];
gamma++;
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
continue;
}
/*
Alpha-weighted average: transparent samples contribute less, and
gamma renormalizes by the accumulated alpha.
*/
for (j=0; j < (ssize_t) n; j+=(ssize_t) step)
{
double
alpha;
r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+
center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t)
(blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5),
1,1,exception);
if (r == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
alpha=(double) QuantumScale*GetPixelAlpha(image,r);
pixel+=alpha*r[i];
gamma+=alpha;
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(blur_image);
}
if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
/* The atomic protects only the increment; the read below is benign. */
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,BlurImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
blur_view=DestroyCacheView(blur_view);
radial_view=DestroyCacheView(radial_view);
image_view=DestroyCacheView(image_view);
cos_theta=(double *) RelinquishMagickMemory(cos_theta);
sin_theta=(double *) RelinquishMagickMemory(sin_theta);
if (status == MagickFalse)
blur_image=DestroyImage(blur_image);
return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e l e c t i v e B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SelectiveBlurImage() selectively blur pixels within a contrast threshold.
% It is similar to the unsharpen mask that sharpens everything with contrast
% above a certain threshold.
%
% The format of the SelectiveBlurImage method is:
%
% Image *SelectiveBlurImage(const Image *image,const double radius,
% const double sigma,const double threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o threshold: only pixels within this contrast threshold are included
% in the blur operation.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SelectiveBlurImage(const Image *image,const double radius,
const double sigma,const double threshold,ExceptionInfo *exception)
{
#define SelectiveBlurImageTag "SelectiveBlur/Image"
CacheView
*blur_view,
*image_view,
*luminance_view;
Image
*blur_image,
*luminance_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickRealType
*kernel;
ssize_t
i;
size_t
width;
ssize_t
center,
j,
u,
v,
y;
/*
Initialize blur image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
Build a 2-D Gaussian kernel (width x width taps; note the allocation is
width elements of width*sizeof each).
*/
width=GetOptimalKernelWidth1D(radius,sigma);
kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
width,width*sizeof(*kernel)));
if (kernel == (MagickRealType *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
j=(ssize_t) (width-1)/2;
i=0;
for (v=(-j); v <= j; v++)
{
for (u=(-j); u <= j; u++)
kernel[i++]=(MagickRealType) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
}
if (image->debug != MagickFalse)
{
/* Dump the kernel row by row to the transform log. */
char
format[MagickPathExtent],
*message;
const MagickRealType
*k;
ssize_t
u,
v;
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double)
width);
message=AcquireString("");
k=kernel;
for (v=0; v < (ssize_t) width; v++)
{
*message='\0';
(void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
(void) ConcatenateString(&message,format);
for (u=0; u < (ssize_t) width; u++)
{
(void) FormatLocaleString(format,MagickPathExtent,"%+f ",(double)
*k++);
(void) ConcatenateString(&message,format);
}
(void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
}
message=DestroyString(message);
}
blur_image=CloneImage(image,0,0,MagickTrue,exception);
if (blur_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
{
blur_image=DestroyImage(blur_image);
kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
return((Image *) NULL);
}
/* A grayscale copy supplies the intensities used for the contrast test. */
luminance_image=CloneImage(image,0,0,MagickTrue,exception);
if (luminance_image == (Image *) NULL)
{
blur_image=DestroyImage(blur_image);
kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
return((Image *) NULL);
}
status=TransformImageColorspace(luminance_image,GRAYColorspace,exception);
if (status == MagickFalse)
{
luminance_image=DestroyImage(luminance_image);
blur_image=DestroyImage(blur_image);
kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
return((Image *) NULL);
}
/*
Threshold blur image.
*/
status=MagickTrue;
progress=0;
/*
`center` is the channel offset of the neighborhood's central pixel
within the (columns+width) x width region fetched per row.
*/
center=(ssize_t) (GetPixelChannels(image)*(image->columns+width)*
((width-1)/2L)+GetPixelChannels(image)*((width-1)/2L));
image_view=AcquireVirtualCacheView(image,exception);
luminance_view=AcquireVirtualCacheView(luminance_image,exception);
blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,blur_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
contrast;
MagickBooleanType
sync;
const Quantum
*magick_restrict l,
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
/* A prior row's failure aborts remaining rows (cannot break in OpenMP). */
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t)
((width-1)/2L),image->columns+width,width,exception);
l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y-
(ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception);
q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (l == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
intensity;
ssize_t
i;
intensity=GetPixelIntensity(image,p+center);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
alpha,
gamma,
pixel;
PixelChannel
channel;
PixelTrait
blur_traits,
traits;
const MagickRealType
*magick_restrict k;
const Quantum
*magick_restrict luminance_pixels,
*magick_restrict pixels;
ssize_t
u;
ssize_t
v;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
blur_traits=GetPixelChannelTraits(blur_image,channel);
if ((traits == UndefinedPixelTrait) ||
(blur_traits == UndefinedPixelTrait))
continue;
if ((blur_traits & CopyPixelTrait) != 0)
{
/* Channel is copy-only: pass the central source value through. */
SetPixelChannel(blur_image,channel,p[center+i],q);
continue;
}
k=kernel;
pixel=0.0;
pixels=p;
luminance_pixels=l;
gamma=0.0;
if ((blur_traits & BlendPixelTrait) == 0)
{
/*
Non-blended channel: only neighbors whose luminance differs from
the central pixel by less than `threshold` contribute; gamma
accumulates the weights actually used.
*/
for (v=0; v < (ssize_t) width; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
contrast=GetPixelIntensity(luminance_image,luminance_pixels)-
intensity;
if (fabs(contrast) < threshold)
{
pixel+=(*k)*pixels[i];
gamma+=(*k);
}
k++;
pixels+=GetPixelChannels(image);
luminance_pixels+=GetPixelChannels(luminance_image);
}
pixels+=GetPixelChannels(image)*image->columns;
luminance_pixels+=GetPixelChannels(luminance_image)*
luminance_image->columns;
}
if (fabs((double) gamma) < MagickEpsilon)
{
/* No neighbor passed the contrast test: keep the source pixel. */
SetPixelChannel(blur_image,channel,p[center+i],q);
continue;
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
continue;
}
/*
Blended channel: as above but each contribution is additionally
weighted by the neighbor's alpha; contrast here is measured on the
source image rather than the luminance copy.
*/
for (v=0; v < (ssize_t) width; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
contrast=GetPixelIntensity(image,pixels)-intensity;
if (fabs(contrast) < threshold)
{
alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
pixel+=(*k)*alpha*pixels[i];
gamma+=(*k)*alpha;
}
k++;
pixels+=GetPixelChannels(image);
luminance_pixels+=GetPixelChannels(luminance_image);
}
pixels+=GetPixelChannels(image)*image->columns;
luminance_pixels+=GetPixelChannels(luminance_image)*
luminance_image->columns;
}
if (fabs((double) gamma) < MagickEpsilon)
{
/* No neighbor passed the contrast test: keep the source pixel. */
SetPixelChannel(blur_image,channel,p[center+i],q);
continue;
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
}
p+=GetPixelChannels(image);
l+=GetPixelChannels(luminance_image);
q+=GetPixelChannels(blur_image);
}
sync=SyncCacheViewAuthenticPixels(blur_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
/* The atomic protects only the increment; the read below is benign. */
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SelectiveBlurImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
blur_image->type=image->type;
blur_view=DestroyCacheView(blur_view);
luminance_view=DestroyCacheView(luminance_view);
image_view=DestroyCacheView(image_view);
luminance_image=DestroyImage(luminance_image);
kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
if (status == MagickFalse)
blur_image=DestroyImage(blur_image);
return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadeImage() shines a distant light on an image to create a
% three-dimensional effect. You control the positioning of the light with
% azimuth and elevation; azimuth is measured in degrees off the x axis
% and elevation is measured in pixels above the Z axis.
%
% The format of the ShadeImage method is:
%
% Image *ShadeImage(const Image *image,const MagickBooleanType gray,
% const double azimuth,const double elevation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o gray: A value other than zero shades the intensity of each pixel.
%
% o azimuth, elevation: Define the light source direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray,
  const double azimuth,const double elevation,ExceptionInfo *exception)
{
#define GetShadeIntensity(image,pixel) \
  ClampPixel(GetPixelIntensity((image),(pixel)))
#define ShadeImageTag "Shade/Image"

  CacheView
    *image_view,
    *shade_view;

  Image
    *linear_image,
    *shade_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    light;

  ssize_t
    y;

  /*
    Initialize shaded image attributes.  Two clones are made: linear_image is
    read-only source data, shade_image receives the shaded result.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  shade_image=CloneImage(image,0,0,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL))
    {
      /* Release whichever clone succeeded before bailing out. */
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (shade_image != (Image *) NULL)
        shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(shade_image,DirectClass,exception) == MagickFalse)
    {
      linear_image=DestroyImage(linear_image);
      shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  /*
    Compute the light vector from spherical (azimuth,elevation) angles,
    scaled to the quantum range.
  */
  light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
  /*
    Shade image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(linear_image,exception);
  shade_view=AcquireAuthenticCacheView(shade_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(linear_image,shade_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    double
      distance,
      normal_distance,
      shade;

    PrimaryInfo
      normal;

    const Quantum
      *magick_restrict center,
      *magick_restrict p,
      *magick_restrict post,
      *magick_restrict pre;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Read a 3-row window (y-1..y+1) with a one-pixel border on each side so
      the 3x3 neighborhood is available for every column.
    */
    p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3,
      exception);
    q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Shade this row of pixels.
    */
    normal.z=2.0*(double) QuantumRange; /* constant Z of surface normal */
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      ssize_t
        i;

      /*
        Determine the surface normal and compute shading.  pre/center/post
        point at the pixel directly above, at, and below (x,y), each offset
        by one pixel into the bordered window.
      */
      pre=p+GetPixelChannels(linear_image);
      center=pre+(linear_image->columns+2)*GetPixelChannels(linear_image);
      post=center+(linear_image->columns+2)*GetPixelChannels(linear_image);
      /*
        Prewitt-style intensity gradient: normal.x is the left-column sum
        minus the right-column sum; normal.y is the bottom-row sum minus the
        top-row sum.
      */
      normal.x=(double) (
        GetShadeIntensity(linear_image,pre-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,center-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,post-GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,center+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,post+GetPixelChannels(linear_image)));
      normal.y=(double) (
        GetShadeIntensity(linear_image,post-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,post)+
        GetShadeIntensity(linear_image,post+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre-GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre)-
        GetShadeIntensity(linear_image,pre+GetPixelChannels(linear_image)));
      if ((fabs(normal.x) <= MagickEpsilon) &&
          (fabs(normal.y) <= MagickEpsilon))
        shade=light.z;  /* flat surface: shade is the light's Z component */
      else
        {
          /* shade = (normal . light)/|normal|, clamped to 0 when the
             surface faces away from the light. */
          shade=0.0;
          distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
          if (distance > MagickEpsilon)
            {
              normal_distance=normal.x*normal.x+normal.y*normal.y+
                normal.z*normal.z;
              if (normal_distance > (MagickEpsilon*MagickEpsilon))
                shade=distance/sqrt((double) normal_distance);
            }
        }
      for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          shade_traits,
          traits;

        channel=GetPixelChannelChannel(linear_image,i);
        traits=GetPixelChannelTraits(linear_image,channel);
        shade_traits=GetPixelChannelTraits(shade_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (shade_traits == UndefinedPixelTrait))
          continue;
        if ((shade_traits & CopyPixelTrait) != 0)
          {
            /* Copy-trait channels pass through unchanged. */
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if ((traits & UpdatePixelTrait) == 0)
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if (gray != MagickFalse)
          {
            /* gray mode: emit the shade factor itself, not the pixel. */
            SetPixelChannel(shade_image,channel,ClampToQuantum(shade),q);
            continue;
          }
        SetPixelChannel(shade_image,channel,ClampToQuantum(QuantumScale*shade*
          center[i]),q);
      }
      p+=GetPixelChannels(linear_image);
      q+=GetPixelChannels(shade_image);
    }
    if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ShadeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  shade_view=DestroyCacheView(shade_view);
  image_view=DestroyCacheView(image_view);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    shade_image=DestroyImage(shade_image);
  return(shade_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a r p e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SharpenImage() sharpens the image. We convolve the image with a Gaussian
% operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SharpenImage() selects a suitable radius for you.
%
% Using a separable kernel would be faster, but the negative weights cancel
% out on the corners of the kernel producing often undesirable ringing in the
% filtered result; this can be avoided by using a 2D gaussian shaped image
% sharpening kernel instead.
%
% The format of the SharpenImage method is:
%
% Image *SharpenImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  /*
    Sharpen the image by convolving with a normalized kernel: a negative
    Gaussian surround with a strong positive center weight.  The kernel is
    built here and handed to ConvolveImage(); the result image (or NULL on
    failure, with details in exception) is returned.
  */
  double
    scale,
    sum;

  Image
    *sharp_image;

  KernelInfo
    *kernel_info;

  size_t
    width;

  ssize_t
    k,
    mid,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Fill the kernel with a negated 2-D Gaussian and accumulate its weight.
  */
  sum=0.0;
  mid=(ssize_t) (kernel_info->width-1)/2;
  k=0;
  for (v=(-mid); v <= mid; v++)
    for (u=(-mid); u <= mid; u++)
    {
      kernel_info->values[k]=(MagickRealType) (-exp(-((double) u*u+v*v)/(2.0*
        MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
      sum+=kernel_info->values[k];
      k++;
    }
  /*
    Overwrite the center tap (k is now width*height, so k/2 indexes the
    middle element) so the kernel's total weight becomes positive.
  */
  kernel_info->values[k/2]=(double) ((-2.0)*sum);
  /*
    Normalize: rescale every tap so the kernel sums to unity.
  */
  sum=0.0;
  for (k=0; k < (ssize_t) (kernel_info->width*kernel_info->height); k++)
    sum+=kernel_info->values[k];
  scale=PerceptibleReciprocal(sum);
  for (k=0; k < (ssize_t) (kernel_info->width*kernel_info->height); k++)
    kernel_info->values[k]*=scale;
  sharp_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(sharp_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p r e a d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpreadImage() is a special effects method that randomly displaces each
% pixel in a square area defined by the radius parameter.
%
% The format of the SpreadImage method is:
%
% Image *SpreadImage(const Image *image,
% const PixelInterpolateMethod method,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: interpolation method.
%
% o radius: choose a random pixel in a neighborhood of this extent.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpreadImage(const Image *image,
  const PixelInterpolateMethod method,const double radius,
  ExceptionInfo *exception)
{
#define SpreadImageTag "Spread/Image"

  CacheView
    *image_view,
    *spread_view;

  Image
    *spread_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  size_t
    width;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize spread image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  spread_image=CloneImage(image,0,0,MagickTrue,exception);
  if (spread_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(spread_image,DirectClass,exception) == MagickFalse)
    {
      spread_image=DestroyImage(spread_image);
      return((Image *) NULL);
    }
  /*
    Spread image.  width derives the displacement extent from the radius.
  */
  status=MagickTrue;
  progress=0;
  width=GetOptimalKernelWidth1D(radius,0.5);
  /* One RandomInfo per thread so pseudo-random streams never contend. */
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireVirtualCacheView(image,exception);
  spread_view=AcquireAuthenticCacheView(spread_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,spread_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's RandomInfo */

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PointInfo
        point;

      /*
        Displace the sample location by width*(r-0.5) per axis and
        interpolate the source pixel there into the destination.
      */
      point.x=GetPseudoRandomValue(random_info[id]);
      point.y=GetPseudoRandomValue(random_info[id]);
      status=InterpolatePixelChannels(image,image_view,spread_image,method,
        (double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),q,
        exception);
      if (status == MagickFalse)
        break;
      q+=GetPixelChannels(spread_image);
    }
    if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SpreadImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  spread_view=DestroyCacheView(spread_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    spread_image=DestroyImage(spread_image);
  return(spread_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n s h a r p M a s k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnsharpMaskImage() sharpens one or more image channels. We convolve the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and UnsharpMaskImage() selects a suitable radius for you.
%
% The format of the UnsharpMaskImage method is:
%
% Image *UnsharpMaskImage(const Image *image,const double radius,
% const double sigma,const double gain,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o gain: the percentage of the difference between the original and the
% blur image that is added back into the original.
%
% o threshold: the threshold in pixels needed to apply the difference gain.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *UnsharpMaskImage(const Image *image,const double radius,
  const double sigma,const double gain,const double threshold,
  ExceptionInfo *exception)
{
#define SharpenImageTag "Sharpen/Image"

  CacheView
    *image_view,
    *unsharp_view;

  Image
    *unsharp_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    quantum_threshold;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
/* This kernel appears to be broken.
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  unsharp_image=AccelerateUnsharpMaskImage(image,radius,sigma,gain,threshold,
    exception);
  if (unsharp_image != (Image *) NULL)
    return(unsharp_image);
#endif
*/
  /*
    unsharp_image starts as the Gaussian-blurred source; its pixels are then
    overwritten in place with the unsharp-masked result.
  */
  unsharp_image=BlurImage(image,radius,sigma,exception);
  if (unsharp_image == (Image *) NULL)
    return((Image *) NULL);
  /* Scale threshold to quantum units. */
  quantum_threshold=(double) QuantumRange*threshold;
  /*
    Unsharp-mask image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,unsharp_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits,
          unsharp_traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        unsharp_traits=GetPixelChannelTraits(unsharp_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (unsharp_traits == UndefinedPixelTrait))
          continue;
        if ((unsharp_traits & CopyPixelTrait) != 0)
          {
            /* Copy-trait channels pass through from the source. */
            SetPixelChannel(unsharp_image,channel,p[i],q);
            continue;
          }
        /* pixel = original - blurred: the high-frequency detail. */
        pixel=p[i]-(double) GetPixelChannel(unsharp_image,channel,q);
        if (fabs(2.0*pixel) < quantum_threshold)
          pixel=(double) p[i];  /* below threshold: keep the original */
        else
          pixel=(double) p[i]+gain*pixel;  /* add back amplified detail */
        SetPixelChannel(unsharp_image,channel,ClampToQuantum(pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(unsharp_image);
    }
    if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SharpenImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  unsharp_image->type=image->type;
  unsharp_view=DestroyCacheView(unsharp_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    unsharp_image=DestroyImage(unsharp_image);
  return(unsharp_image);
}
|
maxwell_grad.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
/******************************************************************************
* OpenMP Problems
*
* Need to fix the way these variables are set and incremented in loops:
* i, nrows (only where they are listed at the end of SMP_PRIVATE)
*
* Are private static arrays a problem?
*
******************************************************************************/
#include "_hypre_sstruct_ls.h"
/*--------------------------------------------------------------------------
* hypre_Maxwell_Grad.c
* Forms a node-to-edge gradient operator. Looping over the
* edge grid so that each processor fills up only its own rows. Each
* processor will have its processor interface nodal ranks.
* Loops over two types of boxes, interior of grid boxes and boundary
* of boxes. Algo:
* find all nodal and edge physical boundary points and set
* the appropriate flag to be 0 at a boundary dof.
* set -1's in value array
* for each edge box,
* for interior
* {
* connect edge ijk (row) to nodes (col) connected to this edge
* and change -1 to 1 if needed;
* }
* for boundary layers
* {
* if edge not on the physical boundary connect only the nodes
* that are not on the physical boundary
* }
* set parcsr matrix with values;
*
* Note that the nodes that are on the processor interface can be
* on the physical boundary. But the off-proc edges connected to this
* type of node will be a physical boundary edge.
*
*--------------------------------------------------------------------------*/
hypre_ParCSRMatrix *
hypre_Maxwell_Grad(hypre_SStructGrid *grid)
{
MPI_Comm comm = (grid -> comm);
HYPRE_IJMatrix T_grad;
hypre_ParCSRMatrix *parcsr_grad;
HYPRE_Int matrix_type= HYPRE_PARCSR;
hypre_SStructGrid *node_grid, *edge_grid;
hypre_SStructPGrid *pgrid;
hypre_StructGrid *var_grid;
hypre_BoxArray *boxes, *tmp_box_array1, *tmp_box_array2;
hypre_BoxArray *edge_boxes, *cell_boxes;
hypre_Box *box, *cell_box;
hypre_Box layer, interior_box;
hypre_Box *box_piece;
hypre_BoxManager *boxman;
hypre_BoxManEntry *entry;
HYPRE_Int *inode, *jedge;
HYPRE_Int nrows, nnodes, *nflag, *eflag, *ncols;
HYPRE_Real *vals;
hypre_Index index;
hypre_Index loop_size, start, lindex;
hypre_Index shift, shift2;
hypre_Index *offsets, *varoffsets;
HYPRE_Int nparts= hypre_SStructGridNParts(grid);
HYPRE_Int ndim = hypre_SStructGridNDim(grid);
HYPRE_SStructVariable vartype_node, *vartype_edges;
HYPRE_SStructVariable *vartypes;
HYPRE_Int nvars, part;
HYPRE_Int i, j, k, m, n, d;
HYPRE_Int *direction, ndirection;
HYPRE_Int ilower, iupper;
HYPRE_Int jlower, jupper;
HYPRE_Int start_rank1, start_rank2, rank;
HYPRE_Int myproc;
HYPRE_Int ierr=0;
hypre_BoxInit(&layer, ndim);
hypre_BoxInit(&interior_box, ndim);
hypre_MPI_Comm_rank(comm, &myproc);
hypre_ClearIndex(shift);
for (i= 0; i< ndim; i++)
{
hypre_IndexD(shift, i)= -1;
}
/* To get the correct ranks, separate node & edge grids must be formed.
Note that the edge vars must be ordered the same way as is in grid.*/
HYPRE_SStructGridCreate(comm, ndim, nparts, &node_grid);
HYPRE_SStructGridCreate(comm, ndim, nparts, &edge_grid);
vartype_node = HYPRE_SSTRUCT_VARIABLE_NODE;
vartype_edges= hypre_TAlloc(HYPRE_SStructVariable, ndim);
/* Assuming the same edge variable types on all parts */
pgrid = hypre_SStructGridPGrid(grid, 0);
vartypes= hypre_SStructPGridVarTypes(pgrid);
nvars = hypre_SStructPGridNVars(pgrid);
k= 0;
for (i= 0; i< nvars; i++)
{
j= vartypes[i];
switch(j)
{
case 2:
{
vartype_edges[k]= HYPRE_SSTRUCT_VARIABLE_XFACE;
k++;
break;
}
case 3:
{
vartype_edges[k]= HYPRE_SSTRUCT_VARIABLE_YFACE;
k++;
break;
}
case 5:
{
vartype_edges[k]= HYPRE_SSTRUCT_VARIABLE_XEDGE;
k++;
break;
}
case 6:
{
vartype_edges[k]= HYPRE_SSTRUCT_VARIABLE_YEDGE;
k++;
break;
}
case 7:
{
vartype_edges[k]= HYPRE_SSTRUCT_VARIABLE_ZEDGE;
k++;
break;
}
} /* switch(j) */
} /* for (i= 0; i< nvars; i++) */
for (part= 0; part< nparts; part++)
{
pgrid= hypre_SStructGridPGrid(grid, part);
var_grid= hypre_SStructPGridCellSGrid(pgrid) ;
boxes= hypre_StructGridBoxes(var_grid);
hypre_ForBoxI(j, boxes)
{
box= hypre_BoxArrayBox(boxes, j);
HYPRE_SStructGridSetExtents(node_grid, part,
hypre_BoxIMin(box), hypre_BoxIMax(box));
HYPRE_SStructGridSetExtents(edge_grid, part,
hypre_BoxIMin(box), hypre_BoxIMax(box));
}
HYPRE_SStructGridSetVariables(node_grid, part, 1, &vartype_node);
HYPRE_SStructGridSetVariables(edge_grid, part, ndim, vartype_edges);
}
HYPRE_SStructGridAssemble(node_grid);
HYPRE_SStructGridAssemble(edge_grid);
/* CREATE IJ_MATRICES- need to find the size of each one. Notice that the row
and col ranks of these matrices can be created using only grid information.
Grab the first part, first variable, first box, and lower index (lower rank);
Grab the last part, last variable, last box, and upper index (upper rank). */
/* Grad: node(col) -> edge(row). Same for 2-d and 3-d */
/* lower rank */
part= 0;
i = 0;
hypre_SStructGridBoxProcFindBoxManEntry(edge_grid, part, 0, i, myproc, &entry);
pgrid = hypre_SStructGridPGrid(edge_grid, part);
var_grid= hypre_SStructPGridSGrid(pgrid, 0);
boxes = hypre_StructGridBoxes(var_grid);
box = hypre_BoxArrayBox(boxes, 0);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMin(box), &ilower);
hypre_SStructGridBoxProcFindBoxManEntry(node_grid, part, 0, i, myproc, &entry);
pgrid = hypre_SStructGridPGrid(node_grid, part);
var_grid= hypre_SStructPGridSGrid(pgrid, 0);
boxes = hypre_StructGridBoxes(var_grid);
box = hypre_BoxArrayBox(boxes, 0);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMin(box), &jlower);
/* upper rank */
part= nparts-1;
pgrid = hypre_SStructGridPGrid(edge_grid, part);
nvars = hypre_SStructPGridNVars(pgrid);
var_grid= hypre_SStructPGridSGrid(pgrid, nvars-1);
boxes = hypre_StructGridBoxes(var_grid);
box = hypre_BoxArrayBox(boxes, hypre_BoxArraySize(boxes)-1);
hypre_SStructGridBoxProcFindBoxManEntry(edge_grid, part, nvars-1,
hypre_BoxArraySize(boxes)-1, myproc,
&entry);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMax(box), &iupper);
pgrid = hypre_SStructGridPGrid(node_grid, part);
nvars = hypre_SStructPGridNVars(pgrid);
var_grid= hypre_SStructPGridSGrid(pgrid, nvars-1);
boxes = hypre_StructGridBoxes(var_grid);
box = hypre_BoxArrayBox(boxes, hypre_BoxArraySize(boxes)-1);
hypre_SStructGridBoxProcFindBoxManEntry(node_grid, part, nvars-1,
hypre_BoxArraySize(boxes)-1, myproc,
&entry);
hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMax(box), &jupper);
HYPRE_IJMatrixCreate(comm, ilower, iupper, jlower, jupper, &T_grad);
HYPRE_IJMatrixSetObjectType(T_grad, HYPRE_PARCSR);
HYPRE_IJMatrixInitialize(T_grad);
/*------------------------------------------------------------------------------
* fill up the parcsr matrix.
*------------------------------------------------------------------------------*/
/* count the no. of rows. Make sure repeated nodes along the boundaries are counted.*/
nrows = 0;
nnodes= 0;
for (part= 0; part< nparts; part++)
{
pgrid= hypre_SStructGridPGrid(edge_grid, part);
nvars= hypre_SStructPGridNVars(pgrid);
for (m= 0; m< nvars; m++)
{
var_grid= hypre_SStructPGridSGrid(pgrid, m);
boxes = hypre_StructGridBoxes(var_grid);
hypre_ForBoxI(j, boxes)
{
box= hypre_BoxArrayBox(boxes, j);
/* make slightly bigger to handle any shared nodes */
hypre_CopyBox(box, &layer);
hypre_AddIndexes(hypre_BoxIMin(&layer), shift, 3, hypre_BoxIMin(&layer));
hypre_SubtractIndexes(hypre_BoxIMax(&layer), shift, 3, hypre_BoxIMax(&layer));
nrows+= hypre_BoxVolume(&layer);
}
}
pgrid= hypre_SStructGridPGrid(node_grid, part);
var_grid= hypre_SStructPGridSGrid(pgrid, 0); /* only one variable grid */
boxes = hypre_StructGridBoxes(var_grid);
hypre_ForBoxI(j, boxes)
{
box= hypre_BoxArrayBox(boxes, j);
/* make slightly bigger to handle any shared nodes */
hypre_CopyBox(box, &layer);
hypre_AddIndexes(hypre_BoxIMin(&layer), shift, 3, hypre_BoxIMin(&layer));
hypre_SubtractIndexes(hypre_BoxIMax(&layer), shift, 3, hypre_BoxIMax(&layer));
nnodes+= hypre_BoxVolume(&layer);
}
}
eflag = hypre_CTAlloc(HYPRE_Int, nrows);
nflag = hypre_CTAlloc(HYPRE_Int, nnodes);
/* Set eflag to have the number of nodes connected to an edge (2) and
nflag to have the number of edges connect to a node. */
for (i= 0; i< nrows; i++)
{
eflag[i]= 2;
}
j= 2*ndim;
for (i= 0; i< nnodes; i++)
{
nflag[i]= j;
}
/* Determine physical boundary points. Get the rank and set flag[rank]= 0.
This will mark boundary dofs, i.e., flag[rank]= 0 flags a boundary dof. */
start_rank1= hypre_SStructGridStartRank(node_grid);
start_rank2= hypre_SStructGridStartRank(edge_grid);
for (part= 0; part< nparts; part++)
{
/* node flag */
pgrid = hypre_SStructGridPGrid(node_grid, part);
var_grid= hypre_SStructPGridSGrid(pgrid, 0);
boxes = hypre_StructGridBoxes(var_grid);
boxman = hypre_SStructGridBoxManager(node_grid, part, 0);
hypre_ForBoxI(j, boxes)
{
box= hypre_BoxArrayBox(boxes, j);
hypre_BoxManGetEntry(boxman, myproc, j, &entry);
i= hypre_BoxVolume(box);
tmp_box_array1= hypre_BoxArrayCreate(0, ndim);
ierr += hypre_BoxBoundaryG(box, var_grid, tmp_box_array1);
for (m= 0; m< hypre_BoxArraySize(tmp_box_array1); m++)
{
box_piece= hypre_BoxArrayBox(tmp_box_array1, m);
if (hypre_BoxVolume(box_piece) < i)
{
hypre_BoxGetSize(box_piece, loop_size);
hypre_CopyIndex(hypre_BoxIMin(box_piece), start);
hypre_BoxLoop0Begin(ndim, loop_size);
#if 0 /* Are private static arrays a problem? */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,lindex,index,rank) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop0For()
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(index, lindex[0], lindex[1], lindex[2]);
hypre_AddIndexes(index, start, 3, index);
hypre_SStructBoxManEntryGetGlobalRank(entry, index,
&rank, matrix_type);
nflag[rank-start_rank1]= 0;
}
hypre_BoxLoop0End();
} /* if (hypre_BoxVolume(box_piece) < i) */
} /* for (m= 0; m< hypre_BoxArraySize(tmp_box_array1); m++) */
hypre_BoxArrayDestroy(tmp_box_array1);
} /* hypre_ForBoxI(j, boxes) */
/*-----------------------------------------------------------------
* edge flag. Since we want only the edges that completely lie
* on a boundary, whereas the boundary extraction routines mark
* edges that touch the boundary, we need to call the boundary
* routines in appropriate directions:
* 2-d horizontal edges (y faces)- search in j directions
* 2-d vertical edges (x faces) - search in i directions
* 3-d x edges - search in j,k directions
* 3-d y edges - search in i,k directions
* 3-d z edges - search in i,j directions
*-----------------------------------------------------------------*/
pgrid = hypre_SStructGridPGrid(edge_grid, part);
nvars = hypre_SStructPGridNVars(pgrid);
direction= hypre_TAlloc(HYPRE_Int, 2); /* only two directions at most */
for (m= 0; m< nvars; m++)
{
var_grid= hypre_SStructPGridSGrid(pgrid, m);
boxes = hypre_StructGridBoxes(var_grid);
boxman = hypre_SStructGridBoxManager(edge_grid, part, m);
j= vartype_edges[m];
switch(j)
{
case 2: /* x faces, 2d */
{
ndirection = 1;
direction[0]= 0;
break;
}
case 3: /* y faces, 2d */
{
ndirection = 1;
direction[0]= 1;
break;
}
case 5: /* x edges, 3d */
{
ndirection = 2;
direction[0]= 1;
direction[1]= 2;
break;
}
case 6: /* y edges, 3d */
{
ndirection = 2;
direction[0]= 0;
direction[1]= 2;
break;
}
case 7: /* z edges, 3d */
{
ndirection = 2;
direction[0]= 0;
direction[1]= 1;
break;
}
} /* switch(j) */
hypre_ForBoxI(j, boxes)
{
box= hypre_BoxArrayBox(boxes, j);
hypre_BoxManGetEntry(boxman, myproc, j, &entry);
i= hypre_BoxVolume(box);
for (d= 0; d< ndirection; d++)
{
tmp_box_array1= hypre_BoxArrayCreate(0, ndim);
tmp_box_array2= hypre_BoxArrayCreate(0, ndim);
ierr+= hypre_BoxBoundaryDG(box, var_grid, tmp_box_array1,
tmp_box_array2, direction[d]);
for (k= 0; k< hypre_BoxArraySize(tmp_box_array1); k++)
{
box_piece= hypre_BoxArrayBox(tmp_box_array1, k);
if (hypre_BoxVolume(box_piece) < i)
{
hypre_BoxGetSize(box_piece, loop_size);
hypre_CopyIndex(hypre_BoxIMin(box_piece), start);
hypre_BoxLoop0Begin(ndim, loop_size);
#if 0 /* Are private static arrays a problem? */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,lindex,index,rank) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop0For()
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(index, lindex[0], lindex[1], lindex[2]);
hypre_AddIndexes(index, start, 3, index);
hypre_SStructBoxManEntryGetGlobalRank(entry, index,
&rank, matrix_type);
eflag[rank-start_rank2]= 0;
}
hypre_BoxLoop0End();
} /* if (hypre_BoxVolume(box_piece) < i) */
} /* for (k= 0; k< hypre_BoxArraySize(tmp_box_array1); k++) */
hypre_BoxArrayDestroy(tmp_box_array1);
for (k= 0; k< hypre_BoxArraySize(tmp_box_array2); k++)
{
box_piece= hypre_BoxArrayBox(tmp_box_array2, k);
if (hypre_BoxVolume(box_piece) < i)
{
hypre_BoxGetSize(box_piece, loop_size);
hypre_CopyIndex(hypre_BoxIMin(box_piece), start);
hypre_BoxLoop0Begin(ndim, loop_size);
#if 0 /* Are private static arrays a problem? */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,lindex,index,rank) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop0For()
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(index, lindex[0], lindex[1], lindex[2]);
hypre_AddIndexes(index, start, 3, index);
hypre_SStructBoxManEntryGetGlobalRank(entry, index,
&rank, matrix_type);
eflag[rank-start_rank2]= 0;
}
hypre_BoxLoop0End();
} /* if (hypre_BoxVolume(box_piece) < i) */
} /* for (k= 0; k< hypre_BoxArraySize(tmp_box_array2); k++) */
hypre_BoxArrayDestroy(tmp_box_array2);
} /* for (d= 0; d< ndirection; d++) */
} /* hypre_ForBoxI(j, boxes) */
} /* for (m= 0; m< nvars; m++) */
hypre_TFree(direction);
} /* for (part= 0; part< nparts; part++) */
/* set vals. Will have more memory than is needed- extra allotted
for repeated nodes. */
inode= hypre_CTAlloc(HYPRE_Int, nrows);
ncols= hypre_CTAlloc(HYPRE_Int, nrows);
/* each row can have at most two columns */
k= 2*nrows;
jedge= hypre_CTAlloc(HYPRE_Int, k);
vals = hypre_TAlloc(HYPRE_Real, k);
for (i= 0; i< k; i++)
{
vals[i]=-1.0;
}
/* to get the correct col connection to each node, we need to offset
index ijk. Determine these. Assuming the same var ordering for each
part. Note that these are not the variable offsets. */
offsets = hypre_TAlloc(hypre_Index, ndim);
varoffsets= hypre_TAlloc(hypre_Index, ndim);
for (i= 0; i< ndim; i++)
{
j= vartype_edges[i];
hypre_SStructVariableGetOffset(vartype_edges[i], ndim, varoffsets[i]);
switch(j)
{
case 2:
{
hypre_SetIndex3(offsets[i], 0, 1, 0);
break;
}
case 3:
{
hypre_SetIndex3(offsets[i], 1, 0, 0);
break;
}
case 5:
{
hypre_SetIndex3(offsets[i], 1, 0, 0);
break;
}
case 6:
{
hypre_SetIndex3(offsets[i], 0, 1, 0);
break;
}
case 7:
{
hypre_SetIndex3(offsets[i], 0, 0, 1);
break;
}
} /* switch(j) */
} /* for (i= 0; i< ndim; i++) */
nrows= 0; i= 0;
for (part= 0; part< nparts; part++)
{
/* grab boxarray for node rank extracting later */
pgrid = hypre_SStructGridPGrid(node_grid, part);
var_grid = hypre_SStructPGridSGrid(pgrid, 0);
/* grab edge structures */
pgrid = hypre_SStructGridPGrid(edge_grid, part);
/* the cell-centred reference box is used to get the correct
interior edge box. For parallel distribution of the edge
grid, simple contraction of the edge box does not get the
correct interior edge box. Need to contract the cell box. */
var_grid= hypre_SStructPGridCellSGrid(pgrid);
cell_boxes= hypre_StructGridBoxes(var_grid);
nvars = hypre_SStructPGridNVars(pgrid);
for (n= 0; n< nvars; n++)
{
var_grid = hypre_SStructPGridSGrid(pgrid, n);
edge_boxes= hypre_StructGridBoxes(var_grid);
hypre_ForBoxI(j, edge_boxes)
{
box= hypre_BoxArrayBox(edge_boxes, j);
cell_box= hypre_BoxArrayBox(cell_boxes, j);
hypre_CopyBox(cell_box, &interior_box);
/* shrink the cell_box to get the interior cell_box. All
edges in the interior box should be on this proc. */
hypre_SubtractIndexes(hypre_BoxIMin(&interior_box), shift, 3,
hypre_BoxIMin(&interior_box));
hypre_AddIndexes(hypre_BoxIMax(&interior_box), shift, 3,
hypre_BoxIMax(&interior_box));
/* offset this to the variable interior box */
hypre_CopyBox(&interior_box, &layer);
hypre_SubtractIndexes(hypre_BoxIMin(&layer), varoffsets[n], 3,
hypre_BoxIMin(&layer));
hypre_BoxGetSize(&layer, loop_size);
hypre_CopyIndex(hypre_BoxIMin(&layer), start);
/* Interior box- loop over each edge and find the row rank and
then the column ranks for the connected nodes. Change the
appropriate values to 1. */
hypre_BoxLoop0Begin(ndim, loop_size);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,lindex,index,entry,m,i,nrows) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop0For()
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(index, lindex[0], lindex[1], lindex[2]);
hypre_AddIndexes(index, start, 3, index);
/* edge ijk connected to nodes ijk & ijk-offsets. Interior edges
and so no boundary edges to consider. */
hypre_SStructGridFindBoxManEntry(edge_grid, part, index, n,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, index, &m, matrix_type);
inode[nrows]= m;
hypre_SStructGridFindBoxManEntry(node_grid, part, index, 0,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, index, &m, matrix_type);
jedge[i]= m;
vals[i] = 1.0; /* change only this connection */
i++;
hypre_SubtractIndexes(index, offsets[n], 3, index);
hypre_SStructGridFindBoxManEntry(node_grid, part, index, 0,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, index, &m, matrix_type);
jedge[i]= m;
i++;
ncols[nrows]= 2;
nrows++;
}
hypre_BoxLoop0End();
/* now the boundary layers. To cases to consider: is the
edge totally on the boundary or is the edge connected
to the boundary. Need to check eflag & nflag. */
for (d= 0; d< ndim; d++)
{
/*shift the layer box in the correct direction and distance.
distance= hypre_BoxIMax(box)[d]-hypre_BoxIMin(box)[d]+1-1
= hypre_BoxIMax(box)[d]-hypre_BoxIMin(box)[d] */
hypre_ClearIndex(shift2);
shift2[d]= hypre_BoxIMax(box)[d]-hypre_BoxIMin(box)[d];
/* ndirection= 0 negative; ndirection= 1 positive */
for (ndirection= 0; ndirection< 2; ndirection++)
{
hypre_CopyBox(box, &layer);
if (ndirection)
{
hypre_BoxShiftPos(&layer, shift2);
}
else
{
hypre_BoxShiftNeg(&layer, shift2);
}
hypre_IntersectBoxes(box, &layer, &layer);
hypre_BoxGetSize(&layer, loop_size);
hypre_CopyIndex(hypre_BoxIMin(&layer), start);
hypre_BoxLoop0Begin(ndim, loop_size);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,lindex,index,entry,m,i,nrows) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop0For()
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex3(index, lindex[0], lindex[1], lindex[2]);
hypre_AddIndexes(index, start, 3, index);
/* edge ijk connects to nodes ijk & ijk+offsets. */
hypre_SStructGridFindBoxManEntry(edge_grid, part, index, n,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, index, &m,
matrix_type);
/* check if the edge lies on the boundary & if not
check if the connecting node is on the boundary. */
if (eflag[m-start_rank2])
{
inode[nrows]= m;
/* edge not completely on the boundary. One connecting
node must be in the interior. */
hypre_SStructGridFindBoxManEntry(node_grid, part, index, 0,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, index, &m,
matrix_type);
/* check if node on my processor. If not, the node must
be in the interior (draw a diagram to see this). */
if (m >= start_rank1 && m <= jupper)
{
/* node on proc. Now check if on the boundary. */
if (nflag[m-start_rank1]) /* interior node */
{
jedge[i]= m;
vals[i] = 1.0;
i++;
ncols[nrows]++;
}
}
else /* node off-proc */
{
jedge[i]= m;
vals[i] = 1.0;
i++;
ncols[nrows]++;
}
/* ijk+offsets */
hypre_SubtractIndexes(index, offsets[n], 3, index);
hypre_SStructGridFindBoxManEntry(node_grid, part, index, 0,
&entry);
hypre_SStructBoxManEntryGetGlobalRank(entry, index, &m,
matrix_type);
/* boundary checks again */
if (m >= start_rank1 && m <= jupper)
{
/* node on proc. Now check if on the boundary. */
if (nflag[m-start_rank1]) /* interior node */
{
jedge[i]= m;
i++;
ncols[nrows]++;
}
}
else /* node off-proc */
{
jedge[i]= m;
i++;
ncols[nrows]++;
}
nrows++; /* must have at least one node connection */
} /* if (eflag[m-start_rank2]) */
}
hypre_BoxLoop0End();
} /* for (ndirection= 0; ndirection< 2; ndirection++) */
} /* for (d= 0; d< ndim; d++) */
} /* hypre_ForBoxI(j, boxes) */
} /* for (n= 0; n< nvars; n++) */
} /* for (part= 0; part< nparts; part++) */
hypre_TFree(offsets);
hypre_TFree(varoffsets);
hypre_TFree(vartype_edges);
HYPRE_SStructGridDestroy(node_grid);
HYPRE_SStructGridDestroy(edge_grid);
HYPRE_IJMatrixSetValues(T_grad, nrows, ncols,
(const HYPRE_Int*) inode, (const HYPRE_Int*) jedge,
(const HYPRE_Real*) vals);
HYPRE_IJMatrixAssemble(T_grad);
hypre_TFree(eflag);
hypre_TFree(nflag);
hypre_TFree(ncols);
hypre_TFree(inode);
hypre_TFree(jedge);
hypre_TFree(vals);
parcsr_grad= (hypre_ParCSRMatrix *) hypre_IJMatrixObject(T_grad);
HYPRE_IJMatrixSetObjectType(T_grad, -1);
HYPRE_IJMatrixDestroy(T_grad);
return parcsr_grad;
}
|
cache.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC AAA CCCC H H EEEEE %
% C A A C H H E %
% C AAAAA C HHHHH EEE %
% C A A C H H E %
% CCCC A A CCCC H H EEEEE %
% %
% %
% MagickCore Pixel Cache Methods %
% %
% Software Design %
% Cristy %
% July 1999 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/distribute-cache-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/nt-base-private.h"
#include "magick/option.h"
#include "magick/pixel.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/policy.h"
#include "magick/quantum.h"
#include "magick/random_.h"
#include "magick/registry.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/timer-private.h"
#include "magick/utility.h"
#include "magick/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
Define declarations.
*/
#define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent)
#define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \
GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse)
/*
Typedef declarations.
*/
typedef struct _MagickModulo
{
  /* Quotient/remainder pair of a division; presumably used to wrap virtual
     pixel coordinates into the image extent — confirm at call sites. */
  ssize_t
    quotient,
    remainder;
} MagickModulo;
/*
Forward declarations.
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static Cache
GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *)
magick_hot_spot;
static const IndexPacket
*GetVirtualIndexesFromCache(const Image *);
static const PixelPacket
*GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t,
const ssize_t,const size_t,const size_t,ExceptionInfo *),
*GetVirtualPixelsCache(const Image *);
static MagickBooleanType
GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,
PixelPacket *,ExceptionInfo *),
GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod,
const ssize_t,const ssize_t,PixelPacket *,ExceptionInfo *),
OpenPixelCache(Image *,const MapMode,ExceptionInfo *),
OpenPixelCacheOnDisk(CacheInfo *,const MapMode),
ReadPixelCacheIndexes(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
SyncAuthenticPixelsCache(Image *,ExceptionInfo *),
WritePixelCacheIndexes(CacheInfo *,NexusInfo *magick_restrict,
ExceptionInfo *),
WritePixelCachePixels(CacheInfo *,NexusInfo *magick_restrict,
ExceptionInfo *);
static PixelPacket
*GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode,
const ssize_t,const ssize_t,const size_t,const size_t,
const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *)
magick_hot_spot;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
static void
CopyOpenCLBuffer(CacheInfo *magick_restrict);
#endif
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
Global declarations.
*/
static SemaphoreInfo
*cache_semaphore = (SemaphoreInfo *) NULL;
static ssize_t
cache_anonymous_memory = (-1);
static time_t
cache_epoch = 0;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
static inline OpenCLCacheInfo *RelinquishOpenCLCacheInfo(MagickCLEnv clEnv,
  OpenCLCacheInfo *info)
{
  /*
    Tear down an OpenCLCacheInfo: drop every retained event, free the event
    list and its semaphore, release the device buffer (if any), then free
    the structure itself.  Always returns NULL, so callers can write
    info=RelinquishOpenCLCacheInfo(clEnv,info).
  */
  ssize_t
    n = 0;

  while (n < (ssize_t) info->event_count)
    clEnv->library->clReleaseEvent(info->events[n++]);
  info->events=(cl_event *) RelinquishMagickMemory(info->events);
  DestroySemaphoreInfo(&info->events_semaphore);
  if (info->buffer != (cl_mem) NULL)
    {
      clEnv->library->clReleaseMemObject(info->buffer);
      info->buffer=(cl_mem) NULL;
    }
  return((OpenCLCacheInfo *) RelinquishMagickMemory(info));
}
static void CL_API_CALL RelinquishPixelCachePixelsDelayed(
  cl_event magick_unused(event),cl_int magick_unused(event_command_exec_status),
  void *user_data)
{
  /*
    OpenCL completion callback: free the pixel buffer only after every
    queued event has completed.  If any event is still pending, re-register
    this callback on that event and return; the final completion performs
    the actual release.  user_data carries the OpenCLCacheInfo.
  */
  MagickCLEnv
    clEnv;

  OpenCLCacheInfo
    *info;

  PixelPacket
    *pixels;

  ssize_t
    i;

  magick_unreferenced(event);
  magick_unreferenced(event_command_exec_status);
  info=(OpenCLCacheInfo *) user_data;
  clEnv=GetDefaultOpenCLEnv();
  /*
    Scan events newest-first; an execution status greater than CL_COMPLETE
    means the command has not finished yet.
  */
  for (i=(ssize_t)info->event_count-1; i >= 0; i--)
  {
    cl_int
      event_status;

    cl_uint
      status;

    status=clEnv->library->clGetEventInfo(info->events[i],
      CL_EVENT_COMMAND_EXECUTION_STATUS,sizeof(cl_int),&event_status,NULL);
    if ((status == CL_SUCCESS) && (event_status > CL_COMPLETE))
      {
        /*
          Still pending: defer the release until this event completes.
        */
        clEnv->library->clSetEventCallback(info->events[i],CL_COMPLETE,
          &RelinquishPixelCachePixelsDelayed,info);
        return;
      }
  }
  /*
    All events complete: release the memory resource accounting, the
    OpenCL bookkeeping, and finally the aligned pixel buffer.
  */
  pixels=info->pixels;
  RelinquishMagickResource(MemoryResource,info->length);
  (void) RelinquishOpenCLCacheInfo(clEnv,info);
  (void) RelinquishAlignedMemory(pixels);
}
static MagickBooleanType RelinquishOpenCLBuffer(
  CacheInfo *magick_restrict cache_info)
{
  /*
    Release the OpenCL pixel buffer attached to this cache, if any.  The
    release itself is deferred until all queued events complete (see
    RelinquishPixelCachePixelsDelayed, invoked here directly with a NULL
    event).  Returns MagickTrue if a buffer was scheduled for release,
    MagickFalse if the cache has no OpenCL state.

    Fix: the original declared a local MagickCLEnv clEnv that was never
    used; the dead local is removed.
  */
  assert(cache_info != (CacheInfo *) NULL);
  if (cache_info->opencl == (OpenCLCacheInfo *) NULL)
    return(MagickFalse);
  RelinquishPixelCachePixelsDelayed((cl_event) NULL,0,cache_info->opencl);
  return(MagickTrue);
}
static cl_event *CopyOpenCLEvents(OpenCLCacheInfo *opencl_info,
  cl_uint *event_count)
{
  /*
    Snapshot the OpenCL event list under the events semaphore.  The caller
    owns (and must free) the returned array.  On allocation failure,
    *event_count is reset to 0 and NULL is returned.
  */
  cl_event
    *snapshot = (cl_event *) NULL;

  assert(opencl_info != (OpenCLCacheInfo *) NULL);
  LockSemaphoreInfo(opencl_info->events_semaphore);
  *event_count=opencl_info->event_count;
  if (*event_count != 0)
    {
      snapshot=AcquireQuantumMemory(*event_count,sizeof(*snapshot));
      if (snapshot == (cl_event *) NULL)
        *event_count=0;
      else
        (void) memcpy(snapshot,opencl_info->events,(size_t) *event_count*
          sizeof(*snapshot));
    }
  UnlockSemaphoreInfo(opencl_info->events_semaphore);
  return(snapshot);
}
#endif
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A d d O p e n C L E v e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AddOpenCLEvent() adds an event to the list of operations the next operation
% should wait for.
%
% The format of the AddOpenCLEvent() method is:
%
% void AddOpenCLEvent(const Image *image,cl_event event)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o event: the event that should be added.
%
*/
extern MagickPrivate void AddOpenCLEvent(const Image *image,cl_event event)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickCLEnv
    clEnv;

  assert(image != (const Image *) NULL);
  assert(event != (cl_event) NULL);
  cache_info=(CacheInfo *)image->cache;
  assert(cache_info->opencl != (OpenCLCacheInfo *) NULL);
  clEnv=GetDefaultOpenCLEnv();
  /*
    Take a reference on the event; if that fails, fall back to waiting on
    it synchronously instead of queueing it.
  */
  if (clEnv->library->clRetainEvent(event) != CL_SUCCESS)
    {
      clEnv->library->clWaitForEvents(1,&event);
      return;
    }
  /*
    Append the event to the cache's event list under its semaphore.  The
    events pointer is overwritten by ResizeQuantumMemory, which would leak
    on failure, but any allocation failure here is fatal (process exits),
    so no non-fatal leak path exists.
  */
  LockSemaphoreInfo(cache_info->opencl->events_semaphore);
  if (cache_info->opencl->events == (cl_event *) NULL)
    {
      cache_info->opencl->events=AcquireMagickMemory(sizeof(
        *cache_info->opencl->events));
      cache_info->opencl->event_count=1;
    }
  else
    cache_info->opencl->events=ResizeQuantumMemory(cache_info->opencl->events,
      ++cache_info->opencl->event_count,sizeof(*cache_info->opencl->events));
  if (cache_info->opencl->events == (cl_event *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  cache_info->opencl->events[cache_info->opencl->event_count-1]=event;
  UnlockSemaphoreInfo(cache_info->opencl->events_semaphore);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCache() acquires a pixel cache.
%
% The format of the AcquirePixelCache() method is:
%
% Cache AcquirePixelCache(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickExport Cache AcquirePixelCache(const size_t number_threads)
{
  CacheInfo
    *magick_restrict cache_info;

  char
    *option;

  cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info));
  if (cache_info == (CacheInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(cache_info,0,sizeof(*cache_info));
  /*
    Default morphology and access mode.
  */
  cache_info->type=UndefinedCache;
  cache_info->mode=IOMode;
  cache_info->disk_mode=IOMode;
  cache_info->colorspace=sRGBColorspace;
  cache_info->channels=4;
  cache_info->file=(-1);
  cache_info->id=GetMagickThreadId();
  /*
    Use the largest of the requested, OpenMP, and thread-resource counts,
    but never less than one thread.
  */
  cache_info->number_threads=number_threads;
  if (GetOpenMPMaximumThreads() > cache_info->number_threads)
    cache_info->number_threads=GetOpenMPMaximumThreads();
  if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
    cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  if (cache_info->number_threads == 0)
    cache_info->number_threads=1;
  cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
  /*
    The synchronize setting is read from the environment first and the
    policy second, so the policy wins when both are defined.
  */
  option=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (option != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(option);
      option=DestroyString(option);
    }
  option=GetPolicyValue("cache:synchronize");
  if (option != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(option);
      option=DestroyString(option);
    }
  cache_info->width_limit=MagickMin(GetMagickResourceLimit(WidthResource),
    (MagickSizeType) MAGICK_SSIZE_MAX);
  cache_info->height_limit=MagickMin(GetMagickResourceLimit(HeightResource),
    (MagickSizeType) MAGICK_SSIZE_MAX);
  cache_info->semaphore=AllocateSemaphoreInfo();
  cache_info->reference_count=1;
  cache_info->file_semaphore=AllocateSemaphoreInfo();
  cache_info->debug=IsEventLogging();
  cache_info->signature=MagickCoreSignature;
  return((Cache ) cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
% The format of the AcquirePixelCacheNexus method is:
%
% NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickExport NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
  NexusInfo
    **magick_restrict nexus_info;

  ssize_t
    n;

  /*
    One pointer and one NexusInfo slot per thread, times two: entries in
    the second half of the slab back the per-thread virtual nexus of the
    corresponding first-half entry.
  */
  nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2*
    number_threads,sizeof(*nexus_info)));
  if (nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  *nexus_info=(NexusInfo *) AcquireQuantumMemory(number_threads,
    2*sizeof(**nexus_info));
  if (*nexus_info == (NexusInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(*nexus_info,0,2*number_threads*sizeof(**nexus_info));
  for (n=0; n < (ssize_t) (2*number_threads); n++)
  {
    nexus_info[n]=(*nexus_info)+n;
    if (n < (ssize_t) number_threads)
      nexus_info[n]->virtual_nexus=(*nexus_info)+number_threads+n;
    nexus_info[n]->signature=MagickCoreSignature;
  }
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCachePixels() returns the pixels associated with the specified
% image.
%
% The format of the AcquirePixelCachePixels() method is:
%
% const void *AcquirePixelCachePixels(const Image *image,
% MagickSizeType *length,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const void *AcquirePixelCachePixels(const Image *image,
  MagickSizeType *length,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) exception;
  /*
    Pixels are directly addressable only for in-core caches (heap memory
    or a memory-mapped file); otherwise report zero length and no pixels.
  */
  *length=0;
  if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache))
    {
      *length=cache_info->length;
      return((const void *) cache_info->pixels);
    }
  return((const void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentGenesis() instantiates the cache component.
%
% The format of the CacheComponentGenesis method is:
%
% MagickBooleanType CacheComponentGenesis(void)
%
*/
MagickExport MagickBooleanType CacheComponentGenesis(void)
{
  /*
    Lazily create the module-wide cache semaphore; idempotent.
  */
  if (cache_semaphore != (SemaphoreInfo *) NULL)
    return(MagickTrue);
  cache_semaphore=AllocateSemaphoreInfo();
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentTerminus() destroys the cache component.
%
% The format of the CacheComponentTerminus() method is:
%
% CacheComponentTerminus(void)
%
*/
MagickExport void CacheComponentTerminus(void)
{
  /*
    The semaphore may never have been created; activate it first so the
    destruction below is always well-defined.
  */
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    ActivateSemaphoreInfo(&cache_semaphore);
  DestroySemaphoreInfo(&cache_semaphore);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l i p P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
% mask. The method returns MagickTrue if the pixel region is clipped,
% otherwise MagickFalse.
%
% The format of the ClipPixelCacheNexus() method is:
%
% MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  /*
    Composite the authentic pixels over the nexus pixels wherever the image
    clip mask is non-transparent.  Returns MagickTrue on success (including
    the no-op cases: no clip mask, pseudo-class image, or an empty region),
    MagickFalse when the cache or any pixel view is unavailable.

    Fix: the original leaked clip_nexus on the failed-pixel-view early
    return; it is now destroyed on every exit path after acquisition.
  */
  CacheInfo
    *magick_restrict cache_info;

  const PixelPacket
    *magick_restrict r;

  IndexPacket
    *magick_restrict nexus_indexes,
    *magick_restrict indexes;

  MagickOffsetType
    n;

  NexusInfo
    **magick_restrict clip_nexus;

  PixelPacket
    *magick_restrict p,
    *magick_restrict q;

  ssize_t
    y;

  /*
    Apply clip mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->clip_mask == (Image *) NULL) ||
      (image->storage_class == PseudoClass))
    return(MagickTrue);
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  clip_nexus=AcquirePixelCacheNexus(1);
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  indexes=nexus_info->virtual_nexus->indexes;
  q=nexus_info->pixels;
  nexus_indexes=nexus_info->indexes;
  r=GetVirtualPixelCacheNexus(image->clip_mask,MaskVirtualPixelMethod,
    nexus_info->region.x,nexus_info->region.y,nexus_info->region.width,
    nexus_info->region.height,clip_nexus[0],exception);
  if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL) ||
      (r == (const PixelPacket *) NULL))
    {
      /*
        Release the clip nexus before failing (the original leaked here).
      */
      clip_nexus=DestroyPixelCacheNexus(clip_nexus,1);
      return(MagickFalse);
    }
  n=0;
  for (y=0; y < (ssize_t) nexus_info->region.height; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) nexus_info->region.width; x++)
    {
      double
        mask_alpha;

      mask_alpha=QuantumScale*GetPixelIntensity(image,r);
      if (fabs(mask_alpha) >= MagickEpsilon)
        {
          /*
            Non-transparent mask pixel: blend the authentic pixel over the
            nexus pixel, scaled by the mask intensity.
          */
          SetPixelRed(q,mask_alpha*MagickOver_((MagickRealType) p->red,
            (MagickRealType) GetPixelOpacity(p),(MagickRealType) q->red,
            (MagickRealType) GetPixelOpacity(q)));
          SetPixelGreen(q,mask_alpha*MagickOver_((MagickRealType) p->green,
            (MagickRealType) GetPixelOpacity(p),(MagickRealType) q->green,
            (MagickRealType) GetPixelOpacity(q)));
          SetPixelBlue(q,mask_alpha*MagickOver_((MagickRealType) p->blue,
            (MagickRealType) GetPixelOpacity(p),(MagickRealType) q->blue,
            (MagickRealType) GetPixelOpacity(q)));
          SetPixelOpacity(q,GetPixelOpacity(p));
          if (cache_info->active_index_channel != MagickFalse)
            SetPixelIndex(nexus_indexes+n,GetPixelIndex(indexes+n));
        }
      p++;
      q++;
      r++;
      n++;
    }
  }
  clip_nexus=DestroyPixelCacheNexus(clip_nexus,1);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCache() clones a pixel cache.
%
% The format of the ClonePixelCache() method is:
%
% Cache ClonePixelCache(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickExport Cache ClonePixelCache(const Cache cache)
{
  /*
    Create a fresh pixel cache with the same thread count and virtual
    pixel method as the source; pixel data itself is not copied here.
  */
  const CacheInfo
    *magick_restrict source;

  CacheInfo
    *magick_restrict clone_info;

  assert(cache != NULL);
  source=(const CacheInfo *) cache;
  assert(source->signature == MagickCoreSignature);
  if (source->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      source->filename);
  clone_info=(CacheInfo *) AcquirePixelCache(source->number_threads);
  clone_info->virtual_pixel_method=source->virtual_pixel_method;
  return((Cache ) clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheMethods() clones the pixel cache methods from one cache to
% another.
%
% The format of the ClonePixelCacheMethods() method is:
%
% void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
% A description of each parameter follows:
%
% o clone: Specifies a pointer to a Cache structure.
%
% o cache: the pixel cache.
%
*/
MagickExport void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
  /*
    Copy the method table from cache into clone.  (Local names follow the
    data flow: clone is the destination, cache is the source.)
  */
  CacheInfo
    *magick_restrict destination_info,
    *magick_restrict source_info;

  assert(clone != (Cache) NULL);
  destination_info=(CacheInfo *) clone;
  assert(destination_info->signature == MagickCoreSignature);
  if (destination_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      destination_info->filename);
  assert(cache != (Cache) NULL);
  source_info=(CacheInfo *) cache;
  assert(source_info->signature == MagickCoreSignature);
  destination_info->methods=source_info->methods;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e R e p o s i t o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %
% ClonePixelCacheRepository() clones the source pixel cache to the destination
% cache.
%
% The format of the ClonePixelCacheRepository() method is:
%
% MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
% CacheInfo *source_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o source_info: the source pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClonePixelCacheOnDisk(
  CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
  /*
    Copy the on-disk pixel cache of cache_info into clone_info's disk file.
    Both caches are assumed to have identical morphology.  Returns
    MagickTrue only if the entire cache length was transferred.
  */
  MagickSizeType
    extent;

  size_t
    quantum;

  ssize_t
    count;

  struct stat
    file_stats;

  unsigned char
    *buffer;

  /*
    Clone pixel cache on disk with identical morphology.
  */
  if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
      (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
    return(MagickFalse);
  if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
      (lseek(clone_info->file,0,SEEK_SET) < 0))
    return(MagickFalse);
  quantum=(size_t) MagickMaxBufferExtent;
  if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
    {
#if defined(MAGICKCORE_HAVE_LINUX_SENDFILE)
      /*
        Fast path: in-kernel copy.  sendfile() transfers at most 0x7ffff000
        bytes per call on Linux, so only attempt it for smaller caches; on
        a short transfer, rewind both files and fall through to the
        portable read/write loop below.
      */
      if (cache_info->length < 0x7ffff000)
        {
          count=sendfile(clone_info->file,cache_info->file,(off_t *) NULL,
            (size_t) cache_info->length);
          if (count == (ssize_t) cache_info->length)
            return(MagickTrue);
          if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
              (lseek(clone_info->file,0,SEEK_SET) < 0))
            return(MagickFalse);
        }
#endif
      quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
    }
  buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    Portable path: copy in quantum-sized chunks; a short write aborts the
    loop and is caught by the length check below.
  */
  extent=0;
  while ((count=read(cache_info->file,buffer,quantum)) > 0)
  {
    ssize_t
      number_bytes;

    number_bytes=write(clone_info->file,buffer,(size_t) count);
    if (number_bytes != count)
      break;
    extent+=number_bytes;
  }
  buffer=(unsigned char *) RelinquishMagickMemory(buffer);
  if (extent != cache_info->length)
    return(MagickFalse);
  return(MagickTrue);
}
static MagickBooleanType ClonePixelCacheRepository(
CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
ExceptionInfo *exception)
{
#define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource))
#define cache_number_threads(source,destination,chunk,multithreaded) \
num_threads((multithreaded) == 0 ? 1 : \
(((source)->type != MemoryCache) && ((source)->type != MapCache)) || \
(((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? \
MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \
MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1))
MagickBooleanType
status;
NexusInfo
**magick_restrict cache_nexus,
**magick_restrict clone_nexus;
size_t
length;
ssize_t
y;
assert(cache_info != (CacheInfo *) NULL);
assert(clone_info != (CacheInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
if (cache_info->type == PingCache)
return(MagickTrue);
if ((cache_info->storage_class == clone_info->storage_class) &&
(cache_info->colorspace == clone_info->colorspace) &&
(cache_info->channels == clone_info->channels) &&
(cache_info->columns == clone_info->columns) &&
(cache_info->rows == clone_info->rows) &&
(cache_info->active_index_channel == clone_info->active_index_channel))
{
/*
Identical pixel cache morphology.
*/
if (((cache_info->type == MemoryCache) ||
(cache_info->type == MapCache)) &&
((clone_info->type == MemoryCache) ||
(clone_info->type == MapCache)))
{
(void) memcpy(clone_info->pixels,cache_info->pixels,
cache_info->columns*cache_info->rows*sizeof(*cache_info->pixels));
if ((cache_info->active_index_channel != MagickFalse) &&
(clone_info->active_index_channel != MagickFalse))
(void) memcpy(clone_info->indexes,cache_info->indexes,
cache_info->columns*cache_info->rows*
sizeof(*cache_info->indexes));
return(MagickTrue);
}
if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
return(ClonePixelCacheOnDisk(cache_info,clone_info));
}
/*
Mismatched pixel cache morphology.
*/
cache_nexus=AcquirePixelCacheNexus(cache_info->number_threads);
clone_nexus=AcquirePixelCacheNexus(clone_info->number_threads);
length=(size_t) MagickMin(cache_info->columns,clone_info->columns)*
sizeof(*cache_info->pixels);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
PixelPacket
*pixels;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
(void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length);
(void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length);
status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
}
if ((cache_info->active_index_channel != MagickFalse) &&
(clone_info->active_index_channel != MagickFalse))
{
/*
Clone indexes.
*/
length=(size_t) MagickMin(cache_info->columns,clone_info->columns)*
sizeof(*cache_info->indexes);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
PixelPacket
*pixels;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
status=ReadPixelCacheIndexes(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
(void) memcpy(clone_nexus[id]->indexes,cache_nexus[id]->indexes,length);
status=WritePixelCacheIndexes(clone_info,clone_nexus[id],exception);
}
}
clone_nexus=DestroyPixelCacheNexus(clone_nexus,clone_info->number_threads);
cache_nexus=DestroyPixelCacheNexus(cache_nexus,cache_info->number_threads);
if (cache_info->debug != MagickFalse)
{
char
message[MaxTextExtent];
(void) FormatLocaleString(message,MaxTextExtent,"%s => %s",
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixelCache() method is:
%
% void DestroyImagePixelCache(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void DestroyImagePixelCache(Image *image)
{
  /*
    Release the pixel cache attached to this image, if one exists.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->cache == (void *) NULL)
    return;
  image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixels() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixels() method is:
%
% void DestroyImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Deallocate the image pixel cache, dispatching to a registered
    destroy handler when one has been installed.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.destroy_pixel_handler == (DestroyPixelHandler) NULL)
    {
      image->cache=DestroyPixelCache(image->cache);
      return;
    }
  cache_info->methods.destroy_pixel_handler(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyPixelCache() method is:
%
% Cache DestroyPixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    result;

  /*
    Close the disk pixel cache file descriptor (if open) and release the
    associated file resource; report whether close() succeeded.
  */
  result=(-1);
  if (cache_info->file != -1)
    {
      result=close(cache_info->file);
      cache_info->file=(-1);
      RelinquishMagickResource(FileResource,1);
    }
  if (result == -1)
    return(MagickFalse);
  return(MagickTrue);
}
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  /*
    Release the pixel cache backing store according to its storage type,
    then reset the cache to the undefined state.
  */
  switch (cache_info->type)
  {
    case MemoryCache:
    {
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      /*
        If an OpenCL buffer owns the pixels, releasing it is sufficient.
      */
      if (RelinquishOpenCLBuffer(cache_info) != MagickFalse)
        {
          cache_info->pixels=(PixelPacket *) NULL;
          break;
        }
#endif
      /*
        Heap-allocated vs. anonymous-mapped memory need different releases.
      */
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(PixelPacket *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(PixelPacket *) NULL;
      /*
        Read-only and persistent caches keep their backing file on disk.
      */
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
    }
    /*
      NOTE(review): no break above — control falls through to DiskCache so
      the backing file descriptor is also closed and the disk resource
      released; looks intentional, confirm before adding a break.
    */
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      /*
        Detach from the remote pixel cache server.
      */
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->indexes=(IndexPacket *) NULL;
}
MagickExport Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /*
    Drop one reference under the cache lock; only the final reference
    proceeds to tear the cache down.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MaxTextExtent];

      (void) FormatLocaleString(message,MaxTextExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /*
    Release the backing store, then every auxiliary structure owned by
    the cache, and finally the cache structure itself.
  */
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&cache_info->semaphore);
  /*
    Invalidate the signature so stale pointers are caught by asserts.
  */
  cache_info->signature=(~MagickCoreSignature);
  cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
% The format of the DestroyPixelCacheNexus() method is:
%
% NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus to destroy.
%
% o number_threads: the number of nexus threads.
%
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  /*
    Free (or unmap) the staging buffer of a cache nexus and clear its
    bookkeeping fields.
  */
  if (nexus_info->mapped != MagickFalse)
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  else
    (void) RelinquishAlignedMemory(nexus_info->cache);
  nexus_info->cache=(PixelPacket *) NULL;
  nexus_info->pixels=(PixelPacket *) NULL;
  nexus_info->indexes=(IndexPacket *) NULL;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
}
MagickExport NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  ssize_t
    n;

  /*
    Release every nexus slot (two per thread), then the contiguous
    NexusInfo array and the pointer table themselves.
  */
  assert(nexus_info != (NexusInfo **) NULL);
  for (n=0; n < (ssize_t) (2*number_threads); n++)
  {
    if (nexus_info[n]->cache != (PixelPacket *) NULL)
      RelinquishCacheNexusPixels(nexus_info[n]);
    nexus_info[n]->signature=(~MagickCoreSignature);
  }
  *nexus_info=(NexusInfo *) RelinquishMagickMemory(*nexus_info);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c I n d e x e s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticIndexesFromCache() returns the indexes associated with the last
% call to QueueAuthenticPixelsCache() or GetAuthenticPixelsCache().
%
% The format of the GetAuthenticIndexesFromCache() method is:
%
% IndexPacket *GetAuthenticIndexesFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static IndexPacket *GetAuthenticIndexesFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Return the indexes staged in this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[thread_id]->indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c I n d e x Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticIndexQueue() returns the authentic black channel or the colormap
% indexes associated with the last call to QueueAuthenticPixels() or
% GetVirtualPixels(). NULL is returned if the black channel or colormap
% indexes are not available.
%
% The format of the GetAuthenticIndexQueue() method is:
%
% IndexPacket *GetAuthenticIndexQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport IndexPacket *GetAuthenticIndexQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Prefer a registered handler; otherwise return the indexes staged in
    this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_indexes_from_handler !=
      (GetAuthenticIndexesFromHandler) NULL)
    return(cache_info->methods.get_authentic_indexes_from_handler(image));
  assert(thread_id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[thread_id]->indexes);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL
% operations.
%
% The format of the GetAuthenticOpenCLBuffer() method is:
%
% cl_mem GetAuthenticOpenCLBuffer(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  cl_context
    context;

  cl_int
    status;

  MagickCLEnv
    clEnv;

  assert(image != (const Image *) NULL);
  cache_info=(CacheInfo *)image->cache;
  /*
    A shared or undefined cache is first synchronized so we operate on a
    private, materialized pixel store.
  */
  if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1))
    {
      SyncImagePixelCache((Image *) image,exception);
      cache_info=(CacheInfo *)image->cache;
    }
  /*
    Only unmapped in-memory caches can back a CL_MEM_USE_HOST_PTR buffer.
  */
  if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
    return((cl_mem) NULL);
  LockSemaphoreInfo(cache_info->semaphore);
  clEnv=GetDefaultOpenCLEnv();
  if (cache_info->opencl == (OpenCLCacheInfo *) NULL)
    {
      /*
        Lazily create the OpenCL buffer wrapping the host pixel array.
      */
      assert(cache_info->pixels != NULL);
      context=GetOpenCLContext(clEnv);
      cache_info->opencl=(OpenCLCacheInfo *) AcquireCriticalMemory(
        sizeof(*cache_info->opencl));
      (void) memset(cache_info->opencl,0,sizeof(*cache_info->opencl));
      cache_info->opencl->events_semaphore=AllocateSemaphoreInfo();
      cache_info->opencl->length=cache_info->length;
      cache_info->opencl->pixels=cache_info->pixels;
      cache_info->opencl->buffer=clEnv->library->clCreateBuffer(context,
        CL_MEM_USE_HOST_PTR,cache_info->length,cache_info->pixels,&status);
      if (status != CL_SUCCESS)
        cache_info->opencl=RelinquishOpenCLCacheInfo(clEnv,cache_info->opencl);
    }
  /*
    Retain the buffer on behalf of the caller before dropping the lock.
  */
  if (cache_info->opencl != (OpenCLCacheInfo *) NULL)
    clEnv->library->clRetainMemObject(cache_info->opencl->buffer);
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl == (OpenCLCacheInfo *) NULL)
    return((cl_mem) NULL);
  return(cache_info->opencl->buffer);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
% disk pixel cache as defined by the geometry parameters. A pointer to the
% pixels is returned if the pixels are transferred, otherwise a NULL is
% returned.
%
% The format of the GetAuthenticPixelCacheNexus() method is:
%
% PixelPacket *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *GetAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  PixelPacket
    *magick_restrict region;

  /*
    Stage the requested region in the nexus, then fill it from the cache
    backing store unless the nexus aliases the authentic pixels directly.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  region=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (region == (PixelPacket *) NULL)
    return((PixelPacket *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(region);
  if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
    return((PixelPacket *) NULL);
  if (cache_info->active_index_channel != MagickFalse)
    if (ReadPixelCacheIndexes(cache_info,nexus_info,exception) == MagickFalse)
      return((PixelPacket *) NULL);
  return(region);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsFromCache() returns the pixels associated with the last
% call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
% The format of the GetAuthenticPixelsFromCache() method is:
%
% PixelPacket *GetAuthenticPixelsFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static PixelPacket *GetAuthenticPixelsFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Return the pixels staged in this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[thread_id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelQueue() returns the authentic pixels associated with the
% last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetAuthenticPixelQueue() method is:
%
% PixelPacket *GetAuthenticPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport PixelPacket *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Prefer a registered handler; otherwise return the pixels staged in
    this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    return(cache_info->methods.get_authentic_pixels_from_handler(image));
  assert(thread_id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[thread_id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a PixelPacket array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in memory, or in a memory-mapped file. The returned pointer
% must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or if the storage class is
% PseudoClass, call GetAuthenticIndexQueue() after invoking
% GetAuthenticPixels() to obtain the black color component or colormap indexes
% (of type IndexPacket) corresponding to the region. Once the PixelPacket
% (and/or IndexPacket) array has been updated, the changes must be saved back
% to the underlying image using SyncAuthenticPixels() or they may be lost.
%
% The format of the GetAuthenticPixels() method is:
%
% PixelPacket *GetAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Dispatch to a registered handler if one is installed; otherwise
    service the request through this thread's cache nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    return(cache_info->methods.get_authentic_pixels_handler(image,x,y,columns,
      rows,exception));
  assert(thread_id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetAuthenticPixelsCache() method is:
%
% PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Service the region request through this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return((PixelPacket *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtent() returns the extent of the pixels associated with the
% last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetImageExtent() method is:
%
% MagickSizeType GetImageExtent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Report the extent of the pixels staged in this thread's cache nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[thread_id]));
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O p e n C L E v e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOpenCLEvents() returns the events that the next operation should wait
% for. The argument event_count is set to the number of events.
%
% The format of the GetOpenCLEvents() method is:
%
% const cl_event *GetOpenCLEvents(const Image *image,
% cl_uint *event_count)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o event_count: will be set to the number of events.
%
*/
extern MagickPrivate cl_event *GetOpenCLEvents(const Image *image,
  cl_uint *event_count)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return the OpenCL events the next operation must wait on; event_count
    receives the number of events (zero when none are outstanding).
  */
  assert(image != (const Image *) NULL);
  assert(event_count != (cl_uint *) NULL);
  *event_count=0;
  cache_info=(CacheInfo *) image->cache;
  if (cache_info->opencl == (OpenCLCacheInfo *) NULL)
    return((cl_event *) NULL);
  return(CopyOpenCLEvents(cache_info->opencl,event_count));
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCache() ensures that there is only a single reference to the
% pixel cache to be modified, updating the provided cache pointer to point to
% a clone of the original pixel cache if necessary.
%
% The format of the GetImagePixelCache method is:
%
% Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone: any value other than MagickFalse clones the cache pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    The image matches its pixel cache only when storage class, colorspace,
    channel count, and dimensions all agree and the nexus array exists.
  */
  cache_info=(CacheInfo *) image->cache;
  if ((image->storage_class == cache_info->storage_class) &&
      (image->colorspace == cache_info->colorspace) &&
      (image->channels == cache_info->channels) &&
      (image->columns == cache_info->columns) &&
      (image->rows == cache_info->rows) &&
      (cache_info->nexus_info != (NexusInfo **) NULL))
    return(MagickTrue);
  return(MagickFalse);
}
static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    destroy,
    status;

  /*
    Process-wide throttle and time-limit state, initialized on first use.
  */
  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;

  status=MagickTrue;
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  /*
    Periodically yield the CPU when a throttle limit is configured.
  */
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds.
      */
      cache_epoch=GetMagickTime();
      cache_timelimit=GetMagickResourceLimit(TimeResource);
    }
  if ((cache_timelimit != MagickResourceInfinity) &&
      ((MagickSizeType) (GetMagickTime()-cache_epoch) >= cache_timelimit))
    {
      /*
        The time resource limit was exceeded: close any disk cache file
        and abort with a fatal exception.
      */
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      cache_info=(CacheInfo *) image->cache;
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  /*
    If the cache is shared or read-only, copy-on-write: clone it so this
    image owns a private, writable cache.  The condition is re-checked
    under the cache lock (double-checked locking).
  */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;

          Image
            clone_image;

          /*
            Clone pixel cache.
          */
          clone_image=(*image);
          clone_image.semaphore=AllocateSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status == MagickFalse)
            clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
          else
            {
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status == MagickFalse)
                clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
              else
                {
                  /*
                    Adopt the clone; the original is released below, after
                    its lock is dropped.
                  */
                  destroy=MagickTrue;
                  image->cache=clone_info;
                }
            }
          DestroySemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      if (image->type != UndefinedType)
        image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->file != -1)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
% DiskCache, MapCache, MemoryCache, or PingCache.
%
% The format of the GetImagePixelCacheType() method is:
%
% CacheType GetImagePixelCacheType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport CacheType GetPixelCacheType(const Image *image)
{
  /*
    Thin synonym for GetImagePixelCacheType() -- NOTE(review): presumably
    retained for API compatibility; confirm before removing.
  */
  return(GetImagePixelCacheType(image));
}
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Report the storage type of the image's pixel cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e A u t h e n t i c P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixel() method is:
%
% MagickBooleanType GetOneAuthenticPixel(Image *image,const ssize_t x,
% const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  PixelPacket
    *magick_restrict p;

  /*
    Fetch one authentic pixel; *pixel is preloaded with the background
    color so the caller still receives a defined value on failure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *pixel=image->background_color;
  if (cache_info->methods.get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,
      pixel,exception));
  p=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
  if (p == (PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*p);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e A u t h e n t i c P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixelFromCache() method is:
%
% MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
% const ssize_t x,const ssize_t y,PixelPacket *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  PixelPacket
    *magick_restrict p;

  /*
    Default handler: read one authentic pixel through this thread's cache
    nexus; the background color is returned on failure.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *pixel=image->background_color;
  assert(id < (int) cache_info->number_threads);
  p=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,cache_info->nexus_info[id],
    exception);
  if (p == (PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*p);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l M a g i c k P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualMagickPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualMagickPixel() method is:
%
% MagickBooleanType GetOneVirtualMagickPixel(const Image *image,
% const ssize_t x,const ssize_t y,MagickPixelPacket *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: these values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualMagickPixel(const Image *image,
  const ssize_t x,const ssize_t y,MagickPixelPacket *pixel,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const IndexPacket
    *magick_restrict indexes;

  const PixelPacket
    *magick_restrict p;

  /*
    Fetch one virtual pixel via this thread's nexus and convert it (together
    with its colormap index, if any) to a MagickPixelPacket.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,1UL,
    1UL,cache_info->nexus_info[id],exception);
  GetMagickPixelPacket(image,pixel);
  if (p == (const PixelPacket *) NULL)
    return(MagickFalse);
  indexes=GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]);
  SetMagickPixelPacket(image,p,indexes,pixel);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l M e t h o d P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualMethodPixel() returns a single pixel at the specified (x,y)
% location as defined by specified pixel method. The image background color
% is returned if an error occurs. If you plan to modify the pixel, use
% GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualMethodPixel() method is:
%
% MagickBooleanType GetOneVirtualMethodPixel(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualMethodPixel(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const PixelPacket
    *magick_restrict p;

  /*
    Return one virtual pixel resolved with the caller-supplied virtual pixel
    method; delegate to a registered handler when one is installed.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *pixel=image->background_color;
  if (cache_info->methods.get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(cache_info->methods.get_one_virtual_pixel_from_handler(image,
      virtual_pixel_method,x,y,pixel,exception));
  assert(id < (int) cache_info->number_threads);
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (p == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*p);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixel() returns a single virtual pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixel() method is:
%
% MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
% const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const PixelPacket
    *magick_restrict p;

  /*
    Return one virtual pixel using the image's current virtual pixel method;
    delegate to a registered handler when one is installed.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *pixel=image->background_color;
  if (cache_info->methods.get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(cache_info->methods.get_one_virtual_pixel_from_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
  assert(id < (int) cache_info->number_threads);
  p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,1UL,
    1UL,cache_info->nexus_info[id],exception);
  if (p == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*p);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e V i r t u a l P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelFromCache() returns a single virtual pixel at the
% specified (x,y) location. The image background color is returned if an
% error occurs.
%
% The format of the GetOneVirtualPixelFromCache() method is:
%
% MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const PixelPacket
    *magick_restrict p;

  /*
    Default handler: read one virtual pixel through this thread's cache
    nexus; the background color is returned on failure.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  *pixel=image->background_color;
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (p == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*p);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheChannels() returns the number of pixel channels associated
% with this instance of the pixel cache.
%
% The format of the GetPixelCacheChannels() method is:
%
% size_t GetPixelCacheChannels(const Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheChannels returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
MagickExport size_t GetPixelCacheChannels(const Cache cache)
{
  CacheInfo
    *magick_restrict info;

  /*
    Report the channel count recorded in this pixel cache.
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->channels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
% The format of the GetPixelCacheColorspace() method is:
%
% Colorspace GetPixelCacheColorspace(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickExport ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
  CacheInfo
    *magick_restrict info;

  /*
    Report the colorspace recorded in this pixel cache.
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheFilename() returns the filename associated with the pixel
% cache.
%
% The format of the GetPixelCacheFilename() method is:
%
% const char *GetPixelCacheFilename(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const char *GetPixelCacheFilename(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
return(cache_info->cache_filename);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheMethods() initializes the CacheMethods structure.
%
% The format of the GetPixelCacheMethods() method is:
%
% void GetPixelCacheMethods(CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickExport void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  CacheMethods
    *methods;

  /*
    Install the default in-process pixel cache handlers.
  */
  assert(cache_methods != (CacheMethods *) NULL);
  methods=cache_methods;
  (void) memset(methods,0,sizeof(*methods));
  /*
    Virtual (read-only) accessors.
  */
  methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  methods->get_virtual_indexes_from_handler=GetVirtualIndexesFromCache;
  methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  /*
    Authentic (read-write) accessors.
  */
  methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  methods->get_authentic_indexes_from_handler=GetAuthenticIndexesFromCache;
  methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  methods->get_one_authentic_pixel_from_handler=GetOneAuthenticPixelFromCache;
  methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  /*
    Lifetime management.
  */
  methods->destroy_pixel_handler=DestroyImagePixelCache;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e N e x u s E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheNexusExtent() returns the extent of the pixels associated with
% the last call to SetPixelCacheNexusPixels() or GetPixelCacheNexusPixels().
%
% The format of the GetPixelCacheNexusExtent() method is:
%
% MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus info.
%
*/
MagickExport MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    extent;

  /*
    Return the pixel extent (width*height) of the nexus region; when the
    nexus has no region yet (extent 0), fall back to the full cache extent.
  */
  assert(cache != (Cache) NULL);  /* typed null check, consistent with siblings */
  assert(nexus_info != (NexusInfo *) NULL);  /* dereferenced below */
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (extent == 0)
    return((MagickSizeType) cache_info->columns*cache_info->rows);
  return(extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCachePixels() returns the pixels associated with the specified image.
%
% The format of the GetPixelCachePixels() method is:
%
% void *GetPixelCachePixels(Image *image,MagickSizeType *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  /*
    Expose the raw pixel buffer when the cache type keeps the pixels
    addressable (MemoryCache or MapCache); report NULL otherwise.  The cache
    length is returned in *length in either case.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  assert(length != (MagickSizeType *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  (void) exception;
  *length=info->length;
  if ((info->type == MemoryCache) || (info->type == MapCache))
    return((void *) info->pixels);
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheStorageClass() method is:
%
% ClassType GetPixelCacheStorageClass(const Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
MagickExport ClassType GetPixelCacheStorageClass(const Cache cache)
{
  CacheInfo
    *magick_restrict info;

  /*
    Report the storage class recorded in this pixel cache.
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->storage_class);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e T i l e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheTileSize() returns the pixel cache tile size.
%
% The format of the GetPixelCacheTileSize() method is:
%
% void GetPixelCacheTileSize(const Image *image,size_t *width,
% size_t *height)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the optimize cache tile width in pixels.
%
% o height: the optimize cache tile height in pixels.
%
*/
MagickExport void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  size_t
    tile_bytes;

  /*
    Report a square tile size (in pixels) sized from a byte budget: 2KB for
    in-memory caches, 8KB when the cache is disk-backed.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  tile_bytes=2048UL;
  if (GetImagePixelCacheType(image) == DiskCache)
    tile_bytes=8192UL;
  *width=tile_bytes/sizeof(PixelPacket);
  *height=(*width);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
% pixel cache. A virtual pixel is any pixel access that is outside the
% boundaries of the image cache.
%
% The format of the GetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
  CacheInfo
    *magick_restrict info;

  /*
    Report the virtual pixel method recorded in this image's pixel cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  return(info->virtual_pixel_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l I n d e x e s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualIndexesFromCache() returns the indexes associated with the last
% call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualIndexesFromCache() method is:
%
% const IndexPacket *GetVirtualIndexesFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const IndexPacket *GetVirtualIndexesFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l I n d e x e s F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualIndexesFromNexus() returns the indexes associated with the
% specified cache nexus.
%
% The format of the GetVirtualIndexesFromNexus() method is:
%
% const IndexPacket *GetVirtualIndexesFromNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap indexes.
%
*/
MagickExport const IndexPacket *GetVirtualIndexesFromNexus(const Cache cache,
  NexusInfo *nexus_info)
{
  CacheInfo
    *magick_restrict info;

  /*
    Return the indexes staged in the given nexus, or NULL when the cache has
    no defined storage class yet.
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->storage_class != UndefinedClass)
    return(nexus_info->indexes);
  return((IndexPacket *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l I n d e x Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualIndexQueue() returns the virtual black channel or the
% colormap indexes associated with the last call to QueueAuthenticPixels() or
% GetVirtualPixels(). NULL is returned if the black channel or colormap
% indexes are not available.
%
% The format of the GetVirtualIndexQueue() method is:
%
% const IndexPacket *GetVirtualIndexQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const IndexPacket *GetVirtualIndexQueue(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_virtual_indexes_from_handler !=
(GetVirtualIndexesFromHandler) NULL)
return(cache_info->methods.get_virtual_indexes_from_handler(image));
assert(id < (int) cache_info->number_threads);
return(GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk
% pixel cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCacheNexus() method is:
%
% PixelPacket *GetVirtualPixelCacheNexus(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to acquire.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  8x8 ordered-dither offset table used by DitherX()/DitherY() to perturb
  out-of-bounds coordinates pseudo-randomly; entries span [0, 63] and are
  re-centered by subtracting 32 at the point of use.
*/
static ssize_t
  DitherMatrix[64] =
  {
     0, 48, 12, 60,  3, 51, 15, 63,
    32, 16, 44, 28, 35, 19, 47, 31,
     8, 56,  4, 52, 11, 59,  7, 55,
    40, 24, 36, 20, 43, 27, 39, 23,
     2, 50, 14, 62,  1, 49, 13, 61,
    34, 18, 46, 30, 33, 17, 45, 29,
    10, 58,  6, 54,  9, 57,  5, 53,
    42, 26, 38, 22, 41, 25, 37, 21
  };
static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  ssize_t
    index;

  /*
    Perturb x by the ordered-dither matrix, then clamp into [0, columns-1].
  */
  index=x+DitherMatrix[x & 0x07]-32L;
  if (index < 0L)
    return(0L);
  return(index >= (ssize_t) columns ? (ssize_t) columns-1L : index);
}
static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  ssize_t
    index;

  /*
    Perturb y by the ordered-dither matrix, then clamp into [0, rows-1].
  */
  index=y+DitherMatrix[y & 0x07]-32L;
  if (index < 0L)
    return(0L);
  return(index >= (ssize_t) rows ? (ssize_t) rows-1L : index);
}
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  /*
    Replicate the edge pixel: clamp x into [0, columns-1].
  */
  return(x < 0L ? 0L : (x >= (ssize_t) columns ? (ssize_t) (columns-1) : x));
}
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  /*
    Replicate the edge pixel: clamp y into [0, rows-1].
  */
  return(y < 0L ? 0L : (y >= (ssize_t) rows ? (ssize_t) (rows-1) : y));
}
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  double
    value;

  /*
    Uniformly random column index in [0, columns).
  */
  value=GetPseudoRandomValue(random_info);
  return((ssize_t) (columns*value));
}
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  double
    value;

  /*
    Uniformly random row index in [0, rows).
  */
  value=GetPseudoRandomValue(random_info);
  return((ssize_t) (rows*value));
}
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
  const size_t extent)
{
  MagickModulo
    modulo;

  /*
    Floored division of offset by extent: the remainder is always in
    [0, extent), so tiling methods can index the cache directly.
    Bug fix: the original computed `offset % extent` unconditionally, which
    is a division by zero (undefined behavior) when extent == 0; both the
    quotient and the remainder must be guarded.
  */
  modulo.quotient=offset;
  modulo.remainder=0;
  if (extent != 0)
    {
      modulo.quotient=offset/((ssize_t) extent);
      modulo.remainder=offset % ((ssize_t) extent);
    }
  /*
    C's % truncates toward zero; adjust to floored semantics when the
    operands' signs differ and the remainder is non-zero.
  */
  if ((modulo.remainder != 0) && ((offset ^ ((ssize_t) extent)) < 0))
    {
      modulo.quotient-=1;
      modulo.remainder+=((ssize_t) extent);
    }
  return(modulo);
}
MagickExport const PixelPacket *GetVirtualPixelCacheNexus(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const IndexPacket
    *magick_restrict virtual_indexes;

  const PixelPacket
    *magick_restrict p;

  IndexPacket
    virtual_index,
    *magick_restrict indexes;

  MagickOffsetType
    offset;

  MagickSizeType
    length,
    number_pixels;

  NexusInfo
    *magick_restrict virtual_nexus;

  PixelPacket
    *magick_restrict pixels,
    *magick_restrict q,
    virtual_pixel;

  ssize_t
    u,
    v;

  /*
    Acquire pixels.  When the region lies fully inside the cache extents the
    nexus is filled directly; otherwise out-of-bounds pixels are synthesized
    according to virtual_pixel_method.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((const PixelPacket *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  /*
    Stage the requested region in the nexus; a clip or composite mask forces
    a buffered (non-aliased) nexus.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows,
    (image->clip_mask != (Image *) NULL) || (image->mask != (Image *) NULL) ?
    MagickTrue : MagickFalse,nexus_info,exception);
  if (pixels == (PixelPacket *) NULL)
    return((const PixelPacket *) NULL);
  /*
    Does the nexus region fall entirely within the cache extents?
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
    nexus_info->region.width-1L;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
    if ((x >= 0) && ((ssize_t) (x+columns) <= (ssize_t) cache_info->columns) &&
        (y >= 0) && ((ssize_t) (y+rows) <= (ssize_t) cache_info->rows))
      {
        MagickBooleanType
          status;

        /*
          Pixel request is inside cache extents.
        */
        if (nexus_info->authentic_pixel_cache != MagickFalse)
          return(pixels);
        status=ReadPixelCachePixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          return((const PixelPacket *) NULL);
        if ((cache_info->storage_class == PseudoClass) ||
            (cache_info->colorspace == CMYKColorspace))
          {
            /*
              Colormapped or CMYK caches also carry an index channel.
            */
            status=ReadPixelCacheIndexes(cache_info,nexus_info,exception);
            if (status == MagickFalse)
              return((const PixelPacket *) NULL);
          }
        return(pixels);
      }
  /*
    Pixel request is outside cache extents.
  */
  virtual_nexus=nexus_info->virtual_nexus;
  q=pixels;
  indexes=nexus_info->indexes;
  /*
    Choose the constant fill pixel used by the constant virtual pixel
    methods; other methods ignore virtual_pixel except as a fallback.
  */
  switch (virtual_pixel_method)
  {
    case BlackVirtualPixelMethod:
    {
      SetPixelRed(&virtual_pixel,0);
      SetPixelGreen(&virtual_pixel,0);
      SetPixelBlue(&virtual_pixel,0);
      SetPixelOpacity(&virtual_pixel,OpaqueOpacity);
      break;
    }
    case GrayVirtualPixelMethod:
    {
      SetPixelRed(&virtual_pixel,QuantumRange/2);
      SetPixelGreen(&virtual_pixel,QuantumRange/2);
      SetPixelBlue(&virtual_pixel,QuantumRange/2);
      SetPixelOpacity(&virtual_pixel,OpaqueOpacity);
      break;
    }
    case TransparentVirtualPixelMethod:
    {
      SetPixelRed(&virtual_pixel,0);
      SetPixelGreen(&virtual_pixel,0);
      SetPixelBlue(&virtual_pixel,0);
      SetPixelOpacity(&virtual_pixel,TransparentOpacity);
      break;
    }
    case MaskVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    {
      SetPixelRed(&virtual_pixel,QuantumRange);
      SetPixelGreen(&virtual_pixel,QuantumRange);
      SetPixelBlue(&virtual_pixel,QuantumRange);
      SetPixelOpacity(&virtual_pixel,OpaqueOpacity);
      break;
    }
    default:
    {
      virtual_pixel=image->background_color;
      break;
    }
  }
  virtual_index=(IndexPacket) 0;
  /*
    Compose the region row by row: in-bounds runs are copied in bulk,
    out-of-bounds pixels are synthesized one at a time.
  */
  for (v=0; v < (ssize_t) rows; v++)
  {
    ssize_t
      y_offset;

    y_offset=y+v;
    if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
        (virtual_pixel_method == UndefinedVirtualPixelMethod))
      y_offset=EdgeY(y_offset,cache_info->rows);
    for (u=0; u < (ssize_t) columns; u+=length)
    {
      ssize_t
        x_offset;

      x_offset=x+u;
      /*
        length: pixels remaining on this row that fall inside the cache
        columns (0 when the start is already out of bounds).
      */
      length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
      if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
          ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
          (length == 0))
        {
          MagickModulo
            x_modulo,
            y_modulo;

          /*
            Transfer a single pixel.
          */
          length=(MagickSizeType) 1;
          switch (virtual_pixel_method)
          {
            case BackgroundVirtualPixelMethod:
            case ConstantVirtualPixelMethod:
            case BlackVirtualPixelMethod:
            case GrayVirtualPixelMethod:
            case TransparentVirtualPixelMethod:
            case MaskVirtualPixelMethod:
            case WhiteVirtualPixelMethod:
            {
              /*
                Constant methods: reuse the fill pixel chosen above.
              */
              p=(&virtual_pixel);
              virtual_indexes=(&virtual_index);
              break;
            }
            case EdgeVirtualPixelMethod:
            default:
            {
              /*
                Replicate the nearest edge pixel (recursive fetch through the
                per-nexus virtual nexus).
              */
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),
                EdgeY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case RandomVirtualPixelMethod:
            {
              /*
                Lazily create the random source on first use.
              */
              if (cache_info->random_info == (RandomInfo *) NULL)
                cache_info->random_info=AcquireRandomInfo();
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                RandomX(cache_info->random_info,cache_info->columns),
                RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case DitherVirtualPixelMethod:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                DitherX(x_offset,cache_info->columns),
                DitherY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case TileVirtualPixelMethod:
            {
              /*
                Wrap coordinates (floored modulo) so the image tiles.
              */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case MirrorVirtualPixelMethod:
            {
              /*
                Like tiling, but every other tile is reflected (odd
                quotient).
              */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              if ((x_modulo.quotient & 0x01) == 1L)
                x_modulo.remainder=(ssize_t) cache_info->columns-
                  x_modulo.remainder-1L;
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if ((y_modulo.quotient & 0x01) == 1L)
                y_modulo.remainder=(ssize_t) cache_info->rows-
                  y_modulo.remainder-1L;
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case CheckerTileVirtualPixelMethod:
            {
              /*
                Tile, but fill alternate tiles (odd quotient parity) with the
                constant pixel.
              */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
                {
                  p=(&virtual_pixel);
                  virtual_indexes=(&virtual_index);
                  break;
                }
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case HorizontalTileVirtualPixelMethod:
            {
              /*
                Tile horizontally only; rows outside the cache get the
                constant pixel.
              */
              if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
                {
                  p=(&virtual_pixel);
                  virtual_indexes=(&virtual_index);
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case VerticalTileVirtualPixelMethod:
            {
              /*
                Tile vertically only; columns outside the cache get the
                constant pixel.
              */
              if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
                {
                  p=(&virtual_pixel);
                  virtual_indexes=(&virtual_index);
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case HorizontalTileEdgeVirtualPixelMethod:
            {
              /*
                Tile horizontally, replicate the edge vertically.
              */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case VerticalTileEdgeVirtualPixelMethod:
            {
              /*
                Tile vertically, replicate the edge horizontally.
              */
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
                virtual_nexus,exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
          }
          if (p == (const PixelPacket *) NULL)
            break;
          *q++=(*p);
          if ((indexes != (IndexPacket *) NULL) &&
              (virtual_indexes != (const IndexPacket *) NULL))
            *indexes++=(*virtual_indexes);
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,virtual_nexus,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      virtual_indexes=GetVirtualIndexesFromNexus(cache_info,virtual_nexus);
      (void) memcpy(q,p,(size_t) length*sizeof(*p));
      q+=length;
      if ((indexes != (IndexPacket *) NULL) &&
          (virtual_indexes != (const IndexPacket *) NULL))
        {
          (void) memcpy(indexes,virtual_indexes,(size_t) length*
            sizeof(*virtual_indexes));
          indexes+=length;
        }
    }
    if (u < (ssize_t) columns)
      break;
  }
  /*
    Either loop exiting early means a recursive fetch failed; report the
    failure to the caller.
  */
  if (v < (ssize_t) rows)
    return((const PixelPacket *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel
% cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCache() method is:
%
% const PixelPacket *GetVirtualPixelCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const PixelPacket *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  NexusInfo
    *magick_restrict nexus;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Fetch the virtual pixel region through the cache nexus reserved for the
    calling OpenMP thread.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  nexus=cache_info->nexus_info[thread_id];
  return(GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,rows,
    nexus,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelQueue() returns the virtual pixels associated with the
% last call to QueueAuthenticPixels() or GetVirtualPixels().
%
% The format of the GetVirtualPixelQueue() method is:
%
%      const PixelPacket *GetVirtualPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const PixelPacket *GetVirtualPixelQueue(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_virtual_pixels_handler !=
(GetVirtualPixelsHandler) NULL)
return(cache_info->methods.get_virtual_pixels_handler(image));
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in memory, or in a memory-mapped file. The
% returned pointer must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to access
% the black color component or to obtain the colormap indexes (of type
% IndexPacket) corresponding to the region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
% safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% The format of the GetVirtualPixels() method is:
%
% const PixelPacket *GetVirtualPixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const PixelPacket *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  GetVirtualPixelHandler
    handler;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Return an immutable pixel region.  A cache with an installed
    get_virtual_pixel_handler delegates to it; otherwise read through this
    thread's cache nexus using the image's current virtual pixel method.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  handler=cache_info->methods.get_virtual_pixel_handler;
  if (handler != (GetVirtualPixelHandler) NULL)
    return(handler(image,GetPixelCacheVirtualMethod(image),x,y,columns,rows,
      exception));
  assert(thread_id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    columns,rows,cache_info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+   G e t   V i r t u a l   P i x e l s   C a c h e                           %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsCache() returns the pixels associated with the last call
% to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualPixelsCache() method is:
%
%      const PixelPacket *GetVirtualPixelsCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const PixelPacket *GetVirtualPixelsCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Return the pixels associated with the last queue/get on this thread's
    cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(image->cache,
    cache_info->nexus_info[thread_id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsNexus() returns the pixels associated with the specified
% cache nexus.
%
% The format of the GetVirtualPixelsNexus() method is:
%
% const IndexPacket *GetVirtualPixelsNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap pixels.
%
*/
MagickExport const PixelPacket *GetVirtualPixelsNexus(const Cache cache,
  NexusInfo *nexus_info)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Return the pixel staging area of the given cache nexus, or NULL when the
    cache has not been assigned a storage class yet.
  */
  assert(cache != (Cache) NULL);
  cache_info=(const CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->storage_class == UndefinedClass)
    return((PixelPacket *) NULL);
  return((const PixelPacket *) nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a s k P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MaskPixelCacheNexus() masks the cache nexus as defined by the image mask.
% The method returns MagickTrue if the pixel region is masked, otherwise
% MagickFalse.
%
% The format of the MaskPixelCacheNexus() method is:
%
% MagickBooleanType MaskPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Blend pixel p (opacity alpha) with pixel q (opacity beta) and store the
  result in *composite.  MagickOver_() implies the classic "over" compositing
  operator; gamma is its normalization term.  alpha == TransparentOpacity is
  treated as a fully transparent source (result is simply *q).
*/
static inline void ApplyPixelCompositeMask(const MagickPixelPacket *p,
  const MagickRealType alpha,const MagickPixelPacket *q,
  const MagickRealType beta,MagickPixelPacket *composite)
{
  double
    gamma;  /* normalization factor for the over operator */

  /* Fully transparent source: the destination pixel wins outright. */
  if (fabs((double) (alpha-TransparentOpacity)) < MagickEpsilon)
    {
      *composite=(*q);
      return;
    }
  gamma=1.0-QuantumScale*QuantumScale*alpha*beta;
  /* PerceptibleReciprocal() guards the division against (near-)zero gamma. */
  gamma=PerceptibleReciprocal(gamma);
  composite->red=gamma*MagickOver_(p->red,alpha,q->red,beta);
  composite->green=gamma*MagickOver_(p->green,alpha,q->green,beta);
  composite->blue=gamma*MagickOver_(p->blue,alpha,q->blue,beta);
  /* The index channel carries black (K) only when both pixels are CMYK. */
  if ((p->colorspace == CMYKColorspace) && (q->colorspace == CMYKColorspace))
    composite->index=gamma*MagickOver_(p->index,alpha,q->index,beta);
}
/*
  MaskPixelCacheNexus() composites the pixels of the given cache nexus with
  the image's composite mask.  Returns MagickTrue on success (or when no
  masking is required), MagickFalse on failure.

  Fixes: the temporary mask nexus acquired below was leaked on the failure
  path (p/q/r NULL) and the acquire itself was not checked for NULL.
*/
static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const PixelPacket
    *magick_restrict r;

  IndexPacket
    *magick_restrict nexus_indexes,
    *magick_restrict indexes;

  MagickOffsetType
    n;

  MagickPixelPacket
    alpha,
    beta;

  NexusInfo
    **magick_restrict mask_nexus;

  PixelPacket
    *magick_restrict p,
    *magick_restrict q;

  ssize_t
    y;

  /*
    Apply composite mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->mask == (Image *) NULL) || (image->storage_class == PseudoClass))
    return(MagickTrue);
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  mask_nexus=AcquirePixelCacheNexus(1);
  if (mask_nexus == (NexusInfo **) NULL)
    return(MagickFalse);
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,
    nexus_info->region.y,nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  indexes=nexus_info->virtual_nexus->indexes;
  q=nexus_info->pixels;
  nexus_indexes=nexus_info->indexes;
  r=GetVirtualPixelCacheNexus(image->mask,MaskVirtualPixelMethod,
    nexus_info->region.x,nexus_info->region.y,nexus_info->region.width,
    nexus_info->region.height,mask_nexus[0],&image->exception);
  if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL) ||
      (r == (const PixelPacket *) NULL))
    {
      /* Release the temporary mask nexus; previously it leaked here. */
      mask_nexus=DestroyPixelCacheNexus(mask_nexus,1);
      return(MagickFalse);
    }
  n=0;
  GetMagickPixelPacket(image,&alpha);
  GetMagickPixelPacket(image,&beta);
  for (y=0; y < (ssize_t) nexus_info->region.height; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) nexus_info->region.width; x++)
    {
      SetMagickPixelPacket(image,p,indexes+n,&alpha);
      SetMagickPixelPacket(image,q,nexus_indexes+n,&beta);
      /* Blend the nexus pixel with the authentic pixel, weighted by the
         intensity of the corresponding mask pixel. */
      ApplyPixelCompositeMask(&beta,GetPixelIntensity(image,r),&alpha,
        alpha.opacity,&beta);
      SetPixelRed(q,ClampToQuantum(beta.red));
      SetPixelGreen(q,ClampToQuantum(beta.green));
      SetPixelBlue(q,ClampToQuantum(beta.blue));
      SetPixelOpacity(q,ClampToQuantum(beta.opacity));
      if (cache_info->active_index_channel != MagickFalse)
        SetPixelIndex(nexus_indexes+n,GetPixelIndex(indexes+n));
      p++;
      q++;
      r++;
      n++;
    }
  }
  mask_nexus=DestroyPixelCacheNexus(mask_nexus,1);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p e n P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenPixelCache() allocates the pixel cache. This includes defining the cache
% dimensions, allocating space for the image pixels and optionally the
% colormap indexes, and memory mapping the cache if it is disk based. The
% cache nexus array is initialized as well.
%
% The format of the OpenPixelCache() method is:
%
% MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  OpenPixelCacheOnDisk() opens (or creates) the on-disk pixel cache file in
  the requested MapMode and records the descriptor in cache_info.  Returns
  MagickTrue on success, MagickFalse if the file cannot be opened.
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    file;

  /*
    Open pixel cache on disk.
  */
  if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
    return(MagickTrue);  /* cache already open and in the proper mode */
  if (*cache_info->cache_filename == '\0')
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    switch (mode)
    {
      case ReadMode:
      {
        file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
        break;
      }
      case WriteMode:
      {
        /* Try exclusive create first; fall back to opening an existing
           file if it is already there. */
        file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE);
        break;
      }
      case IOMode:
      default:
      {
        /* Same exclusive-create-then-fallback dance, but read/write. */
        file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
          O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
        break;
      }
    }
  if (file == -1)
    return(MagickFalse);
  (void) AcquireMagickResource(FileResource,1);
  /* Replace any previously open descriptor before adopting the new one. */
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info);
  cache_info->file=file;
  cache_info->disk_mode=mode;
  return(MagickTrue);
}
/*
  WritePixelCacheRegion() writes `length' bytes from `buffer' to the pixel
  cache file at `offset', looping over short writes and retrying on EINTR.
  Returns the number of bytes actually written (possibly fewer than `length'
  on a non-EINTR error), or -1 if the initial seek fails (no-pwrite builds).
*/
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  /* Without pwrite() we must position the shared file offset first. */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        count=0;
        /* Retry only if the call was interrupted by a signal. */
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}
/*
  SetPixelCacheExtent() ensures the on-disk pixel cache file is at least
  `length' bytes long, extending it if necessary, and rewinds the file to
  offset 0.  Returns MagickTrue on success, MagickFalse on any I/O failure.
*/
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    count,
    extent,
    offset;

  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MaxTextExtent],
        message[MaxTextExtent];

      (void) FormatMagickSize(length,MagickFalse,format);
      (void) FormatLocaleString(message,MaxTextExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* Current file size == seek position at end-of-file. */
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1;  /* already large enough */
  else
    {
      /* Grow the file by writing one byte at the last required position. */
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
        "");
      if (count != 1)
        return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      /* Optionally pre-allocate the backing blocks to avoid sparse files. */
      if (cache_info->synchronize != MagickFalse)
        if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0)
          return(MagickFalse);
#endif
    }
  /* Rewind so subsequent reads/writes start at the beginning. */
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(MagickTrue);
}
/*
  OpenPixelCache() allocates the pixel cache for `image'.  Backends are
  attempted in order: in-memory (heap or anonymous map), distributed remote
  cache, memory-mapped disk file, then plain disk file.  Returns MagickTrue
  on success; on failure the cache type is reset to UndefinedCache and
  MagickFalse is returned.
*/
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    source_info;

  char
    format[MaxTextExtent],
    message[MaxTextExtent];

  const char
    *hosts,
    *type;

  MagickSizeType
    length,
    number_pixels;

  MagickStatusType
    status;

  size_t
    columns,
    packet_size;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (cache_anonymous_memory < 0)
    {
      char
        *value;

      /*
        Does the security policy require anonymous mapping for pixel cache?
      */
      cache_anonymous_memory=0;
      value=GetPolicyValue("pixel-cache-memory");
      if (value == (char *) NULL)
        value=GetPolicyValue("cache:memory-map");
      if (LocaleCompare(value,"anonymous") == 0)
        {
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
          cache_anonymous_memory=1;
#else
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
            "'%s' (policy requires anonymous memory mapping)",image->filename);
#endif
        }
      value=DestroyString(value);
    }
  if ((image->columns == 0) || (image->rows == 0))
    ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Enforce the configured width/height resource limits. */
  if (((MagickSizeType) image->columns > cache_info->width_limit) ||
      ((MagickSizeType) image->rows > cache_info->height_limit))
    ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
      image->filename);
  if (GetMagickResourceLimit(ListLengthResource) != MagickResourceInfinity)
    {
      length=GetImageListLength(image);
      if (AcquireMagickResource(ListLengthResource,length) == MagickFalse)
        ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit",
          image->filename);
    }
  /* Keep a copy of the previous cache so its pixels can be cloned over. */
  source_info=(*cache_info);
  source_info.file=(-1);
  (void) FormatLocaleString(cache_info->filename,MaxTextExtent,"%s[%.20g]",
    image->filename,(double) image->scene);
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  cache_info->rows=image->rows;
  cache_info->columns=image->columns;
  cache_info->channels=image->channels;
  cache_info->active_index_channel=((image->storage_class == PseudoClass) ||
    (image->colorspace == CMYKColorspace)) ? MagickTrue : MagickFalse;
  cache_info->mode=mode;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  packet_size=sizeof(PixelPacket);
  if (cache_info->active_index_channel != MagickFalse)
    packet_size+=sizeof(IndexPacket);
  length=number_pixels*packet_size;
  /* Recompute columns from length to detect arithmetic overflow above. */
  columns=(size_t) (length/cache_info->rows/packet_size);
  if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
      ((ssize_t) cache_info->rows < 0))
    ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
      image->filename);
  cache_info->length=length;
  if (image->ping != MagickFalse)
    {
      /* Ping mode: no pixel storage is needed at all. */
      cache_info->type=PingCache;
      return(MagickTrue);
    }
  status=AcquireMagickResource(AreaResource,(MagickSizeType)
    cache_info->columns*cache_info->rows);
  if (cache_info->mode == PersistMode)
    status=MagickFalse;  /* persistent caches always live on disk */
  length=number_pixels*(sizeof(PixelPacket)+sizeof(IndexPacket));
  if ((status != MagickFalse) &&
      (length == (MagickSizeType) ((size_t) length)) &&
      ((cache_info->type == UndefinedCache) ||
       (cache_info->type == MemoryCache)))
    {
      /* First choice: keep the pixels entirely in memory. */
      status=AcquireMagickResource(MemoryResource,cache_info->length);
      if (status != MagickFalse)
        {
          status=MagickTrue;
          if (cache_anonymous_memory <= 0)
            {
              cache_info->mapped=MagickFalse;
              cache_info->pixels=(PixelPacket *) MagickAssumeAligned(
                AcquireAlignedMemory(1,(size_t) cache_info->length));
            }
          else
            {
              /* Policy requested anonymous memory mapping. */
              cache_info->mapped=MagickTrue;
              cache_info->pixels=(PixelPacket *) MapBlob(-1,IOMode,0,(size_t)
                cache_info->length);
            }
          if (cache_info->pixels == (PixelPacket *) NULL)
            {
              /* Allocation failed; restore previous pixel pointers. */
              cache_info->mapped=source_info.mapped;
              cache_info->pixels=source_info.pixels;
            }
          else
            {
              /*
                Create memory pixel cache.
              */
              cache_info->colorspace=image->colorspace;
              cache_info->type=MemoryCache;
              cache_info->indexes=(IndexPacket *) NULL;
              if (cache_info->active_index_channel != MagickFalse)
                cache_info->indexes=(IndexPacket *) (cache_info->pixels+
                  number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status&=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MaxTextExtent,
                    "open %s (%s %s, %.20gx%.20g %s)",cache_info->filename,
                    cache_info->mapped != MagickFalse ? "Anonymous" : "Heap",
                    type,(double) cache_info->columns,(double) cache_info->rows,
                    format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              cache_info->storage_class=image->storage_class;
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
    }
  status=AcquireMagickResource(DiskResource,cache_info->length);
  hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts",
    exception);
  if ((status == MagickFalse) && (hosts != (const char *) NULL))
    {
      DistributeCacheInfo
        *server_info;

      /*
        Distribute the pixel cache to a remote server.
      */
      server_info=AcquireDistributeCacheInfo(exception);
      if (server_info != (DistributeCacheInfo *) NULL)
        {
          status=OpenDistributePixelCache(server_info,image);
          if (status == MagickFalse)
            {
              ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
                GetDistributeCacheHostname(server_info));
              server_info=DestroyDistributeCacheInfo(server_info);
            }
          else
            {
              /*
                Create a distributed pixel cache.
              */
              status=MagickTrue;
              cache_info->type=DistributedCache;
              cache_info->storage_class=image->storage_class;
              cache_info->colorspace=image->colorspace;
              cache_info->server_info=server_info;
              (void) FormatLocaleString(cache_info->cache_filename,
                MaxTextExtent,"%s:%d",GetDistributeCacheHostname(
                (DistributeCacheInfo *) cache_info->server_info),
                GetDistributeCachePort((DistributeCacheInfo *)
                cache_info->server_info));
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickFalse,
                    format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MaxTextExtent,
                    "open %s (%s[%d], %s, %.20gx%.20g %s)",cache_info->filename,
                    cache_info->cache_filename,GetDistributeCacheFile(
                    (DistributeCacheInfo *) cache_info->server_info),type,
                    (double) cache_info->columns,(double) cache_info->rows,
                    format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
      /* No disk resource and the distributed cache failed too. */
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create pixel cache on disk.
  */
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) &&
      (cache_info->mode != PersistMode))
    {
      /* Start from a fresh cache file when overwriting a prior disk cache. */
      (void) ClosePixelCacheOnDisk(cache_info);
      *cache_info->cache_filename='\0';
    }
  if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
        image->filename);
      return(MagickFalse);
    }
  status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
    cache_info->length);
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToExtendCache",
        image->filename);
      return(MagickFalse);
    }
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  cache_info->type=DiskCache;
  length=number_pixels*(sizeof(PixelPacket)+sizeof(IndexPacket));
  if (length == (MagickSizeType) ((size_t) length))
    {
      /* Prefer memory-mapping the disk cache when the length fits size_t. */
      status=AcquireMagickResource(MapResource,cache_info->length);
      if (status != MagickFalse)
        {
          cache_info->pixels=(PixelPacket *) MapBlob(cache_info->file,mode,
            cache_info->offset,(size_t) cache_info->length);
          if (cache_info->pixels == (PixelPacket *) NULL)
            {
              /* Mapping failed; fall back to the plain disk cache below. */
              cache_info->mapped=source_info.mapped;
              cache_info->pixels=source_info.pixels;
              RelinquishMagickResource(MapResource,cache_info->length);
            }
          else
            {
              /*
                Create file-backed memory-mapped pixel cache.
              */
              (void) ClosePixelCacheOnDisk(cache_info);
              cache_info->type=MapCache;
              cache_info->mapped=MagickTrue;
              cache_info->indexes=(IndexPacket *) NULL;
              if (cache_info->active_index_channel != MagickFalse)
                cache_info->indexes=(IndexPacket *) (cache_info->pixels+
                  number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MaxTextExtent,
                    "open %s (%s[%d], %s, %.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    cache_info->file,type,(double) cache_info->columns,
                    (double) cache_info->rows,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
    }
  /* Last resort: plain (unmapped) disk cache. */
  status=MagickTrue;
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MaxTextExtent,
        "open %s (%s[%d], %s, %.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  if (status == 0)
    {
      cache_info->type=UndefinedCache;
      return(MagickFalse);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r s i s t P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PersistPixelCache() attaches to or initializes a persistent pixel cache. A
% persistent pixel cache is one that resides on disk and is not destroyed
% when the program exits.
%
% The format of the PersistPixelCache() method is:
%
% MagickBooleanType PersistPixelCache(Image *image,const char *filename,
% const MagickBooleanType attach,MagickOffsetType *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filename: the persistent pixel cache filename.
%
%    o attach: A value other than zero attaches to (rather than initializes)
%      an existing persistent pixel cache.
%
% o offset: the offset in the persistent cache to store pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  PersistPixelCache() attaches to (attach != MagickFalse) or creates a
  persistent (on-disk, not destroyed at exit) pixel cache at `filename'.
  On return, *offset is advanced past this cache, rounded up to the next
  page boundary.  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Ensure any device-side pixels are synchronized back to the host. */
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MaxTextExtent);
      cache_info->type=MapCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      /* Advance past this cache, page-aligned. */
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  /*
    Clone persistent pixel cache.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,MaxTextExtent);
  clone_info->file=(-1);
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->columns=cache_info->columns;
  clone_info->rows=cache_info->rows;
  clone_info->active_index_channel=cache_info->active_index_channel;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  clone_info->channels=cache_info->channels;
  clone_info->offset=(*offset);
  status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  /* Advance past this cache, page-aligned. */
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelCacheNexus() method is:
%
% PixelPacket *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% const MagickBooleanType clone,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to set.
%
% o clone: clone the pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  QueueAuthenticPixel() is a convenience wrapper that forwards directly to
  QueueAuthenticPixelCacheNexus(); see that method for parameter semantics.
*/
MagickExport PixelPacket *QueueAuthenticPixel(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,clone,nexus_info,
    exception));
}
MagickExport PixelPacket *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    buffered;

  MagickOffsetType
    first,
    last;

  MagickSizeType
    number_pixels;

  /*
    Validate pixel cache geometry: the anchor must fall inside the cache and
    the far corner of the region must not run past the last pixel.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((PixelPacket *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((PixelPacket *) NULL);
    }
  first=(MagickOffsetType) y*cache_info->columns+x;
  if (first < 0)
    return((PixelPacket *) NULL);
  last=first+(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  if ((MagickSizeType) last >= number_pixels)
    return((PixelPacket *) NULL);
  /*
    Return pixel cache.  A clip or composite mask forces a buffered nexus.
  */
  buffered=((image->clip_mask != (Image *) NULL) ||
    (image->mask != (Image *) NULL)) ? MagickTrue : MagickFalse;
  return(SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows,
    buffered,nexus_info,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelsCache() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelsCache() method is:
%
% PixelPacket *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PixelPacket *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  int
    id;

  /*
    Queue a write-only pixel region through this thread's private cache
    nexus; the pixels are synced back with SyncAuthenticPixelsCache().
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  id=GetOpenMPThreadId();
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u e u e A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixels() queues a mutable pixel region. If the region is
% successfully initialized a pointer to a PixelPacket array representing the
% region is returned, otherwise NULL is returned. The returned pointer may
% point to a temporary working buffer for the pixels or it may point to the
% final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This is useful if the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not
% initialize the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in memory, or in a
% memory-mapped file. The returned pointer must *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to obtain
% the black color component or the colormap indexes (of type IndexPacket)
% corresponding to the region. Once the PixelPacket (and/or IndexPacket)
% array has been updated, the changes must be saved back to the underlying
% image using SyncAuthenticPixels() or they may be lost.
%
% The format of the QueueAuthenticPixels() method is:
%
% PixelPacket *QueueAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  int
    id;

  QueueAuthenticPixelsHandler
    queue_handler;

  /*
    Queue a mutable pixel region.  If a custom queue handler was installed
    via SetPixelCacheMethods(), delegate to it; otherwise use this thread's
    cache nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  queue_handler=cache_info->methods.queue_authentic_pixels_handler;
  if (queue_handler != (QueueAuthenticPixelsHandler) NULL)
    return(queue_handler(image,x,y,columns,rows,exception));
  id=GetOpenMPThreadId();
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCacheIndexes() reads colormap indexes from the specified region of
% the pixel cache.
%
% The format of the ReadPixelCacheIndexes() method is:
%
% MagickBooleanType ReadPixelCacheIndexes(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the colormap indexes.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    i;

  ssize_t
    count;

  /*
    Read `length' bytes at byte position `offset' of the pixel cache file
    into `buffer'.  Returns the number of bytes actually read; a value less
    than `length' (or -1 on seek failure) indicates an error.
  */
#if !defined(MAGICKCORE_HAVE_PREAD)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
    /*
      Transfer in chunks capped at MAGICK_SSIZE_MAX; pread() carries the file
      position explicitly, the read() fallback relies on the lseek() above.
    */
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX));
#else
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        count=0;
        /*
          Retry when interrupted by a signal; any other error (or EOF)
          terminates the loop with a short byte count.
        */
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}
static MagickBooleanType ReadPixelCacheIndexes(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  IndexPacket
    *magick_restrict q;

  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  ssize_t
    y;

  size_t
    rows;

  /*
    Copy the colormap indexes for the nexus region out of the pixel cache and
    into the nexus index buffer.  Returns MagickTrue on success, MagickFalse
    when there is no index channel or a transfer fails.
    (Fix: restored `&region' where the source had been corrupted to `®ion'
    by an HTML-entity mangling of `&reg'.)
  */
  if (cache_info->active_index_channel == MagickFalse)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus points directly into the cache; no copy */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(IndexPacket);
  rows=nexus_info->region.height;
  extent=length*rows;
  q=nexus_info->indexes;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      IndexPacket
        *magick_restrict p;

      /*
        Read indexes from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /*
            Full-width rows are contiguous: coalesce into a single copy.
          */
          length=extent;
          rows=1UL;
        }
      p=cache_info->indexes+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read indexes from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /*
        On disk the index channel follows the pixel channel, hence the
        extent*sizeof(PixelPacket) displacement in the read offset below.
      */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          sizeof(PixelPacket)+offset*sizeof(*q),length,(unsigned char *) q);
        if (count < (MagickOffsetType) length)
          break;  /* short read: reported as an error below */
        offset+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read indexes from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* transfer one row per request */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheIndexes((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /*
    y falls short of rows only when one of the transfers above failed.
  */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCachePixels() reads pixels from the specified region of the pixel
% cache.
%
% The format of the ReadPixelCachePixels() method is:
%
% MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  PixelPacket
    *magick_restrict q;

  ssize_t
    y;

  size_t
    rows;

  /*
    Copy the pixels for the nexus region out of the pixel cache and into the
    nexus pixel buffer.  Returns MagickTrue on success, MagickFalse on an
    arithmetic overflow or transfer failure.
    (Fix: restored `&region' where the source had been corrupted to `®ion'
    by an HTML-entity mangling of `&reg'.)
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus points directly into the cache; no copy */
  /*
    Compute the starting pixel offset and per-row byte length, verifying by
    division that each multiplication did not overflow.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(PixelPacket);
  if ((length/sizeof(PixelPacket)) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  q=nexus_info->pixels;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      PixelPacket
        *magick_restrict p;

      /*
        Read pixels from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /*
            Full-width rows are contiguous: coalesce into a single copy.
          */
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          sizeof(*q),length,(unsigned char *) q);
        if (count < (MagickOffsetType) length)
          break;  /* short read: reported as an error below */
        offset+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* transfer one row per request */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /*
    y falls short of rows only when one of the transfers above failed.
  */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e f e r e n c e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferencePixelCache() increments the reference count associated with the
% pixel cache returning a pointer to the cache.
%
% The format of the ReferencePixelCache method is:
%
% Cache ReferencePixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickExport Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Increment the pixel cache reference count under the cache semaphore and
    return the cache.
    (Fix: the NULL check compared against `(Cache *) NULL'; `Cache' is
    already a pointer typedef, so the correct cast is `(Cache) NULL', as
    used everywhere else in this file.)
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e E p o c h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheEpoch() resets the pixel cache epoch.
%
% The format of the ResetPixelCacheEpoch method is:
%
% void ResetPixelCacheEpoch(void)
%
*/
MagickPrivate void ResetPixelCacheEpoch(void)
{
  /*
    Reset the file-global pixel cache epoch counter to zero.
  */
  cache_epoch=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
% The format of the SetPixelCacheMethods() method is:
%
% SetPixelCacheMethods(Cache *,CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickExport void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Set cache pixel methods: each handler supplied in cache_methods that is
    non-NULL replaces the corresponding handler installed in the cache.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_indexes_from_handler !=
      (GetVirtualIndexesFromHandler) NULL)
    cache_info->methods.get_virtual_indexes_from_handler=
      cache_methods->get_virtual_indexes_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_indexes_from_handler !=
      (GetAuthenticIndexesFromHandler) NULL)
    cache_info->methods.get_authentic_indexes_from_handler=
      cache_methods->get_authentic_indexes_from_handler;
  /*
    Fix: test the handler supplied in cache_methods (not the one already
    installed in cache_info->methods) before overwriting, consistent with
    every other handler above; the original read cache_info->methods here,
    which could install a NULL handler when cache_methods carried none.
  */
  get_one_virtual_pixel_from_handler=
    cache_methods->get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e N e x u s P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheNexusPixels() defines the region of the cache for the
% specified cache nexus.
%
% The format of the SetPixelCacheNexusPixels() method is:
%
% PixelPacket *SetPixelCacheNexusPixels(
% const CacheInfo *magick_restrict cache_info,const MapMode mode,
% const ssize_t x,const ssize_t y,const size_t width,const size_t height,
% const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o x,y,width,height: define the region of this particular cache nexus.
%
% o buffered: pixels are buffered.
%
% o nexus_info: the cache nexus to set.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MagickSizeType length,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  size_t
    size;

  /*
    Allocate `length' bytes of staging memory for a cache nexus: an
    anonymous mapping when cache_anonymous_memory is enabled, otherwise
    zeroed heap-aligned memory.
  */
  size=(size_t) length;
  if ((MagickSizeType) size != length)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
  if (cache_anonymous_memory > 0)
    {
      nexus_info->cache=(PixelPacket *) MapBlob(-1,IOMode,0,size);
      if (nexus_info->cache != (PixelPacket *) NULL)
        nexus_info->mapped=MagickTrue;
    }
  else
    {
      nexus_info->cache=(PixelPacket *) MagickAssumeAligned(
        AcquireAlignedMemory(1,size));
      if (nexus_info->cache != (PixelPacket *) NULL)
        (void) memset(nexus_info->cache,0,size);
    }
  if (nexus_info->cache == (PixelPacket *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=length;
  return(MagickTrue);
}
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  unsigned char
    *pixels;

  /*
    Hint the CPU to prefetch one cache line beyond the start of the nexus
    pixels; the prefetch flags stay compile-time constants (0=read, 1=write).
  */
  if (nexus_info->length < CACHE_LINE_SIZE)
    return;
  pixels=(unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE;
  if (mode == ReadMode)
    {
      MagickCachePrefetch(pixels,0,1);
      return;
    }
  MagickCachePrefetch(pixels,1,1);
}
static inline MagickBooleanType ValidatePixelOffset(const ssize_t x,
  const size_t a)
{
  /*
    Return MagickFalse when offsetting x by a would overflow (or underflow)
    signed ssize_t arithmetic; MagickTrue when the offset is safe.
  */
  if (x >= 0)
    {
      if (x >= ((ssize_t) MAGICK_SSIZE_MAX-(ssize_t) a))
        return(MagickFalse);
    }
  if (x <= ((ssize_t) MAGICK_SSIZE_MIN+(ssize_t) a))
    return(MagickFalse);
  return(MagickTrue);
}
static PixelPacket *SetPixelCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MapMode mode,
  const ssize_t x,const ssize_t y,const size_t width,const size_t height,
  const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  /*
    Bind the cache nexus to the requested region: either point it directly
    at the in-memory cache pixels (fast path), or attach a private staging
    buffer that is later synced back to the cache.  Returns the nexus pixel
    pointer, or NULL on error (an exception is raised).
  */
  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((PixelPacket *) NULL);
  assert(nexus_info->signature == MagickCoreSignature);
  (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
  if ((width == 0) || (height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "NoPixelsDefinedInCache","`%s'",cache_info->filename);
      return((PixelPacket *) NULL);
    }
  /*
    Reject regions exceeding the configured limits or whose offsets would
    overflow signed arithmetic.
  */
  if (((MagickSizeType) width > cache_info->width_limit) ||
      ((MagickSizeType) height > cache_info->height_limit) ||
      (ValidatePixelOffset(x,width) == MagickFalse) ||
      (ValidatePixelOffset(y,height) == MagickFalse))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "WidthOrHeightExceedsLimit","`%s'",cache_info->filename);
      return((PixelPacket *) NULL);
    }
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      /*
        Direct access is possible only when the region lies fully inside the
        cache and is contiguous in memory: full-width rows, or a single-row
        segment.
      */
      if (((x >= 0) && (y >= 0) &&
          (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) &&
          (((x == 0) && (width == cache_info->columns)) || ((height == 1) &&
          (((ssize_t) width+x-1) < (ssize_t) cache_info->columns))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) y*cache_info->columns+x;
          nexus_info->pixels=cache_info->pixels+offset;
          nexus_info->indexes=(IndexPacket *) NULL;
          if (cache_info->active_index_channel != MagickFalse)
            nexus_info->indexes=cache_info->indexes+offset;
          nexus_info->region.width=width;
          nexus_info->region.height=height;
          nexus_info->region.x=x;
          nexus_info->region.y=y;
          nexus_info->authentic_pixel_cache=MagickTrue;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  number_pixels=(MagickSizeType) width*height;
  length=MagickMax(number_pixels,MagickMax(cache_info->columns,
    cache_info->rows))*sizeof(PixelPacket);
  if (cache_info->active_index_channel != MagickFalse)
    length+=number_pixels*sizeof(IndexPacket);
  status=MagickTrue;
  if (nexus_info->cache == (PixelPacket *) NULL)
    status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
  else
    if (nexus_info->length < length)
      {
        /*
          The existing staging buffer is too small: reallocate.
        */
        RelinquishCacheNexusPixels(nexus_info);
        status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
      }
  if (status == MagickFalse)
    {
      (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
      return((PixelPacket *) NULL);
    }
  /*
    When the index channel is active, indexes live immediately after the
    pixels in the staging buffer.
  */
  nexus_info->pixels=nexus_info->cache;
  nexus_info->indexes=(IndexPacket *) NULL;
  if (cache_info->active_index_channel != MagickFalse)
    nexus_info->indexes=(IndexPacket *) (nexus_info->pixels+number_pixels);
  nexus_info->region.width=width;
  nexus_info->region.height=height;
  nexus_info->region.x=x;
  nexus_info->region.y=y;
  nexus_info->authentic_pixel_cache=cache_info->type == PingCache ?
    MagickTrue : MagickFalse;
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  return(nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
% pixel cache and returns the previous setting. A virtual pixel is any pixel
% access that is outside the boundaries of the image cache.
%
% The format of the SetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod SetPixelCacheVirtualMethod(const Image *image,
% const VirtualPixelMethod virtual_pixel_method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,
  const Quantum opacity)
{
  CacheInfo
    *magick_restrict cache_info;

  CacheView
    *magick_restrict image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Enable the matte channel and set every pixel's opacity to `opacity',
    row by row (parallelized with OpenMP when available).  Returns MagickTrue
    when all rows were updated and synced.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->matte=MagickTrue;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,&image->exception); /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      &image->exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      q->opacity=opacity;
      q++;
    }
    status=SyncCacheViewAuthenticPixels(image_view,&image->exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
MagickExport VirtualPixelMethod SetPixelCacheVirtualMethod(const Image *image,
  const VirtualPixelMethod virtual_pixel_method)
{
  CacheInfo
    *magick_restrict cache_info;

  VirtualPixelMethod
    method;

  /*
    Record the new virtual pixel method in the cache and return the previous
    setting; some methods additionally require a matte channel or colorspace
    adjustment (applied only when the image has pixels).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        /*
          A non-opaque background requires a matte channel; a non-gray
          background cannot be represented in a gray colorspace.
        */
        if ((image->background_color.opacity != OpaqueOpacity) &&
            (image->matte == MagickFalse))
          (void) SetCacheAlphaChannel((Image *) image,OpaqueOpacity);
        if ((IsPixelGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace((Image *) image,sRGBColorspace);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        /*
          Transparent virtual pixels require a matte channel.
        */
        if (image->matte == MagickFalse)
          (void) SetCacheAlphaChannel((Image *) image,OpaqueOpacity);
        break;
      }
      default:
        break;
    }
  return(method);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticOpenCLBuffer() ensures all the OpenCL operations have been
% completed and updates the host memory.
%
% The format of the SyncAuthenticOpenCLBuffer() method is:
%
% void SyncAuthenticOpenCLBuffer(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  MagickCLEnv
    clEnv;

  /*
    Wait for any outstanding OpenCL events on the cache buffer, map the
    buffer back into host memory, and release the OpenCL cache info.
  */
  assert(cache_info != (CacheInfo *)NULL);
  if ((cache_info->type != MemoryCache) ||
      (cache_info->opencl == (OpenCLCacheInfo *)NULL))
    return;
  /*
    Ensure single threaded access to OpenCL environment.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl != (OpenCLCacheInfo *)NULL)  /* re-check under lock */
    {
      cl_event
        *events;

      cl_uint
        event_count;

      clEnv=GetDefaultOpenCLEnv();
      events=CopyOpenCLEvents(cache_info->opencl,&event_count);
      if (events != (cl_event *) NULL)
        {
          cl_command_queue
            queue;

          cl_context
            context;

          cl_int
            status;

          PixelPacket
            *pixels;

          /*
            clEnqueueMapBuffer with CL_TRUE blocks until the listed events
            complete; the mapping is expected to land on the existing host
            pixel buffer.
          */
          context=GetOpenCLContext(clEnv);
          queue=AcquireOpenCLCommandQueue(clEnv);
          pixels=(PixelPacket *) clEnv->library->clEnqueueMapBuffer(queue,
            cache_info->opencl->buffer,CL_TRUE, CL_MAP_READ | CL_MAP_WRITE,0,
            cache_info->length,event_count,events,NULL,&status);
          assert(pixels == cache_info->pixels);
          events=(cl_event *) RelinquishMagickMemory(events);
          RelinquishOpenCLCommandQueue(clEnv,queue);
        }
      cache_info->opencl=RelinquishOpenCLCacheInfo(clEnv,cache_info->opencl);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
}
MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (Image *)NULL);
cache_info = (CacheInfo *)image->cache;
CopyOpenCLBuffer(cache_info);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
% in-memory or disk cache. The method returns MagickTrue if the pixel region
% is synced, otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelCacheNexus() method is:
%
% MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to sync.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  /*
    Apply any clip or composite mask to the nexus pixels before they are
    written back (DirectClass images only).
  */
  if ((image->storage_class == DirectClass) &&
      (image->clip_mask != (Image *) NULL) &&
      (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if ((image->storage_class == DirectClass) &&
      (image->mask != (Image *) NULL) &&
      (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      /*
        The nexus points directly into the cache: nothing to copy; just mark
        the image as modified.
      */
      if (image->taint == MagickFalse)
        image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  if ((cache_info->active_index_channel != MagickFalse) &&
      (WritePixelCacheIndexes(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if ((status != MagickFalse) && (image->taint == MagickFalse))
    image->taint=MagickTrue;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
% or disk cache. The method returns MagickTrue if the pixel region is synced,
% otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelsCache() method is:
%
% MagickBooleanType SyncAuthenticPixelsCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  int
    id;

  /*
    Sync this thread's cache nexus back to the in-memory or disk pixel
    cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  id=GetOpenMPThreadId();
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncAuthenticPixels() method is:
%
% MagickBooleanType SyncAuthenticPixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  int
    id;

  SyncAuthenticPixelsHandler
    sync_handler;

  /*
    Save queued image pixels to the pixel cache.  A custom sync handler
    installed via SetPixelCacheMethods() takes precedence; otherwise sync
    this thread's cache nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  sync_handler=cache_info->methods.sync_authentic_pixels_handler;
  if (sync_handler != (SyncAuthenticPixelsHandler) NULL)
    return(sync_handler(image,exception));
  id=GetOpenMPThreadId();
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncImagePixelCache() method is:
%
% MagickBooleanType SyncImagePixelCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  /*
    A non-NULL cache from GetImagePixelCache() means the pixels are synced.
  */
  cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception);
  if (cache_info == (CacheInfo *) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCacheIndexes() writes the colormap indexes to the specified
% region of the pixel cache.
%
% The format of the WritePixelCacheIndexes() method is:
%
% MagickBooleanType WritePixelCacheIndexes(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the colormap indexes.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheIndexes(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  const IndexPacket
    *magick_restrict p;

  ssize_t
    y;

  size_t
    rows;

  if (cache_info->active_index_channel == MagickFalse)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  /*
    Destination offset (in index entries) and per-row byte length of the
    nexus region.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(IndexPacket);
  rows=nexus_info->region.height;
  extent=(MagickSizeType) length*rows;
  p=nexus_info->indexes;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      IndexPacket
        *magick_restrict q;

      /*
        Write indexes to memory.  Collapse to a single row when the region
        spans full cache rows and the extent fits in a size_t.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->indexes+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width;
        q+=cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write indexes to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /*
        On disk, indexes follow the pixels; hence the extra
        extent*sizeof(PixelPacket) displacement.
      */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          sizeof(PixelPacket)+offset*sizeof(*p),length,(const unsigned char *)
          p);
        if (count < (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write indexes to distributed cache, one row per request unless the
        whole region fits in one transfer.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        /* fixed: "&region" was corrupted to "(R)ion" by an HTML-entity
           mangling of "&reg" */
        count=WriteDistributePixelCacheIndexes((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /*
    y < rows means one of the row writes above came up short.
  */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCachePixels() writes image pixels to the specified region of the
% pixel cache.
%
% The format of the WritePixelCachePixels() method is:
%
% MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  const PixelPacket
    *magick_restrict p;

  ssize_t
    y;

  size_t
    rows;

  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  /*
    Destination offset (in pixels) and per-row byte length of the nexus
    region.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(PixelPacket);
  rows=nexus_info->region.height;
  extent=length*rows;
  p=nexus_info->pixels;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      PixelPacket
        *magick_restrict q;

      /*
        Write pixels to memory.  Collapse to a single row when the region
        spans full cache rows and the extent fits in a size_t.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width;
        q+=cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          sizeof(*p),length,(const unsigned char *) p);
        if (count < (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache, one row per request unless the
        whole region fits in one transfer.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        /* fixed: "&region" was corrupted to "(R)ion" by an HTML-entity
           mangling of "&reg" */
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /*
    y < rows means one of the row writes above came up short.
  */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
|
GxB_SelectOp_wait.c | //------------------------------------------------------------------------------
// GxB_SelectOp_wait: wait for a user-defined GxB_SelectOp to complete
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// In SuiteSparse:GraphBLAS, a user-defined GxB_SelectOp has no pending
// operations to wait for. All this method does is verify that the op is
// properly initialized, and then it does an OpenMP flush.
#include "GB.h"
GrB_Info GxB_SelectOp_wait  // no work, just check if the GxB_SelectOp is valid
(
    GxB_SelectOp op,
    GrB_WaitMode waitmode   // unused: a user-defined SelectOp has no pending work
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GB_WHERE1 ("GxB_SelectOp_wait (op, waitmode)") ;
    GB_RETURN_IF_NULL_OR_FAULTY (op) ;

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    // make any prior writes to the op object visible to all threads
    #pragma omp flush
    return (GrB_SUCCESS) ;
}
|
// Test fixture: the bare numbered expression statements (0; 2; 4; ...) are
// position markers only.  The point of the test is the placement of
// "#pragma omp barrier" on every branch of the nested if/else inside the
// parallel region; the markers let an analysis tool identify which program
// points precede and follow each barrier.
int main () {
  int x;
#pragma omp parallel
  {
    0;
    if (1) {
      2;
      if (3) {
        4;
        x = 10;  // write to a variable declared outside the parallel region
#pragma omp barrier
        5;
      } else {
        6;
#pragma omp barrier
        7;
      }
      8;
    } else {
      9;
      if (10) {
        11;
#pragma omp barrier
        12;
      } else {
        13;
#pragma omp barrier
        14;
      }
      15;
    }
    16;
  }
  17;
}
|
GamePlayer.h | // JAGLAVAK CHESS ENGINE (c) 2019 Stuart Riffle
#pragma once
#include "Random.h"
#include "PlayoutParams.h"
// Plays out batches of random chess games, SIMD-wide: each SIMD position
// packs SimdWidth<SIMD>::LANES scalar positions that step in lockstep.
template< typename SIMD >
class GamePlayer
{
    enum { LANES = SimdWidth< SIMD >::LANES };

    const PlayoutParams* _Params;   // shared playout configuration (not owned)
    RandomGen _RandomGen;           // per-player RNG for move selection

public:

    // 'salt' perturbs the RNG seed so that concurrent players draw
    // decorrelated random move streams.
    PDECL GamePlayer( const PlayoutParams* params, u64 salt = 0 )
    {
        _Params = params;
        _RandomGen.SetSeed( params->_RandomSeed + salt );
    }

    // Play _NumGamesEach games from each of 'simdCount' SIMD position groups,
    // accumulating results into the matching entries of 'dest'.
    PDECL void PlayGames( const Position* pos, ScoreCard* dest, int simdCount )
    {
        // positions must be aligned for SIMD-wide loads
        assert( (uintptr_t) pos % sizeof( SIMD ) == 0 );

#if !ON_CUDA_DEVICE
        int totalCores = PlatDetectCpuCores();
        int coresToUse = _Params->_LimitPlayoutCores;

        // a negative limit means "all cores minus that many"
        if( coresToUse < 0 )
            coresToUse += totalCores;

        omp_set_num_threads( coresToUse );
#endif
        int totalIters = simdCount * _Params->_NumGamesEach;

        #pragma omp parallel for schedule(dynamic) if(totalIters > 0)
        for( int i = 0; i < totalIters; i++ )
        {
#if !ON_CUDA_DEVICE
            PlatLimitCores( coresToUse, false );
#endif
            PositionT< SIMD > simdPos;

            int idx = i % simdCount;
            int offset = idx * LANES;

            // gather LANES scalar positions into one SIMD position
            Swizzle< SIMD >( pos + offset, &simdPos );
            ___PLAYOUT___( simdPos, dest + offset );
        }
    }

protected:

    // Run one playout from startPos: make random moves until every lane's
    // game is decided or the move limit is hit, then tally the lane results.
    PDECL void ___PLAYOUT___( const PositionT< SIMD >& startPos, ScoreCard* outScores )
    {
        PositionT< SIMD > simdPos = startPos;
        MoveMapT< SIMD > simdMoveMap;
        simdPos.CalcMoveMap( &simdMoveMap );

        for( int i = 0; i < _Params->_MaxMovesPerGame; i++ )
        {
            MoveSpecT< SIMD > simdSpec = ChoosePlayoutMoves( simdPos, simdMoveMap );
            simdPos.Step( simdSpec, &simdMoveMap );
            if( GamesAreAllDone( simdPos ) )
                break;
        }

        // NOTE(review): reinterprets _GameResult as an array of u64 lanes —
        // assumes the SIMD lane layout matches; confirm against PositionT.
        u64* results = (u64*) &simdPos._GameResult;

        for( int lane = 0; lane < LANES; lane++ )
        {
            outScores[lane]._Wins[WHITE] += (results[lane] == RESULT_WHITE_WIN);
            outScores[lane]._Wins[BLACK] += (results[lane] == RESULT_BLACK_WIN);
            outScores[lane]._Plays++;
        }
    }

    // Pick one move per lane (scalar path), then repack into SIMD form.
    PDECL MoveSpecT< SIMD > ChoosePlayoutMoves( const PositionT< SIMD >& simdPos, const MoveMapT< SIMD >& simdMoveMap )
    {
        Position ALIGN_SIMD pos[LANES];
        MoveMap ALIGN_SIMD moveMap[LANES];
        MoveSpec ALIGN_SIMD spec[LANES];

        Unswizzle< SIMD >( &simdPos, pos );
        Unswizzle< SIMD >( &simdMoveMap, moveMap );

        for( int lane = 0; lane < LANES; lane++ )
            spec[lane] = this->ChooseMove( pos[lane], moveMap[lane] );

        MoveSpecT< SIMD > simdSpec;
        simdSpec.Unpack( spec );
        return simdSpec;
    }

    // A random move while the game is undecided; a null move once it is over
    // (so finished lanes idle while others continue).
    PDECL MoveSpec ChooseMove( const Position& pos, const MoveMap& moveMap )
    {
        if( pos._GameResult == RESULT_UNKNOWN )
        {
            MoveSpec randomMove = SelectRandomMove( pos, moveMap );
            return randomMove;
        }

        MoveSpec nullMove( 0, 0, 0 );
        return nullMove;
    }

    // True when no lane's result is still RESULT_UNKNOWN.
    PDECL bool GamesAreAllDone( const PositionT< SIMD >& simdPos )
    {
        u64* results = (u64*) &simdPos._GameResult;

        for( int i = 0; i < LANES; i++ )
            if( results[i] == RESULT_UNKNOWN )
                return false;

        return true;
    }

    // Uniformly random legal move; requires at least one legal move.
    PDECL MoveSpec SelectRandomMove( const Position& pos, const MoveMap& moveMap )
    {
        MoveList moveList;
        moveList.UnpackMoveMap( pos, moveMap );
        assert( moveList._Count > 0 );

        u64 idx = _RandomGen.GetRange( moveList._Count );
        return moveList._Move[idx];
    }
};
|
GB_binop__iseq_uint64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__iseq_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__iseq_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_uint64)
// A*D function (colscale): GB (_AxD__iseq_uint64)
// D*A function (rowscale): GB (_DxB__iseq_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_uint64)
// C=scalar+B GB (_bind1st__iseq_uint64)
// C=scalar+B' GB (_bind1st_tran__iseq_uint64)
// C=A+scalar GB (_bind2nd__iseq_uint64)
// C=A'+scalar GB (_bind2nd_tran__iseq_uint64)
// C type: uint64_t
// A type: uint64_t
// A pattern? 0
// B type: uint64_t
// B pattern? 0
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
// (fixed: a stray trailing "\" after the 0 line-spliced the following source
// line into this macro definition)
#define GB_A_IS_PATTERN \
    0

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint64_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
// (fixed: same stray trailing "\" as GB_A_IS_PATTERN)
#define GB_B_IS_PATTERN \
    0

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x == y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISEQ || GxB_NO_UINT64 || GxB_NO_ISEQ_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the numeric kernel lives in
// the included template, specialized by the GB_* macros defined above.
void GB (_Cdense_ewise3_noaccum__iseq_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix; returns
// GrB_NO_VALUE when this operator is compiled out via GB_DISABLE.
GrB_Info GB (_Cdense_accumB__iseq_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix; returns GrB_NO_VALUE when
// this operator is compiled out via GB_DISABLE.
GrB_Info GB (_Cdense_accumb__iseq_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    // single exit point (the original had a second, unreachable return
    // inside the block above); matches the _Cdense_accumB pattern.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale by the diagonal matrix D.
GrB_Info GB (_AxD__iseq_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template writes results directly into C->x
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale by the diagonal matrix D.
GrB_Info GB (_DxB__iseq_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template writes results directly into C->x
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B, via the GB_add template.
GrB_Info GB (_AaddB__iseq_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint64_t alpha_scalar ;
    uint64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // alpha/beta are only read in the eWiseUnion case, for entries
        // present in just one of A or B
        alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper.
GrB_Info GB (_AemultB_08__iseq_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
GrB_Info GB (_AemultB_02__iseq_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // (GB_BINOP_FLIP is 0 for ISEQ, so only this branch is compiled.)
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, with M sparse/hyper and A, B bitmap/full.
GrB_Info GB (_AemultB_04__iseq_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__iseq_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x == Bx [p]) for every entry present in B's bitmap.
GrB_Info GB (_bind1st__iseq_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // only entries present in the bitmap are computed
        if (GBB (Bb, p))
        {
            uint64_t bij = GBX (Bx, p, false) ;
            Cx [p] = (x == bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] == y) for every entry present in A's bitmap.
GrB_Info GB (_bind2nd__iseq_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only entries present in the bitmap are computed
        if (GBB (Ab, p))
        {
            uint64_t aij = GBX (Ax, p, false) ;
            Cx [p] = (aij == y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (x == aij) ;                      \
}

// C = op (x, A'): transpose A and apply the operator with x bound first.
GrB_Info GB (_bind1st_tran__iseq_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent code
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij == y) ;                      \
}

// C = op (A', y): transpose A and apply the operator with y bound second.
GrB_Info GB (_bind2nd_tran__iseq_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
LBCouetteSolver.h | // Copyright (C) 2015 Technische Universitaet Muenchen
// This file is part of the Mamico project. For conditions of distribution
// and use, please see the copyright notice in Mamico's main folder, or at
// www5.in.tum.de/mamico
#ifndef _MOLECULARDYNAMICS_COUPLING_SOLVERS_LBCOUETTESOLVER_H_
#define _MOLECULARDYNAMICS_COUPLING_SOLVERS_LBCOUETTESOLVER_H_
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "coupling/solvers/NumericalSolver.h"
namespace coupling {
namespace solvers{
class LBCouetteSolver;
}
}
/** In our scenario, the lower wall is accelerated and the upper wall stands still.
* The lower wall is located at zero height.
* @brief implements a three-dimensional Lattice-Boltzmann Couette flow solver.
* @author Philipp Neumann */
class coupling::solvers::LBCouetteSolver: public coupling::solvers::NumericalSolver {
public:
  /** @brief a simple constructor
   *  @param channelheight the width and height of the channel in y and z direction
   *  @param wallVelocity velocity at the moving wall, refers to Couette scenario
   *  @param dx the spacial step size, and equidistant grid is applied
   *  @param dt the time step
   *  @param kinVisc the kinematic viscosity of the fluid
   *  @param plotEveryTimestep the time step interval for plotting data;
   *                           4 means, every 4th time step is plotted
   *  @param filestem the name of the plotted file
   *  @param processes defines on how many processes the solver will run;
   *                   1,1,1 - sequential run - 1,2,2 = 1*2*2 = 4 processes
   *  @param numThreads number of OpenMP threads */
  LBCouetteSolver(
    const double channelheight,
    tarch::la::Vector<3,double> wallVelocity,
    const double kinVisc,
    const double dx,
    const double dt,
    const int plotEveryTimestep,
    const std::string filestem,
    const tarch::la::Vector<3,unsigned int> processes,
    const unsigned int numThreads=1
  ):
    coupling::solvers::NumericalSolver(channelheight, dx, dt, kinVisc, plotEveryTimestep,
      filestem, processes),
    // BGK relaxation parameter from the lattice viscosity
    _omega(1.0/(3.0*(kinVisc*dt/(dx*dx))+0.5)),
    // wall velocity converted to lattice units (dt/dx scaling)
    _wallVelocity((dt/dx)*wallVelocity)
  {
    // return if required
    if (skipRank()){return;}
    // two pdf fields (19 D3Q19 distributions per cell, +2 ghost layers per axis)
    _pdf1 = new double[19*(_domainSizeX+2)*(_domainSizeY+2)*(_domainSizeZ+2)];
    _pdf2 = new double[19*(_domainSizeX+2)*(_domainSizeY+2)*(_domainSizeZ+2)];
#if defined(_OPENMP)
    omp_set_num_threads(numThreads);
#endif
#if (COUPLING_MD_DEBUG==COUPLING_MD_YES)
    std::cout << "Domain size=" << _domainSizeX << "," << _domainSizeY << "," << _domainSizeZ << std::endl;
    std::cout << "tau=" << 1.0/_omega << std::endl;
    std::cout << "wallVelocity=" << _wallVelocity << std::endl;
    for (int z = 0; z < _domainSizeZ+2; z++){ for (int y = 0; y < _domainSizeY+2; y++){ for (int x = 0; x < _domainSizeX+2; x++){
      std::cout << x << "," << y << "," << z << "FLAG=" << _flag[get(x,y,z)] << std::endl;
    }}}
#endif
    // check pointers
    // NOTE(review): plain new[] throws std::bad_alloc rather than returning
    // NULL, so these checks mainly guard buffers allocated elsewhere — confirm
    if ( (_pdf1==NULL) || (_pdf2==NULL) || (_vel==NULL) || (_density==NULL) || (_flag==NULL) ){
      std::cout << "ERROR LBCouetteSolver: NULL ptr!" << std::endl; exit(EXIT_FAILURE);
    }
#if (COUPLING_MD_PARALLEL==COUPLING_MD_YES)
    if ( (_sendBufferX==NULL) || (_recvBufferX==NULL) || (_sendBufferY==NULL) || (_recvBufferY==NULL) || (_sendBufferZ==NULL) || (_recvBufferZ==NULL) ){
      std::cout << "ERROR LBCouetteSolver: NULL ptr in send/recv!" << std::endl; exit(EXIT_FAILURE);
    }
#endif
    // init everything with lattice weights
    #pragma omp parallel for
    for (int i = 0; i < (_domainSizeX+2)*(_domainSizeY+2)*(_domainSizeZ+2); i++){
      for (int q = 0; q < 19; q++){ _pdf1[get(i)*19+q] = _W[q]; _pdf2[get(i)*19+q] = _W[q]; }
    }
  }
/** @brief a simple destructor */
virtual ~LBCouetteSolver(){
if (_pdf1!=NULL){delete [] _pdf1; _pdf1=NULL;}
if (_pdf2!=NULL){delete [] _pdf2; _pdf2=NULL;}
if (_vel !=NULL){delete [] _vel; _vel=NULL;}
if (_density!=NULL){delete [] _density; _density=NULL;}
if (_flag!=NULL){delete [] _flag; _flag=NULL;}
#if (COUPLING_MD_PARALLEL==COUPLING_MD_YES)
if (_sendBufferX!=NULL){delete [] _sendBufferX; _sendBufferX=NULL;}
if (_sendBufferY!=NULL){delete [] _sendBufferY; _sendBufferY=NULL;}
if (_sendBufferZ!=NULL){delete [] _sendBufferZ; _sendBufferZ=NULL;}
if (_recvBufferX!=NULL){delete [] _recvBufferX; _recvBufferX=NULL;}
if (_recvBufferY!=NULL){delete [] _recvBufferY; _recvBufferY=NULL;}
if (_recvBufferZ!=NULL){delete [] _recvBufferZ; _recvBufferZ=NULL;}
#endif
}
  /** @brief advances one time step dt in time and triggers vtk plot if required
   *  @param dt coupling time interval; must be an integer multiple of the
   *            solver's own time step _dt (checked below) */
  void advance(double dt) override {
    if (skipRank()){return;}
    // number of LB steps that fit into dt, rounded to the nearest integer
    const int timesteps=floor( dt/_dt+0.5 );
    // reject dt that is not (within tolerance) a multiple of _dt
    if ( fabs(timesteps*_dt-dt)/_dt > 1.0e-8 ){std::cout << "ERROR LBCouetteSolver::advance(): time steps and dt do not match!" << std::endl; exit(EXIT_FAILURE);}
    for (int i = 0; i < timesteps; i++){
      plot();
      collidestream();
      communicate(); // exchange between neighbouring MPI subdomains
      _counter++;
    }
  }
/** @brief applies the values received from the MD-solver within the continuum solver
 * @param recvBuffer holds the data (mass/momentum) from the md solver
 * @param recvIndices the indices to connect the data from the buffer with macroscopic cells
 * @param indexConversion instance of the indexConversion used to map flat indices to 3d cell coordinates */
void setMDBoundaryValues(
std::vector<coupling::datastructures::MacroscopicCell<3>* >& recvBuffer,const unsigned int * const recvIndices,
const coupling::IndexConversion<3>& indexConversion)override{
if (skipRank()){return ;}
// loop over all received cells
const unsigned int size = (unsigned int) recvBuffer.size();
for (unsigned int i = 0; i < size; i++){
// determine cell index of this cell in LB domain:
// shift by the global _offset and subtract this rank's subdomain start to
// obtain local coordinates (including the ghost layer at coordinate 0)
tarch::la::Vector<3,unsigned int> globalCellCoords = indexConversion.getGlobalVectorCellIndex(recvIndices[i]);
globalCellCoords[0] = (globalCellCoords[0]+_offset[0]) - _coords[0]*_avgDomainSizeX;
globalCellCoords[1] = (globalCellCoords[1]+_offset[1]) - _coords[1]*_avgDomainSizeY;
globalCellCoords[2] = (globalCellCoords[2]+_offset[2]) - _coords[2]*_avgDomainSizeZ;
#if (COUPLING_MD_DEBUG==COUPLING_MD_YES)
std::cout << "Process coords: " << _coords << ": GlobalCellCoords for index " << indexConversion.getGlobalVectorCellIndex(recvIndices[i]) << ": " << globalCellCoords << std::endl;
#endif
const int index = get(globalCellCoords[0],globalCellCoords[1],globalCellCoords[2]);
#if (COUPLING_MD_DEBUG==COUPLING_MD_YES)
if (_flag[index]!=MD_BOUNDARY){std::cout << "ERROR LBCouetteSolver::setMDBoundaryValues(): Cell " << index << " is no MD boundary cell!" << std::endl; exit(EXIT_FAILURE);}
#endif
// set velocity value and pdfs in MD boundary cell (before streaming); the boundary velocities are interpolated between the neighbouring and this cell. This interpolation is valid for
// FLUID-MD_BOUNDARY neighbouring relations only.
// determine local velocity received from MaMiCo and convert it to LB units; store the velocity in _vel
// (velocity = momentum/mass, scaled by _dt/_dx to go from MD to lattice units)
tarch::la::Vector<3,double> localVel( (1.0/recvBuffer[i]->getMacroscopicMass())*(_dt/_dx)*recvBuffer[i]->getMacroscopicMomentum());
for (unsigned int d = 0; d<3; d++){ _vel[3*index+d] = localVel[d]; }
// loop over all pdfs and set them according to interpolated moving-wall conditions
for (unsigned int q = 0; q < 19; q++){
// index of neighbour cell; only if cell is located inside local domain
if ( ((int)globalCellCoords[0]+_C[q][0] > 0) && ((int)globalCellCoords[0]+_C[q][0]<_domainSizeX+1)
&& ((int)globalCellCoords[1]+_C[q][1] > 0) && ((int)globalCellCoords[1]+_C[q][1]<_domainSizeY+1)
&& ((int)globalCellCoords[2]+_C[q][2] > 0) && ((int)globalCellCoords[2]+_C[q][2]<_domainSizeZ+1)){
const int nbIndex = get((_C[q][0]+globalCellCoords[0]),(_C[q][1]+globalCellCoords[1]),(_C[q][2]+globalCellCoords[2]));
// wall velocity = average of this cell's and the neighbour's velocity
const tarch::la::Vector<3,double> interpolVel(0.5*(_vel[3*index]+_vel[3*nbIndex]),0.5*(_vel[3*index+1]+_vel[3*nbIndex+1]),0.5*(_vel[3*index+2]+_vel[3*nbIndex+2]));
// moving-wall (bounce-back) rule: reflect the opposite-direction pdf (18-q) of the neighbour,
// accelerated by the interpolated wall velocity
_pdf1[19*index+q] = _pdf1[19*nbIndex+18-q] - 6.0*_W[q]*_density[nbIndex]*(_C[18-q][0]*interpolVel[0]+_C[18-q][1]*interpolVel[1]+_C[18-q][2]*interpolVel[2]);
}
}
}
}
/** @brief returns the flow velocity at a position inside this rank's subdomain
 *  @param pos position (in MD units) for which the velocity will be returned
 *  @returns the velocity vector at that position, scaled back to MD units */
tarch::la::Vector<3,double> getVelocity(tarch::la::Vector<3,double> pos) const override {
  // lower-left-front corner of this rank's subdomain (in MD units)
  const tarch::la::Vector<3,double> domainOffset(_coords[0]*_dx*_avgDomainSizeX,_coords[1]*_dx*_avgDomainSizeY,_coords[2]*_dx*_avgDomainSizeZ);
  const int localDomainSize[3] = {_domainSizeX,_domainSizeY,_domainSizeZ};
  // position must lie inside this rank's subdomain (todo: restrict to debug builds)
  for (unsigned int d = 0; d < 3; d++){
    if ( (pos[d]<domainOffset[d]) || (pos[d]>domainOffset[d]+localDomainSize[d]*_dx) ){
      std::cout << "ERROR LBCouetteSolver::getVelocity(): Position " << pos << " out of range!" << std::endl; exit(EXIT_FAILURE);
    }
  }
  // map position to local cell coordinates; the +_dx shift accounts for the ghost layer
  tarch::la::Vector<3,unsigned int> cellCoords;
  for (unsigned int d = 0; d < 3; d++){ cellCoords[d] = (unsigned int) ((_dx+pos[d]-domainOffset[d])/_dx); }
  const int index = get(cellCoords[0],cellCoords[1],cellCoords[2]);
  // extract and scale velocity to "real"=MD units (factor _dx/_dt)
  tarch::la::Vector<3,double> vel(0.0);
  for (int d = 0; d < 3; d++){ vel[d] = _dx/_dt*_vel[3*index+d]; }
#if (COUPLING_MD_DEBUG==COUPLING_MD_YES)
  std::cout << "Position " << pos << " corresponds to cell: " << cellCoords << "; vel=" << vel << std::endl;
#endif
  return vel;
}
/** @brief returns the density at a position inside this rank's subdomain
 *  @param pos position (in MD units) for which the density will be returned
 *  @returns the density at that position */
double getDensity(tarch::la::Vector<3,double> pos) const override{
  // lower-left-front corner of this rank's subdomain (in MD units)
  const tarch::la::Vector<3,double> domainOffset(_coords[0]*_dx*_avgDomainSizeX,_coords[1]*_dx*_avgDomainSizeY,_coords[2]*_dx*_avgDomainSizeZ);
  const int localDomainSize[3] = {_domainSizeX,_domainSizeY,_domainSizeZ};
  // position must lie inside this rank's subdomain (todo: restrict to debug builds)
  for (unsigned int d = 0; d < 3; d++){
    if ( (pos[d]<domainOffset[d]) || (pos[d]>domainOffset[d]+localDomainSize[d]*_dx) ){
      std::cout << "ERROR LBCouetteSolver::getDensity(): Position " << pos << " out of range!" << std::endl; exit(EXIT_FAILURE);
    }
  }
  // map position to local cell coordinates; the +_dx shift accounts for the ghost layer
  tarch::la::Vector<3,unsigned int> cellCoords;
  for (unsigned int d = 0; d < 3; d++){ cellCoords[d] = (unsigned int) ((_dx+pos[d]-domainOffset[d])/_dx); }
  return _density[get(cellCoords[0],cellCoords[1],cellCoords[2])];
}
/** @brief changes the velocity at the moving wall (z=0)
 *  @param wallVelocity new wall velocity in MD units; it is converted to
 *  lattice units (factor _dt/_dx) before being stored */
virtual void setWallVelocity(const tarch::la::Vector<3,double> wallVelocity) override{
  const double md2lb = _dt/_dx; // conversion factor MD units -> lattice units
  _wallVelocity = md2lb*wallVelocity;
}
private:
/** calls stream() and collide() on every inner fluid cell and swaps the pdf fields
 * @brief collide-stream algorithm for the Lattice-Boltzmann method */
void collidestream(){
  // sweep over all inner cells; the ghost layer has coordinates 0 and size+1
#pragma omp parallel for
  for (int z = 1; z < _domainSizeZ+1; z++){
    for (int y = 1; y < _domainSizeY+1; y++){
      for (int x = 1; x < _domainSizeX+1; x++){
        const int cell = get(x,y,z);
        if (_flag[cell]==FLUID){
          stream(cell);
          collide(cell,x,y,z);
        }
      }
    }
  }
  // swap fields: _pdf1 holds the new time step again, _pdf2 the previous one
  double *tmp = _pdf1;
  _pdf1 = _pdf2;
  _pdf2 = tmp;
}
/** @brief the stream part of the LB algorithm (from pdf1 to pdf2)
 * @param index linearised index of the cell to be streamed
 *
 * Pull scheme: for each pair of opposite directions (q,18-q) the pdf
 * travelling along _C[q] is fetched from the neighbour in direction -_C[q].
 * nb is the pdf-array offset (19 values per cell) to the neighbour in
 * direction _C[q]; _xO and _yO are the x-/y-row strides of the cell grid. */
void stream(int index){
const int pI = 19*index;
for (int q = 0; q < 9; q++){
// pdf-array offset towards the neighbour in direction _C[q]
const int nb= 19*(_C[q][0]+_C[q][1]*_xO+_C[q][2]*_yO);
_pdf2[pI+q] = _pdf1[pI+q-nb];
_pdf2[pI+18-q] = _pdf1[pI+18-q+nb];
}
// q=9 is the rest pdf (zero lattice velocity): nothing to stream
_pdf2[pI+9] = _pdf1[pI+9];
}
/** @brieff the collide step within pdf2 */
void collide(int index,int x, int y, int z){
// index of start of cell-local pdfs in AoS
const int pI = 19*index;
// compute and store density, velocity
double *vel = &_vel[3*index];
computeDensityAndVelocity(vel,_density[index],&_pdf2[pI]);
// collide (BGK); always handle pdfs no. q and inv(q)=18-q in one step
const double u2 = 1.0 - 1.5*(vel[0]*vel[0] + vel[1]*vel[1] + vel[2]*vel[2]);
// pdf 0,18
double cu = -vel[1]-vel[2];
int nb = -_xO-_yO;
double feq = _W[0]*_density[index]*(u2 + 3.0*cu + 4.5*cu*cu);
_pdf2[pI] -= _omega*(_pdf2[pI] - feq);
boundary(_pdf2,pI,x,y,z,0,_flag[index+nb],pI+19*nb);
feq -= 6.0*_W[0]*_density[index]*cu;
_pdf2[pI+18] -= _omega*(_pdf2[pI+18] - feq);
boundary(_pdf2,pI,x,y,z,18,_flag[index-nb],pI-19*nb);
// pdf 1,17
cu = -vel[0]-vel[2];
nb = -1-_yO;
feq = _W[1]*_density[index]*(u2 + 3.0*cu + 4.5*cu*cu);
_pdf2[pI+1] -= _omega*(_pdf2[pI+1] - feq);
boundary(_pdf2,pI,x,y,z,1,_flag[index+nb],pI+19*nb);
feq -= 6.0*_W[1]*_density[index]*cu;
_pdf2[pI+17] -= _omega*(_pdf2[pI+17] - feq);
boundary(_pdf2,pI,x,y,z,17,_flag[index-nb],pI-19*nb);
// pdf 2,16
cu = -vel[2];
nb = -_yO;
feq = _W[2]*_density[index]*(u2 + 3.0*cu + 4.5*cu*cu);
_pdf2[pI+2] -= _omega*(_pdf2[pI+2] - feq);
boundary(_pdf2,pI,x,y,z,2,_flag[index+nb],pI+19*nb);
feq -= 6.0*_W[2]*_density[index]*cu;
_pdf2[pI+16] -= _omega*(_pdf2[pI+16] - feq);
boundary(_pdf2,pI,x,y,z,16,_flag[index-nb],pI-19*nb);
// pdf 3,15
cu = vel[0]-vel[2];
nb = 1-_yO;
feq = _W[3]*_density[index]*(u2 + 3.0*cu + 4.5*cu*cu);
_pdf2[pI+3] -= _omega*(_pdf2[pI+3] - feq);
boundary(_pdf2,pI,x,y,z,3,_flag[index+nb],pI+19*nb);
feq -= 6.0*_W[3]*_density[index]*cu;
_pdf2[pI+15] -= _omega*(_pdf2[pI+15] - feq);
boundary(_pdf2,pI,x,y,z,15,_flag[index-nb],pI-19*nb);
// pdf 4,14
cu = vel[1]-vel[2];
nb = _xO-_yO;
feq = _W[4]*_density[index]*(u2 + 3.0*cu + 4.5*cu*cu);
_pdf2[pI+4] -= _omega*(_pdf2[pI+4] - feq);
boundary(_pdf2,pI,x,y,z,4,_flag[index+nb],pI+19*nb);
feq -= 6.0*_W[4]*_density[index]*cu;
_pdf2[pI+14] -= _omega*(_pdf2[pI+14] - feq);
boundary(_pdf2,pI,x,y,z,14,_flag[index-nb],pI-19*nb);
// pdf 5,13
cu = -vel[0]-vel[1];
nb = -1-_xO;
feq = _W[5]*_density[index]*(u2 + 3.0*cu + 4.5*cu*cu);
_pdf2[pI+5] -= _omega*(_pdf2[pI+5] - feq);
boundary(_pdf2,pI,x,y,z,5,_flag[index+nb],pI+19*nb);
feq -= 6.0*_W[5]*_density[index]*cu;
_pdf2[pI+13] -= _omega*(_pdf2[pI+13] - feq);
boundary(_pdf2,pI,x,y,z,13,_flag[index-nb],pI-19*nb);
// pdf 6,12
cu = -vel[1];
nb = -_xO;
feq = _W[6]*_density[index]*(u2 + 3.0*cu + 4.5*cu*cu);
_pdf2[pI+6] -= _omega*(_pdf2[pI+6] - feq);
boundary(_pdf2,pI,x,y,z,6,_flag[index+nb],pI+19*nb);
feq -= 6.0*_W[6]*_density[index]*cu;
_pdf2[pI+12] -= _omega*(_pdf2[pI+12] - feq);
boundary(_pdf2,pI,x,y,z,12,_flag[index-nb],pI-19*nb);
// pdf 7,11
cu = vel[0]-vel[1];
nb = 1-_xO;
feq = _W[7]*_density[index]*(u2 + 3.0*cu + 4.5*cu*cu);
_pdf2[pI+7] -= _omega*(_pdf2[pI+7] - feq);
boundary(_pdf2,pI,x,y,z,7,_flag[index+nb],pI+19*nb);
feq -= 6.0*_W[7]*_density[index]*cu;
_pdf2[pI+11] -= _omega*(_pdf2[pI+11] - feq);
boundary(_pdf2,pI,x,y,z,11,_flag[index-nb],pI-19*nb);
// pdf 8,10
cu = -vel[0];
nb = -1;
feq = _W[8]*_density[index]*(u2 + 3.0*cu + 4.5*cu*cu);
_pdf2[pI+8] -= _omega*(_pdf2[pI+8] - feq);
boundary(_pdf2,pI,x,y,z,8,_flag[index+nb],pI+19*nb);
feq -= 6.0*_W[8]*_density[index]*cu;
_pdf2[pI+10] -= _omega*(_pdf2[pI+10] - feq);
boundary(_pdf2,pI,x,y,z,10,_flag[index-nb],pI-19*nb);
// pdf 9
_pdf2[pI+9] -= _omega*(_pdf2[pI+9] - _W[9]*_density[index]*u2);
}
/** @brief takes care of the correct boundary treatment for the LB method
 * @param pdf particle distribution function field
 * @param index start index for current cell in pdf-array (= 19*cellIndex)
 * @param x the position in x direction of the cell
 * @param y the position in y direction of the cell
 * @param z the position in z direction of the cell
 * @param q distribution function number
 * @param flag boundary flag of neighbouring cell
 * @param nbIndex start index of neighbouring cell in pdf-array */
void boundary(double * const pdf, int index, int x, int y, int z, int q, const Flag &flag, int nbIndex){
if (flag!=FLUID){
if (flag==NO_SLIP){
// half-way bounce back: reflect the pdf into the opposite direction 18-q of the wall cell
pdf[nbIndex+18-q] = pdf[index+q];
}
else if (flag==MOVING_WALL){
// half-way bounce back + moving wall acceleration (only x-direction for wall supported at the moment);
// index/19 recovers the cell index to look up this cell's density
pdf[nbIndex+18-q] = pdf[index+q] - 6.0*_W[q]*_density[index/19]*(_C[q][0]*_wallVelocity[0]+_C[q][1]*_wallVelocity[1]+_C[q][2]*_wallVelocity[2]);
}
else if (flag==PERIODIC){
// periodic treatment: wrap the target coordinate to the ghost layer on the opposite side
int target[3] = {x,y,z};
if (target[0]+_C[q][0]==0){target[0] = _domainSizeX+1;} else if (target[0]+_C[q][0]==_domainSizeX+1){target[0] = 0;}
if (target[1]+_C[q][1]==0){target[1] = _domainSizeY+1;} else if (target[1]+_C[q][1]==_domainSizeY+1){target[1] = 0;}
if (target[2]+_C[q][2]==0){target[2] = _domainSizeZ+1;} else if (target[2]+_C[q][2]==_domainSizeZ+1){target[2] = 0;}
// linearised index of the wrapped (ghost) cell
const int periodicNb = target[0] + (_domainSizeX+2)*(target[1] + (_domainSizeY+2)*target[2]);
pdf[19*periodicNb+q] = pdf[index+q];
}
}
}
/** @brief refers to the LB method; computes density and velocity from the 19 pdfs of one cell
 * @param vel velocity (output, 3 components, lattice units)
 * @param density density (output)
 * @param pdf pointer to the 19 pdfs of the cell
 *
 * The statements are interleaved to reuse partial sums: the five pdfs with
 * positive x-velocity-component contribute to both density and vel[0].
 * NOTE: the exact summation order matters for bit-reproducibility of the
 * floating-point results -- do not reorder. */
void computeDensityAndVelocity(double * const vel, double &density, const double * const pdf){
// pdfs whose lattice velocity has a negative x-component (q=1,5,8,11,15)
vel[0] = -(pdf[1]+pdf[5]+pdf[8]+pdf[11]+pdf[15]);
// pdfs with positive x-component (q=3,7,10,13,17); reused below for vel[0]
density= pdf[3]+pdf[7]+pdf[10]+pdf[13]+pdf[17];
// y-momentum: positive-y pdfs minus negative-y pdfs
vel[1] = (pdf[4]+pdf[11]+pdf[12]+pdf[13]+pdf[18]) - (pdf[0]+pdf[5]+pdf[6]+pdf[7]+pdf[14]);
vel[0] = density + vel[0];
// add the remaining pdfs (zero x-component) to complete the density
density= density + pdf[0]+pdf[1]+pdf[2] + pdf[4]+pdf[5]+pdf[6] + pdf[8]+pdf[9] + pdf[11]+pdf[12] + pdf[14]+pdf[15]+pdf[16] + pdf[18];
// z-momentum: positive-z pdfs minus negative-z pdfs
vel[2] = (pdf[14]+pdf[15]+pdf[16]+pdf[17]+pdf[18]) - (pdf[0]+pdf[1]+pdf[2]+pdf[3]+pdf[4]);
// normalise momenta by density to obtain the velocity
vel[0] = vel[0]/density;
vel[1] = vel[1]/density;
vel[2] = vel[2]/density;
}
/** takes care of communication across one face in one direction.
 * @param pdf particle distribution function field to be exchanged
 * @param sendBuffer send buffer
 * @param recvBuffer receive buffer
 * @param nbFlagTo direction into which message is sent
 * @param nbFlagFrom direction from which message is received
 * @param startSend 3d coordinates that define the start of the data to be sent to neighbouring process
 * @param endSend 3d coordinates that define the end of the data to be sent to neighbouring process
 * @param startRecv 3d coordinates that define the start of the data to be received from neighbouring process
 * @param endRecv 3d coordinates that define the end of the data to be received from neighbouring process */
void communicatePart(double *pdf, double *sendBuffer, double *recvBuffer, NbFlag nbFlagTo, NbFlag nbFlagFrom,
tarch::la::Vector<3,int> startSend, tarch::la::Vector<3,int> endSend,
tarch::la::Vector<3,int> startRecv, tarch::la::Vector<3,int> endRecv){
#if (COUPLING_MD_PARALLEL==COUPLING_MD_YES)
// directions that point to LEFT/RIGHT,... -> same ordering as enums!
// row d lists the 5 pdf indices whose lattice velocity points towards face d;
// only these 5 of the 19 pdfs need to cross that face
const int directions[6][5] = { { 1, 5, 8,11,15}, { 3, 7,10,13,17},
{ 4,11,12,13,18}, { 0, 5, 6, 7,14},
{ 0, 1, 2, 3, 4}, {14,15,16,17,18} };
MPI_Request requests[2];
MPI_Status status[2];
// indices of the two coordinate axes spanning the communicated face, and
// the domain extents along those axes
tarch::la::Vector<2,int> plane;
tarch::la::Vector<2,int> domainSize;
// find out plane coordinates
if (nbFlagTo==LEFT || nbFlagTo==RIGHT){plane[0]=1; plane[1]=2; domainSize[0] = _domainSizeY; domainSize[1] = _domainSizeZ;
} else if (nbFlagTo==FRONT|| nbFlagTo==BACK) {plane[0]=0; plane[1]=2; domainSize[0] = _domainSizeX; domainSize[1] = _domainSizeZ;
} else if (nbFlagTo==TOP || nbFlagTo==BOTTOM){plane[0]=0; plane[1]=1; domainSize[0] = _domainSizeX; domainSize[1] = _domainSizeY; }
else { std::cout << "ERROR LBCouetteSolver::communicatePart: d >2 or d < 0!" << std::endl; exit(EXIT_FAILURE);}
// extract data and write to send buffer (5 pdfs per face cell)
tarch::la::Vector<3,int> coords(0);
for (coords[2] = startSend[2]; coords[2] < endSend[2]; coords[2]++){
for (coords[1] = startSend[1]; coords[1] < endSend[1]; coords[1]++){
for (coords[0] = startSend[0]; coords[0] < endSend[0]; coords[0]++){
for (int q = 0; q < 5; q++){
sendBuffer[q+5*getParBuf(coords[plane[0]],coords[plane[1]],domainSize[0],domainSize[1])] = pdf[directions[nbFlagTo][q]+19*get(coords[0],coords[1],coords[2])];
}
}}}
// send and receive data (tag 1000; MPI_PROC_NULL neighbours turn these into no-ops)
MPI_Irecv(recvBuffer,(domainSize[0]+2)*(domainSize[1]+2)*5,MPI_DOUBLE,_parallelNeighbours[nbFlagFrom],1000,MPI_COMM_WORLD,&requests[0]);
MPI_Isend(sendBuffer,(domainSize[0]+2)*(domainSize[1]+2)*5,MPI_DOUBLE,_parallelNeighbours[nbFlagTo], 1000,MPI_COMM_WORLD,&requests[1]);
MPI_Waitall(2,requests,status);
// write data back to pdf field; only ghost cells flagged PARALLEL_BOUNDARY are overwritten
if (_parallelNeighbours[nbFlagFrom]!=MPI_PROC_NULL){
for (coords[2] = startRecv[2]; coords[2] < endRecv[2]; coords[2]++){
for (coords[1] = startRecv[1]; coords[1] < endRecv[1]; coords[1]++){
for (coords[0] = startRecv[0]; coords[0] < endRecv[0]; coords[0]++){
for (int q = 0; q < 5; q++){
if (_flag[get(coords[0],coords[1],coords[2])] == PARALLEL_BOUNDARY){
pdf[directions[nbFlagTo][q]+19*get(coords[0],coords[1],coords[2])] = recvBuffer[q+5*getParBuf(coords[plane[0]],coords[plane[1]],domainSize[0],domainSize[1])];
}
}
}}}
}
#endif
}
/** @brief communicates the boundary (ghost-layer) pdf data between the different processes;
 *  performs one communicatePart() per face and direction (6 exchanges in total).
 *  Send ranges cover the first/last inner layer, receive ranges the opposite ghost layer. */
void communicate(){
#if (COUPLING_MD_PARALLEL==COUPLING_MD_YES)
// send from right to left
communicatePart(_pdf1,_sendBufferX,_recvBufferX,LEFT,RIGHT,
tarch::la::Vector<3,int>(1,1,1), tarch::la::Vector<3,int>(2,_domainSizeY+1,_domainSizeZ+1),
tarch::la::Vector<3,int>(_domainSizeX+1,1,1),tarch::la::Vector<3,int>(_domainSizeX+2,_domainSizeY+1,_domainSizeZ+1));
// send from left to right
communicatePart(_pdf1,_sendBufferX,_recvBufferX,RIGHT,LEFT,
tarch::la::Vector<3,int>(_domainSizeX,1,1),tarch::la::Vector<3,int>(_domainSizeX+1,_domainSizeY+1,_domainSizeZ+1),
tarch::la::Vector<3,int>(0,1,1), tarch::la::Vector<3,int>(1,_domainSizeY+1,_domainSizeZ+1));
// send from back to front (y ranges include the x ghost layers already exchanged above)
communicatePart(_pdf1,_sendBufferY,_recvBufferY,FRONT,BACK,
tarch::la::Vector<3,int>(0,1,1), tarch::la::Vector<3,int>(_domainSizeX+2,2,_domainSizeZ+1),
tarch::la::Vector<3,int>(0,_domainSizeY+1,1), tarch::la::Vector<3,int>(_domainSizeX+2,_domainSizeY+2,_domainSizeZ+1));
// send from front to back
communicatePart(_pdf1,_sendBufferY,_recvBufferY,BACK,FRONT,
tarch::la::Vector<3,int>(0,_domainSizeY,1), tarch::la::Vector<3,int>(_domainSizeX+2,_domainSizeY+1,_domainSizeZ+1),
tarch::la::Vector<3,int>(0,0,1), tarch::la::Vector<3,int>(_domainSizeX+2,1,_domainSizeZ+1));
// send from top to bottom (z ranges include x and y ghost layers)
communicatePart(_pdf1,_sendBufferZ,_recvBufferZ,BOTTOM,TOP,
tarch::la::Vector<3,int>(0,0,1), tarch::la::Vector<3,int>(_domainSizeX+2,_domainSizeY+2,2),
tarch::la::Vector<3,int>(0,0,_domainSizeZ+1), tarch::la::Vector<3,int>(_domainSizeX+2,_domainSizeY+2,_domainSizeZ+2));
// send from bottom to top
communicatePart(_pdf1,_sendBufferZ,_recvBufferZ,TOP,BOTTOM,
tarch::la::Vector<3,int>(0,0,_domainSizeZ), tarch::la::Vector<3,int>(_domainSizeX+2,_domainSizeY+2,_domainSizeZ+1),
tarch::la::Vector<3,int>(0,0,0), tarch::la::Vector<3,int>(_domainSizeX+2,_domainSizeY+2,1));
#endif
}
/** @brief relaxation frequency of the BGK collision operator (tau = 1/_omega) */
const double _omega;
/** @brief velocity of moving wall of Couette flow (stored in lattice units) */
tarch::la::Vector<3,double> _wallVelocity;
/** @brief particle distribution function field */
double *_pdf1{NULL};
/** @brief particle distribution function field (stores the old time step)*/
double *_pdf2{NULL};
/** @brief lattice velocities of the D3Q19 discretisation;
 *  ordered such that _C[18-q] = -_C[q]; q=9 is the rest velocity */
const int _C[19][3]{{ 0,-1,-1}, {-1, 0,-1}, { 0, 0,-1}, { 1, 0,-1}, { 0, 1,-1},
{-1,-1, 0}, { 0,-1, 0}, { 1,-1, 0}, {-1, 0, 0}, { 0, 0, 0}, { 1, 0, 0}, {-1, 1, 0}, { 0, 1, 0}, { 1, 1, 0},
{ 0,-1, 1}, {-1, 0, 1}, { 0, 0, 1}, { 1, 0, 1}, { 0, 1, 1}};
/** @brief lattice weights; symmetric: _W[18-q] = _W[q] */
const double _W[19]{1.0/36.0, 1.0/36.0, 1.0/18.0, 1.0/36.0, 1.0/36.0,
1.0/36.0, 1.0/18.0, 1.0/36.0, 1.0/18.0, 1.0/ 3.0, 1.0/18.0, 1.0/36.0, 1.0/18.0, 1.0/36.0,
1.0/36.0, 1.0/36.0, 1.0/18.0, 1.0/36.0, 1.0/36.0};
};
#endif // _MOLECULARDYNAMICS_COUPLING_SOLVERS_LBCOUETTESOLVER_H_
|
dgemm.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgemm.c, normal z -> d, Fri Sep 28 17:38:01 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_gemm
*
* Performs one of the matrix-matrix operations
*
* \f[ C = \alpha [op( A )\times op( B )] + \beta C, \f]
*
* where op( X ) is one of:
* \f[ op( X ) = X, \f]
* \f[ op( X ) = X^T, \f]
*
* alpha and beta are scalars, and A, B and C are matrices, with op( A )
* an m-by-k matrix, op( B ) a k-by-n matrix and C an m-by-n matrix.
*
*******************************************************************************
*
* @param[in] transa
* - PlasmaNoTrans: A is not transposed,
* - PlasmaTrans: A is transposed,
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] transb
* - PlasmaNoTrans: B is not transposed,
* - PlasmaTrans: B is transposed,
* - PlasmaConjTrans: B is conjugate transposed.
*
* @param[in] m
* The number of rows of the matrix op( A ) and of the matrix C.
* m >= 0.
*
* @param[in] n
* The number of columns of the matrix op( B ) and of the matrix C.
* n >= 0.
*
* @param[in] k
* The number of columns of the matrix op( A ) and the number of rows
* of the matrix op( B ). k >= 0.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] pA
* An lda-by-ka matrix, where ka is k when transa = PlasmaNoTrans,
* and is m otherwise.
*
* @param[in] lda
* The leading dimension of the array A.
* When transa = PlasmaNoTrans, lda >= max(1,m),
* otherwise, lda >= max(1,k).
*
* @param[in] pB
* An ldb-by-kb matrix, where kb is n when transb = PlasmaNoTrans,
* and is k otherwise.
*
* @param[in] ldb
* The leading dimension of the array B.
* When transb = PlasmaNoTrans, ldb >= max(1,k),
* otherwise, ldb >= max(1,n).
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] pC
* An ldc-by-n matrix. On exit, the array is overwritten by the m-by-n
* matrix ( alpha*op( A )*op( B ) + beta*C ).
*
* @param[in] ldc
* The leading dimension of the array C. ldc >= max(1,m).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_dgemm
* @sa plasma_cgemm
* @sa plasma_dgemm
* @sa plasma_sgemm
*
******************************************************************************/
int plasma_dgemm(plasma_enum_t transa, plasma_enum_t transb,
                 int m, int n, int k,
                 double alpha, double *pA, int lda,
                 double *pB, int ldb,
                 double beta, double *pC, int ldc)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((transa != PlasmaNoTrans) &&
        (transa != PlasmaTrans) &&
        (transa != PlasmaConjTrans)) {
        plasma_error("illegal value of transa");
        return -1;
    }
    if ((transb != PlasmaNoTrans) &&
        (transb != PlasmaTrans) &&
        (transb != PlasmaConjTrans)) {
        plasma_error("illegal value of transb");
        return -2;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -3;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -4;
    }
    if (k < 0) {
        plasma_error("illegal value of k");
        return -5;
    }

    // Dimensions of op(A) and op(B) as stored (am x an, bm x bn).
    int am, an;
    int bm, bn;
    if (transa == PlasmaNoTrans) {
        am = m;
        an = k;
    }
    else {
        am = k;
        an = m;
    }
    if (transb == PlasmaNoTrans) {
        bm = k;
        bn = n;
    }
    else {
        bm = n;
        bn = k;
    }
    if (lda < imax(1, am)) {
        plasma_error("illegal value of lda");
        return -8;
    }
    if (ldb < imax(1, bm)) {
        plasma_error("illegal value of ldb");
        return -10;
    }
    if (ldc < imax(1, m)) {
        plasma_error("illegal value of ldc");
        return -13;
    }

    // quick return
    if (m == 0 || n == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0))
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_gemm(plasma, PlasmaRealDouble, m, n, k);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    plasma_desc_t C;
    int retval;
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        am, an, 0, 0, am, an, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        bm, bn, 0, 0, bm, bn, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        m, n, 0, 0, m, n, &C);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // Initialize sequence.
    // Fix: the return value was previously assigned but never checked, so a
    // failed initialization left an undefined sequence/request in use below.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        plasma_desc_destroy(&C);
        return retval;
    }

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        plasma_desc_destroy(&C);
        return retval;
    }

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_dge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_dge2desc(pB, ldb, B, &sequence, &request);
        plasma_omp_dge2desc(pC, ldc, C, &sequence, &request);

        // Call the tile async function.
        plasma_omp_dgemm(transa, transb,
                         alpha, A,
                                B,
                         beta,  C,
                         &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_ddesc2ge(C, pC, ldc, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);
    plasma_desc_destroy(&C);

    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_gemm
*
* Performs matrix multiplication.
* Non-blocking tile version of plasma_dgemm().
* May return before the computation is finished.
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] transa
* - PlasmaNoTrans: A is not transposed,
* - PlasmaTrans: A is transposed,
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] transb
* - PlasmaNoTrans: B is not transposed,
* - PlasmaTrans: B is transposed,
* - PlasmaConjTrans: B is conjugate transposed.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* Descriptor of matrix A.
*
* @param[in] B
* Descriptor of matrix B.
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] C
* Descriptor of matrix C.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_dgemm
* @sa plasma_omp_cgemm
* @sa plasma_omp_dgemm
* @sa plasma_omp_sgemm
*
******************************************************************************/
void plasma_omp_dgemm(plasma_enum_t transa, plasma_enum_t transb,
                      double alpha, plasma_desc_t A,
                                    plasma_desc_t B,
                      double beta,  plasma_desc_t C,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Check sequence and request before anything else.
    // Fix: previously these NULL checks came last, after earlier error paths
    // had already called plasma_request_fail(sequence, request, ...) with the
    // potentially-NULL pointers (and the NULL-sequence branch itself passed
    // the NULL sequence to plasma_request_fail).
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        // Record the failure on the (valid) sequence; request cannot be used.
        sequence->status = PlasmaErrorIllegalValue;
        return;
    }

    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((transa != PlasmaNoTrans) &&
        (transa != PlasmaTrans) &&
        (transa != PlasmaConjTrans)) {
        plasma_error("illegal value of transa");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((transb != PlasmaNoTrans) &&
        (transb != PlasmaTrans) &&
        (transb != PlasmaConjTrans)) {
        plasma_error("illegal value of transb");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(C) != PlasmaSuccess) {
        plasma_error("invalid C");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    int k = transa == PlasmaNoTrans ? A.n : A.m;
    if (C.m == 0 || C.n == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0))
        return;

    // Call the parallel function.
    plasma_pdgemm(transa, transb,
                  alpha, A,
                         B,
                  beta,  C,
                  sequence, request);
}
|
Par-32-ParSectionsAssignments.c |
int main(int argc, char **argv) {
int a[4] = {1,2,3,4};
// The three sections below may run concurrently on different threads.
// NOTE(review): a[0] is written by the first AND third section, and a[2] by
// the second AND third section, with no ordering between them -> data races;
// the final values of a[0] and a[2] are nondeterministic. Given the file name,
// this appears to be an intentional race test case -- confirm before "fixing".
#pragma omp parallel
{
#pragma omp sections
{
#pragma omp section
{
a[0] = 2;
a[3] = 1;
}
#pragma omp section
{
a[1] = 3;
a[2] = 1;
}
#pragma omp section
{
// races with the writes to a[2] and a[0] in the sections above
a[2] = 10;
a[0] = 2;
}
}
}
return 0;
}
|
GB_unaryop__minv_fp64_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_fp64_uint32
// op(A') function: GB_tran__minv_fp64_uint32
// C type: double
// A type: uint32_t
// cast: double cij = (double) aij
// unaryop: cij = 1./aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = 1./x ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FP64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = 1. / ((double) Ax [p]) for all anz entries, parallelised with a
// static schedule over nthreads. (Auto-generated file: comments only.)
GrB_Info GB_unop__minv_fp64_uint32
(
double *restrict Cx,
const uint32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// expands to: double x = (double) Ax [p] ; Cx [p] = 1./x ;
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv ((double) A'): transpose, typecast, and apply the unary operator.
// The loop body lives in the shared template GB_unaryop_transpose.c;
// GB_PHASE_2_OF_2 selects the second (output-writing) pass of the two-phase
// transpose. (Auto-generated file: comments only.)
GrB_Info GB_tran__minv_fp64_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
jacobi.c | #include <stdio.h>
#include "jacobi.h"
/* One Jacobi sweep: each interior point of the m x n grid (1..m, 1..n,
 * surrounded by a halo) becomes the mean of its four nearest neighbours. */
void jacobistep(double **psinew, double **psi, int m, int n)
{
  int row, col;

#pragma omp parallel for default(none), private(row,col), shared(psi,psinew,m,n)
  for (row = 1; row <= m; row++)
  {
    for (col = 1; col <= n; col++)
    {
      psinew[row][col] = 0.25*(psi[row-1][col]+psi[row+1][col]+psi[row][col-1]+psi[row][col+1]);
    }
  }
}
/* One Jacobi sweep of the coupled streamfunction-vorticity system.
 * psinew: Poisson step for the streamfunction with the vorticity as source.
 * zetnew: neighbour average of the vorticity minus the nonlinear advection
 *         term scaled by the Reynolds number re. */
void jacobistepvort(double **zetnew, double **psinew,
                    double **zet, double **psi,
                    int m, int n, double re)
{
  int row, col;

  /* streamfunction update */
#pragma omp parallel for default(none), \
  private(row,col), shared(psi,psinew,zet,m,n)
  for (row = 1; row <= m; row++)
  {
    for (col = 1; col <= n; col++)
    {
      psinew[row][col] = 0.25*( psi[row-1][col]+psi[row+1][col]+psi[row][col-1]+psi[row][col+1]
                              - zet[row][col] );
    }
  }

  /* vorticity update */
#pragma omp parallel for default(none), \
  private(row,col), shared(zet,zetnew,psi,m,n,re)
  for (row = 1; row <= m; row++)
  {
    for (col = 1; col <= n; col++)
    {
      zetnew[row][col] = 0.25*(zet[row-1][col]+zet[row+1][col]+zet[row][col-1]+zet[row][col+1])
                       - re/16.0*(
                             ( psi[row][col+1]-psi[row][col-1])*(zet[row+1][col]-zet[row-1][col])
                           - (psi[row+1][col]-psi[row-1][col])*(zet[row][col+1]-zet[row][col-1])
                         );
    }
  }
}
/* Sum of squared differences between corresponding interior entries
 * (rows 1..m, cols 1..n) of newarr and oldarr.  Used by the caller as a
 * convergence measure between successive Jacobi iterates. */
double deltasq(double **newarr, double **oldarr, int m, int n)
{
  int r, c;
  double diff;
  double total = 0.0;

#pragma omp parallel for default(none), \
  private(r,c,diff), shared(newarr,oldarr,m,n), reduction(+:total)
  for (r = 1; r <= m; r++)
  {
    for (c = 1; c <= n; c++)
    {
      diff = newarr[r][c] - oldarr[r][c];
      total += diff * diff;
    }
  }

  return total;
}
|
jacobi-omp5.c | /*
* BSD 2-Clause License
*
* Copyright (c) 2020, Alessandro Capotondi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file jacobi.c
* @author Alessandro Capotondi
* @date 27 Mar 2020
* @brief This code solves the steady state heat equation on a rectangular region.
* This code solves the steady state heat equation on a rectangular region.
* The sequential version of this program needs approximately
* 18/epsilon iterations to complete.
* The physical region, and the boundary conditions, are suggested
* by this diagram;
* W = 0
* +------------------+
* | |
* W = 100 | | W = 100
* | |
* +------------------+
* W = 100
* The region is covered with a grid of M by N nodes, and an N by N
* array W is used to record the temperature. The correspondence between
* array indices and locations in the region is suggested by giving the
* indices of the four corners:
* I = 0
* [0][0]-------------[0][N-1]
* | |
* J = 0 | | J = N-1
* | |
* [M-1][0]-----------[M-1][N-1]
* I = M-1
* The steady state solution to the discrete heat equation satisfies the
* following condition at an interior grid point:
* W[Central] = (1/4) * ( W[North] + W[South] + W[East] + W[West] )
* where "Central" is the index of the grid point, "North" is the index
* of its immediate neighbor to the "north", and so on.
*
* Given an approximate solution of the steady state heat equation, a
* "better" solution is given by replacing each interior point by the
* average of its 4 neighbors - in other words, by using the condition
* as an ASSIGNMENT statement:
* W[Central] <= (1/4) * ( W[North] + W[South] + W[East] + W[West] )
* If this process is repeated often enough, the difference between successive
* estimates of the solution will go to zero.
* This program carries out such an iteration, using a tolerance specified by
* the user, and writes the final estimate of the solution to a file that can
* be used for graphic processing.
* icensing:
* This code is distributed under the GNU LGPL license.
* odified:
* 18 October 2011
* uthor:
* Original C version by Michael Quinn.
* This C version by John Burkardt.
* eference:
* Michael Quinn,
* Parallel Programming in C with MPI and OpenMP,
* McGraw-Hill, 2004,
* ISBN13: 978-0071232654,
* LC: QA76.73.C15.Q55.
* ocal parameters:
* Local, double DIFF, the norm of the change in the solution from one iteration
* to the next.
* Local, double MEAN, the average of the boundary values, used to initialize
* the values of the solution in the interior.
* Local, double U[M][N], the solution at the previous iteration.
* Local, double W[M][N], the solution computed at the latest iteration.
*
*
* @see https://en.wikipedia.org/wiki/Jacobi_method
* @see http://algo.ing.unimo.it/people/andrea/Didattica/HPC/index.html
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include "utils.h"
static int N;
static int MAX_ITERATIONS;
static int SEED;
static double CONVERGENCE_THRESHOLD;
static FILE *data;
#define SEPARATOR "------------------------------------\n"
// Return the current time in seconds since the Epoch
double get_timestamp();
// Parse command line arguments to set solver parameters
void parse_arguments(int argc, char *argv[]);
// Run the Jacobi solver on the device.
// A:    N*N grid holding the current solution; updated in place (the final
//       iterate is copied back to the host on exit).
// xtmp: N*N scratch grid for the next iterate (device-only storage).
// Iterates until the largest per-cell change (err) is <= CONVERGENCE_THRESHOLD
// or MAX_ITERATIONS is reached.
// Returns the number of iterations performed
int run(double *restrict A, double *restrict xtmp)
{
    int iter = 0, iterations_print = 1;
    double err = 0.0;

    // Ship A to the device once; xtmp only needs device storage (alloc).
#pragma omp target enter data map(to \
                              : A [0:N * N]) map(alloc \
                                                 : xtmp [0:N * N])
    do
    {
        err = 0.0;
        // One Jacobi sweep over the interior: xtmp = 4-neighbour average of A,
        // err tracks the maximum absolute per-cell change.
        // NOTE(review): err is mapped tofrom on the target region and reduced
        // with reduction(max) on the distribute parallel for -- confirm the
        // target compiler combines the reduction across ALL teams (OpenMP 4.5+
        // semantics); otherwise err may only reflect one team's maximum.
#pragma omp target teams num_teams(N / NTHREADS_GPU) thread_limit(NTHREADS_GPU) map(tofrom \
                                                                                    : err)
#pragma omp distribute parallel for collapse(2) num_threads(NTHREADS_GPU) dist_schedule(static, NTHREADS_GPU) reduction(max \
                                                                                                                        : err)
        for (int i = 1; i < N - 1; i++)
        {
            for (int j = 1; j < N - 1; j++)
            {
                xtmp[i * N + j] = 0.25 * (A[(i - 1) * N + j] + A[(i + 1) * N + j] + A[i * N + j - 1] + A[i * N + j + 1]);
                err = fmax(err, fabs(xtmp[i * N + j] - A[i * N + j]));
            }
        }
        //#pragma omp target update from(xtmp[0:N*N])
        // Copy the new iterate back into A on the device (borders included).
#pragma omp target teams num_teams(N / NTHREADS_GPU) thread_limit(NTHREADS_GPU)
#pragma omp distribute parallel for collapse(2) num_threads(NTHREADS_GPU) dist_schedule(static, NTHREADS_GPU)
        for (int i = 0; i < N; i++)
        {
            for (int j = 0; j < N; j++)
            {
                A[i * N + j] = xtmp[i * N + j];
            }
        }
        iter++;
#ifdef DEBUG
        // report the error at power-of-two iteration counts
        if (iter == iterations_print)
        {
            printf(" %8d %f\n", iter, err);
            iterations_print = 2 * iterations_print;
        }
#endif
    } while (err > CONVERGENCE_THRESHOLD && iter < MAX_ITERATIONS);

    // Copy the final solution back to the host and release device buffers.
#pragma omp target exit data map(from \
                             : A [0:N * N]) map(release \
                                                : xtmp)
    return iter;
}
/**
 * Program entry point: parse options (which also opens the input data file),
 * load the initial N*N grid, run the solver, and report iterations/timing.
 * Returns 0 on success, 1 on allocation or input failure.
 */
int main(int argc, char *argv[])
{
    parse_arguments(argc, argv);

    // BUGFIX: allocation results were previously unchecked; also promote the
    // size computation to size_t before multiplying.
    double *A = malloc((size_t)N * N * sizeof(double));
    double *xtmp = malloc((size_t)N * N * sizeof(double));
    if (A == NULL || xtmp == NULL)
    {
        fprintf(stderr, "Failed to allocate two %dx%d grids\n", N, N);
        return 1;
    }

    printf(SEPARATOR);
    printf("Matrix size: %dx%d\n", N, N);
    printf("Maximum iterations: %d\n", MAX_ITERATIONS);
    printf("Convergence threshold: %lf\n", CONVERGENCE_THRESHOLD);
    printf(SEPARATOR);

    // Load the initial grid: the data file contains N*N raw doubles.
    // BUGFIX: a short read previously went unnoticed and left garbage in A.
    for (int ii = 0; ii < N; ii++)
    {
        for (int jj = 0; jj < N; jj++)
        {
            double f;
            if (fread(&f, sizeof(double), 1, data) != 1)
            {
                fprintf(stderr, "Short read from input data file\n");
                return 1;
            }
            A[ii * N + jj] = f;
        }
    }

    // Run Jacobi solver
    start_timer();
    int itr = run(A, xtmp);
    stop_timer();

    printf("Iterations = %d\n", itr);
    printf("Solver runtime = %lf ms\n", elapsed_ns() / 1E6);
    if (itr == MAX_ITERATIONS)
        printf("WARNING: solution did not converge\n");
    printf(SEPARATOR);

    free(A);
    free(xtmp);
    fclose(data);
    return 0;
}
/**
 * Parse a decimal integer from str.
 * Returns -1 if the string is empty or contains trailing non-digit characters.
 * BUGFIX: previously used strtoul(), which parses into unsigned range, and
 * silently accepted the empty string as the value 0.
 */
int parse_int(const char *str)
{
    char *next;
    long value = strtol(str, &next, 10);
    if (next == str || *next != '\0')
        return -1;  // nothing consumed, or trailing garbage
    return (int)value;
}
/**
 * Parse a decimal floating-point value from str.
 * Returns -1 if the string is empty or contains trailing garbage.
 * BUGFIX: previously the empty string parsed as 0.0 and was accepted.
 */
double parse_double(const char *str)
{
    char *next;
    double value = strtod(str, &next);
    if (next == str || *next != '\0')
        return -1;  // nothing consumed, or trailing garbage
    return value;
}
/**
 * Parse command-line options into the solver's file-scope parameters and open
 * the input data file matching the chosen matrix order.  Exits on any invalid
 * option.
 * BUGFIX: the fopen() result was never checked, so a missing data file left
 * `data` NULL and main() crashed on the first fread().
 */
void parse_arguments(int argc, char *argv[])
{
    // Set default values
    N = 500;
    MAX_ITERATIONS = 2000;
    CONVERGENCE_THRESHOLD = 0.001;
    SEED = 0;

    for (int i = 1; i < argc; i++)
    {
        if (!strcmp(argv[i], "--convergence") || !strcmp(argv[i], "-c"))
        {
            if (++i >= argc || (CONVERGENCE_THRESHOLD = parse_double(argv[i])) < 0)
            {
                printf("Invalid convergence threshold\n");
                exit(1);
            }
        }
        else if (!strcmp(argv[i], "--iterations") || !strcmp(argv[i], "-i"))
        {
            if (++i >= argc || (MAX_ITERATIONS = parse_int(argv[i])) < 0)
            {
                printf("Invalid number of iterations\n");
                exit(1);
            }
        }
        else if (!strcmp(argv[i], "--norder") || !strcmp(argv[i], "-n"))
        {
            if (++i >= argc || (N = parse_int(argv[i])) < 0)
            {
                printf("Invalid matrix order\n");
                exit(1);
            }
        }
        else if (!strcmp(argv[i], "--help") || !strcmp(argv[i], "-h"))
        {
            printf("\n");
            printf("Usage: ./jacobi [OPTIONS]\n\n");
            printf("Options:\n");
            printf(" -h --help Print this message\n");
            printf(" -c --convergence C Set convergence threshold\n");
            printf(" -i --iterations I Set maximum number of iterations\n");
            printf(" -n --norder N Set matrix order (500 or 1000)\n");
            printf("\n");
            exit(0);
        }
        else
        {
            printf("Unrecognized argument '%s' (try '--help')\n", argv[i]);
            exit(1);
        }
    }

    // Only the two orders we ship pre-generated data for are supported.
    if (N == 1000)
        data = fopen("data/jacobi-1000.bin", "rb");
    else if (N == 500)
        data = fopen("data/jacobi-500.bin", "rb");
    else
    {
        printf("Invalid matrix order\n");
        exit(1);
    }
    if (data == NULL)
    {
        printf("Failed to open input data file\n");
        exit(1);
    }
}
|
ve_urpc.c | /**
* TODO: add License header
*
* VE specific parts of VE-URPC.
*
* (C)opyright 2020 Erich Focht
*/
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <assert.h>
#include <errno.h>
#include <sys/mman.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <pthread.h>
#include <vhshm.h>
#include <vedma.h>
#include "urpc_common.h"
#include "urpc_time.h"
/*
 * Attach the VH sysV shared memory segment identified by up->shm_segid and
 * register it to the DMAATB so it is DMA-accessible under up->shm_vehva.
 * Returns 0 on success, -ENOMEM on failure.
 * (Cleanup: removed four locals -- ds, remote_vehva, remote_addr, err --
 * that were declared but never used.)
 */
static int vhshm_register(urpc_peer_t *up)
{
    dprintf("VE: shm_segid = %d\n", up->shm_segid);

    //
    // attach shared memory VH address space and register it to DMAATB;
    // the region is accessible for DMA under its VEHVA in up->shm_vehva
    //
    up->shm_addr = vh_shmat(up->shm_segid, NULL, 0, (void **)&up->shm_vehva);
    if (up->shm_addr == NULL) {
        eprintf("VE: (shm_addr == NULL)\n");
        return -ENOMEM;
    }
    if (up->shm_vehva == (uint64_t)-1) {
        eprintf("VE: failed to attach to shm segment %d, shm_vehva=-1\n", up->shm_segid);
        return -ENOMEM;
    }
    return 0;
}
/**
 * @brief Pin the thread to a core. In case of OpenMP: pin all threads to consecutive cores.
 *
 * The VE side process must be pinned to a core because it is not allowed to change the core
 * and the DMA descriptor set.
 *
 * @param core the core of the main thread or thread #0 (for OpenMP)
 */
static void _pin_threads_to_cores(int core)
{
#ifdef _OPENMP
#pragma omp parallel
    {
        int thr = omp_get_thread_num();
        cpu_set_t set;
        memset(&set, 0, sizeof(cpu_set_t));
        // BUGFIX: use 1UL -- the mask word is an unsigned long; a plain int
        // literal shift is UB once (thr + core) >= 31
        set.__bits[0] = (1UL << (thr + core));
        pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &set);
    }
#else
    cpu_set_t set;
    memset(&set, 0, sizeof(cpu_set_t));
    set.__bits[0] = (1UL << core);
    pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &set);
#endif
    // NOTE(review): only __bits[0] is written, so cores >= 64 are unreachable;
    // presumably fine for VE core counts -- confirm.
}
/**
 * @brief Unpin the thread(s), i.e. allow scheduling on any VE core.
 *
 * This function is needed when spawning a new pthread inside a VE handler
 * (NEWPEER).  Without unpinning the new pthread inherits the parent's mask
 * and is scheduled on the same core, competing with the parent.
 */
void ve_urpc_unpin(void)
{
    // BUGFIX: use 1UL -- with a plain int literal this shift is UB once
    // MAX_VE_CORES >= 31.  (Cleanup: dropped the unused omp_get_thread_num()
    // result; the mask is identical for every thread.)
    uint64_t mask = (1UL << MAX_VE_CORES) - 1;
#ifdef _OPENMP
#pragma omp parallel
    {
        // each OpenMP thread must reset its own affinity mask
        cpu_set_t set;
        memset(&set, 0, sizeof(cpu_set_t));
        set.__bits[0] = mask;
        pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &set);
    }
#else
    cpu_set_t set;
    memset(&set, 0, sizeof(cpu_set_t));
    set.__bits[0] = mask;
    pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &set);
#endif
}
/* Reset a communicator: buffer 0 covers the whole data area and is the
 * active one, buffer 1 is empty; (re)initialize the mutex. */
static void ve_urpc_comm_init(urpc_comm_t *uc)
{
    uc->mem[0].begin = 0;
    uc->mem[0].end = DATA_BUFF_END;
    uc->mem[1].begin = uc->mem[1].end = 0;
    uc->active = uc->mem;
    pthread_mutex_init(&uc->lock, NULL);
}
// TODO: add pinning to a VE core!
/*
 * First-stage peer initialization: attach/register the VH shm segment, set up
 * the transfer queues, allocate and DMA-register the 64MiB-aligned mirror
 * buffer, and initialize the handler table.  Must be called from the main
 * thread (DMA registration requirement).  Returns the new peer, or NULL with
 * errno set on failure.
 */
urpc_peer_t *ve_urpc_init(int segid)
{
    int err = 0;
    char *e;

    // reject forked/cloned callers: DMA registration only works on the main thread
    long syscall(long n, ...);
    pid_t tid = syscall(SYS_gettid);
    if (getpid() != tid) {
        eprintf("You called ve_urpc_init() inside a forked/cloned thread.\n");
        eprintf("VE DMA registration must be called from the main thread!\n");
        return NULL;
    }

    urpc_peer_t *up = (urpc_peer_t *)malloc(sizeof(urpc_peer_t));
    if (up == NULL) {
        eprintf("unable to allocate urpc_peer struct memory\n");
        errno = ENOMEM;
        return NULL;
    }
    //
    // shm_segid is either in argument or in environment variable
    //
    if (segid) {
        up->shm_segid = segid;
    } else {
        if ((e = getenv("URPC_SHM_SEGID")) != NULL)
            up->shm_segid = atol(e);
        else {
            eprintf("env variable URPC_SHM_SEGID not found.\n");
            free(up);
            errno = ENOENT;
            return NULL;
        }
    }

    // find and register shm segment
    err = vhshm_register(up);
    if (err) {
        free(up);
        up = NULL;
        eprintf("VE: vh_shm_register failed, err=%d.\n", err);
        return NULL;
    }

    up->recv.tq = (transfer_queue_t *)(up->shm_vehva);
    up->send.tq = (transfer_queue_t *)(up->shm_vehva + URPC_BUFF_LEN);
    up->recv.shm_data_vehva = up->shm_vehva + offsetof(transfer_queue_t, data);
    up->send.shm_data_vehva = up->shm_vehva + URPC_BUFF_LEN
        + offsetof(transfer_queue_t, data);

    ve_urpc_comm_init(&up->send);

    char *buff_base;
    uint64_t buff_base_vehva;
    size_t align_64mb = 64 * 1024 * 1024;
    size_t buff_size = 2 * URPC_BUFF_LEN;
    buff_size = (buff_size + align_64mb - 1) & ~(align_64mb - 1);

    // allocate read and write buffers in one call.
    // BUGFIX: posix_memalign() reports failure via its return value and
    // leaves the pointer UNMODIFIED on failure, so the old NULL test on
    // mirr_buff could read an uninitialized pointer.
    err = posix_memalign(&up->mirr_buff, align_64mb, buff_size);
    if (err != 0) {
        eprintf("VE: allocating urpc mirror buffer failed! buffsize=%lu\n", buff_size);
        // NOTE(review): the attached shm segment is not detached on this
        // path (same as before); consider a full unwind.
        free(up);
        errno = ENOMEM;
        return NULL;
    }
    dprintf("ve allocated buff at %p, size=%lu\n", up->mirr_buff, buff_size);
    buff_base = (char *)up->mirr_buff;
    buff_base_vehva = ve_register_mem_to_dmaatb(buff_base, buff_size);
    if (buff_base_vehva == (uint64_t)-1) {
        eprintf("VE: mapping urpc mirror buffer failed! buffsize=%lu\n", buff_size);
        // BUGFIX: release the buffer and peer struct instead of leaking them
        free(up->mirr_buff);
        free(up);
        errno = ENOMEM;
        return NULL;
    }
    dprintf("ve_register_mem_to_dmaatb succeeded for %p\n", buff_base);

    up->recv.mirr_data_buff = buff_base + offsetof(transfer_queue_t, data);
    up->send.mirr_data_buff = buff_base + URPC_BUFF_LEN
        + offsetof(transfer_queue_t, data);
    up->recv.mirr_data_vehva = buff_base_vehva + offsetof(transfer_queue_t, data);
    up->send.mirr_data_vehva = buff_base_vehva + URPC_BUFF_LEN
        + offsetof(transfer_queue_t, data);

    // initialize handler table
    // NOTE(review): inclusive bound (<=) assumes up->handler has
    // URPC_MAX_HANDLERS + 1 slots -- confirm against the array declaration.
    for (int i = 0; i <= URPC_MAX_HANDLERS; i++)
        up->handler[i] = NULL;
    urpc_run_handler_init_hooks(up);

    // don't remove this
    up->core = -1;
    return up;
}
/**
 * Second stage of initialization: optionally pin to a VE core, then set up
 * user DMA.  Split out of ve_urpc_init() because the VE-SHM calls there can
 * only be made from the main thread.
 *
 * @param up   the URPC peer
 * @param core VE core to pin to; if negative, the URPC_VE_CORE environment
 *             variable is consulted; if still negative, no pinning happens
 * @return 0 on success, -1 if DMA initialization failed
 */
int ve_urpc_init_dma(urpc_peer_t *up, int core)
{
    // pinning to a VE core must happen before initializing UDMA
    if (core < 0) {
        char *env = getenv("URPC_VE_CORE");
        if (env != NULL)
            core = atoi(env);
    }
    if (core >= 0)
        _pin_threads_to_cores(core);
    up->core = core;

    // Initialize DMA
    if (ve_dma_init()) {
        eprintf("Failed to initialize DMA\n");
        return -1;
    }
    return 0;
}
/* Tear down a URPC peer: unregister the mirror buffer from the DMAATB, free
 * it, detach the VH sysV shm segment, and release the peer struct itself. */
void ve_urpc_fini(urpc_peer_t *up)
{
    // unregister local buffer from DMAATB (vehva points past the header)
    if (ve_unregister_mem_from_dmaatb(up->recv.mirr_data_vehva -
                                      offsetof(transfer_queue_t, data)))
        eprintf("VE: Failed to unregister local buffer from DMAATB\n");

    // free the mirror buffer
    free(up->mirr_buff);

    // detach VH sysV shm segment
    if (up->shm_addr) {
        if (vh_shmdt(up->shm_addr)) {
            eprintf("VE: Failed to detach from VH sysV shm\n");
        } else {
            up->shm_addr = NULL;
            up->shm_vehva = 0;
        }
    }
    free(up);
}
/*
 * Synchronously DMA-transfer `len` bytes between the SHM area and the mirror
 * buffer (either direction; caller passes the VEHVAs).  The transfer length
 * must be smaller than the maximum doable in one DMA descriptor (<128MiB).
 * Returns 0 on success or the DMA exception bitmask from ve_dma_post_wait().
 */
int ve_transfer_data_sync(uint64_t dst_vehva, uint64_t src_vehva, int len)
{
    int err;

    err = ve_dma_post_wait(dst_vehva, src_vehva, len);
    if (err) {
        pid_t pid = getpid();
        eprintf("DMA encountered exception, rc=0x%x\n", err);
        // decode the exception bits; only the first matching bit is reported
        if (err & 0x8000) {
            // NOTE(review): same message as the 0x4000 branch below -- one of
            // the two looks like a copy-paste slip; confirm the bit meanings
            // against the VE DMA documentation.
            eprintf("memory protection exception\n");
        } else if (err & 0x4000) {
            eprintf("memory protection exception\n");
        } else if (err & 0x2000) {
            eprintf("missing space exception\n");
        } else if (err & 0x1000) {
            eprintf("memory access exception\n");
        } else if (err & 0x0800) {
            eprintf("I/O access exception\n");
        }
        // park the process so a developer can attach a debugger post-mortem
        eprintf("Sleeping for 40s such that you can attach a debugger.\n");
        eprintf("Command: /opt/nec/ve/bin/gdb -p %d\n", pid);
        sleep(40);
    }
    return err;
}
/*
 * URPC progress function.
 * Process at most 'ncmds' requests from the RECV communicator, dispatching
 * each to its registered handler and releasing its queue slot.
 * Returns the number of requests processed.
 * (Cleanup: the old version declared an outer `req` that was immediately
 * shadowed inside the loop, plus an unused `dhq_req`.)
 */
int ve_urpc_recv_progress(urpc_peer_t *up, int ncmds)
{
    int done = 0;
    urpc_mb_t m;
    urpc_comm_t *uc = &up->recv;
    transfer_queue_t *tq = uc->tq;
    void *payload;
    size_t plen;

    while (done < ncmds) {
        int64_t req = urpc_get_cmd(tq, &m);
        if (req < 0)
            break;   // queue empty
        //
        // set/receive payload, if needed
        //
        set_recv_payload(uc, &m, &payload, &plen);
        //
        // call handler
        //
        urpc_handler_func func = up->handler[m.c.cmd];
        if (func) {
            int err = func(up, &m, req, payload, plen);
            if (err)
                eprintf("Warning: RPC handler %d returned %d\n",
                        m.c.cmd, err);
        }
        // mark the slot free so the sender can reuse it
        urpc_slot_done(tq, REQ2SLOT(req), &m);
        ++done;
    }
    return done;
}
/*
 * Progress loop with timeout: keep calling ve_urpc_recv_progress() until no
 * request has arrived for `timeout_us` microseconds.
 * Returns the total number of requests processed.
 * BUGFIX: the function is declared to return int but had no return statement,
 * so callers received an indeterminate value (undefined behavior in C).
 */
int ve_urpc_recv_progress_timeout(urpc_peer_t *up, int ncmds, long timeout_us)
{
    long idle_since = 0;   // us timestamp of first idle poll; 0 = busy
    int total = 0;

    do {
        int done = ve_urpc_recv_progress(up, ncmds);
        total += done;
        if (done == 0) {
            if (idle_since == 0)
                idle_since = get_time_us();
        } else {
            idle_since = 0;
        }
    } while (idle_since == 0 || timediff_us(idle_since) < timeout_us);

    return total;
}
|
profiler_interface.h | /*
# =============================================================================
# Copyright (c) 2016 - 2021 Blue Brain Project/EPFL
#
# See top-level LICENSE file for details.
# =============================================================================
*/
#pragma once
#include <initializer_list>
#include <type_traits>
#if defined(CORENEURON_CALIPER)
#include <caliper/cali.h>
#endif
#if defined(CORENEURON_CUDA_PROFILING)
#include <cuda_profiler_api.h>
#endif
#if defined(CRAYPAT)
#include <pat_api.h>
#endif
#if defined(TAU)
#include <TAU.h>
#endif
#if defined(LIKWID_PERFMON)
#include <likwid.h>
#endif
namespace coreneuron {
namespace detail {
/*! \class Instrumentor
 * \brief Instrumentation infrastructure for benchmarking and profiling.
 *
 * The Instrumentor class exposes static methods that can be used to
 * toggle with fine-grained resolution the profiling of specific
 * areas within the code.
 *
 * Each method fans the call out to every backend in the TProfilerImpl
 * parameter pack.  The `std::initializer_list<int>{(Impl::fn(...), 0)...}`
 * idiom evaluates the call for each pack member in order (a pre-C++17
 * substitute for a fold expression); the unused-value warning it triggers
 * is suppressed by the clang pragmas below.
 */
template <class... TProfilerImpl>
struct Instrumentor {
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-value"
    /*! \fn phase_begin
     * \brief Activate the collection of profiling data within a code region.
     *
     * This function semantically defines the beginning of a region
     * of code that the user wishes to profile.
     * Loops through all enabled profilers and calls the relevant
     * `phase_begin` function.
     * This function should have a non-empty implementation only for
     * profilers that allow multiple code regions with different names
     * to be profiled concurrently.
     *
     * @param name the (unique) identifier of the code region to be profiled
     */
    inline static void phase_begin(const char* name) {
        std::initializer_list<int>{(TProfilerImpl::phase_begin(name), 0)...};
    }

    /*! \fn phase_end
     * \brief Deactivate the collection of profiling data within a code region.
     *
     * This function semantically defines the end of a region
     * of code that the user wishes to profile.
     * Loops through all enabled profilers and calls the relevant
     * `phase_end` function.
     * This function should have a non-empty implementation only for
     * profilers that allow multiple code regions with different names
     * to be profiled concurrently.
     *
     * @param name the (unique) identifier of the code region to be profiled
     */
    inline static void phase_end(const char* name) {
        std::initializer_list<int>{(TProfilerImpl::phase_end(name), 0)...};
    }

    /*! \fn start_profile
     * \brief Globally activate the collection of profiling data.
     *
     * Activate the collection of profiler data without defining
     * a region of interest with a given name, as opposed to `phase_begin`.
     * Loops through all enabled profilers and calls the relevant
     * `start_profile` function.
     * This function should have a non-empty implementation only for
     * profilers that expose simply a global begin/end interface, without
     * named regions.
     */
    inline static void start_profile() {
        std::initializer_list<int>{(TProfilerImpl::start_profile(), 0)...};
    }

    /*! \fn stop_profile
     * \brief Globally deactivate the collection of profiling data.
     *
     * Deactivate the collection of profiler data without defining
     * a region of interest with a given name, as opposed to `phase_end`.
     * Loops through all enabled profilers and calls the relevant
     * `stop_profile` function.
     * This function should have a non-empty implementation only for
     * profilers that expose simply a global begin/end interface, without
     * named regions.
     */
    inline static void stop_profile() {
        std::initializer_list<int>{(TProfilerImpl::stop_profile(), 0)...};
    }

    /*! \fn init_profile
     * \brief Initialize the profiler.
     *
     * Initialize a profiler's internal structure, without activating yet
     * any data collection, similar in concept to MPI_Init.
     * Loops through all enabled profilers and calls the relevant
     * `init_profile` function.
     * This function should have a non-empty implementation only for
     * profilers that require special initialization, typically before
     * any memory allocation is done.
     */
    inline static void init_profile() {
        std::initializer_list<int>{(TProfilerImpl::init_profile(), 0)...};
    }

    /*! \fn finalize_profile
     * \brief Finalize the profiler.
     *
     * Finalize a profiler's internal structure, without activating yet
     * any data collection, similar in concept to MPI_Finalize.
     * Loops through all enabled profilers and calls the relevant
     * `finalize_profile` function.
     * This function should have a non-empty implementation only for
     * profilers that require special finalization.
     */
    inline static void finalize_profile() {
        std::initializer_list<int>{(TProfilerImpl::finalize_profile(), 0)...};
    }
#pragma clang diagnostic pop
};
#if defined(CORENEURON_CALIPER)
// Caliper backend: supports named begin/end region markers only.
struct Caliper {
    inline static void phase_begin(const char* name) {
        CALI_MARK_BEGIN(name);
    };

    inline static void phase_end(const char* name) {
        CALI_MARK_END(name);
    };

    inline static void start_profile(){};
    inline static void stop_profile(){};
    inline static void init_profile(){};
    inline static void finalize_profile(){};
};
#endif

#if defined(CORENEURON_CUDA_PROFILING)
// CUDA backend: global start/stop of the CUDA profiler, no named regions.
struct CudaProfiling {
    inline static void phase_begin(const char* name){};
    inline static void phase_end(const char* name){};

    inline static void start_profile() {
        cudaProfilerStart();
    };

    inline static void stop_profile() {
        cudaProfilerStop();
    };

    inline static void init_profile(){};
    inline static void finalize_profile(){};
};
#endif

#if defined(CRAYPAT)
// CrayPat backend: toggles recording globally, no named regions.
struct CrayPat {
    inline static void phase_begin(const char* name){};
    inline static void phase_end(const char* name){};

    inline static void start_profile() {
        PAT_record(PAT_STATE_ON);
    };

    inline static void stop_profile() {
        PAT_record(PAT_STATE_OFF);
    };

    inline static void init_profile(){};
    inline static void finalize_profile(){};
};
#endif

#if defined(TAU)
// TAU backend: enables/disables instrumentation globally.
struct Tau {
    inline static void phase_begin(const char* name){};
    inline static void phase_end(const char* name){};

    inline static void start_profile() {
        TAU_ENABLE_INSTRUMENTATION();
    };

    inline static void stop_profile() {
        TAU_DISABLE_INSTRUMENTATION();
    };

    inline static void init_profile(){};
    inline static void finalize_profile(){};
};
#endif

#if defined(LIKWID_PERFMON)
// LIKWID backend: named markers plus per-thread marker initialization.
struct Likwid {
    inline static void phase_begin(const char* name) {
        LIKWID_MARKER_START(name);
    };

    inline static void phase_end(const char* name) {
        LIKWID_MARKER_STOP(name);
    };

    inline static void start_profile(){};
    inline static void stop_profile(){};

    inline static void init_profile() {
        LIKWID_MARKER_INIT;
        // every OpenMP thread must register itself with the marker API
#pragma omp parallel
        { LIKWID_MARKER_THREADINIT; }
    };

    inline static void finalize_profile() {
        LIKWID_MARKER_CLOSE;
    };
};
#endif

// No-op backend; keeps the Instrumentor pack non-empty when nothing is enabled.
struct NullInstrumentor {
    inline static void phase_begin(const char* name){};
    inline static void phase_end(const char* name){};
    inline static void start_profile(){};
    inline static void stop_profile(){};
    inline static void init_profile(){};
    inline static void finalize_profile(){};
};

// The active profiler stack: every compiled-in backend, always terminated by
// NullInstrumentor so the parameter pack is never empty.
using InstrumentorImpl = detail::Instrumentor<
#if defined CORENEURON_CALIPER
    detail::Caliper,
#endif
#if defined(CORENEURON_CUDA_PROFILING)
    detail::CudaProfiling,
#endif
#if defined(CRAYPAT)
    detail::CrayPat,
#endif
#if defined(TAU)
    detail::Tau,
#endif
#if defined(LIKWID_PERFMON)
    detail::Likwid,
#endif
    detail::NullInstrumentor>;
} // namespace detail
namespace Instrumentor {
/*! \brief Scope guard for a named profiling phase: the constructor calls
 *  phase_begin and the destructor calls phase_end, so a phase spans the
 *  guard's lexical scope. */
struct phase {
    const char* phase_name;

    phase(const char* name)
        : phase_name(name) {
        detail::InstrumentorImpl::phase_begin(phase_name);
    }
    ~phase() {
        detail::InstrumentorImpl::phase_end(phase_name);
    }
};

// Free-function wrappers forwarding to the configured backend stack.
inline static void start_profile() {
    detail::InstrumentorImpl::start_profile();
}

inline static void stop_profile() {
    detail::InstrumentorImpl::stop_profile();
}

inline static void phase_begin(const char* name) {
    detail::InstrumentorImpl::phase_begin(name);
}

inline static void phase_end(const char* name) {
    detail::InstrumentorImpl::phase_end(name);
}

inline static void init_profile() {
    detail::InstrumentorImpl::init_profile();
}

inline static void finalize_profile() {
    detail::InstrumentorImpl::finalize_profile();
}
}  // namespace Instrumentor
} // namespace coreneuron
|
WaterSurfaceMesh.h | #pragma once
#include <Magnum/GL/Buffer.h>
#include <Magnum/DefaultFramebuffer.h>
#include <Magnum/Image.h>
#include <Magnum/ImageView.h>
#include <Magnum/Math/Color.h>
#include <Magnum/Mesh.h>
#include <Magnum/MeshTools/Compile.h>
#include <Magnum/MeshTools/CompressIndices.h>
#include <Magnum/MeshTools/Interleave.h>
#include <Magnum/PixelFormat.h>
#include <Magnum/Primitives/Cube.h>
#include <Magnum/Primitives/Icosphere.h>
#include <Magnum/Primitives/Plane.h>
#include <Magnum/Primitives/UVSphere.h>
#include <Magnum/GL/Renderer.h>
#include <Magnum/SceneGraph/Camera.h>
#include <Magnum/SceneGraph/Drawable.h>
#include <Magnum/SceneGraph/MatrixTransformation3D.h>
#include <Magnum/SceneGraph/Scene.h>
#include <Magnum/GL/Shader.h>
#include <Magnum/Shaders/Flat.h>
#include <Magnum/Shaders/Generic.h>
#include <Magnum/Shaders/MeshVisualizer.h>
#include <Magnum/Shaders/Phong.h>
#include <Magnum/Shaders/VertexColor.h>
#include <Magnum/GL/Texture.h>
#include <Magnum/GL/TextureFormat.h>
#include <Magnum/Trade/MeshData3D.h>
#include <iostream>
#include "../base/SceneBase3D.h"
#include "WaterSurfaceShader.h"
#include "../../ProfileBuffer.h"
namespace Magnum {
// Drawable water-surface mesh: stores per-vertex wave amplitudes and renders
// them with WaterSurfaceShader, using a 1D texture for the wave profile.
class WaterSurfaceMesh : public SceneBase3D::Object3D, public SceneGraph::Drawable3D {
public:
  // Per-vertex payload uploaded to the shader: position plus one wave
  // amplitude per discretized direction (DIR_NUM of them).
  struct VertexData {
    Vector3 position;
    Math::Vector<DIR_NUM, Float> amplitude;
  };

public:
  explicit WaterSurfaceMesh(SceneBase3D::Object3D * parent,
                            SceneGraph::DrawableGroup3D *group, int n);

public:
  // Apply `fun(i, vertex)` to every vertex (OpenMP-parallel), then re-upload
  // the vertex buffer.  `fun` must be safe to run concurrently on distinct
  // vertices; each iteration touches only _data[i].
  template <class Fun> void setVertices(Fun fun) {
    //    std::vector<VertexData> newData = _data;
#pragma omp parallel for
    for (size_t i = 0; i < _data.size(); i++) {
      fun(i, _data[i]);
    }
    bindBuffers(_data);
  }

  // Upload the given profile buffer into _profileTexture (defined elsewhere).
  void loadProfile(WaterWavelets::ProfileBuffer const& profileBuffer);

  // Flip wireframe/triangulation rendering on or off.
  void showTriangulationToggle();

protected:
  void bindBuffers(std::vector<VertexData> const &data);
  void bindTexture();

private:
  void draw(const Matrix4 & transformationMatrix,
            SceneGraph::Camera3D &camera) override;

public:
  GL::Mesh _mesh;
  GL::Buffer _vertexBuffer, _indexBuffer;
  bool _showTriangulation = false;           // toggled by showTriangulationToggle()
  Shaders::WaterSurfaceShader _shader;
  std::vector<VertexData> _data;             // CPU-side copy of the vertex data
  std::vector<UnsignedInt> _indices;
  GL::Texture1D _profileTexture;             // wave profile sampled by the shader
};
} // namespace Magnum
|
rwpng.c | /*---------------------------------------------------------------------------
pngquant: RGBA -> RGBA-palette quantization program rwpng.c
---------------------------------------------------------------------------
© 1998-2000 by Greg Roelofs.
© 2009-2014 by Kornel Lesiński.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "png.h"
#include "rwpng.h"
#if USE_LCMS
#include "lcms2.h"
#endif
#ifndef Z_BEST_COMPRESSION
#define Z_BEST_COMPRESSION 9
#endif
#ifndef Z_BEST_SPEED
#define Z_BEST_SPEED 1
#endif
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_max_threads() 1
#endif
#if PNG_LIBPNG_VER < 10600
typedef png_const_charp png_const_bytep;
#endif
static void rwpng_error_handler(png_structp png_ptr, png_const_charp msg);
static void rwpng_warning_stderr_handler(png_structp png_ptr, png_const_charp msg);
static void rwpng_warning_silent_handler(png_structp png_ptr, png_const_charp msg);
int rwpng_read_image24_cocoa(FILE *infile, png24_image *mainprog_ptr);
/* Print which image-reading backend and libpng version this build uses, plus
 * warnings for libpng versions known to misbehave. */
void rwpng_version_info(FILE *fp)
{
    const char *pngver = png_get_header_ver(NULL);

#if USE_COCOA
    fprintf(fp, " Using Apple Cocoa image reader and libpng %s.\n", pngver);
#elif USE_LCMS
    fprintf(fp, " Using libpng %s with Little CMS color profile support.\n", pngver);
#else
    fprintf(fp, " Using libpng %s.\n", pngver);
#endif

#if PNG_LIBPNG_VER < 10600
    // NOTE(review): lexicographic strcmp against "1.3."/"1.6." is used as a
    // version comparison; it works for single-digit minor versions but would
    // misorder e.g. "1.10." -- confirm acceptable for the libpng versions
    // this branch can see (< 1.6 by the guard above).
    if (strcmp(pngver, "1.3.") < 0) {
        fputs("\nWARNING: Your version of libpng is outdated and may produce corrupted files.\n"
              "Please recompile pngquant with the current version of libpng (1.6 or later).\n", fp);
    } else if (strcmp(pngver, "1.6.") < 0) {
#if defined(PNG_UNKNOWN_CHUNKS_SUPPORTED)
        fputs("\nWARNING: Your version of libpng is old and has buggy support for custom chunks.\n"
              "Please recompile pngquant with the current version of libpng (1.6 or later).\n", fp);
#endif
    }
#endif
}
/* I/O context for user_read_data(): the source stream plus a running count
 * of bytes consumed from it. */
struct rwpng_read_data {
    FILE *const fp;
    png_size_t bytes_read;   // total bytes read so far
};
/* libpng read callback: pull the next `length` bytes from the FILE* stashed
 * in the png io_ptr; raise a libpng error (longjmp) when nothing is read. */
static void user_read_data(png_structp png_ptr, png_bytep data, png_size_t length)
{
    struct rwpng_read_data *ctx = (struct rwpng_read_data *)png_get_io_ptr(png_ptr);

    png_size_t got = fread(data, 1, length, ctx->fp);
    if (got == 0) {
        png_error(png_ptr, "Read error");
    }
    ctx->bytes_read += got;
}
/* I/O context for user_write_data(). */
struct rwpng_write_state {
    FILE *outfile;
    png_size_t maximum_file_size;  // 0 = unlimited; otherwise cap on output size
    png_size_t bytes_written;
    pngquant_error retval;         // sticky status: once != SUCCESS, writes stop
};
/* libpng write callback: append `length` bytes to the output stream unless a
 * previous call already failed.  Failures are latched in ctx->retval instead
 * of being raised through png_error(). */
static void user_write_data(png_structp png_ptr, png_bytep data, png_size_t length)
{
    struct rwpng_write_state *ctx = (struct rwpng_write_state *)png_get_io_ptr(png_ptr);

    // an earlier failure makes all further writes no-ops
    if (ctx->retval != SUCCESS) {
        return;
    }

    // enforce the optional output size cap (0 means unlimited)
    if (ctx->maximum_file_size && ctx->bytes_written + length > ctx->maximum_file_size) {
        ctx->retval = TOO_LARGE_FILE;
    }

    if (fwrite(data, 1, length, ctx->outfile) == 0) {
        ctx->retval = CANT_WRITE_ERROR;
    }

    ctx->bytes_written += length;
}
/* libpng flush callback: required by the write-fn interface but never
 * actually invoked by libpng. */
static void user_flush_data(png_structp png_ptr)
{
    // libpng never calls this :(
}
/* Build an array of per-row pointers into the contiguous pixel buffer `base`.
 * If rowbytes is 0 the stride is taken from libpng.  Returns NULL on OOM.
 * The caller frees only the returned pointer array, not the pixel data. */
static png_bytepp rwpng_create_row_pointers(png_infop info_ptr, png_structp png_ptr, unsigned char *base, unsigned int height, unsigned int rowbytes)
{
    if (rowbytes == 0) {
        rowbytes = png_get_rowbytes(png_ptr, info_ptr);
    }

    png_bytepp rows = malloc(height * sizeof(rows[0]));
    if (rows == NULL) {
        return NULL;
    }

    for (unsigned int r = 0; r < height; r++) {
        rows[r] = base + r * rowbytes;
    }
    return rows;
}
/*
 * libpng user-chunk callback: copies every unknown chunk except color-profile
 * chunks (iCCP/cHRM/gAMA, handled elsewhere) onto the linked list whose head
 * lives behind png_get_user_chunk_ptr().
 *
 * Returns 1 when the chunk was captured ("handled" — libpng will not store
 * it itself) and 0 to let libpng treat the chunk as unrecognized, including
 * the out-of-memory case, where dropping the copy is the safest degradation.
 */
static int read_chunk_callback(png_structp png_ptr, png_unknown_chunkp in_chunk)
{
    if (0 == memcmp("iCCP", in_chunk->name, 5) ||
        0 == memcmp("cHRM", in_chunk->name, 5) ||
        0 == memcmp("gAMA", in_chunk->name, 5)) {
        return 0; // not handled
    }

    struct rwpng_chunk **head = (struct rwpng_chunk **)png_get_user_chunk_ptr(png_ptr);
    struct rwpng_chunk *chunk = malloc(sizeof(struct rwpng_chunk));
    if (!chunk) {
        return 0; // OOM: let libpng deal with the chunk instead of crashing
    }

    memcpy(chunk->name, in_chunk->name, 5);
    chunk->size = in_chunk->size;
    chunk->location = in_chunk->location;
    chunk->data = NULL;
    if (in_chunk->size) {
        chunk->data = malloc(in_chunk->size);
        if (!chunk->data) {
            free(chunk); // OOM mid-copy: release the node and defer to libpng
            return 0;
        }
        memcpy(chunk->data, in_chunk->data, in_chunk->size);
    }

    chunk->next = *head;
    *head = chunk;
    return 1; // marks as "handled", libpng won't store it
}
/*
retval:
0 = success
21 = bad sig
22 = bad IHDR
24 = insufficient memory
25 = libpng error (via longjmp())
26 = wrong PNG color type (no alpha channel)
*/
/*
 * Decode a PNG from `infile` into mainprog_ptr as 8-bit RGBA.
 *
 * Registers transforms so every input variant (palette, gray, low bit depth,
 * 16-bit, missing alpha) lands in the same RGBA8 layout, captures unknown
 * chunks via read_chunk_callback, and — when built with LCMS — converts
 * embedded ICC/cHRM+gAMA color to sRGB in place.
 *
 * On success fills width/height/gamma/rgba_data/row_pointers/chunks/file_size
 * and returns SUCCESS; the caller owns rgba_data, row_pointers and chunks.
 * Error returns: PNG_OUT_OF_MEMORY_ERROR, LIBPNG_FATAL_ERROR (via longjmp
 * from rwpng_error_handler), or 26 (no alpha support compiled in).
 */
pngquant_error rwpng_read_image24_libpng(FILE *infile, png24_image *mainprog_ptr, int verbose)
{
png_structp png_ptr = NULL;
png_infop info_ptr = NULL;
png_size_t rowbytes;
int color_type, bit_depth;
/* mainprog_ptr doubles as the libpng "error pointer": rwpng_error_handler
 * retrieves it and longjmps to mainprog_ptr->jmpbuf set below */
png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, mainprog_ptr,
rwpng_error_handler, verbose ? rwpng_warning_stderr_handler : rwpng_warning_silent_handler);
if (!png_ptr) {
return PNG_OUT_OF_MEMORY_ERROR; /* out of memory */
}
info_ptr = png_create_info_struct(png_ptr);
if (!info_ptr) {
png_destroy_read_struct(&png_ptr, NULL, NULL);
return PNG_OUT_OF_MEMORY_ERROR; /* out of memory */
}
/* setjmp() must be called in every function that calls a non-trivial
* libpng function */
if (setjmp(mainprog_ptr->jmpbuf)) {
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
return LIBPNG_FATAL_ERROR; /* fatal libpng error (via longjmp()) */
}
#if PNG_LIBPNG_VER >= 10500 && defined(PNG_UNKNOWN_CHUNKS_SUPPORTED)
/* copy standard chunks too */
png_set_keep_unknown_chunks(png_ptr, PNG_HANDLE_CHUNK_IF_SAFE, (png_const_bytep)"pHYs\0iTXt\0tEXt\0zTXt", 4);
#endif
/* unknown chunks accumulate on mainprog_ptr->chunks (see read_chunk_callback) */
png_set_read_user_chunk_fn(png_ptr, &mainprog_ptr->chunks, read_chunk_callback);
/* read through our own callback so we can count bytes (file_size below) */
struct rwpng_read_data read_data = {infile, 0};
png_set_read_fn(png_ptr, &read_data, user_read_data);
png_read_info(png_ptr, info_ptr); /* read all PNG info up to image data */
/* alternatively, could make separate calls to png_get_image_width(),
* etc., but want bit_depth and color_type for later [don't care about
* compression_type and filter_type => NULLs] */
png_get_IHDR(png_ptr, info_ptr, &mainprog_ptr->width, &mainprog_ptr->height,
&bit_depth, &color_type, NULL, NULL, NULL);
/* expand palette images to RGB, low-bit-depth grayscale images to 8 bits,
* transparency chunks to full alpha channel; strip 16-bit-per-sample
* images to 8 bits per sample; and convert grayscale to RGB[A] */
/* GRR TO DO: preserve all safe-to-copy ancillary PNG chunks */
if (!(color_type & PNG_COLOR_MASK_ALPHA)) {
#ifdef PNG_READ_FILLER_SUPPORTED
png_set_expand(png_ptr);
png_set_filler(png_ptr, 65535L, PNG_FILLER_AFTER);
#else
fprintf(stderr, "pngquant readpng: image is neither RGBA nor GA\n");
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
mainprog_ptr->retval = 26;
return mainprog_ptr->retval;
#endif
}
if (bit_depth == 16) {
png_set_strip_16(png_ptr);
}
if (!(color_type & PNG_COLOR_MASK_COLOR)) {
png_set_gray_to_rgb(png_ptr);
}
/* get source gamma for gamma correction, or use sRGB default */
double gamma = 0.45455;
if (!png_get_valid(png_ptr, info_ptr, PNG_INFO_sRGB)) {
png_get_gAMA(png_ptr, info_ptr, &gamma);
if (gamma < 0 || gamma > 1.0) {
fprintf(stderr, "pngquant readpng: ignored out-of-range gamma %f\n", gamma);
gamma = 0.45455;
}
}
mainprog_ptr->gamma = gamma;
png_set_interlace_handling(png_ptr);
/* all transformations have been registered; now update info_ptr data,
* get rowbytes and channels, and allocate image memory */
png_read_update_info(png_ptr, info_ptr);
rowbytes = png_get_rowbytes(png_ptr, info_ptr);
if ((mainprog_ptr->rgba_data = malloc(rowbytes*mainprog_ptr->height)) == NULL) {
fprintf(stderr, "pngquant readpng: unable to allocate image data\n");
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
return PNG_OUT_OF_MEMORY_ERROR;
}
/* NOTE(review): row_pointers is not checked for NULL before use below;
 * an allocation failure here would crash in png_read_image — worth fixing */
png_bytepp row_pointers = rwpng_create_row_pointers(info_ptr, png_ptr, mainprog_ptr->rgba_data, mainprog_ptr->height, 0);
/* now we can go ahead and just read the whole image */
png_read_image(png_ptr, row_pointers);
/* and we're done! (png_read_end() can be omitted if no processing of
* post-IDAT text/time/etc. is desired) */
png_read_end(png_ptr, NULL);
#if USE_LCMS
/* ---- optional LCMS color management: convert to sRGB in place ---- */
#if PNG_LIBPNG_VER < 10500
png_charp ProfileData;
#else
png_bytep ProfileData;
#endif
png_uint_32 ProfileLen;
cmsHPROFILE hInProfile = NULL;
/* color_type is read from the image before conversion to RGBA */
int COLOR_PNG = color_type & PNG_COLOR_MASK_COLOR;
mainprog_ptr->lcms_status = NONE;
/* embedded ICC profile */
if (png_get_iCCP(png_ptr, info_ptr, &(png_charp){0}, &(int){0}, &ProfileData, &ProfileLen)) {
hInProfile = cmsOpenProfileFromMem(ProfileData, ProfileLen);
cmsColorSpaceSignature colorspace = cmsGetColorSpace(hInProfile);
/* only RGB (and GRAY) valid for PNGs */
if (colorspace == cmsSigRgbData && COLOR_PNG) {
mainprog_ptr->lcms_status = ICCP;
} else {
/* gray profile on a gray PNG: usable in principle, but flagged */
if (colorspace == cmsSigGrayData && !COLOR_PNG) {
mainprog_ptr->lcms_status = ICCP_WARN_GRAY;
}
cmsCloseProfile(hInProfile);
hInProfile = NULL;
}
}
/* build RGB profile from cHRM and gAMA */
if (hInProfile == NULL && COLOR_PNG &&
!png_get_valid(png_ptr, info_ptr, PNG_INFO_sRGB) &&
png_get_valid(png_ptr, info_ptr, PNG_INFO_gAMA) &&
png_get_valid(png_ptr, info_ptr, PNG_INFO_cHRM)) {
cmsCIExyY WhitePoint;
cmsCIExyYTRIPLE Primaries;
png_get_cHRM(png_ptr, info_ptr, &WhitePoint.x, &WhitePoint.y,
&Primaries.Red.x, &Primaries.Red.y,
&Primaries.Green.x, &Primaries.Green.y,
&Primaries.Blue.x, &Primaries.Blue.y);
WhitePoint.Y = Primaries.Red.Y = Primaries.Green.Y = Primaries.Blue.Y = 1.0;
cmsToneCurve *GammaTable[3];
/* one shared curve for all three channels; freed once below */
GammaTable[0] = GammaTable[1] = GammaTable[2] = cmsBuildGamma(NULL, 1/gamma);
hInProfile = cmsCreateRGBProfile(&WhitePoint, &Primaries, GammaTable);
cmsFreeToneCurve(GammaTable[0]);
mainprog_ptr->lcms_status = GAMA_CHRM;
}
/* transform image to sRGB colorspace */
if (hInProfile != NULL) {
cmsHPROFILE hOutProfile = cmsCreate_sRGBProfile();
cmsHTRANSFORM hTransform = cmsCreateTransform(hInProfile, TYPE_RGBA_8,
hOutProfile, TYPE_RGBA_8,
INTENT_PERCEPTUAL,
omp_get_max_threads() > 1 ? cmsFLAGS_NOCACHE : 0);
#pragma omp parallel for \
if (mainprog_ptr->height*mainprog_ptr->width > 8000) \
schedule(static)
for (unsigned int i = 0; i < mainprog_ptr->height; i++) {
/* It is safe to use the same block for input and output,
when both are of the same TYPE. */
cmsDoTransform(hTransform, row_pointers[i],
row_pointers[i],
mainprog_ptr->width);
}
cmsDeleteTransform(hTransform);
cmsCloseProfile(hOutProfile);
cmsCloseProfile(hInProfile);
/* pixels are sRGB now, so report the sRGB default gamma */
mainprog_ptr->gamma = 0.45455;
}
#endif
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
mainprog_ptr->file_size = read_data.bytes_read;
mainprog_ptr->row_pointers = (unsigned char **)row_pointers;
return SUCCESS;
}
/* Free a linked list of captured PNG chunks (name + payload nodes built by
 * read_chunk_callback). Iterative on purpose: the previous recursive version
 * used one stack frame per chunk and could overflow the stack on a crafted
 * file with a very long chunk list. Safe to call with NULL. */
static void rwpng_free_chunks(struct rwpng_chunk *chunk) {
    while (chunk) {
        struct rwpng_chunk *next = chunk->next;
        free(chunk->data);
        free(chunk);
        chunk = next;
    }
}
/* Release everything an RGBA image owns — the row-pointer array, the pixel
 * buffer, and the captured chunk list — and null the fields so a second
 * call (or a later accidental use) is harmless. */
void rwpng_free_image24(png24_image *image)
{
    /* free(NULL) is a no-op, so no guards are needed */
    free(image->row_pointers);
    free(image->rgba_data);
    rwpng_free_chunks(image->chunks);

    image->row_pointers = NULL;
    image->rgba_data = NULL;
    image->chunks = NULL;
}
/* Release everything a palette image owns — the indexed pixel buffer, the
 * row-pointer array, and the captured chunk list — nulling each field so
 * repeated calls are safe. */
void rwpng_free_image8(png8_image *image)
{
    /* free(NULL) is a no-op, so no guards are needed */
    free(image->indexed_data);
    free(image->row_pointers);
    rwpng_free_chunks(image->chunks);

    image->indexed_data = NULL;
    image->row_pointers = NULL;
    image->chunks = NULL;
}
/* Front-end for PNG decoding: dispatches at compile time to the Cocoa
 * reader (macOS builds with USE_COCOA) or the libpng reader. Note the
 * Cocoa path ignores `verbose`. */
pngquant_error rwpng_read_image24(FILE *infile, png24_image *input_image_p, int verbose)
{
#if USE_COCOA
return rwpng_read_image24_cocoa(infile, input_image_p);
#else
return rwpng_read_image24_libpng(infile, input_image_p, verbose);
#endif
}
/* Common setup for both PNG writers: creates the libpng write/info structs,
 * installs the longjmp-based error handler, and picks zlib settings.
 * On success the caller owns *png_ptr_p / *info_ptr_p and must eventually
 * destroy them (rwpng_write_end does this). Returns LIBPNG_INIT_ERROR on
 * any failure, with both structs already cleaned up. */
static pngquant_error rwpng_write_image_init(rwpng_png_image *mainprog_ptr, png_structpp png_ptr_p, png_infopp info_ptr_p, int fast_compression)
{
/* could also replace libpng warning-handler (final NULL), but no need: */
*png_ptr_p = png_create_write_struct(PNG_LIBPNG_VER_STRING, mainprog_ptr, rwpng_error_handler, NULL);
if (!(*png_ptr_p)) {
return LIBPNG_INIT_ERROR; /* out of memory */
}
*info_ptr_p = png_create_info_struct(*png_ptr_p);
if (!(*info_ptr_p)) {
png_destroy_write_struct(png_ptr_p, NULL);
return LIBPNG_INIT_ERROR; /* out of memory */
}
/* setjmp() must be called in every function that calls a PNG-writing
* libpng function, unless an alternate error handler was installed--
* but compatible error handlers must either use longjmp() themselves
* (as in this program) or exit immediately, so here we go: */
if (setjmp(mainprog_ptr->jmpbuf)) {
png_destroy_write_struct(png_ptr_p, info_ptr_p);
return LIBPNG_INIT_ERROR; /* libpng error (via longjmp()) */
}
png_set_compression_level(*png_ptr_p, fast_compression ? Z_BEST_SPEED : Z_BEST_COMPRESSION);
png_set_compression_mem_level(*png_ptr_p, fast_compression ? 9 : 5); // judging by optipng results, smaller mem makes libpng compress slightly better
return SUCCESS;
}
/* Finish a PNG write started via rwpng_write_image_init: emits the header
 * info, packs sub-byte samples, writes all rows, finalizes the stream, and
 * destroys both libpng structs. The call order is mandated by libpng and
 * must not change. Errors surface through the longjmp handler installed
 * earlier, not through a return value. */
void rwpng_write_end(png_infopp info_ptr_p, png_structpp png_ptr_p, png_bytepp row_pointers)
{
png_write_info(*png_ptr_p, *info_ptr_p);
/* pack 1/2/4-bit samples tightly instead of one sample per byte */
png_set_packing(*png_ptr_p);
png_write_image(*png_ptr_p, row_pointers);
png_write_end(*png_ptr_p, NULL);
png_destroy_write_struct(png_ptr_p, info_ptr_p);
}
/* Record color metadata for the output file: an explicit gAMA chunk plus
 * an sRGB chunk with perceptual rendering intent (0). */
void rwpng_set_gamma(png_infop info_ptr, png_structp png_ptr, double gamma)
{
/* remap sets gamma to 0.45455 */
png_set_gAMA(png_ptr, info_ptr, gamma);
png_set_sRGB(png_ptr, info_ptr, 0); // 0 = Perceptual
}
/*
 * Write a palette (indexed) PNG to `outfile`.
 *
 * Honors mainprog_ptr->maximum_file_size (write aborts with TOO_LARGE_FILE
 * once exceeded — enforced in user_write_data), chooses the smallest sample
 * depth the palette allows, replays the chunks captured at read time, and
 * writes PLTE/tRNS from the quantized palette. Returns the first error
 * recorded by the write callback, or SUCCESS.
 */
pngquant_error rwpng_write_image8(FILE *outfile, const png8_image *mainprog_ptr)
{
png_structp png_ptr;
png_infop info_ptr;
pngquant_error retval = rwpng_write_image_init((rwpng_png_image*)mainprog_ptr, &png_ptr, &info_ptr, mainprog_ptr->fast_compression);
if (retval) return retval;
/* designated initializer zeroes the remaining field (bytes_written) */
struct rwpng_write_state write_state;
write_state = (struct rwpng_write_state){
.outfile = outfile,
.maximum_file_size = mainprog_ptr->maximum_file_size,
.retval = SUCCESS,
};
png_set_write_fn(png_ptr, &write_state, user_write_data, user_flush_data);
// Palette images generally don't gain anything from filtering
png_set_filter(png_ptr, PNG_FILTER_TYPE_BASE, PNG_FILTER_VALUE_NONE);
rwpng_set_gamma(info_ptr, png_ptr, mainprog_ptr->gamma);
/* set the image parameters appropriately */
/* pick the smallest bit depth that can index the whole palette;
 * the final `sample_depth = 8;` is the else-branch of the chain when the
 * #if is active, and the unconditional assignment otherwise */
int sample_depth;
#if PNG_LIBPNG_VER > 10400 /* old libpng corrupts files with low depth */
if (mainprog_ptr->num_palette <= 2)
sample_depth = 1;
else if (mainprog_ptr->num_palette <= 4)
sample_depth = 2;
else if (mainprog_ptr->num_palette <= 16)
sample_depth = 4;
else
#endif
sample_depth = 8;
/* replay chunks preserved from the source file, one at a time */
struct rwpng_chunk *chunk = mainprog_ptr->chunks;
int chunk_num=0;
while(chunk) {
png_unknown_chunk pngchunk = {
.size = chunk->size,
.data = chunk->data,
.location = chunk->location,
};
memcpy(pngchunk.name, chunk->name, 5);
png_set_unknown_chunks(png_ptr, info_ptr, &pngchunk, 1);
#if defined(PNG_HAVE_IHDR) && PNG_LIBPNG_VER < 10600
/* older libpng needs the location set separately per chunk */
png_set_unknown_chunk_location(png_ptr, info_ptr, chunk_num, pngchunk.location ? pngchunk.location : PNG_HAVE_IHDR);
#endif
chunk = chunk->next;
chunk_num++;
}
png_set_IHDR(png_ptr, info_ptr, mainprog_ptr->width, mainprog_ptr->height,
sample_depth, PNG_COLOR_TYPE_PALETTE,
0, PNG_COMPRESSION_TYPE_DEFAULT,
PNG_FILTER_TYPE_BASE);
png_set_PLTE(png_ptr, info_ptr, &mainprog_ptr->palette[0], mainprog_ptr->num_palette);
if (mainprog_ptr->num_trans > 0) {
png_set_tRNS(png_ptr, info_ptr, mainprog_ptr->trans, mainprog_ptr->num_trans, NULL);
}
rwpng_write_end(&info_ptr, &png_ptr, mainprog_ptr->row_pointers);
return write_state.retval;
}
/*
 * Write an 8-bit RGBA PNG to `outfile` (no size cap here, unlike the
 * palette writer — output goes straight through png_init_io).
 * Returns SUCCESS, an init error, or PNG_OUT_OF_MEMORY_ERROR when the
 * row-pointer array cannot be allocated.
 */
pngquant_error rwpng_write_image24(FILE *outfile, const png24_image *mainprog_ptr)
{
    png_structp png_ptr;
    png_infop info_ptr;

    pngquant_error retval = rwpng_write_image_init((rwpng_png_image*)mainprog_ptr, &png_ptr, &info_ptr, 0);
    if (retval) return retval;

    png_init_io(png_ptr, outfile);
    rwpng_set_gamma(info_ptr, png_ptr, mainprog_ptr->gamma);

    png_set_IHDR(png_ptr, info_ptr, mainprog_ptr->width, mainprog_ptr->height,
                 8, PNG_COLOR_TYPE_RGB_ALPHA,
                 0, PNG_COMPRESSION_TYPE_DEFAULT,
                 PNG_FILTER_TYPE_BASE);

    png_bytepp row_pointers = rwpng_create_row_pointers(info_ptr, png_ptr, mainprog_ptr->rgba_data, mainprog_ptr->height, 0);
    if (!row_pointers) {
        /* previously unchecked: a NULL here crashed inside png_write_image */
        png_destroy_write_struct(&png_ptr, &info_ptr);
        return PNG_OUT_OF_MEMORY_ERROR;
    }

    rwpng_write_end(&info_ptr, &png_ptr, row_pointers);
    free(row_pointers);
    return SUCCESS;
}
/* libpng warning handler used in verbose mode: echoes the message to
 * stderr and lets decoding continue (warnings are non-fatal in libpng). */
static void rwpng_warning_stderr_handler(png_structp png_ptr, png_const_charp msg) {
fprintf(stderr, " %s\n", msg);
}
/* libpng warning handler used in quiet mode: intentionally swallows the
 * message (passing NULL to libpng would fall back to its default printer). */
static void rwpng_warning_silent_handler(png_structp png_ptr, png_const_charp msg) {
}
/* libpng fatal-error handler: prints the message, then longjmps back to
 * the setjmp() in whichever rwpng_* function started the libpng operation.
 * Never returns normally; aborts if the error pointer was never set. */
static void rwpng_error_handler(png_structp png_ptr, png_const_charp msg)
{
rwpng_png_image *mainprog_ptr;
/* This function, aside from the extra step of retrieving the "error
* pointer" (below) and the fact that it exists within the application
* rather than within libpng, is essentially identical to libpng's
* default error handler. The second point is critical: since both
* setjmp() and longjmp() are called from the same code, they are
* guaranteed to have compatible notions of how big a jmp_buf is,
* regardless of whether _BSD_SOURCE or anything else has (or has not)
* been defined. */
fprintf(stderr, " error: %s\n", msg);
fflush(stderr);
mainprog_ptr = png_get_error_ptr(png_ptr);
/* no jmpbuf to return through — cannot recover, so abort */
if (mainprog_ptr == NULL) abort();
longjmp(mainprog_ptr->jmpbuf, 1);
}
|
Gemm_MT_Loop3_MRxNRKernel.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include<immintrin.h>
/* Column-major element accessors; they expand against the A/B/C and
 * ldA/ldB/ldC identifiers in scope at the point of use. */
#define alpha( i,j ) A[ (j)*ldA + (i) ]   // map alpha( i,j ) to array A
#define beta( i,j )  B[ (j)*ldB + (i) ]   // map beta( i,j ) to array B
#define gamma( i,j ) C[ (j)*ldC + (i) ]   // map gamma( i,j ) to array C

/* Result operands are parenthesized too (previously only the comparison
 * was), so expansions embedded in larger expressions parse as intended. */
#define min( x, y ) ( ( x ) < ( y ) ? ( x ) : ( y ) )
void LoopFive( int, int, int, double *, int, double *, int, double *, int );
void LoopFour( int, int, int, double *, int, double *, int, double *, int );
void LoopThree( int, int, int, double *, int, double *, double *, int );
void LoopTwo( int, int, int, double *, double *, double *, int );
void LoopOne( int, int, int, double *, double *, double *, int );
void Gemm_MRxNRKernel_Packed( int, double *, double *, double *, int );
void PackBlockA_MCxKC( int, int, double *, int, double * );
void PackPanelB_KCxNC( int, int, double *, int, double * );
/*
 * Entry point: C := A B + C (column-major, leading dimensions ldA/ldB/ldC)
 * via the five-loop blocked-and-packed algorithm.
 * The packed kernels only handle whole MR x NR tiles, hence the divisibility
 * checks on m, n and the cache-blocking parameters MC, NC.
 */
void MyGemm( int m, int n, int k, double *A, int ldA,
             double *B, int ldB, double *C, int ldC )
{
    if ( m % MR != 0 || MC % MR != 0 ){
        /* was printf + exit(0): errors now go to stderr, and the exit
           status signals failure instead of success */
        fprintf( stderr, "m and MC must be multiples of MR\n" );
        exit( EXIT_FAILURE );
    }
    if ( n % NR != 0 || NC % NR != 0 ){
        fprintf( stderr, "n and NC must be multiples of NR\n" );
        exit( EXIT_FAILURE );
    }
    LoopFive( m, n, k, A, ldA, B, ldB, C, ldC );
}
/* Fifth (outermost) loop: march across C and B in column panels of
 * width NC, delegating each panel to LoopFour. */
void LoopFive( int m, int n, int k, double *A, int ldA,
               double *B, int ldB, double *C, int ldC )
{
    int j = 0;
    while ( j < n ) {
        int jb = min( NC, n-j );   /* trailing panel may be narrower than NC */
        LoopFour( m, jb, k, A, ldA, &beta( 0,j ), ldB, &gamma( 0,j ), ldC );
        j += NC;
    }
}
/*
 * Fourth loop: step through the k dimension in slabs of KC, packing each
 * KC x NC panel of B into a 64-byte-aligned buffer that is reused for
 * every slab, then handing it to LoopThree.
 */
void LoopFour( int m, int n, int k, double *A, int ldA, double *B, int ldB,
               double *C, int ldC )
{
    double *Btilde = ( double * ) _mm_malloc( KC * NC * sizeof( double ), 64 );
    if ( Btilde == NULL ){
        /* previously unchecked: packing into NULL would segfault */
        fprintf( stderr, "LoopFour: failed to allocate Btilde\n" );
        exit( EXIT_FAILURE );
    }

    for ( int p=0; p<k; p+=KC ) {
        int pb = min( KC, k-p );   /* Last loop may not involve a full block */
        PackPanelB_KCxNC( pb, n, &beta( p, 0 ), ldB, Btilde );
        LoopThree( m, n, pb, &alpha( 0, p ), ldA, Btilde, C, ldC );
    }

    _mm_free( Btilde );
}
/*
 * Third loop, parallelized over row blocks of A with OpenMP.
 *
 * Each thread packs into its own MC x KC slice of one shared aligned
 * allocation (indexed by omp_get_thread_num()), avoiding per-iteration
 * malloc/free. The m dimension is split into a load-balanced region —
 * a multiple of MC*threads rows so every thread packs the same number of
 * full MC blocks — plus a remainder distributed in MR-multiple chunks.
 *
 * Fixes: the buffer indexing previously read `Ã[...]`, a mojibake-corrupted
 * `&Atilde[...]` (the HTML entity `&Atilde;` decoded to `Ã`), which does not
 * compile; the allocation is now checked; two dead `#if 0` experiments were
 * removed.
 */
void LoopThree( int m, int n, int k, double *A, int ldA, double *Btilde, double *C, int ldC )
{
    int max_threads = omp_get_max_threads();

    double *Atilde = ( double * ) _mm_malloc( MC * KC * max_threads * sizeof( double ), 64 );
    if ( Atilde == NULL ){
        fprintf( stderr, "LoopThree: failed to allocate packing buffers\n" );
        exit( EXIT_FAILURE );
    }

    /* Rows handled as full MC blocks, evenly divisible among threads */
    int loadbalanced_part = (m / (MC * max_threads) ) * (MC * max_threads); // integer division /
    int remainder = m - loadbalanced_part;

    // Distribute remainder load equally among total threads but should be multiple of MR
    int remainder_per_thread = ( (remainder / max_threads ) / MR ) * MR;
    if (remainder_per_thread == 0 ) remainder_per_thread = MR;

    // Compute the loadbalanced part in parallel: every block here is a full MC
    #pragma omp parallel for
    for (int i = 0; i < loadbalanced_part; i += MC) {
        double *myAtilde = &Atilde[ MC * KC * omp_get_thread_num() ];
        PackBlockA_MCxKC( MC, k, &alpha( i, 0 ), ldA, myAtilde );
        LoopTwo( MC, n, k, myAtilde, Btilde, &gamma( i,0 ), ldC );
    }

    // Compute the rest in parallel
    #pragma omp parallel for
    for (int i=loadbalanced_part; i < m; i += remainder_per_thread ) {
        int ib = min( m-i, remainder_per_thread ); // last loop may not involve full block
        double *myAtilde = &Atilde[ MC * KC * omp_get_thread_num() ];
        PackBlockA_MCxKC( ib, k, &alpha( i, 0 ), ldA, myAtilde );
        LoopTwo( ib, n, k, myAtilde, Btilde, &gamma( i,0 ), ldC );
    }

    _mm_free( Atilde );
}
/* Second loop: slice the packed panel of B into NR-wide micro-panels
 * (each starting at column offset j*k within Btilde) and pass each one,
 * with the packed block of A, down to LoopOne. */
void LoopTwo( int m, int n, int k, double *Atilde, double *Btilde, double *C, int ldC )
{
    for ( int j=0; j<n; j+=NR ) {
        int width = min( NR, n-j );        /* trailing micro-panel may be narrower */
        double *MicroPanelB = &Btilde[ j*k ];
        LoopOne( m, width, k, Atilde, MicroPanelB, &gamma( 0,j ), ldC );
    }
}
/*
 * First loop: walk down the packed block of A in MR-row micro-panels
 * (panel i starts at offset i*k) and invoke the micro-kernel on each.
 * m is always a multiple of MR here (enforced in MyGemm / LoopThree), so
 * every panel is full-height.
 *
 * Fixes: `Ã[ i*k ]` was mojibake for `&Atilde[ i*k ]` (HTML entity
 * `&Atilde;` decoded to `Ã`) and did not compile; the unused local
 * `ib = min(MR, m-i)` was removed.
 */
void LoopOne( int m, int n, int k, double *Atilde, double *MicroPanelB, double *C, int ldC )
{
    for ( int i=0; i<m; i+=MR ) {
        Gemm_MRxNRKernel_Packed( k, &Atilde[ i*k ], MicroPanelB, &gamma( i,0 ), ldC );
    }
}
|
critical.c | //////////////////////////////////////////////////////////////
//
// critical.c
//
// Copyright (c) 2017, Hassan Salehe Matar
// All rights reserved.
//
// This file is part of Clanomp. For details, see
// https://github.com/hassansalehe/Clanomp. Please also
// see the LICENSE file for additional BSD notice
//
// Redistribution and use in source and binary forms, with
// or without modification, are permitted provided that
// the following conditions are met:
//
// * Redistributions of source code must retain the above
// copyright notice, this list of conditions and the
// following disclaimer.
//
// * Redistributions in binary form must reproduce the
// above copyright notice, this list of conditions and
// the following disclaimer in the documentation and/or
// other materials provided with the distribution.
//
// * Neither the name of the copyright holder nor the names
// of its contributors may be used to endorse or promote
// products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
// CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
// SUCH DAMAGE.
//
//////////////////////////////////////////////////////////////
// From the OpenMP specification:
// The critical construct restricts execution of the
// associated structured block to a single thread at a time.
//
// References:
// 1. http://www.openmp.org/wp-content/uploads/openmp-examples-4.5.0.pdf
// 2. http://www.openmp.org/wp-content/uploads/openmp-4.5.pdf
#include <stdio.h>
#include <omp.h>
int main() {
    // Shared counter: one increment per thread in the team.
    int count = 0;

    #pragma omp parallel shared(count)
    {
        // The critical construct serializes the updates, so there is
        // no data race and the final value equals the team size.
        #pragma omp critical
        {
            count += 1;
        }
    }

    printf("Value of count: %d, construct: <critical>\n", count);
    return 0;
}
tree.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_TREE_H_
#define LIGHTGBM_TREE_H_
#include <LightGBM/dataset.h>
#include <LightGBM/meta.h>
#include <string>
#include <map>
#include <memory>
#include <unordered_map>
#include <vector>
namespace LightGBM {
#define kCategoricalMask (1)
#define kDefaultLeftMask (2)
/*!
* \brief Tree model
*/
class Tree {
public:
/*!
* \brief Constructor
* \param max_leaves The number of max leaves
*/
explicit Tree(int max_leaves);
/*!
* \brief Constructor, from a string
* \param str Model string
* \param used_len used count of str
*/
Tree(const char* str, size_t* used_len);
~Tree();
/*!
* \brief Performing a split on tree leaves.
* \param leaf Index of leaf to be split
* \param feature Index of feature; the converted index after removing useless features
* \param real_feature Index of feature, the original index on data
* \param threshold_bin Threshold(bin) of split
* \param threshold_double Threshold on feature value
* \param left_value Model Left child output
* \param right_value Model Right child output
* \param left_cnt Count of left child
* \param right_cnt Count of right child
* \param left_weight Weight of left child
* \param right_weight Weight of right child
* \param gain Split gain
* \param missing_type missing type
* \param default_left default direction for missing value
* \return The index of new leaf.
*/
int Split(int leaf, int feature, int real_feature, uint32_t threshold_bin,
double threshold_double, double left_value, double right_value,
int left_cnt, int right_cnt, double left_weight, double right_weight,
float gain, MissingType missing_type, bool default_left);
/*!
* \brief Performing a split on tree leaves, with categorical feature
* \param leaf Index of leaf to be split
* \param feature Index of feature; the converted index after removing useless features
* \param real_feature Index of feature, the original index on data
* \param threshold_bin Threshold(bin) of split, use bitset to represent
* \param num_threshold_bin size of threshold_bin
* \param threshold Thresholds of real feature value, use bitset to represent
* \param num_threshold size of threshold
* \param left_value Model Left child output
* \param right_value Model Right child output
* \param left_cnt Count of left child
* \param right_cnt Count of right child
* \param left_weight Weight of left child
* \param right_weight Weight of right child
* \param gain Split gain
* \return The index of new leaf.
*/
int SplitCategorical(int leaf, int feature, int real_feature, const uint32_t* threshold_bin, int num_threshold_bin,
const uint32_t* threshold, int num_threshold, double left_value, double right_value,
int left_cnt, int right_cnt, double left_weight, double right_weight, float gain, MissingType missing_type);
/*! \brief Get the output of one leaf */
inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; }
/*! \brief Set the output of one leaf */
inline void SetLeafOutput(int leaf, double output) {
leaf_value_[leaf] = MaybeRoundToZero(output);
}
/*!
* \brief Adding prediction value of this tree model to scores
* \param data The dataset
* \param num_data Number of total data
* \param score Will add prediction to score
*/
void AddPredictionToScore(const Dataset* data,
data_size_t num_data,
double* score) const;
/*!
* \brief Adding prediction value of this tree model to scores
* \param data The dataset
* \param used_data_indices Indices of used data
* \param num_data Number of total data
* \param score Will add prediction to score
*/
void AddPredictionToScore(const Dataset* data,
const data_size_t* used_data_indices,
data_size_t num_data, double* score) const;
/*!
* \brief Get upper bound leaf value of this tree model
*/
double GetUpperBoundValue() const;
/*!
* \brief Get lower bound leaf value of this tree model
*/
double GetLowerBoundValue() const;
/*!
* \brief Prediction on one record
* \param feature_values Feature value of this record
* \return Prediction result
*/
inline double Predict(const double* feature_values) const;
inline double PredictByMap(const std::unordered_map<int, double>& feature_values) const;
inline int PredictLeafIndex(const double* feature_values) const;
inline int PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const;
inline void PredictContrib(const double* feature_values, int num_features, double* output);
/*! \brief Get Number of leaves*/
inline int num_leaves() const { return num_leaves_; }
/*! \brief Get depth of specific leaf*/
inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; }
/*! \brief Get feature of specific split*/
inline int split_feature(int split_idx) const { return split_feature_[split_idx]; }
/*! \brief Get gain of specific split*/
inline double split_gain(int split_idx) const { return split_gain_[split_idx]; }
/*! \brief Get output value of specific internal node*/
inline double internal_value(int node_idx) const {
return internal_value_[node_idx];
}
/*! \brief True when the node's split is numerical (categorical bit unset)*/
inline bool IsNumericalSplit(int node_idx) const {
return !GetDecisionType(decision_type_[node_idx], kCategoricalMask);
}
/*! \brief Get left child of specific internal node*/
inline int left_child(int node_idx) const { return left_child_[node_idx]; }
/*! \brief Get right child of specific internal node*/
inline int right_child(int node_idx) const { return right_child_[node_idx]; }
/*! \brief Get split feature (inner index, useless features removed)*/
inline int split_feature_inner(int node_idx) const {
return split_feature_inner_[node_idx];
}
/*! \brief Get parent node of specific leaf*/
inline int leaf_parent(int leaf_idx) const { return leaf_parent_[leaf_idx]; }
/*! \brief Get split threshold in bin of specific internal node*/
inline uint32_t threshold_in_bin(int node_idx) const {
return threshold_in_bin_[node_idx];
}
/*! \brief Get the number of data points that fall at or below this node*/
// node >= 0 indexes internal nodes; negative values are leaves encoded as ~leaf_index
inline int data_count(int node) const { return node >= 0 ? internal_count_[node] : leaf_count_[~node]; }
/*!
* \brief Shrinkage for the tree's output
* shrinkage rate (a.k.a learning rate) is used to tune the training process
* \param rate The factor of shrinkage
*/
inline void Shrinkage(double rate) {
/* internal_value_ holds num_leaves_-1 entries, so the loop scales both
 * arrays in lock-step for indices [0, num_leaves_-2] and the final leaf
 * value is scaled separately below */
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
for (int i = 0; i < num_leaves_ - 1; ++i) {
leaf_value_[i] = MaybeRoundToZero(leaf_value_[i] * rate);
internal_value_[i] = MaybeRoundToZero(internal_value_[i] * rate);
}
leaf_value_[num_leaves_ - 1] =
MaybeRoundToZero(leaf_value_[num_leaves_ - 1] * rate);
/* track the cumulative shrinkage applied to this tree */
shrinkage_ *= rate;
}
inline double shrinkage() const { return shrinkage_; }
/* Add a constant offset to every leaf and internal output value.
 * Same iteration shape as Shrinkage(): the paired arrays cover
 * [0, num_leaves_-2], the last leaf is handled separately. */
inline void AddBias(double val) {
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
for (int i = 0; i < num_leaves_ - 1; ++i) {
leaf_value_[i] = MaybeRoundToZero(leaf_value_[i] + val);
internal_value_[i] = MaybeRoundToZero(internal_value_[i] + val);
}
leaf_value_[num_leaves_ - 1] =
MaybeRoundToZero(leaf_value_[num_leaves_ - 1] + val);
// force to 1.0
shrinkage_ = 1.0f;
}
/* Collapse this tree to a single leaf that always predicts `val`
 * (existing node/leaf arrays are left in place but ignored). */
inline void AsConstantTree(double val) {
num_leaves_ = 1;
shrinkage_ = 1.0f;
leaf_value_[0] = val;
}
/*! \brief Serialize this object to string*/
std::string ToString() const;
/*! \brief Serialize this object to json*/
std::string ToJSON() const;
/*! \brief Serialize this object to if-else statement*/
std::string ToIfElse(int index, bool predict_leaf_index) const;
/* True when fval lies within [-kZeroThreshold, kZeroThreshold],
 * i.e. is close enough to zero to be treated as zero. */
inline static bool IsZero(double fval) {
return (fval >= -kZeroThreshold && fval <= kZeroThreshold);
}
/* Snap near-zero values to exactly 0 (see IsZero); otherwise pass through. */
inline static double MaybeRoundToZero(double fval) {
return IsZero(fval) ? 0 : fval;
}
/* Test whether the flag bit(s) selected by `mask` are set in the packed
 * per-node decision_type byte. */
inline static bool GetDecisionType(int8_t decision_type, int8_t mask) {
  const int8_t masked_bits = decision_type & mask;
  return masked_bits > 0;
}
/* Set (input=true) or clear (input=false) the flag bit(s) selected by
 * `mask` in the packed decision_type byte. Clearing uses (127 - mask),
 * matching the original — for the single-bit masks used here this equals
 * ~mask restricted to bits 0..6 (bit 7 is never used). */
inline static void SetDecisionType(int8_t* decision_type, bool input, int8_t mask) {
  if (input) {
    *decision_type = static_cast<int8_t>(*decision_type | mask);
  } else {
    *decision_type = static_cast<int8_t>(*decision_type & (127 - mask));
  }
}
/* Extract the 2-bit missing-value handling code stored in bits 2-3 of the
 * packed decision_type byte. */
inline static int8_t GetMissingType(int8_t decision_type) {
  const int shifted = decision_type >> 2;
  return static_cast<int8_t>(shifted & 3);
}
/* Store `input` as the 2-bit missing-value code in bits 2-3, preserving
 * the two low flag bits (categorical / default-left). */
inline static void SetMissingType(int8_t* decision_type, int8_t input) {
  const int8_t low_flags = static_cast<int8_t>(*decision_type & 3);
  *decision_type = static_cast<int8_t>(low_flags | (input << 2));
}
void RecomputeMaxDepth();
int NextLeafId() const { return num_leaves_; }
private:
std::string NumericalDecisionIfElse(int node) const;
std::string CategoricalDecisionIfElse(int node) const;
/* Route a raw feature value through a numerical split: returns the child
 * node index. Missing values (zero or NaN, per the node's missing_type)
 * follow the stored default direction; otherwise fval <= threshold goes
 * left. NaN is coerced to 0 unless the node explicitly handles NaN. */
inline int NumericalDecision(double fval, int node) const {
uint8_t missing_type = GetMissingType(decision_type_[node]);
if (std::isnan(fval) && missing_type != MissingType::NaN) {
fval = 0.0f;
}
/* value counts as "missing" → take the default direction */
if ((missing_type == MissingType::Zero && IsZero(fval))
|| (missing_type == MissingType::NaN && std::isnan(fval))) {
if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) {
return left_child_[node];
} else {
return right_child_[node];
}
}
if (fval <= threshold_[node]) {
return left_child_[node];
} else {
return right_child_[node];
}
}
/* Binned counterpart of NumericalDecision: fval is a bin index.
 * default_bin / max_bin mark the bins representing zero and NaN
 * respectively; hitting the relevant one sends the record down the
 * stored default direction, otherwise compare against the bin threshold. */
inline int NumericalDecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const {
uint8_t missing_type = GetMissingType(decision_type_[node]);
if ((missing_type == MissingType::Zero && fval == default_bin)
|| (missing_type == MissingType::NaN && fval == max_bin)) {
if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) {
return left_child_[node];
} else {
return right_child_[node];
}
}
if (fval <= threshold_in_bin_[node]) {
return left_child_[node];
} else {
return right_child_[node];
}
}
/* Route a raw feature value through a categorical split. threshold_[node]
 * indexes a bitset slice in cat_threshold_ (bounded by cat_boundaries_);
 * categories present in the bitset go left, everything else right.
 * Negative categories go right; NaN goes right when the node handles NaN,
 * otherwise it is mapped to category 0.
 * Fix: removed a stray empty statement (`;;`) after the first return.
 * NOTE(review): static_cast<int>(fval) runs before the isnan() check, and
 * casting NaN to int is undefined behavior — kept as-is to preserve the
 * original branch order, but worth verifying against upstream. */
inline int CategoricalDecision(double fval, int node) const {
  uint8_t missing_type = GetMissingType(decision_type_[node]);
  int int_fval = static_cast<int>(fval);
  if (int_fval < 0) {
    return right_child_[node];
  } else if (std::isnan(fval)) {
    // NaN is always in the right
    if (missing_type == MissingType::NaN) {
      return right_child_[node];
    }
    int_fval = 0;
  }
  int cat_idx = static_cast<int>(threshold_[node]);
  if (Common::FindInBitset(cat_threshold_.data() + cat_boundaries_[cat_idx],
      cat_boundaries_[cat_idx + 1] - cat_boundaries_[cat_idx], int_fval)) {
    return left_child_[node];
  }
  return right_child_[node];
}
/* Binned counterpart of CategoricalDecision: fval is a bin index looked
 * up in the inner (bin-space) bitset slice for this node. */
inline int CategoricalDecisionInner(uint32_t fval, int node) const {
int cat_idx = static_cast<int>(threshold_in_bin_[node]);
if (Common::FindInBitset(cat_threshold_inner_.data() + cat_boundaries_inner_[cat_idx],
cat_boundaries_inner_[cat_idx + 1] - cat_boundaries_inner_[cat_idx], fval)) {
return left_child_[node];
}
return right_child_[node];
}
/* Dispatch a raw feature value to the node's categorical or numerical
 * decision routine, based on the node's packed type flags. */
inline int Decision(double fval, int node) const {
  const bool is_categorical =
      GetDecisionType(decision_type_[node], kCategoricalMask);
  return is_categorical ? CategoricalDecision(fval, node)
                        : NumericalDecision(fval, node);
}
/* Binned counterpart of Decision: dispatch a bin index to the node's
 * categorical or numerical bin-space decision routine. */
inline int DecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const {
  const bool is_categorical =
      GetDecisionType(decision_type_[node], kCategoricalMask);
  return is_categorical
      ? CategoricalDecisionInner(fval, node)
      : NumericalDecisionInner(fval, node, default_bin, max_bin);
}
inline void Split(int leaf, int feature, int real_feature, double left_value, double right_value, int left_cnt, int right_cnt,
double left_weight, double right_weight, float gain);
/*!
* \brief Find leaf index of which record belongs by features
* \param feature_values Feature value of this record
* \return Leaf index
*/
inline int GetLeaf(const double* feature_values) const;
inline int GetLeafByMap(const std::unordered_map<int, double>& feature_values) const;
/*! \brief Serialize one node to json*/
std::string NodeToJSON(int index) const;
/*! \brief Serialize one node to if-else statement*/
std::string NodeToIfElse(int index, bool predict_leaf_index) const;
std::string NodeToIfElseByMap(int index, bool predict_leaf_index) const;
double ExpectedValue() const;
/*! \brief This is used fill in leaf_depth_ after reloading a model*/
inline void RecomputeLeafDepths(int node = 0, int depth = 0);
/*!
* \brief Used by TreeSHAP for data we keep about our decision path
*/
struct PathElement {
int feature_index;      // feature that split on this path step (-1 for the root step)
double zero_fraction;   // fraction of "zero" (feature-missing) paths flowing through
double one_fraction;    // fraction of "one" (feature-present) paths flowing through
// note that pweight is included for convenience and is not tied with the other attributes,
// the pweight of the i'th path element is the permutation weight of paths with i-1 ones in them
double pweight;
PathElement() {}
PathElement(int i, double z, double o, double w) : feature_index(i), zero_fraction(z), one_fraction(o), pweight(w) {}
};
/*! \brief Polynomial time algorithm for SHAP values (arXiv:1706.06060)*/
void TreeSHAP(const double *feature_values, double *phi,
int node, int unique_depth,
PathElement *parent_unique_path, double parent_zero_fraction,
double parent_one_fraction, int parent_feature_index) const;
/*! \brief Extend our decision path with a fraction of one and zero extensions for TreeSHAP*/
static void ExtendPath(PathElement *unique_path, int unique_depth,
double zero_fraction, double one_fraction, int feature_index);
/*! \brief Undo a previous extension of the decision path for TreeSHAP*/
static void UnwindPath(PathElement *unique_path, int unique_depth, int path_index);
/*! determine what the total permutation weight would be if we unwound a previous extension in the decision path*/
static double UnwoundPathSum(const PathElement *unique_path, int unique_depth, int path_index);
/*! \brief Number of max leaves*/
int max_leaves_;
/*! \brief Number of current leaves*/
int num_leaves_;
// following values used for non-leaf node
/*! \brief A non-leaf node's left child */
std::vector<int> left_child_;
/*! \brief A non-leaf node's right child */
std::vector<int> right_child_;
/*! \brief A non-leaf node's split feature */
std::vector<int> split_feature_inner_;
/*! \brief A non-leaf node's split feature, the original index */
std::vector<int> split_feature_;
/*! \brief A non-leaf node's split threshold in bin */
std::vector<uint32_t> threshold_in_bin_;
/*! \brief A non-leaf node's split threshold in feature value */
std::vector<double> threshold_;
int num_cat_;
std::vector<int> cat_boundaries_inner_;
std::vector<uint32_t> cat_threshold_inner_;
std::vector<int> cat_boundaries_;
std::vector<uint32_t> cat_threshold_;
/*! \brief Store the information for categorical feature handle and missing value handle. */
std::vector<int8_t> decision_type_;
/*! \brief A non-leaf node's split gain */
std::vector<float> split_gain_;
// used for leaf node
/*! \brief The parent of leaf */
std::vector<int> leaf_parent_;
/*! \brief Output of leaves */
std::vector<double> leaf_value_;
/*! \brief weight of leaves */
std::vector<double> leaf_weight_;
/*! \brief DataCount of leaves */
std::vector<int> leaf_count_;
/*! \brief Output of non-leaf nodes */
std::vector<double> internal_value_;
/*! \brief weight of non-leaf nodes */
std::vector<double> internal_weight_;
/*! \brief DataCount of non-leaf nodes */
std::vector<int> internal_count_;
/*! \brief Depth for leaves */
std::vector<int> leaf_depth_;
double shrinkage_;
int max_depth_;
};
// Splits leaf `leaf` on `feature`, converting it into internal node
// num_leaves_-1 and creating a new right leaf with index num_leaves_.
// Child slots use the encoding: value >= 0 is an internal node index,
// value < 0 is a leaf encoded as ~leaf_index (bitwise NOT).
// NOTE: num_leaves_ itself is incremented by the caller, not here.
inline void Tree::Split(int leaf, int feature, int real_feature,
double left_value, double right_value, int left_cnt, int right_cnt,
double left_weight, double right_weight, float gain) {
int new_node_idx = num_leaves_ - 1;
// update parent info
int parent = leaf_parent_[leaf];
if (parent >= 0) {
// if cur node is left child (children store ~leaf for leaves)
if (left_child_[parent] == ~leaf) {
left_child_[parent] = new_node_idx;
} else {
right_child_[parent] = new_node_idx;
}
}
// add new node
split_feature_inner_[new_node_idx] = feature;
split_feature_[new_node_idx] = real_feature;
split_gain_[new_node_idx] = gain;
// add two new leaves: the old leaf index stays on the left,
// the freshly created leaf (index num_leaves_) goes on the right
left_child_[new_node_idx] = ~leaf;
right_child_[new_node_idx] = ~num_leaves_;
// update new leaves
leaf_parent_[leaf] = new_node_idx;
leaf_parent_[num_leaves_] = new_node_idx;
// save current leaf value to internal node before change
internal_weight_[new_node_idx] = leaf_weight_[leaf];
internal_value_[new_node_idx] = leaf_value_[leaf];
internal_count_[new_node_idx] = left_cnt + right_cnt;
// NaN outputs are clamped to 0 so prediction never propagates NaN
leaf_value_[leaf] = std::isnan(left_value) ? 0.0f : left_value;
leaf_weight_[leaf] = left_weight;
leaf_count_[leaf] = left_cnt;
leaf_value_[num_leaves_] = std::isnan(right_value) ? 0.0f : right_value;
leaf_weight_[num_leaves_] = right_weight;
leaf_count_[num_leaves_] = right_cnt;
// update leaf depth: both children sit one level below the old leaf
leaf_depth_[num_leaves_] = leaf_depth_[leaf] + 1;
leaf_depth_[leaf]++;
}
// Returns the tree's output for one record; a single-leaf tree is a
// constant predictor, so no traversal is needed in that case.
inline double Tree::Predict(const double* feature_values) const {
if (num_leaves_ <= 1) {
return leaf_value_[0];
}
return LeafOutput(GetLeaf(feature_values));
}
// Map-based variant of Predict(): features are looked up by original index,
// missing entries default to 0 inside GetLeafByMap().
inline double Tree::PredictByMap(const std::unordered_map<int, double>& feature_values) const {
if (num_leaves_ <= 1) {
return leaf_value_[0];
}
return LeafOutput(GetLeafByMap(feature_values));
}
// Returns the index of the leaf this record falls into
// (0 for a degenerate single-leaf tree).
inline int Tree::PredictLeafIndex(const double* feature_values) const {
return num_leaves_ > 1 ? GetLeaf(feature_values) : 0;
}
// Map-based variant of PredictLeafIndex()
// (0 for a degenerate single-leaf tree).
inline int Tree::PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const {
return num_leaves_ > 1 ? GetLeafByMap(feature_values) : 0;
}
// Accumulates SHAP feature contributions for one record into `output`.
// `output` must have num_features + 1 slots: per-feature contributions in
// [0, num_features), and the expected (base) value added at output[num_features].
inline void Tree::PredictContrib(const double* feature_values, int num_features, double* output) {
output[num_features] += ExpectedValue();
// Run the recursion with preallocated space for the unique path data
if (num_leaves_ > 1) {
CHECK_GE(max_depth_, 0);
const int max_path_len = max_depth_ + 1;
// TreeSHAP needs one PathElement triangle: sum_{d=1..max_path_len} d slots
std::vector<PathElement> unique_path_data(max_path_len*(max_path_len + 1) / 2);
TreeSHAP(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1);
}
}
// Recursively rebuilds leaf_depth_ after model reload.
// `node` >= 0 is an internal node; a negative value encodes leaf ~node.
inline void Tree::RecomputeLeafDepths(int node, int depth) {
if (node == 0) leaf_depth_.resize(num_leaves());  // root call: (re)allocate storage
if (node < 0) {
leaf_depth_[~node] = depth;  // decode leaf index and record its depth
} else {
RecomputeLeafDepths(left_child_[node], depth + 1);
RecomputeLeafDepths(right_child_[node], depth + 1);
}
}
// Walks the tree from the root until a leaf (negative node) is reached and
// returns the decoded leaf index. Trees without categorical splits take the
// cheaper numerical-only path, skipping the per-node type dispatch.
inline int Tree::GetLeaf(const double* feature_values) const {
int node = 0;
if (num_cat_ > 0) {
do {
node = Decision(feature_values[split_feature_[node]], node);
} while (node >= 0);
} else {
do {
node = NumericalDecision(feature_values[split_feature_[node]], node);
} while (node >= 0);
}
return ~node;
}
// Map-based tree walk: each node's feature is looked up by original index;
// a feature absent from the map is treated as 0.0f. Returns the leaf index.
inline int Tree::GetLeafByMap(const std::unordered_map<int, double>& feature_values) const {
int node = 0;
if (num_cat_ > 0) {
while (node >= 0) {
const auto it = feature_values.find(split_feature_[node]);
const double fval = (it != feature_values.end()) ? it->second : 0.0f;
node = Decision(fval, node);
}
} else {
while (node >= 0) {
const auto it = feature_values.find(split_feature_[node]);
const double fval = (it != feature_values.end()) ? it->second : 0.0f;
node = NumericalDecision(fval, node);
}
}
return ~node;
}
} // namespace LightGBM
#endif // LightGBM_TREE_H_
|
PoolLayer.c | /*
* PoolLayer.c
* Francesco Conti <f.conti@unibo.it>
*
* Copyright (C) 2015 ETH Zurich, University of Bologna
* All rights reserved.
*
* This software may be modified and distributed under the terms
* of the BSD license. See the LICENSE file for details.
*/
#include "PoolLayer.h"
#define _nf nfeat_tile
#define _h height_tile
#define _w width_tile
#define _oh layer->out_height
#define _ow layer->out_width
#define _ps layer->pool_stride
#define X(k,i,j) x[((k*layer->height)+i)*layer->width+j]
#define Y(k,i,j) y[((k*layer->out_height)+i)*layer->out_width+j]
/**
 * Allocates a new PoolLayer data structure and its output feature maps.
 *
 * @return a pointer to the new PoolLayer data structure.
 *
 * @param n_feat
 *   the number of input feature maps.
 * @param pool_stride
 *   the pooling factor.
 * @param height
 *   the height of the input feature maps.
 * @param width
 *   the width of the input feature maps.
 * @param out_height
 *   the height of the output feature maps.
 * @param out_width
 *   the width of the output feature maps.
 * @param *x
 *   a *mandatory* pointer to the input feature maps.
 * @param *y
 *   an *optional* pointer to the already-allocated output feature maps. If
 *   NULL, PoolLayer_new() will allocate y automatically.
 */
PoolLayer *PoolLayer_new(
#ifdef CCN_NOALLOC
PoolLayer *layer,
#endif /* CCN_NOALLOC */
data_t *x,
data_t *y,
data_t *loc_x0,
data_t *loc_x1,
data_t *loc_y0,
data_t *loc_y1,
int n_feat,
int pool_stride,
int height,
int width,
int tiling_max_nfeat,
int tiling_max_height,
int tiling_max_width,
int parallel_type
) {
#ifndef CCN_NOALLOC
// build PoolLayer
// NOTE(review): ccn_malloc result is not checked before dereference -- confirm
// ccn_malloc aborts internally on failure.
PoolLayer *layer;
layer = ccn_malloc(sizeof(PoolLayer));
#endif /* CCN_NOALLOC */
layer->n_feat = n_feat;
layer->pool_stride = pool_stride;
layer->height = height;
layer->width = width;
// NOTE(review): h/ps + h%ps equals ceil(h/ps) only when h%ps <= 1; for larger
// remainders the output maps are over-sized -- confirm this is intended.
layer->out_height = height/pool_stride + height%pool_stride;
layer->out_width = width /pool_stride + width %pool_stride;
layer->x = x;
layer->y = y;
#ifndef CCN_CACHE
// local (L1/scratchpad) double-buffer pointers, only meaningful without cache
layer->loc_x0 = loc_x0;
layer->loc_y0 = loc_y0;
layer->loc_x1 = loc_x1;
layer->loc_y1 = loc_y1;
#endif /* ifndef CCN_CACHE */
layer->tiling_max_nfeat = tiling_max_nfeat;
layer->tiling_max_height = tiling_max_height;
layer->tiling_max_width = tiling_max_width;
layer->parallel_type = parallel_type;
return layer;
}
/**
 * Frees the PoolLayer structure itself. The x/y feature-map buffers are
 * caller-owned and are NOT released here.
 * NOTE(review): allocation used ccn_malloc() but release uses free() --
 * confirm the two are compatible on this platform.
 */
void PoolLayer_delete(PoolLayer *layer) {
free(layer);
}
// Max-pools one (nfeat_tile x height_tile x width_tile) tile of the input.
// ii/jj/kk are the tile's offsets in the feature/height/width dimensions of
// the full L2 feature maps. Without CCN_CACHE, data is staged through local
// buffers (copy-in, compute, async copy-out); double buffering via
// loc_x1/loc_y1 and *doublebuf is currently disabled (commented out).
static void PoolLayer_tile_loop(PoolLayer *layer, int nfeat_tile, int height_tile, int width_tile, int ii, int jj, int kk, int *doublebuf) {
int i,j,k,i1,j1;
data_t max, xtmp;
data_t *_x;
data_t *_y;
data_t *l2_x, *l2_y;
int sum;  // NOTE(review): unused -- the checksum blocks declare their own `sum`
// if(*doublebuf) {
_x = layer->loc_x0;
_y = layer->loc_y0;
// }
// else {
// _x = layer->loc_x1;
// _y = layer->loc_y1;
// }
// base addresses of this tile inside the full L2 input/output maps
l2_x = layer->x + (ii*_h+jj)*_w+kk;
l2_y = layer->y + (ii*_oh+jj)*_ow+kk;
#ifndef CCN_CACHE
// DMA-in
// #pragma omp master
{
ccn_memcpy(_x, l2_x, sizeof(data_t)*_nf*_h*_w);
}
// #pragma omp barrier
#else /* ifdef CCN_CACHE */
// with a cache there is no staging: compute directly on the L2 buffers
_x = l2_x;
_y = l2_y;
#endif /* ifdef CCN_CACHE */
#ifdef INTERM_CHECKSUM
// debug-only checksum of the staged input tile
// #pragma omp master
{
int sum = 0;
for(k=0; k<_nf; k++) {
for(i=0; i<_oh; i++) {
for(j=0; j<_ow; j++) {
for(i1=0; i1<_ps; i1++) {
for(j1=0; j1<_ps; j1++) {
sum += _x[((k*_h)+(i*_ps+i1))*_w+(j*_ps+j1)];
}
}
}
}
}
printf("[PoolLayer] in : %d\n", sum);
}
// #pragma omp barrier
#endif
// PARALLEL_FEAT: one thread per feature map; otherwise parallelize rows
if(layer->parallel_type == PARALLEL_FEAT) {
#pragma omp parallel for
for(k=0; k<_nf; k++) {
for(i=0; i<_oh; i++) {
for(j=0; j<_ow; j++) {
max = -DATA_T_MAX;
// scan the _ps x _ps pooling window for its maximum
for(i1=0; i1<_ps; i1++) {
for(j1=0; j1<_ps; j1++) {
xtmp = _x[((k*_h)+(i*_ps+i1))*_w+(j*_ps+j1)];
if(xtmp > max)
max = xtmp;
}
}
_y[((k*_oh)+i)*_ow+j] = max;
}
}
}
}
else {
for(k=0; k<_nf; k++) {
#pragma omp parallel for
for(i=0; i<_oh; i++) {
for(j=0; j<_ow; j++) {
max = -DATA_T_MAX;
for(i1=0; i1<_ps; i1++) {
for(j1=0; j1<_ps; j1++) {
xtmp = _x[((k*_h)+(i*_ps+i1))*_w+(j*_ps+j1)];
if(xtmp > max)
max = xtmp;
}
}
_y[((k*_oh)+i)*_ow+j] = max;
}
}
}
}
#ifdef INTERM_CHECKSUM
// debug-only checksum of the pooled output tile
// #pragma omp master
{
int sum = 0;
for(k=0; k<_nf; k++) {
for(i=0; i<_oh; i++) {
for(j=0; j<_ow; j++) {
sum += _y[((k*_oh)+i)*_ow+j];
}
}
}
printf("[PoolLayer] out : %d\n", sum);
}
// #pragma omp barrier
#endif
#ifndef CCN_CACHE
// DMA-out
// #pragma omp master
{
ccn_memcpy_async(l2_y, _y, sizeof(data_t)*_nf*_oh*_ow);
}
// #pragma omp barrier
#endif /* ifndef CCN_CACHE */
// *doublebuf = (*doublebuf == 0) ? 1 : 0;
}
/**
 * Executes the given PoolLayer, i.e. computes its outputs given the inputs
 * defined in the data structure.
 * The PoolLayer reduces the size of the feature maps by max-pooling.
 *
 * With CCN_TILING, the (nfeat, height, width) volume is swept in tiles of at
 * most tiling_max_* elements each; remainder ("last") tiles handle dimensions
 * that are not multiples of the tile size. Without CCN_TILING the whole
 * volume is processed as a single tile.
 *
 * Fixes vs. previous revision:
 *  - the first "last width" branch advanced kk past the remainder tile
 *    (`kk += width_tile;`) before processing it, unlike the three analogous
 *    remainder branches below -- the remainder columns were read from the
 *    wrong offset; the increment is removed.
 *  - `height_int > max_height_tile-1` normalized to the equivalent
 *    `height_int >= max_height_tile` used everywhere else.
 *  - removed unused locals (i, j, sptr).
 *
 * @param *layer
 *   a pointer to the PoolLayer data structure to execute.
 */
void PoolLayer_exec(PoolLayer *layer) {
int ii, jj, kk;
int max_nfeat_tile = layer->tiling_max_nfeat;
int max_height_tile = layer->tiling_max_height;
int max_width_tile = layer->tiling_max_width;
int nfeat_int = layer->n_feat;
int height_int = layer->height;
int width_int = layer->width;
int nfeat_tile;
int doublebuf = 0;
#ifdef CCN_TILING
// normal nfeat
for(ii=0; nfeat_int>=max_nfeat_tile; ii+=max_nfeat_tile) {
int height_tile;
nfeat_tile = max_nfeat_tile;
// normal height
for(jj=0; height_int>=max_height_tile; jj+=max_height_tile) {
int width_tile;
height_tile = max_height_tile;
// normal width
for(kk=0; width_int>=max_width_tile; kk+=max_width_tile) {
width_tile = max_width_tile;
PoolLayer_tile_loop(layer, nfeat_tile, height_tile, width_tile, ii, jj, kk, &doublebuf);
width_int -= width_tile;
}
// last width: kk already points at the first untiled column, so it is
// passed unchanged (previously it was advanced first, skipping the data)
if (width_int > 0) {
width_tile = width_int;
PoolLayer_tile_loop(layer, nfeat_tile, height_tile, width_tile, ii, jj, kk, &doublebuf);
}
height_int -= height_tile;
width_int = layer->width;
}
// last height
if (height_int > 0) {
int width_tile;
height_tile = height_int;
for(kk=0; width_int>=max_width_tile; kk+=max_width_tile) {
width_tile = max_width_tile;
PoolLayer_tile_loop(layer, nfeat_tile, height_tile, width_tile, ii, jj, kk, &doublebuf);
width_int -= width_tile;
}
if (width_int > 0) {
width_tile = width_int;
PoolLayer_tile_loop(layer, nfeat_tile, height_tile, width_tile, ii, jj, kk, &doublebuf);
}
width_int = layer->width;
}
nfeat_int -= nfeat_tile;
height_int = layer->height;
}
// last nfeat
if (nfeat_int > 0) {
int height_tile;
nfeat_tile = nfeat_int;
// normal height
for(jj=0; height_int>=max_height_tile; jj+=max_height_tile) {
int width_tile;
height_tile = max_height_tile;
// normal width
for(kk=0; width_int>=max_width_tile; kk+=max_width_tile) {
width_tile = max_width_tile;
PoolLayer_tile_loop(layer, nfeat_tile, height_tile, width_tile, ii, jj, kk, &doublebuf);
width_int -= width_tile;
}
// last width
if (width_int > 0) {
width_tile = width_int;
PoolLayer_tile_loop(layer, nfeat_tile, height_tile, width_tile, ii, jj, kk, &doublebuf);
}
height_int -= height_tile;
width_int = layer->width;
}
// last height
if (height_int > 0) {
int width_tile;
height_tile = height_int;
for(kk=0; width_int>=max_width_tile; kk+=max_width_tile) {
width_tile = max_width_tile;
PoolLayer_tile_loop(layer, nfeat_tile, height_tile, width_tile, ii, jj, kk, &doublebuf);
width_int -= width_tile;
}
if (width_int > 0) {
width_tile = width_int;
PoolLayer_tile_loop(layer, nfeat_tile, height_tile, width_tile, ii, jj, kk, &doublebuf);
}
width_int = layer->width;
}
height_int = layer->height;
}
#else /* ifndef CCN_TILING */
PoolLayer_tile_loop(layer, nfeat_int, height_int, width_int, 0, 0, 0, &doublebuf);
#endif /* ifndef CCN_TILING */
}
|
timing.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
double start_time;
double end_time;
float *dirty;      /* large scratch buffer used to flush caches before timing */
float ressss;      /* sink that keeps the flush loop from being optimized away */
int flushsz=100000000;
int num_of_core=8;
/*
 * get_time(1): flush caches by streaming through a large buffer, then record
 *              the start timestamp.
 * get_time(0): compute and print the elapsed wall-clock time, release the
 *              flush buffer.
 *
 * Fixes vs. previous revision:
 *  - tttmp was read via `+=` while uninitialized (undefined behavior): now
 *    zero-initialized.
 *  - dirty was malloc'd and then read via `+=` before any write
 *    (indeterminate values): calloc guarantees zeros.
 *  - the allocation result is now checked.
 *  - dirty is reset to NULL after free to guard against double free.
 */
void get_time(int flag){
float tttmp[num_of_core];
if (flag == 1){
for (int ii = 0; ii < num_of_core; ii++) tttmp[ii] = 0.0f;
dirty = (float *)calloc(flushsz, sizeof(float));
if (dirty == NULL){
fprintf(stderr, "get_time: flush buffer allocation failed\n");
exit(EXIT_FAILURE);
}
#pragma omp parallel for
for (int dirt = 0; dirt < flushsz; dirt++){
dirty[dirt] += dirt%100;
/* NOTE: racy accumulation across threads -- the value is meaningless by
 * design; it only forces the traffic so the compiler keeps the loop. */
tttmp[dirt%num_of_core] += dirty[dirt];
}
for(int ii =0; ii<num_of_core;ii++){ressss+= tttmp[ii];}
//printf("flush\n");
start_time = omp_get_wtime();
}
else{
end_time = omp_get_wtime() - start_time;
printf("time is : %lf\n", end_time);
free(dirty);
dirty = NULL;
}
}
|
pr39154.c | /* PR middle-end/39154 */
/* { dg-do compile } */
/* { dg-options "-O2 -std=gnu99" } */
extern void abort (void);
int n = 20;
/* Regression test for PR middle-end/39154: VLAs (a[n], b[n][n]) used inside
 * nested `omp parallel for` regions with every combination of implicit,
 * shared, and private data-sharing clauses. Each parallel phase is followed
 * by a serial verification that aborts on any wrong element. */
int
main (void)
{
int a[n], b[n][n];
/* implicit data sharing on both levels */
#pragma omp parallel for
for (int i = 0; i < n; i++)
{
a[i] = i + 1;
#pragma omp parallel for
for (int j = 0; j < n; j++)
b[i][j] = a[i];
}
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
if (b[i][j] != i + 1)
abort ();
if (a[i] != i + 1)
abort ();
}
/* explicit shared() on the outer level only */
#pragma omp parallel for shared (n, a, b)
for (int i = 0; i < n; i++)
{
a[i] = i + 3;
#pragma omp parallel for
for (int j = 0; j < n; j++)
b[i][j] = a[i];
}
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
if (b[i][j] != i + 3)
abort ();
if (a[i] != i + 3)
abort ();
}
/* explicit shared() on the inner level only */
#pragma omp parallel for
for (int i = 0; i < n; i++)
{
a[i] = i + 5;
#pragma omp parallel for shared (n, a, b)
for (int j = 0; j < n; j++)
b[i][j] = a[i];
}
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
if (b[i][j] != i + 5)
abort ();
if (a[i] != i + 5)
abort ();
}
/* explicit shared() on both levels */
#pragma omp parallel for shared (n, a, b)
for (int i = 0; i < n; i++)
{
a[i] = i + 7;
#pragma omp parallel for shared (n, a, b)
for (int j = 0; j < n; j++)
b[i][j] = a[i];
}
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
if (b[i][j] != i + 7)
abort ();
if (a[i] != i + 7)
abort ();
}
/* private VLAs: results are per-thread and deliberately not verified --
 * these phases only exercise compilation/privatization of VLAs */
#pragma omp parallel for private (a, b)
for (int i = 0; i < n; i++)
{
a[i] = i + 1;
#pragma omp parallel for
for (int j = 0; j < n; j++)
b[i][j] = a[i];
}
#pragma omp parallel for private (a, b)
for (int i = 0; i < n; i++)
{
a[i] = i + 1;
#pragma omp parallel for private (b)
for (int j = 0; j < n; j++)
b[i][j] = a[i];
}
return 0;
}
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Computes *result = *x - *y for two struct timeval values.
 * Returns 1 if the difference is negative, otherwise 0.
 * Note: *y is normalized in place (same observable behavior as the classic
 * GNU libc "Calculating Elapsed Time" example this is taken from). */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Borrow whole seconds into y's microseconds so that y->tv_usec does not
 * exceed x->tv_usec. */
if (x->tv_usec < y->tv_usec)
{
int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * borrow;
y->tv_sec += borrow;
}
/* Carry excess microseconds (beyond one second) back into y's seconds. */
if (x->tv_usec - y->tv_usec > 1000000)
{
int carry = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * carry;
y->tv_sec -= carry;
}
/* After normalization tv_usec is certainly positive. */
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
return x->tv_sec < y->tv_sec;
}
/* Benchmark driver: runs a time-tiled (PLUTO/CLooG-generated) order-2 3D
 * 25-point wave stencil TESTS times and reports the best wall-clock time. */
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
/* NOTE(review): Nx/Ny/Nz (and Nt) are left uninitialized when fewer
 * arguments are given -- later use is undefined; callers are assumed to
 * always pass 4 size arguments. The +8 adds a 4-cell halo on each side. */
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
/* A holds two time planes (ping-pong on t%2); roc2 is the velocity field.
 * NOTE(review): the sizeof(double**) allocation below is immediately
 * overwritten a few lines later and leaked. Allocation results are
 * unchecked throughout. */
double ****A = (double ****) malloc(sizeof(double***)*2);
double ***roc2 = (double ***) malloc(sizeof(double**));
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 32;
tile_size[3] = 1024;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
// NOTE(review): initialization starts at index 1; index 0 of each dimension
// stays indeterminate, and A[1] is never initialized at all -- confirm the
// iteration bounds below never read those cells with meaningful effect.
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
/* 4th-order-in-space Laplacian coefficients (center + 4 shells) */
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code -- machine-generated time-skewed tiling; t1..t5 are
 * tile/time coordinates, t6..t8 the intra-tile z/y/x points. Do not edit. */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=floord(Nt-1,2);t1++) {
lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4));
ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-19,32));t3<=min(min(min(floord(4*Nt+Ny-9,32),floord(8*t1+Ny+7,32)),floord(16*t2+Ny+3,32)),floord(16*t1-16*t2+Nz+Ny+5,32));t3++) {
for (t4=max(max(max(0,ceild(t1-127,128)),ceild(16*t2-Nz-1011,1024)),ceild(32*t3-Ny-1011,1024));t4<=min(min(min(min(floord(4*Nt+Nx-9,1024),floord(8*t1+Nx+7,1024)),floord(16*t2+Nx+3,1024)),floord(32*t3+Nx+19,1024)),floord(16*t1-16*t2+Nz+Nx+5,1024));t4++) {
for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(32*t3-Ny+5,4)),ceild(1024*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),Nt-1),2*t1+3),4*t2+2),8*t3+6),256*t4+254);t5++) {
for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(32*t3,4*t5+4);t7<=min(32*t3+31,4*t5+Ny-5);t7++) {
lbv=max(1024*t4,4*t5+4);
ubv=min(1024*t4+1023,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
DRB099-targetparallelfor2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
/*
use of omp target + map + array sections derived from pointers
*/
#include <omp.h>
/* Writes b[i] = a[i] * i for every i in [0, N). */
void foo(double *a,double *b,int N)
{
for (int idx = 0; idx < N; idx++)
b[idx] = a[idx] * (double)idx;
}
/* Initializes a and b in parallel, then applies foo() serially and prints a
 * sample element. No data race: each iteration touches distinct elements. */
int main(int argc,char *argv[])
{
int i;
int len = 1000;
double a[len];
double b[len];
#pragma omp parallel for private (i)
for (i = 0; i < len; i++) {
a[i] = ((double )i) / 2.0;
b[i] = 0.0;
}
foo(a,b,len);
printf("b[50]=%f\n",b[50]);
return 0;
}
|
StmtOpenMP.h | //===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// \brief This file defines OpenMP AST classes for executable directives and
/// clauses.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMTOPENMP_H
#define LLVM_CLANG_AST_STMTOPENMP_H
#include "clang/AST/Expr.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
namespace clang {
//===----------------------------------------------------------------------===//
// AST classes for directives.
// //
///////////////////////////////////////////////////////////////////////////////
/// \brief This is a basic class for representing single OpenMP executable
/// directive.
///
class OMPExecutableDirective : public Stmt {
friend class ASTStmtReader;
/// \brief Kind of the directive.
OpenMPDirectiveKind Kind;
/// \brief Starting location of the directive (directive keyword).
SourceLocation StartLoc;
/// \brief Ending location of the directive.
SourceLocation EndLoc;
/// \brief Numbers of clauses.
const unsigned NumClauses;
/// \brief Number of child expressions/stmts.
const unsigned NumChildren;
/// \brief Offset from this to the start of clauses.
/// There are NumClauses pointers to clauses, they are followed by
/// NumChildren pointers to child stmts/exprs (if the directive type
/// requires an associated stmt, then it has to be the first of them).
const unsigned ClausesOffset;
/// \brief Get the clauses storage.
MutableArrayRef<OMPClause *> getClauses() {
OMPClause **ClauseStorage = reinterpret_cast<OMPClause **>(
reinterpret_cast<char *>(this) + ClausesOffset);
return MutableArrayRef<OMPClause *>(ClauseStorage, NumClauses);
}
protected:
/// \brief Build instance of directive of class \a K.
///
/// \param SC Statement class.
/// \param K Kind of OpenMP directive.
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending location of the directive.
///
template <typename T>
OMPExecutableDirective(const T *, StmtClass SC, OpenMPDirectiveKind K,
SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses, unsigned NumChildren)
: Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)),
EndLoc(std::move(EndLoc)), NumClauses(NumClauses),
NumChildren(NumChildren),
ClausesOffset(llvm::RoundUpToAlignment(sizeof(T),
llvm::alignOf<OMPClause *>())) {}
/// \brief Sets the list of variables for this clause.
///
/// \param Clauses The list of clauses for the directive.
///
void setClauses(ArrayRef<OMPClause *> Clauses);
/// \brief Set the associated statement for the directive.
///
/// /param S Associated statement.
///
void setAssociatedStmt(Stmt *S) {
assert(hasAssociatedStmt() && "no associated statement.");
*child_begin() = S;
}
public:
/// \brief Iterates over a filtered subrange of clauses applied to a
/// directive.
///
/// This iterator visits only those declarations that meet some run-time
/// criteria.
template <class FilterPredicate> class filtered_clause_iterator {
protected:
ArrayRef<OMPClause *>::const_iterator Current;
ArrayRef<OMPClause *>::const_iterator End;
FilterPredicate Pred;
void SkipToNextClause() {
while (Current != End && !Pred(*Current))
++Current;
}
public:
typedef const OMPClause *value_type;
filtered_clause_iterator() : Current(), End() {}
filtered_clause_iterator(ArrayRef<OMPClause *> Arr, FilterPredicate Pred)
: Current(Arr.begin()), End(Arr.end()), Pred(std::move(Pred)) {
SkipToNextClause();
}
value_type operator*() const { return *Current; }
value_type operator->() const { return *Current; }
filtered_clause_iterator &operator++() {
++Current;
SkipToNextClause();
return *this;
}
filtered_clause_iterator operator++(int) {
filtered_clause_iterator tmp(*this);
++(*this);
return tmp;
}
bool operator!() { return Current == End; }
explicit operator bool() { return Current != End; }
bool empty() const { return Current == End; }
};
template <typename Fn>
filtered_clause_iterator<Fn> getFilteredClauses(Fn &&fn) const {
return filtered_clause_iterator<Fn>(clauses(), std::move(fn));
}
struct ClauseKindFilter {
OpenMPClauseKind Kind;
bool operator()(const OMPClause *clause) const {
return clause->getClauseKind() == Kind;
}
};
filtered_clause_iterator<ClauseKindFilter>
getClausesOfKind(OpenMPClauseKind Kind) const {
return getFilteredClauses(ClauseKindFilter{Kind});
}
/// \brief Gets a single clause of the specified kind \a K associated with the
/// current directive iff there is only one clause of this kind (an assertion
/// fires if more than one clause of this kind is associated with the
/// directive). Returns nullptr if no clause of kind \a K is associated with
/// the directive.
const OMPClause *getSingleClause(OpenMPClauseKind K) const;
/// \brief Returns starting location of directive kind.
SourceLocation getLocStart() const { return StartLoc; }
/// \brief Returns ending location of directive.
SourceLocation getLocEnd() const { return EndLoc; }
/// \brief Set starting location of directive kind.
///
/// \param Loc New starting location of directive.
///
void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
/// \brief Set ending location of directive.
///
/// \param Loc New ending location of directive.
///
void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }
/// \brief Get number of clauses.
unsigned getNumClauses() const { return NumClauses; }
/// \brief Returns specified clause.
///
/// \param i Number of clause (zero-based index into the clause list).
///
OMPClause *getClause(unsigned i) const { return clauses()[i]; }
/// \brief Returns true if directive has associated statement.
bool hasAssociatedStmt() const { return NumChildren > 0; }
/// \brief Returns statement associated with the directive.
Stmt *getAssociatedStmt() const {
  assert(hasAssociatedStmt() && "no associated statement.");
  return const_cast<Stmt *>(*child_begin());
}
/// \brief Returns the kind of this OpenMP directive.
OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
/// \brief RTTI support: true for any statement class in the OpenMP
/// executable-directive range.
static bool classof(const Stmt *S) {
  return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
         S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
}
child_range children() {
  if (!hasAssociatedStmt())
    return child_range();
  // Child statements are stored in trailing storage immediately after the
  // clause list, hence the cast from the clause array's end pointer.
  Stmt **ChildStorage = reinterpret_cast<Stmt **>(getClauses().end());
  return child_range(ChildStorage, ChildStorage + NumChildren);
}
/// \brief Returns the list of clauses associated with this directive.
ArrayRef<OMPClause *> clauses() { return getClauses(); }
ArrayRef<OMPClause *> clauses() const {
  return const_cast<OMPExecutableDirective *>(this)->getClauses();
}
};
/// \brief This represents '#pragma omp parallel' directive.
///
/// \code
/// #pragma omp parallel private(a,b) reduction(+: c,d)
/// \endcode
/// In this example directive '#pragma omp parallel' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelDirective : public OMPExecutableDirective {
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement associated with the directive.
  ///
  static OMPParallelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// \brief Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelDirectiveClass;
  }
};
/// \brief This is a common base class for loop directives ('omp simd', 'omp
/// for', 'omp for simd' etc.). It is responsible for the loop code generation.
///
class OMPLoopDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Number of collapsed loops as specified by 'collapse' clause.
  unsigned CollapsedNum;

  /// \brief Offsets to the stored exprs.
  /// This enumeration contains offsets to all the pointers to children
  /// expressions stored in OMPLoopDirective.
  /// The first 8 children are necessary for all the loop directives, and
  /// the next 7 are specific to the worksharing ones.
  /// After the fixed children, four arrays of length CollapsedNum are
  /// allocated: loop counters, their inits, their updates and final values.
  ///
  enum {
    AssociatedStmtOffset = 0,
    IterationVariableOffset = 1,
    LastIterationOffset = 2,
    CalcLastIterationOffset = 3,
    PreConditionOffset = 4,
    CondOffset = 5,
    InitOffset = 6,
    IncOffset = 7,
    // The '...End' enumerators do not correspond to child expressions - they
    // specify the offset to the end (and start of the following counters/
    // updates/finals arrays).
    DefaultEnd = 8,
    // The following 7 exprs are used by worksharing loops only.
    IsLastIterVariableOffset = 8,
    LowerBoundVariableOffset = 9,
    UpperBoundVariableOffset = 10,
    StrideVariableOffset = 11,
    EnsureUpperBoundOffset = 12,
    NextLowerBoundOffset = 13,
    NextUpperBoundOffset = 14,
    // Offset to the end (and start of the following counters/updates/finals
    // arrays) for worksharing loop directives.
    WorksharingEnd = 15,
  };

  /// \brief Get the counters storage.
  MutableArrayRef<Expr *> getCounters() {
    // Counters are the first trailing array after the fixed children.
    Expr **Storage = reinterpret_cast<Expr **>(
        &(*(std::next(child_begin(), getArraysOffset(getDirectiveKind())))));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }

  /// \brief Get the loop counters' inits storage.
  MutableArrayRef<Expr *> getInits() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &*std::next(child_begin(),
                    getArraysOffset(getDirectiveKind()) + CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }

  /// \brief Get the updates storage.
  MutableArrayRef<Expr *> getUpdates() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &*std::next(child_begin(),
                    getArraysOffset(getDirectiveKind()) + 2 * CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }

  /// \brief Get the final counter updates storage.
  MutableArrayRef<Expr *> getFinals() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &*std::next(child_begin(),
                    getArraysOffset(getDirectiveKind()) + 3 * CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }

protected:
  /// \brief Build instance of loop directive of class \a Kind.
  ///
  /// \param SC Statement class.
  /// \param Kind Kind of OpenMP directive.
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops from 'collapse' clause.
  /// \param NumClauses Number of clauses.
  /// \param NumSpecialChildren Number of additional directive-specific stmts.
  ///
  template <typename T>
  OMPLoopDirective(const T *That, StmtClass SC, OpenMPDirectiveKind Kind,
                   SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned CollapsedNum, unsigned NumClauses,
                   unsigned NumSpecialChildren = 0)
      : OMPExecutableDirective(That, SC, Kind, StartLoc, EndLoc, NumClauses,
                               numLoopChildren(CollapsedNum, Kind) +
                                   NumSpecialChildren),
        CollapsedNum(CollapsedNum) {}

  /// \brief Offset to the start of children expression arrays.
  static unsigned getArraysOffset(OpenMPDirectiveKind Kind) {
    // Worksharing loops have 7 extra fixed children (bounds, stride, etc.).
    return isOpenMPWorksharingDirective(Kind) ? WorksharingEnd
                                              : DefaultEnd;
  }

  /// \brief Children number.
  static unsigned numLoopChildren(unsigned CollapsedNum,
                                  OpenMPDirectiveKind Kind) {
    return getArraysOffset(Kind) +
           4 * CollapsedNum; // Counters, Inits, Updates and Finals
  }

  // Setters below store the helper expressions built by Sema into the
  // trailing child storage at their fixed offsets.
  void setIterationVariable(Expr *IV) {
    *std::next(child_begin(), IterationVariableOffset) = IV;
  }
  void setLastIteration(Expr *LI) {
    *std::next(child_begin(), LastIterationOffset) = LI;
  }
  void setCalcLastIteration(Expr *CLI) {
    *std::next(child_begin(), CalcLastIterationOffset) = CLI;
  }
  void setPreCond(Expr *PC) {
    *std::next(child_begin(), PreConditionOffset) = PC;
  }
  void setCond(Expr *Cond) {
    *std::next(child_begin(), CondOffset) = Cond;
  }
  void setInit(Expr *Init) { *std::next(child_begin(), InitOffset) = Init; }
  void setInc(Expr *Inc) { *std::next(child_begin(), IncOffset) = Inc; }
  // The following setters are valid only for worksharing loop directives,
  // which reserve the extra child slots (offsets 8..14).
  void setIsLastIterVariable(Expr *IL) {
    assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), IsLastIterVariableOffset) = IL;
  }
  void setLowerBoundVariable(Expr *LB) {
    assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), LowerBoundVariableOffset) = LB;
  }
  void setUpperBoundVariable(Expr *UB) {
    assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), UpperBoundVariableOffset) = UB;
  }
  void setStrideVariable(Expr *ST) {
    assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), StrideVariableOffset) = ST;
  }
  void setEnsureUpperBound(Expr *EUB) {
    assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), EnsureUpperBoundOffset) = EUB;
  }
  void setNextLowerBound(Expr *NLB) {
    assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), NextLowerBoundOffset) = NLB;
  }
  void setNextUpperBound(Expr *NUB) {
    assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), NextUpperBoundOffset) = NUB;
  }
  void setCounters(ArrayRef<Expr *> A);
  void setInits(ArrayRef<Expr *> A);
  void setUpdates(ArrayRef<Expr *> A);
  void setFinals(ArrayRef<Expr *> A);

public:
  /// \brief The expressions built for the OpenMP loop CodeGen for the
  /// whole collapsed loop nest.
  struct HelperExprs {
    /// \brief Loop iteration variable.
    Expr *IterationVarRef;
    /// \brief Loop last iteration number.
    Expr *LastIteration;
    /// \brief Loop number of iterations.
    Expr *NumIterations;
    /// \brief Calculation of last iteration.
    Expr *CalcLastIteration;
    /// \brief Loop pre-condition.
    Expr *PreCond;
    /// \brief Loop condition.
    Expr *Cond;
    /// \brief Loop iteration variable init.
    Expr *Init;
    /// \brief Loop increment.
    Expr *Inc;
    /// \brief IsLastIteration - local flag variable passed to runtime.
    Expr *IL;
    /// \brief LowerBound - local variable passed to runtime.
    Expr *LB;
    /// \brief UpperBound - local variable passed to runtime.
    Expr *UB;
    /// \brief Stride - local variable passed to runtime.
    Expr *ST;
    /// \brief EnsureUpperBound -- expression LB = min(LB, NumIterations).
    Expr *EUB;
    /// \brief Update of LowerBound for statically scheduled 'omp for' loops.
    Expr *NLB;
    /// \brief Update of UpperBound for statically scheduled 'omp for' loops.
    Expr *NUB;
    /// \brief Loop counters.
    SmallVector<Expr *, 4> Counters;
    /// \brief Expressions for loop counters inits for CodeGen.
    SmallVector<Expr *, 4> Inits;
    /// \brief Expressions for loop counters update for CodeGen.
    SmallVector<Expr *, 4> Updates;
    /// \brief Final loop counter values for CodeGen.
    SmallVector<Expr *, 4> Finals;

    /// \brief Check if all the expressions are built (does not check the
    /// worksharing ones).
    bool builtAll() {
      return IterationVarRef != nullptr && LastIteration != nullptr &&
             NumIterations != nullptr && PreCond != nullptr &&
             Cond != nullptr && Init != nullptr && Inc != nullptr;
    }

    /// \brief Initialize all the fields to null.
    /// \param Size Number of elements in the counters/finals/updates arrays.
    void clear(unsigned Size) {
      IterationVarRef = nullptr;
      LastIteration = nullptr;
      CalcLastIteration = nullptr;
      PreCond = nullptr;
      Cond = nullptr;
      Init = nullptr;
      Inc = nullptr;
      IL = nullptr;
      LB = nullptr;
      UB = nullptr;
      ST = nullptr;
      EUB = nullptr;
      NLB = nullptr;
      NUB = nullptr;
      Counters.resize(Size);
      Inits.resize(Size);
      Updates.resize(Size);
      Finals.resize(Size);
      for (unsigned i = 0; i < Size; ++i) {
        Counters[i] = nullptr;
        Inits[i] = nullptr;
        Updates[i] = nullptr;
        Finals[i] = nullptr;
      }
    }
  };

  /// \brief Get number of collapsed loops.
  unsigned getCollapsedNumber() const { return CollapsedNum; }
  Expr *getIterationVariable() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), IterationVariableOffset)));
  }
  Expr *getLastIteration() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), LastIterationOffset)));
  }
  Expr *getCalcLastIteration() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CalcLastIterationOffset)));
  }
  Expr *getPreCond() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), PreConditionOffset)));
  }
  Expr *getCond() const {
    return const_cast<Expr *>(
        reinterpret_cast<const Expr *>(*std::next(child_begin(), CondOffset)));
  }
  Expr *getInit() const {
    return const_cast<Expr *>(
        reinterpret_cast<const Expr *>(*std::next(child_begin(), InitOffset)));
  }
  Expr *getInc() const {
    return const_cast<Expr *>(
        reinterpret_cast<const Expr *>(*std::next(child_begin(), IncOffset)));
  }
  // Getters below are valid only for worksharing loop directives.
  Expr *getIsLastIterVariable() const {
    assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), IsLastIterVariableOffset)));
  }
  Expr *getLowerBoundVariable() const {
    assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), LowerBoundVariableOffset)));
  }
  Expr *getUpperBoundVariable() const {
    assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), UpperBoundVariableOffset)));
  }
  Expr *getStrideVariable() const {
    assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), StrideVariableOffset)));
  }
  Expr *getEnsureUpperBound() const {
    assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), EnsureUpperBoundOffset)));
  }
  Expr *getNextLowerBound() const {
    assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), NextLowerBoundOffset)));
  }
  Expr *getNextUpperBound() const {
    assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), NextUpperBoundOffset)));
  }
  const Stmt *getBody() const {
    // This relies on the loop form already having been checked by Sema:
    // peel off CollapsedNum nested ForStmts to reach the innermost body.
    Stmt *Body = getAssociatedStmt()->IgnoreContainers(true);
    Body = cast<ForStmt>(Body)->getBody();
    for (unsigned Cnt = 1; Cnt < CollapsedNum; ++Cnt) {
      Body = Body->IgnoreContainers();
      Body = cast<ForStmt>(Body)->getBody();
    }
    return Body;
  }

  ArrayRef<Expr *> counters() { return getCounters(); }
  ArrayRef<Expr *> counters() const {
    return const_cast<OMPLoopDirective *>(this)->getCounters();
  }

  ArrayRef<Expr *> inits() { return getInits(); }
  ArrayRef<Expr *> inits() const {
    return const_cast<OMPLoopDirective *>(this)->getInits();
  }

  ArrayRef<Expr *> updates() { return getUpdates(); }
  ArrayRef<Expr *> updates() const {
    return const_cast<OMPLoopDirective *>(this)->getUpdates();
  }

  ArrayRef<Expr *> finals() { return getFinals(); }
  ArrayRef<Expr *> finals() const {
    return const_cast<OMPLoopDirective *>(this)->getFinals();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSimdDirectiveClass ||
           T->getStmtClass() == OMPForDirectiveClass ||
           T->getStmtClass() == OMPForSimdDirectiveClass ||
           T->getStmtClass() == OMPParallelForDirectiveClass ||
           T->getStmtClass() == OMPParallelForSimdDirectiveClass;
  }
};
/// \brief This represents '#pragma omp simd' directive.
///
/// \code
/// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd, StartLoc,
                         EndLoc, CollapsedNum, NumClauses) {}

  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc, unsigned CollapsedNum,
                                  ArrayRef<OMPClause *> Clauses,
                                  Stmt *AssociatedStmt,
                                  const HelperExprs &Exprs);

  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       unsigned CollapsedNum, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSimdDirectiveClass;
  }
};
/// \brief This represents '#pragma omp for' directive.
///
/// \code
/// #pragma omp for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for' has clauses 'private' with the
/// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c'
/// and 'd'.
///
class OMPForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                  unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPForDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation EndLoc, unsigned CollapsedNum,
                                 ArrayRef<OMPClause *> Clauses,
                                 Stmt *AssociatedStmt,
                                 const HelperExprs &Exprs);

  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                      unsigned CollapsedNum, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPForDirectiveClass;
  }
};
/// \brief This represents '#pragma omp for simd' directive.
///
/// \code
/// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPForSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                      unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}

  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPForSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPForSimdDirective *CreateEmpty(const ASTContext &C,
                                          unsigned NumClauses,
                                          unsigned CollapsedNum, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPForSimdDirectiveClass;
  }
};
/// \brief This represents '#pragma omp sections' directive.
///
/// \code
/// #pragma omp sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp sections' has clauses 'private' with
/// the variables 'a' and 'b' and 'reduction' with operator '+' and variables
/// 'c' and 'd'.
///
class OMPSectionsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSectionsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSectionsDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionsDirectiveClass;
  }
};
/// \brief This represents '#pragma omp section' directive.
///
/// \code
/// #pragma omp section
/// \endcode
///
class OMPSectionDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
                               StartLoc, EndLoc, 0, 1) {}

  /// \brief Build an empty directive.
  ///
  explicit OMPSectionDirective()
      : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
                               SourceLocation(), SourceLocation(), 0, 1) {}

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPSectionDirective *Create(const ASTContext &C,
                                     SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     Stmt *AssociatedStmt);

  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionDirectiveClass;
  }
};
/// \brief This represents '#pragma omp single' directive.
///
/// \code
/// #pragma omp single private(a,b) copyprivate(c,d)
/// \endcode
/// In this example directive '#pragma omp single' has clauses 'private' with
/// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'.
///
class OMPSingleDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSingleDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPSingleDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSingleDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSingleDirectiveClass;
  }
};
/// \brief This represents '#pragma omp master' directive.
///
/// \code
/// #pragma omp master
/// \endcode
///
class OMPMasterDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master,
                               StartLoc, EndLoc, 0, 1) {}

  /// \brief Build an empty directive.
  ///
  explicit OMPMasterDirective()
      : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master,
                               SourceLocation(), SourceLocation(), 0, 1) {}

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPMasterDirective *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    Stmt *AssociatedStmt);

  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMasterDirectiveClass;
  }
};
/// \brief This represents '#pragma omp critical' directive.
///
/// \code
/// #pragma omp critical
/// \endcode
///
class OMPCriticalDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Name of the directive.
  DeclarationNameInfo DirName;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param Name Name of the directive.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc,
                       SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical,
                               StartLoc, EndLoc, 0, 1),
        DirName(Name) {}

  /// \brief Build an empty directive.
  ///
  explicit OMPCriticalDirective()
      : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical,
                               SourceLocation(), SourceLocation(), 0, 1),
        DirName() {}

  /// \brief Set name of the directive.
  ///
  /// \param Name Name of the directive.
  ///
  void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; }

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param Name Name of the directive.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPCriticalDirective *
  Create(const ASTContext &C, const DeclarationNameInfo &Name,
         SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt);

  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPCriticalDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  /// \brief Return name of the directive.
  ///
  DeclarationNameInfo getDirectiveName() const { return DirName; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCriticalDirectiveClass;
  }
};
/// \brief This represents '#pragma omp parallel for' directive.
///
/// \code
/// #pragma omp parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                          unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}

  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelForDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelForDirective *CreateEmpty(const ASTContext &C,
                                              unsigned NumClauses,
                                              unsigned CollapsedNum,
                                              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelForDirectiveClass;
  }
};
/// \brief This represents '#pragma omp parallel for simd' directive.
///
/// \code
/// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for simd' has clauses
/// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j'
/// and linear step 's', 'reduction' with operator '+' and variables 'c' and
/// 'd'.
///
class OMPParallelForSimdDirective : public OMPLoopDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPParallelForSimdDirectiveClass,
OMPD_parallel_for_simd, StartLoc, EndLoc, CollapsedNum,
NumClauses) {}
/// \brief Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
explicit OMPParallelForSimdDirective(unsigned CollapsedNum,
unsigned NumClauses)
: OMPLoopDirective(this, OMPParallelForSimdDirectiveClass,
OMPD_parallel_for_simd, SourceLocation(),
SourceLocation(), CollapsedNum, NumClauses) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPParallelForSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// \brief Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
/// \brief Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelForSimdDirectiveClass;
}
};
/// \brief This represents '#pragma omp parallel sections' directive.
///
/// \code
/// #pragma omp parallel sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel sections' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPParallelSectionsDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass,
OMPD_parallel_sections, StartLoc, EndLoc,
NumClauses, 1) {} // one child slot (the associated stmt)
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPParallelSectionsDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass,
OMPD_parallel_sections, SourceLocation(),
SourceLocation(), NumClauses, 1) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPParallelSectionsDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPParallelSectionsDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);
/// \brief Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelSectionsDirectiveClass;
}
};
/// \brief This represents '#pragma omp task' directive.
///
/// \code
/// #pragma omp task private(a,b) final(d)
/// \endcode
/// In this example directive '#pragma omp task' has clauses 'private' with the
/// variables 'a' and 'b' and 'final' with condition 'd'.
///
class OMPTaskDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task, StartLoc,
EndLoc, NumClauses, 1) {} // one child slot (the associated stmt)
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPTaskDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task,
SourceLocation(), SourceLocation(), NumClauses,
1) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
EmptyShell);
/// \brief Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskDirectiveClass;
}
};
/// \brief This represents '#pragma omp taskyield' directive.
///
/// \code
/// #pragma omp taskyield
/// \endcode
///
class OMPTaskyieldDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield,
StartLoc, EndLoc, 0, 0) {} // no clauses, no associated statement
/// \brief Build an empty directive.
///
explicit OMPTaskyieldDirective()
: OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield,
SourceLocation(), SourceLocation(), 0, 0) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
static OMPTaskyieldDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell);
/// \brief Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskyieldDirectiveClass;
}
};
/// \brief This represents '#pragma omp barrier' directive.
///
/// \code
/// #pragma omp barrier
/// \endcode
///
class OMPBarrierDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier,
StartLoc, EndLoc, 0, 0) {} // no clauses, no associated statement
/// \brief Build an empty directive.
///
explicit OMPBarrierDirective()
: OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier,
SourceLocation(), SourceLocation(), 0, 0) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
static OMPBarrierDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell);
/// \brief Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPBarrierDirectiveClass;
}
};
/// \brief This represents '#pragma omp taskwait' directive.
///
/// \code
/// #pragma omp taskwait
/// \endcode
///
class OMPTaskwaitDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait,
StartLoc, EndLoc, 0, 0) {} // no clauses, no associated statement
/// \brief Build an empty directive.
///
explicit OMPTaskwaitDirective()
: OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait,
SourceLocation(), SourceLocation(), 0, 0) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
static OMPTaskwaitDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell);
/// \brief Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskwaitDirectiveClass;
}
};
/// \brief This represents '#pragma omp taskgroup' directive.
///
/// \code
/// #pragma omp taskgroup
/// \endcode
///
class OMPTaskgroupDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup,
StartLoc, EndLoc, 0, 1) {} // no clauses; one child slot (the associated stmt)
/// \brief Build an empty directive.
///
explicit OMPTaskgroupDirective()
: OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup,
SourceLocation(), SourceLocation(), 0, 1) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTaskgroupDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C, EmptyShell);
/// \brief Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskgroupDirectiveClass;
}
};
/// \brief This represents '#pragma omp flush' directive.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has 2 arguments - variables
/// 'a' and 'b'.
/// 'omp flush' directive does not have clauses but has an optional list of
/// variables to flush. This list of variables is stored within some fake clause
/// FlushClause.
class OMPFlushDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
StartLoc, EndLoc, NumClauses, 0) {} // no associated statement
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPFlushDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
SourceLocation(), SourceLocation(), NumClauses,
0) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses (only single OMPFlushClause clause is
/// allowed).
///
static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPFlushDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// \brief Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPFlushDirectiveClass;
}
};
/// \brief This represents '#pragma omp ordered' directive.
///
/// \code
/// #pragma omp ordered
/// \endcode
///
class OMPOrderedDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
StartLoc, EndLoc, 0, 1) {} // no clauses; one child slot (the associated stmt)
/// \brief Build an empty directive.
///
explicit OMPOrderedDirective()
: OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
SourceLocation(), SourceLocation(), 0, 1) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPOrderedDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPOrderedDirective *CreateEmpty(const ASTContext &C, EmptyShell);
/// \brief Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPOrderedDirectiveClass;
}
};
/// \brief This represents '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has clause 'capture'.
///
class OMPAtomicDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Used for 'atomic update' or 'atomic capture' constructs. They may
/// have atomic expressions of forms
/// \code
/// x = x binop expr;
/// x = expr binop x;
/// \endcode
/// This field is true for the first form of the expression and false for the
/// second. Required for correct codegen of non-associative operations (like
/// << or >>).
bool IsXLHSInRHSPart;
/// \brief Used for 'atomic update' or 'atomic capture' constructs. They may
/// have atomic expressions of forms
/// \code
/// v = x; <update x>;
/// <update x>; v = x;
/// \endcode
/// This field is true for the first (postfix) form of the expression and false
/// otherwise.
bool IsPostfixUpdate;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
StartLoc, EndLoc, NumClauses, 5), // 5 child slots: associated stmt, x, update-expr, v, expr (see setters below)
IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPAtomicDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
SourceLocation(), SourceLocation(), NumClauses,
5),
IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}
/// \brief Set 'x' part of the associated expression/statement (child slot 1).
void setX(Expr *X) { *std::next(child_begin()) = X; }
/// \brief Set helper expression of the form
/// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
/// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)' (child slot 2).
void setUpdateExpr(Expr *UE) { *std::next(child_begin(), 2) = UE; }
/// \brief Set 'v' part of the associated expression/statement (child slot 3).
void setV(Expr *V) { *std::next(child_begin(), 3) = V; }
/// \brief Set 'expr' part of the associated expression/statement (child slot 4).
void setExpr(Expr *E) { *std::next(child_begin(), 4) = E; }
public:
/// \brief Creates directive with a list of \a Clauses and 'x', 'v' and 'expr'
/// parts of the atomic construct (see Section 2.12.6, atomic Construct, for
/// detailed description of 'x', 'v' and 'expr').
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param X 'x' part of the associated expression/statement.
/// \param V 'v' part of the associated expression/statement.
/// \param E 'expr' part of the associated expression/statement.
/// \param UE Helper expression of the form
/// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
/// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
/// \param IsXLHSInRHSPart true if \a UE has the first form and false if the
/// second.
/// \param IsPostfixUpdate true if original value of 'x' must be stored in
/// 'v', not an updated one.
static OMPAtomicDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPAtomicDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// \brief Get 'x' part of the associated expression/statement.
/// May return null for an incomplete/empty directive.
Expr *getX() { return cast_or_null<Expr>(*std::next(child_begin())); }
const Expr *getX() const {
return cast_or_null<Expr>(*std::next(child_begin()));
}
/// \brief Get helper expression of the form
/// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
/// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
Expr *getUpdateExpr() {
return cast_or_null<Expr>(*std::next(child_begin(), 2));
}
const Expr *getUpdateExpr() const {
return cast_or_null<Expr>(*std::next(child_begin(), 2));
}
/// \brief Return true if helper update expression has form
/// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form
/// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; }
/// \brief Return true if 'v' expression must be updated to original value of
/// 'x', false if 'v' must be updated to the new value of 'x'.
bool isPostfixUpdate() const { return IsPostfixUpdate; }
/// \brief Get 'v' part of the associated expression/statement.
Expr *getV() { return cast_or_null<Expr>(*std::next(child_begin(), 3)); }
const Expr *getV() const {
return cast_or_null<Expr>(*std::next(child_begin(), 3));
}
/// \brief Get 'expr' part of the associated expression/statement.
Expr *getExpr() { return cast_or_null<Expr>(*std::next(child_begin(), 4)); }
const Expr *getExpr() const {
return cast_or_null<Expr>(*std::next(child_begin(), 4));
}
/// \brief Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPAtomicDirectiveClass;
}
};
/// \brief This represents '#pragma omp target' directive.
///
/// \code
/// #pragma omp target if(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'if' with
/// condition 'a'.
///
class OMPTargetDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
StartLoc, EndLoc, NumClauses, 1) {} // one child slot (the associated stmt)
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPTargetDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
SourceLocation(), SourceLocation(), NumClauses,
1) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTargetDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPTargetDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// \brief Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetDirectiveClass;
}
};
/// \brief This represents '#pragma omp teams' directive.
///
/// \code
/// #pragma omp teams if(a)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'if' with
/// condition 'a'.
///
class OMPTeamsDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
StartLoc, EndLoc, NumClauses, 1) {} // one child slot (the associated stmt)
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPTeamsDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
SourceLocation(), SourceLocation(), NumClauses,
1) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPTeamsDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// \brief Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTeamsDirectiveClass;
}
};
/// \brief This represents '#pragma omp cancellation point' directive.
///
/// \code
/// #pragma omp cancellation point for
/// \endcode
///
/// In this example a cancellation point is created for innermost 'for' region.
class OMPCancellationPointDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Kind of the region the cancellation point applies to (e.g.
/// OMPD_for); OMPD_unknown until set.
OpenMPDirectiveKind CancelRegion;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPCancellationPointDirectiveClass,
OMPD_cancellation_point, StartLoc, EndLoc, 0, 0), // no clauses, no associated statement
CancelRegion(OMPD_unknown) {}
/// \brief Build an empty directive.
///
explicit OMPCancellationPointDirective()
: OMPExecutableDirective(this, OMPCancellationPointDirectiveClass,
OMPD_cancellation_point, SourceLocation(),
SourceLocation(), 0, 0),
CancelRegion(OMPD_unknown) {}
/// \brief Set cancel region for current cancellation point.
/// \param CR Cancellation region.
void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
static OMPCancellationPointDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C,
EmptyShell);
/// \brief Get cancellation region for the current cancellation point.
OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }
/// \brief Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPCancellationPointDirectiveClass;
}
};
/// \brief This represents '#pragma omp cancel' directive.
///
/// \code
/// #pragma omp cancel for
/// \endcode
///
/// In this example a cancel is created for innermost 'for' region.
class OMPCancelDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Kind of the region being cancelled (e.g. OMPD_for); OMPD_unknown
/// until set.
OpenMPDirectiveKind CancelRegion;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
StartLoc, EndLoc, 0, 0), // no clauses, no associated statement
CancelRegion(OMPD_unknown) {}
/// \brief Build an empty directive.
///
explicit OMPCancelDirective()
: OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
SourceLocation(), SourceLocation(), 0, 0),
CancelRegion(OMPD_unknown) {}
/// \brief Set cancel region for current cancel directive.
/// \param CR Cancellation region.
void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
static OMPCancelDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPCancelDirective *CreateEmpty(const ASTContext &C, EmptyShell);
/// \brief Get cancellation region for the current cancel directive.
OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }
/// \brief Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPCancelDirectiveClass;
}
};
} // end namespace clang
#endif
|
GI.h | #include <parse.h>
#define SELF_GRAVITY
#define FLAG_GI
#ifdef PARTICLE_SIMULATOR_TWO_DIMENSION
#error
#endif
template <class Ptcl> class GI : public Problem<Ptcl>{
public:
static double end_time;
static double damping;
static void setupIC(PS::ParticleSystem<Ptcl>& sph_system, system_t& sysinfo, PS::DomainInfo& dinfo,
ParameterFile ¶meter_file){
const double Corr = .98;//Correction Term
/////////
//place ptcls
/////////
std::vector<Ptcl> ptcl;
std::vector<Ptcl> tar;//Target
std::vector<Ptcl> imp;//Impactor
/////////
// Use parameters from input file, or defaults if none provided
PS::F64 UnitMass = parameter_file.getValueOf("UnitMass", 6.0e+24);
PS::F64 UnitRadi = parameter_file.getValueOf("UnitRadi", 6400e+3);
PS::F64 coreFracRadi = parameter_file.getValueOf("coreFracRadi", 3500.0e+3 / 6400.0e+3);
PS::F64 coreFracMass = parameter_file.getValueOf("coreFracMass", 0.3);
PS::F64 imptarMassRatio = parameter_file.getValueOf("imptarMassRatio", 0.1);
int mode = parameter_file.getValueOf("mode", 2 );
PS::F64 impVel = parameter_file.getValueOf("impVel",0.);
end_time = parameter_file.getValueOf("end_time",1.0e+4);
damping = parameter_file.getValueOf("damping",1.);
const PS::F64 Expand = 1.1;
const PS::F64 tarMass = UnitMass;
const PS::F64 tarRadi = UnitRadi;
const PS::F64 tarCoreMass = tarMass * coreFracMass;
const PS::F64 tarCoreRadi = tarRadi * coreFracRadi;
const PS::F64 impMass = imptarMassRatio * tarMass;
const PS::F64 impRadi = Expand * cbrt(impMass / tarMass) * UnitRadi;
const PS::F64 impCoreMass = impMass * coreFracMass;
const PS::F64 impCoreRadi = impRadi * coreFracRadi;
const double offset = 5.0 * UnitRadi;
const PS::F64 dx = 1.0 / 39;
const PS::F64 Grav = 6.67e-11;
//std::cout << impRadi / tarRadi << std::endl;
//std::cout << impCoreRadi / impRadi << std::endl;
///////////////////
//Dummy put to determine # of ptcls
///////////////////
//target
int tarNmntl = 0;
for(PS::F64 x = -1.0 ; x <= 1.0 ; x += dx){
for(PS::F64 y = -1.0 ; y <= 1.0 ; y += dx){
for(PS::F64 z = -1.0 ; z <= 1.0 ; z += dx){
const PS::F64 r = sqrt(x*x + y*y + z*z) * UnitRadi;
if(r >= tarRadi || r <= tarCoreRadi) continue;
++ tarNmntl;
}
}
}
int tarNcore;
double tarCoreShrinkFactor = 1.0;
while(tarCoreShrinkFactor *= 0.99){
tarNcore = 0;
for(PS::F64 x = -1.0 ; x <= 1.0 ; x += dx){
for(PS::F64 y = -1.0 ; y <= 1.0 ; y += dx){
for(PS::F64 z = -1.0 ; z <= 1.0 ; z += dx){
const PS::F64 r = tarCoreShrinkFactor * sqrt(x*x + y*y + z*z) * UnitRadi;
if(r >= Corr * tarCoreRadi) continue;
++ tarNcore;
}
}
}
if((double)(tarNcore) / (double)(tarNcore + tarNmntl) > coreFracMass) break;
}
//imp
int impNmntl = 0;
for(PS::F64 x = -1.0 ; x <= 1.0 ; x += dx){
for(PS::F64 y = -1.0 ; y <= 1.0 ; y += dx){
for(PS::F64 z = -1.0 ; z <= 1.0 ; z += dx){
const PS::F64 r = Expand * sqrt(x*x + y*y + z*z) * UnitRadi;
if(r >= impRadi || r <= impCoreRadi) continue;
++ impNmntl;
}
}
}
double impCoreShrinkFactor = 1.0;
int impNcore;
while(impCoreShrinkFactor *= 0.99){
impNcore = 0;
for(PS::F64 x = -1.0 ; x <= 1.0 ; x += dx){
for(PS::F64 y = -1.0 ; y <= 1.0 ; y += dx){
for(PS::F64 z = -1.0 ; z <= 1.0 ; z += dx){
const PS::F64 r = Expand * impCoreShrinkFactor * sqrt(x*x + y*y + z*z) * UnitRadi;
if(r >= Corr * impCoreRadi) continue;
++ impNcore;
}
}
}
if((double)(impNcore) / (double)(impNcore + impNmntl) > coreFracMass) break;
}
///////////////////
//Dummy end
///////////////////
const int tarNptcl = tarNcore + tarNmntl;
const int impNptcl = impNcore + impNmntl;
const int Nptcl = tarNptcl + impNptcl;
std::cout << "Target :" << tarNptcl << std::endl;
std::cout << " radius : " << tarRadi << std::endl;
std::cout << " total-to-core : " << (double)(tarNcore) / (double)(tarNptcl) << std::endl;
std::cout << " # of core ptcls : " << tarNcore << std::endl;
std::cout << " # of mantle ptcls: " << tarNmntl << std::endl;
std::cout << " core density : " << tarCoreMass / (4.0 * math::pi / 3.0 * tarCoreRadi * tarCoreRadi * tarCoreRadi * Corr * Corr * Corr) << std::endl;
std::cout << " mantle density : " << (tarMass - tarCoreMass) / (4.0 * math::pi / 3.0 * (tarRadi * tarRadi * tarRadi - tarCoreRadi * tarCoreRadi * tarCoreRadi)) << std::endl;
std::cout << " mean density : " << tarMass / (4.0 * math::pi / 3.0 * tarRadi * tarRadi * tarRadi) << std::endl;
std::cout << "Impactor:" << impNptcl << std::endl;
std::cout << " radius : " << impRadi << std::endl;
std::cout << " total-to-core : " << (double)(impNcore) / (double)(impNptcl) << std::endl;
std::cout << " # of core ptcls : " << impNcore << std::endl;
std::cout << " # of mantle ptcls: " << impNmntl << std::endl;
std::cout << " core density : " << impCoreMass / (4.0 * math::pi / 3.0 * impCoreRadi * impCoreRadi * impCoreRadi * Corr * Corr * Corr) << std::endl;
std::cout << " mantle density : " << (impMass - impCoreMass) / (4.0 * math::pi / 3.0 * (impRadi * impRadi * impRadi - impCoreRadi * impCoreRadi * impCoreRadi)) << std::endl;
std::cout << " mean density : " << impMass / (4.0 * math::pi / 3.0 * impRadi * impRadi * impRadi) << std::endl;
std::cout << " velocity : " << impVel << std::endl;
std::cout << "Total:" << Nptcl << std::endl;
std::cout << "Tar-to-Imp mass ratio: " << (double)(impNmntl) / (double)(tarNmntl) << std::endl;
const int NptclIn1Node = Nptcl / PS::Comm::getNumberOfProc();
///////////////////
//Real put
///////////////////
PS::S32 id = 0;
switch (mode){
case 1:
std::cout << "creating target from tar.dat" << std::endl;
FILE * tarFile;
tarFile = fopen("input/tar.dat","r");
FileHeader tarheader;
int nptcltar;
nptcltar = tarheader.readAscii(tarFile);
std::cout << "num tar ptcl: " << nptcltar << std::endl;
for(int i=0; i<nptcltar; i++){
Ptcl ith;
ith.readAscii(tarFile);
if(ith.id / NptclIn1Node == PS::Comm::getRank()) tar.push_back(ith);
}
for(PS::U32 i = 0 ; i < tar.size() ; ++ i){
tar[i].mass /= (PS::F64)(Nptcl);
}
for(PS::U32 i = 0 ; i < tar.size() ; ++ i){
ptcl.push_back(tar[i]);
}
std::cout << "creating impactor from imp.dat" << std::endl;
FILE * impFile;
impFile = fopen("input/imp.dat","r");
FileHeader impheader;
int nptclimp;
nptclimp = impheader.readAscii(impFile);
std::cout << "num imp ptcl: " << nptclimp << std::endl;
for(int i=0; i<nptclimp; i++){
Ptcl ith;
ith.readAscii(impFile);
ith.vel.x += (-1) * impVel;
if(ith.id / NptclIn1Node == PS::Comm::getRank()) imp.push_back(ith);
}
for(PS::U32 i = 0 ; i < imp.size() ; ++ i){
imp[i].mass /= (PS::F64)(Nptcl);
}
for(PS::U32 i = 0 ; i < imp.size() ; ++ i){
ptcl.push_back(imp[i]);
}
break;
case 2:
//Put Tar.
std::cout << "creating target" << std::endl;
for(PS::F64 x = -1.0 ; x <= 1.0 ; x += dx){
for(PS::F64 y = -1.0 ; y <= 1.0 ; y += dx){
for(PS::F64 z = -1.0 ; z <= 1.0 ; z += dx){
const PS::F64 r = sqrt(x*x + y*y + z*z) * UnitRadi;
if(r >= tarRadi || r <= tarCoreRadi) continue;
Ptcl ith;
ith.pos.x = UnitRadi * x;
ith.pos.y = UnitRadi * y;
ith.pos.z = UnitRadi * z;
ith.dens = (tarMass - tarCoreMass) / (4.0 / 3.0 * math::pi * (tarRadi * tarRadi * tarRadi - tarCoreRadi * tarCoreRadi * tarCoreRadi));
ith.mass = tarMass + impMass;
ith.eng = 0.1 * Grav * tarMass / tarRadi;
ith.id = id++;
// TODO: Modify this line for all particles that need new EoS
ith.setPressure(&AGranite);
ith.tag = 0;
if(ith.id / NptclIn1Node == PS::Comm::getRank()) tar.push_back(ith);
}
}
}
for(PS::F64 x = -1.0 ; x <= 1.0 ; x += dx){
for(PS::F64 y = -1.0 ; y <= 1.0 ; y += dx){
for(PS::F64 z = -1.0 ; z <= 1.0 ; z += dx){
const PS::F64 r = tarCoreShrinkFactor * sqrt(x*x + y*y + z*z) * UnitRadi;
if(r >= Corr * tarCoreRadi) continue;
Ptcl ith;
ith.pos.x = tarCoreShrinkFactor * UnitRadi * x;
ith.pos.y = tarCoreShrinkFactor * UnitRadi * y;
ith.pos.z = tarCoreShrinkFactor * UnitRadi * z;
ith.dens = tarCoreMass / (4.0 / 3.0 * math::pi * tarCoreRadi * tarCoreRadi * tarCoreRadi * Corr * Corr * Corr);
ith.mass = tarMass + impMass;
ith.eng = 0.1 * Grav * tarMass / tarRadi;
ith.id = id++;
// TODO: Modify this line for all particles that need new EoS
ith.setPressure(&Iron);
ith.tag = 1;
if(ith.id / NptclIn1Node == PS::Comm::getRank()) tar.push_back(ith);
}
}
}
for(PS::U32 i = 0 ; i < tar.size() ; ++ i){
tar[i].mass /= (PS::F64)(Nptcl);
}
for(PS::U32 i = 0 ; i < tar.size() ; ++ i){
ptcl.push_back(tar[i]);
}
break;
case 3:
//imp
std::cout << "creating impactor" << std::endl;
for(PS::F64 x = -1.0 ; x <= 1.0 ; x += dx){
for(PS::F64 y = -1.0 ; y <= 1.0 ; y += dx){
for(PS::F64 z = -1.0 ; z <= 1.0 ; z += dx){
const PS::F64 r = Expand * sqrt(x*x + y*y + z*z) * UnitRadi;
if(r >= impRadi || r <= impCoreRadi) continue;
Ptcl ith;
ith.pos.x = Expand * UnitRadi * x + offset;
ith.pos.y = Expand * UnitRadi * y;
ith.pos.z = Expand * UnitRadi * z;
ith.dens = (impMass - impCoreMass) / (4.0 / 3.0 * math::pi * (impRadi * impRadi * impRadi - impCoreRadi * impCoreRadi * impCoreRadi));
ith.mass = tarMass + impMass;
ith.eng = 0.1 * Grav * tarMass / tarRadi;
ith.id = id++;
// TODO: Modify this line for all particles that need new EoS
ith.setPressure(&AGranite);
ith.tag = 2;
if(ith.id / NptclIn1Node == PS::Comm::getRank()) imp.push_back(ith);
}
}
}
for(PS::F64 x = -1.0 ; x <= 1.0 ; x += dx){
for(PS::F64 y = -1.0 ; y <= 1.0 ; y += dx){
for(PS::F64 z = -1.0 ; z <= 1.0 ; z += dx){
const PS::F64 r = Expand * impCoreShrinkFactor * sqrt(x*x + y*y + z*z) * UnitRadi;
if(r >= impCoreRadi) continue;
Ptcl ith;
ith.pos.x = Expand * impCoreShrinkFactor * UnitRadi * x + offset;
ith.pos.y = Expand * impCoreShrinkFactor * UnitRadi * y;
ith.pos.z = Expand * impCoreShrinkFactor * UnitRadi * z;
ith.dens = impCoreMass / (4.0 / 3.0 * math::pi * impCoreRadi * impCoreRadi * impCoreRadi * Corr * Corr * Corr);
ith.mass = tarMass + impMass;
ith.eng = 0.1 * Grav * tarMass / tarRadi;
ith.id = id++;
// TODO: Modify this line for all particles that need new EoS
ith.setPressure(&Iron);
ith.tag = 3;
if(ith.id / NptclIn1Node == PS::Comm::getRank()) imp.push_back(ith);
}
}
}
for(PS::U32 i = 0 ; i < imp.size() ; ++ i){
imp[i].mass /= (PS::F64)(Nptcl);
}
for(PS::U32 i = 0 ; i < imp.size() ; ++ i){
ptcl.push_back(imp[i]);
}
break;
}
const PS::S32 numPtclLocal = ptcl.size();
sph_system.setNumberOfParticleLocal(numPtclLocal);
for(PS::U32 i = 0 ; i < ptcl.size() ; ++ i){
sph_system[i] = ptcl[i];
}
//Fin.
std::cout << "# of ptcls = " << ptcl.size() << std::endl;
std::cout << "setup..." << std::endl;
}
static void setEoS(PS::ParticleSystem<Ptcl>& sph_system){
// Re-attach an equation of state to every particle owned by this rank.
// Tags are assigned at setup time: even tags (0, 2) mark mantle particles
// (granite EoS) and odd tags (1, 3) mark core particles (iron EoS).
// TODO: Modify the lines below for all particles that need new EoS
for(PS::U64 n = 0 ; n < sph_system.getNumberOfParticleLocal() ; ++ n){
if(sph_system[n].tag % 2 != 0){
sph_system[n].setPressure(&Iron);
}else{
sph_system[n].setPressure(&AGranite);
}
}
}
// Apply an artificial damping force during the relaxation phase of the run.
// Once the simulation time reaches 5000 the function becomes a no-op.
static void addExternalForce(PS::ParticleSystem<Ptcl>& sph_system, system_t& sysinfo){
if(sysinfo.time >= 5000) return;
std::cout << "Add Ext. Force!!!" << std::endl;
#pragma omp parallel for
for(PS::S32 i = 0 ; i < sph_system.getNumberOfParticleLocal() ; ++ i){
// Velocity-proportional drag: contributes -0.05 * vel / dt to the
// acceleration, i.e. roughly 5% velocity removal per timestep.
sph_system[i].acc += - sph_system[i].vel * 0.05 / sph_system[i].dt;
}
}
};
|
GB_binop__pair_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pair_uint8)
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pair_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__pair_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_uint8)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: uint8_t
// A type: uint8_t
// A pattern? 1
// B type: uint8_t
// B pattern? 1
// BinaryOp: cij = 1
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// true if values of A are not used
#define GB_A_IS_PATTERN \
1 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// true if values of B are not used
#define GB_B_IS_PATTERN \
1 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = 1 ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PAIR || GxB_NO_UINT8 || GxB_NO_PAIR_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense, with no accumulator.
// For the PAIR operator every result entry is simply 1 (see GB_BINOP
// above); the actual loop lives in the included template.
void GB (_Cdense_ewise3_noaccum__pair_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C.
// B is pre-sliced into B_ntasks tasks run on B_nthreads threads.
// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumB__pair_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into the dense matrix C.
// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumb__pair_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the template block above always returns (generated code)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B for the PAIR_UINT8 operator.
// The add itself is performed by GB_add_template.c using the GB_* macros
// defined above; for eWiseUnion the alpha/beta scalars supply the values
// used where only one of A or B has an entry.
GrB_Info GB (_AaddB__pair_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspaces, freed by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint8_t alpha_scalar ;
uint8_t beta_scalar ;
if (is_eWiseUnion)
{
// unpack the typed alpha/beta scalars for eWiseUnion
alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
compare.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP AAA RRRR EEEEE %
% C O O MM MM P P A A R R E %
% C O O M M M PPPP AAAAA RRRR EEE %
% C O O M M P A A R R E %
% CCCC OOO M M P A A R R EEEEE %
% %
% %
% MagickCore Image Comparison Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/compare.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/statistic.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p a r e I m a g e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompareImageChannels() compares one or more image channels of an image
% to a reconstructed image and returns the difference image.
%
% The format of the CompareImageChannels method is:
%
% Image *CompareImageChannels(const Image *image,
% const Image *reconstruct_image,const ChannelType channel,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o channel: the channel.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
const MetricType metric,double *distortion,ExceptionInfo *exception)
{
/*
Convenience wrapper: compare across the default composite channel set.
*/
return(CompareImageChannels(image,reconstruct_image,CompositeChannels,
metric,distortion,exception));
}
static size_t GetNumberChannels(const Image *image,const ChannelType channel)
{
size_t
count;

/*
Count the channels selected by the mask. Opacity counts only when the
image has a matte channel and the index channel only for CMYK images.
Always report at least one channel.
*/
count=((channel & RedChannel) != 0 ? 1 : 0)+
((channel & GreenChannel) != 0 ? 1 : 0)+
((channel & BlueChannel) != 0 ? 1 : 0);
if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
count++;
if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
count++;
if (count == 0)
count=1;
return(count);
}
static inline MagickBooleanType ValidateImageMorphology(
const Image *magick_restrict image,
const Image *magick_restrict reconstruct_image)
{
/*
The images are morphologically compatible when their default channel
counts agree.
*/
return(GetNumberChannels(image,DefaultChannels) ==
GetNumberChannels(reconstruct_image,DefaultChannels) ? MagickTrue :
MagickFalse);
}
/*
  Compare one or more channels of image against reconstruct_image; return a
  difference image highlighting pixels that differ (NULL on failure).  The
  computed distortion for the requested metric is stored in *distortion.
*/
MagickExport Image *CompareImageChannels(Image *image,
const Image *reconstruct_image,const ChannelType channel,
const MetricType metric,double *distortion,ExceptionInfo *exception)
{
CacheView
*highlight_view,
*image_view,
*reconstruct_view;
const char
*artifact;
double
fuzz;
Image
*clone_image,
*difference_image,
*highlight_image;
MagickBooleanType
status;
MagickPixelPacket
highlight,
lowlight,
zero;
size_t
columns,
rows;
ssize_t
y;
/*
Validate arguments; morphology must match except for perceptual hash.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(reconstruct_image != (const Image *) NULL);
assert(reconstruct_image->signature == MagickCoreSignature);
assert(distortion != (double *) NULL);
*distortion=0.0;
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (metric != PerceptualHashErrorMetric)
if (ValidateImageMorphology(image,reconstruct_image) == MagickFalse)
ThrowImageException(ImageError,"ImageMorphologyDiffers");
status=GetImageChannelDistortion(image,reconstruct_image,channel,metric,
distortion,exception);
if (status == MagickFalse)
return((Image *) NULL);
/*
Clone the input (mask removed) as the base of the difference image.
*/
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageMask(clone_image,(Image *) NULL);
difference_image=CloneImage(clone_image,0,0,MagickTrue,exception);
clone_image=DestroyImage(clone_image);
if (difference_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel);
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
highlight_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (highlight_image == (Image *) NULL)
{
difference_image=DestroyImage(difference_image);
return((Image *) NULL);
}
if (SetImageStorageClass(highlight_image,DirectClass) == MagickFalse)
{
InheritException(exception,&highlight_image->exception);
difference_image=DestroyImage(difference_image);
highlight_image=DestroyImage(highlight_image);
return((Image *) NULL);
}
(void) SetImageMask(highlight_image,(Image *) NULL);
(void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel);
/*
Highlight/lowlight colors: built-in defaults, overridable via the
"compare:highlight-color" and "compare:lowlight-color" artifacts.
*/
(void) QueryMagickColor("#f1001ecc",&highlight,exception);
artifact=GetImageArtifact(image,"compare:highlight-color");
if (artifact != (const char *) NULL)
(void) QueryMagickColor(artifact,&highlight,exception);
(void) QueryMagickColor("#ffffffcc",&lowlight,exception);
artifact=GetImageArtifact(image,"compare:lowlight-color");
if (artifact != (const char *) NULL)
(void) QueryMagickColor(artifact,&lowlight,exception);
if (highlight_image->colorspace == CMYKColorspace)
{
ConvertRGBToCMYK(&highlight);
ConvertRGBToCMYK(&lowlight);
}
/*
Generate difference image.
*/
status=MagickTrue;
fuzz=GetFuzzyColorDistance(image,reconstruct_image);
GetMagickPixelPacket(image,&zero);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
highlight_view=AcquireAuthenticCacheView(highlight_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,highlight_image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
MagickBooleanType
sync;
MagickPixelPacket
pixel,
reconstruct_pixel;
register const IndexPacket
*magick_restrict indexes,
*magick_restrict reconstruct_indexes;
register const PixelPacket
*magick_restrict p,
*magick_restrict q;
register IndexPacket
*magick_restrict highlight_indexes;
register PixelPacket
*magick_restrict r;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
highlight_indexes=GetCacheViewAuthenticIndexQueue(highlight_view);
pixel=zero;
reconstruct_pixel=zero;
for (x=0; x < (ssize_t) columns; x++)
{
MagickStatusType
difference;
SetMagickPixelPacket(image,p,indexes+x,&pixel);
SetMagickPixelPacket(reconstruct_image,q,reconstruct_indexes+x,
&reconstruct_pixel);
difference=MagickFalse;
if (channel == CompositeChannels)
{
if (IsMagickColorSimilar(&pixel,&reconstruct_pixel) == MagickFalse)
difference=MagickTrue;
}
else
{
double
Da,
distance,
Sa;
/*
Alpha-weighted per-channel comparison.  Sa scales the source pixel
by image's alpha; Da scales the reconstruct pixel by the
reconstruct image's alpha (bug fix: was image->matte).
*/
Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
(QuantumRange-OpaqueOpacity));
Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
if ((channel & RedChannel) != 0)
{
distance=Sa*GetPixelRed(p)-Da*GetPixelRed(q);
if ((distance*distance) > fuzz)
difference=MagickTrue;
}
if ((channel & GreenChannel) != 0)
{
distance=Sa*GetPixelGreen(p)-Da*GetPixelGreen(q);
if ((distance*distance) > fuzz)
difference=MagickTrue;
}
if ((channel & BlueChannel) != 0)
{
distance=Sa*GetPixelBlue(p)-Da*GetPixelBlue(q);
if ((distance*distance) > fuzz)
difference=MagickTrue;
}
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
{
distance=(double) GetPixelOpacity(p)-GetPixelOpacity(q);
if ((distance*distance) > fuzz)
difference=MagickTrue;
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
distance=Sa*indexes[x]-Da*reconstruct_indexes[x];
if ((distance*distance) > fuzz)
difference=MagickTrue;
}
}
if (difference != MagickFalse)
SetPixelPacket(highlight_image,&highlight,r,highlight_indexes+x);
else
SetPixelPacket(highlight_image,&lowlight,r,highlight_indexes+x);
p++;
q++;
r++;
}
sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
highlight_view=DestroyCacheView(highlight_view);
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
(void) CompositeImage(difference_image,image->compose,highlight_image,0,0);
highlight_image=DestroyImage(highlight_image);
if (status == MagickFalse)
difference_image=DestroyImage(difference_image);
return(difference_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l D i s t o r t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelDistortion() compares one or more image channels of an image
% to a reconstructed image and returns the specified distortion metric.
%
% The format of the GetImageChannelDistortion method is:
%
% MagickBooleanType GetImageChannelDistortion(const Image *image,
% const Image *reconstruct_image,const ChannelType channel,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o channel: the channel.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageDistortion(Image *image,
const Image *reconstruct_image,const MetricType metric,double *distortion,
ExceptionInfo *exception)
{
/*
Convenience wrapper: measure distortion over the composite channel set.
*/
return(GetImageChannelDistortion(image,reconstruct_image,CompositeChannels,
metric,distortion,exception));
}
/*
  GetAbsoluteDistortion() implements the AE metric: it counts, per channel
  and in aggregate, the pixels whose accumulated alpha-weighted squared
  channel difference exceeds the images' fuzz factor.  Counts accumulate
  into distortion[0..CompositeChannels].

  Fix: Da (the reconstruct image's alpha weight) previously tested
  image->matte; it must test reconstruct_image->matte, as every sibling
  metric in this file does.
*/
static MagickBooleanType GetAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,double *distortion,
  ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    fuzz;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Compute the absolute difference in pixels between two images.
  */
  status=MagickTrue;
  /*
    Scale the fuzzy color distance by the smaller channel count so the
    cumulative squared distance below compares against a like quantity.
  */
  fuzz=MagickMin(GetNumberChannels(image,channel),
    GetNumberChannels(reconstruct_image,channel))*
    GetFuzzyColorDistance(image,reconstruct_image);
  /*
    Visit the union of both geometries; the virtual cache supplies pixels
    where one image is smaller.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];

    register const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        distance,
        pixel,
        Sa;

      MagickBooleanType
        difference;

      difference=MagickFalse;
      /*
        Alpha weights: treat a missing matte channel as fully opaque.
      */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      /*
        distance accumulates squared differences across the requested
        channels; each channel's count increments as soon as the running
        total exceeds the fuzz threshold.
      */
      distance=0.0;
      if ((channel & RedChannel) != 0)
        {
          pixel=Sa*GetPixelRed(p)-Da*GetPixelRed(q);
          distance+=pixel*pixel;
          if (distance > fuzz)
            {
              channel_distortion[RedChannel]++;
              difference=MagickTrue;
            }
        }
      if ((channel & GreenChannel) != 0)
        {
          pixel=Sa*GetPixelGreen(p)-Da*GetPixelGreen(q);
          distance+=pixel*pixel;
          if (distance > fuzz)
            {
              channel_distortion[GreenChannel]++;
              difference=MagickTrue;
            }
        }
      if ((channel & BlueChannel) != 0)
        {
          pixel=Sa*GetPixelBlue(p)-Da*GetPixelBlue(q);
          distance+=pixel*pixel;
          if (distance > fuzz)
            {
              channel_distortion[BlueChannel]++;
              difference=MagickTrue;
            }
        }
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          pixel=(double) GetPixelOpacity(p)-GetPixelOpacity(q);
          distance+=pixel*pixel;
          if (distance > fuzz)
            {
              channel_distortion[OpacityChannel]++;
              difference=MagickTrue;
            }
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          pixel=Sa*indexes[x]-Da*reconstruct_indexes[x];
          distance+=pixel*pixel;
          if (distance > fuzz)
            {
              channel_distortion[BlackChannel]++;
              difference=MagickTrue;
            }
        }
      if (difference != MagickFalse)
        channel_distortion[CompositeChannels]++;
      p++;
      q++;
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetAbsoluteDistortion)
#endif
    /*
      Fold this row's per-channel counts into the shared totals.
    */
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
  GetFuzzDistortion() accumulates the mean squared alpha-weighted channel
  difference (normalized to [0,1] by QuantumScale) per channel; the
  composite entry is reduced to a root-mean-square over all channels.
*/
static MagickBooleanType GetFuzzDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  /*
    Visit the union of both geometries; the virtual cache supplies pixels
    where one image is smaller.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];

    register const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    /*
      Per-row accumulator, folded into the shared totals under a critical
      section below.
    */
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickRealType
        distance,
        Da,
        Sa;

      /*
        Alpha weights: treat a missing matte channel as fully opaque.
      */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelRed(p)-Da*GetPixelRed(q));
          channel_distortion[RedChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelGreen(p)-Da*GetPixelGreen(q));
          channel_distortion[GreenChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelBlue(p)-Da*GetPixelBlue(q));
          channel_distortion[BlueChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      /*
        Opacity differences are compared directly (not alpha-weighted);
        a missing matte channel contributes OpaqueOpacity.
      */
      if (((channel & OpacityChannel) != 0) && ((image->matte != MagickFalse) ||
          (reconstruct_image->matte != MagickFalse)))
        {
          distance=QuantumScale*((image->matte != MagickFalse ?
            GetPixelOpacity(p) : OpaqueOpacity)-
            (reconstruct_image->matte != MagickFalse ?
            GetPixelOpacity(q): OpaqueOpacity));
          channel_distortion[OpacityChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*(Sa*GetPixelIndex(indexes+x)-
            Da*GetPixelIndex(reconstruct_indexes+x));
          channel_distortion[BlackChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      p++;
      q++;
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetFuzzDistortion)
#endif
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Normalize sums to means, then reduce the composite entry to RMS.
  */
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
    distortion[i]/=((double) columns*rows);
  distortion[CompositeChannels]/=(double) GetNumberChannels(image,channel);
  distortion[CompositeChannels]=sqrt(distortion[CompositeChannels]);
  return(status);
}
/*
  GetMeanAbsoluteDistortion() accumulates the mean absolute alpha-weighted
  channel difference (normalized by QuantumScale) per channel and in the
  composite entry (MAE metric).
*/
static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  /*
    Visit the union of both geometries; the virtual cache supplies pixels
    where one image is smaller.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];

    register const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    /*
      Per-row accumulator, folded into the shared totals under a critical
      section below.
    */
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickRealType
        distance,
        Da,
        Sa;

      /*
        Alpha weights: treat a missing matte channel as fully opaque.
      */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*fabs(Sa*GetPixelRed(p)-Da*GetPixelRed(q));
          channel_distortion[RedChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=QuantumScale*fabs(Sa*GetPixelGreen(p)-Da*GetPixelGreen(q));
          channel_distortion[GreenChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*fabs(Sa*GetPixelBlue(p)-Da*GetPixelBlue(q));
          channel_distortion[BlueChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      /*
        Opacity differences are compared directly (not alpha-weighted).
      */
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          distance=QuantumScale*fabs(GetPixelOpacity(p)-(double)
            GetPixelOpacity(q));
          channel_distortion[OpacityChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*fabs(Sa*GetPixelIndex(indexes+x)-Da*
            GetPixelIndex(reconstruct_indexes+x));
          channel_distortion[BlackChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      p++;
      q++;
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanAbsoluteError)
#endif
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Normalize sums to means; the composite entry is further averaged over
    the number of channels measured.
  */
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
    distortion[i]/=((double) columns*rows);
  distortion[CompositeChannels]/=(double) GetNumberChannels(image,channel);
  return(status);
}
/*
  GetMeanErrorPerPixel() accumulates absolute alpha-weighted channel
  differences into distortion[] and also records the mean, normalized-mean,
  and normalized-maximum error in image->error (hence the non-const image).
  Runs serially: it updates shared accumulators and distortion[] directly.
*/
static MagickBooleanType GetMeanErrorPerPixel(Image *image,
  const Image *reconstruct_image,const ChannelType channel,double *distortion,
  ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  MagickRealType
    area,            /* number of channel samples compared */
    gamma,           /* reciprocal of area (safe against zero) */
    maximum_error,   /* largest single channel difference seen */
    mean_error;      /* sum of squared channel differences */

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  area=0.0;
  maximum_error=0.0;
  mean_error=0.0;
  /*
    Visit the union of both geometries; the virtual cache supplies pixels
    where one image is smaller.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        break;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickRealType
        distance,
        Da,
        Sa;

      /*
        Alpha weights: treat a missing matte channel as fully opaque.
      */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      if ((channel & RedChannel) != 0)
        {
          distance=fabs(Sa*GetPixelRed(p)-Da*GetPixelRed(q));
          distortion[RedChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=fabs(Sa*GetPixelGreen(p)-Da*GetPixelGreen(q));
          distortion[GreenChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=fabs(Sa*GetPixelBlue(p)-Da*GetPixelBlue(q));
          distortion[BlueChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      /*
        Opacity differences are compared directly (not alpha-weighted).
      */
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          distance=fabs((double) GetPixelOpacity(p)-
            GetPixelOpacity(q));
          distortion[OpacityChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=fabs(Sa*GetPixelIndex(indexes+x)-Da*
            GetPixelIndex(reconstruct_indexes+x));
          distortion[BlackChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      p++;
      q++;
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    PerceptibleReciprocal() guards against a zero sample count.
  */
  gamma=PerceptibleReciprocal(area);
  image->error.mean_error_per_pixel=gamma*distortion[CompositeChannels];
  image->error.normalized_mean_error=gamma*QuantumScale*QuantumScale*mean_error;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  return(status);
}
/*
  GetMeanSquaredDistortion() accumulates the mean squared alpha-weighted
  channel difference (normalized by QuantumScale) per channel and in the
  composite entry (MSE metric).  Also the basis for PSNR and RMSE.
*/
static MagickBooleanType GetMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  /*
    Visit the union of both geometries; the virtual cache supplies pixels
    where one image is smaller.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];

    register const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    /*
      Per-row accumulator, folded into the shared totals under a critical
      section below.
    */
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickRealType
        distance,
        Da,
        Sa;

      /*
        Alpha weights: treat a missing matte channel as fully opaque.
      */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelRed(p)-Da*GetPixelRed(q));
          channel_distortion[RedChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelGreen(p)-Da*GetPixelGreen(q));
          channel_distortion[GreenChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelBlue(p)-Da*GetPixelBlue(q));
          channel_distortion[BlueChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      /*
        Opacity differences are compared directly (not alpha-weighted).
      */
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          distance=QuantumScale*(GetPixelOpacity(p)-(MagickRealType)
            GetPixelOpacity(q));
          channel_distortion[OpacityChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*(Sa*GetPixelIndex(indexes+x)-Da*
            GetPixelIndex(reconstruct_indexes+x));
          channel_distortion[BlackChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      p++;
      q++;
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanSquaredError)
#endif
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Normalize sums to means; the composite entry is further averaged over
    the number of channels measured.
  */
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
    distortion[i]/=((double) columns*rows);
  distortion[CompositeChannels]/=(double) GetNumberChannels(image,channel);
  return(status);
}
/*
  GetNormalizedCrossCorrelationDistortion() computes the NCC metric: the
  covariance of each channel pair (mean-subtracted, alpha-weighted) divided
  by the product of the two standard deviations.  Normalizing by mean and
  deviation makes the metric robust to lighting/exposure differences.
*/
static MagickBooleanType GetNormalizedCrossCorrelationDistortion(
  const Image *image,const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *image_view,
    *reconstruct_view;

  ChannelStatistics
    *image_statistics,
    *reconstruct_statistics;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    area;

  register ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Normalize to account for variation due to lighting and exposure condition.
  */
  image_statistics=GetImageChannelStatistics(image,exception);
  reconstruct_statistics=GetImageChannelStatistics(reconstruct_image,exception);
  if ((image_statistics == (ChannelStatistics *) NULL) ||
      (reconstruct_statistics == (ChannelStatistics *) NULL))
    {
      /*
        Release whichever statistics buffer was successfully acquired.
      */
      if (image_statistics != (ChannelStatistics *) NULL)
        image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          image_statistics);
      if (reconstruct_statistics != (ChannelStatistics *) NULL)
        reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          reconstruct_statistics);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
    distortion[i]=0.0;
  /*
    Visit the union of both geometries; area pre-divides each term so the
    accumulated sum is already a mean.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=1.0/((MagickRealType) columns*rows);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickRealType
        Da,
        Sa;

      /*
        Alpha weights: treat a missing matte channel as fully opaque.
      */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      /*
        Accumulate the mean-subtracted cross products (covariance terms).
      */
      if ((channel & RedChannel) != 0)
        distortion[RedChannel]+=area*QuantumScale*(Sa*GetPixelRed(p)-
          image_statistics[RedChannel].mean)*(Da*GetPixelRed(q)-
          reconstruct_statistics[RedChannel].mean);
      if ((channel & GreenChannel) != 0)
        distortion[GreenChannel]+=area*QuantumScale*(Sa*GetPixelGreen(p)-
          image_statistics[GreenChannel].mean)*(Da*GetPixelGreen(q)-
          reconstruct_statistics[GreenChannel].mean);
      if ((channel & BlueChannel) != 0)
        distortion[BlueChannel]+=area*QuantumScale*(Sa*GetPixelBlue(p)-
          image_statistics[BlueChannel].mean)*(Da*GetPixelBlue(q)-
          reconstruct_statistics[BlueChannel].mean);
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        distortion[OpacityChannel]+=area*QuantumScale*(
          GetPixelOpacity(p)-image_statistics[OpacityChannel].mean)*
          (GetPixelOpacity(q)-reconstruct_statistics[OpacityChannel].mean);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        distortion[BlackChannel]+=area*QuantumScale*(Sa*
          GetPixelIndex(indexes+x)-image_statistics[BlackChannel].mean)*(Da*
          GetPixelIndex(reconstruct_indexes+x)-
          reconstruct_statistics[BlackChannel].mean);
      p++;
      q++;
    }
    /*
      Report per-row progress; the caller can cancel via the monitor.
    */
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SimilarityImageTag,progress++,rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Divide by the standard deviation.
  */
  for (i=0; i < (ssize_t) CompositeChannels; i++)
  {
    double
      gamma;

    gamma=image_statistics[i].standard_deviation*
      reconstruct_statistics[i].standard_deviation;
    gamma=PerceptibleReciprocal(gamma);
    distortion[i]=QuantumRange*gamma*distortion[i];
  }
  /*
    Composite entry: RMS of the correlations of the measured channels.
  */
  distortion[CompositeChannels]=0.0;
  if ((channel & RedChannel) != 0)
    distortion[CompositeChannels]+=distortion[RedChannel]*
      distortion[RedChannel];
  if ((channel & GreenChannel) != 0)
    distortion[CompositeChannels]+=distortion[GreenChannel]*
      distortion[GreenChannel];
  if ((channel & BlueChannel) != 0)
    distortion[CompositeChannels]+=distortion[BlueChannel]*
      distortion[BlueChannel];
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    distortion[CompositeChannels]+=distortion[OpacityChannel]*
      distortion[OpacityChannel];
  if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
    distortion[CompositeChannels]+=distortion[BlackChannel]*
      distortion[BlackChannel];
  distortion[CompositeChannels]=sqrt(distortion[CompositeChannels]/
    GetNumberChannels(image,channel));
  /*
    Free resources.
  */
  reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    reconstruct_statistics);
  image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    image_statistics);
  return(status);
}
/*
  GetPeakAbsoluteDistortion() records, per channel and in the composite
  entry, the maximum absolute alpha-weighted channel difference (normalized
  by QuantumScale) over all pixels (PAE metric).
*/
static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  /*
    Visit the union of both geometries; the virtual cache supplies pixels
    where one image is smaller.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];

    register const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    /*
      Per-row maxima, merged into the shared maxima under a critical
      section below.
    */
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickRealType
        distance,
        Da,
        Sa;

      /*
        Alpha weights: treat a missing matte channel as fully opaque.
      */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*fabs(Sa*GetPixelRed(p)-Da*GetPixelRed(q));
          if (distance > channel_distortion[RedChannel])
            channel_distortion[RedChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=QuantumScale*fabs(Sa*GetPixelGreen(p)-Da*GetPixelGreen(q));
          if (distance > channel_distortion[GreenChannel])
            channel_distortion[GreenChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*fabs(Sa*GetPixelBlue(p)-Da*GetPixelBlue(q));
          if (distance > channel_distortion[BlueChannel])
            channel_distortion[BlueChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      /*
        Opacity differences are compared directly (not alpha-weighted).
      */
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          distance=QuantumScale*fabs(GetPixelOpacity(p)-(double)
            GetPixelOpacity(q));
          if (distance > channel_distortion[OpacityChannel])
            channel_distortion[OpacityChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*fabs(Sa*GetPixelIndex(indexes+x)-Da*
            GetPixelIndex(reconstruct_indexes+x));
          if (distance > channel_distortion[BlackChannel])
            channel_distortion[BlackChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      p++;
      q++;
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPeakAbsoluteError)
#endif
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      if (channel_distortion[i] > distortion[i])
        distortion[i]=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
/*
  GetPeakSignalToNoiseRatio() converts the mean-squared distortion into PSNR
  (decibels): 20*log10(1/sqrt(MSE)) per measured channel, with INFINITY for
  identical channels (MSE below epsilon).

  Fix: the composite entry was wrapped in an inverted `>= MagickEpsilon`
  guard, making its INFINITY branch unreachable and leaving the raw MSE in
  place when the images matched; it now follows the per-channel pattern.
*/
static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=GetMeanSquaredDistortion(image,reconstruct_image,channel,distortion,
    exception);
  if ((channel & RedChannel) != 0)
    {
      if (fabs(distortion[RedChannel]) < MagickEpsilon)
        distortion[RedChannel]=INFINITY;
      else
        distortion[RedChannel]=20.0*MagickLog10(1.0/
          sqrt(distortion[RedChannel]));
    }
  if ((channel & GreenChannel) != 0)
    {
      if (fabs(distortion[GreenChannel]) < MagickEpsilon)
        distortion[GreenChannel]=INFINITY;
      else
        distortion[GreenChannel]=20.0*MagickLog10(1.0/
          sqrt(distortion[GreenChannel]));
    }
  if ((channel & BlueChannel) != 0)
    {
      if (fabs(distortion[BlueChannel]) < MagickEpsilon)
        distortion[BlueChannel]=INFINITY;
      else
        distortion[BlueChannel]=20.0*MagickLog10(1.0/
          sqrt(distortion[BlueChannel]));
    }
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    {
      if (fabs(distortion[OpacityChannel]) < MagickEpsilon)
        distortion[OpacityChannel]=INFINITY;
      else
        distortion[OpacityChannel]=20.0*MagickLog10(1.0/
          sqrt(distortion[OpacityChannel]));
    }
  if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
    {
      if (fabs(distortion[BlackChannel]) < MagickEpsilon)
        distortion[BlackChannel]=INFINITY;
      else
        distortion[BlackChannel]=20.0*MagickLog10(1.0/
          sqrt(distortion[BlackChannel]));
    }
  /*
    Composite channel: same conversion as the individual channels.
  */
  if (fabs(distortion[CompositeChannels]) < MagickEpsilon)
    distortion[CompositeChannels]=INFINITY;
  else
    distortion[CompositeChannels]=20.0*MagickLog10(1.0/
      sqrt(distortion[CompositeChannels]));
  return(status);
}
/*
  GetPerceptualHashDistortion() compares the perceptual hashes (image
  moments) of the two images: the sum of squared differences of the P
  moments (sRGB pass) plus the Q moments (HCLp pass) per channel and in the
  composite entry.  Assumes the caller zero-initialized distortion[].
*/
static MagickBooleanType GetPerceptualHashDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,double *distortion,
  ExceptionInfo *exception)
{
  ChannelPerceptualHash
    *image_phash,
    *reconstruct_phash;

  double
    difference;

  register ssize_t
    i;

  /*
    Compute perceptual hash in the sRGB colorspace.
  */
  image_phash=GetImageChannelPerceptualHash(image,exception);
  if (image_phash == (ChannelPerceptualHash *) NULL)
    return(MagickFalse);
  reconstruct_phash=GetImageChannelPerceptualHash(reconstruct_image,exception);
  if (reconstruct_phash == (ChannelPerceptualHash *) NULL)
    {
      /* Release the first hash before bailing out. */
      image_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(image_phash);
      return(MagickFalse);
    }
  for (i=0; i < MaximumNumberOfImageMoments; i++)
  {
    /*
      Compute sum of moment differences squared.
    */
    if ((channel & RedChannel) != 0)
      {
        difference=reconstruct_phash[RedChannel].P[i]-
          image_phash[RedChannel].P[i];
        distortion[RedChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if ((channel & GreenChannel) != 0)
      {
        difference=reconstruct_phash[GreenChannel].P[i]-
          image_phash[GreenChannel].P[i];
        distortion[GreenChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if ((channel & BlueChannel) != 0)
      {
        difference=reconstruct_phash[BlueChannel].P[i]-
          image_phash[BlueChannel].P[i];
        distortion[BlueChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse) &&
        (reconstruct_image->matte != MagickFalse))
      {
        difference=reconstruct_phash[OpacityChannel].P[i]-
          image_phash[OpacityChannel].P[i];
        distortion[OpacityChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace) &&
        (reconstruct_image->colorspace == CMYKColorspace))
      {
        difference=reconstruct_phash[IndexChannel].P[i]-
          image_phash[IndexChannel].P[i];
        distortion[IndexChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
  }
  /*
    Compute perceptual hash in the HCLP colorspace.
  */
  for (i=0; i < MaximumNumberOfImageMoments; i++)
  {
    /*
      Compute sum of moment differences squared.
    */
    if ((channel & RedChannel) != 0)
      {
        difference=reconstruct_phash[RedChannel].Q[i]-
          image_phash[RedChannel].Q[i];
        distortion[RedChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if ((channel & GreenChannel) != 0)
      {
        difference=reconstruct_phash[GreenChannel].Q[i]-
          image_phash[GreenChannel].Q[i];
        distortion[GreenChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if ((channel & BlueChannel) != 0)
      {
        difference=reconstruct_phash[BlueChannel].Q[i]-
          image_phash[BlueChannel].Q[i];
        distortion[BlueChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse) &&
        (reconstruct_image->matte != MagickFalse))
      {
        difference=reconstruct_phash[OpacityChannel].Q[i]-
          image_phash[OpacityChannel].Q[i];
        distortion[OpacityChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace) &&
        (reconstruct_image->colorspace == CMYKColorspace))
      {
        difference=reconstruct_phash[IndexChannel].Q[i]-
          image_phash[IndexChannel].Q[i];
        distortion[IndexChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
  }
  /*
    Free resources.
  */
  reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
    reconstruct_phash);
  image_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(image_phash);
  return(MagickTrue);
}
/*
  GetRootMeanSquaredDistortion() computes the RMSE metric: the mean-squared
  distortion with the square root applied to every channel entry that was
  actually measured, and always to the composite entry.
*/
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,double *distortion,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=GetMeanSquaredDistortion(image,reconstruct_image,channel,distortion,
    exception);
  /*
    Take the square root of each measured channel's MSE.
  */
  if ((channel & RedChannel) != 0)
    distortion[RedChannel]=sqrt(distortion[RedChannel]);
  if ((channel & GreenChannel) != 0)
    distortion[GreenChannel]=sqrt(distortion[GreenChannel]);
  if ((channel & BlueChannel) != 0)
    distortion[BlueChannel]=sqrt(distortion[BlueChannel]);
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    distortion[OpacityChannel]=sqrt(distortion[OpacityChannel]);
  if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
    distortion[BlackChannel]=sqrt(distortion[BlackChannel]);
  distortion[CompositeChannels]=sqrt(distortion[CompositeChannels]);
  return(status);
}
/*
  GetImageChannelDistortion() compares one or more channels of an image to a
  reconstructed image and stores the specified distortion metric (the
  composite-channel value) in *distortion.  Fix: the debug trace event was
  logged twice at entry; it is now logged once.
*/
MagickExport MagickBooleanType GetImageChannelDistortion(Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *distortion=0.0;
  /*
    Morphology must match except for the perceptual hash metric, which is
    scale-invariant.
  */
  if (metric != PerceptualHashErrorMetric)
    if (ValidateImageMorphology(image,reconstruct_image) == MagickFalse)
      ThrowBinaryException(ImageError,"ImageMorphologyDiffers",image->filename);
  /*
    Get image distortion: per-channel results are accumulated in
    channel_distortion[], with the overall value at CompositeChannels.
  */
  length=CompositeChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_distortion,0,length*
    sizeof(*channel_distortion));
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:  /* unknown metrics fall back to normalized cross-correlation */
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel,channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
  }
  *distortion=channel_distortion[CompositeChannels];
  channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
  (void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(),
    *distortion);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l D i s t o r t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelDistortions() compares the image channels of an image to a
% reconstructed image and returns the specified distortion metric for each
% channel.
%
% The format of the GetImageChannelDistortions method is:
%
%      double *GetImageChannelDistortions(Image *image,
% const Image *reconstruct_image,const MetricType metric,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageChannelDistortions() compares the channels of an image to a
  reconstructed image and returns a newly allocated array of per-channel
  distortion values (indexed by channel, with the composite value at
  CompositeChannels); the caller owns the array and must relinquish it.
  Returns NULL on failure.  Fix: the debug trace event was logged twice at
  entry; it is now logged once.
*/
MagickExport double *GetImageChannelDistortions(Image *image,
  const Image *reconstruct_image,const MetricType metric,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Morphology must match except for the perceptual hash metric, which is
    scale-invariant.
  */
  if (metric != PerceptualHashErrorMetric)
    if (ValidateImageMorphology(image,reconstruct_image) == MagickFalse)
      {
        (void) ThrowMagickException(&image->exception,GetMagickModule(),
          ImageError,"ImageMorphologyDiffers","`%s'",image->filename);
        return((double *) NULL);
      }
  /*
    Get image distortion over all channels (CompositeChannels).
  */
  length=CompositeChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:  /* unknown metrics fall back to normalized cross-correlation */
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
  }
  if (status == MagickFalse)
    {
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e s E q u a l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImagesEqual() measures the difference between colors at each pixel
% location of two images. A value other than 0 means the colors match
% exactly. Otherwise an error measure is computed by summing over all
% pixels in an image the distance squared in RGB space between each image
% pixel and its corresponding pixel in the reconstruct image. The error
% measure is assigned to these image members:
%
% o mean_error_per_pixel: The mean error for any single pixel in
% the image.
%
% o normalized_mean_error: The normalized mean quantization error for
% any single pixel in the image. This distance measure is normalized to
% a range between 0 and 1. It is independent of the range of red, green,
% and blue values in the image.
%
% o normalized_maximum_error: The normalized maximum quantization
% error for any single pixel in the image. This distance measure is
% normalized to a range between 0 and 1. It is independent of the range
% of red, green, and blue values in your image.
%
% A small normalized mean square error, accessed as
% image->normalized_mean_error, suggests the images are very similar in
% spatial layout and color.
%
% The format of the IsImagesEqual method is:
%
% MagickBooleanType IsImagesEqual(Image *image,
% const Image *reconstruct_image)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
*/
/*
  IsImagesEqual() accumulates the per-channel absolute differences between two
  images and stores the mean, normalized-mean, and normalized-maximum error
  statistics in image->error.  Returns MagickTrue only when the mean error per
  pixel is exactly zero (i.e. the images match).
*/
MagickExport MagickBooleanType IsImagesEqual(Image *image,
  const Image *reconstruct_image)
{
  CacheView
    *image_view,
    *reconstruct_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickRealType
    area,                     /* number of channel samples compared */
    gamma,                    /* safe reciprocal of area */
    maximum_error,            /* largest single-channel difference seen */
    mean_error,               /* sum of squared differences */
    mean_error_per_pixel;     /* sum of absolute differences */

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  exception=(&image->exception);
  if (ValidateImageMorphology(image,reconstruct_image) == MagickFalse)
    ThrowBinaryException(ImageError,"ImageMorphologyDiffers",image->filename);
  area=0.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  /* Iterate over the union of both geometries; virtual views supply pixels
     outside either image's bounds. */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      break;  /* pixel access failed; report statistics gathered so far */
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickRealType
        distance;

      /* Each channel contributes one sample to the running statistics. */
      distance=fabs(GetPixelRed(p)-(double) GetPixelRed(q));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      area++;
      distance=fabs(GetPixelGreen(p)-(double) GetPixelGreen(q));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      area++;
      distance=fabs(GetPixelBlue(p)-(double) GetPixelBlue(q));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      area++;
      if (image->matte != MagickFalse)
        {
          distance=fabs(GetPixelOpacity(p)-(double) GetPixelOpacity(q));
          mean_error_per_pixel+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      /* The black channel is compared only when both images are CMYK. */
      if ((image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=fabs(GetPixelIndex(indexes+x)-(double)
            GetPixelIndex(reconstruct_indexes+x));
          mean_error_per_pixel+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      p++;
      q++;
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Normalize: gamma is 1/area guarded against division by zero. */
  gamma=PerceptibleReciprocal(area);
  image->error.mean_error_per_pixel=gamma*mean_error_per_pixel;
  image->error.normalized_mean_error=gamma*QuantumScale*QuantumScale*mean_error;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i m i l a r i t y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SimilarityImage() compares the reference image of the image and returns the
% best match offset. In addition, it returns a similarity image such that an
% exact match location is completely white and if none of the pixels match,
% black, otherwise some gray level in-between.
%
%  The format of the SimilarityImage method is:
%
% Image *SimilarityImage(const Image *image,const Image *reference,
% RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reference: find an area of the image that closely resembles this image.
%
%    o offset: the best match offset of the reference image within the image.
%
% o similarity: the computed similarity between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetSimilarityMetric() crops the image at the given offset to the geometry of
  the reference image and returns the distortion between the crop and the
  reference.  Returns 0.0 when the crop cannot be created; a failed distortion
  measurement also leaves the result at 0.0 (best effort).
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  double
    distortion;

  Image
    *crop_image;

  RectangleInfo
    geometry;

  SetGeometry(reference,&geometry);
  geometry.x=x_offset;
  geometry.y=y_offset;
  crop_image=CropImage(image,&geometry,exception);
  if (crop_image == (Image *) NULL)
    return(0.0);
  distortion=0.0;
  (void) GetImageDistortion(crop_image,reference,metric,&distortion,
    exception);
  crop_image=DestroyImage(crop_image);
  return(distortion);
}
/*
  SimilarityImage() is a convenience wrapper around SimilarityMetricImage()
  that searches with the root mean squared error metric.
*/
MagickExport Image *SimilarityImage(Image *image,const Image *reference,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
  return(SimilarityMetricImage(image,reference,RootMeanSquaredErrorMetric,
    offset,similarity_metric,exception));
}
/*
  SimilarityMetricImage() scans every offset at which the reference image fits
  inside the image, measures the distortion at each position, records the best
  match in *offset / *similarity_metric, and returns a map image whose
  intensity encodes the similarity at each offset (white = exact match).
  Fix: the OpenMP critical section previously guarded only the thread-local
  metric adjustment, leaving the read-compare-update of the shared best match
  (*similarity_metric, *offset) unprotected — a data race.  The critical
  section now wraps the shared update.
*/
MagickExport Image *SimilarityMetricImage(Image *image,const Image *reference,
  const MetricType metric,RectangleInfo *offset,double *similarity_metric,
  ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *similarity_view;

  const char
    *artifact;

  double
    similarity_threshold;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(offset != (RectangleInfo *) NULL);
  SetGeometry(reference,offset);
  *similarity_metric=MagickMaximumValue;
  if (ValidateImageMorphology(image,reference) == MagickFalse)
    ThrowImageException(ImageError,"ImageMorphologyDiffers");
  /*
    The similarity map has one pixel per candidate offset.
  */
  similarity_image=CloneImage(image,image->columns-reference->columns+1,
    image->rows-reference->rows+1,MagickTrue,exception);
  if (similarity_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(similarity_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&similarity_image->exception);
      similarity_image=DestroyImage(similarity_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel);
  /*
    Measure similarity of reference image against image.  A user-supplied
    threshold lets the scan terminate early once a good-enough match is found.
  */
  similarity_threshold=(-1.0);
  artifact=GetImageArtifact(image,"compare:similarity-threshold");
  if (artifact != (const char *) NULL)
    similarity_threshold=StringToDouble(artifact,(char **) NULL);
  status=MagickTrue;
  progress=0;
  similarity_view=AcquireVirtualCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    shared(progress,status,similarity_metric) \
    magick_number_threads(image,image,image->rows-reference->rows+1,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
  {
    double
      similarity;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp flush(similarity_metric)
#endif
    if (*similarity_metric <= similarity_threshold)
      continue;
    q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
      1,exception);
    if (q == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
    {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp flush(similarity_metric)
#endif
      if (*similarity_metric <= similarity_threshold)
        break;
      similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
      /* NCC reports correlation, not distortion: invert it. */
      if ((metric == NormalizedCrossCorrelationErrorMetric) ||
          (metric == UndefinedErrorMetric))
        similarity=1.0-similarity;
      /* Protect the shared best-match state against concurrent updates. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_SimilarityImage)
#endif
      if (similarity < *similarity_metric)
        {
          *similarity_metric=similarity;
          offset->x=x;
          offset->y=y;
        }
      /* Rescale the perceptual hash so the map stays within [0,1]. */
      if (metric == PerceptualHashErrorMetric)
        similarity=MagickMin(0.01*similarity,1.0);
      SetPixelRed(q,ClampToQuantum(QuantumRange-QuantumRange*similarity));
      SetPixelGreen(q,GetPixelRed(q));
      SetPixelBlue(q,GetPixelRed(q));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SimilarityImage)
#endif
        proceed=SetImageProgress(image,SimilarityImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  similarity_view=DestroyCacheView(similarity_view);
  if (status == MagickFalse)
    similarity_image=DestroyImage(similarity_image);
  return(similarity_image);
}
|
read_ascii.c | /*******************************************************************************
* read_ascii.c: this file is part of the FCFC program.
* FCFC: Fast Correlation Function Calculator.
* Github repository:
https://github.com/cheng-zhao/FCFC
* Copyright (c) 2020 -- 2021 Cheng Zhao <zhaocheng03@gmail.com> [MIT license]
*******************************************************************************/
#include "read_file.h"
#include "ascii_fmtr.h"
#include "libast.h"
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <ctype.h>
#ifdef OMP
#include <omp.h>
#endif
/*============================================================================*\
Definitions for libast compatible data types
\*============================================================================*/
#define AST_DTYPE_NULL 0 /* not supported by libast */
#define AST_DTYPE_STR_SPACE (-AST_DTYPE_STRING) /* string with spaces */
#define AST_DTYPE(x) ((x < 0) ? -(unsigned)(x) : x) /* abs(x) */
/*============================================================================*\
Macros for error handling
\*============================================================================*/
#define P_AST_ERR(ast) ast_perror(ast, stderr, FMT_ERR);
#define CLEAN_PTR_NOWT \
ast_destroy(ast_pos[0]); ast_destroy(ast_pos[1]); \
ast_destroy(ast_pos[2]); ast_destroy(ast_sel); \
ascii_arg_destroy(arg, nc); free(col); \
if (chunk) free(chunk); \
if (fp) fclose(fp); \
if (dat) free(dat);
#ifdef FCFC_DATA_WEIGHT
#define CLEAN_PTR {CLEAN_PTR_NOWT; ast_destroy(ast_wt);}
#else
#define CLEAN_PTR {CLEAN_PTR_NOWT}
#endif
#ifdef OMP
#define FCFC_QUIT(x) { \
printf(FMT_FAIL); \
P_EXT("failed to read the ASCII file\n"); \
exit(x); \
}
#endif
/*============================================================================*\
Data structure for ASCII columns
\*============================================================================*/
/* Structure for recording libast compatible data types.
   One instance holds the parsed value of a single (non-suppressed) column;
   `dtype` selects the active member of the union.  For strings, `sval.str`
   points into the caller's line buffer (not owned) — TODO confirm lifetime
   against ascii_read_line. */
typedef struct {
  int dtype;            /* AST_DTYPE_NULL or libast compatible data type */
  union {               /* variable for storing the value */
    int ival; long lval; float fval; double dval;
    struct ast_string_struct_t { int len; const char *str; } sval;
  } v;
} asc_col_t;
/*============================================================================*\
Functions for parsing ASCII columns
\*============================================================================*/
/******************************************************************************
Function `ascii_col_init`:
  Initialise columns according to the arguments parsed from the formatter.
  Formatters that libast cannot evaluate directly (unsupported conversions and
  strings) are rewritten in place with the assignment-suppression flag '*' so
  that later `sscanf` calls only report the consumed length via "%n".
Arguments:
  * `arg`:      arguments parsed from the formatter;
  * `num`:      number of parsed arguments;
  * `rnum`:     number of arguments that are not suppressed.
Return:
  Pointer to the structure array for ASCII columns on success; NULL on error.
******************************************************************************/
static asc_col_t *ascii_col_init(asc_arg_t *arg, const int num,
    const int rnum) {
  if (rnum <= 0) return NULL;
  asc_col_t *col = malloc(rnum * sizeof(asc_col_t));
  if (!col) return NULL;

  int j = -1;           /* index into `col`, advanced per non-skipped column */
  for (int i = 0; i < num; i++) {
    if (arg[i].dtype == ASCII_DTYPE_SKIP) continue;
    if (++j >= rnum) {  /* defensive: more usable columns than `rnum` claims */
      P_ERR("unknown error for identifying ASCII columns\n");
      free(col);
      return NULL;
    }
    /* Convert the `fscanf` type to libast type. */
    switch (arg[i].dtype) {
      case ASCII_DTYPE_INT: col[j].dtype = AST_DTYPE_INT; break;
      case ASCII_DTYPE_LONG: col[j].dtype = AST_DTYPE_LONG; break;
      case ASCII_DTYPE_FLT: col[j].dtype = AST_DTYPE_FLOAT; break;
      case ASCII_DTYPE_DBL: col[j].dtype = AST_DTYPE_DOUBLE; break;
      case ASCII_DTYPE_STR: col[j].dtype = AST_DTYPE_STRING; break;
      case ASCII_DTYPE_CHAR: col[j].dtype = AST_DTYPE_STRING; break;
      default: /* data types that are not supported by libast */
        col[j].dtype = AST_DTYPE_NULL;
        P_WRN("unsupported formatter `%s', "
            "the corresponding column is not used\n", arg[i].fmtr);
        break;
    }
    /* Add a suppressing symbol '*' to the formatter string. */
    if (col[j].dtype == AST_DTYPE_NULL || col[j].dtype == AST_DTYPE_STRING) {
      int len = strlen(arg[i].fmtr) + 1;        /* fmtr is null terminated */
      /* Grow by one byte to make room for the inserted '*'. */
      char *tmp = realloc(arg[i].fmtr, (len + 1) * sizeof(char));
      if (!tmp) {
        free(col);
        return NULL;
      }
      arg[i].fmtr = tmp;
      /* Parse the formatter string to find the right place for '*'. */
      char *fmt = tmp;
      while (*fmt) {
        char c = *fmt++;
        if (c != '%') continue;
        c = *fmt++;
        if (c == '%') continue;         /* "%%" matches a literal percent */
        /* Check if the formatter parses whitespaces: any prefix before the
           conversion (fmt - tmp > 2) that is not whitespace implies the
           string conversion may consume spaces. */
        if (col[j].dtype == AST_DTYPE_STRING && fmt - tmp > 2 && !isspace(*tmp))
          col[j].dtype = AST_DTYPE_STR_SPACE;
        /* Now add '*': shift the tail (including the terminator) right by
           one and write '*' just after the '%'. */
        memmove(fmt, fmt - 1, tmp + len - fmt + 1);
        *(fmt - 1) = '*';
        /* The formatter can still parse whitespaces with '[]'. */
        if (col[j].dtype == AST_DTYPE_STRING) {
          while (c != '[' && c != '\0') c = *fmt++;
          if (c == '[') {
            if (*fmt == '^') ++fmt;     /* negated scanset */
            if (*fmt == ']') ++fmt;     /* literal ']' as first member */
            while ((c = *fmt++) != '\0' && c != ']' && c != ' ');
            if (c == ' ') col[j].dtype = AST_DTYPE_STR_SPACE;
          }
        }
        break;
      }
    }
  }
  return col;
}
/******************************************************************************
Function `ascii_read_line`:
  Read a string line to column variables.
  Each per-column formatter is expected to report the number of characters it
  consumed via a trailing "%n" (presumably arranged by `parse_ascii_fmtr` /
  `ascii_col_init` — TODO confirm), so `n == 0` indicates a match failure.
Arguments:
  * `line`:     the line to be read;
  * `arg`:      arguments parsed from the formatter;
  * `num`:      number of columns to be read;
  * `col`:      variables and their data types for each column;
  * `end`:      pointer to the first character that is not interpreted.
Return:
  Zero on success; non-zero on error.
******************************************************************************/
static int ascii_read_line(const char *line, const asc_arg_t *arg,
    const int num, asc_col_t *col, const char **end) {
  int j = 0;            /* index of the next non-suppressed column */
  for (int i = 0; i < num; i++) {
    int n = 0;          /* characters consumed by this column's formatter */
    /* User-suppressed columns. */
    if (arg[i].dtype == ASCII_DTYPE_SKIP) {
      /* Suppressed conversions assign nothing, so sscanf must return 0. */
      if (sscanf(line, arg[i].fmtr, &n) != 0 || n == 0) {
        *end = line;
        return FCFC_ERR_ASCII;
      }
    }
    else {
      switch (col[j].dtype) {
        case AST_DTYPE_NULL:
          /* Suppressed columns due to compatibility with libast. */
          if (sscanf(line, arg[i].fmtr, &n) != 0 || n == 0) {
            *end = line;
            return FCFC_ERR_ASCII;
          }
          break;
        case AST_DTYPE_INT:
          if (sscanf(line, arg[i].fmtr, &(col[j].v.ival), &n) != 1 || n == 0) {
            *end = line;
            return FCFC_ERR_ASCII;
          }
          break;
        case AST_DTYPE_LONG:
          if (sscanf(line, arg[i].fmtr, &(col[j].v.lval), &n) != 1 || n == 0) {
            *end = line;
            return FCFC_ERR_ASCII;
          }
          break;
        case AST_DTYPE_FLOAT:
          if (sscanf(line, arg[i].fmtr, &(col[j].v.fval), &n) != 1 || n == 0) {
            *end = line;
            return FCFC_ERR_ASCII;
          }
          break;
        case AST_DTYPE_DOUBLE:
          if (sscanf(line, arg[i].fmtr, &(col[j].v.dval), &n) != 1 || n == 0) {
            *end = line;
            return FCFC_ERR_ASCII;
          }
          break;
        case AST_DTYPE_STRING:
          while (isspace(*line)) line++;        /* omit whitespaces */
          /* fall through */
        case AST_DTYPE_STR_SPACE:
          /* Do not actually save the string, but compute the position. */
          if (sscanf(line, arg[i].fmtr, &n) != 0 || n == 0) {
            *end = line;
            return FCFC_ERR_ASCII;
          }
          col[j].v.sval.str = line;     /* points into the caller's buffer */
          col[j].v.sval.len = n;
          break;
        default:
          P_ERR("unknown data type for column: ${%d}\n", j + 1);
          *end = line;
          return FCFC_ERR_UNKNOWN;
      }
      j++;
    }
    line += n;          /* advance past the characters consumed */
  }
  return 0;
}
/******************************************************************************
Function `ascii_read_sel`:
  Read the selection criteria based on the AST and ASCII columns.
  Every variable referenced by the AST is bound to the value of its column
  before the expression is evaluated.
Arguments:
  * `ast`:      abstract syntax tree for the selection criteria;
  * `col`:      columns to be parsed;
  * `res`:      the result.
Return:
  Zero on success; non-zero on error.
******************************************************************************/
static inline int ascii_read_sel(ast_t *ast, const asc_col_t *col, bool *res) {
  for (int i = 0; i < ast->nvar; i++) {
    long idx = ast->vidx[i];
    const asc_col_t *c = col + (idx - 1);       /* columns are 1-based */
    const void *value;
    int size = 0;               /* only strings carry an explicit length */
    int dtype = AST_DTYPE(c->dtype);
    switch (dtype) {
      case AST_DTYPE_INT: value = &c->v.ival; break;
      case AST_DTYPE_LONG: value = &c->v.lval; break;
      case AST_DTYPE_FLOAT: value = &c->v.fval; break;
      case AST_DTYPE_DOUBLE: value = &c->v.dval; break;
      case AST_DTYPE_STRING:
        value = c->v.sval.str;
        size = c->v.sval.len;
        break;
      default:
        P_ERR("column ${%ld} not appropriate for selection\n", idx);
        return FCFC_ERR_AST;
    }
    if (ast_set_var(ast, idx, value, size, dtype)) {
      P_AST_ERR(ast);
      return FCFC_ERR_AST;
    }
  }

  bool val = false;
  if (ast_eval(ast, &val)) {
    P_AST_ERR(ast);
    return FCFC_ERR_AST;
  }
  *res = val;
  return 0;
}
/******************************************************************************
Function `ascii_read_real`:
  Read a floating-point number based on the AST and ASCII columns.
  Every variable referenced by the AST is bound to the value of its column
  (numeric columns only) before the expression is evaluated.
Arguments:
  * `ast`:      abstract syntax tree for the number evaluation;
  * `col`:      columns to be parsed;
  * `res`:      the result.
Return:
  Zero on success; non-zero on error.
******************************************************************************/
static inline int ascii_read_real(ast_t *ast, const asc_col_t *col, real *res) {
  for (int i = 0; i < ast->nvar; i++) {
    long idx = ast->vidx[i];
    const asc_col_t *c = col + (idx - 1);       /* columns are 1-based */
    const void *value;
    int dtype = AST_DTYPE(c->dtype);
    switch (dtype) {
      case AST_DTYPE_INT: value = &c->v.ival; break;
      case AST_DTYPE_LONG: value = &c->v.lval; break;
      case AST_DTYPE_FLOAT: value = &c->v.fval; break;
      case AST_DTYPE_DOUBLE: value = &c->v.dval; break;
      default:          /* strings cannot participate in numeric expressions */
        P_ERR("column ${%ld} not appropriate for the numerical evaluation\n",
            idx);
        return FCFC_ERR_AST;
    }
    if (ast_set_var(ast, idx, value, 0, dtype)) {
      P_AST_ERR(ast);
      return FCFC_ERR_AST;
    }
  }

  real val = 0;
  if (ast_eval(ast, &val)) {
    P_AST_ERR(ast);
    return FCFC_ERR_AST;
  }
  *res = val;
  return 0;
}
/*============================================================================*\
Functions for reading file by chunks
\*============================================================================*/
/******************************************************************************
Function `chunk_resize`:
  Enlarge the size of a chunk.
  A fresh chunk starts at FCFC_FILE_CHUNK bytes; an existing one is doubled,
  capped at FCFC_MAX_CHUNK.
Arguments:
  * `chunk`:    address of the chunk;
  * `size`:     size of the chunk.
Return:
  Zero on success; non-zero on error.
******************************************************************************/
static inline int chunk_resize(char **chunk, size_t *size) {
  /* Assume the arguments are not NULL. */
  size_t target;
  if (*chunk == NULL)
    target = FCFC_FILE_CHUNK;
  else if (*size > FCFC_MAX_CHUNK / 2)
    return FCFC_ERR_FILE;               /* doubling would exceed the cap */
  else
    target = *size * 2;

  char *larger = realloc(*chunk, target);
  if (!larger) return FCFC_ERR_MEMORY;  /* original chunk remains valid */
  *chunk = larger;
  *size = target;
  return 0;
}
/******************************************************************************
Function `read_ascii_table`:
  Read the first two columns of an ASCII file as `real` arrays.
  Fix: the growth path reallocated via `double *tmp` with `sizeof(double)`,
  which is a pointer-type mismatch (and the wrong element size) when `real`
  is single precision; it now uses the `real` type consistently.
Arguments:
  * `fname`:    filename of the input catalog;
  * `x`:        array for the first column (caller frees);
  * `y`:        array for the second column (caller frees);
  * `num`:      number of lines read successfully.
Return:
  Zero on success; non-zero on error.
******************************************************************************/
int read_ascii_table(const char *fname, real **x, real **y, size_t *num) {
  /* Open the file for reading. */
  FILE *fp;
  if (!(fp = fopen(fname, "r"))) {
    P_ERR("cannot open file for reading: `%s'\n", fname);
    return FCFC_ERR_FILE;
  }

  /* Prepare for the chunk. */
  char *chunk = NULL;
  size_t csize = 0;
  if (chunk_resize(&chunk, &csize)) {
    P_ERR("failed to allocate memory for reading the file by chunks\n");
    fclose(fp);
    return FCFC_ERR_MEMORY;
  }

  /* Allocate memory for the data. */
  size_t max = FCFC_DATA_INIT_NUM;
  real *nx, *ny;
  nx = ny = NULL;
  if (!(nx = malloc(max * sizeof(real))) ||
      !(ny = malloc(max * sizeof(real)))) {
    P_ERR("failed to allocate memory for the samples\n");
    fclose(fp); free(chunk);
    if (nx) free(nx);
    if (ny) free(ny);
    return FCFC_ERR_MEMORY;
  }

  size_t n, nread, nrest;
  n = nrest = 0;

  /* Start reading the file by chunk. */
  while ((nread = fread(chunk + nrest, sizeof(char), csize - nrest, fp))) {
    char *p = chunk;
    char *end = p + nrest + nread;
    char *endl;
    if (nread < csize - nrest) *end = '\n';     /* append '\n' to last line */

    /* Process lines in the chunk. */
    while ((endl = memchr(p, '\n', end - p))) {
      *endl = '\0';             /* replace '\n' by string terminator '\0' */
      while (isspace(*p)) ++p;          /* omit leading whitespaces */
      if (*p == FCFC_READ_COMMENT || *p == '\0') {      /* comment or empty */
        p = endl + 1;
        continue;
      }

      /* Parse the line. */
      if (sscanf(p, REAL_FMT " " REAL_FMT, nx + n, ny + n) != 2) {
        P_ERR("failed to read line: %s\n", p);
        fclose(fp); free(chunk); free(nx); free(ny);
        return FCFC_ERR_FILE;
      }

      /* Enlarge the memory for the data if necessary. */
      if (++n >= max) {
        if (SIZE_MAX / 2 < max) {
          P_ERR("too many samples in the file: `%s'\n", fname);
          fclose(fp); free(chunk); free(nx); free(ny);
          return FCFC_ERR_FILE;
        }
        max <<= 1;
        /* Use the `real` type here: `real` may be float, so reallocating
           with sizeof(double) and a double pointer was incorrect. */
        real *tmp = realloc(nx, sizeof(real) * max);
        if (!tmp) {
          P_ERR("failed to allocate memory for the samples\n");
          fclose(fp); free(chunk); free(nx); free(ny);
          return FCFC_ERR_MEMORY;
        }
        nx = tmp;
        tmp = realloc(ny, sizeof(real) * max);
        if (!tmp) {
          P_ERR("failed to allocate memory for the samples\n");
          fclose(fp); free(chunk); free(nx); free(ny);
          return FCFC_ERR_MEMORY;
        }
        ny = tmp;
      }
      /* Continue with the next line. */
      p = endl + 1;
    }

    /* The chunk cannot hold a full line. */
    if (p == chunk) {
      if (chunk_resize(&chunk, &csize)) {
        P_ERR("failed to allocate memory for reading the file by chunk\n");
        fclose(fp); free(chunk); free(nx); free(ny);
        return FCFC_ERR_MEMORY;
      }
      nrest += nread;
      continue;
    }

    /* Copy the remaining characters to the beginning of the chunk. */
    nrest = end - p;
    memmove(chunk, p, nrest);
  }

  if (!feof(fp)) {
    P_ERR("unexpected end of file: `%s'\n", fname);
    fclose(fp); free(chunk); free(nx); free(ny);
    return FCFC_ERR_FILE;
  }

  free(chunk);
  if (fclose(fp)) P_WRN("failed to close file: `%s'\n", fname);

  *x = nx;
  *y = ny;
  *num = n;
  return 0;
}
/******************************************************************************
Function `read_ascii_data`:
Read an ASCII file for the positions and weights.
Arguments:
* `fname`: filename of the input catalog;
* `skip`: number of lines to be skipped before reading positions;
* `comment`: character indicating the beginning of a comment line;
* `fmtr`: formatter string for `sscanf`;
* `pos`: columns of the positions;
* `wt`: weight for pair counts;
* `sel`: data selection criteria;
* `data`: address of the structure for storing positions;
* `num`: number of lines read successfully;
* `verb`: indicate whether to show detailed standard outputs.
Return:
Zero on success; non-zero on error.
******************************************************************************/
int read_ascii_data(const char *fname, const size_t skip, const char comment,
const char *fmtr, char *const *pos, const char *wt, const char *sel,
DATA **data, size_t *num, const int verb) {
/* Parse the formatter. */
asc_arg_t *arg;
/* nc: number of formatter tokens; rnc: number of columns actually read
(i.e. tokens not marked ASCII_DTYPE_SKIP -- see the trimming loop below). */
int nc, rnc;
nc = rnc = 0;
if (!(arg = parse_ascii_fmtr(fmtr, &nc, &rnc))) return FCFC_ERR_ASCII;
if (!rnc) {
P_ERR("no column to be read given the formatter: `%s'\n", fmtr);
return FCFC_ERR_ASCII;
}
if (rnc < 3) P_WRN("reading coordinates from less than 3 columns\n");
/* Record libast compatible columns. */
asc_col_t *col;
if (!(col = ascii_col_init(arg, nc, rnc))) {
ascii_arg_destroy(arg, nc);
return FCFC_ERR_ASCII;
}
/* Initialise all variables for easy error handling. */
/* NOTE(review): CLEAN_PTR (and FCFC_QUIT in the OMP sections) are macros
defined elsewhere and appear to release fp, chunk, dat, and the ASTs
declared here -- confirm they cover exactly this set. */
FILE *fp = NULL;
char *chunk = NULL;
DATA *dat = NULL;
ast_t *ast_pos[3] = {NULL, NULL, NULL};
ast_t *ast_sel = NULL;
#ifdef FCFC_DATA_WEIGHT
ast_t *ast_wt = NULL;
#else
(void) wt;
#endif
/* Construct the ASTs for positions and selections. */
for (int i = 0; i < 3; i++) {
if (!(ast_pos[i] = ast_init())) {
P_AST_ERR(ast_pos[i]); CLEAN_PTR; return FCFC_ERR_AST;
}
if (ast_build(ast_pos[i], pos[i], FCFC_DTYPE_DATA, true)) {
P_AST_ERR(ast_pos[i]); CLEAN_PTR; return FCFC_ERR_AST;
}
}
if (sel && *sel) {
if (!(ast_sel = ast_init())) {
P_AST_ERR(ast_sel); CLEAN_PTR; return FCFC_ERR_AST;
}
if (ast_build(ast_sel, sel, AST_DTYPE_BOOL, true)) {
P_AST_ERR(ast_sel); CLEAN_PTR; return FCFC_ERR_AST;
}
}
#ifdef FCFC_DATA_WEIGHT
/* Construct the ASTs for weights. */
if (wt && *wt) {
if (!(ast_wt = ast_init())) {
P_AST_ERR(ast_wt); CLEAN_PTR; return FCFC_ERR_AST;
}
if (ast_build(ast_wt, wt, FCFC_DTYPE_DATA, true)) {
P_AST_ERR(ast_wt); CLEAN_PTR; return FCFC_ERR_AST;
}
}
#endif
/* Check number of variables for expressions. */
/* max_col: largest column index referenced by any expression.
NOTE(review): vidx entries are compared directly with rnc, so they are
treated as 1-based column indices here -- verify against libast. */
int max_col = 0;
for (int i = 0; i < 3; i++) {
if (ast_pos[i]->nvar == 0) {
P_ERR("the expression for position coordinate %d is a constant: `%s'\n",
i + 1, pos[i]);
CLEAN_PTR; return FCFC_ERR_CFG;
}
if (rnc < ast_pos[i]->vidx[ast_pos[i]->nvar - 1]) {
P_ERR("not enough columns for position coordinate %d: `%s'\n",
i + 1, pos[i]);
CLEAN_PTR; return FCFC_ERR_CFG;
}
if (max_col < ast_pos[i]->vidx[ast_pos[i]->nvar - 1])
max_col = ast_pos[i]->vidx[ast_pos[i]->nvar - 1];
}
if (ast_sel) {
if (ast_sel->nvar == 0) {
P_ERR("the expression for data selection is a constant: `%s'\n", sel);
CLEAN_PTR; return FCFC_ERR_CFG;
}
if (rnc < ast_sel->vidx[ast_sel->nvar - 1]) {
P_ERR("not enough columns for data selection: `%s'\n", sel);
CLEAN_PTR; return FCFC_ERR_CFG;
}
if (max_col < ast_sel->vidx[ast_sel->nvar - 1])
max_col = ast_sel->vidx[ast_sel->nvar - 1];
}
#ifdef FCFC_DATA_WEIGHT
if (ast_wt) {
if (ast_wt->nvar && rnc < ast_wt->vidx[ast_wt->nvar - 1]) {
P_ERR("not enough columns for weight: `%s'\n", wt);
CLEAN_PTR; return FCFC_ERR_CFG;
}
if (ast_wt->nvar && max_col < ast_wt->vidx[ast_wt->nvar - 1])
max_col = ast_wt->vidx[ast_wt->nvar - 1];
}
#endif
/* Remove columns that are not needed. */
/* Find the formatter token corresponding to the max_col-th non-skipped
column, drop everything after it, and shrink nc/rnc accordingly. */
if (max_col < rnc) {
int i, j;
for (i = j = 0; i < nc; i++) {
if (arg[i].dtype != ASCII_DTYPE_SKIP) {
if (++j == max_col) break;
}
}
for (j = i + 1; j < nc; j++) if(arg[j].fmtr) free(arg[j].fmtr);
nc = i + 1;
rnc = max_col;
}
/* Prepare for the chunk. */
size_t csize = 0;
if (chunk_resize(&chunk, &csize)) {
P_ERR("failed to allocate memory for reading the file by chunk\n");
CLEAN_PTR; return FCFC_ERR_MEMORY;
}
/* Allocate memory for the data. */
size_t max = FCFC_DATA_INIT_NUM;
if (!(dat = malloc(max * sizeof(DATA)))) {
P_ERR("failed to allocate memory for the data\n");
CLEAN_PTR; return FCFC_ERR_MEMORY;
}
/* Dynamic allocations for OpenMP threads. */
#ifdef OMP
const int nomp = omp_get_max_threads();
/* Construct the ASCII columns for non-master threads. */
asc_col_t **pcol = NULL;
if (nomp > 1) {
if (!(pcol = malloc(sizeof(asc_col_t *) * (nomp - 1)))) {
P_ERR("failed to allocate memory for thread-private columns\n");
FCFC_QUIT(FCFC_ERR_MEMORY);
}
for (int j = 0; j < nomp - 1; j++) {
if (!(pcol[j] = malloc(sizeof(asc_col_t) * rnc))) {
P_ERR("failed to allocate memory for thread-private columns\n");
FCFC_QUIT(FCFC_ERR_MEMORY);
}
memcpy(pcol[j], col, sizeof(asc_col_t) * rnc);
}
}
/* Construct the ASTs again for non-master threads. */
/* ASTs hold per-evaluation state, so each worker thread needs its own
copies; the master thread keeps using the originals built above. */
ast_t **ast_ppos, **ast_psel;
ast_ppos = ast_psel = NULL;
#ifdef FCFC_DATA_WEIGHT
ast_t **ast_pwt = NULL;
#endif
if (nomp > 1) {
ast_ppos = malloc(sizeof(ast_t *) * (nomp - 1) * 3);
ast_psel = malloc(sizeof(ast_t *) * (nomp - 1));
if (!ast_ppos || !ast_psel) {
P_ERR("failed to allocate memory for thread-private ASTs\n");
FCFC_QUIT(FCFC_ERR_MEMORY);
}
#ifdef FCFC_DATA_WEIGHT
if (!(ast_pwt = malloc(sizeof(ast_t *) * (nomp - 1)))) {
P_ERR("failed to allocate memory for thread-private ASTs\n");
FCFC_QUIT(FCFC_ERR_MEMORY);
}
#endif
for (int j = 0; j < nomp - 1; j++) {
/* Positions. */
for (int i = 0; i < 3; i++) {
int k = j * 3 + i;
if (!(ast_ppos[k] = ast_init())) {
P_AST_ERR(ast_ppos[k]); FCFC_QUIT(FCFC_ERR_AST);
}
if (ast_build(ast_ppos[k], pos[i], FCFC_DTYPE_DATA, true)) {
P_AST_ERR(ast_ppos[k]); FCFC_QUIT(FCFC_ERR_AST);
}
}
/* Selections. */
if (sel && *sel) {
if (!(ast_psel[j] = ast_init())) {
P_AST_ERR(ast_psel[j]); FCFC_QUIT(FCFC_ERR_AST);
}
if (ast_build(ast_psel[j], sel, AST_DTYPE_BOOL, true)) {
P_AST_ERR(ast_psel[j]); FCFC_QUIT(FCFC_ERR_AST);
}
}
else ast_psel[j] = NULL;
#ifdef FCFC_DATA_WEIGHT
/* Weights. */
if (wt && *wt) {
if (!(ast_pwt[j] = ast_init())) {
P_AST_ERR(ast_pwt[j]); FCFC_QUIT(FCFC_ERR_AST);
}
if (ast_build(ast_pwt[j], wt, FCFC_DTYPE_DATA, true)) {
P_AST_ERR(ast_pwt[j]); FCFC_QUIT(FCFC_ERR_AST);
}
}
else ast_pwt[j] = NULL;
#endif
}
}
/* Construct the private data pool. */
/* One contiguous allocation split into nomp slices of
FCFC_DATA_THREAD_NUM records; pndata[t] counts records in slice t. */
DATA **pdata = malloc(sizeof(DATA *) * nomp);
size_t *pndata = calloc(nomp, sizeof(size_t));
if (!pdata || !pndata) {
P_ERR("failed to allocate memory for the thread-private data.\n");
FCFC_QUIT(FCFC_ERR_MEMORY);
}
if (!(pdata[0] = malloc(sizeof(DATA) * nomp * FCFC_DATA_THREAD_NUM))) {
P_ERR("failed to allocate memory for the thread-private data.\n");
FCFC_QUIT(FCFC_ERR_MEMORY);
}
for (int j = 1; j < nomp; j++) pdata[j] = pdata[0] + j * FCFC_DATA_THREAD_NUM;
/* Construct the pool for file lines. */
/* The pool stores pointers into `chunk`; it is parsed and reset before
the chunk buffer is refilled, so the pointers never dangle. */
size_t nlmax = FCFC_DATA_INIT_NUM;
size_t nl = 0;
char **lines = malloc(sizeof(char *) * nlmax);
if (!lines) {
P_ERR("failed to allocate memory for the thread-private lines\n");
FCFC_QUIT(FCFC_ERR_MEMORY);
}
#endif
/* Open the file for reading. */
if (!(fp = fopen(fname, "r"))) {
P_ERR("cannot open file for reading: `%s'\n", fname);
CLEAN_PTR; return FCFC_ERR_FILE;
}
if (verb) printf("  Filename: %s\n", fname);
/* n: objects recorded; nline: file lines seen (including skipped ones);
nrest: bytes carried over from the previous chunk (partial last line). */
size_t n, nline, nread, nrest;
n = nline = nrest = 0;
/* Start reading the file by chunk. */
while ((nread = fread(chunk + nrest, sizeof(char), csize - nrest, fp))) {
char *p = chunk;
char *end = p + nrest + nread;
char *endl;
if (nread < csize - nrest) *end = '\n';     /* append '\n' to last line */
/* Process lines in the chunk. */
while ((endl = memchr(p, '\n', end - p))) {
/* Skip header lines. */
if (nline++ < skip) {
p = endl + 1; continue;
}
*endl = '\0';             /* replace '\n' by string terminator '\0' */
while (isspace(*p)) ++p;          /* omit leading whitespaces */
if (*p == comment || *p == '\0') {        /* comment or empty */
p = endl + 1; continue;
}
#ifdef OMP
/* Append the line to the pool. */
lines[nl++] = p;
/* Enlarge the pool if necessary. */
if (nl >= nlmax) {
if (SIZE_MAX / 2 < nlmax) {
P_ERR("too many lines in the file\n");
FCFC_QUIT(FCFC_ERR_FILE);
}
nlmax <<= 1;
char **tmp = realloc(lines, sizeof(char *) * nlmax);
if (!tmp) {
P_ERR("failed to allocate memory for the thread-private lines\n");
FCFC_QUIT(FCFC_ERR_MEMORY);
}
lines = tmp;
}
#else
/* Parse the line. */
const char *stop = NULL;
if (ascii_read_line(p, arg, nc, col, &stop)) {
P_ERR("failed to read the line with format `%s':\n", fmtr);
fprintf(stderr, "%s\n", p);
if (stop > p) for (int k = 0; k < stop - p; k++) fprintf(stderr, " ");
fprintf(stderr, "^\n");
CLEAN_PTR; return FCFC_ERR_ASCII;
}
/* Apply selection. */
if (ast_sel) {
bool keep = false;
if (ascii_read_sel(ast_sel, col, &keep)) {
CLEAN_PTR; return FCFC_ERR_AST;
}
if (!keep) {
p = endl + 1; continue;
}
}
/* Record coordinates. */
for (int i = 0; i < 3; i++) {
if (ascii_read_real(ast_pos[i], col, &(dat[n].x[i]))) {
CLEAN_PTR; return FCFC_ERR_AST;
}
}
#ifdef FCFC_DATA_WEIGHT
/* Compute weights. */
if (ast_wt) {
if (ascii_read_real(ast_wt, col, &(dat[n].w))) {
CLEAN_PTR; return FCFC_ERR_AST;
}
}
else dat[n].w = 1;
#endif
/* Enlarge the memory for the data if necessary. */
if (++n >= max) {
if (SIZE_MAX / 2 < max) {
P_ERR("too many objects in the file: `%s'\n", fname);
CLEAN_PTR;
return FCFC_ERR_FILE;
}
max <<= 1;
DATA *tmp = realloc(dat, sizeof(DATA) * max);
if (!tmp) {
P_ERR("failed to allocate memory for the data\n");
CLEAN_PTR;
return FCFC_ERR_MEMORY;
}
dat = tmp;
}
#endif
/* Continue with the next line. */
p = endl + 1;
}
/* The chunk cannot hold a full line. */
if (p == chunk) {
if (chunk_resize(&chunk, &csize)) {
P_ERR("failed to allocate memory for reading the file by chunk\n");
CLEAN_PTR;
return FCFC_ERR_MEMORY;
}
nrest += nread;
continue;
}
#ifdef OMP
/* Enlarge the memory for the data if necessary. */
/* Worst case: every pooled line passes selection and is recorded. */
if (nl + n > max) {
while (nl + n > max) {
if (SIZE_MAX / 2 < max) {
P_ERR("too many objects in the file: `%s'.\n", fname);
CLEAN_PTR;
return FCFC_ERR_FILE;
}
max <<= 1;
}
DATA *tmp = realloc(dat, sizeof(DATA) * max);
if (!tmp) {
P_ERR("failed to allocate memory for the data.\n");
CLEAN_PTR;
return FCFC_ERR_MEMORY;
}
dat = tmp;
}
/* Parse the pooled lines in parallel: the master thread uses the original
ASTs/columns, the other threads are redirected to their private copies
via the firstprivate pointers below. */
#ifdef FCFC_DATA_WEIGHT
#pragma omp parallel num_threads(nomp) \
firstprivate(ast_pos, ast_sel, ast_wt, col)
#else
#pragma omp parallel num_threads(nomp) firstprivate(ast_pos, ast_sel, col)
#endif
{
/* Redirect pointers to the private pools. */
const int tid = omp_get_thread_num();
if (tid > 0) {
ast_pos[0] = ast_ppos[(tid - 1) * 3];
ast_pos[1] = ast_ppos[(tid - 1) * 3 + 1];
ast_pos[2] = ast_ppos[(tid - 1) * 3 + 2];
ast_sel = ast_psel[tid - 1];
#ifdef FCFC_DATA_WEIGHT
ast_wt = ast_pwt[tid - 1];
#endif
col = pcol[tid - 1];
}
DATA *pdat = pdata[tid];
size_t *pnum = pndata + tid;
/* Process lines in parallel. */
#pragma omp for
for (size_t ii = 0; ii < nl; ii++) {
/* Parse a line in the pool. */
const char *pp = lines[ii];
const char *stop = NULL;
if (ascii_read_line(pp, arg, nc, col, &stop)) {
#pragma omp critical
{
P_ERR("failed to read the line with format `%s':\n", fmtr);
fprintf(stderr, "%s\n", pp);
if (stop > pp) for (int k = 0; k < stop - pp; k++)
fprintf(stderr, " ");
fprintf(stderr, "^\n");
}
FCFC_QUIT(FCFC_ERR_ASCII);
}
/* Apply selection. */
if (ast_sel) {
bool keep = false;
if (ascii_read_sel(ast_sel, col, &keep)) {
FCFC_QUIT(FCFC_ERR_AST);
}
if (!keep) continue;
}
/* Record coordinates in the private data pool. */
for (int i = 0; i < 3; i++) {
if (ascii_read_real(ast_pos[i], col, &(pdat[*pnum].x[i]))) {
FCFC_QUIT(FCFC_ERR_AST);
}
}
#ifdef FCFC_DATA_WEIGHT
/* Compute weights. */
if (ast_wt) {
if (ascii_read_real(ast_wt, col, &(pdat[*pnum].w))) {
FCFC_QUIT(FCFC_ERR_AST);
}
}
else pdat[*pnum].w = 1;
#endif
/* Record the private data and clear the pool if necessary. */
/* Flushing to the shared array is serialized; n and dat are only
touched inside this critical section. */
if (++(*pnum) >= FCFC_DATA_THREAD_NUM) {
#pragma omp critical
{
/* Enlarge the memory for the data if necessary. */
if (n + FCFC_DATA_THREAD_NUM >= max) {
if (SIZE_MAX / 2 < max) {
P_ERR("too many objects in the file: `%s'.\n", fname);
FCFC_QUIT(FCFC_ERR_FILE);
}
max <<= 1;
if (max < n + FCFC_DATA_THREAD_NUM)
max = n + FCFC_DATA_THREAD_NUM;
DATA *tmp = realloc(dat, sizeof(DATA) * max);
if (!tmp) {
P_ERR("failed to allocate memory for the data.\n");
FCFC_QUIT(FCFC_ERR_MEMORY);
}
dat = tmp;
}
memcpy(dat + n, pdat, sizeof(DATA) * FCFC_DATA_THREAD_NUM);
n += FCFC_DATA_THREAD_NUM;
}
*pnum = 0;
}
}
}
nl = 0;
#endif
/* Copy the remaining characters to the beginning of the chunk. */
nrest = end - p;
memmove(chunk, p, nrest);
}
#ifdef OMP
/* Record the rest of the private data. */
for (int i = 0; i < nomp; i++) {
if (pndata[i]) {
/* Enlarge the memory for the data if necessary. */
if (n + pndata[i] >= max) {
if (SIZE_MAX / 2 < max) {
P_ERR("too many objects in the file: `%s'.\n", fname);
FCFC_QUIT(FCFC_ERR_FILE);
}
max <<= 1;
if (max < n + pndata[i]) max = n + pndata[i];
DATA *tmp = realloc(dat, sizeof(DATA) * max);
if (!tmp) {
P_ERR("failed to allocate memory for the data.\n");
FCFC_QUIT(FCFC_ERR_MEMORY);
}
dat = tmp;
}
memcpy(dat + n, pdata[i], sizeof(DATA) * pndata[i]);
n += pndata[i];
}
}
/* Release memory for thread-private data structures. */
for (int i = 0; i < nomp - 1; i++) {
free(pcol[i]);
if (ast_psel[i]) ast_destroy(ast_psel[i]);
#ifdef FCFC_DATA_WEIGHT
if (ast_pwt[i]) ast_destroy(ast_pwt[i]);
#endif
}
free(pdata[0]); free(pdata); free(pndata);
for (int i = 0; i < (nomp - 1) * 3; i++) ast_destroy(ast_ppos[i]);
free(pcol); free(ast_ppos); free(ast_psel);
#ifdef FCFC_DATA_WEIGHT
free(ast_pwt);
#endif
free(lines);
#endif
if (!feof(fp)) {
P_ERR("unexpected end of file: `%s'\n", fname);
CLEAN_PTR; return FCFC_ERR_FILE;
}
if (verb) {
printf("  Number of lines processed in total: %zu\n"
"  Number of recorded objects: %zu\n", nline, n);
}
if (fclose(fp)) P_WRN("failed to close file: `%s'\n", fname);
ast_destroy(ast_pos[0]); ast_destroy(ast_pos[1]); ast_destroy(ast_pos[2]);
ast_destroy(ast_sel);
#ifdef FCFC_DATA_WEIGHT
ast_destroy(ast_wt);
#endif
ascii_arg_destroy(arg, nc); free(col); free(chunk);
/* Release pre-allocated memory that is not necessary any more. */
/* Shrink-to-fit: if the shrinking realloc fails, keep the larger buffer
(the data are still valid), so this is not treated as an error. */
DATA *tmp = realloc(dat, n * sizeof(DATA));
if (tmp) *data = tmp;
else *data = dat;
*num = n;
return 0;
}
|
core_clansy.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zlansy.c, normal z -> c, Fri Sep 28 17:38:21 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
#include <math.h>
/******************************************************************************/
/* Weak default kernel: compute the requested norm of the n-by-n symmetric
 * tile A (column-major, leading dimension lda) by delegating to LAPACKE,
 * and store the scalar result through *value.  `work` is the scratch
 * buffer required by the LAPACKE worker interface. */
__attribute__((weak))
void plasma_core_clansy(plasma_enum_t norm, plasma_enum_t uplo,
                        int n,
                        const plasma_complex32_t *A, int lda,
                        float *work, float *value)
{
    const float result = LAPACKE_clansy_work(LAPACK_COL_MAJOR,
                                             lapack_const(norm),
                                             lapack_const(uplo),
                                             n, A, lda, work);
    *value = result;
}
/******************************************************************************/
/* OpenMP front-end: submits plasma_core_clansy() as a task that reads the
 * tile A (lda*n elements) and writes the scalar norm to *value, as declared
 * by the depend clauses below.
 * NOTE(review): `request` is accepted but not used in this routine --
 * presumably kept for a uniform core-API signature; confirm upstream. */
void plasma_core_omp_clansy(plasma_enum_t norm, plasma_enum_t uplo,
int n,
const plasma_complex32_t *A, int lda,
float *work, float *value,
plasma_sequence_t *sequence, plasma_request_t *request)
{
#pragma omp task depend(in:A[0:lda*n]) \
depend(out:value[0:1])
{
/* Skip the computation if an earlier task in the sequence failed. */
if (sequence->status == PlasmaSuccess)
plasma_core_clansy(norm, uplo, n, A, lda, work, value);
}
}
/******************************************************************************/
/* Task-based helper for the one-/infinity-norm of a symmetric tile: fills
 * value[0..n-1] with per-column sums of absolute values, visiting only the
 * stored triangle and crediting each off-diagonal element to both its row
 * and its column index (valid because the matrix is symmetric).  Norms
 * other than PlasmaOneNorm/PlasmaInfNorm fall through the switch and leave
 * `value` untouched.
 * NOTE(review): `request` is unused here -- presumably kept for API
 * symmetry; confirm upstream. */
void plasma_core_omp_clansy_aux(plasma_enum_t norm, plasma_enum_t uplo,
int n,
const plasma_complex32_t *A, int lda,
float *value,
plasma_sequence_t *sequence, plasma_request_t *request)
{
switch (norm) {
case PlasmaOneNorm:
case PlasmaInfNorm:
/* The task reads the whole tile and writes the n partial results. */
#pragma omp task depend(in:A[0:lda*n]) \
depend(out:value[0:n])
{
if (sequence->status == PlasmaSuccess) {
if (uplo == PlasmaUpper) {
/* Upper triangle stored: column j holds rows 0..j. */
for (int i = 0; i < n; i++)
value[i] = 0.0;
for (int j = 0; j < n; j++) {
for (int i = 0; i < j; i++) {
value[i] += cabsf(A[lda*j+i]);
value[j] += cabsf(A[lda*j+i]);
}
value[j] += cabsf(A[lda*j+j]);
}
}
else { // PlasmaLower
/* Lower triangle stored: column j holds rows j..n-1. */
for (int i = 0; i < n; i++)
value[i] = 0.0;
for (int j = 0; j < n; j++) {
value[j] += cabsf(A[lda*j+j]);
for (int i = j+1; i < n; i++) {
value[i] += cabsf(A[lda*j+i]);
value[j] += cabsf(A[lda*j+i]);
}
}
}
}
}
break;
}
}
|
threshold.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD %
% T H H R R E SS H H O O L D D %
% T HHHHH RRRR EEE SSS HHHHH O O L D D %
% T H H R R E SS H H O O L D D %
% T H H R R EEEEE SSSSS H H OOO LLLLL DDDD %
% %
% %
% MagickCore Image Threshold Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/property.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/configure.h"
#include "MagickCore/constitute.h"
#include "MagickCore/decorate.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/montage.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/shear.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"
/*
Define declarations.
*/
#define ThresholdsFilename "thresholds.xml"
/*
Typedef declarations.
*/
/* A single dither/threshold map parsed from thresholds.xml
   (see BuiltinMap below for the XML schema). */
struct _ThresholdMap
{
char
*map_id,       /* lookup name: the "map" attribute of a <threshold> entry */
*description;  /* human-readable <description> text */
size_t
width,         /* dimensions of the level matrix (<levels> attributes) */
height;
ssize_t
divisor,       /* "divisor" attribute -- scales the level values */
*levels;       /* width*height matrix of threshold levels */
};
/*
Static declarations.
*/
#if MAGICKCORE_ZERO_CONFIGURATION_SUPPORT
#include "MagickCore/threshold-map.h"
#else
/* Minimal built-in maps used when no thresholds.xml configuration file is
   available: a 1x1 non-dithering threshold and a 2x2 checkerboard dither. */
static const char *const
BuiltinMap=
"<?xml version=\"1.0\"?>"
"<thresholds>"
"  <threshold map=\"threshold\" alias=\"1x1\">"
"    <description>Threshold 1x1 (non-dither)</description>"
"    <levels width=\"1\" height=\"1\" divisor=\"2\">"
"        1"
"    </levels>"
"  </threshold>"
"  <threshold map=\"checks\" alias=\"2x1\">"
"    <description>Checkerboard 2x1 (dither)</description>"
"    <levels width=\"2\" height=\"2\" divisor=\"3\">"
"       1 2"
"       2 1"
"    </levels>"
"  </threshold>"
"</thresholds>";
#endif
/*
Forward declarations.
*/
static ThresholdMap
*GetThresholdMapFile(const char *,const char *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveThresholdImage() selects an individual threshold for each pixel
% based on the range of intensity values in its local neighborhood. This
% allows for thresholding of an image whose global intensity histogram
% doesn't contain distinctive peaks.
%
% The format of the AdaptiveThresholdImage method is:
%
% Image *AdaptiveThresholdImage(const Image *image,const size_t width,
% const size_t height,const double bias,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the width of the local neighborhood.
%
% o height: the height of the local neighborhood.
%
% o bias: the mean bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveThresholdImage(const Image *image,
const size_t width,const size_t height,const double bias,
ExceptionInfo *exception)
{
#define AdaptiveThresholdImageTag  "AdaptiveThreshold/Image"
CacheView
*image_view,
*threshold_view;
Image
*threshold_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickSizeType
number_pixels;  /* pixel count of the width x height averaging window */
ssize_t
y;
/*
Initialize threshold image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
threshold_image=CloneImage(image,0,0,MagickTrue,exception);
if (threshold_image == (Image *) NULL)
return((Image *) NULL);
/* A degenerate window means nothing to average: return the clone as-is. */
if ((width == 0) || (height == 0))
return(threshold_image);
status=SetImageStorageClass(threshold_image,DirectClass,exception);
if (status == MagickFalse)
{
threshold_image=DestroyImage(threshold_image);
return((Image *) NULL);
}
/*
Threshold image.
*/
status=MagickTrue;
progress=0;
number_pixels=(MagickSizeType) width*height;
image_view=AcquireVirtualCacheView(image,exception);
threshold_view=AcquireAuthenticCacheView(threshold_image,exception);
/* Rows are independent, so they are distributed across threads; `status`
and `progress` are the only shared mutable state. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,threshold_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
channel_bias[MaxPixelChannels],
channel_sum[MaxPixelChannels];
register const Quantum
*magick_restrict p,
*magick_restrict pixels;
register Quantum
*magick_restrict q;
register ssize_t
i,
x;
ssize_t
center,
u,
v;
if (status == MagickFalse)
continue;
/* Fetch the row plus a half-window border on every side (virtual pixels
supply the out-of-image values). */
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
(height/2L),image->columns+width,height,exception);
q=QueueCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns,
1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
/* Offset of the window's central pixel within the fetched region. */
center=(ssize_t) GetPixelChannels(image)*(image->columns+width)*(height/2L)+
GetPixelChannels(image)*(width/2);
/* Prime the per-channel running sums over the initial window; the
rightmost column is also accumulated separately in channel_bias so it
can be subtracted when the window slides (see the x loop below). */
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(threshold_traits == UndefinedPixelTrait))
continue;
if ((threshold_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(threshold_image,channel,p[center+i],q);
continue;
}
pixels=p;
channel_bias[channel]=0.0;
channel_sum[channel]=0.0;
for (v=0; v < (ssize_t) height; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
if (u == (ssize_t) (width-1))
channel_bias[channel]+=pixels[i];
channel_sum[channel]+=pixels[i];
pixels+=GetPixelChannels(image);
}
pixels+=GetPixelChannels(image)*image->columns;
}
}
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
mean;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(threshold_traits == UndefinedPixelTrait))
continue;
if ((threshold_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(threshold_image,channel,p[center+i],q);
continue;
}
/* Sliding-window update: drop the cached column contribution, then
rebuild channel_bias from the window's leading column while adding
the trailing column to the running sum.  NOTE(review): relies on
the exact pointer walk below matching the fetched-region layout;
verify against GetCacheViewVirtualPixels stride. */
channel_sum[channel]-=channel_bias[channel];
channel_bias[channel]=0.0;
pixels=p;
for (v=0; v < (ssize_t) height; v++)
{
channel_bias[channel]+=pixels[i];
pixels+=(width-1)*GetPixelChannels(image);
channel_sum[channel]+=pixels[i];
pixels+=GetPixelChannels(image)*(image->columns+1);
}
/* Threshold against the biased local mean. */
mean=(double) (channel_sum[channel]/number_pixels+bias);
SetPixelChannel(threshold_image,channel,(Quantum) ((double)
p[center+i] <= mean ? 0 : QuantumRange),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(threshold_image);
}
if (SyncCacheViewAuthenticPixels(threshold_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* The shared progress counter is incremented atomically. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,AdaptiveThresholdImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
threshold_image->type=image->type;
threshold_view=DestroyCacheView(threshold_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
threshold_image=DestroyImage(threshold_image);
return(threshold_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoThresholdImage() automatically performs image thresholding
% dependent on which method you specify.
%
% The format of the AutoThresholdImage method is:
%
% MagickBooleanType AutoThresholdImage(Image *image,
% const AutoThresholdMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-threshold.
%
% o method: choose from Kapur, OTSU, or Triangle.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Kapur maximum-entropy threshold: choose the histogram bin that maximizes
   the summed entropies of the below- and above-threshold populations.
   Returns the threshold as a percentage of MaxIntensity in [0,100], or
   -1.0 on memory-allocation failure (reported via `exception`). */
static double KapurThreshold(const Image *image,const double *histogram,
ExceptionInfo *exception)
{
#define MaxIntensity  255
double
*black_entropy,
*cumulative_histogram,
entropy,
epsilon,
maximum_entropy,
*white_entropy;
register ssize_t
i,
j;
size_t
threshold;
/*
Compute optimal threshold from the entropy of the histogram.
*/
cumulative_histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
sizeof(*cumulative_histogram));
black_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
sizeof(*black_entropy));
white_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
sizeof(*white_entropy));
if ((cumulative_histogram == (double *) NULL) ||
(black_entropy == (double *) NULL) || (white_entropy == (double *) NULL))
{
if (white_entropy != (double *) NULL)
white_entropy=(double *) RelinquishMagickMemory(white_entropy);
if (black_entropy != (double *) NULL)
black_entropy=(double *) RelinquishMagickMemory(black_entropy);
if (cumulative_histogram != (double *) NULL)
cumulative_histogram=(double *)
RelinquishMagickMemory(cumulative_histogram);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(-1.0);
}
/*
Entropy for black and white parts of the histogram.
*/
cumulative_histogram[0]=histogram[0];
for (i=1; i <= MaxIntensity; i++)
cumulative_histogram[i]=cumulative_histogram[i-1]+histogram[i];
/* epsilon guards the log() calls against empty (zero-mass) populations. */
epsilon=MagickMinimumValue;
for (j=0; j <= MaxIntensity; j++)
{
/*
Black entropy.
*/
black_entropy[j]=0.0;
if (cumulative_histogram[j] > epsilon)
{
entropy=0.0;
for (i=0; i <= j; i++)
if (histogram[i] > epsilon)
entropy-=histogram[i]/cumulative_histogram[j]*
log(histogram[i]/cumulative_histogram[j]);
black_entropy[j]=entropy;
}
/*
White entropy.
*/
white_entropy[j]=0.0;
if ((1.0-cumulative_histogram[j]) > epsilon)
{
entropy=0.0;
for (i=j+1; i <= MaxIntensity; i++)
if (histogram[i] > epsilon)
entropy-=histogram[i]/(1.0-cumulative_histogram[j])*
log(histogram[i]/(1.0-cumulative_histogram[j]));
white_entropy[j]=entropy;
}
}
/*
Find histogram bin with maximum entropy.
*/
maximum_entropy=black_entropy[0]+white_entropy[0];
threshold=0;
for (j=1; j <= MaxIntensity; j++)
if ((black_entropy[j]+white_entropy[j]) > maximum_entropy)
{
maximum_entropy=black_entropy[j]+white_entropy[j];
threshold=(size_t) j;
}
/*
Free resources.
*/
white_entropy=(double *) RelinquishMagickMemory(white_entropy);
black_entropy=(double *) RelinquishMagickMemory(black_entropy);
cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram);
return(100.0*threshold/MaxIntensity);
}
/* Otsu threshold: choose the bin that maximizes the between-class variance
   of the two populations the threshold induces.  Returns the threshold as a
   percentage of MaxIntensity in [0,100], or -1.0 on memory-allocation
   failure (reported via `exception`). */
static double OTSUThreshold(const Image *image,const double *histogram,
ExceptionInfo *exception)
{
double
max_sigma,
*myu,          /* cumulative mean intensity up to each bin */
*omega,        /* cumulative probability mass up to each bin */
*probability,
*sigma,        /* between-class variance at each candidate threshold */
threshold;
register ssize_t
i;
/*
Compute optimal threshold from maximization of inter-class variance.
*/
myu=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*myu));
omega=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*omega));
probability=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
sizeof(*probability));
sigma=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*sigma));
if ((myu == (double *) NULL) || (omega == (double *) NULL) ||
(probability == (double *) NULL) || (sigma == (double *) NULL))
{
if (sigma != (double *) NULL)
sigma=(double *) RelinquishMagickMemory(sigma);
if (probability != (double *) NULL)
probability=(double *) RelinquishMagickMemory(probability);
if (omega != (double *) NULL)
omega=(double *) RelinquishMagickMemory(omega);
if (myu != (double *) NULL)
myu=(double *) RelinquishMagickMemory(myu);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(-1.0);
}
/*
Calculate probability density.
*/
for (i=0; i <= (ssize_t) MaxIntensity; i++)
probability[i]=histogram[i];
/*
Generate probability of graylevels and mean value for separation.
*/
omega[0]=probability[0];
myu[0]=0.0;
for (i=1; i <= (ssize_t) MaxIntensity; i++)
{
omega[i]=omega[i-1]+probability[i];
myu[i]=myu[i-1]+i*probability[i];
}
/*
Sigma maximization: inter-class variance and compute optimal threshold.
*/
threshold=0;
max_sigma=0.0;
for (i=0; i < (ssize_t) MaxIntensity; i++)
{
sigma[i]=0.0;
/* Degenerate splits (all mass on one side) are skipped. */
if ((omega[i] != 0.0) && (omega[i] != 1.0))
sigma[i]=pow(myu[MaxIntensity]*omega[i]-myu[i],2.0)/(omega[i]*(1.0-
omega[i]));
if (sigma[i] > max_sigma)
{
max_sigma=sigma[i];
threshold=(double) i;
}
}
/*
Free resources.
*/
myu=(double *) RelinquishMagickMemory(myu);
omega=(double *) RelinquishMagickMemory(omega);
probability=(double *) RelinquishMagickMemory(probability);
sigma=(double *) RelinquishMagickMemory(sigma);
return(100.0*threshold/MaxIntensity);
}
static double TriangleThreshold(const double *histogram)
{
double
a,
b,
c,
count,
distance,
inverse_ratio,
max_distance,
segment,
x1,
x2,
y1,
y2;
register ssize_t
i;
ssize_t
end,
max,
start,
threshold;
/*
Compute optimal threshold with triangle algorithm.
*/
start=0; /* find start bin, first bin not zero count */
for (i=0; i <= (ssize_t) MaxIntensity; i++)
if (histogram[i] > 0.0)
{
start=i;
break;
}
end=0; /* find end bin, last bin not zero count */
for (i=(ssize_t) MaxIntensity; i >= 0; i--)
if (histogram[i] > 0.0)
{
end=i;
break;
}
max=0; /* find max bin, bin with largest count */
count=0.0;
for (i=0; i <= (ssize_t) MaxIntensity; i++)
if (histogram[i] > count)
{
max=i;
count=histogram[i];
}
/*
Compute threshold at split point.
*/
x1=(double) max;
y1=histogram[max];
x2=(double) end;
if ((max-start) >= (end-max))
x2=(double) start;
y2=0.0;
a=y1-y2;
b=x2-x1;
c=(-1.0)*(a*x1+b*y1);
inverse_ratio=1.0/sqrt(a*a+b*b+c*c);
threshold=0;
max_distance=0.0;
if (x2 == (double) start)
for (i=start; i < max; i++)
{
segment=inverse_ratio*(a*i+b*histogram[i]+c);
distance=sqrt(segment*segment);
if ((distance > max_distance) && (segment > 0.0))
{
threshold=i;
max_distance=distance;
}
}
else
for (i=end; i > max; i--)
{
segment=inverse_ratio*(a*i+b*histogram[i]+c);
distance=sqrt(segment*segment);
if ((distance > max_distance) && (segment < 0.0))
{
threshold=i;
max_distance=distance;
}
}
return(100.0*threshold/MaxIntensity);
}
MagickExport MagickBooleanType AutoThresholdImage(Image *image,
const AutoThresholdMethod method,ExceptionInfo *exception)
{
CacheView
*image_view;
char
property[MagickPathExtent];
double
gamma,
*histogram,
sum,
threshold;
MagickBooleanType
status;
register ssize_t
i;
ssize_t
y;
/*
Form histogram.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
sizeof(*histogram));
if (histogram == (double *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=MagickTrue;
(void) memset(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram));
/* Accumulate a 256-bin histogram of per-pixel intensities. */
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
double intensity = GetPixelIntensity(image,p);
histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++;
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
/*
Normalize histogram.
*/
sum=0.0;
for (i=0; i <= (ssize_t) MaxIntensity; i++)
sum+=histogram[i];
gamma=PerceptibleReciprocal(sum);
for (i=0; i <= (ssize_t) MaxIntensity; i++)
histogram[i]=gamma*histogram[i];
/*
Discover threshold from histogram.
*/
switch (method)
{
case KapurThresholdMethod:
{
threshold=KapurThreshold(image,histogram,exception);
break;
}
case OTSUThresholdMethod:
default:
{
threshold=OTSUThreshold(image,histogram,exception);
break;
}
case TriangleThresholdMethod:
{
threshold=TriangleThreshold(histogram);
break;
}
}
histogram=(double *) RelinquishMagickMemory(histogram);
/* A negative threshold signals a failure inside the method above. */
if (threshold < 0.0)
status=MagickFalse;
if (status == MagickFalse)
return(MagickFalse);
/*
Threshold image.
*/
/* Record the chosen threshold (as a percentage) as an image property,
then apply it in place. */
(void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold);
(void) SetImageProperty(image,"auto-threshold:threshold",property,exception);
return(BilevelImage(image,QuantumRange*threshold/100.0,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B i l e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BilevelImage() changes the value of individual pixels based on the
% intensity of each pixel channel. The result is a high-contrast image.
%
%  More precisely each channel value of the image is 'thresholded' so that if
%  it is equal to or less than the given value it is set to zero, while any
%  value greater than that given is set to its maximum, QuantumRange.
%
% This function is what is used to implement the "-threshold" operator for
% the command line API.
%
% If the default channel setting is given the image is thresholded using just
% the gray 'intensity' of the image, rather than the individual channels.
%
% The format of the BilevelImage method is:
%
% MagickBooleanType BilevelImage(Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: define the threshold values.
%
% o exception: return any errors or warnings in this structure.
%
% Aside: You can get the same results as operator using LevelImages()
% with the 'threshold' value for both the black_point and the white_point.
%
*/
MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) == MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Bilevel threshold image: each updatable channel becomes 0 when at or
    below the threshold, QuantumRange otherwise.  With the default channel
    mask the decision is based on the pixel intensity; otherwise each
    channel is compared individually.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        q[i]=(Quantum) (pixel <= threshold ? 0 : QuantumRange);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        /*
          Pass progress by value only: the previous code incremented
          progress a second time here (outside the atomic), double-counting
          rows and racing with other threads.
        */
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l a c k T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlackThresholdImage() is like ThresholdImage() but forces all pixels below
% the threshold into black while leaving all pixels at or above the threshold
% unchanged.
%
% The format of the BlackThresholdImage method is:
%
% MagickBooleanType BlackThresholdImage(Image *image,
% const char *threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: define the threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* NOTE(review): gray images are promoted to sRGB here (the sibling
     BilevelImage converts when NOT gray) — confirm this asymmetry is
     intentional before changing it. */
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Parse thresholds: rho applies to all channels; sigma/xi/psi/chi
    override green, blue, alpha (or black/alpha for CMYK) respectively.
  */
  GetPixelInfo(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  if (threshold.colorspace == CMYKColorspace)
    {
      /* CMYK: psi is the black channel, chi the alpha channel. */
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  if ((flags & PercentValue) != 0)
    {
      /* Percent syntax: scale 0-100 values to the quantum range. */
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    Black threshold image: channels below the threshold are forced to 0;
    channels at or above it are left unchanged.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* Default mask compares intensity; explicit mask compares the
           channel value itself. */
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        if (pixel < GetPixelInfoChannel(&threshold,channel))
          q[i]=(Quantum) 0;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l a m p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClampImage() sets each pixel whose value is below zero to zero and any
%  pixel whose value is above the quantum range to the quantum range (e.g.
%  65535); otherwise the pixel value remains unchanged.
%
% The format of the ClampImage method is:
%
% MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
{
#define ClampImageTag  "Clamp/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /*
        Palette image: clamp the colormap entries and sync the pixels
        from the updated colormap -- no per-pixel pass needed.
      */
      register ssize_t
        i;

      register PixelInfo
        *magick_restrict q;

      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        q->red=(double) ClampPixel(q->red);
        q->green=(double) ClampPixel(q->green);
        q->blue=(double) ClampPixel(q->blue);
        q->alpha=(double) ClampPixel(q->alpha);
        q++;
      }
      return(SyncImage(image,exception));
    }
  /*
    Clamp image: restrict every updatable channel to [0, QuantumRange].
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampPixel((MagickRealType) q[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ClampImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyThresholdMap() de-allocate the given ThresholdMap
%
%  The format of the DestroyThresholdMap method is:
%
%      ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
%
% A description of each parameter follows.
%
% o map: Pointer to the Threshold map to destroy
%
*/
MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
{
  /*
    Release the levels array and both owned strings (each of which may be
    NULL when construction failed part-way), then the map itself.  Always
    returns NULL for convenient caller assignment.
  */
  assert(map != (ThresholdMap *) NULL);
  if (map->levels != (ssize_t *) NULL)
    map->levels=(ssize_t *) RelinquishMagickMemory(map->levels);
  if (map->description != (char *) NULL)
    map->description=DestroyString(map->description);
  if (map->map_id != (char *) NULL)
    map->map_id=DestroyString(map->map_id);
  return((ThresholdMap *) RelinquishMagickMemory(map));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMap() loads and searches one or more threshold map files for the
% map matching the given name or alias.
%
% The format of the GetThresholdMap method is:
%
% ThresholdMap *GetThresholdMap(const char *map_id,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o map_id: ID of the map to look for.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ThresholdMap *GetThresholdMap(const char *map_id,
  ExceptionInfo *exception)
{
  ThresholdMap
    *map;

  /*
    Search the built-in map list first; when that misses (and
    zero-configuration support is disabled), fall back to scanning each
    configured threshold file for a matching map name or alias.
  */
  map=GetThresholdMapFile(BuiltinMap,"built-in",map_id,exception);
#if !defined(MAGICKCORE_ZERO_CONFIGURATION_SUPPORT)
  if (map == (ThresholdMap *) NULL)
    {
      const StringInfo
        *option;

      LinkedListInfo
        *options;

      options=GetConfigureOptions(ThresholdsFilename,exception);
      for (option=(const StringInfo *) GetNextValueInLinkedList(options);
           option != (const StringInfo *) NULL;
           option=(const StringInfo *) GetNextValueInLinkedList(options))
      {
        map=GetThresholdMapFile((const char *) GetStringInfoDatum(option),
          GetStringInfoPath(option),map_id,exception);
        if (map != (ThresholdMap *) NULL)
          break;
      }
      options=DestroyConfigureOptions(options);
    }
#endif
  return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMapFile() look for a given threshold map name or alias in the
% given XML file data, and return the allocated the map when found.
%
%  The format of the GetThresholdMapFile method is:
%
%      ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
%        const char *map_id,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o map_id: ID of the map to look for in XML list.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
  const char *map_id,ExceptionInfo *exception)
{
  char
    *p;

  const char
    *attribute,
    *content;

  double
    value;

  register ssize_t
    i;

  ThresholdMap
    *map;

  XMLTreeInfo
    *description,
    *levels,
    *threshold,
    *thresholds;

  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  map=(ThresholdMap *) NULL;
  thresholds=NewXMLTree(xml,exception);
  if (thresholds == (XMLTreeInfo *) NULL)
    return(map);
  /*
    Locate the <threshold> element whose "map" or "alias" attribute
    matches map_id (case-insensitive).
  */
  for (threshold=GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold=GetNextXMLTreeTag(threshold))
  {
    attribute=GetXMLTreeAttribute(threshold,"map");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
    attribute=GetXMLTreeAttribute(threshold,"alias");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
  }
  if (threshold == (XMLTreeInfo *) NULL)
    {
      /* Not found in this file; not an error -- caller tries other files. */
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  description=GetXMLTreeChild(threshold,"description");
  if (description == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<description>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  levels=GetXMLTreeChild(threshold,"levels");
  if (levels == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /*
    Allocate the map and populate it from the matched element; every
    validation failure below frees both the XML tree and the partial map.
  */
  map=(ThresholdMap *) AcquireCriticalMemory(sizeof(*map));
  map->map_id=(char *) NULL;
  map->description=(char *) NULL;
  map->levels=(ssize_t *) NULL;
  attribute=GetXMLTreeAttribute(threshold,"map");
  if (attribute != (char *) NULL)
    map->map_id=ConstantString(attribute);
  content=GetXMLTreeContent(description);
  if (content != (char *) NULL)
    map->description=ConstantString(content);
  attribute=GetXMLTreeAttribute(levels,"width");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->width=StringToUnsignedLong(attribute);
  if (map->width == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"height");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels height>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->height=StringToUnsignedLong(attribute);
  if (map->height == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels height>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"divisor");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels divisor>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->divisor=(ssize_t) StringToLong(attribute);
  if (map->divisor < 2)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels divisor>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  content=GetXMLTreeContent(levels);
  if (content == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingContent", "<levels>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  /* NOTE(review): height*sizeof(*map->levels) is computed before the
     overflow-checked multiply inside AcquireQuantumMemory() -- a huge
     "height" attribute could wrap here; verify upstream handling. */
  map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height*
    sizeof(*map->levels));
  if (map->levels == (ssize_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  /*
    Parse width*height whitespace-separated integers; each must lie in
    [0, divisor].
  */
  for (i=0; i < (ssize_t) (map->width*map->height); i++)
  {
    map->levels[i]=(ssize_t) strtol(content,&p,10);
    if (p == content)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> too few values, map \"%s\"",map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    if ((map->levels[i] < 0) || (map->levels[i] > map->divisor))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"",
          (double) map->levels[i],map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    content=p;
  }
  /* A trailing parsable number means the file supplied too many values. */
  value=(double) strtol(content,&p,10);
  (void) value;
  if (p != content)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidContent", "<level> too many values, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  thresholds=DestroyXMLTree(thresholds);
  return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ L i s t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMapFile() lists the threshold maps and their descriptions
% in the given XML file data.
%
%  The format of the ListThresholdMapFile method is:
%
%      MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
%        const char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
%    o file:  A pointer to the output FILE.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
  const char *filename,ExceptionInfo *exception)
{
  const char
    *alias,
    *content,
    *map;

  XMLTreeInfo
    *description,
    *threshold,
    *thresholds;

  assert( xml != (char *) NULL );
  assert( file != (FILE *) NULL );
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if ( thresholds == (XMLTreeInfo *) NULL )
    return(MagickFalse);
  /*
    Print a table header, then one row per <threshold> element: map name,
    optional alias, and description.
  */
  (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
  (void) FormatLocaleFile(file,
    "----------------------------------------------------\n");
  threshold=GetXMLTreeChild(thresholds,"threshold");
  for ( ; threshold != (XMLTreeInfo *) NULL;
          threshold=GetNextXMLTreeTag(threshold))
  {
    map=GetXMLTreeAttribute(threshold,"map");
    if (map == (char *) NULL)
      {
        /* The "map" attribute is mandatory; stop and report malformed XML. */
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingAttribute", "<map>");
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    alias=GetXMLTreeAttribute(threshold,"alias");
    description=GetXMLTreeChild(threshold,"description");
    if (description == (XMLTreeInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingElement", "<description>, map \"%s\"",map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    content=GetXMLTreeContent(description);
    if (content == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingContent", "<description>, map \"%s\"", map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    /* Alias is optional; print an empty column when absent. */
    (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "",
      content);
  }
  thresholds=DestroyXMLTree(thresholds);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i s t T h r e s h o l d M a p s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMaps() lists the threshold maps and their descriptions
% as defined by "threshold.xml" to a file.
%
% The format of the ListThresholdMaps method is:
%
% MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
%    o file:  A pointer to the output FILE.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  MagickStatusType
    status;

  /*
    List the threshold maps of every configured threshold file; the result
    is the conjunction of the per-file listing statuses.
  */
  if (file == (FILE *) NULL)
    file=stdout;
  status=MagickTrue;
  options=GetConfigureOptions(ThresholdsFilename,exception);
  (void) FormatLocaleFile(file,
    "\n Threshold Maps for Ordered Dither Operations\n");
  for (option=(const StringInfo *) GetNextValueInLinkedList(options);
       option != (const StringInfo *) NULL;
       option=(const StringInfo *) GetNextValueInLinkedList(options))
  {
    (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option));
    status&=ListThresholdMapFile(file,(const char *)
      GetStringInfoDatum(option),GetStringInfoPath(option),exception);
  }
  options=DestroyConfigureOptions(options);
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O r d e r e d D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OrderedDitherImage() will perform a ordered dither based on a number
% of pre-defined dithering threshold maps, but over multiple intensity
% levels, which can be different for different channels, according to the
% input argument.
%
% The format of the OrderedDitherImage method is:
%
% MagickBooleanType OrderedDitherImage(Image *image,
% const char *threshold_map,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold_map: A string containing the name of the threshold dither
% map to use, followed by zero or more numbers representing the number
% of color levels to dither between.
%
% Any level number less than 2 will be equivalent to 2, and means only
% binary dithering will be applied to each color channel.
%
% No numbers also means a 2 level (bitmap) dither will be applied to all
% channels, while a single number is the number of levels applied to each
% channel in sequence. More numbers will be applied in turn to each of
% the color channels.
%
% For example: "o3x3,6" will generate a 6 level posterization of the
% image with an ordered 3x3 diffused pixel dither being applied between
%      each level. While checker,8,8,4 will produce a 332 colormapped image
%      with only a single checkerboard hash pattern (50% grey) between each
%      color level, to basically double the number of color levels with
%      a bare minimum of dithering.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType OrderedDitherImage(Image *image,
  const char *threshold_map,ExceptionInfo *exception)
{
#define DitherImageTag  "Dither/Image"

  CacheView
    *image_view;

  char
    token[MagickPathExtent];

  const char
    *p;

  double
    levels[CompositePixelChannel];

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  ThresholdMap
    *map;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (threshold_map == (const char *) NULL)
    return(MagickTrue);
  /*
    Extract the map name: skip leading whitespace/commas, then copy up to
    the next whitespace, comma, or end of string (bounded by token size).
  */
  p=(char *) threshold_map;
  while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) &&
         (*p != '\0'))
    p++;
  threshold_map=p;
  while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) &&
         (*p != '\0'))
  {
    if ((p-threshold_map) >= (MagickPathExtent-1))
      break;
    token[p-threshold_map]=(*p);
    p++;
  }
  token[p-threshold_map]='\0';
  map=GetThresholdMap(token,exception);
  if (map == (ThresholdMap *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "InvalidArgument","%s : '%s'","ordered-dither",threshold_map);
      return(MagickFalse);
    }
  /*
    Parse optional per-channel level counts: a single number applies to all
    channels; additional numbers override channels in sequence.  Default is
    2 (binary dither).
  */
  for (i=0; i < MaxPixelChannels; i++)
    levels[i]=2.0;
  p=strchr((char *) threshold_map,',');
  if ((p != (char *) NULL) && (isdigit((int) ((unsigned char) *(++p))) != 0))
    {
      (void) GetNextToken(p,&p,MagickPathExtent,token);
      for (i=0; (i < MaxPixelChannels); i++)
        levels[i]=StringToDouble(token,(char **) NULL);
      for (i=0; (*p != '\0') && (i < MaxPixelChannels); i++)
      {
        (void) GetNextToken(p,&p,MagickPathExtent,token);
        if (*token == ',')
          (void) GetNextToken(p,&p,MagickPathExtent,token);
        levels[i]=StringToDouble(token,(char **) NULL);
      }
    }
  /* Levels express intervals, not counts: N levels -> N-1 intervals. */
  for (i=0; i < MaxPixelChannels; i++)
    if (fabs(levels[i]) >= 1)
      levels[i]-=1.0;
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Dither image: posterize each updatable channel, using the threshold
    map entry at (x mod width, y mod height) to decide rounding direction.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      ssize_t
        n;

      n=0;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        ssize_t
          level,
          threshold;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (fabs(levels[n]) < MagickEpsilon)
          {
            /* Zero levels: leave this channel untouched. */
            n++;
            continue;
          }
        threshold=(ssize_t) (QuantumScale*q[i]*(levels[n]*(map->divisor-1)+1));
        level=threshold/(map->divisor-1);
        threshold-=level*(map->divisor-1);
        q[i]=ClampToQuantum((double) (level+(threshold >=
          map->levels[(x % map->width)+map->width*(y % map->height)]))*
          QuantumRange/levels[n]);
        n++;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,DitherImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  map=DestroyThresholdMap(map);
  /*
    Return the accumulated status: the previous code returned MagickTrue
    unconditionally, hiding pixel-cache failures from the caller (every
    sibling threshold operator in this file returns status).
  */
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P e r c e p t i b l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PerceptibleImage() set each pixel whose value is less than |epsilon| to
% epsilon or -epsilon (whichever is closer) otherwise the pixel value remains
% unchanged.
%
% The format of the PerceptibleImage method is:
%
% MagickBooleanType PerceptibleImage(Image *image,const double epsilon,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum PerceptibleThreshold(const Quantum quantum,
  const double epsilon)
{
  double
    value;

  /*
    Return the quantum unchanged when its magnitude is at least epsilon;
    otherwise snap it to +/-epsilon, preserving its sign (non-negative
    values, including zero, map to +epsilon).
  */
  value=(double) quantum;
  if (fabs(value) >= epsilon)
    return(quantum);
  return((Quantum) (value < 0.0 ? -epsilon : epsilon));
}
MagickExport MagickBooleanType PerceptibleImage(Image *image,
  const double epsilon,ExceptionInfo *exception)
{
#define PerceptibleImageTag  "Perceptible/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /*
        Palette image: apply the threshold to the colormap entries and
        sync the pixels from the updated colormap.
      */
      register ssize_t
        i;

      register PixelInfo
        *magick_restrict q;

      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        q->red=(double) PerceptibleThreshold(ClampToQuantum(q->red),
          epsilon);
        q->green=(double) PerceptibleThreshold(ClampToQuantum(q->green),
          epsilon);
        q->blue=(double) PerceptibleThreshold(ClampToQuantum(q->blue),
          epsilon);
        q->alpha=(double) PerceptibleThreshold(ClampToQuantum(q->alpha),
          epsilon);
        q++;
      }
      return(SyncImage(image,exception));
    }
  /*
    Perceptible image: snap every defined channel value whose magnitude is
    below epsilon to +/-epsilon.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        /* Unlike the other operators here this skips only undefined
           channels, not non-updatable ones. */
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PerceptibleThreshold(q[i],epsilon);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,PerceptibleImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a n d o m T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RandomThresholdImage() changes the value of individual pixels based on the
% intensity of each pixel compared to a random threshold. The result is a
% low-contrast, two color image.
%
% The format of the RandomThresholdImage method is:
%
%      MagickBooleanType RandomThresholdImage(Image *image,
%        const double min_threshold,const double max_threshold,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o min_threshold, max_threshold: Specify the low and high thresholds; each
%      pixel's random threshold is clamped into this interval.  These values
%      range from 0 to QuantumRange.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  RandomThresholdImage() thresholds each updatable channel of each pixel
  against a random value drawn per pixel, clamped to the interval
  [min_threshold,max_threshold]: channel values at or below the threshold
  become 0, values above it become QuantumRange.  Returns MagickTrue on
  success, MagickFalse on failure (errors are reported in `exception').
*/
MagickExport MagickBooleanType RandomThresholdImage(Image *image,
  const double min_threshold, const double max_threshold,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Thresholding writes per-pixel values, so a colormapped image must be
     promoted to DirectClass first. */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Random threshold image.  One RandomInfo per thread so pseudo-random
    streams are independent across OpenMP workers.
  */
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* `key' is folded into the scheduling clause so the secret-key read is
     not optimized away; key == ~0UL is effectively never true. */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          threshold;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* Clamp the random threshold into [min_threshold,max_threshold]:
           values already outside the band are forced to the nearer bound,
           everything else gets a uniform random threshold. */
        if ((double) q[i] < min_threshold)
          threshold=min_threshold;
        else
          if ((double) q[i] > max_threshold)
            threshold=max_threshold;
          else
            threshold=(double) (QuantumRange*
              GetPseudoRandomValue(random_info[id]));
        q[i]=(double) q[i] <= threshold ? 0 : QuantumRange;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a n g e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RangeThresholdImage() applies soft and hard thresholding.
%
% The format of the RangeThresholdImage method is:
%
% MagickBooleanType RangeThresholdImage(Image *image,
% const double low_black,const double low_white,const double high_white,
% const double high_black,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o low_black: Define the minimum black threshold value.
%
% o low_white: Define the minimum white threshold value.
%
% o high_white: Define the maximum white threshold value.
%
% o high_black: Define the maximum black threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  RangeThresholdImage() applies soft and hard thresholding: intensities
  below low_black map to 0, ramp up to QuantumRange over
  [low_black,low_white), hold QuantumRange over [low_white,high_white],
  ramp back down over (high_white,high_black], and map to 0 above
  high_black.  Returns MagickTrue on success, MagickFalse on failure
  (errors are reported in `exception').
*/
MagickExport MagickBooleanType RangeThresholdImage(Image *image,
  const double low_black,const double low_white,const double high_white,
  const double high_black,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Per-pixel writes require DirectClass storage. */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) TransformImageColorspace(image,sRGBColorspace,exception);
  /*
    Range threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* With a restricted channel mask, threshold each channel on its own
           value instead of the pixel intensity. */
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        /* Piecewise map; the seemingly redundant >= guards keep NaN
           intensities out of the ramp branches so they fall through to the
           final zero branch.  (The original chain ended with two identical
           `q[i]=0' arms, collapsed here into a single else.) */
        if (pixel < low_black)
          q[i]=0;
        else
          if ((pixel >= low_black) && (pixel < low_white))
            q[i]=ClampToQuantum(QuantumRange*
              PerceptibleReciprocal(low_white-low_black)*(pixel-low_black));
          else
            if ((pixel >= low_white) && (pixel <= high_white))
              q[i]=QuantumRange;
            else
              if ((pixel > high_white) && (pixel <= high_black))
                q[i]=ClampToQuantum(QuantumRange*PerceptibleReciprocal(
                  high_black-high_white)*(high_black-pixel));
              else
                q[i]=0;  /* pixel > high_black, or NaN */
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W h i t e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WhiteThresholdImage() is like ThresholdImage() but forces all pixels above
% the threshold into white while leaving all pixels at or below the threshold
% unchanged.
%
% The format of the WhiteThresholdImage method is:
%
% MagickBooleanType WhiteThresholdImage(Image *image,
%        const char *thresholds,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o thresholds: Define the threshold values, as a geometry string
%      (per-channel values; append % for percentages of QuantumRange).
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  WhiteThresholdImage() forces every updatable channel whose value exceeds
  its per-channel threshold to QuantumRange, leaving values at or below the
  threshold unchanged.  `thresholds' is a geometry string
  ("red{,green,blue,alpha}", optionally suffixed with % for percentages of
  QuantumRange); a NULL string is a no-op.  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType WhiteThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* A NULL threshold string is treated as "nothing to do", not an error. */
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) TransformImageColorspace(image,sRGBColorspace,exception);
  GetPixelInfo(image,&threshold);
  /*
    Parse the geometry string: rho seeds every channel, then sigma/xi/psi/chi
    selectively override green, blue, and alpha (or black/alpha for CMYK).
  */
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  /* NOTE(review): threshold.colorspace is presumably initialized from the
     image by GetPixelInfo() above — confirm; for CMYK the 4th geometry value
     is black and the 5th is alpha. */
  if (threshold.colorspace == CMYKColorspace)
    {
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  /* A trailing % scales all thresholds from percentages to quantum range. */
  if ((flags & PercentValue) != 0)
    {
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    White threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* With a restricted channel mask, compare each channel against its
           own value rather than the pixel intensity. */
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        if (pixel > GetPixelInfoChannel(&threshold,channel))
          q[i]=QuantumRange;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
ast-dump-openmp-distribute-parallel-for-simd.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) {
#pragma omp distribute parallel for simd
for (int i = 0; i < x; i++)
;
}
void test_two(int x, int y) {
#pragma omp distribute parallel for simd
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_three(int x, int y) {
#pragma omp distribute parallel for simd collapse(1)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_four(int x, int y) {
#pragma omp distribute parallel for simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_five(int x, int y, int z) {
#pragma omp distribute parallel for simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
for (int i = 0; i < z; i++)
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-distribute-parallel-for-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPDistributeParallelForSimdDirective {{.*}} <line:4:1, col:41>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPDistributeParallelForSimdDirective {{.*}} <line:10:1, col:41>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPDistributeParallelForSimdDirective {{.*}} <line:17:1, col:53>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:42, col:52>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:51> 'int'
// CHECK-NEXT: | | |-value: Int 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:51> 'int' 1
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPDistributeParallelForSimdDirective {{.*}} <line:24:1, col:53>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:42, col:52>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:51> 'int'
// CHECK-NEXT: | | |-value: Int 2
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:51> 'int' 2
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPDistributeParallelForSimdDirective {{.*}} <line:31:1, col:53>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:42, col:52>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:51> 'int'
// CHECK-NEXT: | |-value: Int 2
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:51> 'int' 2
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
pr88203-1.c | /* PR c++/88203 */
/* { dg-do compile } */
/* { dg-additional-options "-std=c99" { target c } } */
/* { dg-additional-options "-std=c++11" { target c++ } } */
void foo (const char *);
#pragma omp declare target to (foo)
void
f1 (void)
{
#pragma omp parallel default(none)
foo (__func__);
}
void
f2 (void)
{
#pragma omp parallel default(none) shared(__func__)
foo (__func__);
}
void
f3 (void)
{
#pragma omp parallel default(none) firstprivate(__func__)
foo (__func__);
}
void
f4 (void)
{
foo (__func__);
#pragma omp parallel default(none)
foo (__func__);
}
void
f5 (void)
{
foo (__func__);
#pragma omp parallel default(none) shared(__func__)
foo (__func__);
}
void
f6 (void)
{
foo (__func__);
#pragma omp parallel default(none) firstprivate(__func__)
foo (__func__);
}
void
f7 (void)
{
#pragma omp target map(to: __func__)
foo (__func__);
#pragma omp task depend(inout:__func__)
foo (__func__);
}
|
jacobi.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <stdint.h>
#include "../timer.h"
//------------------------------------------------------------------------------------------------------------------------------
/* Weighted (2/3) Jacobi smoother for the variable-coefficient Helmholtz
 * operator a*alpha*phi - b*div(beta grad phi).  Iterates ping-ponging
 * between the phi_id grid and the scratch __temp grid, which is why
 * numSmooths must be even (so the final result lands back in phi_id).
 * When ghosts>1, several smooths are fused per boundary exchange
 * (communication-avoiding), operating on progressively fewer ghost cells.
 * Threading is either across boxes or within a box, selected by box size. */
void smooth(domain_type * domain, int level, int phi_id, int rhs_id, double a, double b){
if(numSmooths&1){
printf("error - numSmooths must be even...\n");
exit(0);
}
/* boxes at least this wide get intra-box threading instead of box-parallel */
int CollaborativeThreadingBoxSize = 100000; // i.e. never
#ifdef __COLLABORATIVE_THREADING
CollaborativeThreadingBoxSize = 1 << __COLLABORATIVE_THREADING;
#endif
/* exactly one of the two omp if() flags is true */
int omp_across_boxes = (domain->subdomains[0].levels[level].dim.i < CollaborativeThreadingBoxSize);
int omp_within_a_box = (domain->subdomains[0].levels[level].dim.i >= CollaborativeThreadingBoxSize);
int box,s;
int ghosts = domain->ghosts;
double TwoThirds = 2.0/3.0;    /* weight of the damped Jacobi update */
// if communication-avoiding, need RHS for stencils in ghost zones
if(ghosts>1)exchange_boundary(domain,level,rhs_id,1,1,1);
for(s=0;s<numSmooths;s+=ghosts){
// Jacobi ping pongs between phi and __temp
if((s&1)==0)exchange_boundary(domain,level,phi_id,1,ghosts>1,ghosts>1); // corners/edges if doing communication-avoiding...
else exchange_boundary(domain,level,__temp,1,ghosts>1,ghosts>1); // corners/edges if doing communication-avoiding...
// now do ghosts communication-avoiding smooths on each box...
uint64_t _timeStart = CycleTime();
#pragma omp parallel for private(box) if(omp_across_boxes)
for(box=0;box<domain->subdomains_per_rank;box++){
int i,j,k,ss;
int pencil = domain->subdomains[box].levels[level].pencil;
int plane = domain->subdomains[box].levels[level].plane;
int ghosts = domain->subdomains[box].levels[level].ghosts;   /* shadows outer ghosts; per-box value */
int dim_k = domain->subdomains[box].levels[level].dim.k;
int dim_j = domain->subdomains[box].levels[level].dim.j;
int dim_i = domain->subdomains[box].levels[level].dim.i;
double h2inv = 1.0/(domain->h[level]*domain->h[level]);
/* all grid pointers are offset to the first interior cell */
double * __restrict__ rhs = domain->subdomains[box].levels[level].grids[ rhs_id] + ghosts*(1+pencil+plane);
double * __restrict__ alpha = domain->subdomains[box].levels[level].grids[__alpha ] + ghosts*(1+pencil+plane);
double * __restrict__ beta_i = domain->subdomains[box].levels[level].grids[__beta_i] + ghosts*(1+pencil+plane);
double * __restrict__ beta_j = domain->subdomains[box].levels[level].grids[__beta_j] + ghosts*(1+pencil+plane);
double * __restrict__ beta_k = domain->subdomains[box].levels[level].grids[__beta_k] + ghosts*(1+pencil+plane);
double * __restrict__ lambda = domain->subdomains[box].levels[level].grids[__lambda] + ghosts*(1+pencil+plane);
/* each fused smooth consumes one ring of valid ghost data */
int ghostsToOperateOn=ghosts-1;
for(ss=s;ss<s+ghosts;ss++,ghostsToOperateOn--){
double * __restrict__ phi;
double * __restrict__ phi_new;
/* parity of ss selects read/write grids (ping-pong) */
if((ss&1)==0){phi = domain->subdomains[box].levels[level].grids[ phi_id] + ghosts*(1+pencil+plane);
phi_new= domain->subdomains[box].levels[level].grids[ __temp] + ghosts*(1+pencil+plane);}
else{phi = domain->subdomains[box].levels[level].grids[ __temp] + ghosts*(1+pencil+plane);
phi_new= domain->subdomains[box].levels[level].grids[ phi_id] + ghosts*(1+pencil+plane);}
#pragma omp parallel for private(k,j,i) if(omp_within_a_box) collapse(2)
for(k=0-ghostsToOperateOn;k<dim_k+ghostsToOperateOn;k++){
for(j=0-ghostsToOperateOn;j<dim_j+ghostsToOperateOn;j++){
for(i=0-ghostsToOperateOn;i<dim_i+ghostsToOperateOn;i++){
int ijk = i + j*pencil + k*plane;
/* 7-point variable-coefficient Helmholtz stencil */
double helmholtz = a*alpha[ijk]*phi[ijk]
-b*h2inv*(
beta_i[ijk+1 ]*( phi[ijk+1 ]-phi[ijk ] )
-beta_i[ijk ]*( phi[ijk ]-phi[ijk-1 ] )
+beta_j[ijk+pencil]*( phi[ijk+pencil]-phi[ijk ] )
-beta_j[ijk ]*( phi[ijk ]-phi[ijk-pencil] )
+beta_k[ijk+plane ]*( phi[ijk+plane ]-phi[ijk ] )
-beta_k[ijk ]*( phi[ijk ]-phi[ijk-plane ] )
);
/* damped Jacobi: lambda holds the precomputed inverse diagonal */
phi_new[ijk] = phi[ijk] - TwoThirds*lambda[ijk]*(helmholtz-rhs[ijk]);
}}}
} // ss-loop
} // box-loop
domain->cycles.smooth[level] += (uint64_t)(CycleTime()-_timeStart);
} // s-loop
}
//------------------------------------------------------------------------------------------------------------------------------
|
mandelbrot.c | /*
To compile:
gcc -O3 -o mandelbrot mandelbrot.c png_util.c -I. -lpng -lm -fopenmp
Or just type:
module load gcc
make
To create an image with 4096 x 4096 pixels (last argument will be used to set number of threads):
./mandelbrot 4096 4096 1
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "png_util.h"
// Q2a: add include for OpenMP header file here:
#include <omp.h>
#define MXITER 1000
typedef struct {
double r;
double i;
}complex_t;
// return iterations before z leaves mandelbrot set for given c
// Iterate z <- z^2 + c starting from z = c and count the iterations
// taken before |z|^2 exceeds 4 (the escape radius squared).  Returns
// MXITER when the point never escapes, i.e. it is (probably) inside
// the Mandelbrot set.
int testpoint(complex_t c){
  complex_t z = c;
  int iter = 0;

  while (iter < MXITER) {
    // compute z^2 + c; the real part must be saved before z.r changes
    double re_next = (z.r*z.r) - (z.i*z.i) + c.r;
    z.i = z.r*z.i*2. + c.i;
    z.r = re_next;
    if ((z.r*z.r + z.i*z.i) > 4.0)
      break;          // escaped: |z| > 2
    ++iter;
  }
  return iter;
}
// perform Mandelbrot iteration on a grid of numbers in the complex plane
// record the iteration counts in the count array
// perform Mandelbrot iteration on a grid of numbers in the complex plane
// and record the iteration counts in the count array (row n, column m).
//
// Fix: the original declared n, m and c at function scope; under
// "#pragma omp parallel for" only the loop variable n is privatized, so
// m and c were SHARED across threads — a data race that corrupts the
// image with more than one thread.  Declaring them inside the parallel
// loop makes each thread's copies private.
void mandelbrot(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){
  double dr = (cmax.r-cmin.r)/(Nre-1);
  double di = (cmax.i-cmin.i)/(Nim-1);
  // outer loop split amongst threads; n, m, c are per-thread
  #pragma omp parallel for
  for(int n=0;n<Nim;++n){
    for(int m=0;m<Nre;++m){
      complex_t c;
      c.r = cmin.r + dr*m;
      c.i = cmin.i + di*n;
      count[m+n*Nre] = testpoint(c);
    }
  }
}
int main(int argc, char **argv){
  // usage: ./mandelbrot Nre Nim Nthreads
  // (the LAST argument sets the number of OpenMP threads)
  if(argc < 4){
    fprintf(stderr, "usage: %s Nre Nim Nthreads\n", argv[0]);
    return 1;
  }
  int Nre = atoi(argv[1]);
  int Nim = atoi(argv[2]);
  int Nthreads = atoi(argv[argc-1]);
  // Nre/Nim must be >= 2: mandelbrot() divides by (N-1)
  if(Nre < 2 || Nim < 2 || Nthreads < 1){
    fprintf(stderr, "invalid arguments: Nre and Nim must be >= 2, Nthreads >= 1\n");
    return 1;
  }
  // set the number of OpenMP threads
  omp_set_num_threads(Nthreads);
  // storage for the iteration counts
  float *count = (float*) malloc((size_t)Nre*Nim*sizeof(float));
  if(count == NULL){
    fprintf(stderr, "out of memory allocating %dx%d counts\n", Nre, Nim);
    return 1;
  }
  // Parameters for a bounding box for "c" that generates an interesting image
  const float centRe = -.759856, centIm= .125547;
  const float diam  = 0.151579;
  complex_t cmin;
  complex_t cmax;
  cmin.r = centRe - 0.5*diam;
  cmax.r = centRe + 0.5*diam;
  cmin.i = centIm - 0.5*diam;
  cmax.i = centIm + 0.5*diam;
  // read time before calling mandelbrot (OpenMP wall clock)
  double start = omp_get_wtime();
  // compute mandelbrot set
  mandelbrot(Nre, Nim, cmin, cmax, count);
  // read time after calling mandelbrot
  double end = omp_get_wtime();
  // print elapsed time
  printf("elapsed = %g\n", end-start);
  // output mandelbrot to png format image ("wb": PNG is binary data)
  FILE *fp = fopen("mandelbrot.png", "wb");
  if(fp == NULL){
    fprintf(stderr, "could not open mandelbrot.png for writing\n");
    free(count);
    return 1;
  }
  write_hot_png(fp, Nre, Nim, count, 0, 80);
  // NOTE(review): write_hot_png's ownership of fp is not visible here;
  // if it does not fclose(fp) itself, an fclose belongs after this call.
  free(count);
  return 0;
}
|
HDAA_fmt_plug.c | /* HTTP Digest access authentication patch for john
*
* Written by Romain Raboin. OMP and intrinsics support by magnum
*
* This software is Copyright (c) 2008 Romain Raboin - romain.raboin at
* gmail.com, and Copyright (c) 2012 magnum and it is hereby released to
* the general public under the following terms: Redistribution and
* use in source and binary forms, with or without modification, are
* permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_HDAA;
#elif FMT_REGISTERS_H
john_register_one(&fmt_HDAA);
#else
#include <string.h>
#ifdef __MMX__
#include <mmintrin.h>
#endif
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "md5.h"
#include "stdint.h"
#include "simd-intrinsics.h"
#define ALGORITHM_NAME "MD5 " MD5_ALGORITHM_NAME
#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "memdbg.h"
#define FORMAT_LABEL "hdaa"
#define FORMAT_NAME "HTTP Digest access authentication"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 32
#define CIPHERTEXT_LENGTH 32
#define BINARY_SIZE 16
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(reqinfo_t)
#define SALT_ALIGN 4
#if defined(_OPENMP)
static unsigned int omp_t = 1;
#ifdef SIMD_COEF_32
#ifndef OMP_SCALE
#define OMP_SCALE 256
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#endif
#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD5)
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&60)*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*64*SIMD_COEF_32 )
#define GETOUTPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&0x1c)*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32 )
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define SEPARATOR '$'
#define MAGIC "$response$"
#define SIZE_TAB 12
// This is 8 x 64 bytes, so in MMX/SSE2 we support up to 9 limbs of MD5
#define HTMP 512
typedef struct
{
size_t h1tmplen;
size_t h3tmplen;
char h1tmp[HTMP];
char h3tmp[HTMP];
} reqinfo_t;
/*
digest authentication scheme :
h1 = md5(user:realm:password)
h2 = md5(method:digestURI)
response = h3 = md5(h1:nonce:nonceCount:ClientNonce:qop:h2)
*/
/* request information */
enum e_req {
R_RESPONSE,
R_USER,
R_REALM,
R_METHOD,
R_URI,
R_NONCE,
R_NONCECOUNT,
R_CLIENTNONCE,
R_QOP
};
/* response:user:realm:method:uri:nonce:nonceCount:ClientNonce:qop */
static struct fmt_tests tests[] = {
{"$response$679066476e67b5c7c4e88f04be567f8b$user$myrealm$GET$/$8c12bd8f728afe56d45a0ce846b70e5a$00000001$4b61913cec32e2c9$auth", "nocode"},
{"$response$faa6cb7d676e5b7c17fcbf966436aa0c$moi$myrealm$GET$/$af32592775d27b1cd06356b3a0db9ddf$00000001$8e1d49754a25aea7$auth", "kikou"},
{"$response$56940f87f1f53ade8b7d3c5a102c2bf3$usrx$teN__chars$GET$/4TLHS1TMN9cfsbqSUAdTG3CRq7qtXMptnYfn7mIIi3HRKOMhOks56e$2c0366dcbc$00000001$0153$auth", "passWOrd"},
{NULL}
};
/* used by set_key */
static char (*saved_plain)[PLAINTEXT_LENGTH + 1];
#ifdef SIMD_COEF_32
#define LIMBS 9
static unsigned char *saved_key[LIMBS];
static unsigned int *interm_key;
static unsigned int *crypt_key;
#else
static int (*saved_len);
static unsigned char (*crypt_key)[BINARY_SIZE];
#endif
/* Store information about the request ()*/
static reqinfo_t *rinfo = NULL;
/* One-time format setup: scale the keys-per-crypt limits by the OpenMP
 * thread count (further multiplied by OMP_SCALE) and allocate the
 * key/crypt buffers — SIMD-aligned limb buffers when built with
 * SIMD_COEF_32, simple per-key arrays otherwise. */
static void init(struct fmt_main *self)
{
#ifdef SIMD_COEF_32
	int i;
#endif
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_32
	/* one 64-byte-per-key buffer per MD5 limb */
	for (i = 0; i < LIMBS; i++)
		saved_key[i] = mem_calloc_align(self->params.max_keys_per_crypt,
		                                64, MEM_ALIGN_SIMD);
	interm_key = mem_calloc_align(self->params.max_keys_per_crypt,
	                              16, MEM_ALIGN_SIMD);
	crypt_key = mem_calloc_align(self->params.max_keys_per_crypt,
	                             16, MEM_ALIGN_SIMD);
#else
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_len));
	crypt_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_key));
#endif
	saved_plain = mem_calloc(self->params.max_keys_per_crypt,
	                         sizeof(*saved_plain));
}
/* Release every buffer allocated by init(); mirror of its #ifdef paths. */
static void done(void)
{
#ifdef SIMD_COEF_32
	int i;
#endif
	MEM_FREE(saved_plain);
	MEM_FREE(crypt_key);
#ifdef SIMD_COEF_32
	MEM_FREE(interm_key);
	for (i = 0; i < LIMBS; i++)
		MEM_FREE(saved_key[i]);
#else
	MEM_FREE(saved_len);
#endif
}
/* Validate one ciphertext line of the form
 * $response$hash$user$realm$method$uri$nonce$nc$cnonce$qop
 * Returns 1 when the structure and the hex fields check out, 0 otherwise.
 * Works on a strdup'd copy because strtokm mutates its input. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;

	if (strncmp(ciphertext, MAGIC, sizeof(MAGIC) - 1) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += sizeof(MAGIC)-1;
	if ((p = strtokm(ctcopy, "$")) == NULL) /* hash */
		goto err;
	if (!ishexlc(p) || strlen(p) != 32)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* user */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* realm */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* method */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* uri */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* nonce */
		goto err;
	if (!ishexlc(p) )
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* noncecount */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* clientnonce */
		goto err;
	if (!ishexlc(p) )
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* qop */
		goto err;
	if ((p = strtokm(NULL, "$")) != NULL) /* trailing fields are an error */
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}
/* Install the current salt (a reqinfo_t built by get_salt) for crypt_all. */
static void set_salt(void *salt)
{
	rinfo = salt;
}
static void set_key(char *key, int index)
{
strcpy(saved_plain[index], key);
#ifndef SIMD_COEF_32
saved_len[index] = -1;
#endif
}
/* Return the plaintext stored for this index by set_key(). */
static char *get_key(int index)
{
	return saved_plain[index];
}
/* Quick reject across the whole batch: return 1 iff any computed hash
 * matches the candidate binary.  The SIMD path compares only the first
 * 32 bits of each lane (cmp_one does the full check later); the scalar
 * path compares full digests. */
static int cmp_all(void *binary, int count)
{
#ifdef SIMD_COEF_32
	unsigned int x,y=0;
#ifdef _OPENMP
	for(; y < SIMD_PARA_MD5 * omp_t; y++)
#else
	for(; y < SIMD_PARA_MD5; y++)
#endif
		for(x = 0; x < SIMD_COEF_32; x++)
		{
			if( ((ARCH_WORD_32*)binary)[0] == ((ARCH_WORD_32*)crypt_key)[y*SIMD_COEF_32*4+x] )
				return 1;
		}
	return 0;
#else
	int index;
	for (index = 0; index < count; index++)
		if (!(memcmp(binary, crypt_key[index], BINARY_SIZE)))
			return 1;
	return 0;
#endif
}
/* Full digest comparison for a single candidate index.  The SIMD path
 * walks the interleaved crypt_key layout lane by lane. */
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
	unsigned int i,x,y;
	x = index&(SIMD_COEF_32-1);
	y = (unsigned int)index/SIMD_COEF_32;
	for(i=0;i<(BINARY_SIZE/4);i++)
		if ( ((ARCH_WORD_32*)binary)[i] != ((ARCH_WORD_32*)crypt_key)[y*SIMD_COEF_32*4+i*SIMD_COEF_32+x] )
			return 0;
	return 1;
#else
	return !(memcmp(binary, crypt_key[index], BINARY_SIZE));
#endif
}
/* cmp_one already compares the full digest, so nothing further to verify. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* convert hash from binary to ascii */
#ifdef SIMD_COEF_32
// This code should be rewritten in intrinsics, reading from
// MMX or SSE2 output buffers and writing to MMX/SSE2 input buffers.
/* Convert NBKEYS interleaved 16-byte binary digests (SIMD output
 * layout, GETOUTPOS) into 32-char lowercase-hex ASCII written into the
 * SIMD input layout (GETPOS).  Two digest bytes become one 32-bit word
 * of four hex characters; the add-chain at the end maps each nibble to
 * '0'-'9'/'a'-'f' branchlessly across all four lanes. */
static inline void sse_bin2ascii(unsigned char *conv, unsigned char *src)
{
	unsigned int index;

	for (index = 0; index < NBKEYS; index++) {
		unsigned int i, j = 0;

		for (i = 0; i < BINARY_SIZE; i += 2) {
			unsigned int t;

			/* pack 4 nibbles (little-endian char order) into t */
			t = (src[GETOUTPOS((i + 1), index)] & 0x0f);
			t <<= 12;
			t |= (src[GETOUTPOS((i + 1), index)] & 0xf0);
			t <<= 4;
			t |= (src[GETOUTPOS(i, index)] & 0x0f);
			t <<= 8;
			t |= ((src[GETOUTPOS(i, index)] & 0xf0) >> 4);
			/* per-lane nibble -> hex-digit ascii */
			t += 0x06060606;
			t += ((((t >> 4) & 0x01010101) * 0x27) + 0x2a2a2a2a);
			*(unsigned int*)&conv[GETPOS(j, index)] = t;
			j+=4;
		}
	}
}
#endif /* SIMD_COEF_32 */
#ifdef __MMX__
/* MMX variant: convert a 16-byte binary digest (in src) to 32 hex
 * characters (in conv), 8 bytes per __m64 iteration.  Splits each byte
 * into high/low nibbles, interleaves them, then applies the same
 * branchless nibble->ascii add-chain as the scalar version.  Ends with
 * EMMS to restore the x87 state. */
static inline void bin2ascii(__m64 *conv, __m64 *src)
{
	unsigned int i = 0;

	while (i != 4) {
		__m64 l;
		__m64 r;
		__m64 t;
		__m64 u;
		__m64 v;

		/* 32 bits to 64 bits */
		t = _mm_set1_pi32(0x0f0f0f0f);
		/* Bit-wise AND the 64-bit values in M1 and M2. */
		u = _mm_and_si64(_mm_srli_si64(src[(i / 2)], 4), t);  /* high nibbles */
		v = _mm_and_si64(src[(i / 2)], t);                    /* low nibbles */
		/* interleaving */
		l = _mm_unpacklo_pi8(u, v);
		r = _mm_unpackhi_pi8(u, v);
		t = _mm_set1_pi32(0x06060606);
		l = _mm_add_pi32(l, t);
		r = _mm_add_pi32(r, t);
		t = _mm_set1_pi32(0x01010101);
		/* u = (l << 4) & t */
		u = _mm_and_si64(_mm_srli_si64(l, 4), t);
		/* v = (r << 4) & t */
		v = _mm_and_si64(_mm_srli_si64(r, 4), t);
		t = _mm_set1_pi32(0x00270027);
		/* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce
		   the low 16 bits of the results. */
		u = _mm_mullo_pi16(u, t);
		v = _mm_mullo_pi16(v, t);
		t = _mm_set1_pi32(0x2a2a2a2a);
		u = _mm_add_pi32(u, t);
		v = _mm_add_pi32(v, t);
		conv[(i++)] = _mm_add_pi32(l, u);
		conv[(i++)] = _mm_add_pi32(r, v);
	}
	__asm__ __volatile__("emms");
}
#else
/* Scalar variant: convert the 16-byte binary digest in `source` into
 * 32 lowercase-hex ASCII characters stored in `conv`, packing two
 * digest bytes (four hex chars) into each output word.  The closing
 * add-chain turns all four nibble lanes into '0'-'9'/'a'-'f' without
 * branches. */
static inline void bin2ascii(uint32_t *conv, uint32_t *source)
{
	unsigned char *src = (unsigned char*)source;
	unsigned int out = 0;
	unsigned int i;

	for (i = 0; i < BINARY_SIZE; i += 2) {
		uint32_t w;

		/* assemble the 4 nibbles in output-byte order */
#if (ARCH_LITTLE_ENDIAN == 0)
		w = (src[i] & 0xf0);
		w = w * 0x10 + (src[i] & 0x0f);
		w = w * 0x1000 + (src[(i + 1)] & 0xf0);
		w = w * 0x10 + (src[(i + 1)] & 0x0f);
#else
		w = (src[(i + 1)] & 0x0f);
		w = w * 0x1000 + (src[(i + 1)] & 0xf0);
		w = w * 0x10 + (src[i] & 0x0f);
		w = w * 0x100 + ((src[i] & 0xf0) >> 4);
#endif
		/* branchless nibble -> ascii hex on all four lanes */
		w += 0x06060606;
		w += ((((w >> 4) & 0x01010101) * 0x27) + 0x2a2a2a2a);
		conv[out++] = w;
	}
}
#endif /* MMX */
#if SIMD_COEF_32
/* Copy one finished lane (BINARY_SIZE bytes) from the intermediate SIMD
 * buffer to the final crypt_key buffer, stepping by SIMD_COEF_32 to
 * follow the interleaved word layout. */
static inline void crypt_done(unsigned const int *source, unsigned int *dest, int index)
{
	unsigned int i;
	unsigned const int *s = &source[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*4*SIMD_COEF_32];
	unsigned int *d = &dest[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*4*SIMD_COEF_32];

	for (i = 0; i < BINARY_SIZE / 4; i++) {
		*d = *s;
		s += SIMD_COEF_32;
		d += SIMD_COEF_32;
	}
}
#endif
/* Compute response = MD5(MD5(h1tmp+password) in hex, spliced into h3tmp)
 * for `count` candidates.  SIMD path: build the MD5 input blocks in the
 * interleaved saved_key limb buffers, hash h1, hex-encode it in place,
 * then hash h3 limb by limb, harvesting each lane as soon as its final
 * limb is done.  Scalar path: two straight MD5s per key. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
#if SIMD_COEF_32
#if defined(_OPENMP)
#define ti (thread*NBKEYS+index)
	int thread;

#pragma omp parallel for
	for (thread = 0; thread < (count+NBKEYS-1)/NBKEYS; thread++)
#else
#define thread 0
#define ti index
#endif
	{
		/* crypt_len persists across calls so the buffer-cleaning loop
		 * below only wipes up to the previous key's length.
		 * NOTE(review): it is `static` and indexed by `index` only, so
		 * under _OPENMP all threads share (and race on) the same array
		 * even though their saved_key buffers differ — confirm whether
		 * this is a correctness issue in OMP builds. */
		static unsigned int crypt_len[NBKEYS];
		unsigned int index, i, shortest, longest;

		/* pass 1: lay out h1tmp + password, pad, and set the MD5 length word */
		for (index = 0; index < NBKEYS; index++)
		{
			int len;
			char temp;
			const char *key;

			key = rinfo->h1tmp;
			for (len = 0; len < rinfo->h1tmplen; len += 4, key += 4)
				*(ARCH_WORD_32*)&saved_key[len>>6][GETPOS(len, ti)] = *(ARCH_WORD_32*)key;
			len = rinfo->h1tmplen;
			key = (char*)&saved_plain[ti];
			while((temp = *key++)) {
				saved_key[len>>6][GETPOS(len, ti)] = temp;
				len++;
			}
			saved_key[len>>6][GETPOS(len, ti)] = 0x80;

			// Clean rest of this buffer
			i = len;
			while (++i & 3)
				saved_key[i>>6][GETPOS(i, ti)] = 0;

			for (; i < (((len+8)>>6)+1)*64; i += 4)
				*(ARCH_WORD_32*)&saved_key[i>>6][GETPOS(i, ti)] = 0;

			/* MD5 message length in bits, in the lane's length slot */
			((unsigned int *)saved_key[(len+8)>>6])[14*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + (ti/SIMD_COEF_32)*16*SIMD_COEF_32] = len << 3;
		}

		/* h1 = MD5(user:realm:password), then hex-encode into the input buffer */
		SIMDmd5body(&saved_key[0][thread*64*NBKEYS], &crypt_key[thread*4*NBKEYS], NULL, SSEi_MIXED_IN);
		sse_bin2ascii((unsigned char*)&saved_key[0][thread*64*NBKEYS], (unsigned char*)&crypt_key[thread*4*NBKEYS]);

		/* pass 2: append the h3tmp tail after the 32 hex chars of h1 */
		longest = 0; shortest = HTMP;
		for (index = 0; index < NBKEYS; index++)
		{
			const char *key;
			int i, len;

			len = CIPHERTEXT_LENGTH - 1;
			key = rinfo->h3tmp + CIPHERTEXT_LENGTH;

			// Copy a char at a time until aligned at destination
			while (++len & 3)
				saved_key[len>>6][GETPOS(len, ti)] = *key++;

			// ...then a word at a time. This is a good boost, we are copying over 100 bytes.
			for (;len < rinfo->h3tmplen; len += 4, key += 4)
				*(ARCH_WORD_32*)&saved_key[len>>6][GETPOS(len, ti)] = *(ARCH_WORD_32*)key;
			len = rinfo->h3tmplen;
			saved_key[len>>6][GETPOS(len, ti)] = 0x80;

			// Clean rest of this buffer
			i = len;
			while (++i & 3)
				saved_key[i>>6][GETPOS(i, ti)] = 0;

			//for (; i < (((len+8)>>6)+1)*64; i += 4)
			/* only wipe up to the previous occupant's length (see crypt_len note) */
			for (; i <= crypt_len[index]; i += 4)
				*(ARCH_WORD_32*)&saved_key[i>>6][GETPOS(i, ti)] = 0;

			((unsigned int *)saved_key[(len+8)>>6])[14*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + (ti/SIMD_COEF_32)*16*SIMD_COEF_32] = len << 3;

			crypt_len[index] = len;
			if (len > longest)
				longest = len;
			if (len < shortest)
				shortest = len;
		}

		// First limb
		SIMDmd5body(&saved_key[0][thread*64*NBKEYS], &interm_key[thread*4*NBKEYS], NULL, SSEi_MIXED_IN);
		// Copy any output that is done now
		if (shortest < 56) {
			if (longest < 56)
				memcpy(&crypt_key[thread*4*NBKEYS], &interm_key[thread*4*NBKEYS], 16*NBKEYS);
			else
				for (index = 0; index < NBKEYS; index++)
					if (crypt_len[index] < 56)
						crypt_done(interm_key, crypt_key, ti);
		}
		// Do the rest of the limbs
		for (i = 1; i < (((longest + 8) >> 6) + 1); i++) {
			SIMDmd5body(&saved_key[i][thread*64*NBKEYS], &interm_key[thread*4*NBKEYS], &interm_key[thread*4*NBKEYS], SSEi_RELOAD|SSEi_MIXED_IN);
			// Copy any output that is done now
			if (shortest < i*64+56) {
				if (shortest > (i-1)*64+55 && longest < i*64+56)
					memcpy(&crypt_key[thread*4*NBKEYS], &interm_key[thread*4*NBKEYS], 16*NBKEYS);
				else
					for (index = 0; index < NBKEYS; index++)
						if (((crypt_len[index] + 8) >> 6) == i)
							crypt_done(interm_key, crypt_key, ti);
			}
		}
	}
#undef thread
#undef ti
#else
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		MD5_CTX ctx;
		int len;
#ifdef _OPENMP
		/* per-thread scratch copies of the shared salt buffers */
		char h3tmp[HTMP];
		char h1tmp[HTMP];
#else
		char *h3tmp;
		char *h1tmp;
#endif
		size_t tmp;
#ifdef __MMX__
		__m64 h1[BINARY_SIZE / sizeof(__m64)];
		__m64 conv[CIPHERTEXT_LENGTH / sizeof(__m64) + 1];
#else
		uint32_t h1[BINARY_SIZE / sizeof(uint32_t)];
		uint32_t conv[(CIPHERTEXT_LENGTH / sizeof(uint32_t)) + 1];
#endif

		tmp = rinfo->h1tmplen;
		if ((len = saved_len[index]) < 0)
			len = saved_len[index] = strlen(saved_plain[index]);
#ifdef _OPENMP
		memcpy(h1tmp, rinfo->h1tmp, tmp);
		memcpy(h3tmp + CIPHERTEXT_LENGTH, rinfo->h3tmp + CIPHERTEXT_LENGTH, rinfo->h3tmplen - CIPHERTEXT_LENGTH);
#else
		h3tmp = rinfo->h3tmp;
		h1tmp = rinfo->h1tmp;
#endif
		/* h1 = MD5(user:realm:password) */
		memcpy(&h1tmp[tmp], saved_plain[index], len);
		MD5_Init(&ctx);
		MD5_Update(&ctx, h1tmp, len + tmp);
		MD5_Final((unsigned char*)h1, &ctx);
		bin2ascii(conv, h1);
		/* response = MD5(hex(h1) : nonce : nc : cnonce : qop : h2) */
		memcpy(h3tmp, conv, CIPHERTEXT_LENGTH);
		MD5_Init(&ctx);
		MD5_Update(&ctx, h3tmp, rinfo->h3tmplen);
		MD5_Final(crypt_key[index], &ctx);
	}
#endif
	return count;
}
/* Duplicate at most n bytes of s into a freshly allocated,
 * NUL-terminated buffer; returns NULL on allocation failure.
 * Fix: the old scan bound (tmp <= n) examined s[n], one byte beyond
 * the caller's stated limit — out of bounds when s is exactly n bytes
 * with no terminator.  The scan now stops after n bytes, like strndup. */
static char *mystrndup(const char *s, size_t n)
{
	size_t len;
	char *ret;

	for (len = 0; len < n && s[len] != 0; len++)
		;
	/* len == min(strlen(s), n) — same size the old code computed */
	if ((ret = mem_alloc(sizeof(char) * len + 1)) == NULL)
		return NULL;
	memmove(ret, s, len);
	ret[len] = 0;
	return ret;
}
/* Length of str up to (not including) the first SEPARATOR or NUL. */
static size_t reqlen(char *str)
{
	size_t n = 0;

	while (str[n] != 0 && str[n] != SEPARATOR)
		n++;
	return n;
}
/* Parse one (already valid()-checked) ciphertext into a reqinfo_t:
 * precompute h2 = MD5(method:digestURI) and assemble the constant
 * prefix h1tmp = "user:realm:" and the suffix h3tmp (from offset
 * CIPHERTEXT_LENGTH on) = ":nonce:nc:cnonce:qop:hex(h2)".  crypt_all
 * later splices hex(h1) into the first CIPHERTEXT_LENGTH bytes of h3tmp. */
static void *get_salt(char *ciphertext)
{
	int nb;
	int i;
	char *request[SIZE_TAB];
	char *str;
	static reqinfo_t *r;
#ifdef __MMX__
	__m64 h2[BINARY_SIZE / sizeof(__m64)];
	__m64 conv[CIPHERTEXT_LENGTH / sizeof(__m64) + 1];
#else
	unsigned int h2[BINARY_SIZE / sizeof(unsigned int)];
	uint32_t conv[(CIPHERTEXT_LENGTH / sizeof(uint32_t)) + 1];
#endif
	MD5_CTX ctx;

	/* parse the password string */
	if (!r) r = mem_alloc_tiny(sizeof(*r), MEM_ALIGN_WORD);
	memset(r, 0, sizeof(*r));
	/* split the $-separated fields into request[] (see enum e_req order) */
	for (nb = 0, i = 1; ciphertext[i] != 0; i++) {
		if (ciphertext[i] == SEPARATOR) {
			i++;
			request[nb] = mystrndup(&ciphertext[i], reqlen(&ciphertext[i]));
			nb++;
		}
	}
	while (nb < SIZE_TAB) {
		request[nb++] = NULL;
	}

	/* calculate h2 (h2 = md5(method:digestURI))*/
	str = mem_alloc(strlen(request[R_METHOD]) + strlen(request[R_URI]) + 2);
	sprintf(str, "%s:%s", request[R_METHOD], request[R_URI]);
	MD5_Init(&ctx);
	MD5_Update(&ctx, str, strlen(str));
	MD5_Final((unsigned char*)h2, &ctx);

	memset(conv, 0, CIPHERTEXT_LENGTH + 1);
	bin2ascii(conv, h2);
	MEM_FREE(str);

	/* create a part of h1 (h1tmp = request:realm:)*/
	snprintf(r->h1tmp, HTMP - PLAINTEXT_LENGTH, "%s:%s:", request[R_USER], request[R_REALM]);

	/* create a part of h3 (h3tmp = nonce:noncecount:clientnonce:qop:h2)*/
	snprintf(&r->h3tmp[CIPHERTEXT_LENGTH], HTMP - CIPHERTEXT_LENGTH, ":%s:%s:%s:%s:%s",
	         request[R_NONCE], request[R_NONCECOUNT], request[R_CLIENTNONCE],
	         request[R_QOP], (char*)conv);

	r->h1tmplen = strlen(r->h1tmp);
	r->h3tmplen = strlen(&r->h3tmp[CIPHERTEXT_LENGTH]) + CIPHERTEXT_LENGTH;

	for (nb=0; nb < SIZE_TAB; ++nb) {
		MEM_FREE(request[nb]);
	}
	return r;
}
/* convert response to binary form */
/* Decode the 32 hex chars after the "$response$" prefix (10 bytes) into
 * the 16-byte binary digest.  Returns a pointer to static storage,
 * overwritten on each call. */
static void *get_binary(char *ciphertext)
{
	static unsigned int realcipher[BINARY_SIZE / sizeof(int)];
	int i;

	ciphertext += 10;   /* skip "$response$" */
	for (i = 0; i < BINARY_SIZE; i++) {
		((unsigned char*)realcipher)[i] = atoi16[ARCH_INDEX(ciphertext[i * 2])] * 16 +
			atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])];
	}
	return (void*) realcipher;
}
/* Partial-hash accessors: return the first 32 bits of the computed
 * digest masked to increasing widths (PH_MASK_0..6).  The SIMD variant
 * must index into the interleaved crypt_key layout. */
#ifdef SIMD_COEF_32
#define HASH_OFFSET (index&(SIMD_COEF_32-1))+((unsigned int)index/SIMD_COEF_32)*SIMD_COEF_32*4
static int get_hash_0(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_0; }
static int get_hash_1(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_1; }
static int get_hash_2(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_2; }
static int get_hash_3(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_3; }
static int get_hash_4(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_4; }
static int get_hash_5(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_5; }
static int get_hash_6(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_6; }
#else
static int get_hash_0(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & PH_MASK_0; }
static int get_hash_1(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & PH_MASK_1; }
static int get_hash_2(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & PH_MASK_2; }
static int get_hash_3(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & PH_MASK_3; }
static int get_hash_4(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & PH_MASK_4; }
static int get_hash_5(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & PH_MASK_5; }
static int get_hash_6(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & PH_MASK_6; }
#endif
/* Format descriptor registered with the john core: parameters first,
 * then the method table (positional initializers per struct fmt_main). */
struct fmt_main fmt_HDAA = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		FMT_OMP | FMT_OMP_BAD |
#endif
		FMT_CASE | FMT_8_BIT,
		{ NULL },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
Gctpc.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "proj.h"
#include "grb2.h"
#include "wgrib2.h"
#include "fnlist.h"
/* Gctpc.c interface routines to the gctpc library
2/2012 Public Domain Wesley Ebisuzaki
gctpc_get_latlon: fill grid with lat/lon values
mercator 10
polar stereographic 20
lambert conformal 30
Albers equal area 31
5/2014 added staggering
10/2015 added Lambert Azimuthal Equal Area Projection
*/
/* M_PI, M_PI_2, M_PI_4, and M_SQRT2 are not ANSI C but are commonly defined */
/* values from GNU C library version of math.h copyright Free Software Foundation, Inc. */
#ifndef M_PI
#define M_PI 3.14159265358979323846 /* pi */
#endif
#ifndef M_PI_2
#define M_PI_2 1.57079632679489661923 /* pi/2 */
#endif
#ifndef M_PI_4
#define M_PI_4 0.78539816339744830962 /* pi/4 */
#endif
#ifndef M_SQRT2
#define M_SQRT2 1.41421356237309504880 /* sqrt(2) */
#endif
extern int use_gctpc;
extern int latlon;
extern double *lon, *lat;
extern enum output_order_type output_order;
/*
* HEADER:100:gctpc:misc:1: X=0,1 use gctpc library (default=1)
*/
/* Option handler for -gctpc: enable the gctpc library only when the
 * argument is exactly "1"; any other value disables it. */
int f_gctpc(ARG1) {
    use_gctpc = (strcmp(arg1,"1") == 0);
    return 0;
}
/* get lat-lon for grid
*
* step 1: initialize to center point
* step 2: find (x,y) of lon1/lat1 (1st grid point) (x0,y0)
* step 3 find (x,y) of grid
* step 4 find lat-lon of (x,y)
*/
static int warning_LamAZ = 0;
/* Fill *lon / *lat (allocated here, previous arrays freed) with the
 * longitude/latitude of every grid point, for the projections gctpc
 * supports: mercator (10), polar stereographic (20), lambert conformal
 * (30), albers equal area (31), lambert azimuthal equal area (140).
 * Returns 0 on success, 1 if the grid template is unsupported.
 * Method per projection: init the forward transform about the center
 * point, project the first grid point to get (x0,y0), then re-init the
 * inverse transform with -x0,-y0 as false easting/northing so grid
 * (i*dx, j*dy) maps straight back to lon/lat. */
int gctpc_get_latlon(unsigned char **sec, double **lon, double **lat) {
    int gdt;
    unsigned char *gds;
    double r_maj;                           /* major axis                   */
    double r_min;                           /* minor axis                   */
    double lat1;                            /* first standard parallel      */
    double lat2;                            /* second standard parallel     */
    double c_lon;                           /* center longitude             */
    double c_lat;                           /* center latitude              */
    double false_east;                      /* x offset in meters           */
    double false_north;
    double dx, dy;
    double x0, y0;
    long int (*inv_fn)();
    double *llat, *llon, rlon, rlat;
    int nnx, nny, nres, nscan;
    unsigned int i, nnpnts;
    long g_error;

    gdt = code_table_3_1(sec);
    gds = sec[3];

    /* only process certain grids */
    if (gdt != 10 && gdt != 20 && gdt != 30 && gdt != 31 && gdt != 140) return 1;

    get_nxny(sec, &nnx, &nny, &nnpnts, &nres, &nscan);

    /* potentially staggered */
// 8/2014	if (nnx < 1 || nny < 1) return 1;

    /* drop any previously computed lat/lon arrays */
    llat = *lat;
    llon = *lon;
    if (llat != NULL) {
	free(llat);
	free(llon);
        *lat = *lon = llat = llon = NULL;
    }

    inv_fn = NULL;
    dx = dy = 0.0;

    if (gdt == 10) {            // mercator

        /* get earth axis */
        axes_earth(sec, &r_maj, &r_min);
        dy      = GDS_Mercator_dy(gds);
        dx      = GDS_Mercator_dx(gds);

        /* central point */
        c_lon = GDS_Mercator_ori_angle(gds) * (M_PI/180.0);
        c_lat = GDS_Mercator_latD(gds) * (M_PI/180.0);

        /* find the eastling and northing of of the 1st grid point */

        false_east = false_north = 0.0;
        g_error = merforint(r_maj,r_min,c_lon,c_lat,false_east,false_north);
        if (g_error) fatal_error_i("merforint %d", g_error);

        rlon   = GDS_Mercator_lon1(gds) * (M_PI/180.0);
        rlat   = GDS_Mercator_lat1(gds) * (M_PI/180.0);

        g_error = merfor(rlon, rlat, &x0, &y0);
        if (g_error) fatal_error_i("merfor %d", g_error);

        /* initialize for 1st grid point */
        x0 = -x0;
        y0 = -y0;
        g_error = merinvint(r_maj,r_min,c_lon,c_lat,x0,y0);
        if (g_error) fatal_error_i("merinvint %d", g_error);

        inv_fn = &merinv;
    }

    else if (gdt == 20) {            // polar stereographic

        /* get earth axis */
        axes_earth(sec, &r_maj, &r_min);
        dy      = GDS_Polar_dy(gds);
        dx      = GDS_Polar_dx(gds);

        /* central point */
        c_lon = GDS_Polar_lov(gds) * (M_PI/180.0);
        c_lat = GDS_Polar_lad(gds) * (M_PI/180.0);

        /* find the eastling and northing of of the 1st grid point */

        false_east = false_north = 0.0;
        g_error = psforint(r_maj,r_min,c_lon,c_lat,false_east,false_north);
        if (g_error) fatal_error_i("psforint %d", g_error);

        rlon   = GDS_Polar_lon1(gds) * (M_PI/180.0);
        rlat   = GDS_Polar_lat1(gds) * (M_PI/180.0);

        g_error = psfor(rlon, rlat, &x0, &y0);
        if (g_error) fatal_error_i("psfor %d", g_error);

        /* initialize for 1st grid point */
        x0 = -x0;
        y0 = -y0;
        g_error = psinvint(r_maj,r_min,c_lon,c_lat,x0,y0);
        if (g_error) fatal_error_i("psinvint %d", g_error);

        inv_fn = &psinv;
    }

    else if (gdt == 30) {            // lambert conformal conic

        /* get earth axis */
        axes_earth(sec, &r_maj, &r_min);
        dy      = GDS_Lambert_dy(gds);
        dx      = GDS_Lambert_dx(gds);

//printf(">>> gctpc dx %lf, dy %lf\n", dx, dy);

        /* latitudes of tangent/intersection */
        lat1 = GDS_Lambert_Latin1(gds) * (M_PI/180.0);
        lat2 = GDS_Lambert_Latin2(gds) * (M_PI/180.0);

        /* central point */
        c_lon = GDS_Lambert_Lov(gds) * (M_PI/180.0);
        c_lat = GDS_Lambert_LatD(gds) * (M_PI/180.0);

        /* find the eastling and northing of of the 1st grid point */

        false_east = false_north = 0.0;
        g_error = lamccforint(r_maj,r_min,lat1,lat2,c_lon,c_lat,false_east,false_north);
        if (g_error) fatal_error_i("lamccforint %d", g_error);

        rlon   = GDS_Lambert_Lo1(gds) * (M_PI/180.0);
        rlat   = GDS_Lambert_La1(gds) * (M_PI/180.0);

        g_error = lamccfor(rlon, rlat, &x0, &y0);
        if (g_error) fatal_error_i("lamccfor %d", g_error);

        /* initialize for 1st grid point */
        x0 = -x0;
        y0 = -y0;
        g_error = lamccinvint(r_maj,r_min,lat1,lat2,c_lon,c_lat,x0,y0);
        if (g_error) fatal_error_i("lamccinvint %d", g_error);

        inv_fn = &lamccinv;
    }
    else if (gdt == 31) {            // albers equal area

        /* get earth axis */
        axes_earth(sec, &r_maj, &r_min);
        dy      = GDS_Albers_dy(gds);
        dx      = GDS_Albers_dx(gds);

        /* latitudes of tangent/intersection */
        lat1 = GDS_Albers_Latin1(gds) * (M_PI/180.0);
        lat2 = GDS_Albers_Latin2(gds) * (M_PI/180.0);

        /* central point */
        c_lon = GDS_Albers_Lov(gds) * (M_PI/180.0);
        c_lat = GDS_Albers_LatD(gds) * (M_PI/180.0);

        /* find the eastling and northing of of the 1st grid point */

        false_east = false_north = 0.0;
        g_error = alberforint(r_maj,r_min,lat1,lat2,c_lon,c_lat,false_east,false_north);
        if (g_error) fatal_error_i("alberforint %d", g_error);

        rlon   = GDS_Albers_Lo1(gds) * (M_PI/180.0);
        rlat   = GDS_Albers_La1(gds) * (M_PI/180.0);

        g_error = alberfor(rlon, rlat, &x0, &y0);
        if (g_error) fatal_error_i("alberfor %d", g_error);

        /* initialize for 1st grid point */
        x0 = -x0;
        y0 = -y0;
        g_error = alberinvint(r_maj,r_min,lat1,lat2,c_lon,c_lat,x0,y0);
        if (g_error) fatal_error_i("alberinvint %d", g_error);

        inv_fn = &alberinv;
    }
    else if (gdt == 140) {            // lambert azimuthal equal area

        /* get earth axis */
        axes_earth(sec, &r_maj, &r_min);
        /* gctpc's lamaz is spherical only; warn (at most 3 times) and
           fall back to the mean radius */
        if (r_maj != r_min && warning_LamAZ< 3) {
	    warning_LamAZ++;
	    fprintf(stderr,"**WARNING gctpc only does spherical Lambert Azimuthal Equal Area.\n"
			"**WARNING wgrib2 with Proj4 does aspherical earth: -proj4 1\n");
	}
	r_maj = (r_maj + r_min)/2.0;		/* only for spherical earth */

        dy      = GDS_Lambert_Az_dy(gds);
        dx      = GDS_Lambert_Az_dx(gds);

        /* central point */
        c_lon = GDS_Lambert_Az_Cen_Lon(gds) * (M_PI/180.0);
        c_lat = GDS_Lambert_Az_Std_Par(gds) * (M_PI/180.0);

        /* find the eastling and northing of of the 1st grid point */

        false_east = false_north = 0.0;
        g_error = lamazforint(r_maj,c_lon,c_lat,false_east,false_north);
        if (g_error) fatal_error_i("lamazforint %d", g_error);

        rlon   = GDS_Lambert_Az_Lo1(gds) * (M_PI/180.0);
        rlat   = GDS_Lambert_Az_La1(gds) * (M_PI/180.0);

        g_error = lamazfor(rlon, rlat, &x0, &y0);
        if (g_error) fatal_error_i("lamazfor %d", g_error);

        /* initialize for 1st grid point */
        x0 = -x0;
        y0 = -y0;
        g_error = lamazinvint(r_maj,c_lon,c_lat,x0,y0);
        if (g_error) fatal_error_i("lamazinvint %d", g_error);

        inv_fn = &lamazinv;
    }

    if (inv_fn == NULL) return 1;

    if ((*lat = llat = (double *) malloc(((size_t) nnpnts) * sizeof(double))) == NULL) {
        fatal_error("gctpc_get_latlon memory allocation failed","");
    }
    if ((*lon = llon = (double *) malloc(((size_t) nnpnts) * sizeof(double))) == NULL) {
        fatal_error("gctpc_get_latlon memory allocation failed","");
    }

    /* put x[] and y[] values in lon and lat */
    if (stagger(sec, nnpnts, llon, llat)) fatal_error("gctpc: stagger problem","");
// printf(">> stagger gctpc x00 %lf y00 %lf\n",llon[0], llat[0]);

    /* NOTE(review): inv_fn points into the gctpc library, which keeps its
       projection setup in library-internal state; confirm the inverse
       routines are safe to call from this omp parallel loop. */
#pragma omp parallel for private(i)
    for (i = 0; i < nnpnts; i++) {
	inv_fn(llon[i]*dx, llat[i]*dy, llon+i, llat+i);
	llat[i] *= (180.0 / M_PI);
	llon[i] *= (180.0 / M_PI);
        if (llon[i] < 0.0) llon[i] += 360.0;
    }
    return 0;
}
/*
* HEADER:100:ll2ij:inv:2:x=lon y=lat, converts lon-lat to (i,j) using gctpc
*/
/* Inventory option -ll2ij: convert a lon/lat pair (arg1, arg2) to
 * fractional 1-based grid coordinates (i,j) via gctpc.  mode == -1 is
 * the init pass (just request lat/lon arrays); requires we-sn order. */
int f_ll2ij(ARG2)  {
    double x[1], y[1], to_lat[1], to_lon[1];
    int i;

    if (mode == -1) {
        latlon = 1;
    }
    if (mode >= 0) {
	if (output_order != wesn) return 1;
	to_lon[0] = atof(arg1);
	to_lat[0] = atof(arg2);
	i = gctpc_ll2xy_init(sec, lon, lat);
	if (i == 0) {
	     i = gctpc_ll2xy(1, to_lon, to_lat, x , y);
	     /* +1.0: gctpc returns 0-based coordinates, output is 1-based */
	     sprintf(inv_out,"%lf %lf -> (%lf,%lf)",to_lon[0], to_lat[0], x[0]+1.0, y[0]+1.0);
	}
    }
    return 0;
}
/*
* HEADER:100:ll2i:inv:2:x=lon y=lat, converts to (i), 1..ndata
*/
int f_ll2i(ARG2) {
    /*
     * Convert a lon-lat pair (arg1=lon, arg2=lat) to a linear grid index
     * in 1..ndata via gctpc; 0 is reported when the conversion fails.
     */
    double to_lat[1], to_lon[1];
    unsigned int iptr;

    if (mode == -1) latlon = 1;          /* initialization pass: request lat/lon arrays */
    if (mode < 0) return 0;
    if (output_order != wesn) return 1;
    to_lon[0] = atof(arg1);
    to_lat[0] = atof(arg2);
    iptr = 0;                            /* 0 == "not found / init failed" */
    if (gctpc_ll2xy_init(sec, lon, lat) == 0) {
        if (gctpc_ll2i(1, to_lon, to_lat, &iptr) != 0) iptr = 0;
    }
    sprintf(inv_out,"%lf %lf -> (%u)",to_lon[0], to_lat[0], iptr);
    return 0;
}
|
sort.ref.c | #include <sys/time.h>
#include <time.h>
#include <stdio.h>
/*
 * Return a monotonic-ish wall clock reading in nanoseconds.
 * macOS uses the Mach calendar clock service; elsewhere we use
 * the POSIX CLOCK_MONOTONIC clock.
 */
static unsigned long long current_time_ns() {
#ifdef __MACH__
    clock_serv_t cclock;
    mach_timespec_t mts;
    host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
    clock_get_time(cclock, &mts);
    mach_port_deallocate(mach_task_self(), cclock);
    return 1000000000ULL * (unsigned long long)mts.tv_sec
         + (unsigned long long)mts.tv_nsec;
#else
    struct timespec t = {0, 0};
    clock_gettime(CLOCK_MONOTONIC, &t);
    return 1000000000ULL * (unsigned long long)t.tv_sec
         + (unsigned long long)t.tv_nsec;
#endif
}
/**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */
/**********************************************************************************************/
/*
* Original code from the Cilk project
*
* Copyright (c) 2000 Massachusetts Institute of Technology
* Copyright (c) 2000 Matteo Frigo
*/
/*
* this program uses an algorithm that we call `cilksort'.
* The algorithm is essentially mergesort:
*
* cilksort(in[1..n]) =
* spawn cilksort(in[1..n/2], tmp[1..n/2])
* spawn cilksort(in[n/2..n], tmp[n/2..n])
* sync
* spawn cilkmerge(tmp[1..n/2], tmp[n/2..n], in[1..n])
*
*
* The procedure cilkmerge does the following:
*
* cilkmerge(A[1..n], B[1..m], C[1..(n+m)]) =
* find the median of A \union B using binary
* search. The binary search gives a pair
* (ma, mb) such that ma + mb = (n + m)/2
* and all elements in A[1..ma] are smaller than
* B[mb..m], and all the B[1..mb] are smaller
* than all elements in A[ma..n].
*
* spawn cilkmerge(A[1..ma], B[1..mb], C[1..(n+m)/2])
* spawn cilkmerge(A[ma..m], B[mb..n], C[(n+m)/2 .. (n+m)])
* sync
*
* The algorithm appears for the first time (AFAIK) in S. G. Akl and
* N. Santoro, "Optimal Parallel Merging and Sorting Without Memory
* Conflicts", IEEE Trans. Comp., Vol. C-36 No. 11, Nov. 1987 . The
* paper does not express the algorithm using recursion, but the
* idea of finding the median is there.
*
* For cilksort of n elements, T_1 = O(n log n) and
* T_\infty = O(log^3 n). There is a way to shave a
* log factor in the critical path (left as homework).
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "bots.h"
#include "app-desc.h"
ELM *array, *tmp;
static unsigned long rand_nxt = 0;
/*
 * Advance the file-local linear congruential generator (glibc's classic
 * multiplier/increment) and return the new state. Deterministic for a
 * given seed set by my_srand().
 */
static inline unsigned long my_rand(void)
{
    return rand_nxt = rand_nxt * 1103515245 + 12345;
}
/* Seed the LCG state consumed by my_rand(). */
static inline void my_srand(unsigned long seed)
{
rand_nxt = seed;
}
/*
 * Return the median of three values. Uses the same comparison tree as the
 * classic quicksort median-of-three, so results for ties are identical to
 * the original nested-if formulation.
 */
static inline ELM med3(ELM a, ELM b, ELM c)
{
    if (a < b) {
        if (b < c)
            return b;          /* a < b < c */
        return (a < c) ? c : a; /* b is largest; median is max(a, c)'s partner */
    }
    if (b > c)
        return b;              /* c < b <= a */
    return (a > c) ? c : a;    /* b is smallest; median is min(a, c)'s partner */
}
/*
* simple approach for now; a better median-finding
* may be preferable
*/
/*
 * Median-of-three pivot selection over the inclusive range [low, high]:
 * first, last, and middle elements.
 */
static inline ELM choose_pivot(ELM *low, ELM *high)
{
    ELM middle = low[(high - low) / 2];

    return med3(*low, *high, middle);
}
/*
 * Hoare-style partition of the inclusive range [low, high] around a
 * median-of-three pivot. Returns a pointer m such that every element in
 * [low, m] is <= pivot and every element in (m, high] is >= pivot, with
 * low <= m < high (so the quicksort recursion always shrinks).
 */
static ELM *seqpart(ELM *low, ELM *high)
{
ELM pivot;
ELM h, l;
ELM *curr_low = low;
ELM *curr_high = high;
pivot = choose_pivot(low, high);
while (1) {
/* scan inward; the pivot value itself acts as a sentinel on both sides */
while ((h = *curr_high) > pivot)
curr_high--;
while ((l = *curr_low) < pivot)
curr_low++;
if (curr_low >= curr_high)
break;
/* out-of-place pair found: exchange and keep scanning */
*curr_high-- = l;
*curr_low++ = h;
}
/*
 * I don't know if this is really necessary.
 * The problem is that the pivot is not always the
 * first element, and the partition may be trivial.
 * However, if the partition is trivial, then
 * *high is the largest element, whence the following
 * code.
 */
if (curr_high < high)
return curr_high;
else
return curr_high - 1;
}
/* Exchange two ELM lvalues via a block-local temporary; arguments are
   evaluated more than once, so avoid side effects in a and b. */
#define swap(a, b) \
{ \
ELM tmp;\
tmp = a;\
a = b;\
b = tmp;\
}
/*
 * Straight insertion sort of the inclusive range [low, high]; used below
 * the quicksort cutoff. The inner loop shifts larger elements one slot
 * right (p[1] = b) while walking left, then drops the saved value a into
 * the gap. Safe for high < low (outer loop never runs).
 */
static void insertion_sort(ELM *low, ELM *high)
{
ELM *p, *q;
ELM a, b;
for (q = low + 1; q <= high; ++q) {
a = q[0];
for (p = q - 1; p >= low && (b = p[0]) > a; p--)
p[1] = b;
p[1] = a;
}
}
/*
* tail-recursive quicksort, almost unrecognizable :-)
*/
/*
 * Sequential quicksort of the inclusive range [low, high].
 * The second recursive call is turned into iteration (tail-call
 * elimination); ranges below bots_app_cutoff_value_2 fall through to
 * insertion sort.
 */
void seqquick(ELM *low, ELM *high)
{
    while (high - low >= bots_app_cutoff_value_2) {
        ELM *mid = seqpart(low, high);
        seqquick(low, mid);
        low = mid + 1;  /* iterate on the upper partition */
    }
    insertion_sort(low, high);
}
/*
 * Sequentially merge the two sorted INCLUSIVE ranges [low1, high1] and
 * [low2, high2] into the array starting at lowdest. Stable with respect
 * to ties favoring range 2 only when a1 >= a2 (a1 < a2 takes from range 1).
 * The function runs a speculative "fast" loop, then a safe "slow" loop,
 * then bulk-copies whichever range has elements left.
 */
void seqmerge(ELM *low1, ELM *high1, ELM *low2, ELM *high2,
ELM *lowdest)
{
ELM a1, a2;
/*
 * The following 'if' statement is not necessary
 * for the correctness of the algorithm, and is
 * in fact subsumed by the rest of the function.
 * However, it is a few percent faster. Here is why.
 *
 * The merging loop below has something like
 * if (a1 < a2) {
 * *dest++ = a1;
 * ++low1;
 * if (end of array) break;
 * a1 = *low1;
 * }
 *
 * Now, a1 is needed immediately in the next iteration
 * and there is no way to mask the latency of the load.
 * A better approach is to load a1 *before* the end-of-array
 * check; the problem is that we may be speculatively
 * loading an element out of range. While this is
 * probably not a problem in practice, yet I don't feel
 * comfortable with an incorrect algorithm. Therefore,
 * I use the 'fast' loop on the array (except for the last
 * element) and the 'slow' loop for the rest, saving both
 * performance and correctness.
 */
if (low1 < high1 && low2 < high2) {
a1 = *low1;
a2 = *low2;
for (;;) {
if (a1 < a2) {
*lowdest++ = a1;
/* pre-load next element before the bounds check (see comment above) */
a1 = *++low1;
if (low1 >= high1)
break;
} else {
*lowdest++ = a2;
a2 = *++low2;
if (low2 >= high2)
break;
}
}
}
/* slow loop: checks bounds before loading, handles the last elements */
if (low1 <= high1 && low2 <= high2) {
a1 = *low1;
a2 = *low2;
for (;;) {
if (a1 < a2) {
*lowdest++ = a1;
++low1;
if (low1 > high1)
break;
a1 = *low1;
} else {
*lowdest++ = a2;
++low2;
if (low2 > high2)
break;
a2 = *low2;
}
}
}
/* one range is exhausted; bulk-copy the remainder of the other (+1: inclusive) */
if (low1 > high1) {
memcpy(lowdest, low2, sizeof(ELM) * (high2 - low2 + 1));
} else {
memcpy(lowdest, low1, sizeof(ELM) * (high1 - low1 + 1));
}
}
/* Exchange two ELM* pointer variables via a block-local temporary;
   arguments are evaluated more than once. */
#define swap_indices(a, b) \
{ \
ELM *tmp;\
tmp = a;\
a = b;\
b = tmp;\
}
ELM *binsplit(ELM val, ELM *low, ELM *high)
{
/*
 * returns index which contains greatest element <= val. If val is
 * less than all elements, returns low-1
 */
/* NOTE(review): because the val <= *mid branch discards mid, elements
 * equal to val land in the upper half; with duplicates of val present the
 * result points at the last element strictly less than val, which is
 * slightly stricter than the comment above. Either answer keeps the
 * merge correct. */
ELM *mid;
while (low != high) {
/* round the midpoint up so mid != low and the range always shrinks */
mid = low + ((high - low + 1) >> 1);
if (val <= *mid)
high = mid - 1;
else
low = mid;
}
if (*low > val)
return low - 1;
else
return low;
}
void cilkmerge_par(ELM *low1, ELM *high1, ELM *low2, ELM *high2, ELM *lowdest)
{
    /*
     * Cilkmerge: merges the sorted, INCLUSIVE ranges [low1, high1] and
     * [low2, high2] into the range [lowdest, ...] using parallel divide
     * and conquer (OpenMP tasks).
     */
    ELM *split1, *split2; /* where each of the ranges are broken for
                             recursive merge */
    long int lowsize;     /* combined size of the two lower halves, minus 1
                             (the lower merge fills lowdest[0..lowsize]) */

    /*
     * We want to take the middle element (indexed by split1) from the
     * larger of the two arrays. The following code assumes that split1
     * is taken from range [low1, high1]. So if [low1, high1] is
     * actually the smaller range, we should swap it with [low2, high2].
     */
    if (high2 - low2 > high1 - low1) {
        swap_indices(low1, low2);
        swap_indices(high1, high2);
    }
    if (high2 < low2) {
        /*
         * Smaller range is empty: copy all of the inclusive range
         * [low1, high1], which holds (high1 - low1 + 1) elements.
         * BUG FIX: the count was previously (high1 - low1), which silently
         * dropped the last element, and underflowed to a huge size_t when
         * [low1, high1] was empty as well. The path only triggers when a
         * binsplit lands entirely outside the smaller range, so random
         * inputs rarely exposed it.
         */
        memcpy(lowdest, low1, sizeof(ELM) * (high1 - low1 + 1));
        return;
    }
    if (high2 - low2 < bots_app_cutoff_value ) {
        /* small enough: merge sequentially */
        seqmerge(low1, high1, low2, high2, lowdest);
        return;
    }
    /*
     * Basic approach: Find the middle element of one range (indexed by
     * split1). Find where this element would fit in the other range
     * (indexed by split2). Then merge the two lower halves and the two
     * upper halves.
     */
    split1 = ((high1 - low1 + 1) / 2) + low1;
    split2 = binsplit(*split1, low2, high2);
    lowsize = split1 - low1 + split2 - low2;
    /*
     * directly put the splitting element into
     * the appropriate location
     */
    *(lowdest + lowsize + 1) = *split1;
#pragma omp task untied
    cilkmerge_par(low1, split1 - 1, low2, split2, lowdest);
#pragma omp task untied
    cilkmerge_par(split1 + 1, high1, split2 + 1, high2,
                  lowdest + lowsize + 2);
#pragma omp taskwait
    ;
    return;
}
/*
 * Parallel mergesort ("cilksort") of size elements starting at low,
 * using tmp (same capacity) as scratch space.
 */
void cilksort_par(ELM *low, ELM *tmp, long size)
{
/*
 * divide the input in four parts of the same size (A, B, C, D)
 * Then:
 * 1) recursively sort A, B, C, and D (in parallel)
 * 2) merge A and B into tmp1, and C and D into tmp2 (in parallel)
 * 3) merge tmp1 and tmp2 into the original array
 */
long quarter = size / 4;
ELM *A, *B, *C, *D, *tmpA, *tmpB, *tmpC, *tmpD;
if (size < bots_app_cutoff_value_1 ) {
/* quicksort when less than 1024 elements */
seqquick(low, low + size - 1);
return;
}
A = low;
tmpA = tmp;
B = A + quarter;
tmpB = tmpA + quarter;
C = B + quarter;
tmpC = tmpB + quarter;
D = C + quarter;
tmpD = tmpC + quarter;
/* phase 1: sort the four quarters in parallel (D absorbs the remainder) */
#pragma omp task untied
cilksort_par(A, tmpA, quarter);
#pragma omp task untied
cilksort_par(B, tmpB, quarter);
#pragma omp task untied
cilksort_par(C, tmpC, quarter);
#pragma omp task untied
cilksort_par(D, tmpD, size - 3 * quarter);
#pragma omp taskwait
;
/* phase 2: merge A+B -> tmpA.. and C+D -> tmpC.. in parallel */
#pragma omp task untied
cilkmerge_par(A, A + quarter - 1, B, B + quarter - 1, tmpA);
#pragma omp task untied
cilkmerge_par(C, C + quarter - 1, D, low + size - 1, tmpC);
#pragma omp taskwait
;
/* phase 3: merge the two halves of tmp back into the input array */
cilkmerge_par(tmpA, tmpC - 1, tmpC, tmpA + size - 1, A);
}
/*
 * Shuffle array in place by swapping each position with a pseudo-random
 * one drawn from my_rand(). Deterministic given the my_srand() seed.
 */
void scramble_array( ELM *array )
{
    unsigned long i;

    for (i = 0; i < bots_arg_size; ++i) {
        unsigned long j = my_rand() % bots_arg_size;
        ELM t = array[i];
        array[i] = array[j];
        array[j] = t;
    }
}
/*
 * Reset the PRNG and fill array with the identity permutation 0..size-1
 * (sort_verify checks array[i] == i after sorting).
 */
void fill_array( ELM *array )
{
    unsigned long i;

    my_srand(1);
    for (i = 0; i < bots_arg_size; ++i)
        array[i] = i;
}
/*
 * Validate/clamp the benchmark parameters, then allocate and initialize
 * the input array and its scratch buffer. Exits on allocation failure.
 */
void sort_init ( void )
{
    /* Checking arguments */
    if (bots_arg_size < 4) {
        bots_message("%s can not be less than 4, using 4 as a parameter.\n", BOTS_APP_DESC_ARG_SIZE );
        bots_arg_size = 4;
    }
    if (bots_app_cutoff_value < 2) {
        bots_message("%s can not be less than 2, using 2 as a parameter.\n", BOTS_APP_DESC_ARG_CUTOFF);
        bots_app_cutoff_value = 2;
    }
    else if (bots_app_cutoff_value > bots_arg_size ) {
        bots_message("%s can not be greather than vector size, using %d as a parameter.\n", BOTS_APP_DESC_ARG_CUTOFF, bots_arg_size);
        bots_app_cutoff_value = bots_arg_size;
    }
    if (bots_app_cutoff_value_1 > bots_arg_size ) {
        bots_message("%s can not be greather than vector size, using %d as a parameter.\n", BOTS_APP_DESC_ARG_CUTOFF_1, bots_arg_size);
        bots_app_cutoff_value_1 = bots_arg_size;
    }
    if (bots_app_cutoff_value_2 > bots_arg_size ) {
        bots_message("%s can not be greather than vector size, using %d as a parameter.\n", BOTS_APP_DESC_ARG_CUTOFF_2, bots_arg_size);
        bots_app_cutoff_value_2 = bots_arg_size;
    }
    if (bots_app_cutoff_value_2 > bots_app_cutoff_value_1) {
        bots_message("%s can not be greather than %s, using %d as a parameter.\n",
            BOTS_APP_DESC_ARG_CUTOFF_2,
            BOTS_APP_DESC_ARG_CUTOFF_1,
            bots_app_cutoff_value_1
        );
        bots_app_cutoff_value_2 = bots_app_cutoff_value_1;
    }
    array = (ELM *) malloc(bots_arg_size * sizeof(ELM));
    tmp = (ELM *) malloc(bots_arg_size * sizeof(ELM));
    /* BUG FIX: the original never checked these allocations; fill_array
       would dereference NULL on OOM. Fail fast with a diagnostic. */
    if (array == NULL || tmp == NULL) {
        fprintf(stderr, "sort_init: cannot allocate %lu elements\n",
            (unsigned long) bots_arg_size);
        exit(EXIT_FAILURE);
    }
    fill_array(array);
    scramble_array(array);
}
/*
 * Benchmark driver: times one parallel cilksort of the global array,
 * printing the elapsed wall time in nanoseconds to stdout.
 */
void sort_par ( void )
{
bots_message("Computing multisort algorithm (n=%d) ", bots_arg_size);
const unsigned long long full_program_start = current_time_ns();
{
/* a single task spawned from one thread seeds the task tree;
   the rest of the team picks up child tasks at the taskwait points */
#pragma omp parallel
{
#pragma omp single nowait
{
#pragma omp task untied
{
cilksort_par(array, tmp, bots_arg_size);
}
}
}
} ;
const unsigned long long full_program_end = current_time_ns();
printf("full_program %llu ns\n", full_program_end - full_program_start);
bots_message(" completed!\n");
}
/*
 * Verify the sorted result: since fill_array wrote the identity
 * permutation, array[i] must equal i for every index. Scans the whole
 * array and reports a BOTS result code.
 */
int sort_verify ( void )
{
    int i;
    int ok = 1;

    for (i = 0; i < bots_arg_size; ++i)
        ok &= (array[i] == i);
    return ok ? BOTS_RESULT_SUCCESSFUL : BOTS_RESULT_UNSUCCESSFUL;
}
|
cache.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC AAA CCCC H H EEEEE %
% C A A C H H E %
% C AAAAA C HHHHH EEE %
% C A A C H H E %
% CCCC A A CCCC H H EEEEE %
% %
% %
% MagickCore Pixel Cache Methods %
% %
% Software Design %
% Cristy %
% July 1999 %
% %
% %
% Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/distribute-cache-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/quantum.h"
#include "MagickCore/random_.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/timer-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
Define declarations.
*/
#define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent)
#define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \
GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse)
/*
Typedef declarations.
*/
typedef struct _MagickModulo
{
ssize_t
quotient,
remainder;
} MagickModulo;
/*
Forward declarations.
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static Cache
GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *)
magick_hot_spot;
static const Quantum
*GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t,
const ssize_t,const size_t,const size_t,ExceptionInfo *),
*GetVirtualPixelsCache(const Image *);
static const void
*GetVirtualMetacontentFromCache(const Image *);
static MagickBooleanType
GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *,
ExceptionInfo *),
GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod,
const ssize_t,const ssize_t,Quantum *,ExceptionInfo *),
OpenPixelCache(Image *,const MapMode,ExceptionInfo *),
OpenPixelCacheOnDisk(CacheInfo *,const MapMode),
ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
ReadPixelCacheMetacontent(CacheInfo *magick_restrict,
NexusInfo *magick_restrict,ExceptionInfo *),
SyncAuthenticPixelsCache(Image *,ExceptionInfo *),
WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict,
ExceptionInfo *);
static Quantum
*GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode,
const ssize_t,const ssize_t,const size_t,const size_t,
const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *)
magick_hot_spot;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
static void
CopyOpenCLBuffer(CacheInfo *magick_restrict);
#endif
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
Global declarations.
*/
static SemaphoreInfo
*cache_semaphore = (SemaphoreInfo *) NULL;
static ssize_t
cache_anonymous_memory = (-1);
static time_t
cache_epoch = 0;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCache() acquires a pixel cache.
%
% The format of the AcquirePixelCache() method is:
%
% Cache AcquirePixelCache(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate Cache AcquirePixelCache(const size_t number_threads)
{
CacheInfo
*magick_restrict cache_info;
char
*value;
cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info));
if (cache_info == (CacheInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) memset(cache_info,0,sizeof(*cache_info));
cache_info->type=UndefinedCache;
cache_info->mode=IOMode;
cache_info->disk_mode=IOMode;
cache_info->colorspace=sRGBColorspace;
cache_info->file=(-1);
cache_info->id=GetMagickThreadId();
/* thread count: at least the caller's request, raised to the OpenMP
   maximum and the ThreadResource limit, never zero */
cache_info->number_threads=number_threads;
if (GetOpenMPMaximumThreads() > cache_info->number_threads)
cache_info->number_threads=GetOpenMPMaximumThreads();
if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
if (cache_info->number_threads == 0)
cache_info->number_threads=1;
cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
if (cache_info->nexus_info == (NexusInfo **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
/* synchronize flag: environment variable first, then the security
   policy, so the policy value wins when both are set */
value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
if (value != (const char *) NULL)
{
cache_info->synchronize=IsStringTrue(value);
value=DestroyString(value);
}
value=GetPolicyValue("cache:synchronize");
if (value != (const char *) NULL)
{
cache_info->synchronize=IsStringTrue(value);
value=DestroyString(value);
}
/* clamp width/height limits so they fit in a ssize_t */
cache_info->width_limit=MagickMin(GetMagickResourceLimit(WidthResource),
(MagickSizeType) MAGICK_SSIZE_MAX);
cache_info->height_limit=MagickMin(GetMagickResourceLimit(HeightResource),
(MagickSizeType) MAGICK_SSIZE_MAX);
cache_info->semaphore=AcquireSemaphoreInfo();
cache_info->reference_count=1;
cache_info->file_semaphore=AcquireSemaphoreInfo();
cache_info->debug=(GetLogEventMask() & CacheEvent) != 0 ? MagickTrue :
MagickFalse;
cache_info->signature=MagickCoreSignature;
return((Cache ) cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
% The format of the AcquirePixelCacheNexus method is:
%
% NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
NexusInfo
**magick_restrict nexus_info;
ssize_t
i;
/* allocate 2*number_threads pointer slots and one contiguous array of
   2*number_threads NexusInfo structs anchored at *nexus_info */
nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2*
number_threads,sizeof(*nexus_info)));
if (nexus_info == (NexusInfo **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
*nexus_info=(NexusInfo *) AcquireQuantumMemory(number_threads,
2*sizeof(**nexus_info));
if (*nexus_info == (NexusInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) memset(*nexus_info,0,2*number_threads*sizeof(**nexus_info));
for (i=0; i < (ssize_t) (2*number_threads); i++)
{
nexus_info[i]=(*nexus_info+i);
/* each of the first number_threads nexuses pairs with a virtual
   nexus taken from the upper half of the array */
if (i < (ssize_t) number_threads)
nexus_info[i]->virtual_nexus=(*nexus_info+number_threads+i);
nexus_info[i]->signature=MagickCoreSignature;
}
return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCachePixels() returns the pixels associated with the specified
% image.
%
% The format of the AcquirePixelCachePixels() method is:
%
% void *AcquirePixelCachePixels(const Image *image,size_t *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
 * Return a direct pointer to the image's pixel cache along with its
 * length in bytes. Only in-memory and memory-mapped caches expose their
 * pixels; for any other cache type *length is set to 0 and NULL is
 * returned. The exception argument is accepted for API symmetry but
 * unused.
 */
MagickExport void *AcquirePixelCachePixels(const Image *image,size_t *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  (void) exception;
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *length=0;
  if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache))
    {
      *length=(size_t) cache_info->length;
      return(cache_info->pixels);
    }
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentGenesis() instantiates the cache component.
%
% The format of the CacheComponentGenesis method is:
%
% MagickBooleanType CacheComponentGenesis(void)
%
*/
MagickPrivate MagickBooleanType CacheComponentGenesis(void)
{
/* lazily create the module-wide cache semaphore; always succeeds */
if (cache_semaphore == (SemaphoreInfo *) NULL)
cache_semaphore=AcquireSemaphoreInfo();
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentTerminus() destroys the cache component.
%
% The format of the CacheComponentTerminus() method is:
%
% CacheComponentTerminus(void)
%
*/
MagickPrivate void CacheComponentTerminus(void)
{
/* make sure the semaphore exists so Relinquish below has something to
   destroy even if genesis was never called */
if (cache_semaphore == (SemaphoreInfo *) NULL)
ActivateSemaphoreInfo(&cache_semaphore);
/* no op-- nothing to destroy */
RelinquishSemaphoreInfo(&cache_semaphore);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l i p P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
% mask. The method returns MagickTrue if the pixel region is clipped,
% otherwise MagickFalse.
%
% The format of the ClipPixelCacheNexus() method is:
%
% MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
NexusInfo *nexus_info,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
Quantum
*magick_restrict p,
*magick_restrict q;
ssize_t
y;
/*
Apply clip mask.
*/
if (IsEventLogging() != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/* nothing to do without a write mask or with an empty nexus region */
if ((image->channels & WriteMaskChannel) == 0)
return(MagickTrue);
if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
return(MagickTrue);
cache_info=(CacheInfo *) image->cache;
if (cache_info == (Cache) NULL)
return(MagickFalse);
/* p: committed pixels re-read through the virtual nexus; q: the pending
   pixels in this nexus that are about to be synced */
p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
nexus_info->region.width,nexus_info->region.height,
nexus_info->virtual_nexus,exception);
q=nexus_info->pixels;
if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL))
return(MagickFalse);
for (y=0; y < (ssize_t) nexus_info->region.height; y++)
{
ssize_t
x;
for (x=0; x < (ssize_t) nexus_info->region.width; x++)
{
double
mask_alpha;
ssize_t
i;
mask_alpha=QuantumScale*GetPixelWriteMask(image,p);
/* a non-zero mask: composite the old pixel over the new one for every
   updatable channel, weighted by the mask alpha */
if (fabs(mask_alpha) >= MagickEpsilon)
{
for (i=0; i < (ssize_t) image->number_channels; i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(MagickOver_((double) p[i],mask_alpha*
GetPixelAlpha(image,p),(double) q[i],(double)
GetPixelAlpha(image,q)));
}
SetPixelAlpha(image,GetPixelAlpha(image,p),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(image);
}
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCache() clones a pixel cache.
%
% The format of the ClonePixelCache() method is:
%
% Cache ClonePixelCache(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate Cache ClonePixelCache(const Cache cache)
{
CacheInfo
*magick_restrict clone_info;
const CacheInfo
*magick_restrict cache_info;
assert(cache != NULL);
cache_info=(const CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
if (IsEventLogging() != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
cache_info->filename);
/* note: only the thread count and virtual pixel method are carried
   over here; no pixel data is copied by this function */
clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads);
clone_info->virtual_pixel_method=cache_info->virtual_pixel_method;
return((Cache ) clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheMethods() clones the pixel cache methods from one cache to
% another.
%
% The format of the ClonePixelCacheMethods() method is:
%
% void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
% A description of each parameter follows:
%
% o clone: Specifies a pointer to a Cache structure.
%
% o cache: the pixel cache.
%
*/
MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
CacheInfo
*magick_restrict cache_info,
*magick_restrict source_info;
/* NOTE(review): the local names are confusing -- "source_info" is the
   DESTINATION (the clone argument); the methods flow from cache into
   clone via the final assignment below. */
assert(clone != (Cache) NULL);
source_info=(CacheInfo *) clone;
assert(source_info->signature == MagickCoreSignature);
if (IsEventLogging() != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
source_info->filename);
assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
source_info->methods=cache_info->methods;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e R e p o s i t o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheRepository() clones the source pixel cache to the destination
% cache.
%
% The format of the ClonePixelCacheRepository() method is:
%
% MagickBooleanType ClonePixelCacheRepository(CacheInfo *clone_info,
% CacheInfo *cache_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o clone_info: the pixel cache.
%
% o cache_info: the source pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
 * Copy a disk-backed pixel cache (cache_info, opened for read) into
 * another disk-backed cache (clone_info, opened for write) with the same
 * morphology. Returns MagickTrue only when exactly cache_info->length
 * bytes were transferred.
 */
static MagickBooleanType ClonePixelCacheOnDisk(
CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
MagickSizeType
extent;
size_t
quantum;
ssize_t
count;
struct stat
file_stats;
unsigned char
*buffer;
/*
Clone pixel cache on disk with identical morphology.
*/
if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
(OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
return(MagickFalse);
if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
(lseek(clone_info->file,0,SEEK_SET) < 0))
return(MagickFalse);
quantum=(size_t) MagickMaxBufferExtent;
if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
{
#if defined(MAGICKCORE_HAVE_LINUX_SENDFILE)
/* fast path: in-kernel copy; 0x7ffff000 is the Linux sendfile()
   per-call transfer ceiling. Rewind and fall through on a partial
   transfer. */
if (cache_info->length < 0x7ffff000)
{
count=sendfile(clone_info->file,cache_info->file,(off_t *) NULL,
(size_t) cache_info->length);
if (count == (ssize_t) cache_info->length)
return(MagickTrue);
if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
(lseek(clone_info->file,0,SEEK_SET) < 0))
return(MagickFalse);
}
#endif
quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
}
/* slow path: buffered read/write loop, quantum bytes at a time */
buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
if (buffer == (unsigned char *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
extent=0;
while ((count=read(cache_info->file,buffer,quantum)) > 0)
{
ssize_t
number_bytes;
number_bytes=write(clone_info->file,buffer,(size_t) count);
if (number_bytes != count)
break;
extent+=number_bytes;
}
buffer=(unsigned char *) RelinquishMagickMemory(buffer);
/* a short copy is a failure */
if (extent != cache_info->length)
return(MagickFalse);
return(MagickTrue);
}
static MagickBooleanType ClonePixelCacheRepository(
CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
ExceptionInfo *exception)
{
#define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource))
#define cache_number_threads(source,destination,chunk,multithreaded) \
num_threads((multithreaded) == 0 ? 1 : \
(((source)->type != MemoryCache) && ((source)->type != MapCache)) || \
(((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? \
MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \
MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1))
MagickBooleanType
optimize,
status;
NexusInfo
**magick_restrict cache_nexus,
**magick_restrict clone_nexus;
size_t
length;
ssize_t
y;
assert(cache_info != (CacheInfo *) NULL);
assert(clone_info != (CacheInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
if (cache_info->type == PingCache)
return(MagickTrue);
length=cache_info->number_channels*sizeof(*cache_info->channel_map);
if ((cache_info->storage_class == clone_info->storage_class) &&
(cache_info->colorspace == clone_info->colorspace) &&
(cache_info->alpha_trait == clone_info->alpha_trait) &&
(cache_info->channels == clone_info->channels) &&
(cache_info->columns == clone_info->columns) &&
(cache_info->rows == clone_info->rows) &&
(cache_info->number_channels == clone_info->number_channels) &&
(memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) &&
(cache_info->metacontent_extent == clone_info->metacontent_extent))
{
/*
Identical pixel cache morphology.
*/
if (((cache_info->type == MemoryCache) ||
(cache_info->type == MapCache)) &&
((clone_info->type == MemoryCache) || (clone_info->type == MapCache)))
{
(void) memcpy(clone_info->pixels,cache_info->pixels,
cache_info->number_channels*cache_info->columns*cache_info->rows*
sizeof(*cache_info->pixels));
if ((cache_info->metacontent_extent != 0) &&
(clone_info->metacontent_extent != 0))
(void) memcpy(clone_info->metacontent,cache_info->metacontent,
cache_info->columns*cache_info->rows*
clone_info->metacontent_extent*sizeof(unsigned char));
return(MagickTrue);
}
if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
return(ClonePixelCacheOnDisk(cache_info,clone_info));
}
/*
Mismatched pixel cache morphology.
*/
cache_nexus=AcquirePixelCacheNexus(cache_info->number_threads);
clone_nexus=AcquirePixelCacheNexus(clone_info->number_threads);
length=cache_info->number_channels*sizeof(*cache_info->channel_map);
optimize=(cache_info->number_channels == clone_info->number_channels) &&
(memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ?
MagickTrue : MagickFalse;
length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns,
clone_info->number_channels*clone_info->columns);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
Quantum
*pixels;
ssize_t
x;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
(void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length);
if (optimize != MagickFalse)
(void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length*
sizeof(Quantum));
else
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
/*
Mismatched pixel channel map.
*/
p=cache_nexus[id]->pixels;
q=clone_nexus[id]->pixels;
for (x=0; x < (ssize_t) cache_info->columns; x++)
{
ssize_t
i;
if (x == (ssize_t) clone_info->columns)
break;
for (i=0; i < (ssize_t) clone_info->number_channels; i++)
{
PixelChannel
channel;
PixelTrait
traits;
channel=clone_info->channel_map[i].channel;
traits=cache_info->channel_map[channel].traits;
if (traits != UndefinedPixelTrait)
*q=*(p+cache_info->channel_map[channel].offset);
q++;
}
p+=cache_info->number_channels;
}
}
status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
}
if ((cache_info->metacontent_extent != 0) &&
(clone_info->metacontent_extent != 0))
{
/*
Clone metacontent.
*/
length=(size_t) MagickMin(cache_info->metacontent_extent,
clone_info->metacontent_extent);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
Quantum
*pixels;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
if ((clone_nexus[id]->metacontent != (void *) NULL) &&
(cache_nexus[id]->metacontent != (void *) NULL))
(void) memcpy(clone_nexus[id]->metacontent,
cache_nexus[id]->metacontent,length*sizeof(unsigned char));
status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception);
}
}
clone_nexus=DestroyPixelCacheNexus(clone_nexus,clone_info->number_threads);
cache_nexus=DestroyPixelCacheNexus(cache_nexus,cache_info->number_threads);
if (cache_info->debug != MagickFalse)
{
char
message[MagickPathExtent];
(void) FormatLocaleString(message,MagickPathExtent,"%s => %s",
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixelCache() method is:
%
% void DestroyImagePixelCache(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void DestroyImagePixelCache(Image *image)
{
  /*
    Release the pixel cache attached to this image, if any.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->cache == (void *) NULL)
    return;
  image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixels() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixels() method is:
%
% void DestroyImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Deallocate memory associated with the pixel cache, delegating to a
    registered destroy handler when one is installed.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.destroy_pixel_handler == (DestroyPixelHandler) NULL)
    image->cache=DestroyPixelCache(image->cache);
  else
    cache_info->methods.destroy_pixel_handler(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyPixelCache() method is:
%
% Cache DestroyPixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    result;

  /*
    Close the disk cache's backing file, if open, and return its
    file-handle resource.  Returns MagickFalse when no file was open or
    the close failed.
  */
  if (cache_info->file == -1)
    return(MagickFalse);
  result=close(cache_info->file);
  cache_info->file=(-1);
  RelinquishMagickResource(FileResource,1);
  return(result != -1 ? MagickTrue : MagickFalse);
}
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  /*
    Release the pixel storage backing this cache and return the associated
    resource accounting, dispatched on the cache type.  On exit the cache is
    marked UndefinedCache with no pixels or metacontent attached.
  */
  switch (cache_info->type)
  {
    case MemoryCache:
    {
      /* Overwrite pixel memory before releasing it. */
      (void) ShredMagickMemory(cache_info->pixels,(size_t) cache_info->length);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      if (cache_info->opencl != (MagickCLCacheInfo) NULL)
        {
          /* An OpenCL buffer wraps the pixels: release through OpenCL and
             skip the host-side free below. */
          cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl,
            MagickTrue);
          cache_info->pixels=(Quantum *) NULL;
          break;
        }
#endif
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(Quantum *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        {
          /* Pixels were acquired via a mapping: unmap rather than free. */
          (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
          cache_info->pixels=(Quantum *) NULL;
        }
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(Quantum *) NULL;
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
      /* NOTE(review): no break here — control falls through into DiskCache,
         which closes the backing file descriptor.  The filename has already
         been cleared, so the DiskCache unlink is a no-op on that path.
         Confirm the fall-through is intended; if so an explicit
         fallthrough marker would make it unmistakable. */
    }
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      /* Keep the on-disk file for ReadMode/PersistMode caches; otherwise
         remove it. */
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      /* Pixels live on a remote distributed-cache server; release our
         association with them. */
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->metacontent=(void *) NULL;
}
MagickPrivate Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Drop one reference to the pixel cache; the final reference tears down
    pixels, nexus array, semaphores, and the CacheInfo itself.  Always
    returns NULL so callers can overwrite their handle.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /* The reference count is guarded by the cache semaphore. */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      /* Other holders remain; only the last reference destroys. */
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* Release pixel storage first, then ancillary structures. */
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->semaphore);
  /* Invalidate the signature to catch use-after-destroy. */
  cache_info->signature=(~MagickCoreSignature);
  cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
% The format of the DestroyPixelCacheNexus() method is:
%
%      NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
%        const size_t number_threads)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus to destroy.
%
% o number_threads: the number of nexus threads.
%
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  /*
    Release the staging buffer owned by a nexus and reset its bookkeeping.
  */
  if (nexus_info->mapped != MagickFalse)
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  else
    (void) RelinquishAlignedMemory(nexus_info->cache);
  nexus_info->cache=(Quantum *) NULL;
  nexus_info->pixels=(Quantum *) NULL;
  nexus_info->metacontent=(void *) NULL;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
}
MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  ssize_t
    n;

  /*
    Destroy the per-thread nexus array; two nexuses exist per thread.
  */
  assert(nexus_info != (NexusInfo **) NULL);
  for (n=0; n < (ssize_t) (2*number_threads); n++)
  {
    if (nexus_info[n]->cache != (Quantum *) NULL)
      RelinquishCacheNexusPixels(nexus_info[n]);
    nexus_info[n]->signature=(~MagickCoreSignature);
  }
  /* The NexusInfo structs share one allocation anchored at slot 0 (only
     the first element is freed), then the pointer array is released. */
  *nexus_info=(NexusInfo *) RelinquishMagickMemory(*nexus_info);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontent() returns the authentic metacontent corresponding
% with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the associated pixels are not available.
%
% The format of the GetAuthenticMetacontent() method is:
%
% void *GetAuthenticMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void *GetAuthenticMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the metacontent associated with the most recent authentic pixel
    request; a registered handler, when present, takes precedence over the
    thread's default nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_metacontent_from_handler ==
      (GetAuthenticMetacontentFromHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(cache_info->nexus_info[id]->metacontent);
    }
  return(cache_info->methods.get_authentic_metacontent_from_handler(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontentFromCache() returns the meta-content corresponding
% with the last call to QueueAuthenticPixelsCache() or
% GetAuthenticPixelsCache().
%
% The format of the GetAuthenticMetacontentFromCache() method is:
%
% void *GetAuthenticMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void *GetAuthenticMetacontentFromCache(const Image *image)
{
  const int
    id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict cache_info;

  /*
    Return the metacontent staged in the calling thread's default nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->metacontent);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL
% operations.
%
% The format of the GetAuthenticOpenCLBuffer() method is:
%
% cl_mem GetAuthenticOpenCLBuffer(const Image *image,
% MagickCLDevice device,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o device: the device to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
  MagickCLDevice device,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return (and retain) an OpenCL buffer wrapping the image's in-memory
    pixels for the given device, or NULL when the cache cannot be exposed
    to OpenCL.  The caller owns the retained reference on the returned
    cl_mem.
  */
  assert(image != (const Image *) NULL);
  assert(device != (const MagickCLDevice) NULL);
  cache_info=(CacheInfo *) image->cache;
  if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1))
    {
      /* Materialize a private cache before exposing it to the device;
         re-fetch since the sync may have replaced image->cache. */
      SyncImagePixelCache((Image *) image,exception);
      cache_info=(CacheInfo *) image->cache;
    }
  /* Only non-mapped in-memory caches can back an OpenCL buffer. */
  if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
    return((cl_mem) NULL);
  LockSemaphoreInfo(cache_info->semaphore);
  if ((cache_info->opencl != (MagickCLCacheInfo) NULL) &&
      (cache_info->opencl->device->context != device->context))
    /* Existing buffer belongs to a different context: re-create it. */
    cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    {
      assert(cache_info->pixels != (Quantum *) NULL);
      cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels,
        cache_info->length);
    }
  if (cache_info->opencl != (MagickCLCacheInfo) NULL)
    /* Hand the caller its own reference on the buffer. */
    RetainOpenCLMemObject(cache_info->opencl->buffer);
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    return((cl_mem) NULL);
  assert(cache_info->opencl->pixels == cache_info->pixels);
  return(cache_info->opencl->buffer);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
% disk pixel cache as defined by the geometry parameters. A pointer to the
% pixels is returned if the pixels are transferred, otherwise a NULL is
% returned.
%
% The format of the GetAuthenticPixelCacheNexus() method is:
%
% Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict pixels;

  /*
    Queue the requested region, then populate it from the backing store
    unless the nexus already views the cache pixels in place.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((Quantum *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(pixels);
  if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
    return((Quantum *) NULL);
  if ((cache_info->metacontent_extent != 0) &&
      (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) ==
       MagickFalse))
    return((Quantum *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsFromCache() returns the pixels associated with the last
% call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
% The format of the GetAuthenticPixelsFromCache() method is:
%
%      Quantum *GetAuthenticPixelsFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static Quantum *GetAuthenticPixelsFromCache(const Image *image)
{
  const int
    id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict cache_info;

  /*
    Return the pixels staged in the calling thread's default nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelQueue() returns the authentic pixels associated
% corresponding with the last call to QueueAuthenticPixels() or
% GetAuthenticPixels().
%
% The format of the GetAuthenticPixelQueue() method is:
%
%      Quantum *GetAuthenticPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Quantum *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the pixels from the most recent authentic request; a registered
    handler, when present, takes precedence over the thread's nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_pixels_from_handler ==
      (GetAuthenticPixelsFromHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(cache_info->nexus_info[id]->pixels);
    }
  return(cache_info->methods.get_authentic_pixels_from_handler(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a Quantum array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in memory, or in a memory-mapped file. The returned pointer
% must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image has corresponding metacontent,call
% GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the
% meta-content corresponding to the region. Once the Quantum array has
% been updated, the changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the GetAuthenticPixels() method is:
%
% Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Obtain a read/write pixel region; a registered handler, when present,
    services the request instead of the default nexus path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_pixels_handler ==
      (GetAuthenticPixelsHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
        cache_info->nexus_info[id],exception));
    }
  return(cache_info->methods.get_authentic_pixels_handler(image,x,y,columns,
    rows,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetAuthenticPixelsCache() method is:
%
% Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  const int
    id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict cache_info;

  /*
    Default cache path: service the region through the calling thread's
    default nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtent() returns the extent of the pixels associated corresponding
% with the last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetImageExtent() method is:
%
% MagickSizeType GetImageExtent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  const int
    id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict cache_info;

  /*
    Report the extent of the pixels staged in this thread's default nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCache() ensures that there is only a single reference to the
% pixel cache to be modified, updating the provided cache pointer to point to
% a clone of the original pixel cache if necessary.
%
% The format of the GetImagePixelCache method is:
%
% Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone: any value other than MagickFalse clones the cache pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  const CacheInfo
    *magick_restrict cache_info;

  const PixelChannelMap
    *magick_restrict p,
    *magick_restrict q;

  MagickBooleanType
    match;

  /*
    Confirm the image and its pixel cache agree on storage class,
    colorspace, geometry, channel layout, and metacontent extent, and that
    the nexus array exists.
  */
  cache_info=(CacheInfo *) image->cache;
  p=image->channel_map;
  q=cache_info->channel_map;
  match=((image->storage_class == cache_info->storage_class) &&
    (image->colorspace == cache_info->colorspace) &&
    (image->alpha_trait == cache_info->alpha_trait) &&
    (image->channels == cache_info->channels) &&
    (image->columns == cache_info->columns) &&
    (image->rows == cache_info->rows) &&
    (image->number_channels == cache_info->number_channels) &&
    (memcmp(p,q,image->number_channels*sizeof(*p)) == 0) &&
    (image->metacontent_extent == cache_info->metacontent_extent) &&
    (cache_info->nexus_info != (NexusInfo **) NULL)) ? MagickTrue :
    MagickFalse;
  return(match);
}
static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    destroy,
    status;

  /* Lazily-initialized process-wide limits; shared across calls. */
  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;

  status=MagickTrue;
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  /* Throttle: sleep once every 32 calls when a throttle limit is set. */
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds.
      */
      cache_timelimit=GetMagickResourceLimit(TimeResource);
      cache_epoch=GetMagickTime();
    }
  if ((cache_timelimit != MagickResourceInfinity) &&
      ((MagickSizeType) (GetMagickTime()-cache_epoch) >= cache_timelimit))
    {
      /* Time-limit exceeded: close any disk cache and abort fatally. */
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      cache_info=(CacheInfo *) image->cache;
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  /* Double-checked under the cache semaphore: a shared or read-only cache
     must be replaced by a private writable clone before modification. */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;

          Image
            clone_image;

          /*
            Clone pixel cache.
          */
          clone_image=(*image);
          clone_image.semaphore=AcquireSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status == MagickFalse)
            clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
          else
            {
              /* Copy pixel data only when the caller asked for a clone. */
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status == MagickFalse)
                clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
              else
                {
                  /* Swap in the clone; the original is released below,
                     after its semaphore is unlocked. */
                  destroy=MagickTrue;
                  image->cache=clone_info;
                }
            }
          RelinquishSemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      if (image->type != UndefinedType)
        image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->file != -1)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
% DiskCache, MemoryCache, MapCache, or PingCache.
%
% The format of the GetImagePixelCacheType() method is:
%
% CacheType GetImagePixelCacheType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  /* Report the storage strategy (memory, map, disk, ...) backing the
     image's pixel cache. */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e A u t h e n t i c P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixel() method is:
%
%      MagickBooleanType GetOneAuthenticPixel(const Image *image,const ssize_t x,
%        const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType CopyPixel(const Image *image,
  const Quantum *source,Quantum *destination)
{
  ssize_t
    n;

  /*
    Scatter one packed source pixel into canonical channel slots of the
    destination; when no source is available, fill with the image
    background color and report failure.
  */
  if (source == (const Quantum *) NULL)
    {
      destination[RedPixelChannel]=ClampToQuantum(image->background_color.red);
      destination[GreenPixelChannel]=ClampToQuantum(
        image->background_color.green);
      destination[BluePixelChannel]=ClampToQuantum(
        image->background_color.blue);
      destination[BlackPixelChannel]=ClampToQuantum(
        image->background_color.black);
      destination[AlphaPixelChannel]=ClampToQuantum(
        image->background_color.alpha);
      return(MagickFalse);
    }
  for (n=0; n < (ssize_t) GetPixelChannels(image); n++)
    destination[GetPixelChannelChannel(image,n)]=source[n];
  return(MagickTrue);
}
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict q;

  /*
    Return the single pixel at (x,y); the background color is substituted
    when the pixel cannot be read.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  if (cache_info->methods.get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,
      pixel,exception));
  q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
  return(CopyPixel(image,q,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e A u t h e n t i c P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixelFromCache() method is:
%
%      MagickBooleanType GetOneAuthenticPixelFromCache(const Image *image,
% const ssize_t x,const ssize_t y,Quantum *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  /*
    Default handler: read one authentic pixel through the calling
    thread's private cache nexus.
  */
  CacheInfo
    *magick_restrict info;

  const int
    id = GetOpenMPThreadId();

  Quantum
    *magick_restrict q;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(id < (int) info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  q=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,info->nexus_info[id],
    exception);
  return(CopyPixel(image,q,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixel() returns a single virtual pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixel() method is:
%
%      MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
%        const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  /*
    Fetch one virtual (read-only) pixel at (x,y) using the image's
    current virtual-pixel method.  A registered handler takes
    precedence; otherwise use this thread's cache nexus.
  */
  CacheInfo
    *magick_restrict info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  if (info->methods.get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(info->methods.get_one_virtual_pixel_from_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
  assert(id < (int) info->number_threads);
  p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,info->nexus_info[id],exception);
  return(CopyPixel(image,p,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e V i r t u a l P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelFromCache() returns a single virtual pixel at the
% specified (x,y) location. The image background color is returned if an
% error occurs.
%
% The format of the GetOneVirtualPixelFromCache() method is:
%
%      MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  Quantum *pixel,ExceptionInfo *exception)
{
  /*
    Default handler: read one virtual pixel through the calling thread's
    private cache nexus using the requested virtual-pixel method.
  */
  CacheInfo
    *magick_restrict info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(id < (int) info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    info->nexus_info[id],exception);
  return(CopyPixel(image,p,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixelInfo() method is:
%
%      MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelInfo *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: these values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  /*
    Read one virtual pixel at (x,y) and convert it to a PixelInfo.
    Returns MagickFalse when the pixel cannot be acquired; pixel then
    holds the default from GetPixelInfo().
  */
  CacheInfo
    *magick_restrict info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *magick_restrict p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(id < (int) info->number_threads);
  GetPixelInfo(image,pixel);
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    info->nexus_info[id],exception);
  if (p == (const Quantum *) NULL)
    return(MagickFalse);
  GetPixelInfoPixel(image,p,pixel);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
% The format of the GetPixelCacheColorspace() method is:
%
% Colorspace GetPixelCacheColorspace(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
  /*
    Report the colorspace recorded in the pixel cache.
  */
  CacheInfo
    *magick_restrict info;

  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheFilename() returns the filename associated with the pixel
% cache.
%
% The format of the GetPixelCacheFilename() method is:
%
% const char *GetPixelCacheFilename(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const char *GetPixelCacheFilename(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
return(cache_info->cache_filename);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheMethods() initializes the CacheMethods structure.
%
% The format of the GetPixelCacheMethods() method is:
%
% void GetPixelCacheMethods(CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  /*
    Reset cache_methods and install the default pixel cache handlers.
  */
  assert(cache_methods != (CacheMethods *) NULL);
  (void) memset(cache_methods,0,sizeof(*cache_methods));
  /* virtual (read-only) pixel access */
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  cache_methods->get_virtual_metacontent_from_handler=
    GetVirtualMetacontentFromCache;
  /* authentic (writable) pixel access */
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  cache_methods->get_authentic_metacontent_from_handler=
    GetAuthenticMetacontentFromCache;
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  /* teardown */
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e N e x u s E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheNexusExtent() returns the extent of the pixels associated with
% the last call to SetPixelCacheNexusPixels() or GetPixelCacheNexusPixels().
%
% The format of the GetPixelCacheNexusExtent() method is:
%
% MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus info.
%
*/
MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  /*
    Extent, in pixels, of the nexus region.  An empty region reports the
    full cache extent (columns*rows) instead.
  */
  CacheInfo
    *magick_restrict info;

  MagickSizeType
    area;

  assert(cache != NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  area=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (area != 0)
    return(area);
  return((MagickSizeType) info->columns*info->rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCachePixels() returns the pixels associated with the specified image.
%
% The format of the GetPixelCachePixels() method is:
%
% void *GetPixelCachePixels(Image *image,MagickSizeType *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length,
  ExceptionInfo *magick_unused(exception))
{
  /*
    Expose the raw pixel store and its length.  Only in-core caches
    (memory or map) have directly addressable pixels; other cache types
    yield NULL (length is still reported).
  */
  CacheInfo
    *magick_restrict info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  assert(length != (MagickSizeType *) NULL);
  magick_unreferenced(exception);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  *length=info->length;
  if ((info->type == MemoryCache) || (info->type == MapCache))
    return((void *) info->pixels);
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheStorageClass() method is:
%
% ClassType GetPixelCacheStorageClass(Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache)
{
  /*
    Report the storage class (DirectClass or PseudoClass) of the cache.
  */
  CacheInfo
    *magick_restrict info;

  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->storage_class);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e T i l e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheTileSize() returns the pixel cache tile size.
%
% The format of the GetPixelCacheTileSize() method is:
%
% void GetPixelCacheTileSize(const Image *image,size_t *width,
% size_t *height)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the optimized cache tile width in pixels.
%
% o height: the optimized cache tile height in pixels.
%
*/
MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width,
size_t *height)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (IsEventLogging() != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
*width=2048UL/(MagickMax(cache_info->number_channels,1)*sizeof(Quantum));
if (GetImagePixelCacheType(image) == DiskCache)
*width=8192UL/(MagickMax(cache_info->number_channels,1)*sizeof(Quantum));
*height=(*width);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
% pixel cache. A virtual pixel is any pixel access that is outside the
% boundaries of the image cache.
%
% The format of the GetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
  /*
    Report the method used to synthesize pixels outside the cache
    boundaries ("virtual pixels").
  */
  CacheInfo
    *magick_restrict info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  return(info->virtual_pixel_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromCache() returns the meta-content corresponding with
% the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualMetacontentFromCache() method is:
%
% void *GetVirtualMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const void *GetVirtualMetacontentFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
const void
*magick_restrict metacontent;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
metacontent=GetVirtualMetacontentFromNexus(cache_info,
cache_info->nexus_info[id]);
return(metacontent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromNexus() returns the meta-content for the specified
% cache nexus.
%
% The format of the GetVirtualMetacontentFromNexus() method is:
%
% const void *GetVirtualMetacontentFromNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the meta-content.
%
*/
MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  /*
    Meta-content attached to the given nexus, or NULL when the cache has
    no storage class assigned yet.
  */
  CacheInfo
    *magick_restrict info;

  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->storage_class != UndefinedClass)
    return(nexus_info->metacontent);
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontent() returns the virtual metacontent corresponding with
% the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the meta-content are not available.
%
% The format of the GetVirtualMetacontent() method is:
%
% const void *GetVirtualMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const void *GetVirtualMetacontent(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
const void
*magick_restrict metacontent;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
metacontent=cache_info->methods.get_virtual_metacontent_from_handler(image);
if (metacontent != (void *) NULL)
return(metacontent);
assert(id < (int) cache_info->number_threads);
metacontent=GetVirtualMetacontentFromNexus(cache_info,
cache_info->nexus_info[id]);
return(metacontent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk
% pixel cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCacheNexus() method is:
%
% Quantum *GetVirtualPixelCacheNexus(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to acquire.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  8x8 ordered-dither offsets (values 0..63); indexed by the low three
  bits of the coordinate to jitter virtual-pixel lookups.
*/
static ssize_t
  DitherMatrix[64] =
  {
     0, 48, 12, 60,  3, 51, 15, 63,
    32, 16, 44, 28, 35, 19, 47, 31,
     8, 56,  4, 52, 11, 59,  7, 55,
    40, 24, 36, 20, 43, 27, 39, 23,
     2, 50, 14, 62,  1, 49, 13, 61,
    34, 18, 46, 30, 33, 17, 45, 29,
    10, 58,  6, 54,  9, 57,  5, 53,
    42, 26, 38, 22, 41, 25, 37, 21
  };

static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  ssize_t
    position;

  /*
    Jitter the column by a centered dither offset (-32..31), then clamp
    to [0, columns-1].
  */
  position=x+DitherMatrix[x & 0x07]-32L;
  if (position < 0L)
    return(0L);
  if (position >= (ssize_t) columns)
    return((ssize_t) columns-1L);
  return(position);
}

static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  ssize_t
    position;

  /*
    Jitter the row by a centered dither offset (-32..31), then clamp to
    [0, rows-1].
  */
  position=y+DitherMatrix[y & 0x07]-32L;
  if (position < 0L)
    return(0L);
  if (position >= (ssize_t) rows)
    return((ssize_t) rows-1L);
  return(position);
}
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  /*
    Clamp a column offset into [0, columns-1] (replicate-edge policy).
  */
  if (x >= (ssize_t) columns)
    return((ssize_t) (columns-1));
  if (x < 0L)
    return(0L);
  return(x);
}
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  /*
    Clamp a row offset into [0, rows-1] (replicate-edge policy).
  */
  if (y >= (ssize_t) rows)
    return((ssize_t) (rows-1));
  if (y < 0L)
    return(0L);
  return(y);
}
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  /*
    Pick a pseudo-random column index; presumably GetPseudoRandomValue()
    yields a value in [0,1) so the result lies in [0, columns) -- confirm
    against MagickCore/random.c.
  */
  return((ssize_t) (columns*GetPseudoRandomValue(random_info)));
}
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  /*
    Pick a pseudo-random row index; presumably GetPseudoRandomValue()
    yields a value in [0,1) so the result lies in [0, rows) -- confirm
    against MagickCore/random.c.
  */
  return((ssize_t) (rows*GetPseudoRandomValue(random_info)));
}
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
  const size_t extent)
{
  /*
    Floored division of offset by extent: the remainder always lands in
    [0, extent) so tiled virtual-pixel methods wrap negative offsets
    correctly.  A zero extent yields quotient=offset, remainder=0.
  */
  MagickModulo
    result;

  result.quotient=offset;
  result.remainder=0;
  if (extent != 0)
    {
      result.quotient=offset/((ssize_t) extent);
      result.remainder=offset % ((ssize_t) extent);
      if ((result.remainder != 0) && ((offset ^ ((ssize_t) extent)) < 0))
        {
          /*
            C division truncates toward zero; shift to floored semantics
            when the operand signs differ.
          */
          result.quotient-=1;
          result.remainder+=((ssize_t) extent);
        }
    }
  return(result);
}
MagickPrivate const Quantum *GetVirtualPixelCacheNexus(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    length,
    number_pixels;

  NexusInfo
    *magick_restrict virtual_nexus;

  Quantum
    *magick_restrict pixels,
    virtual_pixel[MaxPixelChannels];

  const Quantum
    *magick_restrict p;

  const void
    *magick_restrict r;

  Quantum
    *magick_restrict q;

  ssize_t
    i,
    u;

  unsigned char
    *magick_restrict s;

  ssize_t
    v;

  void
    *magick_restrict virtual_metacontent;

  /*
    Acquire pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((const Quantum *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  /*
    Map the requested region onto the nexus; the boolean flag notes
    whether the image carries an active write or composite mask.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((const Quantum *) NULL);
  q=pixels;
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
    nexus_info->region.width-1L;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  /*
    Fast path: the whole request lies inside the cache extents, so no
    virtual pixels need to be synthesized.
  */
  if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
    if ((x >= 0) && ((ssize_t) (x+columns-1) < (ssize_t) cache_info->columns) &&
        (y >= 0) && ((ssize_t) (y+rows-1) < (ssize_t) cache_info->rows))
      {
        MagickBooleanType
          status;

        /*
          Pixel request is inside cache extents.
        */
        if (nexus_info->authentic_pixel_cache != MagickFalse)
          return(q);
        status=ReadPixelCachePixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          return((const Quantum *) NULL);
        if (cache_info->metacontent_extent != 0)
          {
            status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception);
            if (status == MagickFalse)
              return((const Quantum *) NULL);
          }
        return(q);
      }
  /*
    Pixel request is outside cache extents.
  */
  virtual_nexus=nexus_info->virtual_nexus;
  s=(unsigned char *) nexus_info->metacontent;
  (void) memset(virtual_pixel,0,cache_info->number_channels*
    sizeof(*virtual_pixel));
  virtual_metacontent=(void *) NULL;
  /*
    For constant-color virtual-pixel methods, build the single pixel (and
    zeroed meta-content buffer) that is replicated outside the cache.
  */
  switch (virtual_pixel_method)
  {
    case BackgroundVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case MaskVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
    case HorizontalTileVirtualPixelMethod:
    case VerticalTileVirtualPixelMethod:
    {
      if (cache_info->metacontent_extent != 0)
        {
          /*
            Acquire a metacontent buffer.
          */
          virtual_metacontent=(void *) AcquireQuantumMemory(1,
            cache_info->metacontent_extent);
          if (virtual_metacontent == (void *) NULL)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                CacheError,"UnableToGetCacheNexus","`%s'",image->filename);
              return((const Quantum *) NULL);
            }
          (void) memset(virtual_metacontent,0,cache_info->metacontent_extent);
        }
      switch (virtual_pixel_method)
      {
        case BlackVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case GrayVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange/2,
              virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case TransparentVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,TransparentAlpha,virtual_pixel);
          break;
        }
        case MaskVirtualPixelMethod:
        case WhiteVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        default:
        {
          /*
            Remaining constant-color methods use the background color.
          */
          SetPixelRed(image,ClampToQuantum(image->background_color.red),
            virtual_pixel);
          SetPixelGreen(image,ClampToQuantum(image->background_color.green),
            virtual_pixel);
          SetPixelBlue(image,ClampToQuantum(image->background_color.blue),
            virtual_pixel);
          SetPixelBlack(image,ClampToQuantum(image->background_color.black),
            virtual_pixel);
          SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha),
            virtual_pixel);
          break;
        }
      }
      break;
    }
    default:
      break;
  }
  /*
    Assemble the region row by row.  In-bounds spans are copied as runs;
    out-of-bounds coordinates are resolved one pixel at a time via a
    recursive single-pixel request on the virtual nexus.
  */
  for (v=0; v < (ssize_t) rows; v++)
  {
    ssize_t
      y_offset;

    y_offset=y+v;
    if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
        (virtual_pixel_method == UndefinedVirtualPixelMethod))
      y_offset=EdgeY(y_offset,cache_info->rows);
    for (u=0; u < (ssize_t) columns; u+=length)
    {
      ssize_t
        x_offset;

      x_offset=x+u;
      /* length: number of in-bounds pixels that can be copied as a run */
      length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
      if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
          ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
          (length == 0))
        {
          MagickModulo
            x_modulo,
            y_modulo;

          /*
            Transfer a single pixel.
          */
          length=(MagickSizeType) 1;
          switch (virtual_pixel_method)
          {
            case EdgeVirtualPixelMethod:
            default:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),
                EdgeY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case RandomVirtualPixelMethod:
            {
              /* lazily create the random generator on first use */
              if (cache_info->random_info == (RandomInfo *) NULL)
                cache_info->random_info=AcquireRandomInfo();
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                RandomX(cache_info->random_info,cache_info->columns),
                RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case DitherVirtualPixelMethod:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                DitherX(x_offset,cache_info->columns),
                DitherY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case TileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case MirrorVirtualPixelMethod:
            {
              /* odd tile repetitions are reflected */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              if ((x_modulo.quotient & 0x01) == 1L)
                x_modulo.remainder=(ssize_t) cache_info->columns-
                  x_modulo.remainder-1L;
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if ((y_modulo.quotient & 0x01) == 1L)
                y_modulo.remainder=(ssize_t) cache_info->rows-
                  y_modulo.remainder-1L;
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileEdgeVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileEdgeVirtualPixelMethod:
            {
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case BackgroundVirtualPixelMethod:
            case BlackVirtualPixelMethod:
            case GrayVirtualPixelMethod:
            case TransparentVirtualPixelMethod:
            case MaskVirtualPixelMethod:
            case WhiteVirtualPixelMethod:
            {
              /* constant-color methods reuse the precomputed pixel */
              p=virtual_pixel;
              r=virtual_metacontent;
              break;
            }
            case CheckerTileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileVirtualPixelMethod:
            {
              if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileVirtualPixelMethod:
            {
              if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
          }
          if (p == (const Quantum *) NULL)
            break;
          (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
            sizeof(*p)));
          q+=cache_info->number_channels;
          if ((s != (void *) NULL) && (r != (const void *) NULL))
            {
              (void) memcpy(s,r,(size_t) cache_info->metacontent_extent);
              s+=cache_info->metacontent_extent;
            }
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,virtual_nexus,exception);
      if (p == (const Quantum *) NULL)
        break;
      r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
      (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
        sizeof(*p)));
      q+=cache_info->number_channels*length;
      if ((r != (void *) NULL) && (s != (const void *) NULL))
        {
          (void) memcpy(s,r,(size_t) length);
          s+=length*cache_info->metacontent_extent;
        }
    }
    if (u < (ssize_t) columns)
      break;
  }
  /*
    Free resources.
  */
  if (virtual_metacontent != (void *) NULL)
    virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent);
  /* an early break above means some pixel could not be acquired */
  if (v < (ssize_t) rows)
    return((const Quantum *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel
% cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCache() method is:
%
% const Quantum *GetVirtualPixelCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const Quantum *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  /*
    Read-only access to a pixel region through the calling thread's private
    cache nexus; returns NULL if the pixels cannot be transferred.
  */
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,rows,
    cache_info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelQueue() returns the virtual pixels associated with the last
%  call to QueueAuthenticPixels() or GetVirtualPixels().
%
% The format of the GetVirtualPixelQueue() method is:
%
%      const Quantum *GetVirtualPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const Quantum *GetVirtualPixelQueue(const Image *image)
{
  /*
    Return the virtual pixels from the most recent cache request.  A
    registered get-virtual-pixels handler takes precedence; otherwise the
    calling thread's cache nexus supplies the pixels.
  */
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_virtual_pixels_handler !=
      (GetVirtualPixelsHandler) NULL)
    return(cache_info->methods.get_virtual_pixels_handler(image));
  assert(thread_id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[thread_id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in memory, or in a memory-mapped file. The
% returned pointer must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
% access the meta-content (of type void) corresponding to the
% region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
% safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% The format of the GetVirtualPixels() method is:
%
% const Quantum *GetVirtualPixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const Quantum *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  /*
    Return an immutable pixel region.  A registered get-virtual-pixel
    handler takes precedence; otherwise the region is fetched through the
    calling thread's private cache nexus.
  */
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_virtual_pixel_handler !=
      (GetVirtualPixelHandler) NULL)
    return(cache_info->methods.get_virtual_pixel_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception));
  assert(thread_id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    columns,rows,cache_info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelsCache() returns the pixels associated with the last call to
%  QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualPixelsCache() method is:
%
%      const Quantum *GetVirtualPixelsCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const Quantum *GetVirtualPixelsCache(const Image *image)
{
  /*
    Return the pixels tied to the most recent cache request made on the
    calling thread's nexus.
  */
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(image->cache,
    cache_info->nexus_info[thread_id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsNexus() returns the pixels associated with the specified
% cache nexus.
%
% The format of the GetVirtualPixelsNexus() method is:
%
% const Quantum *GetVirtualPixelsNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap pixels.
%
*/
MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  /*
    Hand back the staging pixels of the given cache nexus; an undefined
    storage class means the cache holds no pixels yet.
  */
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->storage_class == UndefinedClass ? (const Quantum *) NULL :
    (const Quantum *) nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a s k P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MaskPixelCacheNexus() masks the cache nexus as defined by the composite mask.
% The method returns MagickTrue if the pixel region is masked, otherwise
% MagickFalse.
%
% The format of the MaskPixelCacheNexus() method is:
%
% MagickBooleanType MaskPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum ApplyPixelCompositeMask(const Quantum p,
  const MagickRealType alpha,const Quantum q,const MagickRealType beta)
{
  /*
    Blend p over q weighted by the mask alpha/beta pair; a fully transparent
    mask leaves q untouched.
  */
  double
    scale;

  if (fabs((double) (alpha-TransparentAlpha)) < MagickEpsilon)
    return(q);
  scale=PerceptibleReciprocal(1.0-QuantumScale*QuantumScale*alpha*beta);
  return(ClampToQuantum(scale*MagickOver_((double) p,alpha,(double) q,beta)));
}
static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  /*
    Blend the nexus' working pixels with the authentic pixels of the same
    region, weighted by the image's composite mask channel.  Returns
    MagickTrue on success (including the trivial no-mask / empty-region
    cases), MagickFalse if either pixel set cannot be obtained.
  */
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict p,
    *magick_restrict q;

  ssize_t
    y;

  /*
    Apply composite mask.
  */
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->channels & CompositeMaskChannel) == 0)
    return(MagickTrue);  /* no composite mask: nothing to apply */
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);  /* empty region: trivially masked */
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /*
    p walks the authentic (on-cache) pixels of the region, q walks the
    nexus' staged pixels; both advance in lock-step, one pixel's worth of
    channels at a time.
  */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL))
    return(MagickFalse);
  for (y=0; y < (ssize_t) nexus_info->region.height; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) nexus_info->region.width; x++)
    {
      double
        alpha;

      ssize_t
        i;

      /* mask weight for this pixel, read from the authentic pixel */
      alpha=(double) GetPixelCompositeMask(image,p);
      for (i=0; i < (ssize_t) image->number_channels; i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;  /* channel not flagged for update; leave untouched */
        q[i]=ApplyPixelCompositeMask(q[i],alpha,p[i],GetPixelAlpha(image,p));
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p e n P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenPixelCache() allocates the pixel cache. This includes defining the cache
% dimensions, allocating space for the image pixels and optionally the
% metacontent, and memory mapping the cache if it is disk based. The cache
% nexus array is initialized as well.
%
% The format of the OpenPixelCache() method is:
%
% MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  /*
    Open (or reuse) the disk-backed pixel cache file in the requested mode.
  */
  int
    file;

  if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
    return(MagickTrue);  /* cache already open and in the proper mode */
  if (*cache_info->cache_filename == '\0')
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    if (mode == ReadMode)
      file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
    else
      {
        int
          access_flags;

        /*
          WriteMode opens write-only; IOMode (and any other mode) opens
          read-write.  Try exclusive create first, then fall back to an
          existing file.
        */
        access_flags=(mode == WriteMode) ? O_WRONLY : O_RDWR;
        file=open_utf8(cache_info->cache_filename,access_flags | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,access_flags | O_BINARY,
            S_MODE);
      }
  if (file == -1)
    return(MagickFalse);
  (void) AcquireMagickResource(FileResource,1);
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info);
  cache_info->file=file;
  cache_info->disk_mode=mode;
  return(MagickTrue);
}
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  /*
    Write `length' bytes from buffer to the cache file starting at `offset';
    returns the number of bytes actually written (short on a hard error), or
    -1 if the initial seek fails on platforms without pwrite().
  */
  MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  /* No pwrite(): position the shared file offset explicitly first. */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
    /* each write is clamped to SSIZE_MAX to stay within the API contract */
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* hard error: report the bytes written so far */
      }
  }
  return(i);
}
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  /*
    Grow the on-disk pixel cache file to at least `length' bytes by writing
    a single byte at length-1 (creating a sparse file where supported),
    optionally pre-allocating real blocks via posix_fallocate().  Returns
    MagickFalse if the length overflows a signed offset or any file
    operation fails.
  */
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    count,
    extent,
    offset;

  cache_info=(CacheInfo *) image->cache;
  if (cache_info->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        message[MagickPathExtent];

      (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format);
      (void) FormatLocaleString(message,MagickPathExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* reject lengths that do not round-trip through a signed offset */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1;  /* file already large enough */
  else
    {
      /* writing one byte at length-1 extends the file to `length' bytes */
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
        "");
      if (count != 1)
        return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      /* when synchronizing, back the sparse region with real blocks */
      if (cache_info->synchronize != MagickFalse)
        if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0)
          return(MagickFalse);
#endif
    }
  /* rewind so subsequent reads/writes start at the beginning */
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(MagickTrue);
}
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  /*
    Allocate the pixel cache, trying each backing store in order of
    preference: heap/anonymous memory, a distributed cache server, a
    memory-mapped disk file, and finally a plain disk file.  Sets
    cache_info->type accordingly and returns MagickTrue on success.
  */
  CacheInfo
    *magick_restrict cache_info,
    source_info;

  char
    format[MagickPathExtent],
    message[MagickPathExtent];

  const char
    *hosts,
    *type;

  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  size_t
    columns,
    packet_size;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (cache_anonymous_memory < 0)
    {
      char
        *value;

      /*
        Does the security policy require anonymous mapping for pixel cache?
      */
      cache_anonymous_memory=0;
      value=GetPolicyValue("pixel-cache-memory");
      if (value == (char *) NULL)
        value=GetPolicyValue("cache:memory-map");
      if (LocaleCompare(value,"anonymous") == 0)
        {
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
          cache_anonymous_memory=1;
#else
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
            "'%s' (policy requires anonymous memory mapping)",image->filename);
#endif
        }
      value=DestroyString(value);
    }
  if ((image->columns == 0) || (image->rows == 0))
    ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* enforce the configured width/height resource limits */
  if (((MagickSizeType) image->columns > cache_info->width_limit) ||
      ((MagickSizeType) image->rows > cache_info->height_limit))
    ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
      image->filename);
  if (GetMagickResourceLimit(ListLengthResource) != MagickResourceInfinity)
    {
      length=GetImageListLength(image);
      if (AcquireMagickResource(ListLengthResource,length) == MagickFalse)
        ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit",
          image->filename);
    }
  /*
    Keep a copy of the current cache state so existing pixels can be cloned
    into the new store (or reinstated if allocation fails).
  */
  source_info=(*cache_info);
  source_info.file=(-1);
  (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]",
    image->filename,(double) image->scene);
  /* mirror the image's geometry and channel layout into the cache */
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  cache_info->alpha_trait=image->alpha_trait;
  cache_info->channels=image->channels;
  cache_info->rows=image->rows;
  cache_info->columns=image->columns;
  InitializePixelChannelMap(image);
  cache_info->number_channels=GetPixelChannels(image);
  (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels*
    sizeof(*image->channel_map));
  cache_info->metacontent_extent=image->metacontent_extent;
  cache_info->mode=mode;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  packet_size=MagickMax(cache_info->number_channels,1)*sizeof(Quantum);
  if (image->metacontent_extent != 0)
    packet_size+=cache_info->metacontent_extent;
  length=number_pixels*packet_size;
  /*
    Overflow check: dividing the computed length back down must reproduce
    the column count, and the dimensions must fit in a signed type.
  */
  columns=(size_t) (length/cache_info->rows/packet_size);
  if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
      ((ssize_t) cache_info->rows < 0))
    ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
      image->filename);
  cache_info->length=length;
  if (image->ping != MagickFalse)
    {
      /* ping mode needs no pixel storage at all */
      cache_info->type=PingCache;
      return(MagickTrue);
    }
  status=AcquireMagickResource(AreaResource,(MagickSizeType)
    cache_info->columns*cache_info->rows);
  if (cache_info->mode == PersistMode)
    status=MagickFalse;  /* persistent caches always go to disk */
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if ((status != MagickFalse) &&
      (length == (MagickSizeType) ((size_t) length)) &&
      ((cache_info->type == UndefinedCache) ||
       (cache_info->type == MemoryCache)))
    {
      /*
        First choice: in-memory pixel cache (heap or anonymous mapping).
      */
      status=AcquireMagickResource(MemoryResource,cache_info->length);
      if (status != MagickFalse)
        {
          status=MagickTrue;
          if (cache_anonymous_memory <= 0)
            {
              cache_info->mapped=MagickFalse;
              cache_info->pixels=(Quantum *) MagickAssumeAligned(
                AcquireAlignedMemory(1,(size_t) cache_info->length));
            }
          else
            {
              cache_info->mapped=MagickTrue;
              cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t)
                cache_info->length);
            }
          if (cache_info->pixels == (Quantum *) NULL)
            {
              /* allocation failed: reinstate previous pixels, fall through */
              cache_info->mapped=source_info.mapped;
              cache_info->pixels=source_info.pixels;
            }
          else
            {
              /*
                Create memory pixel cache.
              */
              cache_info->type=MemoryCache;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  cache_info->number_channels*number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  /* carry existing pixels over into the new store */
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (cache_info->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->mapped != MagickFalse ?
                    "Anonymous" : "Heap",type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              cache_info->storage_class=image->storage_class;
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
    }
  status=AcquireMagickResource(DiskResource,cache_info->length);
  hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts",
    exception);
  if ((status == MagickFalse) && (hosts != (const char *) NULL))
    {
      DistributeCacheInfo
        *server_info;

      /*
        Distribute the pixel cache to a remote server.
      */
      server_info=AcquireDistributeCacheInfo(exception);
      if (server_info != (DistributeCacheInfo *) NULL)
        {
          status=OpenDistributePixelCache(server_info,image);
          if (status == MagickFalse)
            {
              ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
                GetDistributeCacheHostname(server_info));
              server_info=DestroyDistributeCacheInfo(server_info);
            }
          else
            {
              /*
                Create a distributed pixel cache.
              */
              status=MagickTrue;
              cache_info->type=DistributedCache;
              cache_info->server_info=server_info;
              (void) FormatLocaleString(cache_info->cache_filename,
                MagickPathExtent,"%s:%d",GetDistributeCacheHostname(
                (DistributeCacheInfo *) cache_info->server_info),
                GetDistributeCachePort((DistributeCacheInfo *)
                cache_info->server_info));
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (cache_info->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    GetDistributeCacheFile((DistributeCacheInfo *)
                    cache_info->server_info),type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
      /* no disk quota and no reachable server: give up */
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create pixel cache on disk.
  */
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) &&
      (cache_info->mode != PersistMode))
    {
      /* discard any stale cache file so a fresh one is created */
      (void) ClosePixelCacheOnDisk(cache_info);
      *cache_info->cache_filename='\0';
    }
  if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
        image->filename);
      return(MagickFalse);
    }
  status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
    cache_info->length);
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToExtendCache",
        image->filename);
      return(MagickFalse);
    }
  cache_info->type=DiskCache;
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if (length == (MagickSizeType) ((size_t) length))
    {
      /*
        Preferred disk variant: memory-map the cache file.
      */
      status=AcquireMagickResource(MapResource,cache_info->length);
      if (status != MagickFalse)
        {
          cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode,
            cache_info->offset,(size_t) cache_info->length);
          if (cache_info->pixels == (Quantum *) NULL)
            {
              /* mapping failed: restore prior pixels and release the quota */
              cache_info->mapped=source_info.mapped;
              cache_info->pixels=source_info.pixels;
              RelinquishMagickResource(MapResource,cache_info->length);
            }
          else
            {
              /*
                Create file-backed memory-mapped pixel cache.
              */
              (void) ClosePixelCacheOnDisk(cache_info);
              cache_info->type=MapCache;
              cache_info->mapped=MagickTrue;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  cache_info->number_channels*number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (cache_info->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    cache_info->file,type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
    }
  /*
    Last resort: plain (unmapped) disk cache.
  */
  status=MagickTrue;
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (cache_info->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
        MagickPathExtent,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MagickPathExtent,
        "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,(double)
        cache_info->number_channels,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  if (status == 0)
    {
      cache_info->type=UndefinedCache;
      return(MagickFalse);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r s i s t P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PersistPixelCache() attaches to or initializes a persistent pixel cache. A
% persistent pixel cache is one that resides on disk and is not destroyed
% when the program exits.
%
% The format of the PersistPixelCache() method is:
%
% MagickBooleanType PersistPixelCache(Image *image,const char *filename,
% const MagickBooleanType attach,MagickOffsetType *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filename: the persistent pixel cache filename.
%
%    o attach: A value other than zero attaches to an existing persistent
%      pixel cache; zero clones the current cache into a new persistent one.
%
% o initialize: A value other than zero initializes the persistent pixel
% cache.
%
% o offset: the offset in the persistent cache to store pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  /*
    Attach to (attach != MagickFalse) or create (attach == MagickFalse) a
    persistent, disk-resident pixel cache at `filename'.  In both paths
    *offset is advanced past this image's pixels, rounded up to the next
    page boundary, so successive images can share one cache file.
  */
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* make sure any device-side pixels are flushed to host memory first */
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache.
      */
      if (cache_info->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MagickPathExtent);
      cache_info->type=MapCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      /* advance past this image's pixels, page-aligned */
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  /*
    Clone persistent pixel cache.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /* build a disk-backed clone describing the same pixel geometry */
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,MagickPathExtent);
  clone_info->file=(-1);
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->alpha_trait=cache_info->alpha_trait;
  clone_info->channels=cache_info->channels;
  clone_info->columns=cache_info->columns;
  clone_info->rows=cache_info->rows;
  clone_info->number_channels=cache_info->number_channels;
  clone_info->metacontent_extent=cache_info->metacontent_extent;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  (void) memcpy(clone_info->channel_map,cache_info->channel_map,
    MaxPixelChannels*sizeof(*cache_info->channel_map));
  clone_info->offset=(*offset);
  status=OpenPixelCacheOnDisk(clone_info,WriteMode);
  if (status != MagickFalse)
    status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  /* advance past this image's pixels, page-aligned */
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelCacheNexus() method is:
%
% Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% const MagickBooleanType clone,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to set.
%
% o clone: clone the pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  /*
    Stage a writable region on the given nexus without reading the existing
    pixel values (the caller initializes them); the pixels reach the cache
    on a later SyncAuthenticPixelsCache().  Returns NULL if the geometry
    falls outside the cache or an offset overflows.
  */
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  Quantum
    *magick_restrict pixels;

  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((Quantum *) NULL);
    }
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((Quantum *) NULL);  /* offset arithmetic overflowed */
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  /* check the far corner of the region also lies within the cache */
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((Quantum *) NULL);
  /*
    Return pixel cache.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelsCache() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelsCache() method is:
%
% Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  /*
    Queue a writable pixel region on the calling thread's cache nexus; the
    staged pixels are committed later by SyncAuthenticPixelsCache().
  */
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u e u e A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixels() queues a mutable pixel region. If the region is
% successfully initialized a pointer to a Quantum array representing the
% region is returned, otherwise NULL is returned. The returned pointer may
% point to a temporary working buffer for the pixels or it may point to the
% final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This is useful if the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not
% initialize the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in memory, or in a
% memory-mapped file. The returned pointer must *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
% obtain the meta-content (of type void) corresponding to the region.
%  Once the Quantum (and/or metacontent) array has been updated, the
% changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the QueueAuthenticPixels() method is:
%
% Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Queue a mutable pixel region.  A registered queue handler takes
    precedence; otherwise the request goes through this thread's nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    return(cache_info->methods.queue_authentic_pixels_handler(image,x,y,
      columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCacheMetacontent() reads metacontent from the specified region of
% the pixel cache.
%
% The format of the ReadPixelCacheMetacontent() method is:
%
% MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the metacontent.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    i;

  ssize_t
    count;

  /*
    Read up to `length` bytes from the on-disk pixel cache at `offset` into
    `buffer`, retrying interrupted reads.  Returns the number of bytes read;
    a short count signals end-of-file or an I/O error to the caller.
  */
#if !defined(MAGICKCORE_HAVE_PREAD)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX));
#else
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX),offset+i);
#endif
    if (count == 0)
      break;  /* end-of-file: retrying would spin forever at this offset */
    if (count < 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* hard I/O error; only EINTR is retried */
      }
  }
  return(i);
}
static MagickBooleanType ReadPixelCacheMetacontent(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  ssize_t
    y;

  unsigned char
    *magick_restrict q;

  size_t
    rows;

  /*
    Read the metacontent of the nexus region from the pixel cache backing
    store (memory, disk, or distributed cache server) into the nexus'
    metacontent buffer.  Returns MagickTrue on success.
  */
  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly; nothing to do */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;  /* metacontent bytes per region row */
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  q=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      unsigned char
        *magick_restrict p;

      /*
        Read meta-content from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* Full-width region: collapse all rows into one copy. */
          length=extent;
          rows=1UL;
        }
      p=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->metacontent_extent*cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read meta content from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          /* Full-width region small enough for a single read. */
          length=extent;
          rows=1UL;
        }
      /* On disk the metacontent follows all pixel data; `extent` is reused
         here as the pixel count of the whole cache. */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;  /* short read; reported after the switch */
        offset+=cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read metacontent from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* one row per server round trip */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      /* One of the row transfers above came up short. */
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCachePixels() reads pixels from the specified region of the pixel
% cache.
%
% The format of the ReadPixelCachePixels() method is:
%
% MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  Quantum
    *magick_restrict q;

  ssize_t
    y;

  size_t
    number_channels,
    rows;

  /*
    Read the pixels of the nexus region from the pixel cache backing store
    (memory, disk, or distributed cache server) into the nexus' pixel
    buffer.  Returns MagickTrue on success.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly; nothing to do */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);  /* y*columns overflowed */
  offset+=nexus_info->region.x;
  number_channels=cache_info->number_channels;
  length=(MagickSizeType) number_channels*nexus_info->region.width*
    sizeof(Quantum);
  if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width)
    return(MagickFalse);  /* per-row byte length overflowed */
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);  /* total byte extent overflowed (or empty region) */
  y=0;
  q=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      Quantum
        *magick_restrict p;

      /*
        Read pixels from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* Full-width region: collapse all rows into one copy. */
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          /* Full-width region small enough for a single read. */
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*q),length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;  /* short read; reported after the switch */
        offset+=cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* one row per server round trip */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      /* One of the row transfers above came up short. */
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e f e r e n c e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferencePixelCache() increments the reference count associated with the
% pixel cache returning a pointer to the cache.
%
% The format of the ReferencePixelCache method is:
%
%      Cache ReferencePixelCache(Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
*/
MagickPrivate Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Increment the cache's reference count under its semaphore and return the
    same cache pointer.
  */
  assert(cache != (Cache) NULL);  /* was (Cache *) NULL: wrong cast type */
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheChannels() resets the pixel cache channels.
%
% The format of the ResetPixelCacheChannels method is:
%
%      void ResetPixelCacheChannels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate void ResetPixelCacheChannels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Synchronize the pixel cache's channel count with the image's current
    pixel channel layout.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  cache_info->number_channels=(size_t) GetPixelChannels(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t C a c h e A n o n y m o u s M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetCacheAnonymousMemory() resets the anonymous_memory value.
%
% The format of the ResetCacheAnonymousMemory method is:
%
% void ResetCacheAnonymousMemory(void)
%
*/
MagickPrivate void ResetCacheAnonymousMemory(void)
{
  /* Revert to heap-backed nexus staging buffers; the flag is consulted in
     AcquireCacheNexusPixels(). */
  cache_anonymous_memory=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e E p o c h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheEpoch() resets the pixel cache epoch.
%
% The format of the ResetPixelCacheEpoch method is:
%
% void ResetPixelCacheEpoch(void)
%
*/
MagickPrivate void ResetPixelCacheEpoch(void)
{
  /* Reset the module-level cache epoch counter to its initial value. */
  cache_epoch=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
% The format of the SetPixelCacheMethods() method is:
%
% SetPixelCacheMethods(Cache *,CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Set cache pixel methods: copy each handler that is non-NULL in
    cache_methods into the cache, leaving the existing handler in place
    otherwise.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_metacontent_from_handler !=
      (GetVirtualMetacontentFromHandler) NULL)
    cache_info->methods.get_virtual_metacontent_from_handler=
      cache_methods->get_virtual_metacontent_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    cache_info->methods.get_authentic_metacontent_from_handler=
      cache_methods->get_authentic_metacontent_from_handler;
  /*
    Test the candidate handler from cache_methods, not the handler already
    installed in cache_info (the previous code checked the installed one,
    which could replace a valid handler with NULL).
  */
  get_one_virtual_pixel_from_handler=
    cache_methods->get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e N e x u s P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheNexusPixels() defines the region of the cache for the
% specified cache nexus.
%
% The format of the SetPixelCacheNexusPixels() method is:
%
%      Quantum *SetPixelCacheNexusPixels(
% const CacheInfo *magick_restrict cache_info,const MapMode mode,
% const ssize_t x,const ssize_t y,const size_t width,const size_t height,
% const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o x,y,width,height: define the region of this particular cache nexus.
%
% o buffered: if true, nexus pixels are buffered.
%
% o nexus_info: the cache nexus to set.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MagickSizeType length,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /*
    Allocate (or anonymously memory-map) a staging buffer of `length` bytes
    for the cache nexus.  On failure an exception is raised and MagickFalse
    is returned.
  */
  if (length != (MagickSizeType) ((size_t) length))
    {
      /* The request does not fit in a size_t on this platform. */
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
  if (cache_anonymous_memory > 0)
    {
      nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t) length);
      if (nexus_info->cache != (Quantum *) NULL)
        nexus_info->mapped=MagickTrue;
    }
  else
    {
      nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1,
        (size_t) length));
      if (nexus_info->cache != (Quantum *) NULL)
        (void) memset(nexus_info->cache,0,(size_t) length);
    }
  if (nexus_info->cache == (Quantum *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=length;
  return(MagickTrue);
}
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  unsigned char
    *address;

  /*
    Hint the next cache line of the nexus pixels into the CPU cache; reads
    and writes use distinct prefetch flavors.  The literal arguments are
    kept constant for prefetch intrinsics that require it.
  */
  if (nexus_info->length < CACHE_LINE_SIZE)
    return;
  address=(unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE;
  if (mode == ReadMode)
    MagickCachePrefetch(address,0,1);
  else
    MagickCachePrefetch(address,1,1);
}
static inline MagickBooleanType ValidatePixelOffset(const ssize_t x,
  const size_t a)
{
  /*
    Reject offsets where x+a (or x-a) would overflow the ssize_t range;
    returns MagickTrue when the offset is safe to use.
  */
  if (((x >= 0) && (x >= ((ssize_t) MAGICK_SSIZE_MAX-(ssize_t) a))) ||
      (x <= ((ssize_t) MAGICK_SSIZE_MIN+(ssize_t) a)))
    return(MagickFalse);
  return(MagickTrue);
}
static Quantum *SetPixelCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MapMode mode,
  const ssize_t x,const ssize_t y,const size_t width,const size_t height,
  const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  /*
    Bind the cache nexus to the region (x,y,width,height).  When the region
    lies wholly inside a memory- or map-backed cache and spans whole rows
    (or a single in-bounds partial row), the nexus aliases the cache pixels
    directly; otherwise a staging buffer is (re)allocated.  Returns the
    nexus pixels, or NULL on error.
  */
  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((Quantum *) NULL);
  assert(nexus_info->signature == MagickCoreSignature);
  (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
  if ((width == 0) || (height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "NoPixelsDefinedInCache","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  if (((MagickSizeType) width > cache_info->width_limit) ||
      ((MagickSizeType) height > cache_info->height_limit) ||
      (ValidatePixelOffset(x,width) == MagickFalse) ||
      (ValidatePixelOffset(y,height) == MagickFalse))
    {
      /* Region exceeds policy limits or would overflow offset arithmetic. */
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "WidthOrHeightExceedsLimit","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      if (((x >= 0) && (y >= 0) &&
          (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) &&
          (((x == 0) && (width == cache_info->columns)) || ((height == 1) &&
          (((ssize_t) width+x-1) < (ssize_t) cache_info->columns))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) y*cache_info->columns+x;
          nexus_info->pixels=cache_info->pixels+cache_info->number_channels*
            offset;
          nexus_info->metacontent=(void *) NULL;
          if (cache_info->metacontent_extent != 0)
            nexus_info->metacontent=(unsigned char *) cache_info->metacontent+
              offset*cache_info->metacontent_extent;
          nexus_info->region.width=width;
          nexus_info->region.height=height;
          nexus_info->region.x=x;
          nexus_info->region.y=y;
          nexus_info->authentic_pixel_cache=MagickTrue;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  number_pixels=(MagickSizeType) width*height;
  length=MagickMax(number_pixels,MagickMax(cache_info->columns,
    cache_info->rows))*cache_info->number_channels*sizeof(*nexus_info->pixels);
  if (cache_info->metacontent_extent != 0)
    length+=number_pixels*cache_info->metacontent_extent;
  status=MagickTrue;
  if (nexus_info->cache == (Quantum *) NULL)
    status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
  else
    if (nexus_info->length < length)
      {
        /* Existing staging buffer is too small; grow it. */
        RelinquishCacheNexusPixels(nexus_info);
        status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
      }
  if (status == MagickFalse)
    return((Quantum *) NULL);
  nexus_info->pixels=nexus_info->cache;
  nexus_info->metacontent=(void *) NULL;
  if (cache_info->metacontent_extent != 0)
    nexus_info->metacontent=(void *) (nexus_info->pixels+
      cache_info->number_channels*number_pixels);
  nexus_info->region.width=width;
  nexus_info->region.height=height;
  nexus_info->region.x=x;
  nexus_info->region.y=y;
  nexus_info->authentic_pixel_cache=cache_info->type == PingCache ?
    MagickTrue : MagickFalse;
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  return(nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
% pixel cache and returns the previous setting. A virtual pixel is any pixel
% access that is outside the boundaries of the image cache.
%
% The format of the SetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
% const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  CacheView
    *magick_restrict image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Mark the image as blending and set the alpha channel of every pixel to
    `alpha`, processing rows in parallel when OpenMP is available.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);  /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    status=SyncCacheViewAuthenticPixels(image_view,exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  VirtualPixelMethod
    method;

  /*
    Install the new virtual pixel method and return the previous one.  For
    methods that depend on alpha or color state, bring the image into a
    compatible state first.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        /* Enable alpha when the background color carries it... */
        if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
            (image->alpha_trait == UndefinedPixelTrait))
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        /* ...and leave gray colorspace when the background is not gray. */
        if ((IsPixelInfoGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace(image,sRGBColorspace,exception);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        /* Transparent virtual pixels require an alpha channel. */
        if (image->alpha_trait == UndefinedPixelTrait)
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        break;
      }
      default:
        break;
    }
  return(method);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have
% been completed and updates the host memory.
%
% The format of the SyncAuthenticOpenCLBuffer() method is:
%
% void SyncAuthenticOpenCLBuffer(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  /*
    Fold any pending OpenCL state back into a memory-backed cache; other
    cache types, or caches without an OpenCL association, are untouched.
  */
  assert(cache_info != (CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->type == MemoryCache) &&
      (cache_info->opencl != (MagickCLCacheInfo) NULL))
    {
      /*
        Ensure single threaded access to OpenCL environment.
      */
      LockSemaphoreInfo(cache_info->semaphore);
      cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
}
MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
  /*
    Complete outstanding OpenCL operations for the image's pixel cache and
    update host memory.
  */
  assert(image != (const Image *) NULL);
  CopyOpenCLBuffer((CacheInfo *) image->cache);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
% in-memory or disk cache. The method returns MagickTrue if the pixel region
% is synced, otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelCacheNexus() method is:
%
% MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to sync.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  if (image->mask_trait != UpdatePixelTrait)
    {
      /*
        Apply any write/composite masks before committing the region
        (skipped while the mask channel itself is being updated).
      */
      if (((image->channels & WriteMaskChannel) != 0) &&
          (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
      if (((image->channels & CompositeMaskChannel) != 0) &&
          (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
    }
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      /* Nexus aliases the cache directly; just mark the image modified. */
      if (image->taint == MagickFalse)
        image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  if ((cache_info->metacontent_extent != 0) &&
      (WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if ((status != MagickFalse) && (image->taint == MagickFalse))
    image->taint=MagickTrue;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
% or disk cache. The method returns MagickTrue if the pixel region is synced,
% otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelsCache() method is:
%
% MagickBooleanType SyncAuthenticPixelsCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  /*
    Save the authentic pixels held in this thread's cache nexus back to the
    in-memory or disk pixel cache.
  */
  CacheInfo
    *magick_restrict cache;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache=(CacheInfo *) image->cache;
  assert(cache->signature == MagickCoreSignature);
  assert(thread_id < (int) cache->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache->nexus_info[thread_id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncAuthenticPixels() method is:
%
% MagickBooleanType SyncAuthenticPixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  /*
    Flush the authentic pixel region of the calling thread to the pixel
    cache.  A user-installed sync handler, when present, takes precedence
    over the default nexus-based sync.
  */
  CacheInfo
    *magick_restrict cache;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache=(CacheInfo *) image->cache;
  assert(cache->signature == MagickCoreSignature);
  if (cache->methods.sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    return(cache->methods.sync_authentic_pixels_handler(image,exception));
  assert(thread_id < (int) cache->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache->nexus_info[thread_id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncImagePixelCache() method is:
%
% MagickBooleanType SyncImagePixelCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  /*
    Synchronize the image pixel cache; success is simply whether the cache
    could be acquired.
  */
  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  if (GetImagePixelCache(image,MagickTrue,exception) == (Cache) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCacheMetacontent() writes the meta-content to the specified region
% of the pixel cache.
%
% The format of the WritePixelCacheMetacontent() method is:
%
% MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the meta-content.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /*
    Write the meta-content of the nexus region to the pixel cache.  The
    three cache backends (memory/map, disk, distributed) share the same
    row-by-row copy strategy; when the nexus spans full cache rows the copy
    collapses into a single transfer (rows=1, length=extent).
  */
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  const unsigned char
    *magick_restrict p;

  ssize_t
    y;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly; nothing to copy */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=(MagickSizeType) length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      unsigned char
        *magick_restrict q;

      /*
        Write associated pixels to memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width*cache_info->metacontent_extent;
        q+=cache_info->columns*cache_info->metacontent_extent;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write associated pixels to disk.  Metacontent is stored after the
        pixel data, hence the extent*number_channels*sizeof(Quantum) skip.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;  /* short write: fall through to the error path below */
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write metacontent to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      /*
        One of the backends bailed out before writing all rows.
      */
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCachePixels() writes image pixels to the specified region of the
% pixel cache.
%
% The format of the WritePixelCachePixels() method is:
%
% MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  /*
    Write the pixels of the nexus region back to the pixel cache.  Like
    WritePixelCacheMetacontent(), the copy runs row-by-row unless the nexus
    covers full cache rows, in which case it degenerates to one transfer.
  */
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  const Quantum
    *magick_restrict p;

  ssize_t
    y;

  size_t
    rows;

  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly; nothing to copy */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width*
    sizeof(Quantum);
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      Quantum
        *magick_restrict q;

      /*
        Write pixels to memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*nexus_info->region.width;
        q+=cache_info->number_channels*cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*p),length,(const unsigned char *)
          p);
        if (count != (MagickOffsetType) length)
          break;  /* short write: fall through to the error path below */
        p+=cache_info->number_channels*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      /*
        One of the backends bailed out before writing all rows.
      */
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
|
XdmfWriter.h | /**
* @file
* This file is part of XdmfWriter
*
* @author Sebastian Rettenberger <sebastian.rettenberger@tum.de>
*
* @copyright Copyright (c) 2014-2017, Technische Universitaet Muenchen.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef XDMFWRITER_XDMFWRITER_H
#define XDMFWRITER_XDMFWRITER_H
#ifdef USE_MPI
#include <mpi.h>
#endif // USE_MPI
#include <fstream>
#include <iomanip>
#include <limits>
#include <sstream>
#include <string>
#include <vector>
#include <type_traits>
#include "utils/env.h"
#include "utils/logger.h"
#include "utils/mathutils.h"
#include "scorep_wrapper.h"
#include "BufferFilter.h"
#include "ParallelVertexFilter.h"
#include "Topology.h"
#include "backends/Backend.h"
namespace xdmfwriter
{
/**
* Writes data in XDMF format
*/
template<TopoType Topo, typename VertexDataType, typename CellDataType = VertexDataType>
class XdmfWriter
{
private:
#ifdef USE_MPI
	MPI_Comm m_comm;
#endif // USE_MPI

	int m_rank;

	std::string m_outputPrefix;

	std::fstream m_xdmfFile;

	/** The backend for large scale I/O */
	backends::Backend<VertexDataType, CellDataType> m_backend;

	/** Names of the cell variables that should be written */
	std::vector<const char*> m_cellVariableNames;

	/** Names of the vertex variables that should be written */
	std::vector<const char*> m_vertexVariableNames;

	/** Vertex filter (only used if vertex filter is enabled) */
	internal::ParallelVertexFilter<VertexDataType> m_vertexFilter;

	/** The buffer filter for vertex data (only used if vertex filter is enabled) */
	internal::BufferFilter<sizeof(VertexDataType)> m_vertexDataFilter;

	/** Only execute the flush on certain time steps */
	unsigned int m_flushInterval;

	/** Output step counter */
	unsigned int m_timeStep;

	/** The current mesh id */
	unsigned int m_meshId;

	/** The timestep counter of the current mesh */
	unsigned int m_meshTimeStep;

	bool m_useVertexFilter;
	bool m_writePartitionInfo;
	bool m_writeClusteringInfo;

	/** Total number of cells/vertices */
	unsigned long m_totalSize[2];

public:
	/**
	 * @param timestep Set this to > 0 to activate append mode
	 */
	XdmfWriter(BackendType backendType,
			const char* outputPrefix,
			unsigned int timeStep = 0)
		: m_rank(0), m_outputPrefix(outputPrefix),
		  m_backend(backendType),
		  m_flushInterval(0),
		  m_timeStep(timeStep), m_meshId(0), m_meshTimeStep(0),
		  m_useVertexFilter(true), m_writePartitionInfo(true), m_writeClusteringInfo(false)
	{
#ifdef USE_MPI
		setComm(MPI_COMM_WORLD);
#endif // USE_MPI
	}

	virtual ~XdmfWriter()
	{
		close();
	}

#ifdef USE_MPI
	/**
	 * Sets the communicator that should be used. Default is MPI_COMM_WORLD.
	 */
	void setComm(MPI_Comm comm)
	{
		m_comm = comm;
		MPI_Comm_rank(comm, &m_rank);

		m_backend.setComm(comm);
		m_vertexFilter.setComm(comm);
	}
#endif // USE_MPI

	/**
	 * Initializes the writer: registers the variables, opens the backend
	 * and creates (or, in append mode, re-positions within) the XDMF file.
	 */
	void init(const std::vector<const char*> &cellVariableNames, const std::vector<const char*> &vertexVariableNames,
			bool useVertexFilter = true, bool writePartitionInfo = true, bool writeClusteringInfo = false)
	{
		m_cellVariableNames = cellVariableNames;
		m_vertexVariableNames = vertexVariableNames;
		m_useVertexFilter = useVertexFilter;
		m_writePartitionInfo = writePartitionInfo;
		m_writeClusteringInfo = writeClusteringInfo;

		int nProcs = 1;
#ifdef USE_MPI
		MPI_Comm_size(m_comm, &nProcs);
#endif // USE_MPI
		// A vertex filter is pointless (and disabled) for a single process
		if (nProcs == 1)
			m_useVertexFilter = false;

		// Create variable data for the backend
		std::vector<backends::VariableData> cellVariableData;
		cellVariableData.push_back(backends::VariableData("connect", backends::UNSIGNED_LONG, internal::Topology<Topo>::size(), false));
		if (writePartitionInfo)
			cellVariableData.push_back(backends::VariableData("partition", backends::INT, 1, false));
		if (writeClusteringInfo)
			cellVariableData.push_back(backends::VariableData("clustering", backends::INT, 1, false));
		for (std::vector<const char*>::const_iterator it = cellVariableNames.begin();
				it != cellVariableNames.end(); ++it) {
			cellVariableData.push_back(backends::VariableData(*it, backends::FLOAT, 1, true));
		}

		std::vector<backends::VariableData> vertexVariableData;
		vertexVariableData.push_back(backends::VariableData("geometry", backends::FLOAT, 3, false));
		for (std::vector<const char*>::const_iterator it = vertexVariableNames.begin();
				it != vertexVariableNames.end(); ++it) {
			vertexVariableData.push_back(backends::VariableData(*it, backends::FLOAT, 1, true));
		}

		// Open the backend (truncate only when starting from step 0)
		m_backend.open(m_outputPrefix, cellVariableData, vertexVariableData, m_timeStep == 0);

		// Write the XML file (rank 0 only)
		if (m_rank == 0) {
			std::string xdmfName = m_outputPrefix + ".xdmf";

			std::ofstream(xdmfName.c_str(), std::ios::app).close(); // Create the file (if it does not exist)
			m_xdmfFile.open(xdmfName.c_str());

			m_xdmfFile << "<?xml version=\"1.0\" ?>" << std::endl
					<< "<!DOCTYPE Xdmf SYSTEM \"Xdmf.dtd\" []>" << std::endl
					<< "<Xdmf Version=\"2.0\">" << std::endl
					<< " <Domain>" << std::endl
					<< "  <Grid Name=\"TimeSeries\" GridType=\"Collection\" CollectionType=\"Temporal\">" << std::endl;

			if (m_timeStep == 0)
				closeXdmf();
			else {
				// Jump the correct position in the file
				std::ostringstream tStartStream;
				timeStepStartXdmf(m_timeStep-1, tStartStream);
				std::string tStart = tStartStream.str();

				// Find beginning of the (correct) time step
				std::string line;
				std::size_t pos;
				while (getline(m_xdmfFile, line)) {
					pos = line.find(tStart);
					if (pos != std::string::npos)
						break;
				}

				if (!m_xdmfFile)
					logError() << "Unable to find time step for appending";

				// Extract mesh id and mesh step
				std::istringstream ss(line.substr(pos + tStart.size()));
				ss.seekg(13, std::iostream::cur); // Skip "<!-- mesh id: "
				ss >> m_meshId;
				ss.seekg(13, std::iostream::cur); // Skip ", mesh step: "
				ss >> m_meshTimeStep;

				logInfo() << "Found mesh" << m_meshId << "in step" << m_meshTimeStep;
				m_meshId++;
				m_meshTimeStep++;

				// Find end of this time step
				while (getline(m_xdmfFile, line)) {
					if (line.find("</Grid>") != std::string::npos)
						break;
				}
			}
		}

#ifdef USE_MPI
		if (m_timeStep != 0) {
			// Broadcast the some information if we restart
			unsigned int buf[2] = {m_meshId, m_meshTimeStep};
			MPI_Bcast(buf, 2, MPI_UNSIGNED, 0, m_comm);
			m_meshId = buf[0];
			m_meshTimeStep = buf[1];
		}
#endif // USE_MPI

		// Get flush interval
		m_flushInterval = utils::Env::get<unsigned int>("XDMFWRITER_FLUSH_INTERVAL", 1);
	}

	/**
	 * @param restarting Set this to <code>true</code> if the codes restarts from a checkpoint
	 *  and the XDMF writer should continue with the old mesh
	 */
	void setMesh(unsigned int numCells,
			const unsigned int* cells,
			unsigned int numVertices,
			const VertexDataType *vertices,
			bool restarting = false)
	{
#ifdef USE_MPI
		// Apply vertex filter
		internal::BufferFilter<3*sizeof(VertexDataType)> vertexRemover;
		if (m_useVertexFilter) {
			// Filter duplicate vertices
			m_vertexFilter.filter(numVertices, vertices);

			if (!restarting) {
				vertexRemover.init(numVertices, numVertices - m_vertexFilter.numLocalVertices(), m_vertexFilter.duplicates());
				vertices = static_cast<const VertexDataType*>(vertexRemover.filter(vertices));
			}

			// Set the vertex data filter
			if (m_backend.numVertexVars() > 0)
				m_vertexDataFilter.init(numVertices, numVertices - m_vertexFilter.numLocalVertices(), m_vertexFilter.duplicates());

			numVertices = m_vertexFilter.numLocalVertices();
		}
#endif // USE_MPI

		// Set the backend mesh
		m_totalSize[0] = numCells;
		m_totalSize[1] = numVertices;
		unsigned long offset[2] = {numCells, numVertices};
#ifdef USE_MPI
		MPI_Allreduce(MPI_IN_PLACE, m_totalSize, 2, MPI_UNSIGNED_LONG, MPI_SUM, m_comm);
		MPI_Scan(MPI_IN_PLACE, offset, 2, MPI_UNSIGNED_LONG, MPI_SUM, m_comm);
#endif // USE_MPI
		// Convert the inclusive scan into this rank's starting offsets
		offset[0] -= numCells;
		offset[1] -= numVertices;

		// Add a new mesh to the backend
		unsigned int localSize[2] = {numCells, numVertices};
		// Use the old mesh id for restarts
		m_backend.setMesh((restarting ? m_meshId-1 : m_meshId), m_totalSize, localSize, offset);

		if (restarting)
			// Can skip writing the mesh if we are restarting
			return;

		// Add vertex offset to all cells and convert to unsigned long
		unsigned long *h5Cells = new unsigned long[numCells * internal::Topology<Topo>::size()];
#ifdef USE_MPI
		if (m_useVertexFilter) {
#ifdef _OPENMP
			#pragma omp parallel for schedule(static)
#endif // _OPENMP
			for (size_t i = 0; i < numCells*internal::Topology<Topo>::size(); i++)
				h5Cells[i] = m_vertexFilter.globalIds()[cells[i]];
		} else
#endif // USE_MPI
		{
#ifdef _OPENMP
			#pragma omp parallel for schedule(static)
#endif // _OPENMP
			for (size_t i = 0; i < numCells*internal::Topology<Topo>::size(); i++)
				h5Cells[i] = cells[i] + offset[1];
		}
		m_backend.writeCellData(0, 0, h5Cells);
		delete [] h5Cells;

		if (m_writePartitionInfo) {
			// Create partition information
			unsigned int *partInfo = new unsigned int[numCells];
#ifdef _OPENMP
			#pragma omp parallel for schedule(static)
#endif // _OPENMP
			for (unsigned int i = 0; i < numCells; i++)
				partInfo[i] = m_rank;

			m_backend.writeCellData(0, 1, partInfo);

			delete [] partInfo;
		}

		m_backend.writeVertexData(0, 0, vertices);

		m_meshId++;
		m_meshTimeStep = 0;
	}

	/**
	 * Add a new output time step
	 */
	void addTimeStep(double time)
	{
		if (m_rank == 0) {
			unsigned long alignedSize[2] = {m_backend.numAlignedCells(), m_backend.numAlignedVertices()};

			m_xdmfFile << "   ";
			timeStepStartXdmf(m_timeStep, m_xdmfFile);
			// Generate information for restarting (WARNING: if this line is modified the initialization has to be adapted)
			m_xdmfFile << "<!-- mesh id: " << (m_meshId-1) << ", mesh step: " << m_meshTimeStep << " -->";
			m_xdmfFile << std::endl;
			m_xdmfFile << "    <Topology TopologyType=\"" << internal::Topology<Topo>::name() << "\" NumberOfElements=\"" << m_totalSize[0] << "\">" << std::endl
					// This should be UInt but for some reason this does not work with binary data
					<< "     <DataItem NumberType=\"Int\" Precision=\"8\" Format=\""
					<< m_backend.format() << "\" Dimensions=\"" << m_totalSize[0] << " " << internal::Topology<Topo>::size() << "\">"
					<< m_backend.cellDataLocation(m_meshId-1, "connect")
					<< "</DataItem>" << std::endl
					<< "    </Topology>" << std::endl
					<< "    <Geometry name=\"geo\" GeometryType=\"XYZ\" NumberOfElements=\"" << m_totalSize[1] << "\">" << std::endl
					<< "     <DataItem NumberType=\"Float\" Precision=\"" << sizeof(VertexDataType) << "\" Format=\""
					<< m_backend.format() << "\" Dimensions=\"" << m_totalSize[1] << " 3\">"
					<< m_backend.vertexDataLocation(m_meshId-1, "geometry")
					<< "</DataItem>" << std::endl
					<< "    </Geometry>" << std::endl
					<< "    <Time Value=\"" << time << "\"/>" << std::endl;
			if (m_writePartitionInfo) {
				m_xdmfFile << "    <Attribute Name=\"partition\" Center=\"Cell\">" << std::endl
						<< "     <DataItem NumberType=\"Int\" Precision=\"4\" Format=\""
						<< m_backend.format() << "\" Dimensions=\"" << m_totalSize[0] << "\">"
						<< m_backend.cellDataLocation(m_meshId-1, "partition")
						<< "</DataItem>" << std::endl
						<< "    </Attribute>" << std::endl;
			}
			if (m_writeClusteringInfo) {
				m_xdmfFile << "    <Attribute Name=\"clustering\" Center=\"Cell\">" << std::endl
						<< "     <DataItem NumberType=\"Int\" Precision=\"4\" Format=\""
						<< m_backend.format() << "\" Dimensions=\"" << m_totalSize[0] << "\">"
						<< m_backend.cellDataLocation(m_meshId-1, "clustering")
						<< "</DataItem>" << std::endl
						<< "    </Attribute>" << std::endl;
			}
			for (size_t i = 0; i < m_cellVariableNames.size(); i++) {
				m_xdmfFile << "    <Attribute Name=\"" << m_cellVariableNames[i] << "\" Center=\"Cell\">" << std::endl
						<< "     <DataItem ItemType=\"HyperSlab\" Dimensions=\"" << m_totalSize[0] << "\">" << std::endl
						<< "      <DataItem NumberType=\"UInt\" Precision=\"4\" Format=\"XML\" Dimensions=\"3 2\">"
						<< m_meshTimeStep << " 0 1 1 1 " << m_totalSize[0] << "</DataItem>" << std::endl
						<< "      <DataItem NumberType=\"Float\" Precision=\"" << sizeof(CellDataType) << "\" Format=\""
						<< m_backend.format() << "\" Dimensions=\""
						<< (m_meshTimeStep + 1) << ' ' << alignedSize[0] << "\">"
						<< m_backend.cellDataLocation(m_meshId-1, m_cellVariableNames[i])
						<< "</DataItem>" << std::endl
						<< "     </DataItem>" << std::endl
						<< "    </Attribute>" << std::endl;
			}
			for (size_t i = 0; i < m_vertexVariableNames.size(); i++) {
				m_xdmfFile << "    <Attribute Name=\"" << m_vertexVariableNames[i] << "\" Center=\"Node\">" << std::endl
						<< "     <DataItem ItemType=\"HyperSlab\" Dimensions=\"" << m_totalSize[1] << "\">" << std::endl
						<< "      <DataItem NumberType=\"UInt\" Precision=\"4\" Format=\"XML\" Dimensions=\"3 2\">"
						<< m_meshTimeStep << " 0 1 1 1 " << m_totalSize[1] << "</DataItem>" << std::endl
						<< "      <DataItem NumberType=\"Float\" Precision=\"" << sizeof(VertexDataType) << "\" Format=\""
						<< m_backend.format() << "\" Dimensions=\""
						<< (m_meshTimeStep + 1) << ' ' << alignedSize[1] << "\">"
						<< m_backend.vertexDataLocation(m_meshId-1, m_vertexVariableNames[i])
						<< "</DataItem>" << std::endl
						<< "     </DataItem>" << std::endl
						<< "    </Attribute>" << std::endl;
			}
			m_xdmfFile << "   </Grid>" << std::endl;

			closeXdmf();
		}

		m_timeStep++;
		m_meshTimeStep++;
	}

	/**
	 * Write clustering data for each cell
	 *
	 * Note: has no affect if the user didn't enable the functionality while passing parameters into
	 * <code>init</code> method
	 *
	 * @param data comes from the caller
	 */
	void writeClusteringInfo(const unsigned int *data)
	{
		SCOREP_USER_REGION("XDMFWriter_writeClustering", SCOREP_USER_REGION_TYPE_FUNCTION);

		if (m_writeClusteringInfo) {
			const int ClusteringId = (m_writePartitionInfo ? 2 : 1);
			m_backend.writeCellData(0, ClusteringId, data);
		}
	}

	/**
	 * Write cell data for one variable at the current time step
	 *
	 * @param id The number of the variable that should be written
	 */
	void writeCellData(unsigned int id, const CellDataType *data)
	{
		SCOREP_USER_REGION("XDMFWriter_writeCellData", SCOREP_USER_REGION_TYPE_FUNCTION);

		// Shift past the fixed variables (connect is id 0; partition and
		// clustering follow when enabled)
		int idShift = (m_writePartitionInfo ? 2 : 1);
		idShift += (m_writeClusteringInfo ? 1 : 0);

		m_backend.writeCellData(m_meshTimeStep-1, id + idShift, data);
	}

	/**
	 * Write vertex data for one variable at the current time step
	 *
	 * @param id The number of the variable that should be written
	 */
	void writeVertexData(unsigned int id, const VertexDataType *data)
	{
		// Fixed copy-paste bug: the region was labeled "XDMFWriter_writeCellData"
		SCOREP_USER_REGION("XDMFWriter_writeVertexData", SCOREP_USER_REGION_TYPE_FUNCTION);

		// Filter duplicates if the vertex filter is enabled
		const void* tmp = data;
		if (m_useVertexFilter)
			tmp = m_vertexDataFilter.filter(data);

		m_backend.writeVertexData(m_meshTimeStep-1, id + 1, tmp);
	}

	/**
	 * Flushes the data to disk
	 */
	void flush()
	{
		SCOREP_USER_REGION("XDMFWriter_flush", SCOREP_USER_REGION_TYPE_FUNCTION);

		// Guard against XDMFWRITER_FLUSH_INTERVAL=0 (flush() is only called
		// after init(), but a zero interval would otherwise divide by zero);
		// a zero interval means "flush every step"
		if (m_flushInterval == 0 || m_timeStep % m_flushInterval == 0)
			m_backend.flush();
	}

	/**
	 * Closes the HDF5 file (should be done before MPI_Finalize is called)
	 */
	void close()
	{
		// Close backend
		m_backend.close();
	}

	/**
	 * @return The current time step of the output file
	 */
	unsigned int timestep() const
	{
		return m_timeStep;
	}

private:
	/**
	 * Writes the closing XML tags, then seeks back so the next time step
	 * overwrites them (keeps the file valid after every step).
	 */
	void closeXdmf()
	{
		size_t contPos = m_xdmfFile.tellp();
		m_xdmfFile << "  </Grid>" << std::endl
				<< " </Domain>" << std::endl
				<< "</Xdmf>" << std::endl;
		m_xdmfFile.seekp(contPos);
	}

private:
	/**
	 * Write the beginning of a time step to the stream
	 */
	static void timeStepStartXdmf(unsigned int timestep, std::ostream &s)
	{
		s << "<Grid Name=\"step_" << std::setw(MAX_TIMESTEP_SPACE) << std::setfill('0') << timestep << std::setfill(' ')
			<< "\" GridType=\"Uniform\">";
	}

private:
	static const unsigned int MAX_TIMESTEP_SPACE = 12;
};
}
#endif // XDMFWRITER_XDMFWRITER_H
|
matmul_itlmic_kernel.c | #include <offload.h>
#include <homp.h>
#include "matmul.h"
#ifdef USE_INTEL_MKL
#include <mkl.h>
#endif
/*
 * Offload an i-by-j = (i-by-k) x (k-by-j) matrix multiply (row-major) to an
 * Intel MIC coprocessor using Intel compiler offload pragmas.
 *
 * Parameters:
 *   off     - offloading descriptor; supplies the target device's sysid and
 *             core count
 *   i, j, k - matrix dimensions: a is i x k, b is k x j, c is i x j
 *   a, b, c - host pointers to the matrices; c receives the product
 *
 * With ITLMIC_COMBINED_OFFLOADING undefined, the buffers are assumed to be
 * already allocated and transferred on the device (length(0), no alloc/free);
 * otherwise this single offload moves a and b in and c in/out.  The kernel
 * itself is either an MKL sgemm (USE_INTEL_MKL) or a plain triple loop.
 */
void matmul_itlmic_wrapper(omp_offloading_t *off, long i, long j,long k,REAL *a,REAL *b,REAL *c)
{
    long ii, jj, kk;
    int sysid = off->dev->sysid;          /* which MIC device to target */
    int num_cores = off->dev->num_cores;  /* thread count for the device loop */

#ifdef USE_INTEL_MKL
    /* C = alpha*A*B + beta*C; alpha=1, beta=0 computes a plain product */
    REAL alpha = 1;
    REAL beta = 0;
    //mkl_mic_enable();
#endif

    /* Data clauses only; the compute region follows in the braced block. */
#ifndef ITLMIC_COMBINED_OFFLOADING
#pragma offload target(mic:sysid) in (a: length(0) alloc_if(0) free_if(0)) \
                            in (b: length(0) alloc_if(0) free_if(0)) \
                            in (c: length(0) alloc_if(0) free_if(0))
#else
#pragma offload target(mic:sysid) in (a: length(i*k) align(64)) \
                            in (b: length(k*j) align(64)) \
                            inout (c: length(i*j) align(64))
#endif
    {
#ifdef USE_INTEL_MKL
        /* Row-major sgemm: lda=k, ldb=j, ldc=j */
        cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                    i, j, k, alpha, a, k, b, j, beta, c, j);
#else
        /* Naive triple loop, parallelized over rows on the device */
#pragma omp parallel for simd shared(i, j, k, a, b, c) private(ii, jj, kk) num_threads(num_cores)
        for (ii = 0; ii < i; ii++) {
            for (jj = 0; jj < j; jj++) {
                REAL sum = 0.0;
                for (kk = 0; kk < k; kk++) {
                    sum += a[ii * k + kk] * b[kk * j + jj];
                }
                c[ii * j + jj] = sum;
            }
        }
#endif
    }

    /* Dead code below (disabled): an alternative three-phase offload that
       times mapto / kernel / mapfrom separately via separate offload
       regions.  Kept for reference only. */
#if 0
#if defined (OMP_BREAKDOWN_TIMING)
    omp_event_record_start(&events[acc_mapto_event_index], stream, "ACC_MAPTO", "Accumulated time for mapto data movement for all array");
#endif
#pragma offload target(mic) in (a: length(i*k) alloc_if(1) free_if(0)) \
                            in (b: length(k*j) alloc_if(1) free_if(0)) \
                            in (c: length(i*j) alloc_if(1) free_if(0))
    {
    }
#if defined (OMP_BREAKDOWN_TIMING)
    omp_event_record_stop(&events[acc_mapto_event_index]);
    omp_event_record_start(&events[acc_kernel_exe_event_index], stream, "KERN", "Time for kernel (%s) execution", off_info->name);
#endif
#pragma offload target(mic) nocopy (a: length(i*k) alloc_if(0) free_if(0)) \
                            nocopy (b: length(k*j) alloc_if(0) free_if(0)) \
                            nocopy (c: length(i*j) alloc_if(0) free_if(0))
#pragma omp parallel for simd
    {
        for (ii = 0; ii < i; ii++) {
            for (jj = 0; jj < j; jj++) {
                REAL sum = 0.0;
                for (kk = 0; kk < k; kk++) {
                    sum += a[ii * k + kk] * b[kk * j + jj];
                }
                c[ii * j + jj] = sum;
            }
        }
    }
#if defined (OMP_BREAKDOWN_TIMING)
    omp_event_record_stop(&events[acc_kernel_exe_event_index]);
    omp_event_record_start(&events[acc_mapfrom_event_index], stream, "ACC_MAPFROM", "Accumulated time for mapfrom data movement for all array");
#endif
#pragma offload target(mic) nocopy (a: length(i*k) alloc_if(0) free_if(1)) \
                            nocopy (b: length(k*j) alloc_if(0) free_if(1)) \
                            nocopy (c: length(i*j) alloc_if(0) free_if(1))
    {
    }
#if defined (OMP_BREAKDOWN_TIMING)
    omp_event_record_stop(&events[acc_mapfrom_event_index]);
#endif
#endif
}
|
GB_binop__gt_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__gt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__gt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__gt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__gt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_uint16)
// A*D function (colscale): GB (_AxD__gt_uint16)
// D*A function (rowscale): GB (_DxB__gt_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__gt_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__gt_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_uint16)
// C=scalar+B GB (_bind1st__gt_uint16)
// C=scalar+B' GB (_bind1st_tran__gt_uint16)
// C=A+scalar GB (_bind2nd__gt_uint16)
// C=A'+scalar GB (_bind2nd_tran__gt_uint16)
// C type: bool
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = (aij > bij)
// type of matrix A
#define GB_ATYPE \
uint16_t
// type of matrix B
#define GB_BTYPE \
uint16_t
// type of matrix C
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// (fixed: a dangling "\" after the 0 spliced the next comment line into
// this macro body; the macro must expand to just 0)
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
// (fixed: same dangling "\" as GB_A_IS_PATTERN above)
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
// access the p-th entry of the C value array
#define GB_CX(p) Cx [p]
// binary operator: cij = (aij > bij)
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GT || GxB_NO_UINT16 || GxB_NO_GT_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Disabled for this file: GT is a comparator, not one of the accumulable
// ops listed above, so no dense ewise3-accum kernel is generated.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no mask, no accumulator.
// The loop body comes from the template, specialized by the GB_* macros
// above (GB_BINOP is "z = (x > y)" for GT_UINT16).
void GB (_Cdense_ewise3_noaccum__gt_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.
// The subassign-23 update is compiled out (#if 0) for this operator, so
// when the kernel is enabled this function is a no-op reporting success.
GrB_Info GB (_Cdense_accumB__gt_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// kernel disabled by the GxB_NO_* compile-time flags
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
// Like _Cdense_accumB above, the actual update is compiled out (#if 0)
// for this operator; the function reports success without modifying C.
GrB_Info GB (_Cdense_accumb__gt_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
// kernel disabled by the GxB_NO_* compile-time flags
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, applying the
// GT binop entrywise; the loop lives in the colscale template.
GrB_Info GB (_AxD__gt_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
// kernel disabled by the GxB_NO_* compile-time flags
return (GrB_NO_VALUE) ;
#else
// only the bool value array of C is written here
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, applying the
// GT binop entrywise; the loop lives in the rowscale template.
GrB_Info GB (_DxB__gt_uint16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
// kernel disabled by the GxB_NO_* compile-time flags
return (GrB_NO_VALUE) ;
#else
// only the bool value array of C is written here
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B, where "+" is the GT binop.
// The add template consumes the workspaces declared below and, for
// eWiseUnion, the alpha/beta scalars used in place of missing entries.
GrB_Info GB (_AaddB__gt_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// kernel disabled by the GxB_NO_* compile-time flags
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint16_t alpha_scalar ;
uint16_t beta_scalar ;
if (is_eWiseUnion)
{
// eWiseUnion substitutes these scalars for entries missing in A or B
alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper;
// the entire method (method 08) comes from the meta template.
GrB_Info GB (_AemultB_08__gt_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// kernel disabled by the GxB_NO_* compile-time flags
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for GT (a flipped GT is handled by
// the generator as LT), so only the non-flipped branch is compiled.
GrB_Info GB (_AemultB_02__gt_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
// kernel disabled by the GxB_NO_* compile-time flags
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper and both A and B
// bitmap/full; the whole loop comes from the method-04 template.
GrB_Info GB (_AemultB_04__gt_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
// kernel disabled by the GxB_NO_* compile-time flags
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap;
// the whole method comes from the bitmap-emult template.
GrB_Info GB (_AemultB_bitmap__gt_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// kernel disabled by the GxB_NO_* compile-time flags
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x > Bx [p]) for every entry present in B: apply the GT binop
// with the scalar bound to the first argument.
GrB_Info GB (_bind1st__gt_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
// kernel disabled by the GxB_NO_* compile-time flags
return (GrB_NO_VALUE) ;
#else
bool *Cz = (bool *) Cx_output ;
uint16_t xval = (*((uint16_t *) x_input)) ;
uint16_t *Bvals = (uint16_t *) Bx_input ;
int64_t pB ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pB = 0 ; pB < bnz ; pB++)
{
// only touch positions whose bitmap entry says they exist
if (GBB (Bb, pB))
{
uint16_t bval = GBX (Bvals, pB, false) ;
Cz [pB] = (xval > bval) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] > y) for every entry present in A: apply the GT binop
// with the scalar bound to the second argument.
GrB_Info GB (_bind2nd__gt_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// kernel disabled by the GxB_NO_* compile-time flags
return (GrB_NO_VALUE) ;
#else
bool *Cz = (bool *) Cx_output ;
uint16_t *Avals = (uint16_t *) Ax_input ;
uint16_t yval = (*((uint16_t *) y_input)) ;
int64_t pA ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pA = 0 ; pA < anz ; pA++)
{
// only touch positions whose bitmap entry says they exist
if (GBB (Ab, pA))
{
uint16_t aval = GBX (Avals, pA, false) ;
Cz [pA] = (aval > yval) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// C = op (x, A'): transpose A and apply the GT binop with the scalar
// bound to the first argument; the transpose loop uses GB_CAST_OP.
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB (_bind1st_tran__gt_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
// kernel disabled by the GxB_NO_* compile-time flags
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE (generator boilerplate; re-defines the same type)
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// C = op (A', y): transpose A and apply the GT binop with the scalar
// bound to the second argument; the transpose loop uses GB_CAST_OP.
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB (_bind2nd_tran__gt_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// kernel disabled by the GxB_NO_* compile-time flags
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
dfwavelet.c | /*
* Copyright 2013-2015 The Regents of the University of California.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2013 Frank Ong <frankong@berkeley.edu>
* 2013 Martin Uecker, Pat Virtue, and Mark Murphy
*
*
* Ong F, Uecker M, Tariq U, Hsiao A, Alley MT, Vasanawala SS, Lustig M.
* Robust 4D Flow Denoising using Divergence-free Wavelet Transform,
* Magn Reson Med 2015; 73: 828-842.
*/
#define _GNU_SOURCE
#include <math.h>
#include <string.h>
#include <assert.h>
#include <complex.h>
#ifdef _WIN32
#include "win/rand_r.h"
#endif
#include "num/multind.h"
#include "misc/misc.h"
#include "dfwavelet.h"
#include "dfwavelet_impl.h"
#ifdef USE_CUDA
#include "dfwavelet_kernels.h"
#endif
#define str_eq(s1,s2) (!strcmp ((s1),(s2)))
/******** Header *********/
static void dffwt3_cpu(struct dfwavelet_plan_s* plan, data_t* out_wcdf1,data_t* out_wcdf2,data_t* out_wcn, data_t* in_vx,data_t* in_vy,data_t* in_vz);
static void dfiwt3_cpu(struct dfwavelet_plan_s* plan, data_t* out_vx,data_t* out_vy,data_t* out_vz, data_t* in_wcdf1,data_t* in_wcdf2,data_t* in_wcn);
static void dfsoftthresh_cpu(struct dfwavelet_plan_s* plan,scalar_t dfthresh, scalar_t nthresh, data_t* out_wcdf1,data_t* out_wcdf2,data_t* out_wcn);
static void dfwavthresh3_cpu(struct dfwavelet_plan_s* plan,scalar_t dfthresh, scalar_t nthresh,data_t* out_vx,data_t* out_vy,data_t* out_vz,data_t* in_vx,data_t* in_vy,data_t* in_vz);
void dflincomb_cpu(struct dfwavelet_plan_s* plan,data_t* wc1,data_t* wc2,data_t* wc3);
void dfunlincomb_cpu(struct dfwavelet_plan_s* plan,data_t* wc1,data_t* wc2,data_t* wc3);
static void fwt3_cpu(struct dfwavelet_plan_s* plan, data_t* out, data_t* in,int dir);
static void iwt3_cpu(struct dfwavelet_plan_s* plan, data_t* out, data_t* in,int dir);
static void circshift_cpu(struct dfwavelet_plan_s* plan, data_t *data);
static void circunshift_cpu(struct dfwavelet_plan_s* plan, data_t *data);
static void conv_down_3d(data_t *out, data_t *in, int size1, int skip1, int size2, int skip2, int size3, int skip3, scalar_t *filter, int filterLen);
static void conv_up_3d(data_t *out, data_t *in, int size1, int skip1, int size2, int skip2, int size3, int skip3, scalar_t *filter, int filterLen);
static void mult(data_t* in,scalar_t scalar,int maxInd);
static void create_numLevels(struct dfwavelet_plan_s* plan);
static void create_wavelet_sizes(struct dfwavelet_plan_s* plan);
static void create_wavelet_filters(struct dfwavelet_plan_s* plan);
static void get_noise_amp (struct dfwavelet_plan_s* plan);
/*
 * Build a divergence-free wavelet plan: record the image geometry, build
 * the filters, level count and per-level sizes, and zero the random shift.
 * Caller owns the returned plan and releases it with dfwavelet_free().
 */
struct dfwavelet_plan_s* prepare_dfwavelet_plan(int numdims, long* imSize, long* minSize, data_t* res,int use_gpu)
{
	struct dfwavelet_plan_s* plan = malloc(sizeof *plan);

	plan->use_gpu = use_gpu;
	plan->numdims = numdims;
	plan->imSize = malloc(sizeof(long) * numdims);
	plan->minSize = malloc(sizeof(long) * numdims);
	plan->res = malloc(sizeof(data_t) * numdims);
	plan->percentZero = -1;
	plan->noiseAmp = NULL;

	// copy the per-dimension geometry and count the total pixels
	plan->numPixel = 1;

	for (int d = 0; d < numdims; d++) {

		plan->imSize[d] = imSize[d];
		plan->numPixel *= imSize[d];
		plan->minSize[d] = minSize[d];
		plan->res[d] = res[d];
	}

	// derived wavelet configuration
	create_wavelet_filters(plan);
	create_numLevels(plan);
	create_wavelet_sizes(plan);

	// random shifting starts disabled (all-zero shifts)
	plan->randShift = calloc(plan->numdims, sizeof(int));

	get_noise_amp(plan);

	return plan;
}
/*
 * Forward divergence-free wavelet transform, dispatched on the plan's
 * execution target: 0 = CPU; 1 and 2 = CUDA variants (compiled only with
 * USE_CUDA; 2 presumably takes host-side buffers — see dffwt3_gpuHost).
 */
void dfwavelet_forward(struct dfwavelet_plan_s* plan, data_t* out_wcdf1, data_t* out_wcdf2, data_t* out_wcn, data_t* in_vx, data_t* in_vy, data_t* in_vz)
{
	switch (plan->use_gpu) {

	case 0:
		dffwt3_cpu(plan, out_wcdf1, out_wcdf2, out_wcn, in_vx, in_vy, in_vz);
		break;
#ifdef USE_CUDA
	case 1:
		dffwt3_gpu(plan, out_wcdf1, out_wcdf2, out_wcn, in_vx, in_vy, in_vz);
		break;

	case 2:
		dffwt3_gpuHost(plan, out_wcdf1, out_wcdf2, out_wcn, in_vx, in_vy, in_vz);
		break;
#endif
	default:
		break;
	}
}
/*
 * Inverse divergence-free wavelet transform, dispatched like
 * dfwavelet_forward (0 = CPU; 1/2 = CUDA variants under USE_CUDA).
 */
void dfwavelet_inverse(struct dfwavelet_plan_s* plan, data_t* out_vx,data_t* out_vy,data_t* out_vz, data_t* in_wcdf1,data_t* in_wcdf2,data_t* in_wcn)
{
	switch (plan->use_gpu) {

	case 0:
		dfiwt3_cpu(plan, out_vx, out_vy, out_vz, in_wcdf1, in_wcdf2, in_wcn);
		break;
#ifdef USE_CUDA
	case 1:
		dfiwt3_gpu(plan, out_vx, out_vy, out_vz, in_wcdf1, in_wcdf2, in_wcn);
		break;

	case 2:
		dfiwt3_gpuHost(plan, out_vx, out_vy, out_vz, in_wcdf1, in_wcdf2, in_wcn);
		break;
#endif
	default:
		break;
	}
}
/*
 * Soft-threshold wavelet coefficients in place, dispatched like
 * dfwavelet_forward (0 = CPU; 1/2 = CUDA variants under USE_CUDA).
 * dfthresh applies to the divergence-free bands, nthresh to the rest.
 */
void dfsoft_thresh(struct dfwavelet_plan_s* plan, scalar_t dfthresh, scalar_t nthresh,data_t* wcdf1,data_t* wcdf2, data_t* wcn)
{
	switch (plan->use_gpu) {

	case 0:
		dfsoftthresh_cpu(plan, dfthresh, nthresh, wcdf1, wcdf2, wcn);
		break;
#ifdef USE_CUDA
	case 1:
		dfsoftthresh_gpu(plan, dfthresh, nthresh, wcdf1, wcdf2, wcn);
		break;

	case 2:
		dfsoftthresh_gpuHost(plan, dfthresh, nthresh, wcdf1, wcdf2, wcn);
		break;
#endif
	default:
		break;
	}
}
/*
 * One-shot denoising (forward transform, soft threshold, inverse
 * transform), dispatched like dfwavelet_forward (0 = CPU; 1/2 = CUDA
 * variants under USE_CUDA).
 */
void dfwavelet_thresh(struct dfwavelet_plan_s* plan, scalar_t dfthresh, scalar_t nthresh,data_t* out_vx, data_t* out_vy, data_t* out_vz, data_t* in_vx,data_t* in_vy, data_t* in_vz)
{
	switch (plan->use_gpu) {

	case 0:
		dfwavthresh3_cpu(plan, dfthresh, nthresh, out_vx, out_vy, out_vz, in_vx, in_vy, in_vz);
		break;
#ifdef USE_CUDA
	case 1:
		dfwavthresh3_gpu(plan, dfthresh, nthresh, out_vx, out_vy, out_vz, in_vx, in_vy, in_vz);
		break;

	case 2:
		dfwavthresh3_gpuHost(plan, dfthresh, nthresh, out_vx, out_vy, out_vz, in_vx, in_vy, in_vz);
		break;
#endif
	default:
		break;
	}
}
/*
 * Uniform random integer in [0, limit], drawn from rand_r on the
 * caller-owned state; rejection sampling keeps the distribution unbiased.
 */
static int dfrand_lim(unsigned int* state, int limit) {

	extern int rand_r(unsigned int*);	// POSIX; _GNU_SOURCE exposes it

	int bucket = RAND_MAX / (limit + 1);
	int r;

	do {
		r = rand_r(state) / bucket;

	} while (r > limit);

	return r;
}
/*
 * Draw a fresh random circular shift for each dimension; values fall in
 * [0, 2^numLevels] (dfrand_lim's limit is inclusive).
 */
void dfwavelet_new_randshift (struct dfwavelet_plan_s* plan) {

	int maxShift = 1 << plan->numLevels;

	for (int d = 0; d < plan->numdims; d++)
		plan->randShift[d] = dfrand_lim(&plan->state, maxShift);
}
/* Disable random shifting by zeroing every per-dimension shift. */
void dfwavelet_clear_randshift (struct dfwavelet_plan_s* plan) {

	memset(plan->randShift, 0, sizeof(int) * plan->numdims);
}
/*
 * Release everything prepare_dfwavelet_plan() allocated, then the plan
 * itself.  free(NULL) is a no-op, so noiseAmp needs no guard.
 *
 * NOTE(review): hid0/hid1 are not freed here — confirm that
 * create_wavelet_filters() aliases them into lod0/lod1; otherwise this
 * leaks the high-pass filters.
 */
void dfwavelet_free(struct dfwavelet_plan_s* plan)
{
	free(plan->imSize);
	free(plan->minSize);
	free(plan->lod0);
	free(plan->lod1);
	free(plan->res);
	free(plan->waveSizes);
	free(plan->randShift);
	free(plan->noiseAmp);
	free(plan);
}
////////////// Private Functions //////////////
// Forward divergence-free wavelet transform on the CPU:
// 1) per-component 3D wavelet transform; dir selects which axis gets the
//    second filter pair inside fwt3_cpu
// 2) scale each component by the inverse of its voxel resolution
// 3) recombine the three coefficient sets into the divergence-free basis
void dffwt3_cpu(struct dfwavelet_plan_s* plan, data_t* out_wcdf1,data_t* out_wcdf2,data_t* out_wcn, data_t* in_vx,data_t* in_vy,data_t* in_vz)
{
fwt3_cpu(plan,out_wcdf1,in_vx,0);
fwt3_cpu(plan,out_wcdf2,in_vy,1);
fwt3_cpu(plan,out_wcn,in_vz,2);
mult(out_wcdf1,1/plan->res[0],plan->numCoeff);
mult(out_wcdf2,1/plan->res[1],plan->numCoeff);
mult(out_wcn,1/plan->res[2],plan->numCoeff);
dflincomb_cpu(plan,out_wcdf1,out_wcdf2,out_wcn);
}
// Inverse divergence-free wavelet transform on the CPU — exact mirror of
// dffwt3_cpu: undo the basis recombination, re-apply the resolution
// scaling, then run the per-component inverse wavelet transform.
// NOTE: the input coefficient arrays are modified in place.
void dfiwt3_cpu(struct dfwavelet_plan_s* plan, data_t* out_vx,data_t* out_vy,data_t* out_vz, data_t* in_wcdf1,data_t* in_wcdf2,data_t* in_wcn)
{
dfunlincomb_cpu(plan,in_wcdf1,in_wcdf2,in_wcn);
mult(in_wcdf1,plan->res[0],plan->numCoeff);
mult(in_wcdf2,plan->res[1],plan->numCoeff);
mult(in_wcn,plan->res[2],plan->numCoeff);
iwt3_cpu(plan,out_vx,in_wcdf1,0);
iwt3_cpu(plan,out_vy,in_wcdf2,1);
iwt3_cpu(plan,out_vz,in_wcn,2);
}
// Soft-threshold the detail subbands of all three coefficient arrays in
// place.  The divergence-free bands (wcdf1, wcdf2) use dfthresh, the
// remaining band (wcn) uses nthresh; both are scaled per subband by the
// precomputed noise amplification factors in plan->noiseAmp.
void dfsoftthresh_cpu(struct dfwavelet_plan_s* plan,scalar_t dfthresh, scalar_t nthresh, data_t* wcdf1,data_t* wcdf2,data_t* wcn)
{
// skip the coarsest approximation block; start at the first detail band
data_t* HxLyLz1 = wcdf1 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
data_t* HxLyLz2 = wcdf2 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
data_t* HxLyLz3 = wcn + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
int l;
// advance past all levels (7 detail subbands per level); the main loop
// below walks the pointers back one level at a time
for (l = 1; l <= plan->numLevels; ++l){
HxLyLz1 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
HxLyLz2 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
HxLyLz3 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
}
int dxNext = plan->waveSizes[0 + 3*plan->numLevels];
int dyNext = plan->waveSizes[1 + 3*plan->numLevels];
int dzNext = plan->waveSizes[2 + 3*plan->numLevels];
int blockSize = dxNext*dyNext*dzNext;
// naInd walks plan->noiseAmp: one entry per subband, coarse to fine
int naInd = 0;
for (l = plan->numLevels; l >= 1; --l)
{
dxNext = plan->waveSizes[0 + 3*l];
dyNext = plan->waveSizes[1 + 3*l];
dzNext = plan->waveSizes[2 + 3*l];
blockSize = dxNext*dyNext*dzNext;
// step back over this level's 7 subbands in each component
HxLyLz1 = HxLyLz1 - 7*blockSize;
HxLyLz2 = HxLyLz2 - 7*blockSize;
HxLyLz3 = HxLyLz3 - 7*blockSize;
int bandInd;
// 7 subbands x 3 components = 21 bands at this level
for (bandInd=0; bandInd<7*3;bandInd++)
{
data_t *subband;
scalar_t lambda;
if (bandInd<7)
{
subband = HxLyLz1 + bandInd*blockSize;
lambda = dfthresh * plan->noiseAmp[naInd];
} else if (bandInd<14)
{
subband = HxLyLz2 + (bandInd-7)*blockSize;
lambda = dfthresh * plan->noiseAmp[naInd];
} else
{
subband = HxLyLz3 + (bandInd-14)*blockSize;
lambda = nthresh * plan->noiseAmp[naInd];
}
// SoftThresh: shrink the complex magnitude by lambda, clamp at zero
// (0.5*(red+fabs(red)) is max(red,0)); eps avoids division by zero
float const eps = 1.1921e-7f;
#pragma omp parallel for
for(int i = 0; i < blockSize; i++)
{
scalar_t norm = cabs(subband[i]);
scalar_t red = norm - lambda;
red = 0.5f*(red + fabs(red));
red = red / (norm + eps);
subband[i] = red * subband[i];
}
naInd++;
}
}
}
/*
 * Denoise a velocity field on the CPU: forward transform into temporary
 * coefficient buffers, soft-threshold them, and transform back.
 */
void dfwavthresh3_cpu(struct dfwavelet_plan_s* plan,scalar_t dfthresh, scalar_t nthresh,data_t* out_vx,data_t* out_vy,data_t* out_vz,data_t* in_vx,data_t* in_vy,data_t* in_vz)
{
	// scratch coefficient arrays, numCoeff entries each
	data_t* tmp_df1 = malloc(sizeof(data_t) * plan->numCoeff);
	data_t* tmp_df2 = malloc(sizeof(data_t) * plan->numCoeff);
	data_t* tmp_n = malloc(sizeof(data_t) * plan->numCoeff);

	dffwt3_cpu(plan, tmp_df1, tmp_df2, tmp_n, in_vx, in_vy, in_vz);
	dfsoftthresh_cpu(plan, dfthresh, nthresh, tmp_df1, tmp_df2, tmp_n);
	dfiwt3_cpu(plan, out_vx, out_vy, out_vz, tmp_df1, tmp_df2, tmp_n);

	free(tmp_n);
	free(tmp_df2);
	free(tmp_df1);
}
// In-place linear recombination of the three per-component wavelet
// coefficient arrays into the divergence-free (wc1, wc2) and
// non-divergence-free (wc3) components, level by level.
// Pass 1 permutes/mixes the 21 detail subbands pointwise; pass 2 adds
// finite-difference corrections using shifted (neighboring) samples.
void dflincomb_cpu(struct dfwavelet_plan_s* plan,data_t* wc1,data_t* wc2,data_t* wc3)
{
// skip the coarse approximation block, then advance past all levels;
// the main loop walks the pointers back one level at a time
data_t* HxLyLz1 = wc1 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
data_t* HxLyLz2 = wc2 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
data_t* HxLyLz3 = wc3 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
int l;
for (l = 1; l <= plan->numLevels; ++l){
HxLyLz1 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
HxLyLz2 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
HxLyLz3 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
}
int dxNext = plan->waveSizes[0 + 3*plan->numLevels];
int dyNext = plan->waveSizes[1 + 3*plan->numLevels];
int dzNext = plan->waveSizes[2 + 3*plan->numLevels];
int blockSize = dxNext*dyNext*dzNext;
int i,j,k;
for (l = plan->numLevels; l >= 1; --l)
{
dxNext = plan->waveSizes[0 + 3*l];
dyNext = plan->waveSizes[1 + 3*l];
dzNext = plan->waveSizes[2 + 3*l];
blockSize = dxNext*dyNext*dzNext;
// step back over this level's 7 subbands in each component, then name
// the 7 subbands of each component (H/L = high/low pass per axis)
HxLyLz1 = HxLyLz1 - 7*blockSize;
HxLyLz2 = HxLyLz2 - 7*blockSize;
HxLyLz3 = HxLyLz3 - 7*blockSize;
data_t* LxHyLz1 = HxLyLz1 + blockSize;
data_t* HxHyLz1 = LxHyLz1 + blockSize;
data_t* LxLyHz1 = HxHyLz1 + blockSize;
data_t* HxLyHz1 = LxLyHz1 + blockSize;
data_t* LxHyHz1 = HxLyHz1 + blockSize;
data_t* HxHyHz1 = LxHyHz1 + blockSize;
data_t* LxHyLz2 = HxLyLz2 + blockSize;
data_t* HxHyLz2 = LxHyLz2 + blockSize;
data_t* LxLyHz2 = HxHyLz2 + blockSize;
data_t* HxLyHz2 = LxLyHz2 + blockSize;
data_t* LxHyHz2 = HxLyHz2 + blockSize;
data_t* HxHyHz2 = LxHyHz2 + blockSize;
data_t* LxHyLz3 = HxLyLz3 + blockSize;
data_t* HxHyLz3 = LxHyLz3 + blockSize;
data_t* LxLyHz3 = HxHyLz3 + blockSize;
data_t* HxLyHz3 = LxLyHz3 + blockSize;
data_t* LxHyHz3 = HxLyHz3 + blockSize;
data_t* HxHyHz3 = LxHyHz3 + blockSize;
// pass 1: pointwise recombination of the 21 subbands at this level
#pragma omp parallel for private(i,j,k)
for (k=0;k<dzNext;k++)
for (j=0;j<dyNext;j++)
for (i=0;i<dxNext;i++)
{
int ind = i+j*dxNext+k*dxNext*dyNext;
data_t wcx100 = HxLyLz1[ind];
data_t wcy100 = HxLyLz2[ind];
data_t wcz100 = HxLyLz3[ind];
data_t wcx010 = LxHyLz1[ind];
data_t wcy010 = LxHyLz2[ind];
data_t wcz010 = LxHyLz3[ind];
data_t wcx001 = LxLyHz1[ind];
data_t wcy001 = LxLyHz2[ind];
data_t wcz001 = LxLyHz3[ind];
data_t wcx110 = HxHyLz1[ind];
data_t wcy110 = HxHyLz2[ind];
data_t wcz110 = HxHyLz3[ind];
data_t wcx101 = HxLyHz1[ind];
data_t wcy101 = HxLyHz2[ind];
data_t wcz101 = HxLyHz3[ind];
data_t wcx011 = LxHyHz1[ind];
data_t wcy011 = LxHyHz2[ind];
data_t wcz011 = LxHyHz3[ind];
data_t wcx111 = HxHyHz1[ind];
data_t wcy111 = HxHyHz2[ind];
data_t wcz111 = HxHyHz3[ind];
HxLyLz1[ind] = wcy100;
LxHyLz1[ind] = wcx010;
LxLyHz1[ind] = wcy001;
HxLyLz2[ind] = wcz100;
LxHyLz2[ind] = wcz010;
LxLyHz2[ind] = wcx001;
HxLyLz3[ind] = wcx100;
LxHyLz3[ind] = wcy010;
LxLyHz3[ind] = wcz001;
HxHyLz1[ind] = 0.5*(wcx110-wcy110);
HxLyHz1[ind] = 0.5*(wcz101-wcx101);
LxHyHz1[ind] = 0.5*(wcy011-wcz011);
HxHyLz2[ind] = wcz110;
HxLyHz2[ind] = wcy101;
LxHyHz2[ind] = wcx011;
HxHyLz3[ind] = 0.5*(wcx110+wcy110);
HxLyHz3[ind] = 0.5*(wcz101+wcx101);
LxHyHz3[ind] = 0.5*(wcy011+wcz011);
HxHyHz1[ind] = 1/3.*(-2*wcx111+wcy111+wcz111);
HxHyHz2[ind] = 1/3.*(-wcx111+2*wcy111-wcz111);
HxHyHz3[ind] = 1/3.*(wcx111+wcy111+wcz111);
}
#pragma omp barrier
// pass 2: add finite-difference corrections into the wc3 subbands,
// using the previous sample along x/y/z (clamped to 0 at the border)
#pragma omp parallel for private(i,j,k)
for (k=0;k<dzNext;k++)
for (j=0;j<dyNext;j++)
for (i=0;i<dxNext;i++)
{
int ind = i+j*dxNext+k*dxNext*dyNext;
int indxs = ind-1;
int indys = ind-dxNext;
int indzs = ind-dxNext*dyNext;
if (i==0)
indxs = 0;
if (j==0)
indys = 0;
if (k==0)
indzs = 0;
data_t wcy100 = HxLyLz1[ind];
data_t wcy100s = HxLyLz1[indys];
data_t wcz100 = HxLyLz2[ind];
data_t wcz100s = HxLyLz2[indzs];
data_t wcx010 = LxHyLz1[ind];
data_t wcx010s = LxHyLz1[indxs];
data_t wcz010 = LxHyLz2[ind];
data_t wcz010s = LxHyLz2[indzs];
data_t wcx001 = LxLyHz2[ind];
data_t wcx001s = LxLyHz2[indxs];
data_t wcy001 = LxLyHz1[ind];
data_t wcy001s = LxLyHz1[indys];
data_t wcz110 = HxHyLz2[ind];
data_t wcz110s = HxHyLz2[indzs];
data_t wcy101 = HxLyHz2[ind];
data_t wcy101s = HxLyHz2[indys];
data_t wcx011 = LxHyHz2[ind];
data_t wcx011s = LxHyHz2[indxs];
HxLyLz3[ind] = HxLyLz3[ind]+0.25*(wcy100-wcy100s+wcz100-wcz100s);
LxHyLz3[ind] = LxHyLz3[ind]+0.25*(wcx010-wcx010s+wcz010-wcz010s);
LxLyHz3[ind] = LxLyHz3[ind]+0.25*(wcx001-wcx001s+wcy001-wcy001s);
HxHyLz3[ind] = HxHyLz3[ind] + 0.125*(wcz110-wcz110s);
HxLyHz3[ind] = HxLyHz3[ind] + 0.125*(wcy101-wcy101s);
LxHyHz3[ind] = LxHyHz3[ind] + 0.125*(wcx011-wcx011s);
}
}
}
// Exact inverse of dflincomb_cpu: undo the divergence-free basis
// recombination in place, level by level.  Pass 1 inverts the pointwise
// subband mixing; pass 2 removes the finite-difference corrections that
// dflincomb_cpu added (so the passes run in the same order, with signs
// negated).
void dfunlincomb_cpu(struct dfwavelet_plan_s* plan,data_t* wc1,data_t* wc2,data_t* wc3)
{
// skip the coarse approximation block, then advance past all levels;
// the main loop walks the pointers back one level at a time
data_t* HxLyLz1 = wc1 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
data_t* HxLyLz2 = wc2 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
data_t* HxLyLz3 = wc3 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
int l;
for (l = 1; l <= plan->numLevels; ++l){
HxLyLz1 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
HxLyLz2 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
HxLyLz3 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
}
int dxNext = plan->waveSizes[0 + 3*plan->numLevels];
int dyNext = plan->waveSizes[1 + 3*plan->numLevels];
int dzNext = plan->waveSizes[2 + 3*plan->numLevels];
int blockSize = dxNext*dyNext*dzNext;
int i,j,k;
for (l = plan->numLevels; l >= 1; --l)
{
dxNext = plan->waveSizes[0 + 3*l];
dyNext = plan->waveSizes[1 + 3*l];
dzNext = plan->waveSizes[2 + 3*l];
blockSize = dxNext*dyNext*dzNext;
// step back over this level's 7 subbands in each component, then name
// the 7 subbands of each component (H/L = high/low pass per axis)
HxLyLz1 = HxLyLz1 - 7*blockSize;
HxLyLz2 = HxLyLz2 - 7*blockSize;
HxLyLz3 = HxLyLz3 - 7*blockSize;
data_t* LxHyLz1 = HxLyLz1 + blockSize;
data_t* HxHyLz1 = LxHyLz1 + blockSize;
data_t* LxLyHz1 = HxHyLz1 + blockSize;
data_t* HxLyHz1 = LxLyHz1 + blockSize;
data_t* LxHyHz1 = HxLyHz1 + blockSize;
data_t* HxHyHz1 = LxHyHz1 + blockSize;
data_t* LxHyLz2 = HxLyLz2 + blockSize;
data_t* HxHyLz2 = LxHyLz2 + blockSize;
data_t* LxLyHz2 = HxHyLz2 + blockSize;
data_t* HxLyHz2 = LxLyHz2 + blockSize;
data_t* LxHyHz2 = HxLyHz2 + blockSize;
data_t* HxHyHz2 = LxHyHz2 + blockSize;
data_t* LxHyLz3 = HxLyLz3 + blockSize;
data_t* HxHyLz3 = LxHyLz3 + blockSize;
data_t* LxLyHz3 = HxHyLz3 + blockSize;
data_t* HxLyHz3 = LxLyHz3 + blockSize;
data_t* LxHyHz3 = HxLyHz3 + blockSize;
data_t* HxHyHz3 = LxHyHz3 + blockSize;
// pass 1: invert the pointwise subband mixing of dflincomb_cpu
#pragma omp parallel for private(i,j,k)
for (k=0;k<dzNext;k++)
for (j=0;j<dyNext;j++)
for (i=0;i<dxNext;i++)
{
int ind = i+j*dxNext+k*dxNext*dyNext;
data_t df1_100 = HxLyLz1[ind];
data_t df2_100 = HxLyLz2[ind];
data_t n_100 = HxLyLz3[ind];
data_t df1_010 = LxHyLz1[ind];
data_t df2_010 = LxHyLz2[ind];
data_t n_010 = LxHyLz3[ind];
data_t df1_001 = LxLyHz1[ind];
data_t df2_001 = LxLyHz2[ind];
data_t n_001 = LxLyHz3[ind];
data_t df1_110 = HxHyLz1[ind];
data_t df2_110 = HxHyLz2[ind];
data_t n_110 = HxHyLz3[ind];
data_t df1_101 = HxLyHz1[ind];
data_t df2_101 = HxLyHz2[ind];
data_t n_101 = HxLyHz3[ind];
data_t df1_011 = LxHyHz1[ind];
data_t df2_011 = LxHyHz2[ind];
data_t n_011 = LxHyHz3[ind];
data_t df1_111 = HxHyHz1[ind];
data_t df2_111 = HxHyHz2[ind];
data_t n_111 = HxHyHz3[ind];
HxLyLz2[ind] = df1_100;
LxHyLz1[ind] = df1_010;
LxLyHz2[ind] = df1_001;
HxLyLz3[ind] = df2_100;
LxHyLz3[ind] = df2_010;
LxLyHz1[ind] = df2_001;
HxLyLz1[ind] = n_100;
LxHyLz2[ind] = n_010;
LxLyHz3[ind] = n_001;
HxHyLz3[ind] = df2_110;
HxLyHz2[ind] = df2_101;
LxHyHz1[ind] = df2_011;
HxHyLz1[ind] = (df1_110+n_110);
HxLyHz3[ind] = (df1_101+n_101);
LxHyHz2[ind] = (df1_011+n_011);
HxHyLz2[ind] = (-df1_110+n_110);
HxLyHz1[ind] = (-df1_101+n_101);
LxHyHz3[ind] = (-df1_011+n_011);
HxHyHz1[ind] = (-df1_111+n_111);
HxHyHz2[ind] = (df2_111+n_111);
HxHyHz3[ind] = df1_111-df2_111+n_111;
}
#pragma omp barrier
// pass 2: subtract the finite-difference corrections (dflincomb_cpu
// added them), again clamping neighbor indices to 0 at the border
#pragma omp parallel for private(i,j,k)
for (k=0;k<dzNext;k++)
for (j=0;j<dyNext;j++)
for (i=0;i<dxNext;i++)
{
int ind = i+j*dxNext+k*dxNext*dyNext;
int indxs = ind-1;
int indys = ind-dxNext;
int indzs = ind-dxNext*dyNext;
if (i==0)
indxs = 0;
if (j==0)
indys = 0;
if (k==0)
indzs = 0;
data_t df1_100 = HxLyLz2[ind];
data_t df1_100s = HxLyLz2[indys];
data_t df2_100 = HxLyLz3[ind];
data_t df2_100s = HxLyLz3[indzs];
data_t df1_010 = LxHyLz1[ind];
data_t df1_010s = LxHyLz1[indxs];
data_t df2_010 = LxHyLz3[ind];
data_t df2_010s = LxHyLz3[indzs];
data_t df2_001 = LxLyHz1[ind];
data_t df2_001s = LxLyHz1[indxs];
data_t df1_001 = LxLyHz2[ind];
data_t df1_001s = LxLyHz2[indys];
data_t df2_110 = HxHyLz3[ind];
data_t df2_110s = HxHyLz3[indzs];
data_t df2_101 = HxLyHz2[ind];
data_t df2_101s = HxLyHz2[indys];
data_t df2_011 = LxHyHz1[ind];
data_t df2_011s = LxHyHz1[indxs];
HxLyLz1[ind] = HxLyLz1[ind]-0.25*(df1_100-df1_100s+df2_100-df2_100s);
LxHyLz2[ind] = LxHyLz2[ind]-0.25*(df1_010-df1_010s+df2_010-df2_010s);
LxLyHz3[ind] = LxLyHz3[ind]-0.25*(df2_001-df2_001s+df1_001-df1_001s);
HxHyLz1[ind] = HxHyLz1[ind] - 0.125*(df2_110-df2_110s);
HxLyHz3[ind] = HxLyHz3[ind] - 0.125*(df2_101-df2_101s);
LxHyHz2[ind] = LxHyHz2[ind] - 0.125*(df2_011-df2_011s);
HxHyLz2[ind] = HxHyLz2[ind] - 0.125*(df2_110-df2_110s);
HxLyHz1[ind] = HxLyHz1[ind] - 0.125*(df2_101-df2_101s);
LxHyHz3[ind] = LxHyHz3[ind] - 0.125*(df2_011-df2_011s);
}
}
}
// 3D forward wavelet transform of one velocity component into coeff.
// Separable convolve-and-downsample along z, then y, then x at each
// level; dir (0/1/2) selects which axis uses the second filter pair
// (lod1/hid1) instead of the default (lod0/hid0).
// NOTE(review): inImage is temporarily mutated by circshift_cpu and
// restored by circunshift_cpu at the end.
void fwt3_cpu(struct dfwavelet_plan_s* plan, data_t* coeff, data_t* inImage,int dir)
{
circshift_cpu(plan,inImage);
data_t* origInImage = inImage;
// position HxLyLz past all levels; the loop below walks it back
data_t* HxLyLz = coeff + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
int l;
for (l = 1; l <= plan->numLevels; ++l){
HxLyLz += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
}
int dx = plan->imSize[0];
int dy = plan->imSize[1];
int dz = plan->imSize[2];
int dxNext = plan->waveSizes[0 + 3*plan->numLevels];
int dyNext = plan->waveSizes[1 + 3*plan->numLevels];
int dzNext = plan->waveSizes[2 + 3*plan->numLevels];
int blockSize = dxNext*dyNext*dzNext;
// scratch buffers: z-filtered, then yz-filtered, then the next level's
// low-pass input (LxLyLz holds the fully low-pass block)
data_t* LxLyLz = (data_t*) malloc(sizeof(data_t)*blockSize);
data_t* tempz = (data_t*) malloc(sizeof(data_t)*dx*dy*dzNext);
data_t* tempyz = (data_t*) malloc(sizeof(data_t)*dx*dyNext*dzNext);
data_t* tempxyz = (data_t*) malloc(sizeof(data_t)*blockSize);
// Assign Filters
scalar_t *lodx,*lody,*lodz,*hidx,*hidy,*hidz;
lodx = plan->lod0;
lody = plan->lod0;
lodz = plan->lod0;
hidx = plan->hid0;
hidy = plan->hid0;
hidz = plan->hid0;
// the transform direction gets the second filter pair
if (dir==0)
{
lodx = plan->lod1;
hidx = plan->hid1;
}
if (dir==1)
{
lody = plan->lod1;
hidy = plan->hid1;
}
if (dir==2)
{
lodz = plan->lod1;
hidz = plan->hid1;
}
for (l = plan->numLevels; l >= 1; --l)
{
dxNext = plan->waveSizes[0 + 3*l];
dyNext = plan->waveSizes[1 + 3*l];
dzNext = plan->waveSizes[2 + 3*l];
blockSize = dxNext*dyNext*dzNext;
// walk back to this level's 7 detail subbands in coeff
HxLyLz = HxLyLz - 7*blockSize;
data_t* LxHyLz = HxLyLz + blockSize;
data_t* HxHyLz = LxHyLz + blockSize;
data_t* LxLyHz = HxHyLz + blockSize;
data_t* HxLyHz = LxLyHz + blockSize;
data_t* LxHyHz = HxLyHz + blockSize;
data_t* HxHyHz = LxHyHz + blockSize;
int dxy = dx*dy;
// downsampled sizes after filtering along z and y
int newdz = (dz + plan->filterLen-1) / 2;
int newdy = (dy + plan->filterLen-1) / 2;
int newdxy = dx*newdy;
// Lz
conv_down_3d(tempz, inImage, dz, dxy, dx, 1, dy, dx, lodz,plan->filterLen);
// LyLz
conv_down_3d(tempyz, tempz, dy, dx, dx, 1, newdz, dxy, lody,plan->filterLen);
conv_down_3d(LxLyLz, tempyz, dx, 1, newdy, dx, newdz, newdxy, lodx,plan->filterLen);
conv_down_3d(HxLyLz, tempyz, dx, 1, newdy, dx, newdz, newdxy, hidx,plan->filterLen);
// HyLz
conv_down_3d(tempyz, tempz, dy, dx, dx, 1, newdz, dxy, hidy,plan->filterLen);
conv_down_3d(LxHyLz, tempyz, dx, 1, newdy, dx, newdz, newdxy, lodx,plan->filterLen);
conv_down_3d(HxHyLz, tempyz, dx, 1, newdy, dx, newdz, newdxy, hidx,plan->filterLen);
// Hz
conv_down_3d(tempz, inImage, dz, dxy, dx, 1, dy, dx, hidz,plan->filterLen);
// LyHz
conv_down_3d(tempyz, tempz, dy, dx, dx, 1, newdz, dxy, lody,plan->filterLen);
conv_down_3d(LxLyHz, tempyz, dx, 1, newdy, dx, newdz, newdxy, lodx,plan->filterLen);
conv_down_3d(HxLyHz, tempyz, dx, 1, newdy, dx, newdz, newdxy, hidx,plan->filterLen);
// HyHz
conv_down_3d(tempyz, tempz, dy, dx, dx, 1, newdz, dxy, hidy,plan->filterLen);
conv_down_3d(LxHyHz, tempyz, dx, 1, newdy, dx, newdz, newdxy, lodx,plan->filterLen);
conv_down_3d(HxHyHz, tempyz, dx, 1, newdy, dx, newdz, newdxy, hidx,plan->filterLen);
// the low-pass block becomes the input of the next (coarser) level
memcpy(tempxyz, LxLyLz, blockSize*sizeof(data_t));
inImage = tempxyz;
dx = dxNext;
dy = dyNext;
dz = dzNext;
}
// Final LxLyLz
memcpy(coeff, inImage, plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2]*sizeof(data_t));
free(LxLyLz);
free(tempz);
free(tempyz);
free(tempxyz);
circunshift_cpu(plan,origInImage);
}
void iwt3_cpu(struct dfwavelet_plan_s* plan, data_t* outImage, data_t* coeff,int dir)
{
/* 3D inverse discrete wavelet transform on the CPU.
 *
 * plan     - transform plan (band sizes, filters, shift state).
 * outImage - output volume; also used as the running low-pass buffer
 *            while climbing from coarse to fine levels.
 * coeff    - coefficient buffer in the layout produced by fwt3_cpu.
 * dir      - axis (0=x, 1=y, 2=z) that uses the secondary lor1/hir1
 *            reconstruction pair; other axes use lor0/hir0.
 */
// Workspace dimensions
/* Full-convolution (upsampled) extents at the finest level; the *2
 * suffixed variants are one level coarser and bound tempz/tempyz. */
int dxWork = plan->waveSizes[0 + 3*plan->numLevels]*2-1 + plan->filterLen-1;
int dyWork = plan->waveSizes[1 + 3*plan->numLevels]*2-1 + plan->filterLen-1;
int dzWork = plan->waveSizes[2 + 3*plan->numLevels]*2-1 + plan->filterLen-1;
int dyWork2 = plan->waveSizes[1 + 3*(plan->numLevels-1)]*2-1 + plan->filterLen-1;
int dzWork2 = plan->waveSizes[2 + 3*(plan->numLevels-1)]*2-1 + plan->filterLen-1;
// Workspace
/* NOTE(review): malloc results are unchecked; an OOM would crash here. */
data_t* tempyz = (data_t*) malloc(sizeof(data_t)*dxWork*dyWork2*dzWork2);
data_t* tempz = (data_t*) malloc(sizeof(data_t)*dxWork*dyWork*dzWork2);
data_t* tempFull = (data_t*) malloc(sizeof(data_t)*dxWork*dyWork*dzWork);
int dx = plan->waveSizes[0];
int dy = plan->waveSizes[1];
int dz = plan->waveSizes[2];
// Assign Filters
scalar_t *lorx,*lory,*lorz,*hirx,*hiry,*hirz;
lorx = plan->lor0;
lory = plan->lor0;
lorz = plan->lor0;
hirx = plan->hir0;
hiry = plan->hir0;
hirz = plan->hir0;
if (dir==0)
{
lorx = plan->lor1;
hirx = plan->hir1;
}
if (dir==1)
{
lory = plan->lor1;
hiry = plan->hir1;
}
if (dir==2)
{
lorz = plan->lor1;
hirz = plan->hir1;
}
/* Seed the reconstruction with the coarse LxLyLz cube. */
memcpy(outImage, coeff, dx*dy*dz*sizeof(data_t));
data_t* HxLyLz = coeff + dx*dy*dz;
int level;
/* Reconstruct from coarsest (level 1) to finest (level numLevels);
 * each iteration merges 7 detail subbands with the current low-pass
 * image via upsampling convolutions along x, then y, then z. */
for (level = 1; level < plan->numLevels+1; ++level)
{
dx = plan->waveSizes[0 + 3*level];
dy = plan->waveSizes[1 + 3*level];
dz = plan->waveSizes[2 + 3*level];
int blockSize = dx*dy*dz;
data_t* LxHyLz = HxLyLz + blockSize;
data_t* HxHyLz = LxHyLz + blockSize;
data_t* LxLyHz = HxHyLz + blockSize;
data_t* HxLyHz = LxLyHz + blockSize;
data_t* LxHyHz = HxLyHz + blockSize;
data_t* HxHyHz = LxHyHz + blockSize;
data_t* LxLyLz = outImage;
/* Full-convolution extents for this level. */
int newdx = 2*dx-1 + plan->filterLen-1;
int newdy = 2*dy-1 + plan->filterLen-1;
int newdz = 2*dz-1 + plan->filterLen-1;
int dxy = dx*dy;
int newdxy = newdx*dy;
int newnewdxy = newdx*newdy;
/* conv_up_3d accumulates (+=), so all destinations must be zeroed
 * before the first pass that writes into them. */
memset(tempFull, 0, newnewdxy*newdz*sizeof(data_t));
memset(tempz, 0, newnewdxy*dz*sizeof(data_t));
memset(tempyz, 0, newdxy*dz*sizeof(data_t));
conv_up_3d(tempyz, LxLyLz, dx, 1, dy, dx, dz, dxy, lorx,plan->filterLen);
conv_up_3d(tempyz, HxLyLz, dx, 1, dy, dx, dz, dxy, hirx,plan->filterLen);
conv_up_3d(tempz, tempyz, dy, newdx, newdx, 1, dz, newdxy, lory,plan->filterLen);
memset(tempyz, 0, newdxy*dz*sizeof(data_t));
conv_up_3d(tempyz, LxHyLz, dx, 1, dy, dx, dz, dxy, lorx,plan->filterLen);
conv_up_3d(tempyz, HxHyLz, dx, 1, dy, dx, dz, dxy, hirx,plan->filterLen);
conv_up_3d(tempz, tempyz, dy, newdx, newdx, 1, dz, newdxy, hiry,plan->filterLen);
conv_up_3d(tempFull, tempz, dz, newnewdxy, newdx, 1, newdy, newdx, lorz,plan->filterLen);
memset(tempz, 0, newnewdxy*dz*sizeof(data_t));
memset(tempyz, 0, newdxy*dz*sizeof(data_t));
conv_up_3d(tempyz, LxLyHz, dx, 1, dy, dx, dz, dxy, lorx,plan->filterLen);
conv_up_3d(tempyz, HxLyHz, dx, 1, dy, dx, dz, dxy, hirx,plan->filterLen);
conv_up_3d(tempz, tempyz, dy, newdx, newdx, 1, dz, newdxy, lory,plan->filterLen);
memset(tempyz, 0, newdxy*dz*sizeof(data_t));
conv_up_3d(tempyz, LxHyHz, dx, 1, dy, dx, dz, dxy, lorx,plan->filterLen);
conv_up_3d(tempyz, HxHyHz, dx, 1, dy, dx, dz, dxy, hirx,plan->filterLen);
conv_up_3d(tempz, tempyz, dy, newdx, newdx, 1, dz, newdxy, hiry,plan->filterLen);
conv_up_3d(tempFull, tempz, dz, newnewdxy, newdx, 1, newdy, newdx, hirz,plan->filterLen);
// Crop center of workspace
/* The full convolution overshoots the next level's band size; copy the
 * centered dxNext x dyNext x dzNext region back into outImage. */
int dxNext = plan->waveSizes[0+3*(level+1)];
int dyNext = plan->waveSizes[1+3*(level+1)];
int dzNext = plan->waveSizes[2+3*(level+1)];
int dxyNext = dxNext*dyNext;
dxWork = (2*dx-1 + plan->filterLen-1);
dyWork = (2*dy-1 + plan->filterLen-1);
dzWork = (2*dz-1 + plan->filterLen-1);
int dxyWork = dxWork*dyWork;
int xOffset = (int) ((dxWork - dxNext) / 2.0);
int yOffset = (int) ((dyWork - dyNext) / 2.0);
int zOffset = (int) ((dzWork - dzNext) / 2.0);
int k,j;
for (k = 0; k < dzNext; ++k){
for (j = 0; j < dyNext; ++j){
memcpy(outImage+j*dxNext + k*dxyNext, tempFull+xOffset + (yOffset+j)*dxWork + (zOffset+k)*dxyWork, dxNext*sizeof(data_t));
}
}
/* Advance to the next level's 7 subbands. */
HxLyLz += 7*blockSize;
}
free(tempyz);
free(tempz);
free(tempFull);
/* Undo the circular shift applied before the forward transform. */
circunshift_cpu(plan,outImage);
}
void circshift_cpu(struct dfwavelet_plan_s* plan, data_t *data) {
/* Circularly shift `data` in place by plan->randShift (cycle-spinning for
 * shift-variance reduction); draws a fresh shift first when
 * plan->randshift is set.  Exact inverse: circunshift_cpu.
 *
 * Note: the wrap is applied to the *linear* index modulo the total pixel
 * count rather than per dimension, so a shifted row can spill into the
 * next row.  circunshift_cpu applies the identical mapping in reverse,
 * so the pair is lossless.
 */
if (plan->randshift)
dfwavelet_new_randshift(plan);
// Return if no shifts
int zeroShift = 1;
int i;
for (i = 0; i< plan->numdims; i++)
{
zeroShift &= (plan->randShift[i]==0);
}
if(zeroShift) {
return;
}
// Copy data; the shift scatters from the copy back into data.
data_t* dataCopy = malloc(sizeof(data_t)*plan->numPixel);
memcpy(dataCopy, data, plan->numPixel*sizeof(data_t));
if (plan->numdims==2)
{
int dx,dy,r0,r1,j,i,index,indexShifted;
dx = plan->imSize[0];
dy = plan->imSize[1];
r0 = plan->randShift[0];
r1 = plan->randShift[1];
#pragma omp parallel for private(index, j, i,indexShifted)
for(j = 0; j < dy; j++) {
for(i = 0; i < dx; i++) {
index = i+j*dx;
/* +dx*dy before the final % keeps the result non-negative for
 * negative shifts. */
indexShifted = (((i+r0) + (j+r1)*dx)%(dx*dy)+dx*dy)%(dx*dy);
data[indexShifted] = dataCopy[index];
}
}
}
if (plan->numdims==3)
{
int dx,dy,dz,r0,r1,r2,k,j,i,index,indexShifted;
dx = plan->imSize[0];
dy = plan->imSize[1];
dz = plan->imSize[2];
r0 = plan->randShift[0];
r1 = plan->randShift[1];
r2 = plan->randShift[2];
#pragma omp parallel for private(index, k, j, i,indexShifted)
for (k = 0; k < dz; k++) {
for(j = 0; j < dy; j++) {
for(i = 0; i < dx; i++) {
index = i+j*dx+k*dx*dy;
indexShifted = ((i+r0 + (j+r1)*dx + (k+r2)*dx*dy)%(dx*dy*dz)+(dx*dy*dz))%(dx*dy*dz);
data[indexShifted] = dataCopy[index];
}
}
}
}
/* A "#pragma omp barrier" formerly sat here; it was removed because an
 * orphaned barrier outside a parallel region is a no-op, and each
 * "omp parallel for" above already joins all threads at its implicit
 * barrier, so dataCopy cannot be freed while workers still read it. */
free(dataCopy);
}
void circunshift_cpu(struct dfwavelet_plan_s* plan, data_t *data) {
/* Undo the circular shift applied by circshift_cpu: gather each output
 * pixel from its shifted linear position in a snapshot of the data. */
// Nothing to undo when every component of the shift is zero.
int allZero = 1;
for (int d = 0; d < plan->numdims; d++)
allZero &= (plan->randShift[d] == 0);
if (allZero)
return;
// Snapshot the shifted data; we gather from it back into `data`.
data_t* shifted = malloc(sizeof(data_t)*plan->numPixel);
memcpy(shifted, data, plan->numPixel*sizeof(data_t));
if (plan->numdims==2)
{
const int dx = plan->imSize[0];
const int dy = plan->imSize[1];
const int r0 = plan->randShift[0];
const int r1 = plan->randShift[1];
const int total = dx*dy;
#pragma omp parallel for
for (int j = 0; j < dy; j++) {
for (int i = 0; i < dx; i++) {
int dst = i + j*dx;
/* "+ total" before the final % keeps negative shifts in range. */
int src = (((i+r0) + (j+r1)*dx) % total + total) % total;
data[dst] = shifted[src];
}
}
}
if (plan->numdims==3)
{
const int dx = plan->imSize[0];
const int dy = plan->imSize[1];
const int dz = plan->imSize[2];
const int r0 = plan->randShift[0];
const int r1 = plan->randShift[1];
const int r2 = plan->randShift[2];
const int total = dx*dy*dz;
#pragma omp parallel for
for (int k = 0; k < dz; k++) {
for (int j = 0; j < dy; j++) {
for (int i = 0; i < dx; i++) {
int dst = i + j*dx + k*dx*dy;
int src = ((i+r0 + (j+r1)*dx + (k+r2)*dx*dy) % total + total) % total;
data[dst] = shifted[src];
}
}
}
}
free(shifted);
}
/********** Helper Function *********/
void conv_down_3d(data_t *out, data_t *in,
int size1, int skip1, int size2, int skip2, int size3, int skip3,
scalar_t *filter, int filterLen)
{
/* Filter + downsample-by-2 a 3D volume along dimension 1.
 *
 * The volume is addressed as in[i3*skip3 + i2*skip2 + i1*skip1], so the
 * same routine handles any axis by permuting the (size, skip) pairs.
 * Output length along dim 1 is (size1 + filterLen - 1) / 2 (full
 * convolution, then every other sample).  The filter is applied
 * time-reversed (filter[filterLen-1-k]), i.e. true convolution.
 * Out-of-range taps are reflected symmetrically about the array edges
 * (half-sample symmetry: index -1 maps to 0, size1 maps to size1-1).
 */
int outSize1 = (size1 + filterLen-1) / 2;
// Adjust out skip 2 and 3 if needed
/* When a cross-dimension stride is larger than skip1, it spans whole
 * dim-1 rows and must be rescaled to the shorter output rows.
 * (Integer division: assumes skip is a multiple of size1 -- holds for
 * the contiguous layouts used by fwt3_cpu.) */
int outSkip2;
if(skip2 > skip1) {
outSkip2 = outSize1*skip2/size1;
}
else {
outSkip2 = skip2;
}
int outSkip3;
if(skip3 > skip1) {
outSkip3 = outSize1*skip3/size1;
}
else {
outSkip3 = skip3;
}
int i32;
/* Each (i2,i3) line is independent; parallelize over the flattened pair. */
#pragma omp parallel for
for (i32 = 0; i32 < size2*size3; ++i32)
{
int i2 = i32 % size2;
int i3 = i32 / size2;
int i1;
for (i1 = 0; i1 < outSize1; ++i1)
{
out[i3*outSkip3 + i2*outSkip2 + i1*skip1] = 0.0f;
int k;
for (k = 0; k < filterLen; ++k)
{
/* Input sample feeding output i1 through tap k (2x decimation). */
int out_i1 = 2*i1+1 - (filterLen-1) + k;
/* Symmetric boundary extension on both ends. */
if (out_i1 < 0) out_i1 = -out_i1-1;
if (out_i1 >= size1) out_i1 = size1-1 - (out_i1-size1);
out[i3*outSkip3 + i2*outSkip2 + i1*skip1] += in[i3*skip3 + i2*skip2 + out_i1*skip1] * filter[filterLen-1-k];
}
}
}
}
void conv_up_3d(data_t *out, data_t *in,
int size1, int skip1, int size2, int skip2, int size3, int skip3,
scalar_t *filter, int filterLen)
{
/* Upsample-by-2 + filter a 3D volume along dimension 1 (the transpose
 * of conv_down_3d): zero-stuff the input, then full convolution.
 *
 * IMPORTANT: results are ACCUMULATED into out (+=); the caller must
 * zero the destination first (iwt3_cpu memsets before the first pass)
 * -- this is what lets low- and high-band reconstructions sum in place.
 * Output length along dim 1 is 2*size1 - 1 + filterLen - 1.
 * Out-of-range (pre-upsampling) taps contribute nothing.
 */
int outSize1 = 2*size1-1 + filterLen-1;
// Adjust out skip 2 and 3 if needed
/* Rescale cross-dimension strides that span whole dim-1 rows to the
 * longer output rows (see conv_down_3d for the same pattern). */
int outSkip2;
if(skip2 > skip1) {
outSkip2 = outSize1*skip2/size1;
}
else {
outSkip2 = skip2;
}
int outSkip3;
if(skip3 > skip1) {
outSkip3 = outSize1*skip3/size1;
}
else {
outSkip3 = skip3;
}
int i32;
#pragma omp parallel for
for (i32 = 0; i32 < size2*size3; ++i32)
{
int i2 = i32 % size2;
int i3 = i32 / size2;
int i1;
for (i1 = 0; i1 < outSize1; ++i1) {
int k;
/* Only taps with (i1 - (filterLen-1) + k) even hit a real (non-
 * zero-stuffed) input sample, hence the parity-matched start and
 * step of 2.  (& 1 on a possibly negative value yields its parity
 * on two's-complement targets.) */
for (k = (i1 - (filterLen-1)) & 1; k < filterLen; k += 2){
int in_i1 = (i1 - (filterLen-1) + k) >> 1;
if (in_i1 >= 0 && in_i1 < size1)
out[i3*outSkip3 + i2*outSkip2 + i1*skip1] += in[i3*skip3 + i2*skip2 + in_i1*skip1] * filter[filterLen-1-k];
}
}
}
}
void mult(data_t* in,scalar_t scale,int numMax)
{
/* Scale the first numMax entries of `in` by `scale`, in place.
 * A non-positive numMax leaves the array untouched. */
data_t* const end = in + numMax;
for (data_t* p = in; p < end; ++p)
*p *= scale;
}
void create_numLevels(struct dfwavelet_plan_s* plan)
{
/* Set plan->numLevels to the deepest decomposition depth usable by every
 * dimension: for each dim, count how many times its extent can be
 * reduced via (len + filterLen - 1) / 2 before dropping to minSize, minus
 * one; the plan gets the minimum of those counts. */
const int filterLen = plan->filterLen;
int fewest = 10000000; /* effectively +infinity before the first dim */
int d;
for (d = 0; d < plan->numdims; d++)
{
int len = plan->imSize[d];
int levels = -1; /* one fewer than the number of halvings */
while (len > plan->minSize[d])
{
len = (len + filterLen - 1) / 2;
levels++;
}
if (levels < fewest)
fewest = levels;
}
plan->numLevels = fewest;
}
void create_wavelet_sizes(struct dfwavelet_plan_s* plan)
{
/* Populate plan->waveSizes and coefficient counts.
 *
 * waveSizes holds numdims entries per level, levels 0..numLevels+1:
 *   level numLevels+1 : the original image size,
 *   level l           : (level l+1 size + filterLen - 1) / 2,
 *   level 0           : a copy of level 1 (the coarse LxLyLz block).
 * Also sets numCoeff (total coefficients: all detail subbands plus the
 * coarse block) and numCoarse (coefficients in the coarse block).
 */
int numdims = plan->numdims;
int filterLen = plan->filterLen;
int numLevels = plan->numLevels;
int numSubCoef;
plan->waveSizes = (long*) malloc(sizeof(long)*numdims*(numLevels+2));
// Get number of subband per level, (3 for 2d, 7 for 3d)
// Set the last bandSize to be imSize
int d,l;
int numSubband = 1;
for (d = 0; d<numdims; d++)
{
plan->waveSizes[d + numdims*(numLevels+1)] = plan->imSize[d];
numSubband <<= 1;
}
/* numSubband is now 2^numdims - 1: all subbands except the low-pass one. */
numSubband--;
// Get numCoeff and waveSizes
// Each bandSize[l] is (bandSize[l+1] + filterLen - 1)/2
plan->numCoeff = 0;
for (l = plan->numLevels; l >= 1; --l) {
numSubCoef = 1;
for (d = 0; d < numdims; d++)
{
plan->waveSizes[d + numdims*l] = (plan->waveSizes[d + numdims*(l+1)] + filterLen - 1) / 2;
numSubCoef *= plan->waveSizes[d + numdims*l];
}
plan->numCoeff += numSubband*numSubCoef;
if (l==1)
plan->numCoarse = numSubCoef;
}
/* Level 0 mirrors level 1: the coarse approximation block. */
numSubCoef = 1;
for (d = 0; d < numdims; d++)
{
plan->waveSizes[d] = plan->waveSizes[numdims+d];
numSubCoef *= plan->waveSizes[d];
}
plan->numCoeff += numSubCoef;
}
/* All filter coefficients are obtained from http://wavelets.pybytes.com/ */
void create_wavelet_filters(struct dfwavelet_plan_s* plan)
{
/* Install the CDF 2.2 (primary) and CDF 3.1 (secondary) filter banks.
 * Each bank is one contiguous allocation of four filterLen-long filters
 * in the order [lod | hid | lor | hir]; the plan's per-filter pointers
 * simply index into that allocation (freeing lod0/lod1 frees the bank). */
enum { FILTER_LEN = 6 };
// CDF 2.2 and CDF 3.1 Wavelet
scalar_t cdf22[] = {
0.0,-0.17677669529663689,0.35355339059327379,1.0606601717798214,0.35355339059327379,-0.17677669529663689,
0.0,0.35355339059327379,-0.70710678118654757,0.35355339059327379,0.0,0.0,
0.0,0.35355339059327379,0.70710678118654757,0.35355339059327379,0.0,0.0,
0.0,0.17677669529663689,0.35355339059327379,-1.0606601717798214,0.35355339059327379,0.17677669529663689
};
scalar_t cdf31[] = {
0.0,-0.35355339059327379,1.0606601717798214,1.0606601717798214,-0.35355339059327379,0.0 ,
0.0,-0.17677669529663689,0.53033008588991071,-0.53033008588991071,0.17677669529663689,0.0,
0.0,0.17677669529663689,0.53033008588991071,0.53033008588991071,0.17677669529663689,0.0,
0.0,-0.35355339059327379,-1.0606601717798214,1.0606601717798214,0.35355339059327379,0.0
};
plan->filterLen = FILTER_LEN;
// Primary bank (CDF 2.2)
plan->lod0 = (scalar_t*) malloc(sizeof(scalar_t) * 4 * FILTER_LEN);
memcpy(plan->lod0, cdf22, 4*FILTER_LEN*sizeof(scalar_t));
// Secondary bank (CDF 3.1)
plan->lod1 = (scalar_t*) malloc(sizeof(scalar_t) * 4 * FILTER_LEN);
memcpy(plan->lod1, cdf31, 4*FILTER_LEN*sizeof(scalar_t));
// Sub-filter views into the two banks.
plan->hid0 = plan->lod0 + 1*FILTER_LEN;
plan->lor0 = plan->lod0 + 2*FILTER_LEN;
plan->hir0 = plan->lod0 + 3*FILTER_LEN;
plan->hid1 = plan->lod1 + 1*FILTER_LEN;
plan->lor1 = plan->lod1 + 2*FILTER_LEN;
plan->hir1 = plan->lod1 + 3*FILTER_LEN;
}
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
static data_t drand() /* uniform distribution, (0..1] */
{
/* (rand()+1) / (RAND_MAX+1) never returns 0 and can return exactly 1,
 * which keeps log(drand()) finite for the Box-Muller caller. */
double shifted = (double)rand() + 1.0;
return shifted / ((double)RAND_MAX + 1.0);
}
static void random_normal(data_t* in,int length)
/* normal distribution, centered on 0, std dev 1 */
{
/* Box-Muller transform: two independent uniforms per sample.
 * Note the two drand() calls sit in one expression, so their evaluation
 * order (and hence the exact rand() stream consumed by each) is
 * unspecified -- fine for noise generation, but not bit-reproducible
 * across compilers. */
int i;
for (i=0;i<length;i++)
in[i] = sqrt(-2*log(drand())) * cos(2*M_PI*drand());
}
void get_noise_amp(struct dfwavelet_plan_s* plan)
{
/* Lazily estimate the per-subband noise amplification of the transform.
 *
 * Strategy: push three unit-variance white Gaussian volumes (vx,vy,vz)
 * through the forward transform, then record the empirical standard
 * deviation of every detail subband of every output (wcdf1, wcdf2, wcn)
 * into plan->noiseAmp, ordered finest level first, 21 bands per level
 * (7 subbands x 3 outputs -- 3D layout is assumed here).
 * No-op if noiseAmp has already been computed.
 */
if (plan->noiseAmp==NULL)
{
// Generate Gaussian w/ mean=0, std=1 data
data_t* vx,*vy,*vz;
data_t* wcdf1,*wcdf2,*wcn;
vx = (data_t*) malloc(sizeof(data_t)*plan->numPixel);
vy = (data_t*) malloc(sizeof(data_t)*plan->numPixel);
vz = (data_t*) malloc(sizeof(data_t)*plan->numPixel);
random_normal(vx,plan->numPixel);
random_normal(vy,plan->numPixel);
random_normal(vz,plan->numPixel);
wcdf1 = (data_t*) malloc(sizeof(data_t)*plan->numCoeff);
wcdf2 = (data_t*) malloc(sizeof(data_t)*plan->numCoeff);
wcn = (data_t*) malloc(sizeof(data_t)*plan->numCoeff);
// Get Wavelet Coefficients
/* use_gpu is bumped 1 -> 2 around the forward call; presumably 2 selects
 * a CPU/host path for this calibration pass -- TODO confirm against the
 * dfwavelet_forward dispatch. */
int temp_use_gpu = plan->use_gpu;
if (plan->use_gpu==1)
plan->use_gpu = 2;
dfwavelet_forward(plan,wcdf1,wcdf2,wcn,vx,vy,vz);
plan->use_gpu = temp_use_gpu;
// Get Noise Amp for each subband
/* Walk each coefficient pointer to one-past-the-end, then step backwards
 * 7 subbands at a time (same traversal as fwt3_cpu). */
data_t* HxLyLz1 = wcdf1 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
data_t* HxLyLz2 = wcdf2 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
data_t* HxLyLz3 = wcn + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
int l;
for (l = 1; l <= plan->numLevels; ++l){
HxLyLz1 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
HxLyLz2 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
HxLyLz3 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
}
int numBand = 7*plan->numLevels*3;
plan->noiseAmp = (scalar_t*) malloc(sizeof(scalar_t)*numBand);
int naInd = 0;
for (l = plan->numLevels; l >= 1; --l)
{
int dxNext = plan->waveSizes[0 + 3*l];
int dyNext = plan->waveSizes[1 + 3*l];
int dzNext = plan->waveSizes[2 + 3*l];
int blockSize = dxNext*dyNext*dzNext;
HxLyLz1 = HxLyLz1 - 7*blockSize;
HxLyLz2 = HxLyLz2 - 7*blockSize;
HxLyLz3 = HxLyLz3 - 7*blockSize;
int bandInd;
//#pragma omp parallel for private(bandInd)
/* 21 bands per level: 0-6 from wcdf1, 7-13 from wcdf2, 14-20 from wcn. */
for (bandInd=0; bandInd<7*3;bandInd++)
{
data_t *subband;
if (bandInd<7)
{
subband = HxLyLz1 + bandInd*blockSize;
} else if (bandInd<14)
{
subband = HxLyLz2 + (bandInd-7)*blockSize;
} else
{
subband = HxLyLz3 + (bandInd-14)*blockSize;
}
/* Welford's online algorithm: numerically stable single-pass
 * mean/variance accumulation. */
data_t sig = 0;
data_t mean = 0;
data_t mean_old;
int i;
for (i=0; i<blockSize; i++)
{
scalar_t x = subband[i];
mean_old = mean;
mean = mean_old + (x-mean_old)/(i+1);
sig = sig + (x - mean_old)*(x-mean);
}
/* Sample standard deviation (n-1 denominator). */
sig = sqrt(sig/(blockSize-1));
plan->noiseAmp[naInd] = sig;
naInd++;
}
}
free(vx);
free(vy);
free(vz);
free(wcdf1);
free(wcdf2);
free(wcn);
}
}
|
ompsievetest.c | // Copyright 2009-2020 NTESS. Under the terms
// of Contract DE-NA0003525 with NTESS, the U.S.
// Government retains certain rights in this software.
//
// Copyright (c) 2009-2020, NTESS
// All rights reserved.
//
// Portions are copyright of other developers:
// See the file CONTRIBUTORS.TXT in the top level directory
// the distribution for more information.
//
// This file is part of the SST software package. For license
// information, see the LICENSE file in the top level directory of the
// distribution.
#include <stdio.h>
#include <stdlib.h>
#include <vector>
/*
* Test for sieve
* Contains:
* * direct calls to malloc
* * indirect calls (via std library containers)
* * calls from each thread
* * accesses to mallocs by different threads
*
*/
int main(int argc, char* argv[]) {
    // Sieve/allocator test driver: exercises direct mallocs, per-thread
    // mallocs inside OpenMP regions, cross-thread access, and indirect
    // allocation through std::vector.
    const int n = 20;
    int** the_array = (int**) malloc(sizeof(int*) * n);
    int i = 0;
    // Each thread allocates and zero-fills its own rows.
    #pragma omp parallel for
    for(i = 0; i < n; ++i) {
        the_array[i] = (int*) malloc(sizeof(int) * n);
        int j = 0;
        for (j = 0; j < n; ++j) {
            the_array[i][j] = 0; // initialize
        }
    }
    // Second parallel pass writes a lower-triangular pattern; rows may be
    // touched by a different thread than the one that allocated them.
    #pragma omp parallel for
    for(i = 0; i < n; ++i) {
        int j = 0;
        for(j = 0; j < n; ++j) {
            if (j < i) {
                the_array[i][j] = 1;
            } else {
                the_array[i][j] = 0;
            }
        }
    }
    // Now have a triangle matrix, no do something with std lib
    std::vector<int> rowSums;
    for (int i = 0; i < n; i++) {
        rowSums.push_back(0);
        for (int j = 0; j < n; j++) {
            rowSums[i] += the_array[i][j];
        }
    }
    printf("The vector is:\n");
    for (std::vector<int>::iterator it = rowSums.begin(); it != rowSums.end(); it++) {
        printf("%d\n", *it);
    }
    // Fix: release every allocation (rows, then the row table) -- the
    // original leaked all of them.
    for (int i = 0; i < n; ++i) {
        free(the_array[i]);
    }
    free(the_array);
    return 0;
}
|
graph.h | // Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details
#ifndef GRAPH_H_
#define GRAPH_H_
#include <algorithm>
#include <cinttypes>
#include <cstddef>
#include <iostream>
#include <type_traits>
#include "pvector.h"
#include "util.h"
/*
GAP Benchmark Suite
Class: CSRGraph
Author: Scott Beamer
Simple container for graph in CSR format
- Intended to be constructed by a Builder
- To make weighted, set DestID_ template type to NodeWeight
- MakeInverse parameter controls whether graph stores its inverse
*/
// Used to hold node & weight, with another node it makes a weighted edge
template <typename NodeID_, typename WeightT_>
struct NodeWeight {
NodeID_ v;
WeightT_ w;
NodeWeight() {}
NodeWeight(NodeID_ v) : v(v), w(1) {}
NodeWeight(NodeID_ v, WeightT_ w) : v(v), w(w) {}
bool operator< (const NodeWeight& rhs) const {
return v == rhs.v ? w < rhs.w : v < rhs.v;
}
// doesn't check WeightT_s, needed to remove duplicate edges
bool operator== (const NodeWeight& rhs) const {
return v == rhs.v;
}
// doesn't check WeightT_s, needed to remove self edges
bool operator== (const NodeID_& rhs) const {
return v == rhs;
}
operator NodeID_() {
return v;
}
};
template <typename NodeID_, typename WeightT_>
std::ostream& operator<<(std::ostream& os,
                         const NodeWeight<NodeID_, WeightT_>& nw) {
  // Serialize as "node weight", the format the stream extractor parses.
  os << nw.v;
  os << " ";
  os << nw.w;
  return os;
}
template <typename NodeID_, typename WeightT_>
std::istream& operator>>(std::istream& is, NodeWeight<NodeID_, WeightT_>& nw) {
  // Parse whitespace-separated "node weight".
  is >> nw.v;
  is >> nw.w;
  return is;
}
// Syntatic sugar for an edge
// Syntatic sugar for an edge
template <typename SrcT, typename DstT = SrcT>
struct EdgePair {
  SrcT u;  // source endpoint
  DstT v;  // destination endpoint
  EdgePair() {}
  EdgePair(SrcT src, DstT dst) : u(src), v(dst) {}
};
// SG = serialized graph, these types are for writing graph to file
typedef int32_t SGID;            // on-disk node id
typedef EdgePair<SGID> SGEdge;   // on-disk edge record
typedef int64_t SGOffset;        // on-disk index/offset into neighbor array
template <class NodeID_, class DestID_ = NodeID_, bool MakeInverse = true>
class CSRGraph {
  // Used for *non-negative* offsets within a neighborhood
  typedef std::make_unsigned<std::ptrdiff_t>::type OffsetT;

  // Used to access neighbors of vertex, basically sugar for iterators
  class Neighborhood {
    NodeID_ n_;
    DestID_** g_index_;
    OffsetT start_offset_;
   public:
    // start_offset is clamped to the degree so begin() never passes end().
    Neighborhood(NodeID_ n, DestID_** g_index, OffsetT start_offset) :
        n_(n), g_index_(g_index), start_offset_(0) {
      OffsetT max_offset = end() - begin();
      start_offset_ = std::min(start_offset, max_offset);
    }
    typedef DestID_* iterator;
    iterator begin() { return g_index_[n_] + start_offset_; }
    iterator end()   { return g_index_[n_+1]; }
  };

  // Frees index and neighbor arrays; for undirected graphs in_* alias
  // out_*, so they must not be freed twice.
  void ReleaseResources() {
    if (out_index_ != nullptr)
      delete[] out_index_;
    if (out_neighbors_ != nullptr)
      delete[] out_neighbors_;
    if (directed_) {
      if (in_index_ != nullptr)
        delete[] in_index_;
      if (in_neighbors_ != nullptr)
        delete[] in_neighbors_;
    }
  }

 public:
  CSRGraph() : directed_(false), num_nodes_(-1), num_edges_(-1),
    out_index_(nullptr), out_neighbors_(nullptr),
    in_index_(nullptr), in_neighbors_(nullptr) {}

  // Undirected graph: each edge is stored twice, so halve the span.
  CSRGraph(int64_t num_nodes, DestID_** index, DestID_* neighs) :
    directed_(false), num_nodes_(num_nodes),
    out_index_(index), out_neighbors_(neighs),
    in_index_(index), in_neighbors_(neighs) {
      num_edges_ = (out_index_[num_nodes_] - out_index_[0]) / 2;
    }

  // Directed graph with separate inverse (incoming) adjacency.
  CSRGraph(int64_t num_nodes, DestID_** out_index, DestID_* out_neighs,
        DestID_** in_index, DestID_* in_neighs) :
    directed_(true), num_nodes_(num_nodes),
    out_index_(out_index), out_neighbors_(out_neighs),
    in_index_(in_index), in_neighbors_(in_neighs) {
      num_edges_ = out_index_[num_nodes_] - out_index_[0];
    }

  CSRGraph(CSRGraph&& other) : directed_(other.directed_),
    num_nodes_(other.num_nodes_), num_edges_(other.num_edges_),
    out_index_(other.out_index_), out_neighbors_(other.out_neighbors_),
    in_index_(other.in_index_), in_neighbors_(other.in_neighbors_) {
      other.num_edges_ = -1;
      other.num_nodes_ = -1;
      other.out_index_ = nullptr;
      other.out_neighbors_ = nullptr;
      other.in_index_ = nullptr;
      other.in_neighbors_ = nullptr;
  }

  ~CSRGraph() {
    ReleaseResources();
  }

  void set_directed() {
    std::cout << "setting directed flag\n";
    // Fix: previously assigned false, contradicting both the method name
    // and the log message above.
    directed_ = true;
  }

  CSRGraph& operator=(CSRGraph&& other) {
    if (this != &other) {
      ReleaseResources();
      directed_ = other.directed_;
      num_edges_ = other.num_edges_;
      num_nodes_ = other.num_nodes_;
      out_index_ = other.out_index_;
      out_neighbors_ = other.out_neighbors_;
      in_index_ = other.in_index_;
      in_neighbors_ = other.in_neighbors_;
      other.num_edges_ = -1;
      other.num_nodes_ = -1;
      other.out_index_ = nullptr;
      other.out_neighbors_ = nullptr;
      other.in_index_ = nullptr;
      other.in_neighbors_ = nullptr;
    }
    return *this;
  }

  bool directed() const {
    return directed_;
  }

  int64_t num_nodes() const {
    return num_nodes_;
  }

  int64_t num_edges() const {
    return num_edges_;
  }

  // Number of stored directed arcs (undirected edges count twice).
  int64_t num_edges_directed() const {
    return directed_ ? num_edges_ : 2*num_edges_;
  }

  int64_t out_degree(NodeID_ v) const {
    return out_index_[v+1] - out_index_[v];
  }

  int64_t in_degree(NodeID_ v) const {
    static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
    return in_index_[v+1] - in_index_[v];
  }

  Neighborhood out_neigh(NodeID_ n, OffsetT start_offset = 0) const {
    return Neighborhood(n, out_index_, start_offset);
  }

  Neighborhood in_neigh(NodeID_ n, OffsetT start_offset = 0) const {
    static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
    return Neighborhood(n, in_index_, start_offset);
  }

  void PrintStats() const {
    std::cout << "Graph has " << num_nodes_ << " nodes and "
              << num_edges_ << " ";
    if (!directed_)
      std::cout << "un";
    std::cout << "directed edges for degree: ";
    std::cout << num_edges_/num_nodes_ << std::endl;
  }

  void PrintTopology() const {
    for (NodeID_ i=0; i < num_nodes_; i++) {
      std::cout << i << ": ";
      for (DestID_ j : out_neigh(i)) {
        std::cout << j << " ";
      }
      std::cout << std::endl;
    }
  }

  // Builds the index array (per-node pointers into neighs) from offsets.
  static DestID_** GenIndex(const pvector<SGOffset> &offsets, DestID_* neighs) {
    NodeID_ length = offsets.size();
    DestID_** index = new DestID_*[length];
    #pragma omp parallel for
    for (NodeID_ n=0; n < length; n++)
      index[n] = neighs + offsets[n];
    return index;
  }

  // Inverse of GenIndex: recover per-node offsets from the index array.
  pvector<SGOffset> VertexOffsets(bool in_graph = false) const {
    pvector<SGOffset> offsets(num_nodes_+1);
    for (NodeID_ n=0; n < num_nodes_+1; n++)
      if (in_graph)
        offsets[n] = in_index_[n] - in_index_[0];
      else
        offsets[n] = out_index_[n] - out_index_[0];
    return offsets;
  }

  Range<NodeID_> vertices() const {
    return Range<NodeID_>(num_nodes());
  }

 private:
  bool directed_;
  int64_t num_nodes_;
  int64_t num_edges_;
  DestID_** out_index_;
  DestID_*  out_neighbors_;
  DestID_** in_index_;
  DestID_*  in_neighbors_;
};
#endif // GRAPH_H_
|
mode.h | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#if defined(__NVCC__) || defined(__HIPCC__)
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/functional.h>
#include <thrust/inner_product.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#endif
#include <algorithm>
#include <cmath>
#include <utility>
#include <vector>
#ifdef PADDLE_WITH_MKLML
#include <omp.h>
#endif
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/copy_kernel.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/phi/kernels/funcs/eigen/eigen_function.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace phi {
namespace funcs {
// Map a column count to a launch block size: the smallest of
// {64, 128, 256, 512, 1024} that is >= col, capped at 1024.
// (The original also tested upper bounds like `col <= 512`, which are
// always true once the preceding branch failed -- removed as redundant.)
static int ComputeBlockSize(int col) {
  if (col > 512) return 1024;
  if (col > 256) return 512;
  if (col > 128) return 256;
  if (col > 64) return 128;
  return 64;
}
// Decompose `dim` around `axis`:
//   *pre  = product of extents before axis,
//   *n    = extent at axis,
//   *post = product of extents after axis,
// so a tensor can be viewed as (pre, n, post) for axis-wise kernels.
static inline void GetDims(
    const phi::DDim& dim, int axis, int* pre, int* n, int* post) {
*pre = 1;
*post = 1;
*n = dim[axis];
for (int i = 0; i < axis; ++i) {
(*pre) *= dim[i];
}
for (int i = axis + 1; i < dim.size(); ++i) {
(*post) *= dim[i];
}
}
// For each of the input_height rows, find the mode (most frequent value)
// and the index of its last occurrence in the original row order; write
// them to t_out[row] / t_indices[row].  Ties go to the smaller value
// because runs are scanned in ascending sorted order.
template <typename T, typename Type>
static void GetMode(Type input_height,
                    Type input_width,
                    int input_dim,
                    const DenseTensor* input,
                    T* t_out,
                    Type* t_indices) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (Type i = 0; i < input_height; ++i) {
    // Materialize the row as (value, original column) pairs.
    std::vector<std::pair<T, Type>> col_vec;
    col_vec.reserve(input_width);
    if (input_dim == 1) {
      auto e_input = EigenVector<T>::Flatten(*input);
      for (Type j = 0; j < input_width; ++j) {
        col_vec.emplace_back(std::pair<T, Type>(e_input(j), j));
      }
    } else {
      auto e_input = EigenMatrix<T>::Reshape(*input, input_dim - 1);
      for (Type j = 0; j < input_width; ++j) {
        col_vec.emplace_back(std::pair<T, Type>(e_input(i, j), j));
      }
    }
    // Sort ascending by value with NaNs ordered last.
    std::sort(col_vec.begin(),
              col_vec.end(),
              [](const std::pair<T, Type>& l, const std::pair<T, Type>& r) {
                return (!std::isnan(static_cast<double>(l.first)) &&
                        std::isnan(static_cast<double>(r.first))) ||
                       (l.first < r.first);
              });
    // Scan runs of equal values, tracking the longest one.
    T mode = 0;
    int64_t indice = 0;
    int64_t cur_freq = 0;
    int64_t max_freq = 0;
    for (int64_t pos = 0; pos < input_width; ++pos) {
      ++cur_freq;
      bool run_ends = (pos == input_width - 1) ||
                      (col_vec[pos + 1].first != col_vec[pos].first);
      if (run_ends) {
        if (cur_freq > max_freq) {
          max_freq = cur_freq;
          mode = col_vec[pos].first;
          indice = col_vec[pos].second;
        }
        cur_freq = 0;
      }
    }
    t_out[i] = mode;
    t_indices[i] = indice;
  }
}
// Scatter step for the mode gradient/assignment: for each row i, copy one
// input value into output_data at that row's recorded mode index.
// 1-D inputs use element 0 of both tensors; otherwise row i of the
// (input_dim-1)-collapsed matrices is used.
template <typename T, typename Type>
static void ModeAssign(const Type& input_height,
                       const Type& input_width,
                       const int& input_dim,
                       const DenseTensor* input,
                       const DenseTensor* indices,
                       T* output_data) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (Type i = 0; i < input_height; ++i) {
if (input_dim == 1) {
auto e_input = EigenVector<T>::Flatten(*input);
auto e_indices = EigenVector<Type>::Flatten(*indices);
output_data[i * input_width + e_indices(0)] = e_input(0);
} else {
auto e_input = EigenMatrix<T>::Reshape(*input, input_dim - 1);
auto e_indices = EigenMatrix<Type>::Reshape(*indices, input_dim - 1);
output_data[i * input_width + e_indices(i, 0)] = e_input(i, 0);
}
}
}
#if defined(__NVCC__) || defined(__HIPCC__)
// GPU path: compute per-row mode and index via thrust sort/reduce.
// For each of the num_rows rows (each num_cols wide):
//   1. sort a copy of the row together with its original column indices,
//   2. count distinct values (adjacent-inequality inner product + 1),
//   3. reduce_by_key to get each distinct value's frequency,
//   4. max_element picks the mode; find + counts-1 recovers the original
//      column of the mode's last occurrence in sorted order.
// Rows are processed serially on the host, launching device work per row.
template <typename T>
static void GetModebySort(const phi::GPUContext& dev_ctx,
                          const DenseTensor* input_tensor,
                          const int64_t num_cols,
                          const int64_t num_rows,
                          T* out_tensor,
                          int64_t* indices_tensor) {
// Work on a device copy so the caller's input is untouched by the sorts.
DenseTensor input_tmp;
input_tmp.Resize(phi::make_ddim({num_rows, num_cols}));
T* input_tmp_data = dev_ctx.Alloc<T>(&input_tmp);
phi::Copy(dev_ctx, *input_tensor, dev_ctx.GetPlace(), false, &input_tmp);
thrust::device_ptr<T> out_tensor_ptr(out_tensor);
thrust::device_ptr<int64_t> indices_tensor_ptr(indices_tensor);
for (int64_t i = 0; i < num_rows; ++i) {
T* begin = input_tmp_data + num_cols * i;
T* end = input_tmp_data + num_cols * (i + 1);
// indices_data tracks each value's original column through the sort.
thrust::device_vector<int64_t> indices_data(num_cols);
thrust::sequence(
thrust::device, indices_data.begin(), indices_data.begin() + num_cols);
thrust::sort_by_key(thrust::device, begin, end, indices_data.begin());
// unique = number of distinct values in the sorted row.
int unique = 1 + thrust::inner_product(thrust::device,
begin,
end - 1,
begin + 1,
0,
thrust::plus<int>(),
thrust::not_equal_to<T>());
thrust::device_vector<T> keys_data(unique);
thrust::device_vector<int64_t> cnts_data(unique);
// Frequency of each distinct value (row is sorted, so keys group).
thrust::reduce_by_key(thrust::device,
begin,
end,
thrust::constant_iterator<int>(1),
keys_data.begin(),
cnts_data.begin());
auto it = thrust::max_element(
thrust::device, cnts_data.begin(), cnts_data.begin() + unique);
T mode = keys_data[it - cnts_data.begin()];
int64_t counts = cnts_data[it - cnts_data.begin()];
// First occurrence of the mode plus (counts-1) = its last occurrence
// in the sorted row; map back to the original column index.
auto pos = thrust::find(thrust::device, begin, end, mode);
int64_t index = indices_data[pos - begin + counts - 1];
out_tensor_ptr[i] = static_cast<T>(mode);
indices_tensor_ptr[i] = static_cast<int64_t>(index);
}
}
#endif
} // namespace funcs
} // namespace phi
|
bins_static.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: clabra
//
#if !defined(KRATOS_BINS_CONTAINER_H_INCLUDE)
#define KRATOS_BINS_CONTAINER_H_INCLUDE
#include "tree.h"
namespace Kratos
{
template< std::size_t TDimension,
class TPointType,
class TContainerType,
class TPointerType = typename TContainerType::value_type,
class TIteratorType = typename TContainerType::iterator,
class TDistanceIteratorType = typename std::vector<double>::iterator,
class TDistanceFunction = Kratos::SearchUtils::SquaredDistanceFunction<TDimension,TPointType> >
class Bins : public TreeNode<TDimension,TPointType, TPointerType, TIteratorType, TDistanceIteratorType>
{
public:
// Compile-time dimensionality of the space handled by these bins.
enum { Dimension = TDimension };
typedef TPointType PointType;
typedef TContainerType ContainerType;
typedef TIteratorType IteratorType;
typedef TDistanceIteratorType DistanceIteratorType;
typedef TPointerType PointerType;
typedef TDistanceFunction DistanceFunction;
// Base tree-node type this bins container plugs into.
typedef TreeNode<Dimension,PointType,PointerType,IteratorType,DistanceIteratorType> TreeNodeType;
typedef typename TreeNodeType::SizeType SizeType;
typedef typename TreeNodeType::IndexType IndexType;
typedef typename TreeNodeType::CoordinateType CoordinateType;
// Fixed-size per-dimension arrays (cell sizes, divisions, cell indices).
typedef Tvector<CoordinateType,Dimension> CoordinateArray;
typedef Tvector<SizeType,Dimension> SizeArray;
typedef Tvector<IndexType,Dimension> IndexArray;
typedef typename TreeNodeType::IteratorIteratorType IteratorIteratorType;
typedef typename TreeNodeType::SearchStructureType SearchStructureType;
// Local Container ( PointPointer Container per Cell )
// can be different to ContainerType
// not always LocalContainerType == ContainerType ( if ContainerType = C array )
typedef std::vector<PointerType> LocalContainerType;
typedef typename LocalContainerType::iterator LocalIterator;
// Integer cell coordinates, one index per dimension.
typedef Tvector<IndexType,TDimension> CellType;
// mIndexCell holds, per cell, an iterator into the reordered point range.
typedef std::vector<IteratorType> IteratorVector;
typedef typename IteratorVector::iterator IteratorIterator;
typedef typename IteratorVector::const_iterator IteratorConstIterator;
// Reusable search kernels operating on a [begin,end) sub-range of points.
typedef Kratos::SearchUtils::SearchNearestInRange<PointType,PointerType,IteratorType,DistanceFunction,CoordinateType> SearchNearestInRange;
typedef Kratos::SearchUtils::SearchRadiusInRange<PointType,IteratorType,DistanceIteratorType,DistanceFunction,SizeType,CoordinateType> SearchRadiusInRange;
typedef Kratos::SearchUtils::SearchBoxInRange<PointType,IteratorType,SizeType,TDimension> SearchBoxInRange;
// Legacy typedefs ( kept to preserve compatibility in case someone was using these definitions )
typedef LocalContainerType PointVector;
typedef LocalIterator PointIterator;
typedef TreeNodeType LeafType;
/// Pointer definition of Bins
KRATOS_CLASS_POINTER_DEFINITION(Bins);
public:
/**
 * @brief Default Constructor
 *
 * Creates an empty bins object: both point iterators are set to the tree's
 * null iterator, so no cells are allocated and every search is a no-op.
 */
Bins() : mPointBegin(this->NullIterator()), mPointEnd(this->NullIterator()) {};
/**
* @brief Constructs a new BinsStatic
*
* Construct a new BinsStatic using a list of points and an automatically calculated cell size.
*
* @param PointBegin Iterator to the first object of the bins
* @param PointEnd Iterator to the last object of the bins
* @param BucketSize Unused.
*/
Bins( IteratorType const& PointBegin, IteratorType const& PointEnd, SizeType BucketSize = 1 )
    : mPointBegin(PointBegin), mPointEnd(PointEnd)
{
    // Empty range: leave the bins without cells; every search is a no-op.
    if(mPointBegin == mPointEnd)
        return;
    const auto number_of_points = std::distance(mPointBegin, mPointEnd);
    // Derive the bounding box and cell layout from the points themselves.
    CalculateBoundingBox();
    CalculateCellSize(number_of_points);
    AllocateCellsContainer();
    GenerateBins();
}
/**
* @brief Constructs a new BinsStatic
*
* Construct a new BinsStatic using a list of points, an automatically calculated cell size, and a custom bounding box.
*
* @param PointBegin Iterator to the first object of the bins
* @param PointEnd Iterator to the last object of the bins
* @param MinPoint Lower point of the custom bounding box
* @param MaxPoint Upper point of the custom bounding box
* @param BucketSize Unused
*/
Bins( IteratorType const& PointBegin, IteratorType const& PointEnd, PointType const& MinPoint, PointType const& MaxPoint, SizeType BucketSize = 1 )
    : mPointBegin(PointBegin), mPointEnd(PointEnd)
{
    // Empty range: nothing to index.
    if(mPointBegin == mPointEnd)
        return;
    const auto number_of_points = std::distance(mPointBegin, mPointEnd);
    // Adopt the caller-provided bounding box instead of computing one.
    for(SizeType d = 0 ; d < TDimension ; d++)
    {
        mMinPoint[d] = MinPoint[d];
        mMaxPoint[d] = MaxPoint[d];
    }
    CalculateCellSize(number_of_points);
    AllocateCellsContainer();
    GenerateBins();
}
/**
* @brief Constructs a new BinsStatic
*
* Constructs a new BinsStatic using a list of points and a user-provided cell size.
*
* @param PointBegin Iterator to the first object of the bins
* @param PointEnd Iterator to the last object of the bins
* @param cellsize Size of the cells (equal for every dimension)
* @param BucketSize Unused
*/
Bins( IteratorType const& PointBegin, IteratorType const& PointEnd, CoordinateType cellsize, SizeType BucketSize = 1 )
: mPointBegin(PointBegin), mPointEnd(PointEnd)
{
// Empty range: leave the bins without cells.
if(mPointBegin==mPointEnd)
return;
CalculateBoundingBox();
// Use the caller-supplied cell edge (same in every dimension) instead of
// deriving one from the number of points.
AssignCellSize(cellsize);
AllocateCellsContainer();
GenerateBins();
}
// Destructor
~Bins() override { }
//************************************************************************
/// Iterator to the first point stored in the bins.
IteratorType Begin()
{
return mPointBegin;
}
//************************************************************************
/// Iterator past the last point stored in the bins.
/// Bug fix: this previously returned mPointBegin, which made the range
/// [Begin(), End()) look empty to any caller iterating over the bins.
IteratorType End()
{
return mPointEnd;
}
//************************************************************************
/// Deprecated: use GetCellSize() instead. Cell edge length along dimension iDim.
KRATOS_DEPRECATED CoordinateType CellSize( SizeType const& iDim )
{
return mCellSize[iDim];
}
//************************************************************************
/// Deprecated: use GetDivisions() instead. Number of cells along dimension iDim.
KRATOS_DEPRECATED SizeType NumCell( SizeType const& iDim )
{
return mN[iDim];
}
/**
 * @brief Get the Divisions object
 *
 * @return SizeArray& Array containing the number of cells in each dimension
 */
SizeArray& GetDivisions() {
return mN;
}
/**
 * @brief Get the Cell Size object
 *
 * @return CoordinateArray& Array containing the cell edge length in each dimension
 */
CoordinateArray& GetCellSize() {
return mCellSize;
}
/**
 * @brief Get the Min Point object
 *
 * @return PointType& Lower corner of the bounding box of the bins
 */
PointType& GetMinPoint() {
return mMinPoint;
}
/**
 * @brief Get the Max Point object
 *
 * @return PointType& Upper corner of the bounding box of the bins
 */
PointType& GetMaxPoint() {
return mMaxPoint;
}
//************************************************************************
private:
//************************************************************************
void CalculateBoundingBox()
{
for(SizeType i = 0 ; i < TDimension ; i++)
{
mMinPoint[i] = (**mPointBegin)[i];
mMaxPoint[i] = (**mPointBegin)[i];
}
for(IteratorType Point = mPointBegin ; Point != mPointEnd ; Point++)
for(SizeType i = 0 ; i < TDimension ; i++)
{
if( (**Point)[i] < mMinPoint[i] ) mMinPoint[i] = (**Point)[i];
if( (**Point)[i] > mMaxPoint[i] ) mMaxPoint[i] = (**Point)[i];
}
}
//************************************************************************
/**
 * @brief Calculates the cell size of the bins.
 *
 * Calculates the cell size of the bins using an average approximation of the
 * objects in the bins: it targets roughly ApproximatedSize^(1/Dimension)
 * cells per axis, scaled by each axis' share of the average box length.
 *
 * @param ApproximatedSize Approximate number of objects that will be stored in the bins
 */
void CalculateCellSize(std::size_t ApproximatedSize)
{
// Target number of cells per dimension: N^(1/Dim).
std::size_t average_number_of_cells = static_cast<std::size_t>(std::pow(static_cast<double>(ApproximatedSize), 1.00 / Dimension));
// Box extent per axis; sized 3 regardless of Dimension (Dimension <= 3
// is assumed here — TODO confirm against the template instantiations).
std::array<double, 3> lengths;
double average_length = 0.00;
for (int i = 0; i < Dimension; i++) {
lengths[i] = mMaxPoint[i] - mMinPoint[i];
average_length += lengths[i];
}
// NOTE(review): divides by 3 even when Dimension < 3, which under-scales
// the average for 1D/2D bins — confirm this is intended.
average_length *= 1.00 / 3.00;
// Degenerate (near-zero) box: collapse to a single cell per axis.
// mCellSize/mInvCellSize are left untouched in this branch.
if (average_length < std::numeric_limits<double>::epsilon()) {
for(int i = 0; i < Dimension; i++) {
mN[i] = 1;
}
return;
}
for (int i = 0; i < Dimension; i++) {
// Divisions proportional to this axis' relative extent; +1 keeps mN >= 1.
mN[i] = static_cast<std::size_t>(lengths[i] / average_length * (double)average_number_of_cells) + 1;
if (mN[i] > 1) {
mCellSize[i] = lengths[i] / mN[i];
} else {
// Flat axis: fall back to the average length so the cell has volume.
mCellSize[i] = average_length;
}
// Cache the reciprocal so CalculatePosition can multiply instead of divide.
mInvCellSize[i] = 1.00 / mCellSize[i];
}
}
//************************************************************************
/// Imposes the same cell edge BoxSize on every axis and derives the number
/// of divisions needed to cover the bounding box (at least one per axis).
void AssignCellSize( CoordinateType BoxSize )
{
    for(SizeType d = 0 ; d < TDimension ; d++)
    {
        const CoordinateType extent = mMaxPoint[d] - mMinPoint[d];
        mCellSize[d] = BoxSize;
        mInvCellSize[d] = 1.00 / BoxSize;
        mN[d] = static_cast<SizeType>( extent / BoxSize ) + 1;
    }
}
//************************************************************************
void AllocateCellsContainer()
{
SizeType Size = 1;
for(SizeType i = 0 ; i < TDimension ; i++)
Size *= mN[i];
mIndexCell.resize(Size+1);
mIndexCellBegin = mIndexCell.begin();
mIndexCellEnd = mIndexCell.end();
}
//************************************************************************
// Counting-sort of the points into cells: reorders the [mPointBegin,
// mPointEnd) range in place so that each cell's points are contiguous and
// mIndexCell[i] points at the start of cell i.
void GenerateBins( )
{
// Copy of the original point pointers; the source range is overwritten below.
LocalContainerType TempPoint(mPointBegin,mPointEnd);
// Reset index vector
for( IteratorIterator Iter = mIndexCell.begin(); Iter != mIndexCell.end(); Iter++)
*Iter = mPointBegin;
// Count points per cell, storing each count one slot ahead (cell i counts
// into slot i+1) so the prefix sum below yields begin offsets.
for( IteratorType Point = mPointBegin ; Point != mPointEnd ; Point++)
mIndexCell[ CalculateIndex(**Point) + 1 ]++;
// Storage/reshuffling pass 1: prefix-sum the counts into end-iterators.
// (Each slot currently encodes its count as an offset from mPointBegin.)
for( IteratorIterator Iter = mIndexCell.begin()+1 ; Iter != mIndexCell.end() ; Iter++)
*Iter = *(Iter-1) + SearchUtils::PointerDistance(mPointBegin,*Iter);
// Point pass 2: scatter each point into its cell's slot, advancing the
// cell's iterator as we store (so it ends up pointing one past the cell).
for( LocalIterator Point = TempPoint.begin() ; Point != TempPoint.end() ; Point++)
*(mIndexCell[CalculateIndex(**Point)]++) = *Point;
// Storage/reshuffling pass 2: shift the iterators back by one cell, in
// reverse order, turning "one past cell i" into "begin of cell i".
for(IteratorIterator Iter = mIndexCell.end()-1; Iter != mIndexCell.begin(); Iter--)
*Iter = *(Iter-1);
mIndexCell[0] = mPointBegin;
}
//************************************************************************
/// Maps one coordinate to its cell index along ThisDimension, clamping the
/// result into the valid range [0, mN[ThisDimension]-1].
IndexType CalculatePosition( CoordinateType const& ThisCoord, SizeType ThisDimension )
{
    const CoordinateType offset = (ThisCoord - mMinPoint[ThisDimension]) * mInvCellSize[ThisDimension];
    IndexType cell = static_cast<SizeType>( (offset < 0.00) ? 0.00 : offset );
    const IndexType last = mN[ThisDimension]-1;
    return (cell > last) ? last : cell;
}
//************************************************************************
// Flattens a point's per-dimension cell positions into a single linear cell
// index (row-major with dimension 0 varying fastest).
IndexType CalculateIndex( PointType const& ThisPoint )
{
IndexType Index = 0;
// Horner-style accumulation from the highest dimension downwards.
for(SizeType iDim = TDimension-1 ; iDim > 0 ; iDim--)
{
Index += CalculatePosition(ThisPoint[iDim],iDim);
Index *= mN[iDim-1];
}
Index += CalculatePosition(ThisPoint[0],0);
return Index;
}
//************************************************************************
// Same linearization as above, but starting from already-computed integer
// cell coordinates instead of a point.
IndexType CalculateIndex( CellType const& ThisIndex )
{
IndexType Index = 0;
for(SizeType iDim = TDimension-1 ; iDim > 0 ; iDim--)
{
Index += ThisIndex[iDim];
Index *= mN[iDim-1];
}
Index += ThisIndex[0];
return Index;
}
//************************************************************************
/// Integer cell coordinates (one clamped index per dimension) of ThisPoint.
CellType CalculateCell( PointType const& ThisPoint )
{
    CellType cell;
    for(SizeType d = 0 ; d < TDimension ; d++)
        cell[d] = CalculatePosition(ThisPoint[d],d);
    return cell;
}
/// Cell coordinates of ThisPoint shifted by Radius along every axis; used
/// with +/-Radius to obtain the corners of a search box.
CellType CalculateCell( PointType const& ThisPoint, CoordinateType Radius )
{
    CellType cell;
    for(SizeType d = 0 ; d < TDimension ; d++)
        cell[d] = CalculatePosition(ThisPoint[d]+Radius,d);
    return cell;
}
//************************************************************************
public:
//************************************************************************
//************************************************************************
/**
* @brief Return the closest point to ThisPoint in case it exists or a null pointer otherwise
*
* @param ThisPoint Searched Point.
* @param Tolerance Tolerance of the search.
* @return PointerType a pointer to the nearest point in case it exists or nullptr otherwise.
*/
/// Looks for ThisPoint inside the single cell that contains it and returns
/// the nearest stored point found there, or the null pointer when the cell
/// holds no point. (Tolerance is part of the public signature; the search
/// itself is delegated to SearchNearestInBox.)
PointerType ExistPoint( PointerType const& ThisPoint, CoordinateType const Tolerance = static_cast<CoordinateType>(10.0*DBL_EPSILON) )
{
    PointerType candidate;
    CoordinateType best_distance = static_cast<CoordinateType>(DBL_MAX);
    bool was_found;
    SearchStructureType Box( CalculateCell(*ThisPoint), mN, mIndexCellBegin );
    SearchNearestInBox( *ThisPoint, candidate, best_distance, Box, was_found );
    return was_found ? candidate : this->NullPointer();
}
/**
* @brief Return the nearest point to ThisPoint. This function can not return the same point.
*
* @param ThisPoint Searched Point.
* @return PointerType Pointer to the nearest element. Cannot return the same point as the one given as input.
*/
/// Nearest neighbour of a point that is itself stored in the bins; the
/// query point is excluded from the candidates. Falls back to the first
/// stored point when nothing closer is found.
PointerType SearchNearestPointInner( PointerType& ThisPoint )
{
    PointerType nearest = *mPointBegin; //static_cast<PointerType>(NULL);
    CoordinateType nearest_distance = static_cast<CoordinateType>(DBL_MAX);
    SearchStructureType Box( CalculateCell(*ThisPoint), mN, mIndexCellBegin );
    SearchNearestPointLocalInner( ThisPoint, nearest, nearest_distance, Box );
    return nearest;
}
/**
* @brief Return the nearest point to ThisPoint. This function can return the same point with distance 0.
*
* @param ThisPoint Searched Point.
* @return PointerType Pointer to the nearest element. ThisPoint in case it exists inside the bins.
*/
/// Nearest stored point to ThisPoint; may return the point itself (with
/// distance 0) when it exists inside the bins.
PointerType SearchNearestPoint( PointType const& ThisPoint )
{
    PointerType nearest = *mPointBegin; //static_cast<PointerType>(NULL);
    CoordinateType nearest_distance = static_cast<CoordinateType>(DBL_MAX);
    SearchStructureType Box( CalculateCell(ThisPoint), mN, mIndexCellBegin );
    SearchNearestPointLocal( ThisPoint, nearest, nearest_distance, Box );
    return nearest;
}
//************************************************************************
/// Nearest stored point to ThisPoint; also reports the distance (as
/// computed by DistanceFunction — squared by default) via rResultDistance.
PointerType SearchNearestPoint( PointType const& ThisPoint, CoordinateType& rResultDistance )
{
    PointerType nearest = *mPointBegin; //static_cast<PointerType>(NULL);
    rResultDistance = static_cast<CoordinateType>(DBL_MAX);
    SearchStructureType Box( CalculateCell(ThisPoint), mN, mIndexCellBegin );
    SearchNearestPointLocal( ThisPoint, nearest, rResultDistance, Box);
    return nearest;
}
//************************************************************************
// New Thread Safe!!!
/// Thread-safe nearest-point search: the caller owns the SearchStructureType
/// working object, so no shared state inside the bins is mutated.
PointerType SearchNearestPoint( PointType const& ThisPoint, CoordinateType& rResultDistance, SearchStructureType& Box )
{
    PointerType nearest = *mPointBegin; //static_cast<PointerType>(NULL);
    rResultDistance = static_cast<CoordinateType>(DBL_MAX);
    Box.Set( CalculateCell(ThisPoint), mN, mIndexCellBegin );
    SearchNearestPointLocal( ThisPoint, nearest, rResultDistance, Box);
    return nearest;
}
//************************************************************************
//************************************************************************
/// TreeNode interface: nearest-point search allocating its own search box.
void SearchNearestPoint( PointType const& ThisPoint, PointerType& rResult, CoordinateType& rResultDistance ) override
{
SearchStructureType Box( CalculateCell(ThisPoint), mN, mIndexCellBegin );
SearchNearestPointLocal(ThisPoint,rResult,rResultDistance,Box);
}
//************************************************************************
/// TreeNode interface: nearest-point search reusing a caller-owned box.
/// rResult/rResultDistance are in-out: a better candidate found by an
/// enclosing structure survives if nothing closer is stored here.
void SearchNearestPoint( PointType const& ThisPoint, PointerType& rResult, CoordinateType& rResultDistance, SearchStructureType& Box ) override
{
// This case is when BinStatic is a LeafType in another spatial structure.
// Then, it is possible a better Result exists before this search.
Box.Set( CalculateCell(ThisPoint), mN, mIndexCellBegin );
SearchNearestPointLocal( ThisPoint, rResult, rResultDistance, Box );
}
//************************************************************************
/// Batch nearest-neighbour search over NumberOfPoints contiguous points.
/// NOTE(review): ResultsDistances is taken BY VALUE, so the distances
/// written inside the loop are discarded when the function returns —
/// callers cannot retrieve them. Looks like it should be a reference;
/// confirm before relying on it.
/// NOTE(review): the loop index is int while NumberOfPoints is SizeType;
/// very large inputs would make the signed/unsigned comparison unsafe.
void SearchNearestPoint( PointerType const& ThisPoints, SizeType const& NumberOfPoints, IteratorType &Results, std::vector<CoordinateType> ResultsDistances)
{
#pragma omp parallel for
for(int k=0; k< NumberOfPoints; k++)
Results[k] = SearchNearestPoint((&(*ThisPoints))[k],ResultsDistances[k]);
}
//************************************************************************
//************************************************************************
// **** THREAD SAFE -> The user pass the SearchStructure (BinBox)
// **** THREAD SAFE -> The user passes the SearchStructure (BinBox).
// Expanding-box nearest search: start with the cell containing ThisPoint
// and grow the box one layer at a time until a point is found. The empty
// guard prevents the loop from spinning forever on bins with no points.
void SearchNearestPointLocal( PointType const& ThisPoint, PointerType& rResult, CoordinateType& rResultDistance, SearchStructureType& Box )
{
if( mPointBegin == mPointEnd )
return;
bool Found;
// initial search (++Box enlarges the searched cell neighbourhood —
// presumably by one cell layer per increment; see SearchStructureType)
++Box;
SearchNearestInBox( ThisPoint, rResult, rResultDistance, Box, Found );
// increase the box and try again until some point falls inside it
while(!Found)
{
++Box;
SearchNearestInBox( ThisPoint, rResult, rResultDistance, Box, Found );
}
}
//************************************************************************
//************************************************************************
// **** THREAD SAFE -> The user pass the SearchStructure (BinBox)
// **** THREAD SAFE -> The user passes the SearchStructure (BinBox).
// Same expanding-box strategy as SearchNearestPointLocal, but the query
// point itself (a stored point) is excluded from the candidates.
void SearchNearestPointLocalInner( PointerType& ThisPoint, PointerType& rResult, CoordinateType& rResultDistance, SearchStructureType& Box )
{
if( mPointBegin == mPointEnd )
return;
bool Found;
// initial search
++Box;
SearchNearestInBoxInner( ThisPoint, rResult, rResultDistance, Box, Found );
// increase the box and try again until a distinct point is found
while(!Found)
{
++Box;
SearchNearestInBoxInner( ThisPoint, rResult, rResultDistance, Box, Found );
}
}
//************************************************************************
//************************************************************************
/// Collects every stored point within Radius of ThisPoint (visiting all
/// cells overlapped by the cube [ThisPoint-Radius, ThisPoint+Radius]),
/// writing points through Results and distances through ResultsDistances,
/// up to MaxNumberOfResults. Returns how many were found.
SizeType SearchInRadius( PointType const& ThisPoint, CoordinateType const& Radius, IteratorType Results,
                         DistanceIteratorType ResultsDistances, SizeType const& MaxNumberOfResults )
{
    SizeType found = 0;
    const CoordinateType squared_radius = Radius * Radius;
    SearchStructureType Box( CalculateCell(ThisPoint,-Radius), CalculateCell(ThisPoint,Radius), mN, mIndexCellBegin );
    SearchInRadiusLocal( ThisPoint, Radius, squared_radius, Results, ResultsDistances, found, MaxNumberOfResults, Box );
    return found;
}
//************************************************************************
/// Thread-safe radius search: same as above but reuses a caller-owned
/// search structure instead of constructing one.
SizeType SearchInRadius( PointType const& ThisPoint, CoordinateType const& Radius, IteratorType Results,
                         DistanceIteratorType ResultsDistances, SizeType const& MaxNumberOfResults, SearchStructureType& Box )
{
    SizeType found = 0;
    const CoordinateType squared_radius = Radius * Radius;
    Box.Set( CalculateCell(ThisPoint,-Radius), CalculateCell(ThisPoint,Radius), mN, mIndexCellBegin );
    SearchInRadiusLocal( ThisPoint, Radius, squared_radius, Results, ResultsDistances, found, MaxNumberOfResults, Box );
    return found;
}
//************************************************************************
/// TreeNode interface radius search. NumberOfResults is in-out and is NOT
/// reset here: results are appended to whatever the caller accumulated.
void SearchInRadius( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results,
DistanceIteratorType& ResultsDistances, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults ) override
{
SearchStructureType Box( CalculateCell(ThisPoint,-Radius), CalculateCell(ThisPoint,Radius), mN, mIndexCellBegin );
SearchInRadiusLocal( ThisPoint, Radius, Radius2, Results, ResultsDistances, NumberOfResults, MaxNumberOfResults, Box);
}
//************************************************************************
/// Same as above, but reusing a caller-owned search structure (thread safe).
void SearchInRadius( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results,
DistanceIteratorType& ResultsDistances, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults, SearchStructureType& Box ) override
{
Box.Set( CalculateCell(ThisPoint,-Radius), CalculateCell(ThisPoint,Radius), mN, mIndexCellBegin );
SearchInRadiusLocal( ThisPoint, Radius, Radius2, Results, ResultsDistances, NumberOfResults, MaxNumberOfResults, Box);
}
//************************************************************************
/// Batch radius search over NumberOfPoints contiguous points; the k-th
/// result count goes into NumberOfResults[k]. The Results/ResultsDistances
/// vectors are copied, but their elements are iterators, so writes through
/// them still reach the caller's buffers.
/// NOTE(review): int loop index vs SizeType bound, as in the batch
/// nearest-point search above.
void SearchInRadius( PointerType const& ThisPoints, SizeType const& NumberOfPoints, std::vector<CoordinateType> const& Radius, std::vector<IteratorType> Results,
std::vector<DistanceIteratorType> ResultsDistances, std::vector<SizeType>& NumberOfResults, SizeType const& MaxNumberOfResults )
{
#pragma omp parallel for
for(int k=0; k< NumberOfPoints; k++)
NumberOfResults[k] = SearchInRadius((&(*ThisPoints))[k],Radius[k],Results[k],ResultsDistances[k],MaxNumberOfResults);
}
//************************************************************************
// **** THREAD SAFE -> The user passes the SearchStructure (BinBox).
// Dimension-specialized workers: overload resolution on the Box's last
// template argument picks the right cell-iteration pattern.
// Dimension = 1: a single contiguous row of cells.
void SearchInRadiusLocal( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results,
DistanceIteratorType& ResultsDistances, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box )
{
SearchRadiusInRange()(*(Box.RowBegin),*(Box.RowEnd),ThisPoint,Radius2,Results,ResultsDistances,NumberOfResults,MaxNumberOfResults);
}
// Dimension = 2: iterate the rows selected along axis 1.
void SearchInRadiusLocal( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results,
DistanceIteratorType& ResultsDistances, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box )
{
for(IndexType I = Box.Axis[1].Begin() ; I <= Box.Axis[1].End() ; I += Box.Axis[1].Block )
SearchRadiusInRange()(Box.RowBegin[I],Box.RowEnd[I],ThisPoint,Radius2,Results,ResultsDistances,NumberOfResults,MaxNumberOfResults);
}
// Dimension = 3: iterate slabs along axis 2, then rows along axis 1.
void SearchInRadiusLocal( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results,
DistanceIteratorType& ResultsDistances, SizeType& NumberOfResults, SizeType const& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box )
{
for(IndexType II = Box.Axis[2].Begin() ; II <= Box.Axis[2].End() ; II += Box.Axis[2].Block )
for(IndexType I = II + Box.Axis[1].Begin() ; I <= II + Box.Axis[1].End() ; I += Box.Axis[1].Block )
SearchRadiusInRange()(Box.RowBegin[I],Box.RowEnd[I],ThisPoint,Radius2,Results,ResultsDistances,NumberOfResults,MaxNumberOfResults);
}
//************************************************************************
//************************************************************************
/// Radius search that reports only the points, not their distances.
/// Returns how many points were written through Results.
SizeType SearchInRadius( PointType const& ThisPoint, CoordinateType Radius, IteratorType Results, SizeType MaxNumberOfResults )
{
    SizeType found = 0;
    const CoordinateType squared_radius = Radius * Radius;
    SearchStructureType Box( CalculateCell(ThisPoint,-Radius), CalculateCell(ThisPoint,Radius), mN, mIndexCellBegin );
    SearchInRadiusLocal( ThisPoint, Radius, squared_radius, Results, found, MaxNumberOfResults, Box );
    return found;
}
//************************************************************************
/// Distance-less radius search reusing a caller-owned search structure
/// (thread safe).
SizeType SearchInRadius( PointType const& ThisPoint, CoordinateType Radius, IteratorType Results,
                         SizeType MaxNumberOfResults, SearchStructureType& Box )
{
    SizeType found = 0;
    const CoordinateType squared_radius = Radius * Radius;
    Box.Set( CalculateCell(ThisPoint,-Radius), CalculateCell(ThisPoint,Radius), mN, mIndexCellBegin );
    SearchInRadiusLocal( ThisPoint, Radius, squared_radius, Results, found, MaxNumberOfResults, Box );
    return found;
}
//************************************************************************
/// TreeNode interface distance-less radius search; NumberOfResults is
/// in-out (appended to, not reset).
void SearchInRadius( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results,
SizeType& NumberOfResults, SizeType const& MaxNumberOfResults ) override
{
SearchStructureType Box( CalculateCell(ThisPoint,-Radius), CalculateCell(ThisPoint,Radius), mN, mIndexCellBegin );
SearchInRadiusLocal( ThisPoint, Radius, Radius2, Results, NumberOfResults, MaxNumberOfResults, Box );
}
//************************************************************************
/// Same as above, reusing a caller-owned search structure (thread safe).
void SearchInRadius( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results,
SizeType& NumberOfResults, SizeType const& MaxNumberOfResults, SearchStructureType& Box ) override
{
Box.Set( CalculateCell(ThisPoint,-Radius), CalculateCell(ThisPoint,Radius), mN, mIndexCellBegin );
SearchInRadiusLocal( ThisPoint, Radius, Radius2, Results, NumberOfResults, MaxNumberOfResults, Box );
}
//************************************************************************
// Distance-less dimension-specialized workers (same cell-iteration
// patterns as the distance-reporting overloads above).
// Dimension = 1
void SearchInRadiusLocal( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results,
SizeType& NumberOfResults, SizeType const& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box )
{
SearchRadiusInRange()(*(Box.RowBegin),*(Box.RowEnd),ThisPoint,Radius2,Results,NumberOfResults,MaxNumberOfResults);
}
// Dimension = 2
void SearchInRadiusLocal( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results,
SizeType& NumberOfResults, SizeType const& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box )
{
for(IndexType I = Box.Axis[1].Begin() ; I <= Box.Axis[1].End() ; I += Box.Axis[1].Block )
SearchRadiusInRange()(Box.RowBegin[I],Box.RowEnd[I],ThisPoint,Radius2,Results,NumberOfResults,MaxNumberOfResults);
}
// Dimension = 3
void SearchInRadiusLocal( PointType const& ThisPoint, CoordinateType const& Radius, CoordinateType const& Radius2, IteratorType& Results,
SizeType& NumberOfResults, SizeType const& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box )
{
for(IndexType II = Box.Axis[2].Begin() ; II <= Box.Axis[2].End() ; II += Box.Axis[2].Block )
for(IndexType I = II + Box.Axis[1].Begin() ; I <= II + Box.Axis[1].End() ; I += Box.Axis[1].Block )
SearchRadiusInRange()(Box.RowBegin[I],Box.RowEnd[I],ThisPoint,Radius2,Results,NumberOfResults,MaxNumberOfResults);
}
//************************************************************************
//************************************************************************
// Nearest-point scan restricted to the cells covered by Box. Found is
// reset to false here and set by the range kernel when a candidate beats
// ResultDistance.
// Dimension = 1
void SearchNearestInBox( PointType const& ThisPoint, PointerType& ResultPoint, CoordinateType& ResultDistance,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box, bool& Found )
{
Found = false;
SearchNearestInRange()( *(Box.RowBegin), *(Box.RowEnd), ThisPoint, ResultPoint, ResultDistance, Found );
}
// Dimension = 2
void SearchNearestInBox( PointType const& ThisPoint, PointerType& ResultPoint, CoordinateType& ResultDistance,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box, bool& Found )
{
Found = false;
for(IndexType I = Box.Axis[1].Begin() ; I <= Box.Axis[1].End() ; I += Box.Axis[1].Block )
SearchNearestInRange()( Box.RowBegin[I], Box.RowEnd[I], ThisPoint, ResultPoint, ResultDistance, Found );
}
// Dimension = 3
void SearchNearestInBox( PointType const& ThisPoint, PointerType& ResultPoint, CoordinateType& ResultDistance,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box, bool& Found )
{
Found = false;
for(IndexType II = Box.Axis[2].Begin() ; II <= Box.Axis[2].End() ; II += Box.Axis[2].Block )
for(IndexType I = II + Box.Axis[1].Begin() ; I <= II + Box.Axis[1].End() ; I += Box.Axis[1].Block )
SearchNearestInRange()( Box.RowBegin[I], Box.RowEnd[I], ThisPoint, ResultPoint, ResultDistance, Found );
}
//************************************************************************
//************************************************************************
// Same box-restricted nearest scan, but delegating to the member kernel
// SearchNearestInnerInRange, which skips the query point itself.
// Dimension = 1
void SearchNearestInBoxInner( PointerType& ThisPoint, PointerType& ResultPoint, CoordinateType& ResultDistance,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box, bool& Found )
{
Found = false;
SearchNearestInnerInRange( *(Box.RowBegin), *(Box.RowEnd), ThisPoint, ResultPoint, ResultDistance, Found );
}
// Dimension = 2
void SearchNearestInBoxInner( PointerType& ThisPoint, PointerType& ResultPoint, CoordinateType& ResultDistance,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box, bool& Found )
{
Found = false;
for(IndexType I = Box.Axis[1].Begin() ; I <= Box.Axis[1].End() ; I += Box.Axis[1].Block )
SearchNearestInnerInRange( Box.RowBegin[I], Box.RowEnd[I], ThisPoint, ResultPoint, ResultDistance, Found );
}
// Dimension = 3
void SearchNearestInBoxInner( PointerType& ThisPoint, PointerType& ResultPoint, CoordinateType& ResultDistance,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box, bool& Found )
{
Found = false;
for(IndexType II = Box.Axis[2].Begin() ; II <= Box.Axis[2].End() ; II += Box.Axis[2].Block )
for(IndexType I = II + Box.Axis[1].Begin() ; I <= II + Box.Axis[1].End() ; I += Box.Axis[1].Block )
SearchNearestInnerInRange( Box.RowBegin[I], Box.RowEnd[I], ThisPoint, ResultPoint, ResultDistance, Found );
}
//************************************************************************
//************************************************************************
void SearchNearestInnerInRange( const IteratorType& RangeBegin, const IteratorType& RangeEnd, PointerType& ThisPoint,
PointerType& Result, CoordinateType& Distance, bool& Found )
{
CoordinateType NewDistance;
for(IteratorType Point = RangeBegin ; Point != RangeEnd ; Point++)
{
NewDistance = TDistanceFunction()(**Point,*ThisPoint);
if( NewDistance < Distance && *Point != ThisPoint)
{
Result = *Point;
Distance = NewDistance;
Found = true;
}
}
}
//************************************************************************
//************************************************************************
/// Collects all stored points inside the axis-aligned box
/// [SearchMinPoint, SearchMaxPoint], writing them through Results up to
/// MaxNumberOfResults. Returns how many were found.
SizeType SearchInBox( PointType const& SearchMinPoint, PointType const& SearchMaxPoint, IteratorType Results,
                      SizeType MaxNumberOfResults )
{
    SizeType found = 0;
    SearchStructureType Box( CalculateCell(SearchMinPoint), CalculateCell(SearchMaxPoint), mN, mIndexCellBegin );
    SearchInBoxLocal( SearchMinPoint, SearchMaxPoint, Results, found, MaxNumberOfResults, Box );
    return found;
}
//************************************************************************
/// TreeNode interface box search; unlike the radius overrides, this one
/// resets NumberOfResults to zero before searching.
void SearchInBox(PointType const& SearchMinPoint, PointType const& SearchMaxPoint, IteratorType& Results, SizeType& NumberOfResults,
SizeType const& MaxNumberOfResults ) override
{
NumberOfResults = 0;
SearchStructureType Box( CalculateCell(SearchMinPoint), CalculateCell(SearchMaxPoint), mN, mIndexCellBegin );
SearchInBoxLocal( SearchMinPoint, SearchMaxPoint, Results, NumberOfResults, MaxNumberOfResults, Box );
}
//************************************************************************
// Dimension-specialized box-search workers; the exact point-in-box test is
// done by the SearchBoxInRange kernel on each candidate row of cells.
// Dimension = 1
void SearchInBoxLocal( PointType const& SearchMinPoint, PointType const& SearchMaxPoint, IteratorType& ResultsPoint,
SizeType& NumberOfResults, SizeType const& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,1>& Box )
{
SearchBoxInRange()(SearchMinPoint,SearchMaxPoint,*(Box.RowBegin),*(Box.RowEnd),ResultsPoint,NumberOfResults,MaxNumberOfResults);
}
// Dimension = 2
void SearchInBoxLocal( PointType const& SearchMinPoint, PointType const& SearchMaxPoint, IteratorType& ResultsPoint,
SizeType& NumberOfResults, SizeType const& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,2>& Box )
{
for(IndexType I = Box.Axis[1].Begin() ; I <= Box.Axis[1].End() ; I += Box.Axis[1].Block )
SearchBoxInRange()(SearchMinPoint,SearchMaxPoint,Box.RowBegin[I],Box.RowEnd[I],ResultsPoint,NumberOfResults,MaxNumberOfResults);
}
// Dimension = 3
void SearchInBoxLocal( PointType const& SearchMinPoint, PointType const& SearchMaxPoint, IteratorType& ResultsPoint,
SizeType& NumberOfResults, SizeType const& MaxNumberOfResults,
SearchStructure<IndexType,SizeType,CoordinateType,IteratorType,IteratorIteratorType,3>& Box )
{
for(IndexType II = Box.Axis[2].Begin() ; II <= Box.Axis[2].End() ; II += Box.Axis[2].Block )
for(IndexType I = II + Box.Axis[1].Begin() ; I <= II + Box.Axis[1].End() ; I += Box.Axis[1].Block )
SearchBoxInRange()(SearchMinPoint,SearchMaxPoint,Box.RowBegin[I],Box.RowEnd[I],ResultsPoint,NumberOfResults,MaxNumberOfResults);
}
//************************************************************************
//************************************************************************
/// Turn back information as a string (short type identifier used by the
/// Kratos printing utilities).
virtual std::string Info() const
{
return "BinsContainer";
}
/// Print information about this object.
virtual void PrintInfo(std::ostream& rOStream) const
{
rOStream << "BinsContainer";
}
/// Print object's data: total point count, then the contents of each cell
/// delimited by consecutive entries of mIndexCell.
/// (The parameter name "Perfix" [sic] is part of the existing interface.)
void PrintData(std::ostream& rOStream, std::string const& Perfix = std::string()) const override
{
rOStream << Perfix << "Bin[" << SearchUtils::PointerDistance(mPointBegin, mPointEnd) << "] : " << std::endl;
// Cell i spans [mIndexCell[i], mIndexCell[i+1]); the last slot is the sentinel.
for(IteratorConstIterator i_cell = mIndexCell.begin() ; i_cell != mIndexCell.end()-1 ; i_cell++)
{
rOStream << Perfix << "[ " ;
for(IteratorType i_point = *i_cell ; i_point != *(i_cell+1) ; i_point++)
rOStream << **i_point << " ";
rOStream << "]" << std::endl;
}
rOStream << std::endl;
}
/// Print Size of Container
void PrintSize( std::ostream& rout )
{
rout << " BinsSize: ";
for(SizeType i = 0 ; i < TDimension ; i++)
rout << "[" << mN[i] << "]";
rout << std::endl;
}
/// Print Limits Points of the Container: bounding-box corners and cell sizes.
void PrintBox( std::ostream& rout )
{
rout << " BinsBox: Min [";
mMinPoint.Print(rout);
rout << "]; Max [";
mMaxPoint.Print(rout);
rout << "]; Size [";
mCellSize.Print(rout);
rout << "]" << std::endl;
}
/// Assignment operator.
/// NOTE(review): declared but no definition is visible in this file; if
/// none exists elsewhere, copying a Bins is effectively disabled (link
/// error on use) — confirm before relying on copy semantics.
Bins& operator=(Bins const& rOther);
/// Copy constructor (same remark as the assignment operator).
Bins(Bins const& rOther);
private:
// Point Access Iterators (vector reordered!!) — GenerateBins permutes the
// caller's range in place so cell contents are contiguous.
IteratorType mPointBegin;
IteratorType mPointEnd;
// Bin Parameters (Sizes,BoundingBox,...)
PointType mMinPoint;
PointType mMaxPoint;
CoordinateArray mCellSize;
// Reciprocal of mCellSize, cached for index computation.
CoordinateArray mInvCellSize;
// Number of cells per dimension.
SizeArray mN;
// Bins Access Vector ( vector<Iterator> ): cell i spans
// [mIndexCell[i], mIndexCell[i+1]) within the reordered point range.
IteratorVector mIndexCell;
IteratorIterator mIndexCellBegin;
IteratorIterator mIndexCellEnd;
public:
//TODO: check -- changed to avoid copy construction
// static TreeNodeType* Construct(IteratorType PointsBegin, IteratorType PointsEnd, PointType MaxPoint, PointType MinPoint, SizeType BucketSize)
/// Factory used by the tree framework: builds a bins leaf over the given
/// point range with the supplied bounding box, or returns NULL when the
/// range is empty. Ownership of the new node passes to the caller.
static TreeNodeType* Construct(IteratorType PointsBegin, IteratorType PointsEnd, const PointType& MaxPoint, const PointType& MinPoint, SizeType BucketSize)
{
    const SizeType number_of_points = SearchUtils::PointerDistance(PointsBegin,PointsEnd);
    if (number_of_points == 0)
        return NULL;
    return new Bins( PointsBegin, PointsEnd, MinPoint, MaxPoint, BucketSize );
}
};
/// Output stream operator: prints the bins' info line, per-dimension cell
/// counts, and the contents of every cell.
template< std::size_t TDimension, class TPointType, class TContainerType, class TPointerType,
class TIteratorType, class TDistanceIteratorType, class TDistanceFunction >
std::ostream & operator<<( std::ostream& rOStream, Bins<TDimension,TPointType,TContainerType,TPointerType,TIteratorType,TDistanceIteratorType,TDistanceFunction>& rThis)
{
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintSize(rOStream);
rThis.PrintData(rOStream);
return rOStream;
}
}
#endif // KRATOS_BINS_CONTAINER_H_INCLUDE
|
ch_common.h | #ifndef _BENCH_CHOLESKY_COMMON_
#define _BENCH_CHOLESKY_COMMON_
#include <mkl.h>
#include <mpi.h>
#include <omp.h>
//#define DEBUG
#ifdef _USE_HBW
#include <hbwmalloc.h>
#endif
// Fortran BLAS entry points (hence the trailing underscore and
// pass-by-pointer scalars).
// NOTE(review): this dgemm_ prototype uses void*/const void* for the
// matrices, unlike the double* used by dtrsm_/dsyrk_ below -- confirm this
// mismatch is intentional and matches the linked BLAS.
void dgemm_ (const char *transa, const char *transb, int *l, int *n, int *m, double *alpha,
const void *a, int *lda, void *b, int *ldb, double *beta, void *c, int *ldc);
void dtrsm_ (char *side, char *uplo, char *transa, char *diag, int *m, int *n, double *alpha,
double *a, int *lda, double *b, int *ldb);
void dsyrk_ (char *uplo, char *trans, int *n, int *k, double *alpha, double *a, int *lda,
double *beta, double *c, int *ldc);
// Blocked Cholesky drivers: A is an nt x nt grid of ts x ts tiles.
void cholesky_single(const int ts, const int nt, double* A[nt][nt]);
void cholesky_mpi(const int ts, const int nt, double *A[nt][nt], double *B, double *C[nt], int *block_rank);
// Per-tile kernels wrapped as OpenMP tasks (ts = tile size, ld = leading dim).
void omp_potrf(double * const A, int ts, int ld);
void omp_trsm(double *A, double *B, int ts, int ld);
void omp_gemm(double *A, double *B, double *C, int ts, int ld);
void omp_syrk(double *A, double *B, int ts, int ld);
// Poll until all n outstanding MPI requests have completed, yielding the
// current OpenMP task between unsuccessful polls so sibling tasks can run.
inline static void waitall(MPI_Request *comm_req, int n)
{
    int done = 0;
    do {
        MPI_Testall(n, comm_req, &done, MPI_STATUSES_IGNORE);
        if (!done) {
            #pragma omp taskyield
        }
    } while (!done);
}
// Reset the per-block send flags (defined in the implementation file).
void reset_send_flags(char *send_flags);
#ifdef MAIN
// Exactly one translation unit defines MAIN: it provides the storage for
// these globals; every other TU gets the extern declarations below.
// NOTE(review): names suggest np = number of MPI ranks, mype = this rank,
// num_threads = OpenMP threads -- confirm against the main program.
int np;
int mype;
int num_threads;
#else
extern int np;
extern int mype;
extern int num_threads;
#endif
#endif
|
convolution_1x1_pack4to1_bf16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Repack a 1x1 convolution kernel (fp32, layout inch-outch) into the
// interleaved bf16 layout consumed by the pack4-to-1 sgemm kernels:
// dst = 4a-inch/4a-outch. Output channels are grouped by 8 (aarch64 only),
// then by 4, then singly; input channels are consumed 4 at a time
// (pack4 input, so inch is a multiple of 4).
//
// FIX(review): the single-output-channel tail previously declared
// `float* ktmp`, but kernel_tm_pack4 is created with elemsize 2 bytes
// (bf16) and the stores are float32_to_bfloat16() results. Writing 4-byte
// floats corrupted/overran the packed kernel; the pointer must be
// `unsigned short*` like the other two tails.
static void conv1x1s1_sgemm_transform_kernel_pack4to1_bf16s_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch)
{
    // interleave
    // src = inch-outch
    // dst = 4a-inch/4a-outch
#if __aarch64__
    kernel_tm_pack4.create(8, inch/4, outch/8 + (outch%8)/4 + outch%4, (size_t)2u*4, 4);
#else
    kernel_tm_pack4.create(4, inch/4, outch/4 + outch%4, (size_t)2u*4, 4);
#endif

    int p = 0;
#if __aarch64__
    // 8 output channels at a time: each group of 4 input channels emits
    // 32 bf16 values, interleaved outch-major within the group.
    for (; p+7 < outch; p += 8)
    {
        const float* k0 = (const float*)kernel + (p+0)*inch;
        const float* k1 = (const float*)kernel + (p+1)*inch;
        const float* k2 = (const float*)kernel + (p+2)*inch;
        const float* k3 = (const float*)kernel + (p+3)*inch;
        const float* k4 = (const float*)kernel + (p+4)*inch;
        const float* k5 = (const float*)kernel + (p+5)*inch;
        const float* k6 = (const float*)kernel + (p+6)*inch;
        const float* k7 = (const float*)kernel + (p+7)*inch;

        unsigned short* ktmp = kernel_tm_pack4.channel(p/8);

        for (int q = 0; q+3 < inch; q += 4)
        {
            ktmp[0] = float32_to_bfloat16(k0[0]);
            ktmp[1] = float32_to_bfloat16(k1[0]);
            ktmp[2] = float32_to_bfloat16(k2[0]);
            ktmp[3] = float32_to_bfloat16(k3[0]);
            ktmp[4] = float32_to_bfloat16(k4[0]);
            ktmp[5] = float32_to_bfloat16(k5[0]);
            ktmp[6] = float32_to_bfloat16(k6[0]);
            ktmp[7] = float32_to_bfloat16(k7[0]);

            ktmp[8] = float32_to_bfloat16(k0[1]);
            ktmp[9] = float32_to_bfloat16(k1[1]);
            ktmp[10] = float32_to_bfloat16(k2[1]);
            ktmp[11] = float32_to_bfloat16(k3[1]);
            ktmp[12] = float32_to_bfloat16(k4[1]);
            ktmp[13] = float32_to_bfloat16(k5[1]);
            ktmp[14] = float32_to_bfloat16(k6[1]);
            ktmp[15] = float32_to_bfloat16(k7[1]);

            ktmp[16] = float32_to_bfloat16(k0[2]);
            ktmp[17] = float32_to_bfloat16(k1[2]);
            ktmp[18] = float32_to_bfloat16(k2[2]);
            ktmp[19] = float32_to_bfloat16(k3[2]);
            ktmp[20] = float32_to_bfloat16(k4[2]);
            ktmp[21] = float32_to_bfloat16(k5[2]);
            ktmp[22] = float32_to_bfloat16(k6[2]);
            ktmp[23] = float32_to_bfloat16(k7[2]);

            ktmp[24] = float32_to_bfloat16(k0[3]);
            ktmp[25] = float32_to_bfloat16(k1[3]);
            ktmp[26] = float32_to_bfloat16(k2[3]);
            ktmp[27] = float32_to_bfloat16(k3[3]);
            ktmp[28] = float32_to_bfloat16(k4[3]);
            ktmp[29] = float32_to_bfloat16(k5[3]);
            ktmp[30] = float32_to_bfloat16(k6[3]);
            ktmp[31] = float32_to_bfloat16(k7[3]);

            k0 += 4;
            k1 += 4;
            k2 += 4;
            k3 += 4;
            k4 += 4;
            k5 += 4;
            k6 += 4;
            k7 += 4;
            ktmp += 32;
        }
    }
#endif
    // 4 output channels at a time: 16 bf16 values per 4 input channels.
    for (; p+3 < outch; p += 4)
    {
        const float* k0 = (const float*)kernel + (p+0)*inch;
        const float* k1 = (const float*)kernel + (p+1)*inch;
        const float* k2 = (const float*)kernel + (p+2)*inch;
        const float* k3 = (const float*)kernel + (p+3)*inch;

#if __aarch64__
        unsigned short* ktmp = kernel_tm_pack4.channel(p/8 + (p%8)/4);
#else
        unsigned short* ktmp = kernel_tm_pack4.channel(p/4);
#endif

        for (int q = 0; q+3 < inch; q += 4)
        {
            ktmp[0] = float32_to_bfloat16(k0[0]);
            ktmp[1] = float32_to_bfloat16(k1[0]);
            ktmp[2] = float32_to_bfloat16(k2[0]);
            ktmp[3] = float32_to_bfloat16(k3[0]);

            ktmp[4] = float32_to_bfloat16(k0[1]);
            ktmp[5] = float32_to_bfloat16(k1[1]);
            ktmp[6] = float32_to_bfloat16(k2[1]);
            ktmp[7] = float32_to_bfloat16(k3[1]);

            ktmp[8] = float32_to_bfloat16(k0[2]);
            ktmp[9] = float32_to_bfloat16(k1[2]);
            ktmp[10] = float32_to_bfloat16(k2[2]);
            ktmp[11] = float32_to_bfloat16(k3[2]);

            ktmp[12] = float32_to_bfloat16(k0[3]);
            ktmp[13] = float32_to_bfloat16(k1[3]);
            ktmp[14] = float32_to_bfloat16(k2[3]);
            ktmp[15] = float32_to_bfloat16(k3[3]);

            k0 += 4;
            k1 += 4;
            k2 += 4;
            k3 += 4;
            ktmp += 16;
        }
    }
    // Remaining single output channels: 4 bf16 values per 4 input channels.
    for (; p < outch; p++)
    {
        const float* k0 = (const float*)kernel + p*inch;

#if __aarch64__
        unsigned short* ktmp = kernel_tm_pack4.channel(p/8 + (p%8)/4 + p%4);
#else
        unsigned short* ktmp = kernel_tm_pack4.channel(p/4 + p%4);
#endif

        for (int q = 0; q+3 < inch; q += 4)
        {
            ktmp[0] = float32_to_bfloat16(k0[0]);
            ktmp[1] = float32_to_bfloat16(k0[1]);
            ktmp[2] = float32_to_bfloat16(k0[2]);
            ktmp[3] = float32_to_bfloat16(k0[3]);

            k0 += 4;
            ktmp += 4;
        }
    }
}
static void conv1x1s1_sgemm_pack4to1_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int size = w * h;
const float* bias = _bias;
// interleave
Mat tmp;
#if __aarch64__
if (size >= 12)
tmp.create(12, inch, size/12 + (size%12)/8 + (size%12%8)/4 + size%12%4, elemsize, elempack, opt.workspace_allocator);
else if (size >= 8)
tmp.create(8, inch, size/8 + (size%8)/4 + size%4, elemsize, elempack, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4, inch, size/4 + size%4, elemsize, elempack, opt.workspace_allocator);
else // if (size >= 1)
tmp.create(1, inch, size, elemsize, elempack, opt.workspace_allocator);
#else
if (size >= 8)
tmp.create(8, inch, size/8 + (size%8)/4 + size%4, elemsize, elempack, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4, inch, size/4 + size%4, elemsize, elempack, opt.workspace_allocator);
else // if (size >= 1)
tmp.create(1, inch, size, elemsize, elempack, opt.workspace_allocator);
#endif
{
int nn_size;
int remain_size_start;
#if __aarch64__
nn_size = size / 12;
remain_size_start = nn_size * 12;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = ii * 12;
const unsigned short* img0 = bottom_blob.channel(0);
img0 += i*4;
unsigned short* tmpptr = tmp.channel(i/12);
for (int q=0; q<inch; q++)
{
// transpose 4x12
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
"ld4 {v4.4h, v5.4h, v6.4h, v7.4h}, [%0] \n"
"st1 {v0.8h}, [%1], #16 \n"
"st1 {v4.4h}, [%1], #8 \n"
"st1 {v1.8h}, [%1], #16 \n"
"st1 {v5.4h}, [%1], #8 \n"
"sub %0, %0, #64 \n"
"st1 {v2.8h}, [%1], #16 \n"
"st1 {v6.4h}, [%1], #8 \n"
"st1 {v3.8h}, [%1], #16 \n"
"st1 {v7.4h}, [%1], #8 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"
);
img0 += bottom_blob.cstep * 4;
}
}
#else
remain_size_start = 0;
#endif
nn_size = (size - remain_size_start) >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = remain_size_start + ii * 8;
const unsigned short* img0 = bottom_blob.channel(0);
img0 += i*4;
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i/12+(i%12)/8);
#else
unsigned short* tmpptr = tmp.channel(i/8);
#endif
for (int q=0; q<inch; q++)
{
// transpose 4x8
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
"st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3"
);
#else
asm volatile(
"pld [%0, #256] \n"
"vld4.u16 {d0-d3}, [%0]! \n"
"pld [%0, #256] \n"
"vld4.u16 {d4-d7}, [%0] \n"
"sub %0, %0, #32 \n"
"vst1.u16 {d0}, [%1 :64]! \n"
"vst1.u16 {d4}, [%1 :64]! \n"
"vst1.u16 {d1}, [%1 :64]! \n"
"vst1.u16 {d5}, [%1 :64]! \n"
"vst1.u16 {d2}, [%1 :64]! \n"
"vst1.u16 {d6}, [%1 :64]! \n"
"vst1.u16 {d3}, [%1 :64]! \n"
"vst1.u16 {d7}, [%1 :64]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1", "q2", "q3"
);
#endif // __aarch64__
img0 += bottom_blob.cstep * 4;
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const unsigned short* img0 = bottom_blob.channel(0);
img0 += i*4;
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4);
#else
unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4);
#endif
for (int q=0; q<inch; q++)
{
// transpose 4x4
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld4 {v0.4h, v1.4h, v2.4h, v3.4h}, [%0] \n"
"st1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1"
);
#else
asm volatile(
"pld [%0, #256] \n"
"vld4.u16 {d0-d3}, [%0 :128] \n"
"vst1.u16 {d0-d3}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1"
);
#endif // __aarch64__
img0 += bottom_blob.cstep * 4;
}
}
remain_size_start += nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i=remain_size_start; i<size; i++)
{
const unsigned short* img0 = bottom_blob.channel(0);
img0 += i*4;
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + i%12%4);
#else
unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
#endif
for (int q=0; q<inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #64] \n"
"ld1 {v0.4h}, [%0] \n"
"st1 {v0.4h}, [%1], #8 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0"
);
#else
asm volatile(
"pld [%0, #64] \n"
"vld1.u16 {d0}, [%0 :64] \n"
"vst1.u16 {d0}, [%1 :64]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0"
);
#endif // __aarch64__
img0 += bottom_blob.cstep * 4;
}
}
}
int nn_outch = 0;
int remain_outch_start = 0;
#if __aarch64__
nn_outch = outch >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 8;
unsigned short* outptr0 = top_blob.channel(p);
unsigned short* outptr1 = top_blob.channel(p+1);
unsigned short* outptr2 = top_blob.channel(p+2);
unsigned short* outptr3 = top_blob.channel(p+3);
unsigned short* outptr4 = top_blob.channel(p+4);
unsigned short* outptr5 = top_blob.channel(p+5);
unsigned short* outptr6 = top_blob.channel(p+6);
unsigned short* outptr7 = top_blob.channel(p+7);
const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p : zeros;
int i=0;
for (; i+11<size; i+=12)
{
unsigned short* tmpptr = tmp.channel(i/12);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8);
int nn = inch;// inch always > 0
asm volatile(
"ld1 {v30.4s, v31.4s}, [%22] \n"
"dup v8.4s, v30.s[0] \n"
"dup v9.4s, v30.s[0] \n"
"dup v10.4s, v30.s[0] \n"
"dup v11.4s, v30.s[1] \n"
"dup v12.4s, v30.s[1] \n"
"dup v13.4s, v30.s[1] \n"
"dup v14.4s, v30.s[2] \n"
"dup v15.4s, v30.s[2] \n"
"dup v16.4s, v30.s[2] \n"
"dup v17.4s, v30.s[3] \n"
"dup v18.4s, v30.s[3] \n"
"dup v19.4s, v30.s[3] \n"
"dup v20.4s, v31.s[0] \n"
"dup v21.4s, v31.s[0] \n"
"dup v22.4s, v31.s[0] \n"
"dup v23.4s, v31.s[1] \n"
"dup v24.4s, v31.s[1] \n"
"dup v25.4s, v31.s[1] \n"
"dup v26.4s, v31.s[2] \n"
"dup v27.4s, v31.s[2] \n"
"dup v28.4s, v31.s[2] \n"
"dup v29.4s, v31.s[3] \n"
"dup v30.4s, v31.s[3] \n"
"dup v31.4s, v31.s[3] \n"
"0: \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n"
"prfm pldl1keep, [%10, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v11.4s, v0.4s, v4.s[1] \n"
"fmla v14.4s, v0.4s, v4.s[2] \n"
"fmla v17.4s, v0.4s, v4.s[3] \n"
"fmla v20.4s, v0.4s, v5.s[0] \n"
"fmla v23.4s, v0.4s, v5.s[1] \n"
"fmla v26.4s, v0.4s, v5.s[2] \n"
"fmla v29.4s, v0.4s, v5.s[3] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v12.4s, v1.4s, v4.s[1] \n"
"fmla v15.4s, v1.4s, v4.s[2] \n"
"fmla v18.4s, v1.4s, v4.s[3] \n"
"fmla v21.4s, v1.4s, v5.s[0] \n"
"fmla v24.4s, v1.4s, v5.s[1] \n"
"fmla v27.4s, v1.4s, v5.s[2] \n"
"fmla v30.4s, v1.4s, v5.s[3] \n"
"fmla v10.4s, v2.4s, v4.s[0] \n"
"fmla v13.4s, v2.4s, v4.s[1] \n"
"fmla v16.4s, v2.4s, v4.s[2] \n"
"fmla v19.4s, v2.4s, v4.s[3] \n"
"fmla v22.4s, v2.4s, v5.s[0] \n"
"fmla v25.4s, v2.4s, v5.s[1] \n"
"fmla v28.4s, v2.4s, v5.s[2] \n"
"fmla v31.4s, v2.4s, v5.s[3] \n"
"fmla v8.4s, v3.4s, v6.s[0] \n"
"fmla v11.4s, v3.4s, v6.s[1] \n"
"fmla v14.4s, v3.4s, v6.s[2] \n"
"fmla v17.4s, v3.4s, v6.s[3] \n"
"fmla v20.4s, v3.4s, v7.s[0] \n"
"fmla v23.4s, v3.4s, v7.s[1] \n"
"fmla v26.4s, v3.4s, v7.s[2] \n"
"fmla v29.4s, v3.4s, v7.s[3] \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v9.4s, v0.4s, v6.s[0] \n"
"fmla v12.4s, v0.4s, v6.s[1] \n"
"fmla v15.4s, v0.4s, v6.s[2] \n"
"fmla v18.4s, v0.4s, v6.s[3] \n"
"fmla v21.4s, v0.4s, v7.s[0] \n"
"fmla v24.4s, v0.4s, v7.s[1] \n"
"fmla v27.4s, v0.4s, v7.s[2] \n"
"fmla v30.4s, v0.4s, v7.s[3] \n"
"fmla v10.4s, v1.4s, v6.s[0] \n"
"fmla v13.4s, v1.4s, v6.s[1] \n"
"fmla v16.4s, v1.4s, v6.s[2] \n"
"fmla v19.4s, v1.4s, v6.s[3] \n"
"fmla v22.4s, v1.4s, v7.s[0] \n"
"fmla v25.4s, v1.4s, v7.s[1] \n"
"fmla v28.4s, v1.4s, v7.s[2] \n"
"fmla v31.4s, v1.4s, v7.s[3] \n"
"prfm pldl1keep, [%10, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v8.4s, v2.4s, v4.s[0] \n"
"fmla v11.4s, v2.4s, v4.s[1] \n"
"fmla v14.4s, v2.4s, v4.s[2] \n"
"fmla v17.4s, v2.4s, v4.s[3] \n"
"fmla v20.4s, v2.4s, v5.s[0] \n"
"fmla v23.4s, v2.4s, v5.s[1] \n"
"fmla v26.4s, v2.4s, v5.s[2] \n"
"fmla v29.4s, v2.4s, v5.s[3] \n"
"fmla v9.4s, v3.4s, v4.s[0] \n"
"fmla v12.4s, v3.4s, v4.s[1] \n"
"fmla v15.4s, v3.4s, v4.s[2] \n"
"fmla v18.4s, v3.4s, v4.s[3] \n"
"fmla v21.4s, v3.4s, v5.s[0] \n"
"fmla v24.4s, v3.4s, v5.s[1] \n"
"fmla v27.4s, v3.4s, v5.s[2] \n"
"fmla v30.4s, v3.4s, v5.s[3] \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v10.4s, v0.4s, v4.s[0] \n"
"fmla v13.4s, v0.4s, v4.s[1] \n"
"fmla v16.4s, v0.4s, v4.s[2] \n"
"fmla v19.4s, v0.4s, v4.s[3] \n"
"fmla v22.4s, v0.4s, v5.s[0] \n"
"fmla v25.4s, v0.4s, v5.s[1] \n"
"fmla v28.4s, v0.4s, v5.s[2] \n"
"fmla v31.4s, v0.4s, v5.s[3] \n"
"fmla v8.4s, v1.4s, v6.s[0] \n"
"fmla v11.4s, v1.4s, v6.s[1] \n"
"fmla v14.4s, v1.4s, v6.s[2] \n"
"fmla v17.4s, v1.4s, v6.s[3] \n"
"fmla v20.4s, v1.4s, v7.s[0] \n"
"fmla v23.4s, v1.4s, v7.s[1] \n"
"fmla v26.4s, v1.4s, v7.s[2] \n"
"fmla v29.4s, v1.4s, v7.s[3] \n"
"fmla v9.4s, v2.4s, v6.s[0] \n"
"fmla v12.4s, v2.4s, v6.s[1] \n"
"fmla v15.4s, v2.4s, v6.s[2] \n"
"fmla v18.4s, v2.4s, v6.s[3] \n"
"fmla v21.4s, v2.4s, v7.s[0] \n"
"fmla v24.4s, v2.4s, v7.s[1] \n"
"fmla v27.4s, v2.4s, v7.s[2] \n"
"fmla v30.4s, v2.4s, v7.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v10.4s, v3.4s, v6.s[0] \n"
"fmla v13.4s, v3.4s, v6.s[1] \n"
"fmla v16.4s, v3.4s, v6.s[2] \n"
"fmla v19.4s, v3.4s, v6.s[3] \n"
"fmla v22.4s, v3.4s, v7.s[0] \n"
"fmla v25.4s, v3.4s, v7.s[1] \n"
"fmla v28.4s, v3.4s, v7.s[2] \n"
"fmla v31.4s, v3.4s, v7.s[3] \n"
"bne 0b \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"shrn v10.4h, v10.4s, #16 \n"
"shrn v11.4h, v11.4s, #16 \n"
"shrn v12.4h, v12.4s, #16 \n"
"shrn v13.4h, v13.4s, #16 \n"
"shrn v14.4h, v14.4s, #16 \n"
"shrn v15.4h, v15.4s, #16 \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"shrn v24.4h, v24.4s, #16 \n"
"shrn v25.4h, v25.4s, #16 \n"
"shrn v26.4h, v26.4s, #16 \n"
"shrn v27.4h, v27.4s, #16 \n"
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v8.4h, v9.4h, v10.4h}, [%1], #24 \n"
"st1 {v11.4h, v12.4h, v13.4h}, [%2], #24 \n"
"st1 {v14.4h, v15.4h, v16.4h}, [%3], #24 \n"
"st1 {v17.4h, v18.4h, v19.4h}, [%4], #24 \n"
"st1 {v20.4h, v21.4h, v22.4h}, [%5], #24 \n"
"st1 {v23.4h, v24.4h, v25.4h}, [%6], #24 \n"
"st1 {v26.4h, v27.4h, v28.4h}, [%7], #24 \n"
"st1 {v29.4h, v30.4h, v31.4h}, [%8], #24 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(outptr4), // %5
"=r"(outptr5), // %6
"=r"(outptr6), // %7
"=r"(outptr7), // %8
"=r"(tmpptr), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(tmpptr),
"10"(kptr),
"r"(biasptr) // %22
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
for (; i+7<size; i+=8)
{
unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8);
int nn = inch;// inch always > 0
asm volatile(
"ld1 {v30.4s, v31.4s}, [%22] \n"
"dup v16.4s, v30.s[0] \n"
"dup v17.4s, v30.s[0] \n"
"dup v18.4s, v30.s[1] \n"
"dup v19.4s, v30.s[1] \n"
"dup v20.4s, v30.s[2] \n"
"dup v21.4s, v30.s[2] \n"
"dup v22.4s, v30.s[3] \n"
"dup v23.4s, v30.s[3] \n"
"dup v24.4s, v31.s[0] \n"
"dup v25.4s, v31.s[0] \n"
"dup v26.4s, v31.s[1] \n"
"dup v27.4s, v31.s[1] \n"
"dup v28.4s, v31.s[2] \n"
"dup v29.4s, v31.s[2] \n"
"dup v30.4s, v31.s[3] \n"
"dup v31.4s, v31.s[3] \n"
"0: \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n"
"prfm pldl1keep, [%10, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v16.4s, v0.4s, v4.s[0] \n"
"fmla v18.4s, v0.4s, v4.s[1] \n"
"fmla v20.4s, v0.4s, v4.s[2] \n"
"fmla v22.4s, v0.4s, v4.s[3] \n"
"fmla v24.4s, v0.4s, v5.s[0] \n"
"fmla v26.4s, v0.4s, v5.s[1] \n"
"fmla v28.4s, v0.4s, v5.s[2] \n"
"fmla v30.4s, v0.4s, v5.s[3] \n"
"fmla v17.4s, v1.4s, v4.s[0] \n"
"fmla v19.4s, v1.4s, v4.s[1] \n"
"fmla v21.4s, v1.4s, v4.s[2] \n"
"fmla v23.4s, v1.4s, v4.s[3] \n"
"fmla v25.4s, v1.4s, v5.s[0] \n"
"fmla v27.4s, v1.4s, v5.s[1] \n"
"fmla v29.4s, v1.4s, v5.s[2] \n"
"fmla v31.4s, v1.4s, v5.s[3] \n"
"fmla v16.4s, v2.4s, v6.s[0] \n"
"fmla v18.4s, v2.4s, v6.s[1] \n"
"fmla v20.4s, v2.4s, v6.s[2] \n"
"fmla v22.4s, v2.4s, v6.s[3] \n"
"fmla v24.4s, v2.4s, v7.s[0] \n"
"fmla v26.4s, v2.4s, v7.s[1] \n"
"fmla v28.4s, v2.4s, v7.s[2] \n"
"fmla v30.4s, v2.4s, v7.s[3] \n"
"fmla v17.4s, v3.4s, v6.s[0] \n"
"fmla v19.4s, v3.4s, v6.s[1] \n"
"fmla v21.4s, v3.4s, v6.s[2] \n"
"fmla v23.4s, v3.4s, v6.s[3] \n"
"fmla v25.4s, v3.4s, v7.s[0] \n"
"fmla v27.4s, v3.4s, v7.s[1] \n"
"fmla v29.4s, v3.4s, v7.s[2] \n"
"fmla v31.4s, v3.4s, v7.s[3] \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%9], #32 \n"
"prfm pldl1keep, [%10, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%10], #32 \n"
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v12.4s, v8.s[0] \n"
"fmla v18.4s, v12.4s, v8.s[1] \n"
"fmla v20.4s, v12.4s, v8.s[2] \n"
"fmla v22.4s, v12.4s, v8.s[3] \n"
"fmla v24.4s, v12.4s, v9.s[0] \n"
"fmla v26.4s, v12.4s, v9.s[1] \n"
"fmla v28.4s, v12.4s, v9.s[2] \n"
"fmla v30.4s, v12.4s, v9.s[3] \n"
"fmla v17.4s, v13.4s, v8.s[0] \n"
"fmla v19.4s, v13.4s, v8.s[1] \n"
"fmla v21.4s, v13.4s, v8.s[2] \n"
"fmla v23.4s, v13.4s, v8.s[3] \n"
"fmla v25.4s, v13.4s, v9.s[0] \n"
"fmla v27.4s, v13.4s, v9.s[1] \n"
"fmla v29.4s, v13.4s, v9.s[2] \n"
"fmla v31.4s, v13.4s, v9.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v10.s[0] \n"
"fmla v18.4s, v14.4s, v10.s[1] \n"
"fmla v20.4s, v14.4s, v10.s[2] \n"
"fmla v22.4s, v14.4s, v10.s[3] \n"
"fmla v24.4s, v14.4s, v11.s[0] \n"
"fmla v26.4s, v14.4s, v11.s[1] \n"
"fmla v28.4s, v14.4s, v11.s[2] \n"
"fmla v30.4s, v14.4s, v11.s[3] \n"
"fmla v17.4s, v15.4s, v10.s[0] \n"
"fmla v19.4s, v15.4s, v10.s[1] \n"
"fmla v21.4s, v15.4s, v10.s[2] \n"
"fmla v23.4s, v15.4s, v10.s[3] \n"
"fmla v25.4s, v15.4s, v11.s[0] \n"
"fmla v27.4s, v15.4s, v11.s[1] \n"
"fmla v29.4s, v15.4s, v11.s[2] \n"
"fmla v31.4s, v15.4s, v11.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"shrn v24.4h, v24.4s, #16 \n"
"shrn v25.4h, v25.4s, #16 \n"
"shrn v26.4h, v26.4s, #16 \n"
"shrn v27.4h, v27.4s, #16 \n"
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v16.4h, v17.4h}, [%1], #16 \n"
"st1 {v18.4h, v19.4h}, [%2], #16 \n"
"st1 {v20.4h, v21.4h}, [%3], #16 \n"
"st1 {v22.4h, v23.4h}, [%4], #16 \n"
"st1 {v24.4h, v25.4h}, [%5], #16 \n"
"st1 {v26.4h, v27.4h}, [%6], #16 \n"
"st1 {v28.4h, v29.4h}, [%7], #16 \n"
"st1 {v30.4h, v31.4h}, [%8], #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(outptr4), // %5
"=r"(outptr5), // %6
"=r"(outptr6), // %7
"=r"(outptr7), // %8
"=r"(tmpptr), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(tmpptr),
"10"(kptr),
"r"(biasptr) // %22
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
for (; i+3<size; i+=4)
{
unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8);
int nn = inch;// inch always > 0
asm volatile(
"ld1 {v22.4s, v23.4s}, [%22] \n"
"dup v16.4s, v22.s[0] \n"
"dup v17.4s, v22.s[1] \n"
"dup v18.4s, v22.s[2] \n"
"dup v19.4s, v22.s[3] \n"
"dup v20.4s, v23.s[0] \n"
"dup v21.4s, v23.s[1] \n"
"dup v22.4s, v23.s[2] \n"
"dup v23.4s, v23.s[3] \n"
"0: \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n"
"prfm pldl1keep, [%10, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v16.4s, v0.4s, v4.s[0] \n"
"fmla v17.4s, v0.4s, v4.s[1] \n"
"fmla v18.4s, v0.4s, v4.s[2] \n"
"fmla v19.4s, v0.4s, v4.s[3] \n"
"fmla v20.4s, v0.4s, v5.s[0] \n"
"fmla v21.4s, v0.4s, v5.s[1] \n"
"fmla v22.4s, v0.4s, v5.s[2] \n"
"fmla v23.4s, v0.4s, v5.s[3] \n"
"prfm pldl1keep, [%10, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%10], #32 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v1.4s, v6.s[0] \n"
"fmla v17.4s, v1.4s, v6.s[1] \n"
"fmla v18.4s, v1.4s, v6.s[2] \n"
"fmla v19.4s, v1.4s, v6.s[3] \n"
"fmla v20.4s, v1.4s, v7.s[0] \n"
"fmla v21.4s, v1.4s, v7.s[1] \n"
"fmla v22.4s, v1.4s, v7.s[2] \n"
"fmla v23.4s, v1.4s, v7.s[3] \n"
"fmla v16.4s, v2.4s, v8.s[0] \n"
"fmla v17.4s, v2.4s, v8.s[1] \n"
"fmla v18.4s, v2.4s, v8.s[2] \n"
"fmla v19.4s, v2.4s, v8.s[3] \n"
"fmla v20.4s, v2.4s, v9.s[0] \n"
"fmla v21.4s, v2.4s, v9.s[1] \n"
"fmla v22.4s, v2.4s, v9.s[2] \n"
"fmla v23.4s, v2.4s, v9.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v3.4s, v10.s[0] \n"
"fmla v17.4s, v3.4s, v10.s[1] \n"
"fmla v18.4s, v3.4s, v10.s[2] \n"
"fmla v19.4s, v3.4s, v10.s[3] \n"
"fmla v20.4s, v3.4s, v11.s[0] \n"
"fmla v21.4s, v3.4s, v11.s[1] \n"
"fmla v22.4s, v3.4s, v11.s[2] \n"
"fmla v23.4s, v3.4s, v11.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"st1 {v16.4h}, [%1], #8 \n"
"st1 {v17.4h}, [%2], #8 \n"
"st1 {v18.4h}, [%3], #8 \n"
"st1 {v19.4h}, [%4], #8 \n"
"st1 {v20.4h}, [%5], #8 \n"
"st1 {v21.4h}, [%6], #8 \n"
"st1 {v22.4h}, [%7], #8 \n"
"st1 {v23.4h}, [%8], #8 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(outptr4), // %5
"=r"(outptr5), // %6
"=r"(outptr6), // %7
"=r"(outptr7), // %8
"=r"(tmpptr), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(tmpptr),
"10"(kptr),
"r"(biasptr) // %22
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
}
for (; i<size; i++)
{
unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + i%12%4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8);
int nn = inch;// inch always > 0
asm volatile(
"ld1 {v16.4s, v17.4s}, [%22] \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%9, #64] \n"
"ld1 {v0.4h}, [%9], #8 \n"
"prfm pldl1keep, [%10, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v16.4s, v4.4s, v0.s[0] \n"
"fmla v17.4s, v5.4s, v0.s[0] \n"
"fmla v18.4s, v6.4s, v0.s[1] \n"
"fmla v19.4s, v7.4s, v0.s[1] \n"
"prfm pldl1keep, [%10, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%10], #32 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[2] \n"
"fmla v17.4s, v9.4s, v0.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v18.4s, v10.4s, v0.s[3] \n"
"fmla v19.4s, v11.4s, v0.s[3] \n"
"bne 0b \n"
"fadd v16.4s, v16.4s, v18.4s \n"
"fadd v17.4s, v17.4s, v19.4s \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"st1 {v16.h}[0], [%1], #2 \n"
"st1 {v16.h}[1], [%2], #2 \n"
"st1 {v16.h}[2], [%3], #2 \n"
"st1 {v16.h}[3], [%4], #2 \n"
"st1 {v17.h}[0], [%5], #2 \n"
"st1 {v17.h}[1], [%6], #2 \n"
"st1 {v17.h}[2], [%7], #2 \n"
"st1 {v17.h}[3], [%8], #2 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(outptr4), // %5
"=r"(outptr5), // %6
"=r"(outptr6), // %7
"=r"(outptr7), // %8
"=r"(tmpptr), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(tmpptr),
"10"(kptr),
"r"(biasptr) // %22
: "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19"
);
}
}
remain_outch_start += nn_outch << 3;
nn_outch = (outch - remain_outch_start) >> 2;
#else // __aarch64__
nn_outch = outch >> 2;
#endif // __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
unsigned short* outptr0 = top_blob.channel(p);
unsigned short* outptr1 = top_blob.channel(p+1);
unsigned short* outptr2 = top_blob.channel(p+2);
unsigned short* outptr3 = top_blob.channel(p+3);
const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p : zeros;
int i=0;
#if __aarch64__
for (; i+11<size; i+=12)
{
unsigned short* tmpptr = tmp.channel(i/12);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8 + (p%8)/4);
int nn = inch;// inch always > 0
asm volatile(
"ld1 {v19.4s}, [%14] \n"
"dup v8.4s, v19.s[0] \n"
"dup v9.4s, v19.s[0] \n"
"dup v10.4s, v19.s[0] \n"
"dup v11.4s, v19.s[1] \n"
"dup v12.4s, v19.s[1] \n"
"dup v13.4s, v19.s[1] \n"
"dup v14.4s, v19.s[2] \n"
"dup v15.4s, v19.s[2] \n"
"dup v16.4s, v19.s[2] \n"
"dup v17.4s, v19.s[3] \n"
"dup v18.4s, v19.s[3] \n"
"dup v19.4s, v19.s[3] \n"
"0: \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v11.4s, v0.4s, v4.s[1] \n"
"fmla v14.4s, v0.4s, v4.s[2] \n"
"fmla v17.4s, v0.4s, v4.s[3] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v12.4s, v1.4s, v4.s[1] \n"
"fmla v15.4s, v1.4s, v4.s[2] \n"
"fmla v18.4s, v1.4s, v4.s[3] \n"
"fmla v10.4s, v2.4s, v4.s[0] \n"
"fmla v13.4s, v2.4s, v4.s[1] \n"
"fmla v16.4s, v2.4s, v4.s[2] \n"
"fmla v19.4s, v2.4s, v4.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%5], #32 \n"
"shll v20.4s, v20.4h, #16 \n"
"shll v21.4s, v21.4h, #16 \n"
"shll v22.4s, v22.4h, #16 \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v8.4s, v3.4s, v5.s[0] \n"
"fmla v11.4s, v3.4s, v5.s[1] \n"
"fmla v14.4s, v3.4s, v5.s[2] \n"
"fmla v17.4s, v3.4s, v5.s[3] \n"
"fmla v9.4s, v20.4s, v5.s[0] \n"
"fmla v12.4s, v20.4s, v5.s[1] \n"
"fmla v15.4s, v20.4s, v5.s[2] \n"
"fmla v18.4s, v20.4s, v5.s[3] \n"
"fmla v10.4s, v21.4s, v5.s[0] \n"
"fmla v13.4s, v21.4s, v5.s[1] \n"
"fmla v16.4s, v21.4s, v5.s[2] \n"
"fmla v19.4s, v21.4s, v5.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%5], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v8.4s, v22.4s, v6.s[0] \n"
"fmla v11.4s, v22.4s, v6.s[1] \n"
"fmla v14.4s, v22.4s, v6.s[2] \n"
"fmla v17.4s, v22.4s, v6.s[3] \n"
"fmla v9.4s, v23.4s, v6.s[0] \n"
"fmla v12.4s, v23.4s, v6.s[1] \n"
"fmla v15.4s, v23.4s, v6.s[2] \n"
"fmla v18.4s, v23.4s, v6.s[3] \n"
"fmla v10.4s, v24.4s, v6.s[0] \n"
"fmla v13.4s, v24.4s, v6.s[1] \n"
"fmla v16.4s, v24.4s, v6.s[2] \n"
"fmla v19.4s, v24.4s, v6.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v25.4s, v7.s[0] \n"
"fmla v11.4s, v25.4s, v7.s[1] \n"
"fmla v14.4s, v25.4s, v7.s[2] \n"
"fmla v17.4s, v25.4s, v7.s[3] \n"
"fmla v9.4s, v26.4s, v7.s[0] \n"
"fmla v12.4s, v26.4s, v7.s[1] \n"
"fmla v15.4s, v26.4s, v7.s[2] \n"
"fmla v18.4s, v26.4s, v7.s[3] \n"
"fmla v10.4s, v27.4s, v7.s[0] \n"
"fmla v13.4s, v27.4s, v7.s[1] \n"
"fmla v16.4s, v27.4s, v7.s[2] \n"
"fmla v19.4s, v27.4s, v7.s[3] \n"
"bne 0b \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"shrn v10.4h, v10.4s, #16 \n"
"shrn v11.4h, v11.4s, #16 \n"
"shrn v12.4h, v12.4s, #16 \n"
"shrn v13.4h, v13.4s, #16 \n"
"shrn v14.4h, v14.4s, #16 \n"
"shrn v15.4h, v15.4s, #16 \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"st1 {v8.4h, v9.4h, v10.4h}, [%1], #24 \n"
"st1 {v11.4h, v12.4h, v13.4h}, [%2], #24 \n"
"st1 {v14.4h, v15.4h, v16.4h}, [%3], #24 \n"
"st1 {v17.4h, v18.4h, v19.4h}, [%4], #24 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"
);
}
#endif // __aarch64__
for (; i+7<size; i+=8)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8 + (p%8)/4);
#else
unsigned short* tmpptr = tmp.channel(i/8);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/4);
#endif
int nn = inch;// inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v15.4s}, [%14] \n"
"dup v8.4s, v15.s[0] \n"
"dup v9.4s, v15.s[0] \n"
"dup v10.4s, v15.s[1] \n"
"dup v11.4s, v15.s[1] \n"
"dup v12.4s, v15.s[2] \n"
"dup v13.4s, v15.s[2] \n"
"dup v14.4s, v15.s[3] \n"
"dup v15.4s, v15.s[3] \n"
"0: \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v10.4s, v0.4s, v4.s[1] \n"
"fmla v12.4s, v0.4s, v4.s[2] \n"
"fmla v14.4s, v0.4s, v4.s[3] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v11.4s, v1.4s, v4.s[1] \n"
"fmla v13.4s, v1.4s, v4.s[2] \n"
"fmla v15.4s, v1.4s, v4.s[3] \n"
"fmla v8.4s, v2.4s, v5.s[0] \n"
"fmla v10.4s, v2.4s, v5.s[1] \n"
"fmla v12.4s, v2.4s, v5.s[2] \n"
"fmla v14.4s, v2.4s, v5.s[3] \n"
"fmla v9.4s, v3.4s, v5.s[0] \n"
"fmla v11.4s, v3.4s, v5.s[1] \n"
"fmla v13.4s, v3.4s, v5.s[2] \n"
"fmla v15.4s, v3.4s, v5.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%5], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v8.4s, v16.4s, v6.s[0] \n"
"fmla v10.4s, v16.4s, v6.s[1] \n"
"fmla v12.4s, v16.4s, v6.s[2] \n"
"fmla v14.4s, v16.4s, v6.s[3] \n"
"fmla v9.4s, v17.4s, v6.s[0] \n"
"fmla v11.4s, v17.4s, v6.s[1] \n"
"fmla v13.4s, v17.4s, v6.s[2] \n"
"fmla v15.4s, v17.4s, v6.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v18.4s, v7.s[0] \n"
"fmla v10.4s, v18.4s, v7.s[1] \n"
"fmla v12.4s, v18.4s, v7.s[2] \n"
"fmla v14.4s, v18.4s, v7.s[3] \n"
"fmla v9.4s, v19.4s, v7.s[0] \n"
"fmla v11.4s, v19.4s, v7.s[1] \n"
"fmla v13.4s, v19.4s, v7.s[2] \n"
"fmla v15.4s, v19.4s, v7.s[3] \n"
"bne 0b \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"shrn v10.4h, v10.4s, #16 \n"
"shrn v11.4h, v11.4s, #16 \n"
"shrn v12.4h, v12.4s, #16 \n"
"shrn v13.4h, v13.4s, #16 \n"
"shrn v14.4h, v14.4s, #16 \n"
"shrn v15.4h, v15.4s, #16 \n"
"st1 {v8.4h, v9.4h}, [%1], #16 \n"
"st1 {v10.4h, v11.4h}, [%2], #16 \n"
"st1 {v12.4h, v13.4h}, [%3], #16 \n"
"st1 {v14.4h, v15.4h}, [%4], #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"
);
#else // __aarch64__
asm volatile(
"vld1.f32 {d30-d31}, [%14] \n"
"vdup.f32 q8, d30[0] \n"
"vdup.f32 q9, d30[0] \n"
"vdup.f32 q10, d30[1] \n"
"vdup.f32 q11, d30[1] \n"
"vdup.f32 q12, d31[0] \n"
"vdup.f32 q13, d31[0] \n"
"vdup.f32 q14, d31[1] \n"
"vdup.f32 q15, d31[1] \n"
"0: \n"
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5]! \n"
"pld [%6, #256] \n"
"vld1.u16 {d12-d15}, [%6]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q10, q0, d8[1] \n"
"vmla.f32 q12, q0, d9[0] \n"
"vmla.f32 q14, q0, d9[1] \n"
"vmla.f32 q9, q1, d8[0] \n"
"vmla.f32 q11, q1, d8[1] \n"
"vmla.f32 q13, q1, d9[0] \n"
"vmla.f32 q15, q1, d9[1] \n"
"vmla.f32 q8, q2, d10[0] \n"
"vmla.f32 q10, q2, d10[1] \n"
"vmla.f32 q12, q2, d11[0] \n"
"vmla.f32 q14, q2, d11[1] \n"
"vmla.f32 q9, q3, d10[0] \n"
"vmla.f32 q11, q3, d10[1] \n"
"vmla.f32 q13, q3, d11[0] \n"
"vmla.f32 q15, q3, d11[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q8, q0, d12[0] \n"
"vmla.f32 q10, q0, d12[1] \n"
"vmla.f32 q12, q0, d13[0] \n"
"vmla.f32 q14, q0, d13[1] \n"
"vmla.f32 q9, q1, d12[0] \n"
"vmla.f32 q11, q1, d12[1] \n"
"vmla.f32 q13, q1, d13[0] \n"
"vmla.f32 q15, q1, d13[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q2, d14[0] \n"
"vmla.f32 q10, q2, d14[1] \n"
"vmla.f32 q12, q2, d15[0] \n"
"vmla.f32 q14, q2, d15[1] \n"
"vmla.f32 q9, q3, d14[0] \n"
"vmla.f32 q11, q3, d14[1] \n"
"vmla.f32 q13, q3, d15[0] \n"
"vmla.f32 q15, q3, d15[1] \n"
"bne 0b \n"
"vshrn.u32 d16, q8, #16 \n"
"vshrn.u32 d17, q9, #16 \n"
"vshrn.u32 d20, q10, #16 \n"
"vshrn.u32 d21, q11, #16 \n"
"vshrn.u32 d24, q12, #16 \n"
"vshrn.u32 d25, q13, #16 \n"
"vshrn.u32 d28, q14, #16 \n"
"vshrn.u32 d29, q15, #16 \n"
"vst1.u16 {d16-d17}, [%1 :64]! \n"
"vst1.u16 {d20-d21}, [%2 :64]! \n"
"vst1.u16 {d24-d25}, [%3 :64]! \n"
"vst1.u16 {d28-d29}, [%4 :64]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
for (; i+3<size; i+=4)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8 + (p%8)/4);
#else
unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/4);
#endif
int nn = inch;// inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v11.4s}, [%14] \n"
"dup v8.4s, v11.s[0] \n"
"dup v9.4s, v11.s[1] \n"
"dup v10.4s, v11.s[2] \n"
"dup v11.4s, v11.s[3] \n"
"0: \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v0.4s, v4.s[1] \n"
"fmla v10.4s, v0.4s, v4.s[2] \n"
"fmla v11.4s, v0.4s, v4.s[3] \n"
"fmla v8.4s, v1.4s, v5.s[0] \n"
"fmla v9.4s, v1.4s, v5.s[1] \n"
"fmla v10.4s, v1.4s, v5.s[2] \n"
"fmla v11.4s, v1.4s, v5.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v2.4s, v6.s[0] \n"
"fmla v9.4s, v2.4s, v6.s[1] \n"
"fmla v10.4s, v2.4s, v6.s[2] \n"
"fmla v11.4s, v2.4s, v6.s[3] \n"
"fmla v8.4s, v3.4s, v7.s[0] \n"
"fmla v9.4s, v3.4s, v7.s[1] \n"
"fmla v10.4s, v3.4s, v7.s[2] \n"
"fmla v11.4s, v3.4s, v7.s[3] \n"
"bne 0b \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"shrn v10.4h, v10.4s, #16 \n"
"shrn v11.4h, v11.4s, #16 \n"
"st1 {v8.4h}, [%1], #8 \n"
"st1 {v9.4h}, [%2], #8 \n"
"st1 {v10.4h}, [%3], #8 \n"
"st1 {v11.4h}, [%4], #8 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"
);
#else // __aarch64__
asm volatile(
"vld1.f32 {d22-d23}, [%14] \n"
"vdup.f32 q8, d22[0] \n"
"vdup.f32 q9, d22[1] \n"
"vdup.f32 q10, d23[0] \n"
"vdup.f32 q11, d23[1] \n"
"0: \n"
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5]! \n"
"pld [%6, #256] \n"
"vld1.u16 {d12-d15}, [%6]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q0, d8[1] \n"
"vmla.f32 q10, q0, d9[0] \n"
"vmla.f32 q11, q0, d9[1] \n"
"vmla.f32 q8, q1, d10[0] \n"
"vmla.f32 q9, q1, d10[1] \n"
"vmla.f32 q10, q1, d11[0] \n"
"vmla.f32 q11, q1, d11[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q2, d12[0] \n"
"vmla.f32 q9, q2, d12[1] \n"
"vmla.f32 q10, q2, d13[0] \n"
"vmla.f32 q11, q2, d13[1] \n"
"vmla.f32 q8, q3, d14[0] \n"
"vmla.f32 q9, q3, d14[1] \n"
"vmla.f32 q10, q3, d15[0] \n"
"vmla.f32 q11, q3, d15[1] \n"
"bne 0b \n"
"vshrn.u32 d16, q8, #16 \n"
"vshrn.u32 d18, q9, #16 \n"
"vshrn.u32 d20, q10, #16 \n"
"vshrn.u32 d22, q11, #16 \n"
"vst1.u16 {d16}, [%1 :64]! \n"
"vst1.u16 {d18}, [%2 :64]! \n"
"vst1.u16 {d20}, [%3 :64]! \n"
"vst1.u16 {d22}, [%4 :64]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"
);
#endif // __aarch64__
}
for (; i<size; i++)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + i%12%4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8 + (p%8)/4);
#else
unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/4);
#endif
int nn = inch;// inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v8.4s}, [%14] \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%5, #64] \n"
"ld1 {v0.4h}, [%5], #8 \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[1] \n"
"subs %w0, %w0, #1 \n"
"fmla v10.4s, v6.4s, v0.s[2] \n"
"fmla v11.4s, v7.4s, v0.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"fadd v10.4s, v10.4s, v11.4s \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"shrn v8.4h, v8.4s, #16 \n"
"st1 {v8.h}[0], [%1], #2 \n"
"st1 {v8.h}[1], [%2], #2 \n"
"st1 {v8.h}[2], [%3], #2 \n"
"st1 {v8.h}[3], [%4], #2 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"
);
#else // __aarch64__
asm volatile(
"vld1.f32 {d16-d17}, [%14] \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%5, #64] \n"
"vld1.u16 {d1}, [%5]! \n"
"pld [%6, #256] \n"
"vld1.u16 {d12-d15}, [%6]! \n"
"vshll.u16 q0, d1, #16 \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q5, d0[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q10, q6, d1[0] \n"
"vmla.f32 q11, q7, d1[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q9 \n"
"vadd.f32 q10, q10, q11 \n"
"vadd.f32 q8, q8, q10 \n"
"vshrn.u32 d16, q8, #16 \n"
"vst1.u16 {d16[0]}, [%1]! \n"
"vst1.u16 {d16[1]}, [%2]! \n"
"vst1.u16 {d16[2]}, [%3]! \n"
"vst1.u16 {d16[3]}, [%4]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"
);
#endif // __aarch64__
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
unsigned short* outptr0 = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
int i=0;
#if __aarch64__
for (; i+11<size; i+=12)
{
unsigned short* tmpptr = tmp.channel(i/12);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8 + (p%8)/4 + p%4);
int nn = inch;// inch always > 0
asm volatile(
"dup v8.4s, %w8 \n"
"dup v9.4s, %w8 \n"
"dup v10.4s, %w8 \n"
"eor v5.16b, v5.16b, v5.16b \n"
"eor v6.16b, v6.16b, v6.16b \n"
"eor v7.16b, v7.16b, v7.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v4.4h}, [%3], #8 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v10.4s, v2.4s, v4.s[0] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%2], #32 \n"
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v5.4s, v3.4s, v4.s[1] \n"
"fmla v6.4s, v12.4s, v4.s[1] \n"
"fmla v7.4s, v13.4s, v4.s[1] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v8.4s, v14.4s, v4.s[2] \n"
"fmla v9.4s, v15.4s, v4.s[2] \n"
"fmla v10.4s, v16.4s, v4.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v5.4s, v17.4s, v4.s[3] \n"
"fmla v6.4s, v18.4s, v4.s[3] \n"
"fmla v7.4s, v19.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v5.4s \n"
"fadd v9.4s, v9.4s, v6.4s \n"
"fadd v10.4s, v10.4s, v7.4s \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"shrn v10.4h, v10.4s, #16 \n"
"st1 {v8.4h, v9.4h, v10.4h}, [%1], #24 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"r"(bias0) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"
);
}
#endif // __aarch64__
for (; i+7<size; i+=8)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8 + (p%8)/4 + p%4);
#else
unsigned short* tmpptr = tmp.channel(i/8);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/4 + p%4);
#endif
int nn = inch;// inch always > 0
#if __aarch64__
asm volatile(
"dup v8.4s, %w8 \n"
"dup v9.4s, %w8 \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v4.4h}, [%3], #8 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v10.4s, v2.4s, v4.s[1] \n"
"fmla v11.4s, v3.4s, v4.s[1] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%2], #32 \n"
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v8.4s, v12.4s, v4.s[2] \n"
"fmla v9.4s, v13.4s, v4.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v10.4s, v14.4s, v4.s[3] \n"
"fmla v11.4s, v15.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"fadd v9.4s, v9.4s, v11.4s \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"st1 {v8.4h, v9.4h}, [%1], #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"r"(bias0) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
#else // __aarch64__
asm volatile(
"vdup.f32 q8, %8 \n"
"vdup.f32 q9, %8 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2]! \n"
"pld [%3, #64] \n"
"vld1.u16 {d9}, [%3]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q1, d8[0] \n"
"vmla.f32 q10, q2, d8[1] \n"
"vmla.f32 q11, q3, d8[1] \n"
"pld [%2, #256] \n"
"vld1.u16 {d28-d31}, [%2]! \n"
"vshll.u16 q12, d28, #16 \n"
"vshll.u16 q13, d29, #16 \n"
"vshll.u16 q14, d30, #16 \n"
"vshll.u16 q15, d31, #16 \n"
"vmla.f32 q8, q12, d9[0] \n"
"vmla.f32 q9, q13, d9[0] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q10, q14, d9[1] \n"
"vmla.f32 q11, q15, d9[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q10 \n"
"vadd.f32 q9, q9, q11 \n"
"vshrn.u32 d16, q8, #16 \n"
"vshrn.u32 d17, q9, #16 \n"
"vst1.u16 {d16-d17}, [%1 :64]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"r"(bias0) // %8
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
for (; i+3<size; i+=4)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8 + (p%8)/4 + p%4);
#else
unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/4 + p%4);
#endif
int nn = inch;// inch always > 0
#if __aarch64__
asm volatile(
"dup v8.4s, %w8 \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v4.4h}, [%3], #8 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[1] \n"
"subs %w0, %w0, #1 \n"
"fmla v10.4s, v2.4s, v4.s[2] \n"
"fmla v11.4s, v3.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"fadd v10.4s, v10.4s, v11.4s \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"shrn v8.4h, v8.4s, #16 \n"
"st1 {v8.4h}, [%1], #8 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"r"(bias0) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11"
);
#else // __aarch64__
asm volatile(
"vdup.f32 q8, %8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2]! \n"
"pld [%3, #64] \n"
"vld1.u16 {d9}, [%3]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q1, d8[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q10, q2, d9[0] \n"
"vmla.f32 q11, q3, d9[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q9 \n"
"vadd.f32 q10, q10, q11 \n"
"vadd.f32 q8, q8, q10 \n"
"vshrn.u32 d16, q8, #16 \n"
"vst1.u16 {d16}, [%1]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"r"(bias0) // %8
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11"
);
#endif // __aarch64__
}
for (; i<size; i++)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + i%12%4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/8 + (p%8)/4 + p%4);
#else
unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p/4 + p%4);
#endif
float32x4_t _sum0 = vdupq_n_f32(0.f);
for (int q=0; q<inch; q++)
{
float32x4_t _r0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(tmpptr), 16));
float32x4_t _k0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(kptr), 16));
_sum0 = vmlaq_f32(_sum0, _r0, _k0);
kptr += 4;
tmpptr += 4;
}
#if __aarch64__
float sum0 = vaddvq_f32(_sum0);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float32x2_t _ss2 = vpadd_f32(_ss, _ss);
float sum0 = vget_lane_f32(_ss2, 0);
#endif
outptr0[0] = float32_to_bfloat16(bias0 + sum0);
outptr0++;
}
}
// // NOTE sgemm
// for (; p<outch; p++)
// {
// Mat out0 = top_blob.channel(p);
//
// const float bias0 = bias ? bias[p] : 0.f;
//
// unsigned short* outptr0 = out0;
//
// for (int i=0; i<size; i++)
// {
// float sum = bias0;
//
// const unsigned short* kptr = _kernel.channel(p);
//
// for (int q=0; q<inch; q++)
// {
// const unsigned short* img0 = bottom_blob.channel(q);
//
// sum += img0[i] * kptr[0];
// kptr ++;
// }
//
// outptr0[i] = sum;
// }
// }
}
static void conv1x1s2_pack4to1_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = (w - 2*outw + w) * 4;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<channels; p++)
{
const unsigned short* r0 = bottom_blob.channel(p);
unsigned short* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j+3 < outw; j+=4)
{
uint16x4_t _v0 = vld1_u16(r0);
uint16x4_t _v1 = vld1_u16(r0+8);
uint16x4_t _v2 = vld1_u16(r0+16);
uint16x4_t _v3 = vld1_u16(r0+24);
uint16x8_t _v01 = vcombine_u16(_v0, _v1);
uint16x8_t _v23 = vcombine_u16(_v2, _v3);
vst1q_u16(outptr, _v01);
vst1q_u16(outptr+8, _v23);
r0 += 32;
outptr += 16;
}
for (; j+1 < outw; j+=2)
{
uint16x4_t _v0 = vld1_u16(r0);
uint16x4_t _v1 = vld1_u16(r0+8);
uint16x8_t _v = vcombine_u16(_v0, _v1);
vst1q_u16(outptr, _v);
r0 += 16;
outptr += 8;
}
for (; j < outw; j++)
{
uint16x4_t _v = vld1_u16(r0);
vst1_u16(outptr, _v);
r0 += 8;
outptr += 4;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack4to1_bf16s_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
GB_unaryop__abs_int8_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int8_fp64
// op(A') function: GB_tran__abs_int8_fp64
// C type: int8_t
// A type: double
// cast: int8_t cij ; GB_CAST_SIGNED(cij,aij,8)
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, x) \
int8_t z ; GB_CAST_SIGNED(z,x,8) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT8 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = op (cast (Ax [p])) for p = 0..anz-1, in parallel:
// cast double -> int8_t via GB_CAST_SIGNED(...,8), then apply GB_IABS.
// NOTE: auto-generated file (see header); fix the Generator/ source instead.
GrB_Info GB_unop__abs_int8_fp64
(
int8_t *restrict Cx,        // output array, length anz
const double *restrict Ax,  // input array, length anz
int64_t anz,                // number of entries to process
int nthreads                // number of OpenMP threads to use
)
{
#if GB_DISABLE
// operator compiled out (GB_DISABLE); caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;   // Cx [p] = GB_IABS (GB_CAST_SIGNED (Ax [p], 8))
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast double -> int8_t, and apply abs.
// All of the work is done by the included GB_unaryop_transpose.c template
// (phase 2 of 2); this wrapper only binds the type/operator macros above.
GrB_Info GB_tran__abs_int8_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,              // per-slice row counts used by the template -- TODO confirm
GBI_single_iterator Iter,         // iterator over the vectors of A
const int64_t *restrict A_slice,  // slice boundaries of A (presumably naslice+1 entries; verify)
int naslice                       // number of slices of A
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
|
GB_binop__div_fc32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__div_fc32)
// A.*B function (eWiseMult): GB (_AemultB_08__div_fc32)
// A.*B function (eWiseMult): GB (_AemultB_02__div_fc32)
// A.*B function (eWiseMult): GB (_AemultB_04__div_fc32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__div_fc32)
// A*D function (colscale): GB (_AxD__div_fc32)
// D*A function (rowscale): GB (_DxB__div_fc32)
// C+=B function (dense accum): GB (_Cdense_accumB__div_fc32)
// C+=b function (dense accum): GB (_Cdense_accumb__div_fc32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_fc32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_fc32)
// C=scalar+B GB (_bind1st__div_fc32)
// C=scalar+B' GB (_bind1st_tran__div_fc32)
// C=A+scalar GB (_bind2nd__div_fc32)
// C=A'+scalar GB (_bind2nd_tran__div_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// A pattern? 0
// B type: GxB_FC32_t
// B pattern? 0
// BinaryOp: cij = GB_FC32_div (aij, bij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_BTYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC32_div (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_FC32 || GxB_NO_DIV_FC32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; the binop here is GB_FC32_div.
// No GB_DISABLE guard: per the comment above, only MIN/MAX/PLUS/MINUS/RMINUS/
// TIMES/DIV/RDIV reach this kernel.  The template does all the work.
void GB (_Cdense_ewise3_accum__div_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads   // number of OpenMP threads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B (no accumulation) where C, A, and B are all dense; the binop is
// GB_FC32_div.  The included template does all the work.
void GB (_Cdense_ewise3_noaccum__div_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads   // number of OpenMP threads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with the
// DIV_FC32 operator, using a precomputed task slicing of B for parallelism.
GrB_Info GB (_Cdense_accumB__div_fc32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads   // task slicing of B
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with the DIV_FC32
// operator.  The included template does all the work.
// Fix: the generated code had a second, unreachable "return (GrB_SUCCESS)"
// after the inner block's return; a single return suffices (same behavior).
// NOTE: auto-generated file (see header); mirror this fix in Generator/*.
GrB_Info GB (_Cdense_accumb__div_fc32)
(
GrB_Matrix C,
const GB_void *p_bwork,   // the scalar b, passed by untyped pointer
const int nthreads        // number of OpenMP threads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC32_t
GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, applying the
// DIV_FC32 binop (presumably cij = GB_FC32_div (aij, djj) -- see the
// colscale template).  Uses a precomputed task slicing of A.
GrB_Info GB (_AxD__div_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,   // diagonal matrix
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads   // task slicing of A
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;   // template writes directly into C->x
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, applying the
// DIV_FC32 binop (presumably cij = GB_FC32_div (dii, bij) -- see the
// rowscale template).
GrB_Info GB (_DxB__div_fc32)
(
GrB_Matrix C,
const GrB_Matrix D,   // diagonal matrix
const GrB_Matrix B,
int nthreads          // number of OpenMP threads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;   // template writes directly into C->x
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, where "+" is GB_FC32_div.
// Also handles eWiseUnion, in which the alpha/beta scalars substitute for
// entries absent from A or B respectively (per the is_eWiseUnion flag).
// All of the work is done by the included GB_add_template.c.
GrB_Info GB (_AaddB__div_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,            // optional mask
const bool Mask_struct,        // mask flags; semantics defined by the template
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,      // true: eWiseUnion, alpha/beta scalars are read
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace for slicing M, A, and B; freed by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
GxB_FC32_t alpha_scalar ;
GxB_FC32_t beta_scalar ;
if (is_eWiseUnion)
{
// the scalar inputs are only valid (and only read) in the eWiseUnion case
alpha_scalar = (*((GxB_FC32_t *) alpha_scalar_in)) ;
beta_scalar = (*((GxB_FC32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult (method 08), C=A.*B with optional mask,
// where C is sparse/hypersparse; operator is z = div(x,y) on GxB_FC32_t.
GrB_Info GB (_AemultB_08__div_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult (method 02), C<#> = A.*B where A is
// sparse/hyper and B is bitmap/full. div is not commutative, so when the
// generator could not pre-swap to rdiv it emits the GB_BINOP_FLIP branch
// and honors the runtime flipxy flag instead.
GrB_Info GB (_AemultB_02__div_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult (method 04), C<M> = A.*B where M is
// sparse/hyper and both A and B are bitmap/full; operator z = div(x,y).
GrB_Info GB (_AemultB_04__div_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult where the result C is held in bitmap form,
// with optional (possibly complemented) mask; operator z = div(x,y).
GrB_Info GB (_AemultB_bitmap__div_fc32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply a binary operator with the scalar bound as the first argument:
// Cx [p] = div (x, Bx [p]) for every entry present in B.
GrB_Info GB (_bind1st__div_fc32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
    GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // only touch positions present in B's bitmap
        if (GBB (Bb, p))
        {
            Cx [p] = GB_FC32_div (x, GBX (Bx, p, false)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply a binary operator with the scalar bound as the second argument:
// Cx [p] = div (Ax [p], y) for every entry present in A.
GrB_Info GB (_bind2nd__div_fc32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
    GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only touch positions present in A's bitmap
        if (GBB (Ab, p))
        {
            Cx [p] = GB_FC32_div (GBX (Ax, p, false), y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below.
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC32_div (x, aij) ; \
}

// Generated kernel: C = op (x, A') — transpose A and apply the binary
// operator with the scalar bound as the first argument.
GrB_Info GB (_bind1st_tran__div_fc32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    GxB_FC32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent kernels (preprocessor runs
    // regardless of the return above)
    #undef  GB_ATYPE
    #define GB_ATYPE \
    GxB_FC32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below.
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC32_div (aij, y) ; \
}

// Generated kernel: C = op (A', y) — transpose A and apply the binary
// operator with the scalar bound as the second argument.
GrB_Info GB (_bind2nd_tran__div_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
bufradixsort.c | #include "bufradixsort.h"
#include "bufradixsort_common.h"
#include "bufradixsort_histo.h"
#include "bufradixsort_relocate.h"
#include <limits.h>
#include <stdint.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#if BUFRADIXSORT_DEBUG
#include <stdio.h>
#include <sys/time.h>
#endif
/*
 * Map a logical (little-endian) byte index `pos` of a `bits`-wide integer to
 * the physical byte offset on this machine.  It builds a probe value whose
 * byte i holds the value i, then scans the probe's bytes for `pos`; the scan
 * index is the physical offset.  On a little-endian host this is the
 * identity; on big-endian it reverses the byte order.
 * NOTE(review): assumes pos < bits/CHAR_BIT — for an out-of-range pos the
 * inner scan would walk past `tester` (TODO confirm callers guarantee this).
 */
static unsigned int correct_position(unsigned int bits, unsigned int pos) {
    unsigned int i = 0;
    switch (bits) {
        // one case per supported integer width, expanded via ITERLIST
#define CORRECT_POSITION_CASE(BITS) case BITS: { \
UTYP(BITS) tester = 0; \
unsigned char *tester_ptr = (unsigned char*)&tester; \
for (i = 0; i < BITS / CHAR_BIT; i++) \
tester |= (UTYP(BITS))i << (CHAR_BIT*i); \
for (i = 0; *tester_ptr != pos; tester_ptr++, i++); \
} break
    ITERLIST(SUPPORTED_INT_BITS_LIST_LEN, SUPPORTED_INT_BITS_LIST, CORRECT_POSITION_CASE);
    }
    return i;
}
/*
 * Validate an element layout and compute log2 of the element size in
 * BKT_BIT-sized units.  Returns 0 on success and stores the log in
 * *elem_size_log_p; returns a negative code when:
 *   -1  a FLOAT field has an unsupported bit width
 *   -2  a field's bit width is not a multiple of BKT_BIT
 *   -3  the total element size is not a power of two
 *   -4  the element size exceeds the supported maximum (or is zero:
 *       elem_size_log stays UINT_MAX when elem_size is 0)
 */
static int check_elem_size(const bufradix_layout_t *elem_layout, unsigned int *elem_size_log_p) {
    unsigned int elem_size = 0, elem_size_tmp, elem_size_log;
    bufradix_layout_t l;
    while ((l = *elem_layout++).type != BUFRADIX_LAYOUT_END) {
        if (l.type == BUFRADIX_LAYOUT_FLOAT) {
            int ok = 0;
            // ok becomes true iff l.bits matches one of the supported widths
#define CHECK_FLOAT_BITS_KERNEL(n) ok = ok || l.bits == n
            ITERLIST(SUPPORTED_FLOAT_BITS_LIST_LEN, SUPPORTED_FLOAT_BITS_LIST, CHECK_FLOAT_BITS_KERNEL);
            if (!ok) return -1;
        }
        if (l.bits % BKT_BIT) return -2;
        elem_size += l.bits / BKT_BIT;
    }
    // compute floor(log2(elem_size)); starts at UINT_MAX so a zero size
    // leaves elem_size_log at UINT_MAX, caught by the range check below
    for (elem_size_log = (unsigned int)0 - (unsigned int)1, elem_size_tmp = elem_size;
            elem_size_tmp;
            elem_size_log += 1, elem_size_tmp >>= 1);
    if ((unsigned int)1 << elem_size_log != elem_size) return -3;
    if (elem_size_log > ELEM_SIZE_LOG_MAX) return -4; /* elem_size_log is UINT_MAX if elem_size is 0 */
    *elem_size_log_p = elem_size_log;
    return 0;
}
/*
 * LSD radix sort of `elem_cnt` elements held in `data`, using `work` as an
 * equally-sized scratch buffer, ordered according to `elem_layout` (fields
 * sorted in increasing `order`).  Data ping-pongs between `data` and `work`
 * once per BKT_BIT digit pass; if an odd number of passes ran, the result is
 * copied back so the sorted data always ends up in `data`.
 * With OpenMP, each thread owns a contiguous slice of the input; a shared
 * acc_histo[] plus an ordered per-thread fix-up assigns disjoint output
 * regions per (thread, bucket) pair.
 */
void bufradixsort(void *data, void *work, size_t elem_cnt, const bufradix_layout_t *elem_layout) {
    unsigned int elem_size_log;
#ifdef _OPENMP
    // shared across threads: running prefix sums of all threads' histograms
    size_t acc_histo[BKT];
    memset(acc_histo, 0, sizeof(size_t[BKT]));
#endif
    // silently refuse layouts that fail validation (see check_elem_size)
    if (check_elem_size(elem_layout, &elem_size_log)) return;
#ifdef _OPENMP
#pragma omp parallel
#endif
    {
#ifdef _OPENMP
        int tnum = omp_get_num_threads();
        int tid = omp_get_thread_num();
        int t;
#endif
        size_t histo[BKT];                 // this thread's bucket counts
        unsigned char *copy_points[BKT];   // per-bucket output cursors
        unsigned char *from, *from_end;    // this thread's input slice
        size_t from_offset;                // slice start, in bytes
        unsigned char *dest = work;        // destination buffer of this pass
        unsigned int bkt;
        size_t acc;
        unsigned int order = 0, sort_times = 0;
        unsigned int bkt_pos_base;         // byte offset of the current field
#if BUFRADIXSORT_DEBUG
        struct timeval ts1, ts2;
#define DEBUG_TIME1() gettimeofday(&ts1, NULL)
#define DEBUG_TIME2() gettimeofday(&ts2, NULL)
#ifdef _OPENMP
#define DEBUG_PRINT(str) printf(str": thread %u seconds %f\n", tid, \
ts2.tv_sec-ts1.tv_sec + (double)(ts2.tv_usec-ts1.tv_usec)/1000000)
#else /* _OPENMP */
#define DEBUG_PRINT(str) printf(str": seconds %f\n", \
ts2.tv_sec-ts1.tv_sec + (double)(ts2.tv_usec-ts1.tv_usec)/1000000)
#endif /* _OPENMP */
#else /* BUFRADIXSORT_DEBUG */
#define DEBUG_TIME1()
#define DEBUG_TIME2()
#define DEBUG_PRINT(str)
#endif /* BUFRADIXSORT_DEBUG */
#ifdef _OPENMP
        {
            // split elem_cnt as evenly as possible; the first `mod`
            // threads get one extra element each
            size_t quo = elem_cnt / tnum;
            int mod = elem_cnt % tnum;
            from_offset = (tid * quo + (tid < mod ? tid : mod)) << elem_size_log;
            from = data + from_offset;
            from_end = from + ((quo + (tid < mod)) << elem_size_log);
        }
#else
        from_offset = 0;
        from = data;
        from_end = from + (elem_cnt << elem_size_log);
#endif
        while (1) {
            // locate the layout field whose sort order equals `order`
            const bufradix_layout_t *elem_layout_tmp = elem_layout;
            bufradix_layout_t l;
            unsigned int pos;
            bkt_pos_base = 0;
            while ((l = *elem_layout_tmp++).type != BUFRADIX_LAYOUT_END && l.order != order)
                bkt_pos_base += l.bits / BKT_BIT;
            if (l.type == BUFRADIX_LAYOUT_END) break;
            // NOTE(review): an IGNORE entry does not advance `order` —
            // presumably IGNORE entries never carry a matching order;
            // confirm, otherwise this would loop forever.
            if (l.type == BUFRADIX_LAYOUT_IGNORE) continue;
            order++, sort_times += l.bits / BKT_BIT;
            // one pass per BKT_BIT-wide digit of this field, LSD first
            for (pos = 0; pos < l.bits / BKT_BIT; pos++) {
                unsigned int real_pos = correct_position(l.bits, pos);
                // flip the sign bucket on the most significant digit of
                // signed ints and floats so negatives sort before positives
                unsigned int bkt_fix_sign =
                        (pos+1 == l.bits / BKT_BIT && (l.type == BUFRADIX_LAYOUT_INT || l.type == BUFRADIX_LAYOUT_FLOAT)) ?
                        1u << (BKT_BIT-1) : 0;
                unsigned int float_bits_if_lsb =
                        (pos == 0 && l.type == BUFRADIX_LAYOUT_FLOAT) ? l.bits : 0;
                unsigned int float_bits_if_msb =
                        (pos+1 == l.bits / BKT_BIT && l.type == BUFRADIX_LAYOUT_FLOAT) ? l.bits : 0;
                DEBUG_TIME1();
                count_histo(from, from_end, elem_size_log, bkt_pos_base, real_pos, float_bits_if_lsb, histo);
                DEBUG_TIME2();
#ifdef _OPENMP
#pragma omp critical
#endif
                DEBUG_PRINT("histo");
#ifdef _OPENMP
                // fold this thread's counts into the shared prefix sums
#pragma omp critical
                for (bkt = 0, acc = 0; bkt < BKT; bkt++) {
                    acc += histo[bkt^bkt_fix_sign];
                    acc_histo[bkt^bkt_fix_sign] += acc;
                }
                // walk threads from last to first, separated by barriers,
                // so each thread claims its disjoint output region
                for (t = tnum-1; t >= 0; t--) {
#pragma omp barrier
                    if (t == tid) {
                        for (bkt = 0; bkt < BKT; bkt++) {
                            acc_histo[bkt] -= histo[bkt];
                            copy_points[bkt] = dest + acc_histo[bkt];
                        }
                    }
                }
#else
                // serial: plain exclusive prefix sum over the buckets
                for (bkt = 0, acc = 0; bkt < BKT; bkt++) {
                    copy_points[bkt^bkt_fix_sign] = dest + acc;
                    acc += histo[bkt^bkt_fix_sign];
                }
#endif
                DEBUG_TIME1();
                relocate_data(from, from_end, dest,
                        elem_size_log, bkt_pos_base, real_pos, float_bits_if_msb, bkt_fix_sign, histo, copy_points);
                DEBUG_TIME2();
#ifdef _OPENMP
#pragma omp critical
#endif
                DEBUG_PRINT("relocate");
#ifdef _OPENMP
#pragma omp single
                memset(acc_histo, 0, sizeof(size_t[BKT])); /* here is the only safe position to clear acc_histo */
#endif
                {
                    // swap the roles of source and destination buffers,
                    // keeping this thread's slice offset and length
                    size_t mylen = from_end - from;
                    from_end = from - from_offset;
                    from = dest + from_offset;
                    dest = from_end;
                    from_end = from + mylen;
                }
#ifdef _OPENMP
#pragma omp barrier
#endif
            }
        }
        // odd number of passes: result sits in `work`, copy it back to data
        if (sort_times % 2)
            memcpy(dest + from_offset, from, from_end - from);
    }
}
|
pngquant.c | /* pngquant.c - quantize the colors in an alphamap down to a specified number
**
** © 2009-2018 by Kornel Lesiński.
** © 1989, 1991 by Jef Poskanzer.
** © 1997-2002 by Greg Roelofs; based on an idea by Stefan Schneider.
**
** See COPYRIGHT file for license.
*/
#define PNGQUANT_VERSION LIQ_VERSION_STRING " (November 2018)"
#define PNGQUANT_USAGE "\
usage: pngquant [options] [ncolors] -- pngfile [pngfile ...]\n\
pngquant [options] [ncolors] - >stdout <stdin\n\n\
options:\n\
--force overwrite existing output files (synonym: -f)\n\
--skip-if-larger only save converted files if they're smaller than original\n\
--output file destination file path to use instead of --ext (synonym: -o)\n\
--ext new.png set custom suffix/extension for output filenames\n\
--quality min-max don't save below min, use fewer colors below max (0-100)\n\
--speed N speed/quality trade-off. 1=slow, 3=default, 11=fast & rough\n\
--nofs disable Floyd-Steinberg dithering\n\
--posterize N output lower-precision color (e.g. for ARGB4444 output)\n\
--strip remove optional metadata (default on Mac)\n\
--verbose print status messages (synonym: -v)\n\
\n\
Quantizes one or more 32-bit RGBA PNGs to 8-bit (or smaller) RGBA-palette.\n\
The output filename is the same as the input name except that\n\
it ends in \"-fs8.png\", \"-or8.png\" or your custom extension (unless the\n\
input is stdin, in which case the quantized image will go to stdout).\n\
If you pass the special output path \"-\" and a single input file, that file\n\
will be processed and the quantized image will go to stdout.\n\
The default behavior if the output file exists is to skip the conversion;\n\
use --force to overwrite. See man page for full list of options.\n"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <stdbool.h>
#include <math.h>
#if defined(_WIN32) || defined(WIN32) || defined(__WIN32__)
# include <fcntl.h> /* O_BINARY */
# include <io.h> /* setmode() */
#else
# include <unistd.h>
#endif
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_max_threads() 1
#define omp_get_thread_num() 0
#endif
#include "rwpng.h" /* typedefs, common macros, public prototypes */
#include "libimagequant.h" /* if it fails here, run: git submodule update; ./configure; or add -Ilib to compiler flags */
#include "pngquant_opts.h"
static pngquant_error prepare_output_image(liq_result *result, liq_image *input_image, rwpng_color_transform tag, png8_image *output_image);
static void set_palette(liq_result *result, png8_image *output_image);
static pngquant_error read_image(liq_attr *options, const char *filename, int using_stdin, png24_image *input_image_p, liq_image **liq_image_p, bool keep_input_pixels, bool strip, bool verbose);
static pngquant_error write_image(png8_image *output_image, png24_image *output_image24, const char *outname, struct pngquant_options *options);
static char *add_filename_extension(const char *filename, const char *newext);
static bool file_exists(const char *outname);
/*
 * printf-style status logging routed through the configured log callback.
 * No-op when no callback is set.  The message is formatted twice: once to
 * measure, once into a right-sized buffer (VLA, or heap on MSVC which lacks
 * VLAs).
 */
static void verbose_printf(struct pngquant_options *context, const char *fmt, ...)
{
    if (!context->log_callback) {
        return;
    }
    va_list va;
    va_start(va, fmt);
    int required_space = vsnprintf(NULL, 0, fmt, va)+1; // +1 for '\0'
    va_end(va);
    // vsnprintf returns a negative value on encoding error; a VLA of
    // size <= 0 (or malloc(0)) would be undefined/useless, so bail out.
    if (required_space <= 0) {
        return;
    }
#if defined(_MSC_VER)
    char *buf = malloc(required_space);
    if (!buf) {
        return; // best-effort logging: silently drop on OOM
    }
#else
    char buf[required_space];
#endif
    va_start(va, fmt);
    vsnprintf(buf, required_space, fmt, va);
    va_end(va);
    context->log_callback(context->liq, buf, context->log_callback_user_info);
#if defined(_MSC_VER)
    free(buf);
#endif
}
// Default verbose-mode log sink: print each libimagequant message,
// newline-terminated, to stderr.
static void log_callback(const liq_attr *attr, const char *msg, void* user_info)
{
    fputs(msg, stderr);
    fputc('\n', stderr);
}
#ifdef _OPENMP
#define LOG_BUFFER_SIZE 1300
// Per-file log accumulator used when files are quantized in parallel, so
// messages from different OpenMP threads are not interleaved on stderr.
struct buffered_log {
    int buf_used;               // bytes of buf currently filled (excl. '\0')
    char buf[LOG_BUFFER_SIZE];  // accumulated '\n'-terminated messages
};
// Write any accumulated log lines to stderr and reset the buffer.
static void log_callback_buferred_flush(const liq_attr *attr, void *context)
{
    struct buffered_log *log = context;
    if (log->buf_used == 0) {
        return;
    }
    fwrite(log->buf, 1, log->buf_used, stderr);
    fflush(stderr);
    log->buf_used = 0;
}
// Append a message (plus a trailing newline) to the per-file log buffer,
// flushing first when it would not fit; over-long messages are truncated.
static void log_callback_buferred(const liq_attr *attr, const char *msg, void* context)
{
    struct buffered_log *log = context;
    int len = strlen(msg);
    // clamp so the message plus "\n\0" always fits an empty buffer
    if (len > LOG_BUFFER_SIZE - 2) {
        len = LOG_BUFFER_SIZE - 2;
    }
    // not enough room left? emit what is buffered so far first
    if (LOG_BUFFER_SIZE - log->buf_used - 2 < len) {
        log_callback_buferred_flush(attr, log);
    }
    memcpy(log->buf + log->buf_used, msg, len);
    log->buf_used += len + 1;
    log->buf[log->buf_used - 1] = '\n';
    log->buf[log->buf_used] = '\0';
}
#endif
// Print the pngquant version banner to fd, including build-configuration
// notes (the message string is assembled at compile time from the #if
// fragments below), followed by the rwpng/libpng version info.
static void print_full_version(FILE *fd)
{
    fprintf(fd, "pngquant, %s, by Kornel Lesinski, Greg Roelofs.\n"
#ifndef NDEBUG
                    "   WARNING: this is a DEBUG (slow) version.\n" /* NDEBUG disables assert() */
#endif
#if !USE_SSE && (defined(__SSE__) || defined(__amd64__) || defined(__X86_64__) || defined(__i386__))
                    "   SSE acceleration disabled.\n"
#endif
#if _OPENMP
                    "   Compiled with OpenMP (multicore support).\n"
#endif
        , PNGQUANT_VERSION);
    rwpng_version_info(fd);
    fputs("\n", fd);
}
// Print the command-line usage text to fd.
static void print_usage(FILE *fd)
{
    fprintf(fd, "%s", PNGQUANT_USAGE);
}
/**
* N = automatic quality, uses limit unless force is set (N-N or 0-N)
* -N = no better than N (same as 0-N)
* N-M = no worse than N, no better than M
* N- = no worse than N, perfect if possible (same as N-100)
*
* where N,M are numbers between 0 (lousy) and 100 (perfect)
*/
/**
 * Parse a quality spec and apply it via liq_set_quality().
 *
 *  N    = automatic quality, uses limit unless force is set (N-N or 0-N)
 * -N    = no better than N (same as 0-N)
 *  N-M  = no worse than N, no better than M
 *  N-   = no worse than N, perfect if possible (same as N-100)
 *
 * N and M are 0 (lousy) .. 100 (perfect).  Sets *min_quality_limit when a
 * non-zero lower bound was requested.  Returns false on malformed input.
 */
static bool parse_quality(const char *quality, liq_attr *options, bool *min_quality_limit)
{
    char *end;
    const char *cursor = quality;
    const long first = strtol(cursor, &end, 10);
    if (cursor == end) return false; // no leading number at all
    cursor = end;

    long limit, target;
    if ('\0' == end[0]) {
        if (first < 0) {             // "-N"
            target = -first;
            limit = 0;
        } else {                     // "N"
            target = first;
            limit = first*9/10;
        }
    } else if ('-' == end[0] && '\0' == end[1]) { // "N-"
        target = 100;
        limit = first;
    } else {                         // "N-M": strtol consumes "-M" as negative
        const long second = strtol(cursor, &end, 10);
        if (cursor == end || second > 0) return false;
        target = -second;
        limit = first;
    }

    *min_quality_limit = (limit > 0);
    return LIQ_OK == liq_set_quality(options, limit, target);
}
pngquant_error pngquant_main(struct pngquant_options *options);
static pngquant_error pngquant_file_internal(const char *filename, const char *outname, struct pngquant_options *options);
#ifndef PNGQUANT_NO_MAIN
// Entry point: parse command-line options, then hand off to pngquant_main.
int main(int argc, char *argv[])
{
    struct pngquant_options options = {
        .floyd = 1.f, // floyd-steinberg dithering
        .strip = false,
    };
    const pngquant_error parse_result = pngquant_parse_options(argc, argv, &options);
    return (SUCCESS == parse_result) ? pngquant_main(&options) : parse_result;
}
#endif
/*
 * Main driver: validate/apply parsed options onto the liq attr, then
 * quantize every input file (in parallel with OpenMP when available).
 * Returns SUCCESS or the most recent per-file/option error.
 */
pngquant_error pngquant_main(struct pngquant_options *options)
{
    // informational modes exit before any work
    if (options->print_version) {
        puts(PNGQUANT_VERSION);
        return SUCCESS;
    }
    if (options->missing_arguments) {
        print_full_version(stderr);
        print_usage(stderr);
        return MISSING_ARGUMENT;
    }
    if (options->print_help) {
        print_full_version(stdout);
        print_usage(stdout);
        return SUCCESS;
    }
    options->liq = liq_attr_create();
    if (!options->liq) {
        fputs("SSE-capable CPU is required for this build.\n", stderr);
        return WRONG_ARCHITECTURE;
    }
    if (options->verbose) {
        liq_set_log_callback(options->liq, log_callback, NULL);
        options->log_callback = log_callback;
    }
    if (options->quality && !parse_quality(options->quality, options->liq, &options->min_quality_limit)) {
        fputs("Quality should be in format min-max where min and max are numbers in range 0-100.\n", stderr);
        return INVALID_ARGUMENT;
    }
    if (options->iebug) {
        // opacities above 238 will be rounded up to 255, because IE6 truncates <255 to 0.
        liq_set_min_opacity(options->liq, 238);
        fputs(" warning: the workaround for IE6 is deprecated\n", stderr);
    }
    if (options->last_index_transparent) {
        liq_set_last_index_transparent(options->liq, true);
    }
    // speed 10/11 imply fast compression; 11 additionally disables dithering
    if (options->speed >= 10) {
        options->fast_compression = true;
        if (options->speed == 11) {
            options->floyd = 0;
            options->speed = 10;
        }
    }
    if (options->speed && LIQ_OK != liq_set_speed(options->liq, options->speed)) {
        fputs("Speed should be between 1 (slow) and 11 (fast).\n", stderr);
        return INVALID_ARGUMENT;
    }
    if (options->colors && LIQ_OK != liq_set_max_colors(options->liq, options->colors)) {
        fputs("Number of colors must be between 2 and 256.\n", stderr);
        return INVALID_ARGUMENT;
    }
    if (options->posterize && LIQ_OK != liq_set_min_posterization(options->liq, options->posterize)) {
        fputs("Posterization should be number of bits in range 0-4.\n", stderr);
        return INVALID_ARGUMENT;
    }
    if (options->extension && options->output_file_path) {
        fputs("--ext and --output options can't be used at the same time\n", stderr);
        return INVALID_ARGUMENT;
    }
    // new filename extension depends on options used. Typically basename-fs8.png
    if (options->extension == NULL) {
        options->extension = options->floyd > 0 ? "-fs8.png" : "-or8.png";
    }
    if (options->output_file_path && options->num_files != 1) {
        fputs(" error: Only one input file is allowed when --output is used. This error also happens when filenames with spaces are not in quotes.\n", stderr);
        return INVALID_ARGUMENT;
    }
    if (options->using_stdout && !options->using_stdin && options->num_files != 1) {
        fputs(" error: Only one input file is allowed when using the special output path \"-\" to write to stdout. This error also happens when filenames with spaces are not in quotes.\n", stderr);
        return INVALID_ARGUMENT;
    }
    // --map: extract a fixed palette from a reference image by quantizing it
    if (options->map_file) {
        png24_image tmp = {.width=0};
        if (SUCCESS != read_image(options->liq, options->map_file, false, &tmp, &options->fixed_palette_image, true, true, false)) {
            fprintf(stderr, " error: unable to load %s", options->map_file);
            return INVALID_ARGUMENT;
        }
        liq_result *tmp_quantize = liq_quantize_image(options->liq, options->fixed_palette_image);
        const liq_palette *pal = liq_get_palette(tmp_quantize);
        if (!pal) {
            fprintf(stderr, " error: unable to read colors from %s", options->map_file);
            return INVALID_ARGUMENT;
        }
        for(unsigned int i=0; i < pal->count; i++) {
            liq_image_add_fixed_color(options->fixed_palette_image, pal->entries[i]);
        }
        liq_result_destroy(tmp_quantize);
    }
    if (!options->num_files && !options->using_stdin) {
        fputs("No input files specified.\n", stderr);
        if (options->verbose) {
            print_full_version(stderr);
        }
        print_usage(stderr);
        return MISSING_ARGUMENT;
    }
#ifdef _OPENMP
    // if there's a lot of files, coarse parallelism can be used
    if (options->num_files > 2*omp_get_max_threads()) {
        omp_set_nested(0);
        omp_set_dynamic(1);
    } else {
        omp_set_nested(1);
    }
#endif
    unsigned int error_count=0, skipped_count=0, file_count=0;
    pngquant_error latest_error=SUCCESS;
    // one file per iteration; counters are OpenMP reductions, latest_error
    // is shared and updated inside a critical section below
    #pragma omp parallel for \
        schedule(static, 1) reduction(+:skipped_count) reduction(+:error_count) reduction(+:file_count) shared(latest_error)
    for(int i=0; i < options->num_files; i++) {
        const char *filename = options->using_stdin ? "stdin" : options->files[i];
        // each iteration works on its own copy of options and liq attr
        struct pngquant_options opts = *options;
        opts.liq = liq_attr_copy(options->liq);
#ifdef _OPENMP
        struct buffered_log buf = {0};
        if (opts.log_callback && omp_get_num_threads() > 1 && opts.num_files > 1) {
            liq_set_log_callback(opts.liq, log_callback_buferred, &buf);
            liq_set_log_flush_callback(opts.liq, log_callback_buferred_flush, &buf);
            opts.log_callback = log_callback_buferred;
            opts.log_callback_user_info = &buf;
        }
#endif
        pngquant_error retval = SUCCESS;
        const char *outname = opts.output_file_path;
        char *outname_free = NULL;
        if (!opts.using_stdout) {
            if (!outname) {
                outname = outname_free = add_filename_extension(filename, opts.extension);
            }
            if (!opts.force && file_exists(outname)) {
                fprintf(stderr, " error: '%s' exists; not overwriting\n", outname);
                retval = NOT_OVERWRITING_ERROR;
            }
        }
        if (SUCCESS == retval) {
            retval = pngquant_file_internal(filename, outname, &opts);
        }
        free(outname_free);
        liq_attr_destroy(opts.liq);
        if (retval) {
            #pragma omp critical
            {
                latest_error = retval;
            }
            if (retval == TOO_LOW_QUALITY || retval == TOO_LARGE_FILE) {
                skipped_count++;
            } else {
                error_count++;
            }
        }
        ++file_count;
    }
    if (error_count) {
        verbose_printf(options, "There were errors quantizing %d file%s out of a total of %d file%s.",
                       error_count, (error_count == 1)? "" : "s", file_count, (file_count == 1)? "" : "s");
    }
    if (skipped_count) {
        verbose_printf(options, "Skipped %d file%s out of a total of %d file%s.",
                       skipped_count, (skipped_count == 1)? "" : "s", file_count, (file_count == 1)? "" : "s");
    }
    if (!skipped_count && !error_count) {
        verbose_printf(options, "Quantized %d image%s.",
                       file_count, (file_count == 1)? "" : "s");
    }
    if (options->fixed_palette_image) liq_image_destroy(options->fixed_palette_image);
    liq_attr_destroy(options->liq);
    return latest_error;
}
/// Don't hack this. Instead use https://github.com/ImageOptim/libimagequant/blob/f54d2f1a3e1cf728e17326f4db0d45811c63f063/example.c
/*
 * Quantize a single file end-to-end: read PNG, quantize/remap via
 * libimagequant, write the palette image.  Returns SUCCESS or the first
 * error encountered; TOO_LOW_QUALITY / TOO_LARGE_FILE mean the file was
 * deliberately skipped rather than failed.
 */
static pngquant_error pngquant_file_internal(const char *filename, const char *outname, struct pngquant_options *options)
{
    pngquant_error retval = SUCCESS;
    verbose_printf(options, "%s:", filename);
    liq_image *input_image = NULL;
    png24_image input_image_rwpng = {.width=0};
    // keep decoded pixels around when the original might need to be re-output
    bool keep_input_pixels = options->skip_if_larger || (options->using_stdout && options->min_quality_limit); // original may need to be output to stdout
    if (SUCCESS == retval) {
        retval = read_image(options->liq, filename, options->using_stdin, &input_image_rwpng, &input_image, keep_input_pixels, options->strip, options->verbose);
    }
    int quality_percent = 90; // quality on 0-100 scale, updated upon successful remap
    png8_image output_image = {.width=0};
    if (SUCCESS == retval) {
        verbose_printf(options, " read %luKB file", (input_image_rwpng.file_size+1023UL)/1024UL);
        // report which color-management path the reader took
        if (RWPNG_ICCP == input_image_rwpng.input_color) {
            verbose_printf(options, " used embedded ICC profile to transform image to sRGB colorspace");
        } else if (RWPNG_GAMA_CHRM == input_image_rwpng.input_color) {
            verbose_printf(options, " used gAMA and cHRM chunks to transform image to sRGB colorspace");
        } else if (RWPNG_ICCP_WARN_GRAY == input_image_rwpng.input_color) {
            verbose_printf(options, " warning: ignored ICC profile in GRAY colorspace");
        } else if (RWPNG_COCOA == input_image_rwpng.input_color) {
            // No comment
        } else if (RWPNG_SRGB == input_image_rwpng.input_color) {
            verbose_printf(options, " passing sRGB tag from the input");
        } else if (input_image_rwpng.gamma != 0.45455) {
            verbose_printf(options, " converted image from gamma %2.1f to gamma 2.2",
                           1.0/input_image_rwpng.gamma);
        }
        // when using image as source of a fixed palette the palette is extracted using regular quantization
        liq_result *remap;
        liq_error remap_error = liq_image_quantize(options->fixed_palette_image ? options->fixed_palette_image : input_image, options->liq, &remap);
        if (LIQ_OK == remap_error) {
            // fixed gamma ~2.2 for the web. PNG can't store exact 1/2.2
            // NB: can't change gamma here, because output_color is allowed to be an sRGB tag
            liq_set_output_gamma(remap, 0.45455);
            liq_set_dithering_level(remap, options->floyd);
            retval = prepare_output_image(remap, input_image, input_image_rwpng.output_color, &output_image);
            if (SUCCESS == retval) {
                if (LIQ_OK != liq_write_remapped_image_rows(remap, input_image, output_image.row_pointers)) {
                    retval = OUT_OF_MEMORY_ERROR;
                }
                set_palette(remap, &output_image);
                double palette_error = liq_get_quantization_error(remap);
                if (palette_error >= 0) {
                    quality_percent = liq_get_quantization_quality(remap);
                    verbose_printf(options, " mapped image to new colors...MSE=%.3f (Q=%d)", palette_error, quality_percent);
                }
            }
            liq_result_destroy(remap);
        } else if (LIQ_QUALITY_TOO_LOW == remap_error) {
            retval = TOO_LOW_QUALITY;
        } else {
            retval = INVALID_ARGUMENT; // dunno
        }
    }
    if (SUCCESS == retval) {
        if (options->skip_if_larger) {
            // this is very rough approximation, but generally avoid losing more quality than is gained in file size.
            // Quality is raised to 1.5, because even greater savings are needed to justify big quality loss.
            // but >50% savings are considered always worthwile in order to allow low quality conversions to work at all
            const double quality = quality_percent/100.0;
            const double expected_reduced_size = pow(quality, 1.5);
            output_image.maximum_file_size = (input_image_rwpng.file_size-1) * (expected_reduced_size < 0.5 ? 0.5 : expected_reduced_size);
        }
        output_image.fast_compression = options->fast_compression;
        // transfer ownership of ancillary PNG chunks to the output image
        output_image.chunks = input_image_rwpng.chunks; input_image_rwpng.chunks = NULL;
        retval = write_image(&output_image, NULL, outname, options);
        if (TOO_LARGE_FILE == retval) {
            verbose_printf(options, " file exceeded expected size of %luKB", (unsigned long)output_image.maximum_file_size/1024UL);
        }
        if (SUCCESS == retval && output_image.metadata_size > 0) {
            verbose_printf(options, " copied %dKB of additional PNG metadata", (int)(output_image.metadata_size+999)/1000);
        }
    }
    if (options->using_stdout && keep_input_pixels && (TOO_LARGE_FILE == retval || TOO_LOW_QUALITY == retval)) {
        // when outputting to stdout it'd be nasty to create 0-byte file
        // so if quality is too low, output 24-bit original
        pngquant_error write_retval = write_image(NULL, &input_image_rwpng, outname, options);
        if (write_retval) {
            retval = write_retval;
        }
    }
    if (input_image) liq_image_destroy(input_image);
    rwpng_free_image24(&input_image_rwpng);
    rwpng_free_image8(&output_image);
    return retval;
}
// Copy the quantizer's final palette into the output image structure.
static void set_palette(liq_result *result, png8_image *output_image)
{
    const liq_palette *pal = liq_get_palette(result);
    output_image->num_palette = pal->count;
    unsigned int i = 0;
    while (i < pal->count) {
        const liq_color entry = pal->entries[i];
        output_image->palette[i] = (rwpng_rgba){.r=entry.r, .g=entry.g, .b=entry.b, .a=entry.a};
        i++;
    }
}
// Report whether `outname` can be opened for reading (i.e. already exists).
static bool file_exists(const char *outname)
{
    FILE *probe = fopen(outname, "rb");
    if (probe == NULL) {
        return false;
    }
    fclose(probe);
    return true;
}
/* build the output filename from the input name by inserting "-fs8" or
* "-or8" before the ".png" extension (or by appending that plus ".png" if
* there isn't any extension), then make sure it doesn't exist already */
/* build the output filename from the input name by inserting "-fs8" or
 * "-or8" before the ".png" extension (or by appending that plus ".png" if
 * there isn't any extension).  Returns a heap string the caller frees, or
 * NULL on allocation failure. */
static char *add_filename_extension(const char *filename, const char *newext)
{
    size_t x = strlen(filename);
    char* outname = malloc(x+4+strlen(newext)+1);
    if (!outname) return NULL;
    memcpy(outname, filename, x+1); // include the '\0' (strncpy would not terminate)
    // Only look for a trailing ".png"/".PNG" when the name is long enough;
    // previously a name shorter than 4 chars made outname+x-4 point before
    // the buffer (out-of-bounds read, undefined behavior).
    if (x >= 4 && (strncmp(outname+x-4, ".png", 4) == 0 || strncmp(outname+x-4, ".PNG", 4) == 0)) {
        strcpy(outname+x-4, newext);
    } else {
        strcpy(outname+x, newext);
    }
    return outname;
}
// Return a heap-allocated "<basename>.tmp" (caller frees), or NULL on OOM.
static char *temp_filename(const char *basename) {
    const size_t len = strlen(basename);
    char *name = malloc(len + sizeof ".tmp"); // sizeof includes the '\0'
    if (!name) return NULL;
    memcpy(name, basename, len);
    memcpy(name + len, ".tmp", sizeof ".tmp");
    return name;
}
// Switch the stream to binary mode on Windows; a no-op elsewhere.
// NOTE(review): passes a hard-coded fd (1 for stdout, 0 otherwise) to
// setmode() instead of fileno(fp), so this is only correct for
// stdin/stdout — the only streams it is called with here. Confirm before
// reusing with other streams.
static void set_binary_mode(FILE *fp)
{
#if defined(_WIN32) || defined(WIN32) || defined(__WIN32__)
    setmode(fp == stdout ? 1 : 0, O_BINARY);
#endif
}
// Return the basename portion of a '/'-separated path (pointer into `path`).
static const char *filename_part(const char *path)
{
    const char *last_slash = strrchr(path, '/');
    return last_slash ? last_slash + 1 : path;
}
// Atomically move `from` over `to`; returns true on success.  On Windows,
// rename() refuses to overwrite, so force first removes the destination.
static bool replace_file(const char *from, const char *to, const bool force) {
#if defined(_WIN32) || defined(WIN32) || defined(__WIN32__)
    if (force) {
        // On Windows rename doesn't replace
        unlink(to);
    }
#endif
    return rename(from, to) == 0;
}
/*
 * Write either the 8-bit palette image (output_image) or the 24-bit
 * original (output_image24) to stdout or to `outname`.  File output goes
 * through a ".tmp" file renamed over the destination so a failed write
 * never clobbers an existing file.  libpng calls are serialized with an
 * OpenMP critical section.
 */
static pngquant_error write_image(png8_image *output_image, png24_image *output_image24, const char *outname, struct pngquant_options *options)
{
    FILE *outfile;
    char *tempname = NULL;
    if (options->using_stdout) {
        set_binary_mode(stdout);
        outfile = stdout;
        if (output_image) {
            verbose_printf(options, " writing %d-color image to stdout", output_image->num_palette);
        } else {
            verbose_printf(options, " writing truecolor image to stdout");
        }
    } else {
        tempname = temp_filename(outname);
        if (!tempname) return OUT_OF_MEMORY_ERROR;
        if ((outfile = fopen(tempname, "wb")) == NULL) {
            fprintf(stderr, " error: cannot open '%s' for writing\n", tempname);
            free(tempname);
            return CANT_WRITE_ERROR;
        }
        if (output_image) {
            verbose_printf(options, " writing %d-color image as %s", output_image->num_palette, filename_part(outname));
        } else {
            verbose_printf(options, " writing truecolor image as %s", filename_part(outname));
        }
    }
    pngquant_error retval;
    // libpng is not reentrant here; serialize encode across threads
    #pragma omp critical (libpng)
    {
        if (output_image) {
            retval = rwpng_write_image8(outfile, output_image);
        } else {
            retval = rwpng_write_image24(outfile, output_image24);
        }
    }
    if (!options->using_stdout) {
        fclose(outfile);
        if (SUCCESS == retval) {
            // Image has been written to a temporary file and then moved over destination.
            // This makes replacement atomic and avoids damaging destination file on write error.
            if (!replace_file(tempname, outname, options->force)) {
                retval = CANT_WRITE_ERROR;
            }
        }
        if (retval) {
            // failed (or skipped) write: do not leave the temp file behind
            unlink(tempname);
        }
    }
    free(tempname);
    if (retval && retval != TOO_LARGE_FILE) {
        fprintf(stderr, " error: failed writing image to %s (%d)\n", options->using_stdout ? "stdout" : outname, retval);
    }
    return retval;
}
/* Decode a PNG from `filename` (or stdin) into *input_image_p and wrap its
 * pixel rows in a liq_image for quantization (*liq_image_p).
 *
 * Unless keep_input_pixels is set, ownership of the row/pixel buffers is
 * handed to libimagequant and the png24_image's pointers are cleared so the
 * caller cannot double-free them.
 *
 * Returns SUCCESS or a pngquant_error code. */
static pngquant_error read_image(liq_attr *options, const char *filename, int using_stdin, png24_image *input_image_p, liq_image **liq_image_p, bool keep_input_pixels, bool strip, bool verbose)
{
    FILE *infile;
    if (using_stdin) {
        set_binary_mode(stdin);
        infile = stdin;
    } else if ((infile = fopen(filename, "rb")) == NULL) {
        fprintf(stderr, " error: cannot open %s for reading\n", filename);
        return READ_ERROR;
    }
    pngquant_error retval;
    // libpng is not reentrant; serialize all libpng work across OpenMP threads
    #pragma omp critical (libpng)
    {
        retval = rwpng_read_image24(infile, input_image_p, strip, verbose);
    }
    if (!using_stdin) {
        fclose(infile);
    }
    if (retval) {
        fprintf(stderr, " error: cannot decode image %s\n", using_stdin ? "from stdin" : filename_part(filename));
        return retval;
    }
    *liq_image_p = liq_image_create_rgba_rows(options, (void**)input_image_p->row_pointers, input_image_p->width, input_image_p->height, input_image_p->gamma);
    if (!*liq_image_p) {
        return OUT_OF_MEMORY_ERROR;
    }
    if (!keep_input_pixels) {
        // Transfer buffer ownership to libimagequant, then forget our
        // pointers so they are not freed twice.
        if (LIQ_OK != liq_image_set_memory_ownership(*liq_image_p, LIQ_OWN_ROWS | LIQ_OWN_PIXELS)) {
            return OUT_OF_MEMORY_ERROR;
        }
        input_image_p->row_pointers = NULL;
        input_image_p->rgba_data = NULL;
    }
    return SUCCESS;
}
/* Size the 8-bit indexed output image to match the input, allocate its pixel
 * buffer (one byte per pixel) and set up per-row pointers into that buffer. */
static pngquant_error prepare_output_image(liq_result *result, liq_image *input_image, rwpng_color_transform output_color, png8_image *output_image)
{
    output_image->width = liq_image_get_width(input_image);
    output_image->height = liq_image_get_height(input_image);
    output_image->gamma = liq_get_output_gamma(result);
    output_image->output_color = output_color;
    /*
    ** Step 3.7 [GRR]: allocate memory for the entire indexed image
    */
    output_image->indexed_data = malloc(output_image->height * output_image->width);
    output_image->row_pointers = malloc(output_image->height * sizeof(output_image->row_pointers[0]));
    if (!output_image->indexed_data || !output_image->row_pointers) {
        // NOTE(review): if only one of the two allocations succeeded it is
        // not freed here — presumably the caller releases output_image's
        // buffers on error; verify against the call site.
        return OUT_OF_MEMORY_ERROR;
    }
    // Each row pointer aliases directly into the contiguous indexed_data.
    for(size_t row = 0; row < output_image->height; row++) {
        output_image->row_pointers[row] = output_image->indexed_data + row * output_image->width;
    }
    const liq_palette *palette = liq_get_palette(result);
    // tRNS, etc.
    output_image->num_palette = palette->count;
    return SUCCESS;
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` values.
 *
 * Note: *y is normalized in place (carry/borrow between seconds and
 * microseconds) as a side effect, exactly as in the classic glibc example.
 *
 * Returns 1 if the difference is negative, 0 otherwise; result->tv_usec is
 * always non-negative on return. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y's microseconds so x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    /* Move any excess (> 1 second) of the microsecond difference into y's seconds. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* The microsecond difference is now guaranteed non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* 3D 7-point order-1 stencil benchmark driver.
 *
 * Usage: ./3d7pt [Nx Ny Nz [Nt]] — interior grid size plus time steps.
 * Fixes vs. the original:
 *  - Nx/Ny/Nz/Nt were read uninitialized when too few arguments were given
 *    (undefined behavior); they now have defaults.
 *  - the undefined lowercase `min()` call is replaced by the MIN macro
 *    defined at the top of this file.
 *  - the halo planes of A[0] and all of A[1] were read by the stencil
 *    before ever being written; both planes are now fully initialized. */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    /* Grid dimensions including the 2-wide halo, and number of time steps.
     * Defaults (interior 32^3, 10 steps) apply when not given on the command line. */
    int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1])+2;
        Ny = atoi(argv[2])+2;
        Nz = atoi(argv[3])+2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    /* Two time planes of an Nz x Ny x Nx grid, allocated row by row. */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 8;
    tile_size[1] = 8;
    tile_size[2] = 4;
    tile_size[3] = 2048;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;

    const int BASE = 1024;
    const double alpha = 0.0876;
    const double beta = 0.0765;

    /* Initialize ALL cells of both planes: the stencil reads the boundary
     * planes of A[t%2] at every step, so nothing may stay uninitialized. */
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = 0.0;
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt-1; t++) {
            for (i = 1; i < Nz-1; i++) {
                for (j = 1; j < Ny-1; j++) {
                    for (k = 1; k < Nx-1; k++) {
                        A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
                            + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
                                      A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        /* Fix: the original called an undefined lowercase min(). */
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Freeing the arrays is deliberately skipped (was observed to perturb
    // the timing of subsequent benchmark runs); the OS reclaims at exit.
    return 0;
}
|
GB_unop__isfinite_bool_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__isfinite_bool_fp32)
// op(A') function: GB (_unop_tran__isfinite_bool_fp32)
// C type: bool
// A type: float
// cast: float cij = (aij)
// unaryop: cij = isfinite (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = isfinite (x) ;
// casting
#define GB_CAST(z, aij) \
float z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = (aij) ; \
Cx [pC] = isfinite (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISFINITE || GxB_NO_BOOL || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__isfinite_bool_fp32)
(
bool *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = (aij) ;
Cx [p] = isfinite (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = (aij) ;
Cx [p] = isfinite (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A while casting each entry to float and
// applying isfinite. All work is done by the shared template
// GB_unop_transpose.c, driven by the GB_* macros defined above in this file.
GrB_Info GB (_unop_tran__isfinite_bool_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-thread workspaces for the template
    const int64_t *restrict A_slice,    // how A's entries are split over threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unaryop__lnot_uint8_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint8_int8
// op(A') function: GB_tran__lnot_uint8_int8
// C type: uint8_t
// A type: int8_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT8 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = !(Ax [p] != 0), with the int8_t input cast to uint8_t first
// (the GB_GETA / GB_CASTING / GB_OP macros above, written out explicitly).
GrB_Info GB_unop__lnot_uint8_int8
(
    uint8_t *restrict Cx,
    const int8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        int8_t aij = Ax [p] ;           // aij = Ax [p]
        uint8_t x = (uint8_t) aij ;     // cast to the operator's input type
        Cx [p] = !(x != 0) ;            // logical not
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A while casting each int8_t entry to uint8_t
// and applying logical not. All work is done by the shared template
// GB_unaryop_transpose.c, driven by the GB_* macros defined above.
GrB_Info GB_tran__lnot_uint8_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,                // per-slice row counts from phase 1
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,    // how A is split over slices
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
bml_submatrix_ellpack.c | #include "../../macros.h"
#include "../bml_logger.h"
#include "../bml_submatrix.h"
#include "../bml_types.h"
#include "../dense/bml_types_dense.h"
#include "bml_submatrix_ellpack.h"
#include "bml_types_ellpack.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Determine element indices for submatrix, given a set of nodes/orbitals.
*
* \ingroup submatrix_group_C
*
* \param A Hamiltonian matrix A
* \param B Graph matrix B
* \param nodelist List of node/orbital indeces
* \param nsize Size of nodelist
* \param core_halo_index List of core+halo indeces
* \param vsize Size of core_halo_index and core_pos
* \param double_jump_flag Flag to use double jump (0=no, 1=yes)
*/
void
bml_matrix2submatrix_index_ellpack(
    bml_matrix_ellpack_t * A,
    bml_matrix_ellpack_t * B,
    int *nodelist,
    int nsize,
    int *core_halo_index,
    int *vsize,
    int double_jump_flag)
{
    /* Dispatch on the numeric precision of A to the typed implementation. */
    if (A->matrix_precision == single_real)
        bml_matrix2submatrix_index_ellpack_single_real(A, B, nodelist, nsize,
                                                       core_halo_index, vsize,
                                                       double_jump_flag);
    else if (A->matrix_precision == double_real)
        bml_matrix2submatrix_index_ellpack_double_real(A, B, nodelist, nsize,
                                                       core_halo_index, vsize,
                                                       double_jump_flag);
    else if (A->matrix_precision == single_complex)
        bml_matrix2submatrix_index_ellpack_single_complex(A, B, nodelist,
                                                          nsize,
                                                          core_halo_index,
                                                          vsize,
                                                          double_jump_flag);
    else if (A->matrix_precision == double_complex)
        bml_matrix2submatrix_index_ellpack_double_complex(A, B, nodelist,
                                                          nsize,
                                                          core_halo_index,
                                                          vsize,
                                                          double_jump_flag);
    else
        LOG_ERROR("unknown precision\n");
}
/** Determine element indices for submatrix, given a set of nodes/orbitals.
*
* \ingroup submatrix_group_C
*
* \param B Graph matrix B
* \param nodelist List of node/orbital indeces
* \param nsize Size of nodelist
* \param core_halo_index List of core+halo indeces
* \param vsize Size of core_halo_index and core_pos
* \param double_jump_flag Flag to use double jump (0=no, 1=yes)
*/
void
bml_matrix2submatrix_index_graph_ellpack(
    bml_matrix_ellpack_t * B,
    int *nodelist,
    int nsize,
    int *core_halo_index,
    int *vsize,
    int double_jump_flag)
{
    /* Dispatch on the precision of the graph matrix B. */
    switch (B->matrix_precision)
    {
        case single_real:
            bml_matrix2submatrix_index_graph_ellpack_single_real(B, nodelist,
                                                                 nsize,
                                                                 core_halo_index,
                                                                 vsize,
                                                                 double_jump_flag);
            break;
        case double_real:
            /* Fix: previously mis-dispatched to the single_real variant. */
            bml_matrix2submatrix_index_graph_ellpack_double_real(B, nodelist,
                                                                 nsize,
                                                                 core_halo_index,
                                                                 vsize,
                                                                 double_jump_flag);
            break;
        case single_complex:
            /* Fix: previously mis-dispatched to the double_complex variant. */
            bml_matrix2submatrix_index_graph_ellpack_single_complex(B,
                                                                    nodelist,
                                                                    nsize,
                                                                    core_halo_index,
                                                                    vsize,
                                                                    double_jump_flag);
            break;
        case double_complex:
            bml_matrix2submatrix_index_graph_ellpack_double_complex(B,
                                                                    nodelist,
                                                                    nsize,
                                                                    core_halo_index,
                                                                    vsize,
                                                                    double_jump_flag);
            break;
        default:
            LOG_ERROR("unknown precision\n");
            break;
    }
}
/** Extract a submatrix from a matrix given a set of core+halo rows.
*
* \ingroup submatrix_group_C
*
* \param A Matrix A
* \param B Submatrix B
* \param core_halo_index Set of row indeces for submatrix
* \param llsize Number of indeces
*/
void
bml_matrix2submatrix_ellpack(
    bml_matrix_ellpack_t * A,
    bml_matrix_dense_t * B,
    int *core_halo_index,
    int lsize)
{
    /* Extract the core+halo rows of A into the dense submatrix B, using the
     * implementation matching A's precision. */
    if (A->matrix_precision == single_real)
        bml_matrix2submatrix_ellpack_single_real(A, B, core_halo_index,
                                                 lsize);
    else if (A->matrix_precision == double_real)
        bml_matrix2submatrix_ellpack_double_real(A, B, core_halo_index,
                                                 lsize);
    else if (A->matrix_precision == single_complex)
        bml_matrix2submatrix_ellpack_single_complex(A, B, core_halo_index,
                                                    lsize);
    else if (A->matrix_precision == double_complex)
        bml_matrix2submatrix_ellpack_double_complex(A, B, core_halo_index,
                                                    lsize);
    else
        LOG_ERROR("unknown precision\n");
}
/** Assemble submatrix into a full matrix based on core+halo indeces.
*
* \ingroup submatrix_group_C
*
* \param A Submatrix A
* \param B Matrix B
* \param core_halo_index Set of submatrix row indeces
* \param lsize Number of indeces
* \param llsize Number of core positions
* \param threshold Threshold for elements
*/
void
bml_submatrix2matrix_ellpack(
    bml_matrix_dense_t * A,
    bml_matrix_ellpack_t * B,
    int *core_halo_index,
    int lsize,
    int llsize,
    double threshold)
{
    /* Scatter the dense submatrix A back into B, using the implementation
     * matching A's precision. */
    if (A->matrix_precision == single_real)
        bml_submatrix2matrix_ellpack_single_real(A, B, core_halo_index,
                                                 lsize, llsize, threshold);
    else if (A->matrix_precision == double_real)
        bml_submatrix2matrix_ellpack_double_real(A, B, core_halo_index,
                                                 lsize, llsize, threshold);
    else if (A->matrix_precision == single_complex)
        bml_submatrix2matrix_ellpack_single_complex(A, B, core_halo_index,
                                                    lsize, llsize, threshold);
    else if (A->matrix_precision == double_complex)
        bml_submatrix2matrix_ellpack_double_complex(A, B, core_halo_index,
                                                    lsize, llsize, threshold);
    else
        LOG_ERROR("unknown precision\n");
}
/** Get vector from matrix.
*
* \ingroup submatrix_group_C
*
* \param A Matrix A
* \param jj Index set
* \param irow Which row
* \param colCnt Number of columns
* \param rvalue Returned vector
*/
void *
bml_getVector_ellpack(
    bml_matrix_ellpack_t * A,
    int *jj,
    int irow,
    int colCnt)
{
    /* Return the requested row slice from the precision-typed implementation;
     * NULL if A's precision is unrecognized. */
    if (A->matrix_precision == single_real)
        return bml_getVector_ellpack_single_real(A, jj, irow, colCnt);
    if (A->matrix_precision == double_real)
        return bml_getVector_ellpack_double_real(A, jj, irow, colCnt);
    if (A->matrix_precision == single_complex)
        return bml_getVector_ellpack_single_complex(A, jj, irow, colCnt);
    if (A->matrix_precision == double_complex)
        return bml_getVector_ellpack_double_complex(A, jj, irow, colCnt);

    LOG_ERROR("unknown precision\n");
    return NULL;
}
/** Assemble matrix based on groups of rows from a matrix.
*
* \ingroup submatrix_group_C
*
* \param A Matrix A
* \param hindex Indeces of nodes
* \param ngroups Number of groups
* \param threshold Threshold for graph
*/
bml_matrix_ellpack_t *
bml_group_matrix_ellpack(
    bml_matrix_ellpack_t * A,
    int *hindex,
    int ngroups,
    double threshold)
{
    /* Build the grouped matrix via the precision-typed implementation;
     * NULL if A's precision is unrecognized. */
    if (A->matrix_precision == single_real)
        return bml_group_matrix_ellpack_single_real(A, hindex, ngroups,
                                                    threshold);
    if (A->matrix_precision == double_real)
        return bml_group_matrix_ellpack_double_real(A, hindex, ngroups,
                                                    threshold);
    if (A->matrix_precision == single_complex)
        return bml_group_matrix_ellpack_single_complex(A, hindex, ngroups,
                                                       threshold);
    if (A->matrix_precision == double_complex)
        return bml_group_matrix_ellpack_double_complex(A, hindex, ngroups,
                                                       threshold);

    LOG_ERROR("unknown precision\n");
    return NULL;
}
/* qsort comparator for ints: ascending order; returns -1, 0, or +1. */
int
sortById(
    const void *a,
    const void *b)
{
    const int lhs = *(const int *) a;
    const int rhs = *(const int *) b;
    /* Classic branch-free three-way comparison. */
    return (lhs > rhs) - (lhs < rhs);
}
/** Assemble adjacency structure from matrix.
*
* \ingroup submatrix_group_C
*
* \param A Matrix A
* \param xadj Index of each row in adjncy
* \param adjncy Adjacency vector
* \param base_flag Return 0- or 1-based
*/
/* Build a CSR-style adjacency structure (xadj/adjncy) from ellpack matrix A,
 * excluding self-edges; each row's neighbor list is sorted ascending.
 * base_flag == 1 converts both arrays to 1-based indexing on the way out.
 *
 * NOTE(review): whether rows store their diagonal entry is probed on row 0
 * only, and the result is applied to every row — assumes all rows are
 * consistent in this respect; verify against the matrix builders. */
void
bml_adjacency_ellpack(
    bml_matrix_ellpack_t * A,
    int *xadj,
    int *adjncy,
    int base_flag)
{
    int A_N = A->N;
    int A_M = A->M;
    int *A_nnz = A->nnz;
    int *A_index = A->index;
    int j;
    int check;

    xadj[0] = 0;

    // Check if diagonal elements are included
    check = 0;
    for (int i = 0; i < A_nnz[0]; i++)
    {
        if (A_index[ROWMAJOR(0, i, A_N, A_M)] == 0)
        {
            check = 1;
            break;
        }
    }

    /* Prefix-sum the per-row neighbor counts (minus one per row when the
     * diagonal is stored, since self-edges are dropped below). */
    for (int i = 1; i < A_N + 1; i++)
    {
        if (check == 1)
            xadj[i] = xadj[i - 1] + A_nnz[i - 1] - 1;
        else
            xadj[i] = xadj[i - 1] + A_nnz[i - 1];
    }

    /* Fill adjncy row by row, skipping diagonal entries; rows are
     * independent, so this parallelizes cleanly (j is per-thread). */
#pragma omp parallel for \
    private(j) \
    shared(A_N, A_M, A_index, A_nnz, xadj, adjncy)
    for (int i = 0; i < A_N; i++)
    {
        j = xadj[i];
        for (int jj = 0; jj < A_nnz[i]; jj++)
        {
            if (A_index[ROWMAJOR(i, jj, A_N, A_M)] != i)
            {
                adjncy[j] = A_index[ROWMAJOR(i, jj, A_N, A_M)];
                j++;
            }
        }
        //assert(j == xadj[i+1]);
    }

    /* Sort each row's neighbor list in ascending order. */
#pragma omp parallel for \
    shared(A_N, xadj, adjncy)
    for (int i = 0; i < A_N; i++)
    {
        qsort(&adjncy[xadj[i]], xadj[i + 1] - xadj[i], sizeof(int), sortById);
    }

    // Add 1 for 1-based
    if (base_flag == 1)
    {
#pragma omp parallel for \
    shared(xadj, A_N, adjncy)
        for (int i = 0; i < A_N; i++)
        {
            for (int j = xadj[i]; j < xadj[i + 1]; j++)
            {
                adjncy[j] += 1;
            }
        }
#pragma omp parallel for \
    shared(xadj, A_N)
        for (int i = 0; i < A_N + 1; i++)
        {
            xadj[i] += 1;
        }
    }
}
/** Assemble adjacency structure from matrix based on groups of rows.
*
* \ingroup submatrix_group_C
*
* \param A Matrix A
* \param hindex Indeces of nodes
* \param nnodes Number of groups
* \param xadj Index of each row in adjncy
* \param adjncy Adjacency vector
* \param base_flag Return 0- or 1-based
*/
/* Build a CSR-style adjacency structure restricted to the `nnodes` rows named
 * (1-based) in hindex; adjncy holds group-local indices. base_flag == 1
 * converts the result to 1-based indexing.
 *
 * Fixes vs. the original: the scratch array hnode was leaked, and the
 * 1-based conversion loop wrote one element past the end of adjncy
 * (`i <= xadj[nnodes]` instead of `<`). */
void
bml_adjacency_group_ellpack(
    bml_matrix_ellpack_t * A,
    int *hindex,
    int nnodes,
    int *xadj,
    int *adjncy,
    int base_flag)
{
    int A_N = A->N;
    int A_M = A->M;
    int *A_nnz = A->nnz;
    int *A_index = A->index;

    /* Scratch: 0-based row index of each selected node. */
    int *hnode = malloc(nnodes * sizeof(int));
    for (int i = 0; i < nnodes; i++)
    {
        hnode[i] = hindex[i] - 1;
    }

    // Determine number of adjacent atoms per atom
    xadj[0] = 0;
    for (int i = 1; i < nnodes + 1; i++)
    {
        int hcount = 0;
        for (int j = 0; j < nnodes; j++)
        {
            for (int k = 0; k < A_nnz[hnode[i - 1]]; k++)
            {
                if (hnode[j] == A_index[ROWMAJOR(hnode[i - 1], k, A_N, A_M)])
                {
                    hcount++;
                    break;
                }
            }
        }
        xadj[i] = xadj[i - 1] + hcount;
    }

    // Fill in adjacent atoms (rows are independent, group-local indices)
#pragma omp parallel for \
    shared(A_N, A_M, A_index, A_nnz) \
    shared(xadj, adjncy, hnode)
    for (int i = 0; i < nnodes; i++)
    {
        int ll = xadj[i];
        for (int j = 0; j < nnodes; j++)
        {
            for (int k = 0; k < A_nnz[hnode[i]]; k++)
            {
                if (hnode[j] == A_index[ROWMAJOR(hnode[i], k, A_N, A_M)])
                {
                    //adjncy[ll] = hnode[j];
                    adjncy[ll] = j;
                    ll++;
                    break;
                }
            }
        }
    }

    // Add 1 for 1-based
    if (base_flag == 1)
    {
        /* adjncy holds exactly xadj[nnodes] entries: use <, not <=,
         * to avoid writing one element out of bounds. */
#pragma omp parallel for \
    shared(xadj, A_N, adjncy)
        for (int i = 0; i < xadj[nnodes]; i++)
        {
            adjncy[i] += 1;
        }
#pragma omp parallel for \
    shared(xadj, A_N)
        for (int i = 0; i < nnodes + 1; i++)
        {
            xadj[i] += 1;
        }
    }

    /* Fix: hnode was leaked in the original. */
    free(hnode);
}
|
ast-dump-openmp-single.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test() {
#pragma omp single
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-single.c:3:1, line:6:1> line:3:6 test 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:6:1>
// CHECK-NEXT: `-OMPSingleDirective {{.*}} <line:4:1, col:19>
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: |-NullStmt {{.*}} <col:3>
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-single.c:4:1) *const restrict'
|
subopt.c | /*
* suboptimal folding - Stefan Wuchty, Walter Fontana & Ivo Hofacker
*
* Vienna RNA package
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <ctype.h>
#include <string.h>
#include <math.h>
#include "ViennaRNA/fold.h"
#include "ViennaRNA/constraints/hard.h"
#include "ViennaRNA/constraints/soft.h"
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/utils/strings.h"
#include "ViennaRNA/params/default.h"
#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/datastructures/lists.h"
#include "ViennaRNA/eval.h"
#include "ViennaRNA/params/basic.h"
#include "ViennaRNA/loops/all.h"
#include "ViennaRNA/cofold.h"
#include "ViennaRNA/gquad.h"
#include "ViennaRNA/alphabet.h"
#include "ViennaRNA/subopt.h"
/* hack */
#include "ViennaRNA/color_output.inc"
#ifdef _OPENMP
#include <omp.h>
#endif
#define true 1
#define false 0
#ifndef ON_SAME_STRAND
#define ON_SAME_STRAND(I, J, C) (((I) >= (C)) || ((J) < (C)))
#endif
/**
* @brief Sequence interval stack element used in subopt.c
*/
typedef struct INTERVAL {
int i;
int j;
int array_flag;
} INTERVAL;
typedef struct {
char *structure;
LIST *Intervals;
int partial_energy;
int is_duplex;
/* int best_energy; */ /* best attainable energy */
} STATE;
typedef struct {
LIST *Intervals;
LIST *Stack;
int nopush;
} subopt_env;
struct old_subopt_dat {
unsigned long max_sol;
unsigned long n_sol;
SOLUTION *SolutionList;
FILE *fp;
};
/*
#################################
# GLOBAL VARIABLES #
#################################
*/
PUBLIC int subopt_sorted = 0; /* output sorted by energy */
PUBLIC int density_of_states[MAXDOS + 1];
PUBLIC double print_energy = 9999; /* printing threshold for use with logML */
/*
#################################
# PRIVATE VARIABLES #
#################################
*/
/* some backward compatibility stuff */
PRIVATE int backward_compat = 0;
PRIVATE vrna_fold_compound_t *backward_compat_compound = NULL;
#ifdef _OPENMP
#pragma omp threadprivate(backward_compat_compound, backward_compat)
#endif
/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
PRIVATE SOLUTION *
wrap_subopt(char *seq,
char *structure,
vrna_param_t *parameters,
int delta,
int is_constrained,
int is_circular,
FILE *fp);
#endif
PRIVATE void
make_pair(int i,
int j,
STATE *state);
/* mark a gquadruplex in the resulting dot-bracket structure */
PRIVATE void
make_gquad(int i,
int L,
int l[3],
STATE *state);
PRIVATE INTERVAL *
make_interval(int i,
int j,
int ml);
PRIVATE STATE *
make_state(LIST *Intervals,
char *structure,
int partial_energy,
int is_duplex,
int length);
PRIVATE STATE *
copy_state(STATE *state);
PRIVATE void
print_state(STATE *state);
PRIVATE void
UNUSED print_stack(LIST *list);
PRIVATE LIST *
make_list(void);
PRIVATE void
push(LIST *list,
void *data);
PRIVATE void
*pop(LIST *list);
PRIVATE int
best_attainable_energy(vrna_fold_compound_t *vc,
STATE *state);
PRIVATE void
scan_interval(vrna_fold_compound_t *vc,
int i,
int j,
int array_flag,
int threshold,
STATE *state,
subopt_env *env);
PRIVATE void
free_interval_node(INTERVAL *node);
PRIVATE void
free_state_node(STATE *node);
PRIVATE void
push_back(LIST *Stack,
STATE *state);
PRIVATE char *
get_structure(STATE *state);
PRIVATE int
compare(const void *solution1,
const void *solution2);
PRIVATE void
make_output(SOLUTION *SL,
int cp,
FILE *fp);
PRIVATE void
repeat(vrna_fold_compound_t *vc,
int i,
int j,
STATE *state,
int part_energy,
int temp_energy,
int best_energy,
int threshold,
subopt_env *env);
PRIVATE void
repeat_gquad(vrna_fold_compound_t *vc,
int i,
int j,
STATE *state,
int part_energy,
int temp_energy,
int best_energy,
int threshold,
subopt_env *env);
PRIVATE void
old_subopt_print(const char *structure,
float energy,
void *data);
PRIVATE void
old_subopt_store(const char *structure,
float energy,
void *data);
/*
#################################
# BEGIN OF FUNCTION DEFINITIONS #
#################################
*/
/*---------------------------------------------------------------------------*/
/*List routines--------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
/* Record base pair (i,j) in the state's dot-bracket string; positions are
 * 1-based on the sequence, hence the -1 offsets. */
PRIVATE void
make_pair(int i,
          int j,
          STATE *state)
{
  char *db = state->structure;

  db[i - 1] = '(';
  db[j - 1] = ')';
}
/* Mark a G-quadruplex in the state's dot-bracket string: four G-runs of
 * length L starting at 1-based position i, separated by linkers l[0..2],
 * each run drawn with '+' characters. */
PRIVATE void
make_gquad(int i,
           int L,
           int l[3],
           STATE *state)
{
  int x;
  char *run = state->structure + (i - 1);

  for (x = 0; x < L; x++) {
    run[x]                              = '+';
    run[x + L + l[0]]                   = '+';
    run[x + 2 * L + l[0] + l[1]]        = '+';
    run[x + 3 * L + l[0] + l[1] + l[2]] = '+';
  }
}
/*---------------------------------------------------------------------------*/
/* Allocate a list node describing sequence interval [i,j] together with the
 * DP array (array_flag) it still has to be evaluated against. */
PRIVATE INTERVAL *
make_interval(int i,
              int j,
              int array_flag)
{
  INTERVAL *ival = lst_newnode(sizeof(INTERVAL));

  ival->i           = i;
  ival->j           = j;
  ival->array_flag  = array_flag;
  return ival;
}
/*---------------------------------------------------------------------------*/
/* Return an INTERVAL node to the list allocator. */
PRIVATE void
free_interval_node(INTERVAL *node)
{
  lst_freenode(node);
}
/*---------------------------------------------------------------------------*/
/* Release a STATE node: its structure string, its interval list (if any),
 * and finally the node itself. */
PRIVATE void
free_state_node(STATE *node)
{
  free(node->structure);
  if (node->Intervals)
    lst_kill(node->Intervals, lst_freenode);
  lst_freenode(node);
}
/*---------------------------------------------------------------------------*/
/* Allocate a new STATE, taking ownership of the supplied interval list and
 * structure string; missing pieces are created fresh (empty list, '.'-filled
 * string of the given length).
 * NOTE(review): like the original, the is_duplex parameter is accepted but
 * never stored — confirm whether the field is meant to be set here. */
PRIVATE STATE *
make_state(LIST *Intervals,
           char *structure,
           int partial_energy,
           int is_duplex,
           int length)
{
  STATE *state = lst_newnode(sizeof(STATE));

  state->Intervals = Intervals ? Intervals : lst_init();

  if (structure) {
    state->structure = structure;
  } else {
    state->structure = (char *)vrna_alloc(length + 1);
    memset(state->structure, '.', (size_t)length);
  }

  state->partial_energy = partial_energy;

  return state;
}
/*---------------------------------------------------------------------------*/
/* Deep-copy a STATE: duplicates the interval list (preserving order) and the
 * partial structure string; aborts via vrna_message_error() on OOM. */
PRIVATE STATE *
copy_state(STATE *state)
{
  STATE *new_state;
  void *after;
  INTERVAL *new_interval, *next;

  new_state = lst_newnode(sizeof(STATE));
  new_state->Intervals = lst_init();
  new_state->partial_energy = state->partial_energy;
  /* new_state->best_energy = state->best_energy; */
  if (state->Intervals->count) {
    /* Insert each copied interval after the previously inserted one so the
     * clone keeps the original ordering. */
    after = LST_HEAD(new_state->Intervals);
    for (next = lst_first(state->Intervals); next; next = lst_next(next)) {
      new_interval = lst_newnode(sizeof(INTERVAL));
      *new_interval = *next;
      lst_insertafter(new_state->Intervals, new_interval, after);
      after = new_interval;
    }
  }

  new_state->structure = strdup(state->structure);
  if (!new_state->structure)
    vrna_message_error("out of memory");

  return new_state;
}
/*---------------------------------------------------------------------------*/
/* Debugging helper: dump a state's remaining intervals, partial structure
 * and partial energy to stdout. */
/*@unused @*/ PRIVATE void
print_state(STATE *state)
{
  INTERVAL *next;

  if (state->Intervals->count) {
    printf("%d intervals:\n", state->Intervals->count);
    for (next = lst_first(state->Intervals); next; next = lst_next(next))
      printf("[%d,%d],%d ", next->i, next->j, next->array_flag);
    printf("\n");
  }

  printf("partial structure: %s\n", state->structure);
  printf("\n");
  printf(" partial_energy: %d\n", state->partial_energy);
  /* printf(" best_energy: %d\n", state->best_energy); */
  (void)fflush(stdout);
}
/*---------------------------------------------------------------------------*/
/* Debugging helper: dump every state currently on the stack to stdout. */
/*@unused @*/ PRIVATE void
print_stack(LIST *list)
{
  void *rec;

  printf("================\n");
  printf("%d states\n", list->count);
  for (rec = lst_first(list); rec; rec = lst_next(rec)) {
    printf("state-----------\n");
    print_state(rec);
  }
  printf("================\n");
}
/*---------------------------------------------------------------------------*/
/* Create a fresh, empty list. */
PRIVATE LIST *
make_list(void)
{
  return lst_init();
}
/*---------------------------------------------------------------------------*/
/* Push `data` onto the front of `list` (insert right after the head node,
 * so the list behaves as a LIFO stack). */
PRIVATE void
push(LIST *list,
     void *data)
{
  lst_insertafter(list, data, LST_HEAD(list));
}
/* PRIVATE void */
/* push_stack(STATE *state) { */ /* keep the stack sorted by energy */
/* STATE *after, *next; */
/* nopush = false; */
/* next = after = LST_HEAD(Stack); */
/* while ( next = lst_next(next)) { */
/* if ( next->best_energy >= state->best_energy ) break; */
/* after = next; */
/* } */
/* lst_insertafter(Stack, state, after); */
/* } */
/*---------------------------------------------------------------------------*/
/* Remove and return the element right after the list head (LIFO pop);
 * ownership of the returned node passes to the caller. */
PRIVATE void *
pop(LIST *list)
{
  return lst_deletenext(list, LST_HEAD(list));
}
/*---------------------------------------------------------------------------*/
/*auxiliary routines---------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
/* Lower bound on the total energy reachable from this state: the partial
 * energy of the elements fixed so far plus the optimal (MFE) energy of every
 * interval still awaiting evaluation, looked up in the DP matrix selected by
 * each interval's array_flag. */
PRIVATE int
best_attainable_energy(vrna_fold_compound_t *vc,
                       STATE *state)
{
  int sum;
  INTERVAL *ival;
  vrna_md_t *md = &(vc->params->model_details);
  vrna_mx_mfe_t *mx = vc->matrices;
  int *indx = vc->jindx;

  sum = state->partial_energy; /* energy of already found elements */

  for (ival = lst_first(state->Intervals); ival; ival = lst_next(ival)) {
    switch (ival->array_flag) {
      case 0:
        sum += (md->circ) ? mx->Fc : mx->f5[ival->j];
        break;
      case 1:
        sum += mx->fML[indx[ival->j] + ival->i];
        break;
      case 2:
        sum += mx->c[indx[ival->j] + ival->i];
        break;
      case 3:
        sum += mx->fM1[indx[ival->j] + ival->i];
        break;
      case 4:
        sum += mx->fc[ival->i];
        break;
      case 5:
        sum += mx->fc[ival->j];
        break;
      case 6:
        sum += mx->ggg[indx[ival->j] + ival->i];
        break;
      default:
        /* unknown flags contribute nothing, as in the original if-chain */
        break;
    }
  }

  return sum;
}
/*---------------------------------------------------------------------------*/
/* Push a deep copy of `state` onto `Stack` (the original remains owned by
 * the caller). */
PRIVATE void
push_back(LIST *Stack,
          STATE *state)
{
  push(Stack, copy_state(state));
  return;
}
/*---------------------------------------------------------------------------*/
/* Hand back a heap-allocated copy of the state's dot-bracket string;
 * the caller owns (and frees) the result. */
PRIVATE char *
get_structure(STATE *state)
{
  return strdup(state->structure);
}
/*---------------------------------------------------------------------------*/
PRIVATE int
compare(const void *solution1,
const void *solution2)
{
if (((SOLUTION *)solution1)->energy > ((SOLUTION *)solution2)->energy)
return 1;
if (((SOLUTION *)solution1)->energy < ((SOLUTION *)solution2)->energy)
return -1;
return strcmp(((SOLUTION *)solution1)->structure,
((SOLUTION *)solution2)->structure);
}
/*---------------------------------------------------------------------------*/
/*
 * Print every solution (structure plus formatted energy) to `fp`.
 * The solution array is terminated by an entry whose structure
 * pointer is NULL.  `cp` is accepted for interface compatibility.
 */
PRIVATE void
make_output(SOLUTION *SL,
            int cp,
            FILE *fp) /* prints stuff */
{
  SOLUTION *sol = SL;

  while (sol->structure != NULL) {
    char *e_string = vrna_strdup_printf(" %6.2f", sol->energy);
    print_structure(fp, sol->structure, e_string);
    free(e_string);
    sol++;
  }
}
/*
 * Clone state `s`, schedule the interval [i,j] for backtracking in the
 * DP array selected by `flag`, and add the energy increment `e` to the
 * clone's partial energy.  Returns the newly allocated child state.
 */
PRIVATE STATE *
derive_new_state(int i,
                 int j,
                 STATE *s,
                 int e,
                 int flag)
{
  STATE *child = copy_state(s);

  push(child->Intervals, make_interval(i, j, flag));
  child->partial_energy += e;
  return child;
}
/*
 * Derive a child of `s` carrying one new interval [i,j] (flag-selected
 * array) plus energy `e`, and push it onto the global backtracking stack.
 */
PRIVATE void
fork_state(int i,
           int j,
           STATE *s,
           int e,
           int flag,
           subopt_env *env)
{
  push(env->Stack, derive_new_state(i, j, s, e, flag));
  env->nopush = false;
}
/*
 * Interior-loop fork: record both the closing pair (i,j) and the inner
 * pair (p,q) in a child state, and continue tracing the inner pair in
 * the c array (array_flag 2).
 */
PRIVATE void
fork_int_state(int i,
               int j,
               int p,
               int q,
               STATE *s,
               int e,
               subopt_env *env)
{
  STATE *child = derive_new_state(p, q, s, e, 2);

  make_pair(i, j, child);
  make_pair(p, q, child);
  push(env->Stack, child);
  env->nopush = false;
}
/*
 * Clone `s`, add base pair (i,j) and energy `e` to the clone, then push
 * the clone onto the backtracking stack.
 */
PRIVATE void
fork_state_pair(int i,
                int j,
                STATE *s,
                int e,
                subopt_env *env)
{
  STATE *child = copy_state(s);

  make_pair(i, j, child);
  child->partial_energy += e;
  push(env->Stack, child);
  env->nopush = false;
}
/*
 * Fork with a closing pair (i,j) and split point k: the child state gets
 * the two sub-intervals [i+1,k-1] (flag1) and [k,j-1] (flag2) plus the
 * base pair (i,j) and energy `e`.
 */
PRIVATE void
fork_two_states_pair(int i,
                     int j,
                     int k,
                     STATE *s,
                     int e,
                     int flag1,
                     int flag2,
                     subopt_env *env)
{
  STATE *child = copy_state(s);
  INTERVAL *left = make_interval(i + 1, k - 1, flag1);
  INTERVAL *right = make_interval(k, j - 1, flag2);

  /* push larger interval first */
  if (k - i < j - k) {
    push(child->Intervals, left);
    push(child->Intervals, right);
  } else {
    push(child->Intervals, right);
    push(child->Intervals, left);
  }

  make_pair(i, j, child);
  child->partial_energy += e;
  push(env->Stack, child);
  env->nopush = false;
}
/*
 * Fork with two independent sub-intervals [i,j] (flag1) and [p,q]
 * (flag2); the intervals are pushed in size-dependent order and the
 * energy increment `e` is added to the child's partial energy.
 */
PRIVATE void
fork_two_states(int i,
                int j,
                int p,
                int q,
                STATE *s,
                int e,
                int flag1,
                int flag2,
                subopt_env *env)
{
  STATE *child = copy_state(s);
  INTERVAL *ival1 = make_interval(i, j, flag1);
  INTERVAL *ival2 = make_interval(p, q, flag2);

  if ((j - i) < (q - p)) {
    push(child->Intervals, ival1);
    push(child->Intervals, ival2);
  } else {
    push(child->Intervals, ival2);
    push(child->Intervals, ival1);
  }

  child->partial_energy += e;
  push(env->Stack, child);
  env->nopush = false;
}
/*---------------------------------------------------------------------------*/
/* start of subopt backtracking ---------------------------------------------*/
/*---------------------------------------------------------------------------*/
/*
 * Public entry point: enumerate suboptimal structures of `vc` within
 * `delta` (in 0.01 kcal/mol units, see the "(float)delta / 100." print
 * below) of the ground state.  If `fp` is non-NULL, results are printed
 * to it; otherwise (or additionally when `sorted`) they are collected
 * into the returned NULL-terminated SOLUTION array, which the caller
 * owns.  Returns NULL when printing-only mode consumed the list.
 */
PUBLIC SOLUTION *
vrna_subopt(vrna_fold_compound_t *vc,
int delta,
int sorted,
FILE *fp)
{
struct old_subopt_dat data;
data.SolutionList = NULL;
data.max_sol = 128;
data.n_sol = 0;
data.fp = fp;
if (vc) {
/* SolutionList stores the suboptimal structures found */
data.SolutionList = (SOLUTION *)vrna_alloc(data.max_sol * sizeof(SOLUTION));
/* end initialize ------------------------------------------------------- */
if (fp) {
/* header line: sequence plus MFE and the allowed energy band */
float min_en;
char *SeQ, *energies = NULL;
if (vc->cutpoint > 0)
min_en = vrna_mfe_dimer(vc, NULL);
else
min_en = vrna_mfe(vc, NULL);
SeQ = vrna_cut_point_insert(vc->sequence, vc->cutpoint);
energies = vrna_strdup_printf(" %6.2f %6.2f", min_en, (float)delta / 100.);
print_structure(fp, SeQ, energies);
free(SeQ);
free(energies);
vrna_mx_mfe_free(vc);
}
/* call subopt() */
vrna_subopt_cb(vc, delta, (!sorted && fp) ? &old_subopt_print : &old_subopt_store,
(void *)&data);
if (sorted) {
/* sort structures by energy */
/* NOTE(review): n_sol - 1 presumably excludes a NULL-structure
 * terminator entry appended by old_subopt_store -- confirm there */
if (data.n_sol > 0)
qsort(data.SolutionList, data.n_sol - 1, sizeof(SOLUTION), compare);
if (fp)
make_output(data.SolutionList, vc->cutpoint, fp);
}
if (fp) {
/* we've printed everything -- free solutions */
SOLUTION *sol;
for (sol = data.SolutionList; sol->structure != NULL; sol++)
free(sol->structure);
free(data.SolutionList);
data.SolutionList = NULL;
}
}
return data.SolutionList;
}
/*
 * Callback-driven suboptimal structure enumeration (Wuchty-style
 * branch-and-bound backtracking).  Fills the MFE arrays first, then
 * repeatedly pops partial states from a stack and either reports a
 * finished structure through `cb` or expands the state's next interval
 * via scan_interval().  `cb` is invoked once more with a NULL structure
 * to signal the end of enumeration.
 */
PUBLIC void
vrna_subopt_cb(vrna_fold_compound_t *vc,
int delta,
vrna_subopt_callback *cb,
void *data)
{
subopt_env *env;
STATE *state;
INTERVAL *interval;
int maxlevel, count, partial_energy, old_dangles, logML, dangle_model, length, circular,
threshold, cp;
double structure_energy, min_en, eprint;
char *struc, *structure;
float correction;
vrna_param_t *P;
vrna_md_t *md;
int minimal_energy;
int Fc;
int *f5;
vrna_fold_compound_prepare(vc, VRNA_OPTION_MFE | VRNA_OPTION_HYBRID);
length = vc->length;
cp = vc->cutpoint;
P = vc->params;
md = &(P->model_details);
/* do mfe folding to get fill arrays and get ground state energy */
/* in case dangles is neither 0 or 2, set dangles=2 while folding */
circular = md->circ;
logML = md->logML;
old_dangles = dangle_model = md->dangles;
if (md->uniq_ML != 1) /* failsafe mechanism to enforce valid fM1 array */
md->uniq_ML = 1;
/* temporarily set dangles to 2 if necessary */
if ((md->dangles != 0) && (md->dangles != 2))
md->dangles = 2;
struc = (char *)vrna_alloc(sizeof(char) * (length + 1));
/* NOTE(review): Fc is assigned only on the circular branch below; it is
 * also only ever read when `circular` is set (see minimal_energy) */
if (circular) {
min_en = vrna_mfe(vc, struc);
Fc = vc->matrices->Fc;
f5 = vc->matrices->f5;
/* restore dangle model */
md->dangles = old_dangles;
/* re-evaluate in case we're using logML etc */
min_en = vrna_eval_structure(vc, struc);
} else {
min_en = vrna_mfe_dimer(vc, struc);
f5 = vc->matrices->f5;
/* restore dangle model */
md->dangles = old_dangles;
/* re-evaluate in case we're using logML etc */
min_en = vrna_eval_structure(vc, struc);
}
free(struc);
eprint = print_energy + min_en;
/* half a deka-cal/mol fudge used below to avoid rounding artifacts */
correction = (min_en < 0) ? -0.1 : 0.1;
/* Initialize ------------------------------------------------------------ */
maxlevel = 0;
count = 0;
partial_energy = 0;
/* Initialize the stack ------------------------------------------------- */
minimal_energy = (circular) ? Fc : f5[length];
threshold = minimal_energy + delta;
if (threshold >= INF) {
vrna_message_warning("Energy range too high, limiting to reasonable value");
threshold = INF - EMAX;
}
/* init env data structure */
env = (subopt_env *)vrna_alloc(sizeof(subopt_env));
env->Stack = NULL;
env->nopush = true;
env->Stack = make_list(); /* anchor */
env->Intervals = make_list(); /* initial state: */
interval = make_interval(1, length, 0); /* interval [1,length,0] */
push(env->Intervals, interval);
env->nopush = false;
state = make_state(env->Intervals, NULL, partial_energy, 0, length);
/* state->best_energy = minimal_energy; */
push(env->Stack, state);
env->nopush = false;
/* end initialize ------------------------------------------------------- */
while (1) {
/* forever, til nothing remains on stack */
maxlevel = (env->Stack->count > maxlevel ? env->Stack->count : maxlevel);
if (LST_EMPTY(env->Stack)) {
/* we are done! clean up and quit */
/* fprintf(stderr, "maxlevel: %d\n", maxlevel); */
lst_kill(env->Stack, free_state_node);
cb(NULL, 0, data); /* NULL (last time to call callback function */
break;
}
/* pop the last element ---------------------------------------------- */
state = pop(env->Stack); /* current state to work with */
if (LST_EMPTY(state->Intervals)) {
int e;
/* state has no intervals left: we got a solution */
count++;
structure = get_structure(state);
structure_energy = state->partial_energy / 100.;
#ifdef CHECK_ENERGY
structure_energy = vrna_eval_structure(vc, structure);
if (!logML)
if ((double)(state->partial_energy / 100.) != structure_energy) {
vrna_message_error("%s %6.2f %6.2f",
structure,
state->partial_energy / 100.,
structure_energy);
exit(1);
}
#endif
if (logML || (dangle_model == 1) || (dangle_model == 3)) /* recalc energy */
structure_energy = vrna_eval_structure(vc, structure);
/* bucket the structure into the density-of-states histogram */
e = (int)((structure_energy - min_en) * 10. - correction); /* avoid rounding errors */
if (e > MAXDOS)
e = MAXDOS;
density_of_states[e]++;
if (structure_energy <= eprint) {
char *outstruct = vrna_cut_point_insert(structure, cp);
cb((const char *)outstruct, structure_energy, data);
free(outstruct);
}
free(structure);
} else {
/* get (and remove) next interval of state to analyze */
interval = pop(state->Intervals);
scan_interval(vc, interval->i, interval->j, interval->array_flag, threshold, state, env);
free_interval_node(interval); /* free the current interval */
}
free_state_node(state); /* free the current state */
} /* end of while (1) */
/* cleanup memory */
free(env);
}
/*
 * Core of the branch-and-bound backtracking: expand the interval [i,j]
 * of `state` according to `array_flag`, pushing every refinement whose
 * best attainable energy stays <= `threshold` onto env->Stack.  If no
 * refinement was pushed (env->nopush still true at the end), the state
 * itself is pushed back so the caller does not lose it.
 */
PRIVATE void
scan_interval(vrna_fold_compound_t *vc,
int i,
int j,
int array_flag,
int threshold,
STATE *state,
subopt_env *env)
{
/* real backtrack routine */
/* array_flag = 0: trace back in f5-array */
/* array_flag = 1: trace back in fML-array */
/* array_flag = 2: trace back in repeat() */
/* array_flag = 3: trace back in fM1-array */
/* array_flag = 4: trace back in fc-array, 5' side of the cut point */
/* array_flag = 5: trace back in fc-array, 3' side of the cut point */
/* array_flag = 6: trace back a g-quadruplex via repeat_gquad() */
STATE *new_state, *temp_state;
INTERVAL *new_interval;
vrna_param_t *P;
vrna_md_t *md;
register int k, fi, cij, ij;
register int type;
register int dangle_model;
register int noLP;
int element_energy, best_energy;
int *fc, *f5, *c, *fML, *fM1, *ggg;
int FcH, FcI, FcM, *fM2;
int length, *indx, *rtype, circular, with_gquad, turn, cp;
char *ptype;
short *S1;
unsigned char *hard_constraints, hc_decompose;
vrna_hc_t *hc;
vrna_sc_t *sc;
/* unpack the fold compound: sequence data, model, DP matrices, constraints */
length = vc->length;
cp = vc->cutpoint;
indx = vc->jindx;
ptype = vc->ptype;
S1 = vc->sequence_encoding;
P = vc->params;
md = &(P->model_details);
rtype = &(md->rtype[0]);
dangle_model = md->dangles;
noLP = md->noLP;
circular = md->circ;
with_gquad = md->gquad;
turn = md->min_loop_size;
fc = vc->matrices->fc;
f5 = vc->matrices->f5;
c = vc->matrices->c;
fML = vc->matrices->fML;
fM1 = vc->matrices->fM1;
ggg = vc->matrices->ggg;
FcH = vc->matrices->FcH;
FcI = vc->matrices->FcI;
FcM = vc->matrices->FcM;
fM2 = vc->matrices->fM2;
hc = vc->hc;
hard_constraints = hc->mx;
sc = vc->sc;
best_energy = best_attainable_energy(vc, state); /* .. on remaining intervals */
env->nopush = true;
if ((i > 1) && (!array_flag))
vrna_message_error("Error while backtracking!");
if (j < i + turn + 1 && ON_SAME_STRAND(i, j, cp)) {
/* interval too short to hold any base pair: nothing left to refine */
/* minimal structure element */
if (array_flag == 0)
/* do not forget to add f5[j], since it may contain pseudo energies from soft constraining */
state->partial_energy += f5[j];
if (env->nopush) {
push_back(env->Stack, state);
env->nopush = false;
}
return;
}
ij = indx[j] + i;
/* 13131313131313131313131313131313131313131313131313131313131313131313131 */
if (array_flag == 3 || array_flag == 1) {
/* array_flag = 3: interval i,j was generated during */
/* a multiloop decomposition using array fM1 in repeat() */
/* or in this block */
/* array_flag = 1: interval i,j was generated from a */
/* stack, bulge, or internal loop in repeat() */
/* or in this block */
if ((hc->up_ml[j]) &&
(((array_flag == 3) && (fM1[indx[j - 1] + i] != INF)) ||
(fML[indx[j - 1] + i] != INF))) {
if (array_flag == 3)
fi = fM1[indx[j - 1] + i] + P->MLbase;
else
fi = fML[indx[j - 1] + i] + P->MLbase;
if (sc) {
if (sc->energy_up)
fi += sc->energy_up[j][1];
if (sc->f)
fi += sc->f(i, j, i, j - 1, VRNA_DECOMP_ML_ML, sc->data);
}
if ((fi + best_energy <= threshold) && (ON_SAME_STRAND(j - 1, j, cp)))
/* no basepair, nibbling of 3'-end */
fork_state(i, j - 1, state, P->MLbase, array_flag, env);
}
hc_decompose = hard_constraints[length * i + j];
if (hc_decompose & VRNA_CONSTRAINT_CONTEXT_MB_LOOP_ENC) {
/* i,j may pair */
cij = c[ij];
if (cij != INF) {
type = vrna_get_ptype(ij, ptype);
switch (dangle_model) {
case 0:
element_energy = E_MLstem(type, -1, -1, P);
break;
default:
element_energy = E_MLstem(type,
(((i > 1) && (ON_SAME_STRAND(i - 1,
i,
cp))) || circular) ? S1[i - 1] : -1,
(((j < length) && (ON_SAME_STRAND(j,
j + 1,
cp))) || circular) ? S1[j + 1] : -1,
P);
break;
}
if (sc) {
if (sc->f)
element_energy += sc->f(i, j, i, j, VRNA_DECOMP_ML_STEM, sc->data);
}
cij += element_energy;
if (cij + best_energy <= threshold)
repeat(vc, i, j, state, element_energy, 0, best_energy, threshold, env);
}
} else if ((with_gquad) && (ggg[ij] != INF)) {
/* (i,j) cannot pair, but a g-quadruplex may span the interval */
element_energy = E_MLstem(0, -1, -1, P);
cij = ggg[ij] + element_energy;
if (cij + best_energy <= threshold)
repeat_gquad(vc, i, j, state, element_energy, 0, best_energy, threshold, env);
}
} /* array_flag == 3 || array_flag == 1 */
/* 11111111111111111111111111111111111111111111111111111111111111111111111 */
if (array_flag == 1) {
/* array_flag = 1: interval i,j was generated from a */
/* stack, bulge, or internal loop in repeat() */
/* or in this block */
int stopp, k1j;
if ((ON_SAME_STRAND(i - 1, i, cp)) && (ON_SAME_STRAND(j, j + 1, cp))) {
/*backtrack in FML only if multiloop is possible*/
for (k = i + turn + 1; k <= j - 1 - turn; k++) {
/* Multiloop decomposition if i,j contains more than 1 stack */
if ((with_gquad) &&
(ON_SAME_STRAND(k, k + 1, cp)) &&
(fML[indx[k] + i] != INF) &&
(ggg[indx[j] + k + 1] != INF)) {
element_energy = E_MLstem(0, -1, -1, P);
if (fML[indx[k] + i] + ggg[indx[j] + k + 1] + element_energy + best_energy <= threshold) {
temp_state = derive_new_state(i, k, state, 0, array_flag);
env->nopush = false;
repeat_gquad(vc,
k + 1,
j,
temp_state,
element_energy,
fML[indx[k] + i],
best_energy,
threshold,
env);
free_state_node(temp_state);
}
}
k1j = indx[j] + k + 1;
if ((hard_constraints[length * j + k + 1] & VRNA_CONSTRAINT_CONTEXT_MB_LOOP_ENC) &&
(fML[indx[k] + i] != INF) &&
(c[k1j] != INF)) {
short s5, s3;
type = vrna_get_ptype(k1j, ptype);
switch (dangle_model) {
case 0:
s5 = s3 = -1;
break;
default:
s5 = (ON_SAME_STRAND(i - 1, i, cp)) ? S1[k] : -1;
s3 = (ON_SAME_STRAND(j, j + 1, cp)) ? S1[j + 1] : -1;
break;
}
element_energy = E_MLstem(type, s5, s3, P);
if (sc) {
if (sc->f)
element_energy += sc->f(i, j, k, k + 1, VRNA_DECOMP_ML_ML_STEM, sc->data);
}
if (ON_SAME_STRAND(k, k + 1, cp)) {
if (fML[indx[k] + i] + c[k1j] + element_energy + best_energy <= threshold) {
temp_state = derive_new_state(i, k, state, 0, array_flag);
env->nopush = false;
repeat(vc,
k + 1,
j,
temp_state,
element_energy,
fML[indx[k] + i],
best_energy,
threshold,
env);
free_state_node(temp_state);
}
}
}
}
}
/* single-stem case: unpaired stretch [i..k] followed by one stem (k+1,j) */
stopp = (cp > 0) ? (cp - 2) : (length); /*if cp -1: k on cut, => no ml*/
stopp = MIN2(stopp, j - 1 - turn);
if (i > cp)
stopp = j - 1 - turn;
else if (i == cp)
stopp = 0; /*not a multi loop*/
int up = 1;
for (k = i; k <= stopp; k++, up++) {
if (hc->up_ml[i] >= up) {
k1j = indx[j] + k + 1;
/* Multiloop decomposition if i,j contains only 1 stack */
if ((with_gquad) && (ggg[k1j] != INF)) {
element_energy = E_MLstem(0, -1, -1, P) + P->MLbase * up;
if (sc)
if (sc->energy_up)
element_energy += sc->energy_up[i][up];
if (ggg[k1j] + element_energy + best_energy <= threshold)
repeat_gquad(vc, k + 1, j, state, element_energy, 0, best_energy, threshold, env);
}
if ((hard_constraints[length * j + k + 1] & VRNA_CONSTRAINT_CONTEXT_MB_LOOP_ENC) &&
(c[k1j] != INF)) {
int s5, s3;
type = vrna_get_ptype(k1j, ptype);
switch (dangle_model) {
case 0:
s5 = s3 = -1;
break;
default:
s5 = (ON_SAME_STRAND(k - 1, k, cp)) ? S1[k] : -1;
s3 = (ON_SAME_STRAND(j, j + 1, cp)) ? S1[j + 1] : -1;
break;
}
element_energy = E_MLstem(type, s5, s3, P);
element_energy += P->MLbase * up;
if (sc) {
if (sc->energy_up)
element_energy += sc->energy_up[i][up];
if (sc->f)
element_energy += sc->f(i, j, k + 1, j, VRNA_DECOMP_ML_STEM, sc->data);
}
if (c[k1j] + element_energy + best_energy <= threshold)
repeat(vc, k + 1, j, state, element_energy, 0, best_energy, threshold, env);
}
}
}
} /* array_flag == 1 */
/* 22222222222222222222222222222222222222222222222222 */
/* */
/* array_flag = 2: interval i,j was generated from a */
/* stack, bulge, or internal loop in repeat() */
/* */
/* 22222222222222222222222222222222222222222222222222 */
if (array_flag == 2) {
repeat(vc, i, j, state, 0, 0, best_energy, threshold, env);
if (env->nopush)
if (!noLP)
vrna_message_warning("%d,%d\nOops, no solution in repeat!", i, j);
return;
}
/* 00000000000000000000000000000000000000000000000000 */
/* */
/* array_flag = 0: interval i,j was found while */
/* tracing back through f5-array and c-array */
/* or within this block */
/* */
/* 00000000000000000000000000000000000000000000000000 */
if ((array_flag == 0) && !circular) {
int s5, s3, kj, tmp_en;
if ((hc->up_ext[j]) &&
(f5[j - 1] != INF)) {
tmp_en = 0;
if (sc) {
if (sc->energy_up)
tmp_en += sc->energy_up[j][1];
if (sc->f)
tmp_en += sc->f(1, j, 1, j - 1, VRNA_DECOMP_EXT_EXT, sc->data);
}
if (f5[j - 1] + tmp_en + best_energy <= threshold)
/* no basepair, nibbling of 3'-end */
fork_state(i, j - 1, state, tmp_en, 0, env);
}
/* split f5[j] into f5[k-1] plus a stem (k,j) for all split points k */
for (k = j - turn - 1; k > 1; k--) {
kj = indx[j] + k;
if ((with_gquad) &&
(ON_SAME_STRAND(k, j, cp)) &&
(f5[k - 1] != INF) &&
(ggg[kj] != INF)) {
element_energy = 0;
if (f5[k - 1] + ggg[kj] + element_energy + best_energy <= threshold) {
temp_state = derive_new_state(1, k - 1, state, 0, 0);
env->nopush = false;
/* backtrace the quadruplex */
repeat_gquad(vc,
k,
j,
temp_state,
element_energy,
f5[k - 1],
best_energy,
threshold,
env);
free_state_node(temp_state);
}
}
if ((hard_constraints[length * j + k] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) &&
(f5[k - 1] != INF) &&
(c[kj] != INF)) {
type = vrna_get_ptype(kj, ptype);
/* k and j pair */
switch (dangle_model) {
case 0:
s5 = s3 = -1;
break;
default:
s5 = (ON_SAME_STRAND(k - 1, k, cp)) ? S1[k - 1] : -1;
s3 = ((j < length) && (ON_SAME_STRAND(j, j + 1, cp))) ? S1[j + 1] : -1;
break;
}
element_energy = E_ExtLoop(type, s5, s3, P);
if (!(ON_SAME_STRAND(k, j, cp))) /*&&(state->is_duplex==0))*/
element_energy += P->DuplexInit;
/*state->is_duplex=1;*/
if (sc) {
if (sc->f)
element_energy += sc->f(1, j, k - 1, k, VRNA_DECOMP_EXT_EXT_STEM, sc->data);
}
if (f5[k - 1] + c[kj] + element_energy + best_energy <= threshold) {
temp_state = derive_new_state(1, k - 1, state, 0, 0);
env->nopush = false;
repeat(vc, k, j, temp_state, element_energy, f5[k - 1], best_energy, threshold, env);
free_state_node(temp_state);
}
}
}
/* finally the case where the stem starts at position 1 (k == 1) */
kj = indx[j] + 1;
if ((with_gquad) &&
(ON_SAME_STRAND(k, j, cp)) &&
(ggg[kj] != INF)) {
element_energy = 0;
if (ggg[kj] + element_energy + best_energy <= threshold)
/* backtrace the quadruplex */
repeat_gquad(vc, 1, j, state, element_energy, 0, best_energy, threshold, env);
}
if ((hard_constraints[length + j] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) &&
(c[kj] != INF)) {
type = vrna_get_ptype(kj, ptype);
s5 = -1;
switch (dangle_model) {
case 0:
s3 = -1;
break;
default:
s3 = (j < length) && (ON_SAME_STRAND(j, j + 1, cp)) ? S1[j + 1] : -1;
break;
}
element_energy = E_ExtLoop(type, s5, s3, P);
if (!(ON_SAME_STRAND(1, j, cp)))
element_energy += P->DuplexInit;
if (sc) {
if (sc->f)
element_energy += sc->f(1, j, 1, j, VRNA_DECOMP_EXT_STEM, sc->data);
}
if (c[kj] + element_energy + best_energy <= threshold)
repeat(vc, 1, j, state, element_energy, 0, best_energy, threshold, env);
}
} /* end array_flag == 0 && !circular*/
/* or do we subopt circular? */
else if (array_flag == 0) {
int k, l, p, q, tmp_en;
/* if we've done everything right, we will never reach this case more than once */
/* right after the initilization of the stack with ([1,n], empty, 0) */
/* lets check, if we can have an open chain without breaking the threshold */
/* this is an ugly work-arround cause in case of an open chain we do not have to */
/* backtrack anything further... */
if (hc->up_ext[1] >= length) {
tmp_en = 0;
if (sc) {
if (sc->energy_up)
tmp_en += sc->energy_up[1][length];
if (sc->f)
tmp_en += sc->f(1, j, 1, j, VRNA_DECOMP_EXT_UP, sc->data);
}
if (tmp_en <= threshold) {
new_state = derive_new_state(1, 2, state, 0, 0);
new_state->partial_energy = 0;
push(env->Stack, new_state);
env->nopush = false;
}
}
/* ok, lets check if we can do an exterior hairpin without breaking the threshold */
/* best energy should be 0 if we are here */
if (FcH + best_energy <= threshold) {
/* lets search for all exterior hairpin cases, that fit into our threshold barrier */
/* we use index k,l to avoid confusion with i,j index of our state... */
/* if we reach here, i should be 1 and j should be n respectively */
for (k = i; k < j; k++) {
if (hc->up_hp[1] < k)
break;
for (l = j; l >= k + turn + 1; l--) {
int kl, tmpE;
kl = indx[l] + k;
if (c[kl] != INF) {
/* note the swapped l,k order: the hairpin is the exterior loop */
tmpE = vrna_E_hp_loop(vc, l, k);
if (c[kl] + tmpE + best_energy <= threshold) {
/* what we really have to do is something like this, isn't it? */
/* we have to create a new state, with interval [k,l], then we */
/* add our loop energy as initial energy of this state and put */
/* the state onto the stack R... for further refinement... */
/* we also denote this new interval to be scanned in C */
fork_state(k, l, state, tmpE, 2, env);
}
}
}
}
}
/* now lets see, if we can do an exterior interior loop without breaking the threshold */
if (FcI + best_energy <= threshold) {
/* now we search for our exterior interior loop possibilities */
for (k = i; k < j; k++) {
for (l = j; l >= k + turn + 1; l--) {
int kl, type, tmpE;
kl = indx[l] + k; /* just confusing these indices ;-) */
if ((hard_constraints[length * k + l] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) &&
(c[kl] != INF)) {
type = rtype[vrna_get_ptype(kl, ptype)];
for (p = l + 1; p < j; p++) {
int u1, qmin;
u1 = p - l - 1;
if (u1 + k - 1 > MAXLOOP)
break;
if (hc->up_int[l + 1] < u1)
break;
qmin = u1 + k - 1 + j - MAXLOOP;
if (qmin < p + turn + 1)
qmin = p + turn + 1;
for (q = j; q >= qmin; q--) {
int u2, type_2;
if (hc->up_int[q + 1] < (j - q + k - 1))
break;
if ((hard_constraints[length * p + q] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) &&
(c[indx[q] + p] != INF)) {
type_2 = rtype[vrna_get_ptype(indx[q] + p, ptype)];
u2 = k - 1 + j - q;
if (u1 + u2 > MAXLOOP)
continue;
tmpE = E_IntLoop(u1,
u2,
type,
type_2,
S1[l + 1],
S1[k - 1],
S1[p - 1],
S1[q + 1],
P);
if (sc) {
if (sc->energy_up)
tmpE += sc->energy_up[l + 1][p - l - 1]
+ sc->energy_up[q + 1][j - q]
+ sc->energy_up[1][k - 1];
if (sc->energy_stack) {
if (u1 + u2 == 0) {
tmpE += sc->energy_stack[k]
+ sc->energy_stack[l]
+ sc->energy_stack[p]
+ sc->energy_stack[q];
}
}
}
if (c[kl] + c[indx[q] + p] + tmpE + best_energy <= threshold) {
/* ok, similar to the hairpin stuff, we add new states onto the stack R */
/* but in contrast to the hairpin decomposition, we have to add two new */
/* intervals, enclosed by k,l and p,q respectively and we also have to */
/* add the partial energy, that comes from the exterior interior loop */
fork_two_states(k, l, p, q, state, tmpE, 2, 2, env);
}
}
}
}
}
}
}
}
/* and last but not least, we have a look, if we can do an exterior multiloop within the energy threshold */
if (FcM <= threshold) {
/* this decomposition will be somehow more complicated...so lets see what we do here... */
/* first we want to find out which split inidices we can use without exceeding the threshold */
int tmpE2;
for (k = turn + 1; k < j - 2 * turn; k++) {
if ((fML[indx[k] + 1] != INF) &&
(fM2[k + 1] != INF)) {
tmpE2 = fML[indx[k] + 1] + fM2[k + 1] + P->MLclosing;
if (tmpE2 + best_energy <= threshold) {
/* grmpfh, we have found a possible split index k so we have to split fM2 and fML now */
/* lets do it first in fM2 anyway */
for (l = k + turn + 2; l < j - turn - 1; l++) {
tmpE2 = fM1[indx[l] + k + 1] + fM1[indx[j] + l + 1];
if (tmpE2 + fML[indx[k] + 1] + P->MLclosing <= threshold) {
/* we've (hopefully) found a valid decomposition of fM2 and therefor we have all */
/* three intervals for our new state to be pushed on stack R */
new_state = copy_state(state);
/* first interval leads for search in fML array */
new_interval = make_interval(1, k, 1);
push(new_state->Intervals, new_interval);
env->nopush = false;
/* next, we have the first interval that has to be traced in fM1 */
new_interval = make_interval(k + 1, l, 3);
push(new_state->Intervals, new_interval);
env->nopush = false;
/* and the last of our three intervals is also one to be traced within fM1 array... */
new_interval = make_interval(l + 1, j, 3);
push(new_state->Intervals, new_interval);
env->nopush = false;
/* mmh, we add the energy for closing the multiloop now... */
new_state->partial_energy += P->MLclosing;
/* next we push our state onto the R stack */
push(env->Stack, new_state);
env->nopush = false;
}
/* else we search further... */
}
/* ok, we have to decompose fML now... */
}
}
}
}
} /* thats all folks for the circular case... */
/* 44444444444444444444444444444444444444444444444444 */
/* */
/* array_flag = 4: interval i,j was found while */
/* tracing back through fc-array smaller than than cp */
/* or within this block */
/* */
/* 44444444444444444444444444444444444444444444444444 */
if (array_flag == 4) {
int ik, s5, s3, tmp_en;
if ((hc->up_ext[i]) &&
(fc[i + 1] != INF)) {
tmp_en = 0;
if (sc) {
if (sc->energy_up)
tmp_en += sc->energy_up[i][1];
if (sc->f)
tmp_en += sc->f(i, j, i + 1, j, VRNA_DECOMP_EXT_EXT, sc->data);
}
if (fc[i + 1] + tmp_en + best_energy <= threshold)
/* no basepair, nibbling of 5'-end */
fork_state(i + 1, j, state, tmp_en, 4, env);
}
for (k = i + TURN + 1; k < j; k++) {
ik = indx[k] + i;
if ((with_gquad) &&
(fc[k + 1] != INF) &&
(ggg[ik] != INF)) {
if (fc[k + 1] + ggg[ik] + best_energy <= threshold) {
temp_state = derive_new_state(k + 1, j, state, 0, 4);
env->nopush = false;
repeat_gquad(vc, i, k, temp_state, 0, fc[k + 1], best_energy, threshold, env);
free_state_node(temp_state);
}
}
if ((hard_constraints[length * i + k] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) &&
(fc[k + 1] != INF) &&
(c[ik] != INF)) {
type = vrna_get_ptype(ik, ptype);
switch (dangle_model) {
case 0:
s5 = s3 = -1;
break;
default:
s5 = (i > 1) ? S1[i - 1] : -1;
s3 = S1[k + 1];
break;
}
element_energy = E_ExtLoop(type, s5, s3, P);
if (sc) {
if (sc->f)
element_energy += sc->f(i, j, k, k + 1, VRNA_DECOMP_EXT_STEM_EXT, sc->data);
}
if (fc[k + 1] + c[ik] + element_energy + best_energy <= threshold) {
temp_state = derive_new_state(k + 1, j, state, 0, 4);
env->nopush = false;
repeat(vc, i, k, temp_state, element_energy, fc[k + 1], best_energy, threshold, env);
free_state_node(temp_state);
}
}
}
/* final case: the stem extends all the way to the cut point */
ik = indx[cp - 1] + i; /* indx[j] + i; */
if ((with_gquad) && (ggg[ik] != INF))
if (ggg[ik] + best_energy <= threshold)
repeat_gquad(vc, i, cp - 1, state, 0, 0, best_energy, threshold, env);
if ((hard_constraints[length * i + cp - 1] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) &&
(c[ik] != INF)) {
type = vrna_get_ptype(ik, ptype);
s3 = -1;
switch (dangle_model) {
case 0:
s5 = -1;
break;
default:
s5 = (i > 1) ? S1[i - 1] : -1;
break;
}
element_energy = E_ExtLoop(type, s5, s3, P);
if (sc) {
if (sc->f)
element_energy += sc->f(i, cp - 1, i, cp - 1, VRNA_DECOMP_EXT_STEM, sc->data);
}
if (c[ik] + element_energy + best_energy <= threshold)
repeat(vc, i, cp - 1, state, element_energy, 0, best_energy, threshold, env);
}
} /* array_flag == 4 */
/* 55555555555555555555555555555555555555555555555555 */
/* */
/* array_flag = 5: interval cp=i,j was found while */
/* tracing back through fc-array greater than cp */
/* or within this block */
/* */
/* 55555555555555555555555555555555555555555555555555 */
if (array_flag == 5) {
int kj, s5, s3, tmp_en;
if ((hc->up_ext[j]) &&
(fc[j - 1] != INF)) {
tmp_en = 0;
if (sc) {
if (sc->energy_up)
tmp_en += sc->energy_up[j][1];
if (sc->f)
tmp_en += sc->f(i, j, i, j - 1, VRNA_DECOMP_EXT_EXT, sc->data);
}
if (fc[j - 1] + tmp_en + best_energy <= threshold)
/* no basepair, nibbling of 3'-end */
fork_state(i, j - 1, state, tmp_en, 5, env);
}
for (k = j - TURN - 1; k > i; k--) {
kj = indx[j] + k;
if ((with_gquad) &&
(fc[k - 1] != INF) &&
(ggg[kj] != INF)) {
if (fc[k - 1] + ggg[kj] + best_energy <= threshold) {
temp_state = derive_new_state(i, k - 1, state, 0, 5);
env->nopush = false;
repeat_gquad(vc, k, j, temp_state, 0, fc[k - 1], best_energy, threshold, env);
free_state_node(temp_state);
}
}
if ((hard_constraints[length * j + k] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) &&
(fc[k - 1] != INF) &&
(c[kj] != INF)) {
type = vrna_get_ptype(kj, ptype);
element_energy = 0;
switch (dangle_model) {
case 0:
s3 = s5 = -1;
break;
default:
s5 = S1[k - 1];
s3 = (j < length) ? S1[j + 1] : -1;
break;
}
element_energy = E_ExtLoop(type, s5, s3, P);
if (sc) {
if (sc->f)
element_energy += sc->f(i, j, k - 1, k, VRNA_DECOMP_EXT_EXT_STEM, sc->data);
}
if (fc[k - 1] + c[kj] + element_energy + best_energy <= threshold) {
temp_state = derive_new_state(i, k - 1, state, 0, 5);
env->nopush = false;
repeat(vc, k, j, temp_state, element_energy, fc[k - 1], best_energy, threshold, env);
free_state_node(temp_state);
}
}
}
/* final case: the stem starts directly at the cut point */
kj = indx[j] + cp; /* indx[j] + i; */
if ((with_gquad) && (ggg[kj] != INF))
if (ggg[kj] + best_energy <= threshold)
repeat_gquad(vc, cp, j, state, 0, 0, best_energy, threshold, env);
if ((hard_constraints[length * cp + j] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) &&
(c[kj] != INF)) {
type = vrna_get_ptype(kj, ptype);
s5 = -1;
switch (dangle_model) {
case 0:
s3 = -1;
break;
default:
s3 = (j < length) ? S1[j + 1] : -1;
break;
}
element_energy = E_ExtLoop(type, s5, s3, P);
if (sc) {
if (sc->f)
element_energy += sc->f(cp, j, cp, j, VRNA_DECOMP_EXT_STEM, sc->data);
}
if (c[kj] + element_energy + best_energy <= threshold)
repeat(vc, cp, j, state, element_energy, 0, best_energy, threshold, env);
}
} /* array_flag == 5 */
if (array_flag == 6) {
/* we have a gquad */
repeat_gquad(vc, i, j, state, 0, 0, best_energy, threshold, env);
if (env->nopush)
vrna_message_warning("%d,%d\nOops, no solution in gquad-repeat!", i, j);
return;
}
/* nothing was refined: put the state back so it is not lost */
if (env->nopush) {
push_back(env->Stack, state);
env->nopush = false;
}
return;
}
/*---------------------------------------------------------------------------*/
/*
 * Enumerate every g-quadruplex layout inside [i,j] whose energy keeps the
 * state within `threshold`, pushing one new state per layout.
 * `part_energy` is the energy of the current structural element and
 * `temp_energy` the energy of an interval that was split off but not yet
 * pushed; both are temporarily folded into best_energy for the bound check
 * and removed again before returning.
 */
PRIVATE void
repeat_gquad(vrna_fold_compound_t *vc,
int i,
int j,
STATE *state,
int part_energy,
int temp_energy,
int best_energy,
int threshold,
subopt_env *env)
{
int *ggg, *indx, element_energy, cp;
short *S1;
vrna_param_t *P;
indx = vc->jindx;
cp = vc->cutpoint;
ggg = vc->matrices->ggg;
S1 = vc->sequence_encoding;
P = vc->params;
/* find all gquads that fit into the energy range and the interval [i,j] */
STATE *new_state;
best_energy += part_energy; /* energy of current structural element */
best_energy += temp_energy; /* energy from unpushed interval */
if (ON_SAME_STRAND(i, j, cp)) {
/* quadruplexes cannot span the cut point */
element_energy = ggg[indx[j] + i];
if ((element_energy != INF) &&
(element_energy + best_energy <= threshold)) {
int cnt;
int *L;
int *l;
/* find out how many gquads we might expect in the interval [i,j] */
int num_gquads = get_gquad_count(S1, i, j);
num_gquads++;
/* L holds layer counts, l the three linker lengths per candidate;
 * both lists are (-1)-terminated by the exhaustive search below */
L = (int *)vrna_alloc(sizeof(int) * num_gquads);
l = (int *)vrna_alloc(sizeof(int) * num_gquads * 3);
L[0] = -1;
get_gquad_pattern_exhaustive(S1, i, j, P, L, l, threshold - best_energy);
for (cnt = 0; L[cnt] != -1; cnt++) {
new_state = copy_state(state);
make_gquad(i, L[cnt], &(l[3 * cnt]), new_state);
new_state->partial_energy += part_energy;
new_state->partial_energy += element_energy;
/* new_state->best_energy =
* hairpin[unpaired] + element_energy + best_energy; */
push(env->Stack, new_state);
env->nopush = false;
}
free(L);
free(l);
}
}
best_energy -= part_energy;
best_energy -= temp_energy;
return;
}
/*
 * repeat() -- enumerate every structural element that can close the base
 * pair (i,j) during subopt backtracking: the forced stack (noLP mode),
 * stacks/bulges/interior loops, the cofold (cut-point) decomposition,
 * multiloop decompositions, hairpin loops and -- when enabled -- interior
 * loops enclosing a G-quadruplex.  For each candidate whose total energy
 * stays within `threshold`, a new partial state is pushed on env->Stack.
 *
 * part_energy  energy contribution of the element currently being closed
 * temp_energy  energy carried over from an interval not yet pushed
 * best_energy  lower bound of everything outside [i,j]; temporarily
 *              incremented here and restored before returning
 */
PRIVATE void
repeat(vrna_fold_compound_t *vc,
int i,
int j,
STATE *state,
int part_energy,
int temp_energy,
int best_energy,
int threshold,
subopt_env *env)
{
/* routine to find stacks, bulges, internal loops and multiloops */
/* within interval closed by basepair i,j */
STATE *new_state;
vrna_param_t *P;
vrna_md_t *md;
register int ij, k, p, q, energy, new;
register int mm;
register int no_close, type, type_2;
char *ptype;
unsigned int n;
int element_energy;
int *fc, *c, *fML, *fM1, *ggg;
int rt, *indx, *rtype, noGUclosure, noLP, with_gquad, dangle_model, turn, cp;
short *S1;
vrna_hc_t *hc;
vrna_sc_t *sc;
/* unpack the frequently used fields of the fold compound / model details */
n = vc->length;
S1 = vc->sequence_encoding;
ptype = vc->ptype;
indx = vc->jindx;
cp = vc->cutpoint;
P = vc->params;
md = &(P->model_details);
rtype = &(md->rtype[0]);
noGUclosure = md->noGUclosure;
noLP = md->noLP;
with_gquad = md->gquad;
dangle_model = md->dangles;
turn = md->min_loop_size;
fc = vc->matrices->fc;
c = vc->matrices->c;
fML = vc->matrices->fML;
fM1 = vc->matrices->fM1;
ggg = vc->matrices->ggg;
hc = vc->hc;
sc = vc->sc;
ij = indx[j] + i;
type = vrna_get_ptype(ij, ptype);
/*
* if (type==0) fprintf(stderr, "repeat: Warning: %d %d can't pair\n", i,j);
*/
no_close = (((type == 3) || (type == 4)) && noGUclosure);
/*
 * noLP mode: a lonely pair (i,j) is only allowed if it can be stacked on,
 * so the (i+1,j-1) stack is always generated first; if (i,j) cannot be
 * part of any other loop we may return immediately after pushing it.
 */
if (hc->mx[n * i + j] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) {
if (noLP) {
/* always consider the structure with additional stack */
if (i + turn + 2 < j) {
if (hc->mx[n * (i + 1) + j - 1] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP_ENC) {
type_2 = rtype[vrna_get_ptype(indx[j - 1] + i + 1, ptype)];
energy = 0;
if (ON_SAME_STRAND(i, i + 1, cp) && ON_SAME_STRAND(j - 1, j, cp)) {
energy = E_IntLoop(0, 0, type, type_2, S1[i + 1], S1[j - 1], S1[i + 1], S1[j - 1], P);
/* soft-constraint bonuses for the stacked pair */
if (sc) {
if (sc->energy_bp)
energy += sc->energy_bp[ij];
if (sc->energy_stack) {
energy += sc->energy_stack[i]
+ sc->energy_stack[i + 1]
+ sc->energy_stack[j - 1]
+ sc->energy_stack[j];
}
if (sc->f)
energy += sc->f(i, j, i + 1, j - 1, VRNA_DECOMP_PAIR_IL, sc->data);
}
new_state = derive_new_state(i + 1, j - 1, state, part_energy + energy, 2);
make_pair(i, j, new_state);
make_pair(i + 1, j - 1, new_state);
/* new_state->best_energy = new + best_energy; */
push(env->Stack, new_state);
env->nopush = false;
if (i == 1 || state->structure[i - 2] != '(' || state->structure[j] != ')')
/* adding a stack is the only possible structure */
return;
}
}
}
}
}
best_energy += part_energy; /* energy of current structural element */
best_energy += temp_energy; /* energy from unpushed interval */
/* enumerate all stacks, bulges and interior loops closed by (i,j) */
if (hc->mx[n * i + j] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) {
for (p = i + 1; p <= MIN2(j - 2 - turn, i + MAXLOOP + 1); p++) {
int minq = j - i + p - MAXLOOP - 2;
if (minq < p + 1 + turn)
minq = p + 1 + turn;
if (hc->up_int[i + 1] < (p - i - 1))
break;
for (q = j - 1; q >= minq; q--) {
if (hc->up_int[q + 1] < (j - q - 1))
break;
/* skip stack if noLP, since we've already processed it above */
if ((noLP) && (p == i + 1) && (q == j - 1))
continue;
if (!(hc->mx[n * p + q] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP_ENC))
continue;
if (c[indx[q] + p] == INF)
continue;
type_2 = vrna_get_ptype(indx[q] + p, ptype);
if (noGUclosure)
if (no_close || (type_2 == 3) || (type_2 == 4))
if ((p > i + 1) || (q < j - 1))
continue;
/* continue unless stack */
if (ON_SAME_STRAND(i, p, cp) && ON_SAME_STRAND(q, j, cp)) {
energy = E_IntLoop(p - i - 1, j - q - 1, type, rtype[type_2],
S1[i + 1], S1[j - 1], S1[p - 1], S1[q + 1], P);
/* NOTE(review): this assignment of 'new' is dead -- it is recomputed
 * below after the soft-constraint terms have been added to 'energy' */
new = energy + c[indx[q] + p];
if (sc) {
if (sc->energy_up)
energy += sc->energy_up[i + 1][p - i - 1]
+ sc->energy_up[q + 1][j - q - 1];
if (sc->energy_bp)
energy += sc->energy_bp[ij];
if (sc->energy_stack) {
if ((p == i + 1) && (q == j - 1)) {
energy += sc->energy_stack[i]
+ sc->energy_stack[p]
+ sc->energy_stack[q]
+ sc->energy_stack[j];
}
}
if (sc->f)
energy += sc->f(i, j, p, q, VRNA_DECOMP_PAIR_IL, sc->data);
}
new = energy + c[indx[q] + p];
if (new + best_energy <= threshold)
/* stack, bulge, or interior loop */
fork_int_state(i, j, p, q, state, part_energy + energy, env);
} /*end of if block */
} /* end of q-loop */
} /* end of p-loop */
}
/* cofold: (i,j) spans the cut point -> decompose into two exterior parts */
if (!ON_SAME_STRAND(i, j, cp)) {
/*look in fc*/
if ((hc->mx[n * i + j] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) &&
(fc[i + 1] != INF) &&
(fc[j - 1] != INF)) {
rt = rtype[type];
element_energy = 0;
switch (dangle_model) {
case 0:
element_energy = E_ExtLoop(rt, -1, -1, P);
break;
default:
/* dangle contributions only from nucleotides on the same strand */
element_energy =
E_ExtLoop(rt,
(ON_SAME_STRAND(j - 1, j, cp)) ?
S1[j - 1] :
-1,
(ON_SAME_STRAND(i, i + 1, cp)) ?
S1[i + 1] :
-1,
P);
break;
}
if (fc[i + 1] + fc[j - 1] + element_energy + best_energy <= threshold)
fork_two_states_pair(i, j, cp, state, part_energy + element_energy, 4, 5, env);
}
}
/* multiloop closed by (i,j): split interior into fML + fM1 at position k */
mm = P->MLclosing;
rt = rtype[type];
if ((hc->mx[n * i + j] & VRNA_CONSTRAINT_CONTEXT_MB_LOOP) && (i != cp - 1) && (j != cp)) {
element_energy = mm;
switch (dangle_model) {
case 0:
element_energy = E_MLstem(rt, -1, -1, P) + mm;
break;
default:
element_energy = E_MLstem(rt, S1[j - 1], S1[i + 1], P) + mm;
break;
}
if (sc) {
if (sc->energy_bp)
element_energy += sc->energy_bp[ij];
if (sc->f)
element_energy += sc->f(i, j, i + 1, j - 1, VRNA_DECOMP_PAIR_ML, sc->data);
}
/* multiloop decomposition */
if ((sc) && (sc->f)) {
/* variant with a per-decomposition soft-constraint callback */
for (k = i + turn + 2; k <= j - turn - 2; k++) {
int eee = fML[indx[k - 1] + i + 1];
if ((eee != INF) && (fM1[indx[j - 1] + k] != INF)) {
eee += fM1[indx[j - 1] + k] +
best_energy;
int aux_eee = element_energy +
sc->f(i + 1, j - 1, k - 1, k, VRNA_DECOMP_ML_ML_ML, sc->data);
if ((eee + aux_eee) <= threshold)
fork_two_states_pair(i, j, k, state, part_energy + aux_eee, 1, 3, env);
}
}
} else {
for (k = i + turn + 2; k <= j - turn - 2; k++) {
int eee = fML[indx[k - 1] + i + 1];
if ((eee != INF) && (fM1[indx[j - 1] + k] != INF)) {
/* multiloop decomposition */
if ((eee + fM1[indx[j - 1] + k] +
element_energy + best_energy) <= threshold)
fork_two_states_pair(i, j, k, state, part_energy + element_energy, 1, 3, env);
}
}
}
}
/* hairpin loop and gquad-in-interior-loop cases require i,j on one strand */
if (ON_SAME_STRAND(i, j, cp)) {
if ((hc->mx[n * i + j] & VRNA_CONSTRAINT_CONTEXT_HP_LOOP) &&
(!no_close)) {
element_energy = vrna_E_hp_loop(vc, i, j);
if (element_energy != INF) {
if (element_energy + best_energy <= threshold)
/* hairpin structure */
fork_state_pair(i, j, state, part_energy + element_energy, env);
}
}
if (with_gquad) {
/* now we have to find all loops where (i,j) encloses a gquad in an interior loops style */
int cnt, *p, *q, *en, tmp_en;
p = q = en = NULL;
/* returns parallel arrays p/q/en terminated by p[cnt] == -1; caller frees */
en =
E_GQuad_IntLoop_exhaustive(i, j, &p, &q, type, S1, ggg, threshold - best_energy, indx, P);
for (cnt = 0; p[cnt] != -1; cnt++) {
if ((hc->up_int[i + 1] >= p[cnt] - i - 1) && (hc->up_int[q[cnt] + 1] >= j - q[cnt] - 1)) {
tmp_en = en[cnt];
if (sc) {
if (sc->energy_bp)
tmp_en += sc->energy_bp[ij];
if (sc->energy_up)
tmp_en += sc->energy_up[i + 1][p[cnt] - i - 1]
+ sc->energy_up[q[cnt] + 1][j - q[cnt] - 1];
}
new_state = derive_new_state(p[cnt], q[cnt], state, tmp_en + part_energy, 6);
make_pair(i, j, new_state);
/* new_state->best_energy = new + best_energy; */
push(env->Stack, new_state);
env->nopush = false;
}
}
free(en);
free(p);
free(q);
}
}
/* restore the caller's lower bound before returning */
best_energy -= part_energy;
best_energy -= temp_energy;
return;
}
/*
 * Result callback for the deprecated subopt() interface: print one
 * suboptimal structure together with its energy to the FILE* stored in
 * the callback data.  A NULL structure (end-of-enumeration marker) or a
 * missing output stream is silently ignored.
 */
PRIVATE void
old_subopt_print(const char *structure,
                 float energy,
                 void *data)
{
  struct old_subopt_dat *dat = (struct old_subopt_dat *)data;

  if ((!structure) || (!dat->fp))
    return;

  char *energy_string = vrna_strdup_printf(" %6.2f", energy);

  print_structure(dat->fp, structure, energy_string);
  free(energy_string);
}
/*
 * Result callback for the deprecated subopt() interface: append one
 * solution to the growing SolutionList.  A NULL structure stores the
 * terminating sentinel entry {0, NULL}.
 *
 * Fix: grow the list when (n_sol + 1 >= max_sol) instead of the original
 * strict equality -- '==' can be stepped over if the counters ever get
 * out of sync, after which every store writes out of bounds.
 */
PRIVATE void
old_subopt_store(const char *structure,
                 float energy,
                 void *data)
{
  struct old_subopt_dat *d = (struct old_subopt_dat *)data;

  /* store solution; keep one spare slot for the NULL sentinel */
  if (d->n_sol + 1 >= d->max_sol) {
    d->max_sol *= 2;
    d->SolutionList = (SOLUTION *)vrna_realloc(d->SolutionList, d->max_sol * sizeof(SOLUTION));
  }

  if (structure) {
    d->SolutionList[d->n_sol].energy = energy;
    d->SolutionList[d->n_sol++].structure = strdup(structure);
  } else {
    /* sentinel terminating the list */
    d->SolutionList[d->n_sol].energy = 0;
    d->SolutionList[d->n_sol++].structure = NULL;
  }
}
/*###########################################*/
/*# deprecated functions below #*/
/*###########################################*/
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
/*
 * Deprecated: compute suboptimal structures of `seq` within `delta`
 * deka-cal/mol of the MFE, honoring the global `fold_constrained` flag.
 * Forwards to wrap_subopt() with linear (non-circular) folding.
 * Prefer vrna_subopt() in new code.
 */
PUBLIC SOLUTION *
subopt(char *seq,
char *structure,
int delta,
FILE *fp)
{
return wrap_subopt(seq, structure, NULL, delta, fold_constrained, 0, fp);
}
/*
 * Deprecated: same as subopt(), but folds `seq` as a circular RNA
 * (is_circular = 1).  Prefer vrna_subopt() in new code.
 */
PUBLIC SOLUTION *
subopt_circ(char *seq,
char *structure,
int delta,
FILE *fp)
{
return wrap_subopt(seq, structure, NULL, delta, fold_constrained, 1, fp);
}
/*
 * Deprecated: fully parameterized variant of subopt() that additionally
 * accepts a pre-computed energy parameter set and explicit constraint /
 * circularity flags.  Thin forwarder to wrap_subopt().
 */
PUBLIC SOLUTION *
subopt_par(char *seq,
char *structure,
vrna_param_t *parameters,
int delta,
int is_constrained,
int is_circular,
FILE *fp)
{
return wrap_subopt(seq, structure, parameters, delta, is_constrained, is_circular, fp);
}
/*
 * Backward-compatibility glue for the deprecated subopt*() entry points:
 * builds a vrna_fold_compound_t from the sequence and the global model
 * state (temperature, cut_point, fold_constrained ...), installs optional
 * user parameters and constraints, then delegates to vrna_subopt().
 *
 * Side effects: overwrites the global backward_compat_compound (freeing
 * the previous one) and forces the global uniq_ML flag to 1.
 * Ownership: `P` is either handed to `vc` (parameters given) or freed
 * here; `seq` is a local copy and freed before returning.
 */
PRIVATE SOLUTION *
wrap_subopt(char *string,
char *structure,
vrna_param_t *parameters,
int delta,
int is_constrained,
int is_circular,
FILE *fp)
{
vrna_fold_compound_t *vc;
vrna_param_t *P;
char *seq;
#ifdef _OPENMP
/* Explicitly turn off dynamic threads */
omp_set_dynamic(0);
#endif
/* we need the parameter structure for hard constraints */
if (parameters) {
P = vrna_params_copy(parameters);
} else {
vrna_md_t md;
set_model_details(&md);
md.temperature = temperature;
P = vrna_params(&md);
}
P->model_details.circ = is_circular;
P->model_details.uniq_ML = uniq_ML = 1;
/* what about cofold sequences here? Is it safe to call the below cut_point_insert() ? */
/* dirty hack to reinsert the '&' according to the global variable 'cut_point' */
seq = vrna_cut_point_insert(string, cut_point);
vc =
vrna_fold_compound(seq,
&(P->model_details),
((is_circular == 0) ? VRNA_OPTION_HYBRID : VRNA_OPTION_DEFAULT));
if (parameters) {
/* replace params if necessary */
free(vc->params);
vc->params = P;
} else {
/* vc keeps its own copy of the model details; our temporary P is no longer needed */
free(P);
}
/* handle hard constraints in pseudo dot-bracket format if passed via simple interface */
if (is_constrained && structure) {
unsigned int constraint_options = 0;
constraint_options |= VRNA_CONSTRAINT_DB
| VRNA_CONSTRAINT_DB_PIPE
| VRNA_CONSTRAINT_DB_DOT
| VRNA_CONSTRAINT_DB_X
| VRNA_CONSTRAINT_DB_ANG_BRACK
| VRNA_CONSTRAINT_DB_RND_BRACK
| VRNA_CONSTRAINT_DB_INTRAMOL
| VRNA_CONSTRAINT_DB_INTERMOL;
vrna_constraints_add(vc, (const char *)structure, constraint_options);
}
/* retire the previously cached compound, then cache the new one */
if (backward_compat_compound && backward_compat)
vrna_fold_compound_free(backward_compat_compound);
backward_compat_compound = vc;
backward_compat = 1;
/* cleanup */
free(seq);
return vrna_subopt(vc, delta, subopt_sorted, fp);
}
#endif
/*---------------------------------------------------------------------------*/
/* Well, that is the end!----------------------------------------------------*/
/*---------------------------------------------------------------------------*/
|
swap.h | /*
* Author: Salvatore Mandra (salvatore.mandra@nasa.gov)
*
* Copyright © 2021, United States Government, as represented by the
* Administrator of the National Aeronautics and Space Administration. All
* rights reserved.
*
* The HybridQ: A Hybrid Simulator for Quantum Circuits platform is licensed
* under the Apache License, Version 2.0 (the "License"); you may not use this
* file except in compliance with the License. You may obtain a copy of the
* License at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
#ifndef HYBRIDQ__SWAP_H
#define HYBRIDQ__SWAP_H
#include "pack.h"
#include "utils.h"
namespace hybridq::swap {
// Bit-scatter: route bit i of `x` (for i in [0, size)) to bit position
// pos[i] of the result.  Positions is any indexable container; `size`
// defaults to its compile-time length via the project trait array_size_v.
template <typename Positions, std::size_t size = array_size_v<Positions>>
constexpr inline auto swap(std::size_t x, Positions&& pos) {
  std::size_t scattered{0};
  for (std::size_t bit = 0; bit < size; ++bit)
    scattered ^= ((x >> bit) & 1uL) << pos[bit];
  return scattered;
}
/* Implementation helper: permute the pack in place so that lane k of the
 * new pack is taken from lane swap(k, pos) of the old pack (i.e. the lane
 * index undergoes the bit permutation described by `pos`).
 * remove_pcvr_t is a project trait (utils.h) yielding the plain pack type. */
template <typename pack_type, typename Positions, std::size_t... I>
constexpr inline void _swap(pack_type&& pack, Positions&& pos,
std::index_sequence<I...>) {
pack = remove_pcvr_t<pack_type>{pack[swap(I, pos)]...};
}
/* Public pack overload: permutes all lanes of `pack` according to the bit
 * permutation `pos`.  Expands an index sequence over the pack's
 * compile-time lane count (pack_size_v, utils.h) and delegates to _swap. */
template <typename pack_type, typename Positions,
std::size_t size = pack_size_v<pack_type>>
constexpr inline void swap(pack_type&& pack, Positions&& pos) {
_swap(pack, pos, std::make_index_sequence<size>{});
}
/* Apply the bit permutation `pos` (compile-time length = swap_size) to an
 * array of `size` elements, viewed as contiguous packs of 2^swap_size
 * lanes.  Returns 0 (reserved for an error status).
 * NOTE(review): the reinterpret_cast relies on __pack__::value_type
 * (pack.h) being layout-compatible with float_type[2^swap_size] --
 * presumably a vector-extension type; confirm against pack.h before
 * relying on strict-aliasing safety here. */
template <typename float_type, typename Positions,
std::size_t swap_size = array_size_v<Positions>>
int swap_array(float_type* array, Positions&& pos, const std::size_t size) {
// Reinterpret the flat array as an array of packs, one pack per block
auto* _array = reinterpret_cast<
typename hybridq::__pack__<float_type, 1uL << swap_size>::value_type*>(
array);
#pragma omp parallel for
for (std::size_t i = 0; i < (size >> swap_size); ++i) swap(_array[i], pos);
return 0;
}
/* Runtime-sized variant: apply the bit permutation described by
 * pos[0..n_pos) to `array`, processing it in blocks of 2^n_pos elements.
 * Within each block, the element at permuted index _swap_pos[j] is moved
 * to index j.  Returns 0 (reserved for an error status).
 *
 * Fixes relative to the original:
 *  - removed the `offset[]` table that was computed but never read
 *    (dead code);
 *  - the per-thread scratch buffer is now declared inside the loop body
 *    instead of using `private()` on a variable-length array, which is a
 *    fragile OpenMP construct;
 *  - reformatted the mangled loop condition `i<size>> n_pos` back to the
 *    intended `i < (size >> n_pos)` (same parse, readable form). */
template <typename float_type, typename index_type>
int swap_array(float_type* array, index_type* pos, const std::size_t size,
               const std::size_t n_pos) {
  // Number of elements per block
  const std::size_t block_size = 1uL << n_pos;

  // Precompute the in-block permutation: _swap_pos[x] is x with its low
  // n_pos bits scattered to the positions given by pos[].
  std::size_t _swap_pos[block_size];
  for (std::size_t x = 0; x < block_size; ++x) {
    std::size_t y{0};
    for (std::size_t i = 0; i < n_pos; ++i) y ^= ((x >> i) & 1uL) << pos[i];
    _swap_pos[x] = y;
  }

#pragma omp parallel for
  for (std::size_t i = 0; i < (size >> n_pos); ++i) {
    float_type buffer[block_size];  // thread-local scratch
    // Load the block through the permutation
    for (std::size_t j = 0; j < block_size; ++j)
      buffer[j] = array[_swap_pos[j] ^ (i << n_pos)];
    // Dump it back in natural order
    for (std::size_t j = 0; j < block_size; ++j)
      array[j ^ (i << n_pos)] = buffer[j];
  }
  return 0;
}
} // namespace hybridq::swap
#endif
|
app_baseline.c | /*
* JGL@SAFARI
*/
/**
* @file app.c
* @brief Template for a Host Application Source File.
*
* The macros DPU_BINARY and NR_TASKLETS are directly
* used in the static functions, and are not passed as arguments of these functions.
*/
#include <assert.h>
#include <getopt.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <omp.h>
#include "../../support/common.h"
#include "../../support/timer.h"
// Pointer declaration
static T *A;
static unsigned int *histo_host;
typedef struct Params {
unsigned int input_size;
unsigned int bins;
int n_warmup;
int n_reps;
const char *file_name;
int exp;
int n_threads;
} Params;
/**
* @brief creates input arrays
* @param nr_elements how many elements in input arrays
*/
/**
 * @brief reads p.input_size 16-bit big-endian samples from p.file_name
 *        into A, byte-swapping each and clamping values to [0, 4095]
 *
 * Fixes: the original passed the user-controlled file name directly as a
 * printf format string (`sprintf(dst, p.file_name)` -- undefined behavior
 * if the name contains '%'), and ignored the return value of fread(), so
 * a truncated file silently produced garbage samples.
 */
static void read_input(T *A, const Params p) {
  char dctFileName[100];
  FILE *File = NULL;

  // Copy the file name safely; never use external data as a format string
  snprintf(dctFileName, sizeof dctFileName, "%s", p.file_name);

  // Open input file
  unsigned short temp;
  if ((File = fopen(dctFileName, "rb")) != NULL) {
    for (unsigned int y = 0; y < p.input_size; y++) {
      if (fread(&temp, sizeof(unsigned short), 1, File) != 1) {
        fprintf(stderr, "%s: unexpected end of file\n", dctFileName);
        fclose(File);
        exit(1);
      }
      A[y] = (unsigned int)ByteSwap16(temp);
      if (A[y] >= 4096)
        A[y] = 4095;  // clamp to 12-bit range
    }
    fclose(File);
  } else {
    printf("%s does not exist\n", dctFileName);
    exit(1);
  }
}
/**
* @brief compute output in the host
*/
/**
 * @brief compute the reference histogram on the host with t OpenMP threads
 *
 * exp != 0 (strong scaling): one shared histogram of `bins` counters,
 * updated atomically by all threads.
 * exp == 0 (weak scaling): nr_of_dpus independent copies of the histogram,
 * one per outer iteration, each filled from the full input.
 */
static void histogram_host(unsigned int *histo, T *A, unsigned int bins, unsigned int nr_elements, int exp, unsigned int nr_of_dpus, int t) {
  omp_set_num_threads(t);

  if (exp) {
    /* strong scaling: threads share one histogram */
#pragma omp parallel for
    for (unsigned int idx = 0; idx < nr_elements; idx++) {
      T value = A[idx];
#pragma omp atomic update
      histo[(value * bins) >> DEPTH] += 1;
    }
  } else {
    /* weak scaling: each replica gets its own private slice of histo */
#pragma omp parallel for
    for (unsigned int copy = 0; copy < nr_of_dpus; copy++) {
      unsigned int *my_histo = histo + copy * bins;
      for (unsigned int idx = 0; idx < nr_elements; idx++) {
        T value = A[idx];
        my_histo[(value * bins) >> DEPTH] += 1;
      }
    }
  }
}
// Params ---------------------------------------------------------------------
/* Print the command-line help text to stderr. */
void usage() {
  static const char *help_text =
      "\nUsage: ./program [options]"
      "\n"
      "\nGeneral options:"
      "\n    -h        help"
      "\n    -w <W>    # of untimed warmup iterations (default=1)"
      "\n    -e <E>    # of timed repetition iterations (default=3)"
      "\n    -t <T>    # of threads (default=8)"
      "\n    -x <X>    Weak (0) or strong (1) scaling (default=0)"
      "\n"
      "\nBenchmark-specific options:"
      "\n    -i <I>    input size (default=1536*1024 elements)"
      "\n    -b <B>    histogram size (default=256 bins)"
      "\n    -f <F>    input image file (default=../input/image_VanHateren.iml)"
      "\n";
  fprintf(stderr, "%s", help_text);
}
/**
 * @brief parse command-line options into a Params struct
 *
 * Fixes: an unrecognized option now terminates with EXIT_FAILURE instead
 * of exit(0) (errors must not report success), and the assert message
 * now matches what is being checked (threads, not ranks).
 */
struct Params input_params(int argc, char **argv) {
  struct Params p;
  p.input_size = 1536 * 1024;
  p.bins = 256;
  p.n_warmup = 1;
  p.n_reps = 3;
  p.n_threads = 8;
  /* NOTE(review): usage() advertises default=0 for -x, but the actual
   * default has always been 1 (strong scaling); kept as-is to preserve
   * behavior -- confirm which one is intended. */
  p.exp = 1;
  p.file_name = "../../input/image_VanHateren.iml";

  int opt;
  while ((opt = getopt(argc, argv, "hi:b:w:e:f:x:t:")) >= 0) {
    switch (opt) {
    case 'h':
      usage();
      exit(0);
      break;
    case 'i':
      p.input_size = atoi(optarg);
      break;
    case 'b':
      p.bins = atoi(optarg);
      break;
    case 'w':
      p.n_warmup = atoi(optarg);
      break;
    case 'e':
      p.n_reps = atoi(optarg);
      break;
    case 'f':
      p.file_name = optarg;
      break;
    case 'x':
      p.exp = atoi(optarg);
      break;
    case 't':
      p.n_threads = atoi(optarg);
      break;
    default:
      fprintf(stderr, "\nUnrecognized option!\n");
      usage();
      exit(EXIT_FAILURE);  /* was exit(0): a usage error is not success */
    }
  }
  assert(p.n_threads > 0 && "Invalid # of threads!");

  return p;
}
/**
* @brief Main of the Host Application.
*/
/**
 * @brief Main of the Host Application.
 *
 * Fixes: `nr_of_dpus` was read uninitialized (undefined behavior) when
 * sizing, clearing, and filling the weak-scaling histogram; it is now set
 * to the thread count, matching the per-replica loop in histogram_host().
 * Also: allocation results are checked, the unused `bufferA` alias is
 * gone, and the buffers are freed before exit.
 */
int main(int argc, char **argv) {
  struct Params p = input_params(argc, argv);

  const unsigned int input_size = p.input_size;  // Size of input image
  assert(input_size % p.n_threads == 0 && "Input size!");

  /* one histogram replica per thread in the weak-scaling experiment */
  uint32_t nr_of_dpus = (uint32_t)p.n_threads;

  // Input/output allocation
  A = malloc(input_size * sizeof(T));
  assert(A != NULL && "malloc failed!");
  if (!p.exp)
    histo_host = malloc((size_t)nr_of_dpus * p.bins * sizeof(unsigned int));
  else
    histo_host = malloc(p.bins * sizeof(unsigned int));
  assert(histo_host != NULL && "malloc failed!");

  // Load the input image
  read_input(A, p);

  Timer timer;
  start(&timer, 0, 0);

  if (!p.exp)
    memset(histo_host, 0, (size_t)nr_of_dpus * p.bins * sizeof(unsigned int));
  else
    memset(histo_host, 0, p.bins * sizeof(unsigned int));

  histogram_host(histo_host, A, p.bins, input_size, p.exp, nr_of_dpus, p.n_threads);

  stop(&timer, 0);
  printf("Kernel ");
  print(&timer, 0, 1);
  printf("\n");

  free(A);
  free(histo_host);
  return 0;
}
|
SudokuValidator.c | /*
Andres Emilio Quinto Villagran
18288
para compilar el codigo, compilarlo con:
gcc SudokuValidator.c -o sudoku -fopenmp -lrt
Para ejecutarlo con sudoku de prueba:
./sudoku "sudoku.21"
./sudoku <direccion>
*/
#include <fcntl.h>
#include <omp.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>
int i = 0;
int j = 0;
int boardSize = 9;
int sudokuBoard[9][9];
#define handle_error(msg) \
do { perror(msg); exit(EXIT_FAILURE); } while (0)
/*
 * Validate every column of the global sudokuBoard: each must contain the
 * digits 1..9 exactly once.  pthread start routine; returns (void *)1 when
 * all columns are valid, (void *)0 otherwise.
 *
 * Fixes: the loop indices were the shared globals i/j, a data race once
 * the OpenMP loops ran in parallel -- they are now thread-private locals;
 * pthread_exit() inside an OpenMP parallel region (which kills the worker
 * thread mid-region) is replaced by an atomically cleared result flag;
 * the racy nested `parallel for` over j is a plain inner loop.
 */
void *checkColumns(){
  omp_set_nested(1);
  omp_set_num_threads(9);
  int columnContainsDigits = 1;
  // Revisamos columnas individuales
#pragma omp parallel for schedule(dynamic)
  for (int col = 0; col < boardSize; col++){
    pid_t tid = syscall(SYS_gettid);
    printf("En las columnas revisadas, este es el siguiente es un thread en ejecucion: %d\n", (int)tid);
    /* one fresh seen-set per column */
    int colNums[9] = {0};
    for (int row = 0; row < boardSize; row++){
      int num = sudokuBoard[row][col];
      if (num < 1 || num > 9 || colNums[num - 1] == 1){
        printf("Error en numero leido en columna");
#pragma omp atomic write
        columnContainsDigits = 0;
      }else{
        colNums[num - 1] = 1;
      }
    }
  }
  return (void *)(intptr_t)columnContainsDigits;
}
/*
 * Validate every row of the global sudokuBoard: each must contain the
 * digits 1..9 exactly once.  pthread start routine; returns (void *)1 when
 * all rows are valid, (void *)0 otherwise.
 *
 * Fixes: the function was declared `void` yet contained `return (void*)x;`
 * (invalid C) and was passed to pthread_create() -- it now has the proper
 * `void *(*)()` shape; the shared global loop indices (data race) are
 * thread-private locals; pthread_exit() inside the OpenMP region is
 * replaced by an atomically cleared result flag.
 */
void *checkRows(){
  omp_set_nested(1);
  omp_set_num_threads(9);
  int rowContainsDigits = 1;
  // Revisamos filas individuales
#pragma omp parallel for schedule(dynamic)
  for (int row = 0; row < boardSize; row++){
    /* one fresh seen-set per row */
    int rowNums[9] = {0};
    for (int col = 0; col < boardSize; col++){
      int num = sudokuBoard[row][col];
      if (num < 1 || num > 9 || rowNums[num - 1] == 1){
        printf("Error en numero leido en fila");
#pragma omp atomic write
        rowContainsDigits = 0;
      }else{
        rowNums[num - 1] = 1;
      }
    }
  }
  return (void *)(intptr_t)rowContainsDigits;
}
/*
 * Validate the 3x3 subgrid whose top-left corner is
 * (gridRowStart, gridColStart): returns 1 if it contains each digit 1..9
 * exactly once, 0 otherwise.
 *
 * Fixes: the original iterated i,j from 0 to start+3, so it scanned the
 * wrong cells entirely; it reset the seen-set on every row, so duplicates
 * across rows of the subgrid were never detected; and it used the shared
 * globals i/j, racing with the column/row checker threads.  The loops now
 * cover exactly the 3x3 block with local indices and a single seen-set.
 */
int checkSubgrid(int gridRowStart, int gridColStart){
  int subgridNums[9] = {0};
  for (int row = gridRowStart; row < gridRowStart + 3; row++){
    for (int col = gridColStart; col < gridColStart + 3; col++){
      int num = sudokuBoard[row][col];
      if (num < 1 || num > 9 || subgridNums[num - 1] == 1)
        return 0;           /* out of range or repeated digit */
      subgridNums[num - 1] = 1;
    }
  }
  return 1;
}
/*
 * Load a 9x9 sudoku from the file given in argv[1] (via mmap), then check
 * subgrids sequentially, columns and rows in worker threads, printing
 * whether the solution is valid.  Also forks `ps` twice to display the
 * process's thread list.
 *
 * Fixes: `sg` was overwritten on every subgrid, so an early failure could
 * be masked by a later valid subgrid (now it latches 0); the void* thread
 * results were cast to `int *` instead of an integer; the children did
 * not exit if execlp() failed and fell through into the parent's code;
 * munmap(addr, 0) always fails with EINVAL (now uses the mapped size);
 * missing argv[1] is reported instead of passing NULL to open().
 */
int main(int argc, char *argv[]){
  omp_set_nested(1);
  omp_set_num_threads(1);
  char *addr;
  int fd, size;
  struct stat sb;
  int j = 0, k = 0;

  if (argc < 2){
    fprintf(stderr, "uso: %s <archivo-sudoku>\n", argv[0]);
    exit(EXIT_FAILURE);
  }

  fd = open(argv[1], O_RDONLY);
  // Verificar si hubo algun error o no
  if (fd == -1)
    handle_error("open");
  // Obtenemos el tamano del archivo
  if (fstat(fd, &sb) == -1)
    handle_error("fstat");
  size = sb.st_size;
  addr = mmap(0, size, PROT_READ, MAP_PRIVATE, fd, 0);
  if (addr == MAP_FAILED)
    handle_error("mmap");

  /* parse the 81 digits into the global board */
  int counter = 0;
  for (j = 0; j < boardSize; j++){
    for(k = 0; k < boardSize; k++){
      sudokuBoard[j][k] = addr[counter] - '0';
      counter += 1;
    }
  }

  /* check all nine 3x3 subgrids; latch the first failure */
  int sg = 1, col_res = 0, row_res = 0;
  for (j = 0; j < boardSize && sg == 1; j += 3){
    for(k = 0; k < boardSize; k += 3){
      if (checkSubgrid(j, k) != 1){
        sg = 0;
        break;
      }
    }
  }

  // Conseguimos el no de este proceso (no el del thread)
  pid_t parent_pidt = getpid();
  char pid_ref[12];
  sprintf(pid_ref, "%d", (int)parent_pidt);
  printf("El thread en el que se ejecuta main es: %s\n", pid_ref);

  pid_t ft = fork();
  // Proceso hijo empieza
  if (ft == 0) {
    // hijo: listar los threads del padre
    execlp("ps", "ps", "-p", pid_ref, "-lLf", NULL);
    _exit(EXIT_FAILURE);  /* must not fall through if exec fails */
  }
  // Proceso padre empieza
  else {
    void *valid_cols, *valid_rows;
    // padre: lanzar el chequeo de columnas
    pthread_t thread_id_c;
    pthread_create(&thread_id_c, NULL, checkColumns, NULL);
    pthread_join(thread_id_c, &valid_cols);
    col_res = (int)(intptr_t)valid_cols;  /* thread result is an int in disguise */
    pid_t thread_id = syscall(SYS_gettid);
    printf("El thread que ejecuta revision de columnas es: %d\n", (int)thread_id);
    wait(0);
    // chequeo de filas
    pthread_t thread_id_r;
    pthread_create(&thread_id_r, NULL, checkRows, NULL);
    pthread_join(thread_id_r, &valid_rows);
    row_res = (int)(intptr_t)valid_rows;
    // Desplegar si la solucion del sudoku es valida
    if (sg == 1 && col_res == 1 && row_res == 1){
      printf("La solucion al sudoku SI es valida\n");
    }else {
      printf("La solucion al sudoku NO es valida\n");
    }
    pid_t fp = fork();
    if (fp == 0){
      pid_t pt = getppid();
      char pid_ref_t[12];
      sprintf(pid_ref_t, "%d", (int)pt);
      execlp("ps", "ps", "-p", pid_ref_t, "-lLf", NULL);
      _exit(EXIT_FAILURE);
    }
    wait(0);
  }

  munmap(addr, size);  /* was munmap(addr, 0), which always fails */
  close(fd);
  return 0;
}
|
convolution_5x5_pack4_bf16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv5x5s1_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
Mat top_blob_fp32(outw, outh, opt.num_threads, (size_t)4u * 4, 4, opt.workspace_allocator);
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob_fp32.channel(get_omp_thread_num());
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
out0.fill(_bias0);
int q = 0;
for (; q < inch - 1; q++)
{
float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
const unsigned short* r3 = img0.row<const unsigned short>(3);
const unsigned short* r4 = img0.row<const unsigned short>(4);
const unsigned short* kptr = kernel.channel(p).row<const unsigned short>(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0] \n" // sum0 sum1 sum2 sum3
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n" // r00 r01 r02 r03
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"fmla v23.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v1.s[2] \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"fmla v22.4s, v19.4s, v2.s[3] \n"
"fmla v23.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%1] \n" // r04 r05 r06 r07
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"fmla v23.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v2.s[2] \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v27.4s, v3.s[3] \n"
"fmla v23.4s, v27.4s, v4.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v5.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v3.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v5.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v4.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v6.s[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r10 r11 r12 r13
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v5.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v7.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v5.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v7.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v7.s[3] \n"
"fmla v20.4s, v24.4s, v0.s[0] \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v1.s[1] \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"fmla v23.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v0.s[2] \n"
"fmla v21.4s, v26.4s, v1.s[2] \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v1.s[3] \n"
"fmla v22.4s, v27.4s, v2.s[3] \n"
"fmla v23.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%2] \n" // r14 r15 r16 r17
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v16.4s, v1.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"fmla v23.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v1.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v3.s[3] \n"
"fmla v23.4s, v19.4s, v4.s[3] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"fmla v23.4s, v25.4s, v5.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v4.s[3] \n"
"fmla v23.4s, v27.4s, v5.s[3] \n"
"fmla v20.4s, v16.4s, v3.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v5.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v5.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v3.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v5.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v5.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r20 r21 r22 r23
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v24.4s, v4.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v6.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v6.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v4.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v6.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v6.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"fmla v23.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v1.s[2] \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"fmla v22.4s, v19.4s, v2.s[3] \n"
"fmla v23.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3] \n" // r24 r25 r26 r27
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"fmla v23.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v2.s[2] \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v27.4s, v3.s[3] \n"
"fmla v23.4s, v27.4s, v4.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v5.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v3.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v5.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v4.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v6.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%4], #32 \n" // r30 r31 r32 r33
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v5.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v7.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v5.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v7.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v7.s[3] \n"
"fmla v20.4s, v24.4s, v0.s[0] \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v1.s[1] \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"fmla v23.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v0.s[2] \n"
"fmla v21.4s, v26.4s, v1.s[2] \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v1.s[3] \n"
"fmla v22.4s, v27.4s, v2.s[3] \n"
"fmla v23.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4] \n" // r34 r35 r36 r37
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v16.4s, v1.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"fmla v23.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v1.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v3.s[3] \n"
"fmla v23.4s, v19.4s, v4.s[3] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"fmla v23.4s, v25.4s, v5.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v4.s[3] \n"
"fmla v23.4s, v27.4s, v5.s[3] \n"
"fmla v20.4s, v16.4s, v3.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v5.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v5.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v3.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v5.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v5.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n" // r40 r41 r42 r43
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v24.4s, v4.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v6.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v6.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v4.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v6.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v6.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"fmla v23.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v1.s[2] \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"fmla v22.4s, v19.4s, v2.s[3] \n"
"fmla v23.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%5] \n" // r44 r45 r46 r47
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"fmla v23.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v2.s[2] \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v27.4s, v3.s[3] \n"
"fmla v23.4s, v27.4s, v4.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v5.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v3.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v5.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v6.s[1] \n"
// "prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6] \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v4.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v6.s[3] \n"
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v5.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v7.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v7.s[1] \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v5.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v7.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v7.s[3] \n"
"sub %6, %6, #768 \n" // kptr -= 24 * 16;
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
#else // __aarch64__
// ARMv7 NEON branch (mirrors the __aarch64__ branch above).
// q12-q15 hold four f32 output accumulators, preloaded from the output
// pointer (%0) via vldm and stored back with vstm at the end.
// Every u16 half-vector loaded from the five input rows (%1..%5 = r0..r4)
// and from the kernel pointer (%6 = kptr) is widened to f32 with
// "vshll.u16 #16", i.e. the 16-bit value becomes the high half of each
// 32-bit lane -- consistent with bf16 -> f32 widening.
// NOTE(review): the data is presumably bf16-packed; confirm against the
// callers that fill these buffers.
// kptr advances through 24 writeback loads of 32 bytes each and is then
// rewound by 768 bytes (sub %6, %6, #768) so the next output column
// reuses the same kernel data.
asm volatile(
// load the four output accumulators
"pld [%0, #512] \n"
"vldm %0, {d24-d31} \n" // sum0 sum1 sum2 sum3
// ---- row 0 (r00..r03 in q0..q3, then r04..r07 in q4..q7) ----
"pld [%1, #256] \n"
"vld1.u16 {d4-d7}, [%1 :64]! \n" // r00 r01 r02 r03
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
// next kernel tile; note q10/q11 come before q8/q9 here so the
// previous q8/q9 values stay live until the load completes
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%1, #256] \n"
"vld1.u16 {d12-d15}, [%1 :64] \n" // r04 r05 r06 r07
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q10, d2[0] \n"
"vmla.f32 q13, q10, d4[0] \n"
"vmla.f32 q14, q10, d6[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"vmla.f32 q14, q11, d6[1] \n"
"vmla.f32 q15, q11, d8[1] \n"
"vmla.f32 q12, q8, d3[0] \n"
"vmla.f32 q13, q8, d5[0] \n"
"vmla.f32 q14, q8, d7[0] \n"
"vmla.f32 q15, q8, d9[0] \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d5[1] \n"
"vmla.f32 q14, q9, d7[1] \n"
"vmla.f32 q15, q9, d9[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d6[0] \n"
"vmla.f32 q13, q10, d8[0] \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q10, d12[0] \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vmla.f32 q15, q11, d12[1] \n"
"vmla.f32 q12, q8, d7[0] \n"
"vmla.f32 q13, q8, d9[0] \n"
"vmla.f32 q14, q8, d11[0] \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vmla.f32 q14, q9, d11[1] \n"
"vmla.f32 q15, q9, d13[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
// ---- row 1 (r10..r13 in q0..q3, then r14..r17 in q4..q7) ----
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2 :64]! \n" // r10 r11 r12 r13
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d10[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d11[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d0[0] \n"
"vmla.f32 q13, q10, d2[0] \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d6[0] \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d2[1] \n"
"vmla.f32 q14, q11, d4[1] \n"
"vmla.f32 q15, q11, d6[1] \n"
"vmla.f32 q12, q8, d1[0] \n"
"vmla.f32 q13, q8, d3[0] \n"
"vmla.f32 q14, q8, d5[0] \n"
"vmla.f32 q15, q8, d7[0] \n"
"vmla.f32 q12, q9, d1[1] \n"
"vmla.f32 q13, q9, d3[1] \n"
"vmla.f32 q14, q9, d5[1] \n"
"vmla.f32 q15, q9, d7[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%2, #256] \n"
"vld1.u16 {d12-d15}, [%2 :64] \n" // r14 r15 r16 r17
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q9, d8[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d7[1] \n"
"vmla.f32 q15, q11, d9[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d4[0] \n"
"vmla.f32 q13, q10, d6[0] \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d10[0] \n"
"vmla.f32 q12, q11, d4[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"vmla.f32 q14, q11, d8[1] \n"
"vmla.f32 q15, q11, d10[1] \n"
"vmla.f32 q12, q8, d5[0] \n"
"vmla.f32 q13, q8, d7[0] \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q8, d11[0] \n"
"vmla.f32 q12, q9, d5[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vmla.f32 q14, q9, d9[1] \n"
"vmla.f32 q15, q9, d11[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d6[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d7[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
// ---- row 2 (r20..r23 in q0..q3, then r24..r27 in q4..q7) ----
"pld [%3, #256] \n"
"vld1.u16 {d4-d7}, [%3 :64]! \n" // r20 r21 r22 r23
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q10, d8[0] \n"
"vmla.f32 q13, q10, d10[0] \n"
"vmla.f32 q14, q10, d12[0] \n"
"vmla.f32 q15, q10, d14[0] \n"
"vmla.f32 q12, q11, d8[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
"vmla.f32 q14, q11, d12[1] \n"
"vmla.f32 q15, q11, d14[1] \n"
"vmla.f32 q12, q8, d9[0] \n"
"vmla.f32 q13, q8, d11[0] \n"
"vmla.f32 q14, q8, d13[0] \n"
"vmla.f32 q15, q8, d15[0] \n"
"vmla.f32 q12, q9, d9[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"vmla.f32 q14, q9, d13[1] \n"
"vmla.f32 q15, q9, d15[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%3, #256] \n"
"vld1.u16 {d12-d15}, [%3 :64] \n" // r24 r25 r26 r27
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q10, d2[0] \n"
"vmla.f32 q13, q10, d4[0] \n"
"vmla.f32 q14, q10, d6[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"vmla.f32 q14, q11, d6[1] \n"
"vmla.f32 q15, q11, d8[1] \n"
"vmla.f32 q12, q8, d3[0] \n"
"vmla.f32 q13, q8, d5[0] \n"
"vmla.f32 q14, q8, d7[0] \n"
"vmla.f32 q15, q8, d9[0] \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d5[1] \n"
"vmla.f32 q14, q9, d7[1] \n"
"vmla.f32 q15, q9, d9[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d6[0] \n"
"vmla.f32 q13, q10, d8[0] \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q10, d12[0] \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vmla.f32 q15, q11, d12[1] \n"
"vmla.f32 q12, q8, d7[0] \n"
"vmla.f32 q13, q8, d9[0] \n"
"vmla.f32 q14, q8, d11[0] \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vmla.f32 q14, q9, d11[1] \n"
"vmla.f32 q15, q9, d13[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
// ---- row 3 (r30..r33 in q0..q3, then r34..r37 in q4..q7) ----
"pld [%4, #256] \n"
"vld1.u16 {d4-d7}, [%4 :64]! \n" // r30 r31 r32 r33
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d10[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d11[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d0[0] \n"
"vmla.f32 q13, q10, d2[0] \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d6[0] \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d2[1] \n"
"vmla.f32 q14, q11, d4[1] \n"
"vmla.f32 q15, q11, d6[1] \n"
"vmla.f32 q12, q8, d1[0] \n"
"vmla.f32 q13, q8, d3[0] \n"
"vmla.f32 q14, q8, d5[0] \n"
"vmla.f32 q15, q8, d7[0] \n"
"vmla.f32 q12, q9, d1[1] \n"
"vmla.f32 q13, q9, d3[1] \n"
"vmla.f32 q14, q9, d5[1] \n"
"vmla.f32 q15, q9, d7[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%4, #256] \n"
"vld1.u16 {d12-d15}, [%4 :64] \n" // r34 r35 r36 r37
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q9, d8[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d7[1] \n"
"vmla.f32 q15, q11, d9[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d4[0] \n"
"vmla.f32 q13, q10, d6[0] \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d10[0] \n"
"vmla.f32 q12, q11, d4[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"vmla.f32 q14, q11, d8[1] \n"
"vmla.f32 q15, q11, d10[1] \n"
"vmla.f32 q12, q8, d5[0] \n"
"vmla.f32 q13, q8, d7[0] \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q8, d11[0] \n"
"vmla.f32 q12, q9, d5[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vmla.f32 q14, q9, d9[1] \n"
"vmla.f32 q15, q9, d11[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d6[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d7[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
// ---- row 4 (r40..r43 in q0..q3, then r44..r47 in q4..q7) ----
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5 :64]! \n" // r40 r41 r42 r43
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q10, d8[0] \n"
"vmla.f32 q13, q10, d10[0] \n"
"vmla.f32 q14, q10, d12[0] \n"
"vmla.f32 q15, q10, d14[0] \n"
"vmla.f32 q12, q11, d8[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
"vmla.f32 q14, q11, d12[1] \n"
"vmla.f32 q15, q11, d14[1] \n"
"vmla.f32 q12, q8, d9[0] \n"
"vmla.f32 q13, q8, d11[0] \n"
"vmla.f32 q14, q8, d13[0] \n"
"vmla.f32 q15, q8, d15[0] \n"
"vmla.f32 q12, q9, d9[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"vmla.f32 q14, q9, d13[1] \n"
"vmla.f32 q15, q9, d15[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%5, #256] \n"
"vld1.u16 {d12-d15}, [%5 :64] \n" // r44 r45 r46 r47
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q10, d2[0] \n"
"vmla.f32 q13, q10, d4[0] \n"
"vmla.f32 q14, q10, d6[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"vmla.f32 q14, q11, d6[1] \n"
"vmla.f32 q15, q11, d8[1] \n"
"vmla.f32 q12, q8, d3[0] \n"
"vmla.f32 q13, q8, d5[0] \n"
"vmla.f32 q14, q8, d7[0] \n"
"vmla.f32 q15, q8, d9[0] \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d5[1] \n"
"vmla.f32 q14, q9, d7[1] \n"
"vmla.f32 q15, q9, d9[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d6[0] \n"
"vmla.f32 q13, q10, d8[0] \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q10, d12[0] \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vmla.f32 q15, q11, d12[1] \n"
"vmla.f32 q12, q8, d7[0] \n"
"vmla.f32 q13, q8, d9[0] \n"
"vmla.f32 q14, q8, d11[0] \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vmla.f32 q14, q9, d11[1] \n"
"vmla.f32 q15, q9, d13[1] \n"
// last kernel tile: no writeback (and no prefetch past the end),
// since %6 is rewound just below
// "pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :64] \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d10[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d11[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
// rewind kernel pointer: 24 loads x 32 bytes = 768
"sub %6, %6, #768 \n" // kptr -= 24 * 16;
// store the four accumulators and advance the output pointer
"vstm %0!, {d24-d31} \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
// Horizontal inner loop: produces 2 output pixels per iteration.
// Inputs (rows r0..r4, operands %1..%5) and weights (kptr, %6) are stored
// as 16-bit values; every "shll ... #16" / "vshll.u16 ... #16" widens a
// u16 lane into the upper half of a 32-bit lane — i.e. it reinterprets a
// bf16 value as a full fp32 (bf16 is the high 16 bits of an IEEE float).
// NOTE(review): five input rows, six input columns per row, and a kernel
// pointer that advances through 25 loads of 32 bytes (16 bf16 per tap)
// before being rewound by 24*16*2 = 768 bytes strongly suggest a 5x5
// pack4 convolution with stride 1 — confirm against the enclosing
// function, which is outside this view.
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
asm volatile(
// v20/v21 are the two output accumulators (preloaded from outptr0);
// v22/v23 are secondary accumulators started with fmul and folded into
// v20/v21 by the fadd pair just before the store. Kernel taps stream
// through v16-v19 / v24-v27 in alternation so loads overlap the fmla
// chain; input columns live in v0-v5.
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4h, v1.4h}, [%1], #16 \n" // r00 r01
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v20.4s, v21.4s}, [%0] \n" // sum0 sum1
"fmul v22.4s, v16.4s, v0.s[0] \n"
"fmul v23.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%1] \n" // r02 r03 r04 r05
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v24.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v23.4s, v24.4s, v2.s[0] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"shll v5.4s, v5.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
// next input row (r1) is prefetched/loaded while row-0 taps finish
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4h, v1.4h}, [%2], #16 \n" // r10 r11
"fmla v21.4s, v27.4s, v4.s[3] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v0.s[0] \n"
"fmla v23.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v0.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%2] \n" // r12 r13 r14 r15
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v27.4s, v1.s[3] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v16.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v23.4s, v16.4s, v2.s[0] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v22.4s, v18.4s, v1.s[2] \n"
"shll v5.4s, v5.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
// row r2
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4h, v1.4h}, [%3], #16 \n" // r20 r21
"fmla v21.4s, v19.4s, v4.s[3] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v0.s[0] \n"
"fmla v23.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%3] \n" // r22 r23 r24 r25
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v24.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v23.4s, v24.4s, v2.s[0] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"shll v5.4s, v5.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
// row r3
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v0.4h, v1.4h}, [%4], #16 \n" // r30 r31
"fmla v21.4s, v27.4s, v4.s[3] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v0.s[0] \n"
"fmla v23.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v0.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%4] \n" // r32 r33 r34 r35
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v27.4s, v1.s[3] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v16.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v23.4s, v16.4s, v2.s[0] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v22.4s, v18.4s, v1.s[2] \n"
"shll v5.4s, v5.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
// row r4 (last)
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4h, v1.4h}, [%5], #16 \n" // r40 r41
"fmla v21.4s, v19.4s, v4.s[3] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v0.s[0] \n"
"fmla v23.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%5] \n" // r42 r43 r44 r45
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v24.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v23.4s, v24.4s, v2.s[0] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"shll v5.4s, v5.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
// last tap: no post-increment, kptr is rewound below instead
// "prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
// fold secondary accumulators into the sums and store 2 pixels
"fadd v20.4s, v20.4s, v22.4s \n"
"fadd v21.4s, v21.4s, v23.4s \n"
"sub %6, %6, #768 \n" // kptr -= 24 * 16;
"st1 {v20.4s, v21.4s}, [%0], #32 \n"
// %0..%6 = outptr0, r0..r4, kptr — all read-write (tied operands)
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
#else // __aarch64__
asm volatile(
// ARMv7 NEON variant of the same 2-pixel kernel: q12/q13 hold the two
// sums (preloaded from outptr0), q14/q15 are secondary accumulators
// started with vmul and folded in by the vadd pair before the store.
// Weights stream through q8-q11; input columns live in q0-q5.
"pld [%1, #128] \n"
"vld1.u16 {d2-d3}, [%1 :64]! \n" // r00 r01
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"pld [%1, #256] \n"
"vld1.u16 {d8-d11}, [%1 :64] \n" // r02 r03 r04 r05
"vshll.u16 q8, d20, #16 \n"
"pld [%0, #256] \n"
"vld1.f32 {d24-d27}, [%0 :128] \n" // sum0 sum1
"vmul.f32 q14, q8, d0[0] \n"
"vshll.u16 q9, d21, #16 \n"
"vmul.f32 q15, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d1[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q13, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d2[0] \n"
"vmla.f32 q15, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d3[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q13, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d5[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q11, d7[1] \n"
// row r1
"pld [%2, #128] \n"
"vld1.u16 {d2-d3}, [%2 :64]! \n" // r10 r11
"vmla.f32 q14, q10, d6[0] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q15, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d7[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d8[1] \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q13, q9, d10[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d9[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d11[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q11, d11[1] \n"
"pld [%2, #256] \n"
"vld1.u16 {d8-d11}, [%2 :64] \n" // r12 r13 r14 r15
"vmla.f32 q14, q10, d0[0] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q15, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d1[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q13, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d3[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q13, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d4[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d5[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q9, d7[1] \n"
// row r2
"pld [%3, #128] \n"
"vld1.u16 {d2-d3}, [%3 :64]! \n" // r20 r21
"vmla.f32 q14, q8, d6[0] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q15, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d7[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d10[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d8[1] \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q13, q11, d10[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d9[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d11[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q9, d11[1] \n"
"pld [%3, #256] \n"
"vld1.u16 {d8-d11}, [%3 :64] \n" // r22 r23 r24 r25
"vmla.f32 q14, q8, d0[0] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q15, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d1[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q13, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d2[0] \n"
"vmla.f32 q15, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d3[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q13, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d5[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q11, d7[1] \n"
// row r3
"pld [%4, #128] \n"
"vld1.u16 {d2-d3}, [%4 :64]! \n" // r30 r31
"vmla.f32 q14, q10, d6[0] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q15, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d7[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d8[1] \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q13, q9, d10[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d9[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d11[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q11, d11[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d8-d11}, [%4 :64] \n" // r32 r33 r34 r35
"vmla.f32 q14, q10, d0[0] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q15, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d1[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q13, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d3[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q13, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d4[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d5[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q9, d7[1] \n"
// row r4 (last)
"pld [%5, #128] \n"
"vld1.u16 {d2-d3}, [%5 :64]! \n" // r40 r41
"vmla.f32 q14, q8, d6[0] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q15, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d7[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d10[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d8[1] \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q13, q11, d10[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d9[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d11[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q9, d11[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d8-d11}, [%5 :64] \n" // r42 r43 r44 r45
"vmla.f32 q14, q8, d0[0] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q15, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d1[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q13, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d2[0] \n"
"vmla.f32 q15, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d3[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q13, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d5[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q11, d7[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d6[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
// last tap: no post-increment, kptr is rewound below instead
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128] \n"
"vmla.f32 q14, q8, d7[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
// fold secondary accumulators into the sums and store 2 pixels
"vadd.f32 q12, q12, q14 \n"
"vadd.f32 q13, q13, q15 \n"
"sub %6, %6, #768 \n" // kptr -= 24 * 16;
"vst1.f32 {d24-d27}, [%0 :128]! \n"
// %0..%6 = outptr0, r0..r4, kptr — all read-write (tied operands)
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j < outw; j++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v0.4h}, [%1], #8 \n" // r00
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%1] \n" // r01 r02 r03 r04
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v20.4s}, [%0] \n" // sum0
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmul v21.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmul v22.4s, v17.4s, v0.s[1] \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmul v23.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v0.4h}, [%2], #8 \n" // r10
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%2] \n" // r11 r12 r13 r14
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v21.4s, v24.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v22.4s, v25.4s, v0.s[1] \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v23.4s, v26.4s, v0.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v0.4h}, [%3], #8 \n" // r20
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%3] \n" // r21 r22 r23 r24
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v21.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v22.4s, v17.4s, v0.s[1] \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v23.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%4, #64] \n"
"ld1 {v0.4h}, [%4], #8 \n" // r30
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%4] \n" // r31 r32 r33 r34
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v21.4s, v24.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v22.4s, v25.4s, v0.s[1] \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v23.4s, v26.4s, v0.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%5, #64] \n"
"ld1 {v0.4h}, [%5], #8 \n" // r40
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%5] \n" // r41 r42 r43 r44
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v21.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v22.4s, v17.4s, v0.s[1] \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v23.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
// "prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fadd v22.4s, v21.4s, v22.4s \n"
"fadd v23.4s, v22.4s, v23.4s \n"
"fadd v20.4s, v20.4s, v23.4s \n"
"sub %6, %6, #768 \n" // kptr -= 24 * 16;
"st1 {v20.4s}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
#else // __aarch64__
// NOTE(review): ARMv7 NEON path. Loads u16 data and widens it to f32 with
// "vshll #16" (bf16 -> fp32 reinterpretation — high halves become the float
// bits), then accumulates a 5x5 tap window from input rows r0..r4 (%1..%5)
// against kernel taps streamed from kptr (%6) into four f32 partial
// accumulators q12..q15. The partials are reduced and one 4-float result is
// stored to outptr0 (%0). Loads/prefetches are interleaved with arithmetic
// by hand — statement order is load-use scheduling, do not reorder.
asm volatile(
// ---- input row r0 (taps r00..r04) ----
"pld [%1, #64] \n"
"vld1.u16 {d1}, [%1 :64]! \n" // r00
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q0, d1, #16 \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"pld [%0, #128] \n"
"vld1.f32 {d24-d25}, [%0 :128] \n" // sum0
"vmul.f32 q13, q8, d0[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmul.f32 q14, q9, d0[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmul.f32 q15, q10, d1[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%1, #256] \n"
"vld1.u16 {d6-d9}, [%1 :64] \n" // r01 r02 r03 r04
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q1, d6, #16 \n"
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q13, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
// ---- input row r1 (taps r10..r14) ----
"pld [%2, #64] \n"
"vld1.u16 {d1}, [%2 :64]! \n" // r10
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q13, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d0[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d0[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d1[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"pld [%2, #256] \n"
"vld1.u16 {d6-d9}, [%2 :64] \n" // r11 r12 r13 r14
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q1, d6, #16 \n"
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q13, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
// ---- input row r2 (taps r20..r24) ----
"pld [%3, #64] \n"
"vld1.u16 {d1}, [%3 :64]! \n" // r20
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q13, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d0[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d0[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d1[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%3, #256] \n"
"vld1.u16 {d6-d9}, [%3 :64] \n" // r21 r22 r23 r24
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q1, d6, #16 \n"
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q13, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
// ---- input row r3 (taps r30..r34) ----
"pld [%4, #64] \n"
"vld1.u16 {d1}, [%4 :64]! \n" // r30
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q13, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d0[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d0[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d1[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d6-d9}, [%4 :64] \n" // r31 r32 r33 r34
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q1, d6, #16 \n"
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q13, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
// ---- input row r4 (taps r40..r44) ----
"pld [%5, #64] \n"
"vld1.u16 {d1}, [%5 :64]! \n" // r40
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q13, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d0[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d0[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d1[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d6-d9}, [%5 :64] \n" // r41 r42 r43 r44
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q1, d6, #16 \n"
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q13, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d6[1] \n"
// last kernel load of this step: no post-increment, pointer rewound below
// "pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d8[1] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
// reduce the four partial accumulators into q12 and store one 4-float output
"vadd.f32 q13, q13, q14 \n"
"vadd.f32 q12, q12, q15 \n"
"vadd.f32 q12, q12, q13 \n"
"sub %6, %6, #768 \n" // kptr -= 24 * 16;
"vst1.f32 {d24-d25}, [%0 :128]! \n"
// read-write pointer operands: outputs tied to inputs via "0".."6" below
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
// advance the five input row pointers by 16 u16 elements
// (presumably 4 output pixels x 4-element pack — TODO confirm against the
// enclosing loop's output step, which starts above this view)
r0 += 4 * 4;
r1 += 4 * 4;
r2 += 4 * 4;
r3 += 4 * 4;
r4 += 4 * 4;
}
}
// remaining input channels q: accumulate their contribution on top of the
// f32 sums kept in out0, writing the final bf16 result to top_blob
for (; q < inch; q++)
{
// bf16 destination row for output channel p, and the f32 accumulation buffer
unsigned short* outptr0_bf16 = top_blob.channel(p);
const float* outptr0 = out0.row(0);
// five consecutive input rows feed the 5-tap vertical window below
const Mat img0 = bottom_blob.channel(q);
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
const unsigned short* r3 = img0.row<const unsigned short>(3);
const unsigned short* r4 = img0.row<const unsigned short>(4);
// bf16 kernel taps for (output channel p, input channel q)
const unsigned short* kptr = kernel.channel(p).row<const unsigned short>(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
// main horizontal loop: 4 output pixels per iteration
for (; j + 3 < outw; j += 4)
{
#if __aarch64__
// NOTE(review): AArch64 path computing 4 output pixels at once. The f32
// partial sums are preloaded from outptr0 (%1); bf16 input rows r0..r4
// (%2..%6) and bf16 kernel taps streamed from kptr (%7) are widened to f32
// via "shll #16" (bf16 -> fp32), a 5x5 window is accumulated into v20..v23
// (one accumulator per output pixel), then the sums are narrowed back with
// "shrn #16" (truncate to bf16) and stored to outptr0_bf16 (%0).
// Loads/prefetches are interleaved with the fmla chain by hand — statement
// order is load-use scheduling, do not reorder.
asm volatile(
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" // sum0 sum1 sum2 sum3
// ---- input row r0 ----
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r00 r01 r02 r03
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"fmla v23.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v1.s[2] \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"fmla v22.4s, v19.4s, v2.s[3] \n"
"fmla v23.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%2] \n" // r04 r05 r06 r07
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"fmla v23.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v2.s[2] \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v27.4s, v3.s[3] \n"
"fmla v23.4s, v27.4s, v4.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v5.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v3.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v5.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v6.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v4.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v6.s[3] \n"
// ---- input row r1 ----
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r10 r11 r12 r13
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v5.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v7.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v7.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v5.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v7.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v7.s[3] \n"
"fmla v20.4s, v24.4s, v0.s[0] \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v1.s[1] \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"fmla v23.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v0.s[2] \n"
"fmla v21.4s, v26.4s, v1.s[2] \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v1.s[3] \n"
"fmla v22.4s, v27.4s, v2.s[3] \n"
"fmla v23.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3] \n" // r14 r15 r16 r17
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v16.4s, v1.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"fmla v23.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v1.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v3.s[3] \n"
"fmla v23.4s, v19.4s, v4.s[3] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"fmla v23.4s, v25.4s, v5.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v4.s[3] \n"
"fmla v23.4s, v27.4s, v5.s[3] \n"
"fmla v20.4s, v16.4s, v3.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v5.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v5.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v3.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v5.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v5.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
// ---- input row r2 ----
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%4], #32 \n" // r20 r21 r22 r23
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v24.4s, v4.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v6.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v6.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v4.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v6.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v6.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"fmla v23.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v1.s[2] \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"fmla v22.4s, v19.4s, v2.s[3] \n"
"fmla v23.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4] \n" // r24 r25 r26 r27
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"fmla v23.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v2.s[2] \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v27.4s, v3.s[3] \n"
"fmla v23.4s, v27.4s, v4.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v5.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v3.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v5.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v6.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v4.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v6.s[3] \n"
// ---- input row r3 ----
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n" // r30 r31 r32 r33
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v5.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v7.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v7.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v5.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v7.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v7.s[3] \n"
"fmla v20.4s, v24.4s, v0.s[0] \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v1.s[1] \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"fmla v23.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v0.s[2] \n"
"fmla v21.4s, v26.4s, v1.s[2] \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v1.s[3] \n"
"fmla v22.4s, v27.4s, v2.s[3] \n"
"fmla v23.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%5] \n" // r34 r35 r36 r37
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v16.4s, v1.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"fmla v23.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v1.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v3.s[3] \n"
"fmla v23.4s, v19.4s, v4.s[3] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"fmla v23.4s, v25.4s, v5.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v4.s[3] \n"
"fmla v23.4s, v27.4s, v5.s[3] \n"
"fmla v20.4s, v16.4s, v3.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v5.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v5.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v3.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v5.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v5.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
// ---- input row r4 ----
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%6], #32 \n" // r40 r41 r42 r43
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v24.4s, v4.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v6.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v6.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v4.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v6.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v6.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"fmla v23.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v1.s[2] \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"fmla v22.4s, v19.4s, v2.s[3] \n"
"fmla v23.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6] \n" // r44 r45 r46 r47
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"fmla v23.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v2.s[2] \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v27.4s, v3.s[3] \n"
"fmla v23.4s, v27.4s, v4.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v5.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v3.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v5.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v6.s[1] \n"
// last kernel load of this step: no post-increment, pointer rewound below
// "prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7] \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v4.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v6.s[3] \n"
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v5.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v7.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v7.s[1] \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v5.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v7.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v7.s[3] \n"
// rewind kernel pointer for the next output position
"sub %7, %7, #768 \n" // kptr -= 24 * 16;
// narrow f32 sums to bf16 (keep high 16 bits) and store 4 output pixels
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%0], #32 \n"
// read-write pointer operands: outputs tied to inputs via "0".."7" below
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
#else // __aarch64__
// ARMv7 NEON path: accumulates the 5x5 convolution for 4 output pixels.
// Partial sums are kept in fp32 (q12-q15, one q register per pixel).
// Input rows r0..r4 (%2..%6) and the kernel (%7) are stored as bf16
// (u16); "vshll.u16 ..., #16" widens bf16 -> fp32 and "vshrn.u32 ..., #16"
// truncates the fp32 sums back to bf16 for the final store to %0.
// NOTE(review): each kernel-tap load pulls 16 bf16 values (a 4x4 block),
// consistent with pack4 channel layout — confirm against the packing code.
asm volatile(
"pld [%1, #512] \n"
"vldm %1!, {d24-d31} \n" // sum0 sum1 sum2 sum3
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2 :64]! \n" // r00 r01 r02 r03
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%2, #256] \n"
"vld1.u16 {d12-d15}, [%2 :64] \n" // r04 r05 r06 r07
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q10, d2[0] \n"
"vmla.f32 q13, q10, d4[0] \n"
"vmla.f32 q14, q10, d6[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"vmla.f32 q14, q11, d6[1] \n"
"vmla.f32 q15, q11, d8[1] \n"
"vmla.f32 q12, q8, d3[0] \n"
"vmla.f32 q13, q8, d5[0] \n"
"vmla.f32 q14, q8, d7[0] \n"
"vmla.f32 q15, q8, d9[0] \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d5[1] \n"
"vmla.f32 q14, q9, d7[1] \n"
"vmla.f32 q15, q9, d9[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d6[0] \n"
"vmla.f32 q13, q10, d8[0] \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q10, d12[0] \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vmla.f32 q15, q11, d12[1] \n"
"vmla.f32 q12, q8, d7[0] \n"
"vmla.f32 q13, q8, d9[0] \n"
"vmla.f32 q14, q8, d11[0] \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vmla.f32 q14, q9, d11[1] \n"
"vmla.f32 q15, q9, d13[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
// ---- input row 1 ----
"pld [%3, #256] \n"
"vld1.u16 {d4-d7}, [%3 :64]! \n" // r10 r11 r12 r13
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d10[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d11[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d0[0] \n"
"vmla.f32 q13, q10, d2[0] \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d6[0] \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d2[1] \n"
"vmla.f32 q14, q11, d4[1] \n"
"vmla.f32 q15, q11, d6[1] \n"
"vmla.f32 q12, q8, d1[0] \n"
"vmla.f32 q13, q8, d3[0] \n"
"vmla.f32 q14, q8, d5[0] \n"
"vmla.f32 q15, q8, d7[0] \n"
"vmla.f32 q12, q9, d1[1] \n"
"vmla.f32 q13, q9, d3[1] \n"
"vmla.f32 q14, q9, d5[1] \n"
"vmla.f32 q15, q9, d7[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%3, #256] \n"
"vld1.u16 {d12-d15}, [%3 :64] \n" // r14 r15 r16 r17
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q9, d8[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d7[1] \n"
"vmla.f32 q15, q11, d9[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d4[0] \n"
"vmla.f32 q13, q10, d6[0] \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d10[0] \n"
"vmla.f32 q12, q11, d4[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"vmla.f32 q14, q11, d8[1] \n"
"vmla.f32 q15, q11, d10[1] \n"
"vmla.f32 q12, q8, d5[0] \n"
"vmla.f32 q13, q8, d7[0] \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q8, d11[0] \n"
"vmla.f32 q12, q9, d5[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vmla.f32 q14, q9, d9[1] \n"
"vmla.f32 q15, q9, d11[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d6[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d7[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
// ---- input row 2 ----
"pld [%4, #256] \n"
"vld1.u16 {d4-d7}, [%4 :64]! \n" // r20 r21 r22 r23
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q10, d8[0] \n"
"vmla.f32 q13, q10, d10[0] \n"
"vmla.f32 q14, q10, d12[0] \n"
"vmla.f32 q15, q10, d14[0] \n"
"vmla.f32 q12, q11, d8[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
"vmla.f32 q14, q11, d12[1] \n"
"vmla.f32 q15, q11, d14[1] \n"
"vmla.f32 q12, q8, d9[0] \n"
"vmla.f32 q13, q8, d11[0] \n"
"vmla.f32 q14, q8, d13[0] \n"
"vmla.f32 q15, q8, d15[0] \n"
"vmla.f32 q12, q9, d9[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"vmla.f32 q14, q9, d13[1] \n"
"vmla.f32 q15, q9, d15[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%4, #256] \n"
"vld1.u16 {d12-d15}, [%4 :64] \n" // r24 r25 r26 r27
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q10, d2[0] \n"
"vmla.f32 q13, q10, d4[0] \n"
"vmla.f32 q14, q10, d6[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"vmla.f32 q14, q11, d6[1] \n"
"vmla.f32 q15, q11, d8[1] \n"
"vmla.f32 q12, q8, d3[0] \n"
"vmla.f32 q13, q8, d5[0] \n"
"vmla.f32 q14, q8, d7[0] \n"
"vmla.f32 q15, q8, d9[0] \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d5[1] \n"
"vmla.f32 q14, q9, d7[1] \n"
"vmla.f32 q15, q9, d9[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d6[0] \n"
"vmla.f32 q13, q10, d8[0] \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q10, d12[0] \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vmla.f32 q15, q11, d12[1] \n"
"vmla.f32 q12, q8, d7[0] \n"
"vmla.f32 q13, q8, d9[0] \n"
"vmla.f32 q14, q8, d11[0] \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vmla.f32 q14, q9, d11[1] \n"
"vmla.f32 q15, q9, d13[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
// ---- input row 3 ----
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5 :64]! \n" // r30 r31 r32 r33
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d10[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d11[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d0[0] \n"
"vmla.f32 q13, q10, d2[0] \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d6[0] \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d2[1] \n"
"vmla.f32 q14, q11, d4[1] \n"
"vmla.f32 q15, q11, d6[1] \n"
"vmla.f32 q12, q8, d1[0] \n"
"vmla.f32 q13, q8, d3[0] \n"
"vmla.f32 q14, q8, d5[0] \n"
"vmla.f32 q15, q8, d7[0] \n"
"vmla.f32 q12, q9, d1[1] \n"
"vmla.f32 q13, q9, d3[1] \n"
"vmla.f32 q14, q9, d5[1] \n"
"vmla.f32 q15, q9, d7[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%5, #256] \n"
"vld1.u16 {d12-d15}, [%5 :64] \n" // r34 r35 r36 r37
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q9, d8[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d7[1] \n"
"vmla.f32 q15, q11, d9[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d4[0] \n"
"vmla.f32 q13, q10, d6[0] \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d10[0] \n"
"vmla.f32 q12, q11, d4[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"vmla.f32 q14, q11, d8[1] \n"
"vmla.f32 q15, q11, d10[1] \n"
"vmla.f32 q12, q8, d5[0] \n"
"vmla.f32 q13, q8, d7[0] \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q8, d11[0] \n"
"vmla.f32 q12, q9, d5[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vmla.f32 q14, q9, d9[1] \n"
"vmla.f32 q15, q9, d11[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d6[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d7[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
// ---- input row 4 ----
"pld [%6, #256] \n"
"vld1.u16 {d4-d7}, [%6 :64]! \n" // r40 r41 r42 r43
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q10, d8[0] \n"
"vmla.f32 q13, q10, d10[0] \n"
"vmla.f32 q14, q10, d12[0] \n"
"vmla.f32 q15, q10, d14[0] \n"
"vmla.f32 q12, q11, d8[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
"vmla.f32 q14, q11, d12[1] \n"
"vmla.f32 q15, q11, d14[1] \n"
"vmla.f32 q12, q8, d9[0] \n"
"vmla.f32 q13, q8, d11[0] \n"
"vmla.f32 q14, q8, d13[0] \n"
"vmla.f32 q15, q8, d15[0] \n"
"vmla.f32 q12, q9, d9[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"vmla.f32 q14, q9, d13[1] \n"
"vmla.f32 q15, q9, d15[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%6, #256] \n"
"vld1.u16 {d12-d15}, [%6 :64] \n" // r44 r45 r46 r47
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q10, d2[0] \n"
"vmla.f32 q13, q10, d4[0] \n"
"vmla.f32 q14, q10, d6[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"vmla.f32 q14, q11, d6[1] \n"
"vmla.f32 q15, q11, d8[1] \n"
"vmla.f32 q12, q8, d3[0] \n"
"vmla.f32 q13, q8, d5[0] \n"
"vmla.f32 q14, q8, d7[0] \n"
"vmla.f32 q15, q8, d9[0] \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d5[1] \n"
"vmla.f32 q14, q9, d7[1] \n"
"vmla.f32 q15, q9, d9[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d6[0] \n"
"vmla.f32 q13, q10, d8[0] \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q10, d12[0] \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vmla.f32 q15, q11, d12[1] \n"
"vmla.f32 q12, q8, d7[0] \n"
"vmla.f32 q13, q8, d9[0] \n"
"vmla.f32 q14, q8, d11[0] \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vmla.f32 q14, q9, d11[1] \n"
"vmla.f32 q15, q9, d13[1] \n"
// last tap: no post-increment, so the pointer rewind below stays #768
// "pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64] \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d10[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d11[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"sub %7, %7, #768 \n" // kptr -= 24 * 16;
// truncate fp32 sums to bf16 (drop low 16 mantissa bits) and store
"vshrn.u32 d24, q12, #16 \n"
"vshrn.u32 d25, q13, #16 \n"
"vshrn.u32 d26, q14, #16 \n"
"vshrn.u32 d27, q15, #16 \n"
"vst1.u16 {d24-d27}, [%0 :64]! \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
// AArch64 path: 2 output pixels per iteration of the 5x5 kernel.
// Two accumulator pairs are interleaved to shorten fmla dependency
// chains: v20/v21 start from the loaded fp32 partial sums, v22/v23
// start from fmul, and the pairs are folded with fadd at the end.
// bf16 operands are widened with "shll ..., #16" and the sums narrowed
// back to bf16 with "shrn ..., #16" before the store to %0.
asm volatile(
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4h, v1.4h}, [%2], #16 \n" // r00 r01
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v20.4s, v21.4s}, [%1], #32 \n" // sum0 sum1
"fmul v22.4s, v16.4s, v0.s[0] \n"
"fmul v23.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%2] \n" // r02 r03 r04 r05
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v24.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v23.4s, v24.4s, v2.s[0] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"shll v5.4s, v5.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
// ---- input row 1 ----
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4h, v1.4h}, [%3], #16 \n" // r10 r11
"fmla v21.4s, v27.4s, v4.s[3] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v0.s[0] \n"
"fmla v23.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v0.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%3] \n" // r12 r13 r14 r15
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v27.4s, v1.s[3] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v16.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v23.4s, v16.4s, v2.s[0] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v22.4s, v18.4s, v1.s[2] \n"
"shll v5.4s, v5.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
// ---- input row 2 ----
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v0.4h, v1.4h}, [%4], #16 \n" // r20 r21
"fmla v21.4s, v19.4s, v4.s[3] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v0.s[0] \n"
"fmla v23.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%4] \n" // r22 r23 r24 r25
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v24.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v23.4s, v24.4s, v2.s[0] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"shll v5.4s, v5.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
// ---- input row 3 ----
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4h, v1.4h}, [%5], #16 \n" // r30 r31
"fmla v21.4s, v27.4s, v4.s[3] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v0.s[0] \n"
"fmla v23.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v0.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%5] \n" // r32 r33 r34 r35
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v27.4s, v1.s[3] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v16.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v23.4s, v16.4s, v2.s[0] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v22.4s, v18.4s, v1.s[2] \n"
"shll v5.4s, v5.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
// ---- input row 4 ----
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v0.4h, v1.4h}, [%6], #16 \n" // r40 r41
"fmla v21.4s, v19.4s, v4.s[3] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v0.s[0] \n"
"fmla v23.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v2.4h, v3.4h, v4.4h, v5.4h}, [%6] \n" // r42 r43 r44 r45
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v19.4s, v1.s[3] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v24.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v23.4s, v24.4s, v2.s[0] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"shll v5.4s, v5.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
// last tap: no post-increment, so the pointer rewind below stays #768
// "prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
// merge the two accumulator chains, narrow fp32 -> bf16, store 2 pixels
"fadd v20.4s, v20.4s, v22.4s \n"
"fadd v21.4s, v21.4s, v23.4s \n"
"sub %7, %7, #768 \n" // kptr -= 24 * 16;
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"st1 {v20.4h, v21.4h}, [%0], #16 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
#else // __aarch64__
asm volatile(
"pld [%2, #128] \n"
"vld1.u16 {d2-d3}, [%2 :64]! \n" // r00 r01
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"pld [%2, #256] \n"
"vld1.u16 {d8-d11}, [%2 :64] \n" // r02 r03 r04 r05
"vshll.u16 q8, d20, #16 \n"
"pld [%1, #256] \n"
"vld1.f32 {d24-d27}, [%1 :128]! \n" // sum0 sum1
"vmul.f32 q14, q8, d0[0] \n"
"vshll.u16 q9, d21, #16 \n"
"vmul.f32 q15, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d1[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q13, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d2[0] \n"
"vmla.f32 q15, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d3[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q13, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d5[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q11, d7[1] \n"
"pld [%3, #128] \n"
"vld1.u16 {d2-d3}, [%3 :64]! \n" // r10 r11
"vmla.f32 q14, q10, d6[0] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q15, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d7[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d8[1] \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q13, q9, d10[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d9[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d11[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q11, d11[1] \n"
"pld [%3, #256] \n"
"vld1.u16 {d8-d11}, [%3 :64] \n" // r12 r13 r14 r15
"vmla.f32 q14, q10, d0[0] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q15, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d1[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q13, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d3[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q13, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d4[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d5[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q9, d7[1] \n"
"pld [%4, #128] \n"
"vld1.u16 {d2-d3}, [%4 :64]! \n" // r20 r21
"vmla.f32 q14, q8, d6[0] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q15, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d7[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d10[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d8[1] \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q13, q11, d10[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d9[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d11[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q9, d11[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d8-d11}, [%4 :64] \n" // r22 r23 r24 r25
"vmla.f32 q14, q8, d0[0] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q15, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d1[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q13, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d2[0] \n"
"vmla.f32 q15, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d3[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q13, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d5[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q11, d7[1] \n"
"pld [%5, #128] \n"
"vld1.u16 {d2-d3}, [%5 :64]! \n" // r30 r31
"vmla.f32 q14, q10, d6[0] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q15, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d7[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d8[1] \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q13, q9, d10[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d9[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d11[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q11, d11[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d8-d11}, [%5 :64] \n" // r32 r33 r34 r35
"vmla.f32 q14, q10, d0[0] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q15, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d1[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q13, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d3[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q13, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d4[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d5[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q9, d7[1] \n"
"pld [%6, #128] \n"
"vld1.u16 {d2-d3}, [%6 :64]! \n" // r40 r41
"vmla.f32 q14, q8, d6[0] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q15, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d7[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d10[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d8[1] \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q13, q11, d10[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d9[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d11[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q9, d11[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d8-d11}, [%6 :64] \n" // r42 r43 r44 r45
"vmla.f32 q14, q8, d0[0] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q15, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d1[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q13, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d2[0] \n"
"vmla.f32 q15, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d3[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q13, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d5[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q11, d7[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d6[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128] \n"
"vmla.f32 q14, q8, d7[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"vadd.f32 q12, q12, q14 \n"
"vadd.f32 q13, q13, q15 \n"
"sub %7, %7, #768 \n" // kptr -= 24 * 16;
"vshrn.u32 d24, q12, #16 \n"
"vshrn.u32 d25, q13, #16 \n"
"vst1.u16 {d24-d25}, [%0 :64]! \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j < outw; j++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v0.4h}, [%2], #8 \n" // r00
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%2] \n" // r01 r02 r03 r04
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v20.4s}, [%1], #16 \n" // sum0
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmul v21.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmul v22.4s, v17.4s, v0.s[1] \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmul v23.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v0.4h}, [%3], #8 \n" // r10
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%3] \n" // r11 r12 r13 r14
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v21.4s, v24.4s, v0.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v22.4s, v25.4s, v0.s[1] \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v23.4s, v26.4s, v0.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%4, #64] \n"
"ld1 {v0.4h}, [%4], #8 \n" // r20
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%4] \n" // r21 r22 r23 r24
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v21.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v22.4s, v17.4s, v0.s[1] \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v23.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%5, #64] \n"
"ld1 {v0.4h}, [%5], #8 \n" // r30
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%5] \n" // r31 r32 r33 r34
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v21.4s, v24.4s, v0.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v22.4s, v25.4s, v0.s[1] \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v23.4s, v26.4s, v0.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%6, #64] \n"
"ld1 {v0.4h}, [%6], #8 \n" // r40
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v1.4h, v2.4h, v3.4h, v4.4h}, [%6] \n" // r41 r42 r43 r44
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v21.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v22.4s, v17.4s, v0.s[1] \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v23.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
// "prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fadd v22.4s, v21.4s, v22.4s \n"
"fadd v23.4s, v22.4s, v23.4s \n"
"fadd v20.4s, v20.4s, v23.4s \n"
"sub %7, %7, #768 \n" // kptr -= 24 * 16;
"shrn v20.4h, v20.4s, #16 \n"
"st1 {v20.4h}, [%0], #8 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
#else // __aarch64__
asm volatile(
"pld [%2, #64] \n"
"vld1.u16 {d1}, [%2 :64]! \n" // r00
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q0, d1, #16 \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"pld [%1, #128] \n"
"vld1.f32 {d24-d25}, [%1 :128]! \n" // sum0
"vmul.f32 q13, q8, d0[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmul.f32 q14, q9, d0[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmul.f32 q15, q10, d1[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%2, #256] \n"
"vld1.u16 {d6-d9}, [%2 :64] \n" // r01 r02 r03 r04
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q1, d6, #16 \n"
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q13, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"pld [%3, #64] \n"
"vld1.u16 {d1}, [%3 :64]! \n" // r10
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q13, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d0[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d0[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d1[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"pld [%3, #256] \n"
"vld1.u16 {d6-d9}, [%3 :64] \n" // r11 r12 r13 r14
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q1, d6, #16 \n"
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q13, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"pld [%4, #64] \n"
"vld1.u16 {d1}, [%4 :64]! \n" // r20
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q13, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d0[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d0[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d1[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d6-d9}, [%4 :64] \n" // r21 r22 r23 r24
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q1, d6, #16 \n"
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q13, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"pld [%5, #64] \n"
"vld1.u16 {d1}, [%5 :64]! \n" // r30
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q13, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d0[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d0[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d1[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d6-d9}, [%5 :64] \n" // r31 r32 r33 r34
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q1, d6, #16 \n"
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q13, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"pld [%6, #64] \n"
"vld1.u16 {d1}, [%6 :64]! \n" // r40
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q13, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d0[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d0[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d1[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d6-d9}, [%6 :64] \n" // r41 r42 r43 r44
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q1, d6, #16 \n"
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q13, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d6[1] \n"
// "pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d8[1] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vadd.f32 q13, q13, q14 \n"
"vadd.f32 q12, q12, q15 \n"
"vadd.f32 q12, q12, q13 \n"
"sub %7, %7, #768 \n" // kptr -= 24 * 16;
"vshrn.u32 d24, q12, #16 \n"
"vst1.u16 {d24}, [%0 :64]! \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
r0 += 4 * 4;
r1 += 4 * 4;
r2 += 4 * 4;
r3 += 4 * 4;
r4 += 4 * 4;
}
}
}
}
static void conv5x5s2_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
Mat top_blob_fp32(outw, outh, opt.num_threads, (size_t)4u * 4, 4, opt.workspace_allocator);
const int tailstep = (w - 2 * outw + w) * 4;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob_fp32.channel(get_omp_thread_num());
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
out0.fill(_bias0);
int q = 0;
for (; q < inch - 1; q++)
{
float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
const unsigned short* r3 = img0.row<const unsigned short>(3);
const unsigned short* r4 = img0.row<const unsigned short>(4);
const unsigned short* kptr = kernel.channel(p).row<const unsigned short>(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n" // r00 r01 r02 r03
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%1], #32 \n" // r04 r05 r06 r07
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0] \n" // sum0 sum1 sum2 sum3
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%1, #192] \n"
"ld1 {v28.4h, v29.4h, v30.4h}, [%1] \n" // r08 r09 r010
"shll v28.4s, v28.4h, #16 \n"
"shll v29.4s, v29.4h, #16 \n"
"shll v30.4s, v30.4h, #16 \n"
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v28.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v28.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v28.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v28.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v7.s[0] \n"
"fmla v23.4s, v24.4s, v29.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v7.s[1] \n"
"fmla v23.4s, v25.4s, v29.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v7.s[2] \n"
"fmla v23.4s, v26.4s, v29.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v7.s[3] \n"
"fmla v23.4s, v27.4s, v29.s[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r10 r11 r12 r13
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v6.s[0] \n"
"fmla v22.4s, v16.4s, v28.s[0] \n"
"fmla v23.4s, v16.4s, v30.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"fmla v22.4s, v17.4s, v28.s[1] \n"
"fmla v23.4s, v17.4s, v30.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v6.s[2] \n"
"fmla v22.4s, v18.4s, v28.s[2] \n"
"fmla v23.4s, v18.4s, v30.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"fmla v22.4s, v19.4s, v28.s[3] \n"
"fmla v23.4s, v19.4s, v30.s[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%2], #32 \n" // r14 r15 r16 r17
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v24.4s, v0.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"fmla v23.4s, v25.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v0.s[2] \n"
"fmla v21.4s, v26.4s, v2.s[2] \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v27.4s, v4.s[3] \n"
"fmla v23.4s, v27.4s, v6.s[3] \n"
"prfm pldl1keep, [%2, #192] \n"
"ld1 {v28.4h, v29.4h, v30.4h}, [%2] \n" // r18 r19 r110
"shll v28.4s, v28.4h, #16 \n"
"shll v29.4s, v29.4h, #16 \n"
"shll v30.4s, v30.4h, #16 \n"
"fmla v20.4s, v16.4s, v1.s[0] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v16.4s, v5.s[0] \n"
"fmla v23.4s, v16.4s, v7.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"fmla v22.4s, v17.4s, v5.s[1] \n"
"fmla v23.4s, v17.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v1.s[2] \n"
"fmla v21.4s, v18.4s, v3.s[2] \n"
"fmla v22.4s, v18.4s, v5.s[2] \n"
"fmla v23.4s, v18.4s, v7.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v19.4s, v5.s[3] \n"
"fmla v23.4s, v19.4s, v7.s[3] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v24.4s, v6.s[0] \n"
"fmla v23.4s, v24.4s, v28.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"fmla v22.4s, v25.4s, v6.s[1] \n"
"fmla v23.4s, v25.4s, v28.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v4.s[2] \n"
"fmla v22.4s, v26.4s, v6.s[2] \n"
"fmla v23.4s, v26.4s, v28.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v27.4s, v6.s[3] \n"
"fmla v23.4s, v27.4s, v28.s[3] \n"
"fmla v20.4s, v16.4s, v3.s[0] \n"
"fmla v21.4s, v16.4s, v5.s[0] \n"
"fmla v22.4s, v16.4s, v7.s[0] \n"
"fmla v23.4s, v16.4s, v29.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v17.4s, v7.s[1] \n"
"fmla v23.4s, v17.4s, v29.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v3.s[2] \n"
"fmla v21.4s, v18.4s, v5.s[2] \n"
"fmla v22.4s, v18.4s, v7.s[2] \n"
"fmla v23.4s, v18.4s, v29.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v19.4s, v7.s[3] \n"
"fmla v23.4s, v19.4s, v29.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r20 r21 r22 r23
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v24.4s, v4.s[0] \n"
"fmla v21.4s, v24.4s, v6.s[0] \n"
"fmla v22.4s, v24.4s, v28.s[0] \n"
"fmla v23.4s, v24.4s, v30.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v6.s[1] \n"
"fmla v22.4s, v25.4s, v28.s[1] \n"
"fmla v23.4s, v25.4s, v30.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v4.s[2] \n"
"fmla v21.4s, v26.4s, v6.s[2] \n"
"fmla v22.4s, v26.4s, v28.s[2] \n"
"fmla v23.4s, v26.4s, v30.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v6.s[3] \n"
"fmla v22.4s, v27.4s, v28.s[3] \n"
"fmla v23.4s, v27.4s, v30.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n" // r24 r25 r26 r27
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%3, #192] \n"
"ld1 {v28.4h, v29.4h, v30.4h}, [%3] \n" // r28 r29 r210
"shll v28.4s, v28.4h, #16 \n"
"shll v29.4s, v29.4h, #16 \n"
"shll v30.4s, v30.4h, #16 \n"
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v28.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v28.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v28.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v28.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v7.s[0] \n"
"fmla v23.4s, v24.4s, v29.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v7.s[1] \n"
"fmla v23.4s, v25.4s, v29.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v7.s[2] \n"
"fmla v23.4s, v26.4s, v29.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v7.s[3] \n"
"fmla v23.4s, v27.4s, v29.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%4], #32 \n" // r30 r31 r32 r33
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v6.s[0] \n"
"fmla v22.4s, v16.4s, v28.s[0] \n"
"fmla v23.4s, v16.4s, v30.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"fmla v22.4s, v17.4s, v28.s[1] \n"
"fmla v23.4s, v17.4s, v30.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v6.s[2] \n"
"fmla v22.4s, v18.4s, v28.s[2] \n"
"fmla v23.4s, v18.4s, v30.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"fmla v22.4s, v19.4s, v28.s[3] \n"
"fmla v23.4s, v19.4s, v30.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4], #32 \n" // r34 r35 r36 r37
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v24.4s, v0.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"fmla v23.4s, v25.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v0.s[2] \n"
"fmla v21.4s, v26.4s, v2.s[2] \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v27.4s, v4.s[3] \n"
"fmla v23.4s, v27.4s, v6.s[3] \n"
"prfm pldl1keep, [%4, #192] \n"
"ld1 {v28.4h, v29.4h, v30.4h}, [%4] \n" // r38 r39 r310
"shll v28.4s, v28.4h, #16 \n"
"shll v29.4s, v29.4h, #16 \n"
"shll v30.4s, v30.4h, #16 \n"
"fmla v20.4s, v16.4s, v1.s[0] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v16.4s, v5.s[0] \n"
"fmla v23.4s, v16.4s, v7.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"fmla v22.4s, v17.4s, v5.s[1] \n"
"fmla v23.4s, v17.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v1.s[2] \n"
"fmla v21.4s, v18.4s, v3.s[2] \n"
"fmla v22.4s, v18.4s, v5.s[2] \n"
"fmla v23.4s, v18.4s, v7.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v19.4s, v5.s[3] \n"
"fmla v23.4s, v19.4s, v7.s[3] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v24.4s, v6.s[0] \n"
"fmla v23.4s, v24.4s, v28.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"fmla v22.4s, v25.4s, v6.s[1] \n"
"fmla v23.4s, v25.4s, v28.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v4.s[2] \n"
"fmla v22.4s, v26.4s, v6.s[2] \n"
"fmla v23.4s, v26.4s, v28.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v27.4s, v6.s[3] \n"
"fmla v23.4s, v27.4s, v28.s[3] \n"
"fmla v20.4s, v16.4s, v3.s[0] \n"
"fmla v21.4s, v16.4s, v5.s[0] \n"
"fmla v22.4s, v16.4s, v7.s[0] \n"
"fmla v23.4s, v16.4s, v29.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v17.4s, v7.s[1] \n"
"fmla v23.4s, v17.4s, v29.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v3.s[2] \n"
"fmla v21.4s, v18.4s, v5.s[2] \n"
"fmla v22.4s, v18.4s, v7.s[2] \n"
"fmla v23.4s, v18.4s, v29.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v19.4s, v7.s[3] \n"
"fmla v23.4s, v19.4s, v29.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n" // r40 r41 r42 r43
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v24.4s, v4.s[0] \n"
"fmla v21.4s, v24.4s, v6.s[0] \n"
"fmla v22.4s, v24.4s, v28.s[0] \n"
"fmla v23.4s, v24.4s, v30.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v6.s[1] \n"
"fmla v22.4s, v25.4s, v28.s[1] \n"
"fmla v23.4s, v25.4s, v30.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v4.s[2] \n"
"fmla v21.4s, v26.4s, v6.s[2] \n"
"fmla v22.4s, v26.4s, v28.s[2] \n"
"fmla v23.4s, v26.4s, v30.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v6.s[3] \n"
"fmla v22.4s, v27.4s, v28.s[3] \n"
"fmla v23.4s, v27.4s, v30.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%5], #32 \n" // r44 r45 r46 r47
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%5, #192] \n"
"ld1 {v28.4h, v29.4h, v30.4h}, [%5] \n" // r48 r49 r410
"shll v28.4s, v28.4h, #16 \n"
"shll v29.4s, v29.4h, #16 \n"
"shll v30.4s, v30.4h, #16 \n"
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v28.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v28.s[1] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v28.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v28.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v7.s[0] \n"
"fmla v23.4s, v24.4s, v29.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v7.s[1] \n"
"fmla v23.4s, v25.4s, v29.s[1] \n"
// "prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6] \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v7.s[2] \n"
"fmla v23.4s, v26.4s, v29.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v7.s[3] \n"
"fmla v23.4s, v27.4s, v29.s[3] \n"
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v6.s[0] \n"
"fmla v22.4s, v16.4s, v28.s[0] \n"
"fmla v23.4s, v16.4s, v30.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"fmla v22.4s, v17.4s, v28.s[1] \n"
"fmla v23.4s, v17.4s, v30.s[1] \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v6.s[2] \n"
"fmla v22.4s, v18.4s, v28.s[2] \n"
"fmla v23.4s, v18.4s, v30.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"fmla v22.4s, v19.4s, v28.s[3] \n"
"fmla v23.4s, v19.4s, v30.s[3] \n"
"sub %6, %6, #768 \n" // kptr -= 24 * 16;
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30");
#else // __aarch64__
asm volatile(
"pld [%0, #512] \n"
"vldm %0, {d24-d31} \n" // sum0 sum1 sum2 sum3
"pld [%1, #256] \n"
"vld1.u16 {d4-d7}, [%1 :64]! \n" // r00 r01 r02 r03
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%1, #256] \n"
"vld1.u16 {d12-d15}, [%1 :64]! \n" // r04 r05 r06 r07
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d2[0] \n"
"vmla.f32 q13, q10, d6[0] \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q10, d14[0] \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vmla.f32 q15, q11, d14[1] \n"
"vmla.f32 q12, q8, d3[0] \n"
"vmla.f32 q13, q8, d7[0] \n"
"vmla.f32 q14, q8, d11[0] \n"
"vmla.f32 q15, q8, d15[0] \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vmla.f32 q14, q9, d11[1] \n"
"vmla.f32 q15, q9, d15[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%1, #128] \n"
"vld1.u16 {d2-d3}, [%1 :64]! \n" // r08 r09
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d6[0] \n"
"vmla.f32 q13, q10, d10[0] \n"
"vmla.f32 q14, q10, d14[0] \n"
"vmla.f32 q15, q10, d2[0] \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
"vmla.f32 q14, q11, d14[1] \n"
"vmla.f32 q15, q11, d2[1] \n"
"vmla.f32 q12, q8, d7[0] \n"
"vmla.f32 q13, q8, d11[0] \n"
"vmla.f32 q14, q8, d15[0] \n"
"vmla.f32 q15, q8, d3[0] \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"vmla.f32 q14, q9, d15[1] \n"
"vmla.f32 q15, q9, d3[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%1, #64] \n"
"vld1.u16 {d5}, [%1 :64] \n" // r010
"vshll.u16 q2, d5, #16 \n"
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d12[0] \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d13[0] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"pld [%2, #256] \n"
"vld1.u16 {d12-d15}, [%2 :64]! \n" // r10 r11 r12 r13
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2 :64]! \n" // r14 r15 r16 r17
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q10, d8[0] \n"
"vmla.f32 q13, q10, d12[0] \n"
"vmla.f32 q14, q10, d0[0] \n"
"vmla.f32 q15, q10, d4[0] \n"
"vmla.f32 q12, q11, d8[1] \n"
"vmla.f32 q13, q11, d12[1] \n"
"vmla.f32 q14, q11, d0[1] \n"
"vmla.f32 q15, q11, d4[1] \n"
"vmla.f32 q12, q8, d9[0] \n"
"vmla.f32 q13, q8, d13[0] \n"
"vmla.f32 q14, q8, d1[0] \n"
"vmla.f32 q15, q8, d5[0] \n"
"vmla.f32 q12, q9, d9[1] \n"
"vmla.f32 q13, q9, d13[1] \n"
"vmla.f32 q14, q9, d1[1] \n"
"vmla.f32 q15, q9, d5[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d10[0] \n"
"vmla.f32 q13, q8, d14[0] \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d10[1] \n"
"vmla.f32 q13, q9, d14[1] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d11[0] \n"
"vmla.f32 q13, q10, d15[0] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d11[1] \n"
"vmla.f32 q13, q11, d15[1] \n"
"vmla.f32 q14, q11, d3[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%2, #128] \n"
"vld1.u16 {d10-d11}, [%2 :64]! \n" // r18 r19
"vshll.u16 q4, d10, #16 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q12, q10, d12[0] \n"
"vmla.f32 q13, q10, d0[0] \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vmla.f32 q12, q11, d12[1] \n"
"vmla.f32 q13, q11, d0[1] \n"
"vmla.f32 q14, q11, d4[1] \n"
"vmla.f32 q15, q11, d8[1] \n"
"vmla.f32 q12, q8, d13[0] \n"
"vmla.f32 q13, q8, d1[0] \n"
"vmla.f32 q14, q8, d5[0] \n"
"vmla.f32 q15, q8, d9[0] \n"
"vmla.f32 q12, q9, d13[1] \n"
"vmla.f32 q13, q9, d1[1] \n"
"vmla.f32 q14, q9, d5[1] \n"
"vmla.f32 q15, q9, d9[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d14[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d14[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d15[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d15[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d7[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%2, #64] \n"
"vld1.u16 {d13}, [%2 :64] \n" // r110
"vshll.u16 q6, d13, #16 \n"
"vmla.f32 q12, q10, d0[0] \n"
"vmla.f32 q13, q10, d4[0] \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d12[0] \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"vmla.f32 q14, q11, d8[1] \n"
"vmla.f32 q15, q11, d12[1] \n"
"vmla.f32 q12, q8, d1[0] \n"
"vmla.f32 q13, q8, d5[0] \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q9, d1[1] \n"
"vmla.f32 q13, q9, d5[1] \n"
"pld [%3, #256] \n"
"vld1.u16 {d4-d7}, [%3 :64]! \n" // r20 r21 r22 r23
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q9, d9[1] \n"
"vmla.f32 q15, q9, d13[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%3, #256] \n"
"vld1.u16 {d12-d15}, [%3 :64]! \n" // r24 r25 r26 r27
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d2[0] \n"
"vmla.f32 q13, q10, d6[0] \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q10, d14[0] \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vmla.f32 q15, q11, d14[1] \n"
"vmla.f32 q12, q8, d3[0] \n"
"vmla.f32 q13, q8, d7[0] \n"
"vmla.f32 q14, q8, d11[0] \n"
"vmla.f32 q15, q8, d15[0] \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vmla.f32 q14, q9, d11[1] \n"
"vmla.f32 q15, q9, d15[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%3, #128] \n"
"vld1.u16 {d2-d3}, [%3 :64]! \n" // r28 r29
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d6[0] \n"
"vmla.f32 q13, q10, d10[0] \n"
"vmla.f32 q14, q10, d14[0] \n"
"vmla.f32 q15, q10, d2[0] \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
"vmla.f32 q14, q11, d14[1] \n"
"vmla.f32 q15, q11, d2[1] \n"
"vmla.f32 q12, q8, d7[0] \n"
"vmla.f32 q13, q8, d11[0] \n"
"vmla.f32 q14, q8, d15[0] \n"
"vmla.f32 q15, q8, d3[0] \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"vmla.f32 q14, q9, d15[1] \n"
"vmla.f32 q15, q9, d3[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%3, #64] \n"
"vld1.u16 {d5}, [%3 :64] \n" // r210
"vshll.u16 q2, d5, #16 \n"
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d12[0] \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d13[0] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d12-d15}, [%4 :64]! \n" // r30 r31 r32 r33
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%4, #256] \n"
"vld1.u16 {d4-d7}, [%4 :64]! \n" // r34 r35 r36 r37
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q10, d8[0] \n"
"vmla.f32 q13, q10, d12[0] \n"
"vmla.f32 q14, q10, d0[0] \n"
"vmla.f32 q15, q10, d4[0] \n"
"vmla.f32 q12, q11, d8[1] \n"
"vmla.f32 q13, q11, d12[1] \n"
"vmla.f32 q14, q11, d0[1] \n"
"vmla.f32 q15, q11, d4[1] \n"
"vmla.f32 q12, q8, d9[0] \n"
"vmla.f32 q13, q8, d13[0] \n"
"vmla.f32 q14, q8, d1[0] \n"
"vmla.f32 q15, q8, d5[0] \n"
"vmla.f32 q12, q9, d9[1] \n"
"vmla.f32 q13, q9, d13[1] \n"
"vmla.f32 q14, q9, d1[1] \n"
"vmla.f32 q15, q9, d5[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d10[0] \n"
"vmla.f32 q13, q8, d14[0] \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d10[1] \n"
"vmla.f32 q13, q9, d14[1] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d11[0] \n"
"vmla.f32 q13, q10, d15[0] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d11[1] \n"
"vmla.f32 q13, q11, d15[1] \n"
"vmla.f32 q14, q11, d3[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%4, #128] \n"
"vld1.u16 {d10-d11}, [%4 :64]! \n" // r38 r39
"vshll.u16 q4, d10, #16 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q12, q10, d12[0] \n"
"vmla.f32 q13, q10, d0[0] \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vmla.f32 q12, q11, d12[1] \n"
"vmla.f32 q13, q11, d0[1] \n"
"vmla.f32 q14, q11, d4[1] \n"
"vmla.f32 q15, q11, d8[1] \n"
"vmla.f32 q12, q8, d13[0] \n"
"vmla.f32 q13, q8, d1[0] \n"
"vmla.f32 q14, q8, d5[0] \n"
"vmla.f32 q15, q8, d9[0] \n"
"vmla.f32 q12, q9, d13[1] \n"
"vmla.f32 q13, q9, d1[1] \n"
"vmla.f32 q14, q9, d5[1] \n"
"vmla.f32 q15, q9, d9[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d14[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d14[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d15[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d15[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d7[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%4, #64] \n"
"vld1.u16 {d13}, [%4 :64] \n" // r310
"vshll.u16 q6, d13, #16 \n"
"vmla.f32 q12, q10, d0[0] \n"
"vmla.f32 q13, q10, d4[0] \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d12[0] \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"vmla.f32 q14, q11, d8[1] \n"
"vmla.f32 q15, q11, d12[1] \n"
"vmla.f32 q12, q8, d1[0] \n"
"vmla.f32 q13, q8, d5[0] \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q9, d1[1] \n"
"vmla.f32 q13, q9, d5[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5 :64]! \n" // r40 r41 r42 r43
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q9, d9[1] \n"
"vmla.f32 q15, q9, d13[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%5, #256] \n"
"vld1.u16 {d12-d15}, [%5 :64]! \n" // r44 r45 r46 r47
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d2[0] \n"
"vmla.f32 q13, q10, d6[0] \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q10, d14[0] \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vmla.f32 q15, q11, d14[1] \n"
"vmla.f32 q12, q8, d3[0] \n"
"vmla.f32 q13, q8, d7[0] \n"
"vmla.f32 q14, q8, d11[0] \n"
"vmla.f32 q15, q8, d15[0] \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vmla.f32 q14, q9, d11[1] \n"
"vmla.f32 q15, q9, d15[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%5, #128] \n"
"vld1.u16 {d2-d3}, [%5 :64]! \n" // r48 r49
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d6[0] \n"
"vmla.f32 q13, q10, d10[0] \n"
"vmla.f32 q14, q10, d14[0] \n"
"vmla.f32 q15, q10, d2[0] \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
"vmla.f32 q14, q11, d14[1] \n"
"vmla.f32 q15, q11, d2[1] \n"
"vmla.f32 q12, q8, d7[0] \n"
"vmla.f32 q13, q8, d11[0] \n"
"vmla.f32 q14, q8, d15[0] \n"
"vmla.f32 q15, q8, d3[0] \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"vmla.f32 q14, q9, d15[1] \n"
"vmla.f32 q15, q9, d3[1] \n"
// "pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128] \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%5, #64] \n"
"vld1.u16 {d5}, [%5 :64] \n" // r410
"vshll.u16 q2, d5, #16 \n"
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d12[0] \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d13[0] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"sub %6, %6, #768 \n" // kptr -= 24 * 16;
"sub %1, %1, #16 \n"
"sub %2, %2, #16 \n"
"sub %3, %3, #16 \n"
"sub %4, %4, #16 \n"
"sub %5, %5, #16 \n"
"vstm %0!, {d24-d31} \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n" // r00 r01 r02 r03
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v20.4s, v21.4s}, [%0] \n" // sum0 sum1
"fmul v22.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmul v23.4s, v16.4s, v2.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"prfm pldl1keep, [%1, #192] \n"
"ld1 {v4.4h, v5.4h, v6.4h}, [%1] \n" // r04 r05 r06
"shll v25.4s, v25.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"fmla v22.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r10 r11 r12 r13
"shll v17.4s, v17.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v23.4s, v24.4s, v2.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v0.s[2] \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"prfm pldl1keep, [%2, #192] \n"
"ld1 {v4.4h, v5.4h, v6.4h}, [%2] \n" // r14 r15 r16
"shll v17.4s, v17.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"fmla v22.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v1.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r20 r21 r22 r23
"shll v25.4s, v25.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v6.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v6.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v23.4s, v16.4s, v2.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"prfm pldl1keep, [%3, #192] \n"
"ld1 {v4.4h, v5.4h, v6.4h}, [%3] \n" // r24 r25 r26
"shll v25.4s, v25.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"fmla v22.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%4], #32 \n" // r30 r31 r32 r33
"shll v17.4s, v17.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v23.4s, v24.4s, v2.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v0.s[2] \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"prfm pldl1keep, [%4, #192] \n"
"ld1 {v4.4h, v5.4h, v6.4h}, [%4] \n" // r34 r35 r36
"shll v17.4s, v17.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"fmla v22.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v1.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n" // r40 r41 r42 r43
"shll v25.4s, v25.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v6.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v6.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v23.4s, v16.4s, v2.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"prfm pldl1keep, [%5, #192] \n"
"ld1 {v4.4h, v5.4h, v6.4h}, [%5] \n" // r44 r45 r46
"shll v25.4s, v25.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"fmla v22.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
// "prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6] \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"fadd v20.4s, v20.4s, v22.4s \n"
"fadd v21.4s, v21.4s, v23.4s \n"
"sub %6, %6, #768 \n" // kptr -= 24 * 16;
"st1 {v20.4s, v21.4s}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
#else // __aarch64__
asm volatile(
"pld [%1, #256] \n"
"vld1.u16 {d4-d7}, [%1 :64]! \n" // r00 r01 r02 r03
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"pld [%0, #256] \n"
"vld1.f32 {d24-d27}, [%0 :128] \n" // sum0 sum1
"vmul.f32 q14, q8, d0[0] \n"
"vmul.f32 q15, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d1[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%1, #192] \n"
"vld1.u16 {d10-d12}, [%1 :64] \n" // r04 r05 r06
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q4, d10, #16 \n"
"vshll.u16 q5, d11, #16 \n"
"vshll.u16 q6, d12, #16 \n"
"vmla.f32 q14, q10, d2[0] \n"
"vmla.f32 q15, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d3[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d5[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d6[0] \n"
"vmla.f32 q15, q10, d10[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d7[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d11[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2 :64]! \n" // r10 r11 r12 r13
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d9[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d13[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d0[0] \n"
"vmla.f32 q15, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d1[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"vmla.f32 q13, q9, d5[1] \n"
"pld [%2, #192] \n"
"vld1.u16 {d10-d12}, [%2 :64] \n" // r14 r15 r16
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q4, d10, #16 \n"
"vshll.u16 q5, d11, #16 \n"
"vshll.u16 q6, d12, #16 \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d3[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d4[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d5[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d7[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d11[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"pld [%3, #256] \n"
"vld1.u16 {d4-d7}, [%3 :64]! \n" // r20 r21 r22 r23
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d12[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d8[1] \n"
"vmla.f32 q13, q11, d12[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d9[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d13[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vmla.f32 q13, q9, d13[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d1[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%3, #192] \n"
"vld1.u16 {d10-d12}, [%3 :64] \n" // r24 r25 r26
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q4, d10, #16 \n"
"vshll.u16 q5, d11, #16 \n"
"vshll.u16 q6, d12, #16 \n"
"vmla.f32 q14, q10, d2[0] \n"
"vmla.f32 q15, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d3[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d5[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d6[0] \n"
"vmla.f32 q15, q10, d10[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d7[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d11[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d4-d7}, [%4 :64]! \n" // r30 r31 r32 r33
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d9[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d13[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d0[0] \n"
"vmla.f32 q15, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d1[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"vmla.f32 q13, q9, d5[1] \n"
"pld [%4, #192] \n"
"vld1.u16 {d10-d12}, [%4 :64] \n" // r34 r35 r36
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q4, d10, #16 \n"
"vshll.u16 q5, d11, #16 \n"
"vshll.u16 q6, d12, #16 \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d3[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d4[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d5[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d7[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d11[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5 :64]! \n" // r40 r41 r42 r43
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d12[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d8[1] \n"
"vmla.f32 q13, q11, d12[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d9[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d13[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vmla.f32 q13, q9, d13[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d1[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%5, #192] \n"
"vld1.u16 {d10-d12}, [%5 :64] \n" // r44 r45 r46
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q4, d10, #16 \n"
"vshll.u16 q5, d11, #16 \n"
"vshll.u16 q6, d12, #16 \n"
"vmla.f32 q14, q10, d2[0] \n"
"vmla.f32 q15, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vmla.f32 q14, q8, d3[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vmla.f32 q14, q10, d5[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d6[0] \n"
"vmla.f32 q15, q10, d10[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
// "pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128] \n"
"vmla.f32 q14, q8, d7[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d11[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"vadd.f32 q12, q12, q14 \n"
"vadd.f32 q13, q13, q15 \n"
"sub %6, %6, #768 \n" // kptr -= 24 * 16;
"vst1.f32 {d24-d27}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j < outw; j++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v20.4s}, [%0] \n" // sum0
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4h, v1.4h}, [%1], #16 \n" // r00 r01
"shll v0.4s, v0.4h, #16 \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmul v21.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmul v22.4s, v17.4s, v0.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmul v23.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%1, #192] \n"
"ld1 {v2.4h, v3.4h, v4.4h}, [%1] \n" // r02 r03 r04
"shll v25.4s, v25.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4h, v1.4h}, [%2], #16 \n" // r10 r11
"shll v17.4s, v17.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v0.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v0.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"prfm pldl1keep, [%2, #192] \n"
"ld1 {v2.4h, v3.4h, v4.4h}, [%2] \n" // r12 r13 r14
"shll v17.4s, v17.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4h, v1.4h}, [%3], #16 \n" // r20 r21
"shll v25.4s, v25.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v0.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%3, #192] \n"
"ld1 {v2.4h, v3.4h, v4.4h}, [%3] \n" // r22 r23 r24
"shll v25.4s, v25.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v0.4h, v1.4h}, [%4], #16 \n" // r30 r31
"shll v17.4s, v17.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v0.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v0.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"prfm pldl1keep, [%4, #192] \n"
"ld1 {v2.4h, v3.4h, v4.4h}, [%4] \n" // r32 r33 r34
"shll v17.4s, v17.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4h, v1.4h}, [%5], #16 \n" // r40 r41
"shll v25.4s, v25.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v0.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%5, #192] \n"
"ld1 {v2.4h, v3.4h, v4.4h}, [%5] \n" // r42 r43 r44
"shll v25.4s, v25.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%6], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
// "prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fadd v22.4s, v21.4s, v22.4s \n"
"fadd v23.4s, v22.4s, v23.4s \n"
"fadd v20.4s, v20.4s, v23.4s \n"
"sub %6, %6, #768 \n" // kptr -= 24 * 16;
"st1 {v20.4s}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
#else // __aarch64__
asm volatile(
"pld [%1, #128] \n"
"vld1.u16 {d2-d3}, [%1 :64]! \n" // r00 r01
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"pld [%0, #128] \n"
"vld1.f32 {d24-d25}, [%0 :128] \n" // sum0
"vmul.f32 q13, q8, d0[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmul.f32 q14, q9, d0[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmul.f32 q15, q10, d1[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%1, #192] \n"
"vld1.u16 {d6-d8}, [%1 :64] \n" // r02 r03 r04
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d8, #16 \n"
"vmla.f32 q13, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"pld [%2, #128] \n"
"vld1.u16 {d2-d3}, [%2 :64]! \n" // r10 r11
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d0[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d0[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d1[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"pld [%2, #192] \n"
"vld1.u16 {d6-d8}, [%2 :64] \n" // r12 r13 r14
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d8, #16 \n"
"vmla.f32 q13, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"pld [%3, #128] \n"
"vld1.u16 {d2-d3}, [%3 :64]! \n" // r20 r21
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d0[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d0[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d1[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%3, #192] \n"
"vld1.u16 {d6-d8}, [%3 :64] \n" // r22 r23 r24
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d8, #16 \n"
"vmla.f32 q13, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"pld [%4, #128] \n"
"vld1.u16 {d2-d3}, [%4 :64]! \n" // r30 r31
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q9, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d0[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d0[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d1[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"pld [%4, #192] \n"
"vld1.u16 {d6-d8}, [%4 :64] \n" // r32 r33 r34
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d8, #16 \n"
"vmla.f32 q13, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d6[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"pld [%5, #128] \n"
"vld1.u16 {d2-d3}, [%5 :64]! \n" // r40 r41
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d8[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d0[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d0[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d1[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%5, #192] \n"
"vld1.u16 {d6-d8}, [%5 :64] \n" // r42 r43 r44
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d8, #16 \n"
"vmla.f32 q13, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d2[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d4[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d16-d19}, [%6 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d6[1] \n"
// "pld [%6, #256] \n"
"vld1.u16 {d20-d23}, [%6 :128] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d8[1] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vadd.f32 q14, q13, q14 \n"
"vadd.f32 q15, q14, q15 \n"
"vadd.f32 q12, q12, q15 \n"
"sub %6, %6, #768 \n" // kptr -= 24 * 16;
"vst1.f32 {d24-d25}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
}
}
for (; q < inch; q++)
{
unsigned short* outptr0_bf16 = top_blob.channel(p);
const float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
const unsigned short* r3 = img0.row<const unsigned short>(3);
const unsigned short* r4 = img0.row<const unsigned short>(4);
const unsigned short* kptr = kernel.channel(p).row<const unsigned short>(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
#if __aarch64__
// NOTE(review): hand-scheduled AArch64 NEON inner loop computing 4 adjacent
// output pixels per iteration (enclosing loop steps j += 4).  Each input row
// r0..r4 (%2..%6) contributes 11 bf16 elements per step, consistent with a
// 5x5 kernel at stride 2 — TODO confirm against the enclosing function
// signature (outside this view).
//
// Data format: inputs and kernel are stored as bf16 (upper 16 bits of fp32).
// "shll vN.4s, vM.4h, #16" shifts the bf16 bit pattern into the high half of
// each 32-bit lane, i.e. a cheap bf16 -> fp32 conversion; the "shrn #16" at
// the end is the inverse (fp32 -> bf16 by truncation) before the bf16 store.
//
// Operands: %0 = bf16 output ptr, %1 = fp32 accumulator ptr (sum0..sum3 are
// loaded into v20..v23 and accumulated with fmla-by-lane), %7 = kernel ptr.
// Kernel loads interleave with the fmla stream to hide load latency.
asm volatile(
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r00 r01 r02 r03
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%2], #32 \n" // r04 r05 r06 r07
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" // sum0 sum1 sum2 sum3
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%2, #192] \n"
"ld1 {v28.4h, v29.4h, v30.4h}, [%2] \n" // r08 r09 r010
"shll v28.4s, v28.4h, #16 \n"
"shll v29.4s, v29.4h, #16 \n"
"shll v30.4s, v30.4h, #16 \n"
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v28.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v28.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v28.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v28.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v7.s[0] \n"
"fmla v23.4s, v24.4s, v29.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v7.s[1] \n"
"fmla v23.4s, v25.4s, v29.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v7.s[2] \n"
"fmla v23.4s, v26.4s, v29.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v7.s[3] \n"
"fmla v23.4s, v27.4s, v29.s[3] \n"
// -- input row 1 (r1, operand %3) --
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r10 r11 r12 r13
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v6.s[0] \n"
"fmla v22.4s, v16.4s, v28.s[0] \n"
"fmla v23.4s, v16.4s, v30.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"fmla v22.4s, v17.4s, v28.s[1] \n"
"fmla v23.4s, v17.4s, v30.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v6.s[2] \n"
"fmla v22.4s, v18.4s, v28.s[2] \n"
"fmla v23.4s, v18.4s, v30.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"fmla v22.4s, v19.4s, v28.s[3] \n"
"fmla v23.4s, v19.4s, v30.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n" // r14 r15 r16 r17
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v24.4s, v0.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"fmla v23.4s, v25.4s, v6.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v0.s[2] \n"
"fmla v21.4s, v26.4s, v2.s[2] \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v27.4s, v4.s[3] \n"
"fmla v23.4s, v27.4s, v6.s[3] \n"
"prfm pldl1keep, [%3, #192] \n"
"ld1 {v28.4h, v29.4h, v30.4h}, [%3] \n" // r18 r19 r110
"shll v28.4s, v28.4h, #16 \n"
"shll v29.4s, v29.4h, #16 \n"
"shll v30.4s, v30.4h, #16 \n"
"fmla v20.4s, v16.4s, v1.s[0] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v16.4s, v5.s[0] \n"
"fmla v23.4s, v16.4s, v7.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"fmla v22.4s, v17.4s, v5.s[1] \n"
"fmla v23.4s, v17.4s, v7.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v1.s[2] \n"
"fmla v21.4s, v18.4s, v3.s[2] \n"
"fmla v22.4s, v18.4s, v5.s[2] \n"
"fmla v23.4s, v18.4s, v7.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v19.4s, v5.s[3] \n"
"fmla v23.4s, v19.4s, v7.s[3] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v24.4s, v6.s[0] \n"
"fmla v23.4s, v24.4s, v28.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"fmla v22.4s, v25.4s, v6.s[1] \n"
"fmla v23.4s, v25.4s, v28.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v4.s[2] \n"
"fmla v22.4s, v26.4s, v6.s[2] \n"
"fmla v23.4s, v26.4s, v28.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v27.4s, v6.s[3] \n"
"fmla v23.4s, v27.4s, v28.s[3] \n"
"fmla v20.4s, v16.4s, v3.s[0] \n"
"fmla v21.4s, v16.4s, v5.s[0] \n"
"fmla v22.4s, v16.4s, v7.s[0] \n"
"fmla v23.4s, v16.4s, v29.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v17.4s, v7.s[1] \n"
"fmla v23.4s, v17.4s, v29.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v3.s[2] \n"
"fmla v21.4s, v18.4s, v5.s[2] \n"
"fmla v22.4s, v18.4s, v7.s[2] \n"
"fmla v23.4s, v18.4s, v29.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v19.4s, v7.s[3] \n"
"fmla v23.4s, v19.4s, v29.s[3] \n"
// -- input row 2 (r2, operand %4) --
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%4], #32 \n" // r20 r21 r22 r23
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v24.4s, v4.s[0] \n"
"fmla v21.4s, v24.4s, v6.s[0] \n"
"fmla v22.4s, v24.4s, v28.s[0] \n"
"fmla v23.4s, v24.4s, v30.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v6.s[1] \n"
"fmla v22.4s, v25.4s, v28.s[1] \n"
"fmla v23.4s, v25.4s, v30.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v4.s[2] \n"
"fmla v21.4s, v26.4s, v6.s[2] \n"
"fmla v22.4s, v26.4s, v28.s[2] \n"
"fmla v23.4s, v26.4s, v30.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v6.s[3] \n"
"fmla v22.4s, v27.4s, v28.s[3] \n"
"fmla v23.4s, v27.4s, v30.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4], #32 \n" // r24 r25 r26 r27
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%4, #192] \n"
"ld1 {v28.4h, v29.4h, v30.4h}, [%4] \n" // r28 r29 r210
"shll v28.4s, v28.4h, #16 \n"
"shll v29.4s, v29.4h, #16 \n"
"shll v30.4s, v30.4h, #16 \n"
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v28.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v28.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v28.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v28.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v7.s[0] \n"
"fmla v23.4s, v24.4s, v29.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v7.s[1] \n"
"fmla v23.4s, v25.4s, v29.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v7.s[2] \n"
"fmla v23.4s, v26.4s, v29.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v7.s[3] \n"
"fmla v23.4s, v27.4s, v29.s[3] \n"
// -- input row 3 (r3, operand %5) --
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n" // r30 r31 r32 r33
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v6.s[0] \n"
"fmla v22.4s, v16.4s, v28.s[0] \n"
"fmla v23.4s, v16.4s, v30.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"fmla v22.4s, v17.4s, v28.s[1] \n"
"fmla v23.4s, v17.4s, v30.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v6.s[2] \n"
"fmla v22.4s, v18.4s, v28.s[2] \n"
"fmla v23.4s, v18.4s, v30.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"fmla v22.4s, v19.4s, v28.s[3] \n"
"fmla v23.4s, v19.4s, v30.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%5], #32 \n" // r34 r35 r36 r37
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v24.4s, v0.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"fmla v23.4s, v25.4s, v6.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v0.s[2] \n"
"fmla v21.4s, v26.4s, v2.s[2] \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v27.4s, v4.s[3] \n"
"fmla v23.4s, v27.4s, v6.s[3] \n"
"prfm pldl1keep, [%5, #192] \n"
"ld1 {v28.4h, v29.4h, v30.4h}, [%5] \n" // r38 r39 r310
"shll v28.4s, v28.4h, #16 \n"
"shll v29.4s, v29.4h, #16 \n"
"shll v30.4s, v30.4h, #16 \n"
"fmla v20.4s, v16.4s, v1.s[0] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v16.4s, v5.s[0] \n"
"fmla v23.4s, v16.4s, v7.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"fmla v22.4s, v17.4s, v5.s[1] \n"
"fmla v23.4s, v17.4s, v7.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v1.s[2] \n"
"fmla v21.4s, v18.4s, v3.s[2] \n"
"fmla v22.4s, v18.4s, v5.s[2] \n"
"fmla v23.4s, v18.4s, v7.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v19.4s, v5.s[3] \n"
"fmla v23.4s, v19.4s, v7.s[3] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v24.4s, v6.s[0] \n"
"fmla v23.4s, v24.4s, v28.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"fmla v22.4s, v25.4s, v6.s[1] \n"
"fmla v23.4s, v25.4s, v28.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v4.s[2] \n"
"fmla v22.4s, v26.4s, v6.s[2] \n"
"fmla v23.4s, v26.4s, v28.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v27.4s, v6.s[3] \n"
"fmla v23.4s, v27.4s, v28.s[3] \n"
"fmla v20.4s, v16.4s, v3.s[0] \n"
"fmla v21.4s, v16.4s, v5.s[0] \n"
"fmla v22.4s, v16.4s, v7.s[0] \n"
"fmla v23.4s, v16.4s, v29.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v17.4s, v7.s[1] \n"
"fmla v23.4s, v17.4s, v29.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v3.s[2] \n"
"fmla v21.4s, v18.4s, v5.s[2] \n"
"fmla v22.4s, v18.4s, v7.s[2] \n"
"fmla v23.4s, v18.4s, v29.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v19.4s, v7.s[3] \n"
"fmla v23.4s, v19.4s, v29.s[3] \n"
// -- input row 4 (r4, operand %6) --
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%6], #32 \n" // r40 r41 r42 r43
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v20.4s, v24.4s, v4.s[0] \n"
"fmla v21.4s, v24.4s, v6.s[0] \n"
"fmla v22.4s, v24.4s, v28.s[0] \n"
"fmla v23.4s, v24.4s, v30.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v6.s[1] \n"
"fmla v22.4s, v25.4s, v28.s[1] \n"
"fmla v23.4s, v25.4s, v30.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v4.s[2] \n"
"fmla v21.4s, v26.4s, v6.s[2] \n"
"fmla v22.4s, v26.4s, v28.s[2] \n"
"fmla v23.4s, v26.4s, v30.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v6.s[3] \n"
"fmla v22.4s, v27.4s, v28.s[3] \n"
"fmla v23.4s, v27.4s, v30.s[3] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6], #32 \n" // r44 r45 r46 r47
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%6, #192] \n"
"ld1 {v28.4h, v29.4h, v30.4h}, [%6] \n" // r48 r49 r410
"shll v28.4s, v28.4h, #16 \n"
"shll v29.4s, v29.4h, #16 \n"
"shll v30.4s, v30.4h, #16 \n"
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v28.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v28.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v28.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v28.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v7.s[0] \n"
"fmla v23.4s, v24.4s, v29.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v7.s[1] \n"
"fmla v23.4s, v25.4s, v29.s[1] \n"
// NOTE(review): final kernel load uses no post-increment, so the "sub #768"
// below (24 writeback loads x 32 bytes) restores kptr exactly to its value
// on entry, ready for the next group of 4 output pixels.
// "prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7] \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v7.s[2] \n"
"fmla v23.4s, v26.4s, v29.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v7.s[3] \n"
"fmla v23.4s, v27.4s, v29.s[3] \n"
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v6.s[0] \n"
"fmla v22.4s, v16.4s, v28.s[0] \n"
"fmla v23.4s, v16.4s, v30.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"fmla v22.4s, v17.4s, v28.s[1] \n"
"fmla v23.4s, v17.4s, v30.s[1] \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v6.s[2] \n"
"fmla v22.4s, v18.4s, v28.s[2] \n"
"fmla v23.4s, v18.4s, v30.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"fmla v22.4s, v19.4s, v28.s[3] \n"
"fmla v23.4s, v19.4s, v30.s[3] \n"
"sub %7, %7, #768 \n" // kptr -= 24 * 16;
// fp32 -> bf16: keep the high 16 bits of each 32-bit lane (truncating round)
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%0], #32 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30");
#else // __aarch64__
asm volatile(
"pld [%1, #512] \n"
"vldm %1!, {d24-d31} \n" // sum0 sum1 sum2 sum3
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2 :64]! \n" // r00 r01 r02 r03
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%2, #256] \n"
"vld1.u16 {d12-d15}, [%2 :64]! \n" // r04 r05 r06 r07
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d2[0] \n"
"vmla.f32 q13, q10, d6[0] \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q10, d14[0] \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vmla.f32 q15, q11, d14[1] \n"
"vmla.f32 q12, q8, d3[0] \n"
"vmla.f32 q13, q8, d7[0] \n"
"vmla.f32 q14, q8, d11[0] \n"
"vmla.f32 q15, q8, d15[0] \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vmla.f32 q14, q9, d11[1] \n"
"vmla.f32 q15, q9, d15[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%2, #128] \n"
"vld1.u16 {d2-d3}, [%2 :64]! \n" // r08 r09
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d6[0] \n"
"vmla.f32 q13, q10, d10[0] \n"
"vmla.f32 q14, q10, d14[0] \n"
"vmla.f32 q15, q10, d2[0] \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
"vmla.f32 q14, q11, d14[1] \n"
"vmla.f32 q15, q11, d2[1] \n"
"vmla.f32 q12, q8, d7[0] \n"
"vmla.f32 q13, q8, d11[0] \n"
"vmla.f32 q14, q8, d15[0] \n"
"vmla.f32 q15, q8, d3[0] \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"vmla.f32 q14, q9, d15[1] \n"
"vmla.f32 q15, q9, d3[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%2, #64] \n"
"vld1.u16 {d5}, [%2 :64] \n" // r010
"vshll.u16 q2, d5, #16 \n"
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d12[0] \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d13[0] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"pld [%3, #256] \n"
"vld1.u16 {d12-d15}, [%3 :64]! \n" // r10 r11 r12 r13
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%3, #256] \n"
"vld1.u16 {d4-d7}, [%3 :64]! \n" // r14 r15 r16 r17
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q10, d8[0] \n"
"vmla.f32 q13, q10, d12[0] \n"
"vmla.f32 q14, q10, d0[0] \n"
"vmla.f32 q15, q10, d4[0] \n"
"vmla.f32 q12, q11, d8[1] \n"
"vmla.f32 q13, q11, d12[1] \n"
"vmla.f32 q14, q11, d0[1] \n"
"vmla.f32 q15, q11, d4[1] \n"
"vmla.f32 q12, q8, d9[0] \n"
"vmla.f32 q13, q8, d13[0] \n"
"vmla.f32 q14, q8, d1[0] \n"
"vmla.f32 q15, q8, d5[0] \n"
"vmla.f32 q12, q9, d9[1] \n"
"vmla.f32 q13, q9, d13[1] \n"
"vmla.f32 q14, q9, d1[1] \n"
"vmla.f32 q15, q9, d5[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d10[0] \n"
"vmla.f32 q13, q8, d14[0] \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d10[1] \n"
"vmla.f32 q13, q9, d14[1] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d11[0] \n"
"vmla.f32 q13, q10, d15[0] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d11[1] \n"
"vmla.f32 q13, q11, d15[1] \n"
"vmla.f32 q14, q11, d3[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%3, #128] \n"
"vld1.u16 {d10-d11}, [%3 :64]! \n" // r18 r19
"vshll.u16 q4, d10, #16 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q12, q10, d12[0] \n"
"vmla.f32 q13, q10, d0[0] \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vmla.f32 q12, q11, d12[1] \n"
"vmla.f32 q13, q11, d0[1] \n"
"vmla.f32 q14, q11, d4[1] \n"
"vmla.f32 q15, q11, d8[1] \n"
"vmla.f32 q12, q8, d13[0] \n"
"vmla.f32 q13, q8, d1[0] \n"
"vmla.f32 q14, q8, d5[0] \n"
"vmla.f32 q15, q8, d9[0] \n"
"vmla.f32 q12, q9, d13[1] \n"
"vmla.f32 q13, q9, d1[1] \n"
"vmla.f32 q14, q9, d5[1] \n"
"vmla.f32 q15, q9, d9[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d14[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d14[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d15[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d15[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d7[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%3, #64] \n"
"vld1.u16 {d13}, [%3 :64] \n" // r110
"vshll.u16 q6, d13, #16 \n"
"vmla.f32 q12, q10, d0[0] \n"
"vmla.f32 q13, q10, d4[0] \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d12[0] \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"vmla.f32 q14, q11, d8[1] \n"
"vmla.f32 q15, q11, d12[1] \n"
"vmla.f32 q12, q8, d1[0] \n"
"vmla.f32 q13, q8, d5[0] \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q9, d1[1] \n"
"vmla.f32 q13, q9, d5[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d4-d7}, [%4 :64]! \n" // r20 r21 r22 r23
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q9, d9[1] \n"
"vmla.f32 q15, q9, d13[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%4, #256] \n"
"vld1.u16 {d12-d15}, [%4 :64]! \n" // r24 r25 r26 r27
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d2[0] \n"
"vmla.f32 q13, q10, d6[0] \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q10, d14[0] \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vmla.f32 q15, q11, d14[1] \n"
"vmla.f32 q12, q8, d3[0] \n"
"vmla.f32 q13, q8, d7[0] \n"
"vmla.f32 q14, q8, d11[0] \n"
"vmla.f32 q15, q8, d15[0] \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vmla.f32 q14, q9, d11[1] \n"
"vmla.f32 q15, q9, d15[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%4, #128] \n"
"vld1.u16 {d2-d3}, [%4 :64]! \n" // r28 r29
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d6[0] \n"
"vmla.f32 q13, q10, d10[0] \n"
"vmla.f32 q14, q10, d14[0] \n"
"vmla.f32 q15, q10, d2[0] \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
"vmla.f32 q14, q11, d14[1] \n"
"vmla.f32 q15, q11, d2[1] \n"
"vmla.f32 q12, q8, d7[0] \n"
"vmla.f32 q13, q8, d11[0] \n"
"vmla.f32 q14, q8, d15[0] \n"
"vmla.f32 q15, q8, d3[0] \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"vmla.f32 q14, q9, d15[1] \n"
"vmla.f32 q15, q9, d3[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%4, #64] \n"
"vld1.u16 {d5}, [%4 :64] \n" // r210
"vshll.u16 q2, d5, #16 \n"
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d12[0] \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d13[0] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d12-d15}, [%5 :64]! \n" // r30 r31 r32 r33
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5 :64]! \n" // r34 r35 r36 r37
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q10, d8[0] \n"
"vmla.f32 q13, q10, d12[0] \n"
"vmla.f32 q14, q10, d0[0] \n"
"vmla.f32 q15, q10, d4[0] \n"
"vmla.f32 q12, q11, d8[1] \n"
"vmla.f32 q13, q11, d12[1] \n"
"vmla.f32 q14, q11, d0[1] \n"
"vmla.f32 q15, q11, d4[1] \n"
"vmla.f32 q12, q8, d9[0] \n"
"vmla.f32 q13, q8, d13[0] \n"
"vmla.f32 q14, q8, d1[0] \n"
"vmla.f32 q15, q8, d5[0] \n"
"vmla.f32 q12, q9, d9[1] \n"
"vmla.f32 q13, q9, d13[1] \n"
"vmla.f32 q14, q9, d1[1] \n"
"vmla.f32 q15, q9, d5[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d10[0] \n"
"vmla.f32 q13, q8, d14[0] \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d10[1] \n"
"vmla.f32 q13, q9, d14[1] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d11[0] \n"
"vmla.f32 q13, q10, d15[0] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d11[1] \n"
"vmla.f32 q13, q11, d15[1] \n"
"vmla.f32 q14, q11, d3[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%5, #128] \n"
"vld1.u16 {d10-d11}, [%5 :64]! \n" // r38 r39
"vshll.u16 q4, d10, #16 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q12, q10, d12[0] \n"
"vmla.f32 q13, q10, d0[0] \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vmla.f32 q12, q11, d12[1] \n"
"vmla.f32 q13, q11, d0[1] \n"
"vmla.f32 q14, q11, d4[1] \n"
"vmla.f32 q15, q11, d8[1] \n"
"vmla.f32 q12, q8, d13[0] \n"
"vmla.f32 q13, q8, d1[0] \n"
"vmla.f32 q14, q8, d5[0] \n"
"vmla.f32 q15, q8, d9[0] \n"
"vmla.f32 q12, q9, d13[1] \n"
"vmla.f32 q13, q9, d1[1] \n"
"vmla.f32 q14, q9, d5[1] \n"
"vmla.f32 q15, q9, d9[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q12, q8, d14[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d14[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d15[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d15[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d7[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"pld [%5, #64] \n"
"vld1.u16 {d13}, [%5 :64] \n" // r310
"vshll.u16 q6, d13, #16 \n"
"vmla.f32 q12, q10, d0[0] \n"
"vmla.f32 q13, q10, d4[0] \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d12[0] \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"vmla.f32 q14, q11, d8[1] \n"
"vmla.f32 q15, q11, d12[1] \n"
"vmla.f32 q12, q8, d1[0] \n"
"vmla.f32 q13, q8, d5[0] \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q9, d1[1] \n"
"vmla.f32 q13, q9, d5[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d4-d7}, [%6 :64]! \n" // r40 r41 r42 r43
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q9, d9[1] \n"
"vmla.f32 q15, q9, d13[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%6, #256] \n"
"vld1.u16 {d12-d15}, [%6 :64]! \n" // r44 r45 r46 r47
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d2[0] \n"
"vmla.f32 q13, q10, d6[0] \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q10, d14[0] \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vmla.f32 q15, q11, d14[1] \n"
"vmla.f32 q12, q8, d3[0] \n"
"vmla.f32 q13, q8, d7[0] \n"
"vmla.f32 q14, q8, d11[0] \n"
"vmla.f32 q15, q8, d15[0] \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vmla.f32 q14, q9, d11[1] \n"
"vmla.f32 q15, q9, d15[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%6, #128] \n"
"vld1.u16 {d2-d3}, [%6 :64]! \n" // r48 r49
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q10, d16, #16 \n"
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q8, d18, #16 \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q12, q10, d6[0] \n"
"vmla.f32 q13, q10, d10[0] \n"
"vmla.f32 q14, q10, d14[0] \n"
"vmla.f32 q15, q10, d2[0] \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
"vmla.f32 q14, q11, d14[1] \n"
"vmla.f32 q15, q11, d2[1] \n"
"vmla.f32 q12, q8, d7[0] \n"
"vmla.f32 q13, q8, d11[0] \n"
"vmla.f32 q14, q8, d15[0] \n"
"vmla.f32 q15, q8, d3[0] \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"vmla.f32 q14, q9, d15[1] \n"
"vmla.f32 q15, q9, d3[1] \n"
// "pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128] \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q11, d23, #16 \n"
"pld [%6, #64] \n"
"vld1.u16 {d5}, [%6 :64] \n" // r410
"vshll.u16 q2, d5, #16 \n"
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d12[0] \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d13[0] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"sub %7, %7, #768 \n" // kptr -= 24 * 16;
"sub %2, %2, #16 \n"
"sub %3, %3, #16 \n"
"sub %4, %4, #16 \n"
"sub %5, %5, #16 \n"
"sub %6, %6, #16 \n"
"vshrn.u32 d24, q12, #16 \n"
"vshrn.u32 d25, q13, #16 \n"
"vshrn.u32 d26, q14, #16 \n"
"vshrn.u32 d27, q15, #16 \n"
"vst1.u16 {d24-d27}, [%0 :64]! \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
// Tail loop over output width: one iteration produces TWO adjacent output
// pixels (j and j+1), each a pack-of-4 output channels.
// NOTE(review): lane usage below shows output j consuming input columns 0-4
// of each row (v0..v4) while output j+1 consumes columns 2-6 (v2..v6), i.e.
// a horizontal shift of 2 between the two outputs over 5 taps/row and 5 input
// rows (%2..%6) -- consistent with a 5x5 stride-2 kernel. The function head is
// outside this chunk; confirm against it.
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
asm volatile(
// Operand/register roles:
//   %0 outptr0_bf16 (bf16 result store), %1 outptr0 (fp32 partial sums),
//   %2..%6 input row pointers r0..r4, %7 kptr (kernel, bf16).
//   v0-v6   : input columns, widened bf16 -> fp32 ("shll ..., #16" shifts each
//             u16 into the high half of a 32-bit lane, i.e. bf16->fp32).
//   v16-v19 / v24-v27 : kernel vectors, double-buffered, widened the same way.
//   v20/v22 : two partial accumulators for output j; v21/v23 for output j+1
//             (v20/v21 start from the sums loaded via %1; v22/v23 start from
//             fmul and are folded in with fadd at the end).
// ---- row r0 (%2): columns r00..r06 ----
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r00 r01 r02 r03
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v20.4s, v21.4s}, [%1], #32 \n" // sum0 sum1
"fmul v22.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmul v23.4s, v16.4s, v2.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"prfm pldl1keep, [%2, #192] \n"
"ld1 {v4.4h, v5.4h, v6.4h}, [%2] \n" // r04 r05 r06
"shll v25.4s, v25.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"fmla v22.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
// ---- row r1 (%3): loaded while the last r0 taps are still multiplying ----
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r10 r11 r12 r13
"shll v17.4s, v17.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v0.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v23.4s, v24.4s, v2.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v0.s[2] \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"prfm pldl1keep, [%3, #192] \n"
"ld1 {v4.4h, v5.4h, v6.4h}, [%3] \n" // r14 r15 r16
"shll v17.4s, v17.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"fmla v22.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v1.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
// ---- row r2 (%4) ----
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%4], #32 \n" // r20 r21 r22 r23
"shll v25.4s, v25.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v6.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v6.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v23.4s, v16.4s, v2.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"prfm pldl1keep, [%4, #192] \n"
"ld1 {v4.4h, v5.4h, v6.4h}, [%4] \n" // r24 r25 r26
"shll v25.4s, v25.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"fmla v22.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
// ---- row r3 (%5) ----
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n" // r30 r31 r32 r33
"shll v17.4s, v17.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v0.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v23.4s, v24.4s, v2.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v0.s[2] \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"prfm pldl1keep, [%5, #192] \n"
"ld1 {v4.4h, v5.4h, v6.4h}, [%5] \n" // r34 r35 r36
"shll v17.4s, v17.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"fmla v22.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v1.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
// ---- row r4 (%6): last input row ----
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%6], #32 \n" // r40 r41 r42 r43
"shll v25.4s, v25.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v6.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v6.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"fmla v23.4s, v16.4s, v2.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"prfm pldl1keep, [%6, #192] \n"
"ld1 {v4.4h, v5.4h, v6.4h}, [%6] \n" // r44 r45 r46
"shll v25.4s, v25.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"fmla v22.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v1.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
// final kernel group: no post-increment; pointer is rewound below instead
// "prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7] \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
// fold the two accumulator halves of each output together
"fadd v20.4s, v20.4s, v22.4s \n"
"fadd v21.4s, v21.4s, v23.4s \n"
"sub %7, %7, #768 \n" // kptr -= 24 * 16;
// fp32 -> bf16 by truncation: keep the high 16 bits of each 32-bit lane
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"st1 {v20.4h, v21.4h}, [%0], #16 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
#else // __aarch64__
asm volatile(
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2 :64]! \n" // r00 r01 r02 r03
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"pld [%1, #256] \n"
"vld1.f32 {d24-d27}, [%1 :128]! \n" // sum0 sum1
"vmul.f32 q14, q8, d0[0] \n"
"vmul.f32 q15, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d1[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%2, #192] \n"
"vld1.u16 {d10-d12}, [%2 :64] \n" // r04 r05 r06
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q4, d10, #16 \n"
"vshll.u16 q5, d11, #16 \n"
"vshll.u16 q6, d12, #16 \n"
"vmla.f32 q14, q10, d2[0] \n"
"vmla.f32 q15, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d3[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d5[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d6[0] \n"
"vmla.f32 q15, q10, d10[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d7[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d11[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"pld [%3, #256] \n"
"vld1.u16 {d4-d7}, [%3 :64]! \n" // r10 r11 r12 r13
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d9[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d13[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d0[0] \n"
"vmla.f32 q15, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d1[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"vmla.f32 q13, q9, d5[1] \n"
"pld [%3, #192] \n"
"vld1.u16 {d10-d12}, [%3 :64] \n" // r14 r15 r16
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q4, d10, #16 \n"
"vshll.u16 q5, d11, #16 \n"
"vshll.u16 q6, d12, #16 \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d3[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d4[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d5[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d7[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d11[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d4-d7}, [%4 :64]! \n" // r20 r21 r22 r23
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d12[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d8[1] \n"
"vmla.f32 q13, q11, d12[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d9[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d13[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vmla.f32 q13, q9, d13[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d1[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%4, #192] \n"
"vld1.u16 {d10-d12}, [%4 :64] \n" // r24 r25 r26
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q4, d10, #16 \n"
"vshll.u16 q5, d11, #16 \n"
"vshll.u16 q6, d12, #16 \n"
"vmla.f32 q14, q10, d2[0] \n"
"vmla.f32 q15, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d3[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d5[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d6[0] \n"
"vmla.f32 q15, q10, d10[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d7[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d11[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5 :64]! \n" // r30 r31 r32 r33
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d9[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d13[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d0[0] \n"
"vmla.f32 q15, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d0[1] \n"
"vmla.f32 q13, q11, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d1[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"vmla.f32 q13, q9, d5[1] \n"
"pld [%5, #192] \n"
"vld1.u16 {d10-d12}, [%5 :64] \n" // r34 r35 r36
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q4, d10, #16 \n"
"vshll.u16 q5, d11, #16 \n"
"vshll.u16 q6, d12, #16 \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d3[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d4[0] \n"
"vmla.f32 q15, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d4[1] \n"
"vmla.f32 q13, q11, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d5[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d7[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d11[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"pld [%6, #256] \n"
"vld1.u16 {d4-d7}, [%6 :64]! \n" // r40 r41 r42 r43
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q10, d8[0] \n"
"vmla.f32 q15, q10, d12[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d8[1] \n"
"vmla.f32 q13, q11, d12[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d9[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d13[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vmla.f32 q13, q9, d13[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d1[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%6, #192] \n"
"vld1.u16 {d10-d12}, [%6 :64] \n" // r44 r45 r46
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q4, d10, #16 \n"
"vshll.u16 q5, d11, #16 \n"
"vshll.u16 q6, d12, #16 \n"
"vmla.f32 q14, q10, d2[0] \n"
"vmla.f32 q15, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d2[1] \n"
"vmla.f32 q13, q11, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vmla.f32 q14, q8, d3[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vmla.f32 q13, q9, d7[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vmla.f32 q14, q10, d5[0] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q14, q10, d6[0] \n"
"vmla.f32 q15, q10, d10[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q12, q11, d6[1] \n"
"vmla.f32 q13, q11, d10[1] \n"
// "pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128] \n"
"vmla.f32 q14, q8, d7[0] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d11[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vmla.f32 q13, q9, d11[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"vadd.f32 q12, q12, q14 \n"
"vadd.f32 q13, q13, q15 \n"
"sub %7, %7, #768 \n" // kptr -= 24 * 16;
"vshrn.u32 d24, q12, #16 \n"
"vshrn.u32 d25, q13, #16 \n"
"vst1.u16 {d24-d25}, [%0 :64]! \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j < outw; j++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v20.4s}, [%1], #16 \n" // sum0
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4h, v1.4h}, [%2], #16 \n" // r00 r01
"shll v0.4s, v0.4h, #16 \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmul v21.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmul v22.4s, v17.4s, v0.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmul v23.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%2, #192] \n"
"ld1 {v2.4h, v3.4h, v4.4h}, [%2] \n" // r02 r03 r04
"shll v25.4s, v25.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4h, v1.4h}, [%3], #16 \n" // r10 r11
"shll v17.4s, v17.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v0.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v0.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v0.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"prfm pldl1keep, [%3, #192] \n"
"ld1 {v2.4h, v3.4h, v4.4h}, [%3] \n" // r12 r13 r14
"shll v17.4s, v17.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v0.4h, v1.4h}, [%4], #16 \n" // r20 r21
"shll v25.4s, v25.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v0.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%4, #192] \n"
"ld1 {v2.4h, v3.4h, v4.4h}, [%4] \n" // r22 r23 r24
"shll v25.4s, v25.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4h, v1.4h}, [%5], #16 \n" // r30 r31
"shll v17.4s, v17.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v0.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v0.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v0.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"prfm pldl1keep, [%5, #192] \n"
"ld1 {v2.4h, v3.4h, v4.4h}, [%5] \n" // r32 r33 r34
"shll v17.4s, v17.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v21.4s, v16.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v1.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v0.4h, v1.4h}, [%6], #16 \n" // r40 r41
"shll v25.4s, v25.4h, #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v0.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v0.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v0.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%6, #192] \n"
"ld1 {v2.4h, v3.4h, v4.4h}, [%6] \n" // r42 r43 r44
"shll v25.4s, v25.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7], #32 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
// "prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7] \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fadd v22.4s, v21.4s, v22.4s \n"
"fadd v23.4s, v22.4s, v23.4s \n"
"fadd v20.4s, v20.4s, v23.4s \n"
"sub %7, %7, #768 \n" // kptr -= 24 * 16;
"shrn v20.4h, v20.4s, #16 \n"
"st1 {v20.4h}, [%0], #8 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
#else // __aarch64__
asm volatile(
"pld [%2, #128] \n"
"vld1.u16 {d2-d3}, [%2 :64]! \n" // r00 r01
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vshll.u16 q8, d20, #16 \n"
"vshll.u16 q9, d21, #16 \n"
"pld [%1, #128] \n"
"vld1.f32 {d24-d25}, [%1 :128]! \n" // sum0
"vmul.f32 q13, q8, d0[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmul.f32 q14, q9, d0[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmul.f32 q15, q10, d1[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%2, #192] \n"
"vld1.u16 {d6-d8}, [%2 :64] \n" // r02 r03 r04
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d8, #16 \n"
"vmla.f32 q13, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"pld [%3, #128] \n"
"vld1.u16 {d2-d3}, [%3 :64]! \n" // r10 r11
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d0[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d0[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d1[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"pld [%3, #192] \n"
"vld1.u16 {d6-d8}, [%3 :64] \n" // r12 r13 r14
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d8, #16 \n"
"vmla.f32 q13, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"pld [%4, #128] \n"
"vld1.u16 {d2-d3}, [%4 :64]! \n" // r20 r21
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d0[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d0[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d1[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%4, #192] \n"
"vld1.u16 {d6-d8}, [%4 :64] \n" // r22 r23 r24
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d8, #16 \n"
"vmla.f32 q13, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"pld [%5, #128] \n"
"vld1.u16 {d2-d3}, [%5 :64]! \n" // r30 r31
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q10, d22, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q9, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d9[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d0[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d0[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d1[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d1[1] \n"
"pld [%5, #192] \n"
"vld1.u16 {d6-d8}, [%5 :64] \n" // r32 r33 r34
"vshll.u16 q9, d21, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d8, #16 \n"
"vmla.f32 q13, q8, d2[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d3[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d3[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d4[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d5[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d5[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d6[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d6[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d7[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d7[1] \n"
"pld [%6, #128] \n"
"vld1.u16 {d2-d3}, [%6 :64]! \n" // r40 r41
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q13, q10, d8[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d8[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d0[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d0[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d1[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%6, #192] \n"
"vld1.u16 {d6-d8}, [%6 :64] \n" // r42 r43 r44
"vshll.u16 q11, d17, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d8, #16 \n"
"vmla.f32 q13, q10, d2[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d2[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128]! \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d3[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d3[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d4[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d4[1] \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :128]! \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d5[0] \n"
"vshll.u16 q10, d16, #16 \n"
"vmla.f32 q12, q11, d5[1] \n"
"vshll.u16 q11, d17, #16 \n"
"vmla.f32 q13, q10, d6[0] \n"
"vshll.u16 q8, d18, #16 \n"
"vmla.f32 q14, q11, d6[1] \n"
// "pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :128] \n"
"vshll.u16 q9, d19, #16 \n"
"vmla.f32 q15, q8, d7[0] \n"
"vshll.u16 q8, d20, #16 \n"
"vmla.f32 q12, q9, d7[1] \n"
"vshll.u16 q9, d21, #16 \n"
"vmla.f32 q13, q8, d8[0] \n"
"vshll.u16 q10, d22, #16 \n"
"vmla.f32 q14, q9, d8[1] \n"
"vshll.u16 q11, d23, #16 \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vadd.f32 q14, q13, q14 \n"
"vadd.f32 q15, q14, q15 \n"
"vadd.f32 q12, q12, q15 \n"
"sub %7, %7, #768 \n" // kptr -= 24 * 16;
"vshrn.u32 d24, q12, #16 \n"
"vst1.u16 {d24}, [%0 :64]! \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
}
}
}
}
|
shared.c | /*
Copyright (c) 2013, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/*******************************************************************
NAME: RefCount
PURPOSE: This program tests the efficiency of exclusive access to a
pair of non-adjacent shared reference counters
USAGE: The program takes as input the total number of times the reference
counters are updated, and the number of threads
involved.
<progname> <# threads> <# iterations>
The output consists of diagnostics to make sure the
algorithm worked, and of timing statistics.
FUNCTIONS CALLED:
Other than OpenMP or standard C functions, the following
functions are used in this program:
wtime()
bail_out()
getpagesize()
HISTORY: Written by Rob Van der Wijngaart, January 2006.
*******************************************************************/
#include <par-res-kern_general.h>
#include <par-res-kern_omp.h>
/* shouldn't need the prototype below, since it is defined in <unistd.h>. But it
depends on the existence of symbols __USE_BSD or _USE_XOPEN_EXTENDED, neither
of which may be present. To avoid warnings, we define the prototype here */
#if !defined(__USE_BSD) && !defined(__USE_XOPEN_EXTENDED)
extern int getpagesize(void);
#endif
int main(int argc, char ** argv)
{
int iterations; /* total number of reference pair counter updates */
int page_fit; /* indicates that counters fit on different pages */
size_t store_size; /* amount of space reserved for counters */
s64Int *pcounter1,
*pcounter2; /* pointers to counters */
s64Int *counter_space; /* pointer to space reserved for counters */
omp_lock_t counter_lock; /* lock that guards access to counters */
double refcount_time; /* timing parameter */
int nthread_input; /* thread parameters */
int nthread;
s64Int sum_distance=0,
sum_distance2=0; /* distance and distance squared between
reference counter updates by the same thread,
summed over all threads */
double avg_distance,
avg_distance2; /* distances averaged over all threads */
int num_error=0; /* flag that signals that requested and obtained
numbers of threads are the same */
/*********************************************************************
** process and test input parameters
*********************************************************************/
if (argc != 3){
printf("Usage: %s <# threads> <# counter pair updates>\n", *argv);
return(1);
}
nthread_input = atoi(*++argv);
if ((nthread_input < 1) || (nthread_input > MAX_THREADS)) {
printf("ERROR: Invalid number of threads: %d\n", nthread_input);
exit(EXIT_FAILURE);
}
iterations = atoi(*++argv);
if (iterations < 1){
printf("ERROR: iterations must be >= 1 : %d \n",iterations);
exit(EXIT_FAILURE);
}
omp_set_num_threads(nthread_input);
/* initialize shared counters; we put them on different pages, if possible.
If the page size equals the whole memory, this will fail, and we reduce
the space required */
page_fit = 1;
store_size = (size_t) getpagesize();
#ifdef VERBOSE
printf("Page size = %d\n", getpagesize());
#endif
counter_space = (s64Int *) malloc(store_size+sizeof(s64Int));
while (!counter_space && store_size>2*sizeof(s64Int)) {
page_fit=0;
store_size/=2;
counter_space = (s64Int *) malloc(store_size+sizeof(s64Int));
}
if (!counter_space) {
printf("ERROR: could not allocate space for counters\n");
exit(EXIT_FAILURE);
}
#ifdef VERBOSE
if (!page_fit) printf("Counters do not fit on different pages\n");
else printf("Counters fit on different pages\n");
#endif
pcounter1 = counter_space;
pcounter2 = counter_space + store_size/sizeof(s64Int);
(*pcounter1) = 0;
(*pcounter2) = 0;
/* initialize the lock on which we will be pounding */
omp_init_lock(&counter_lock);
#pragma omp parallel reduction(+:sum_distance,sum_distance2)
{
int iter; /* dummy */
/* we declare everything the same type/length to avoid conversions */
s64Int oldcounter;/* previous thread value of reference counter */
s64Int newcounter;/* current thread value of reference counter */
s64Int distance; /* distance between successive counter updates by
same thread */
#pragma omp master
{
nthread = omp_get_num_threads();
printf("OpenMP exclusive access test RefCount, shared counters\n");
if (nthread != nthread_input) {
num_error = 1;
printf("ERROR: number of requested threads %d does not equal ",
nthread_input);
printf("number of spawned threads %d\n", nthread);
}
else {
printf("Number of threads = %i\n",nthread_input);
printf("Number of counter pair updates = %i\n", iterations);
}
}
bail_out(num_error);
#pragma omp master
{
refcount_time = wtime();
}
/* the first iteration that any thread does initializes oldcounter.
We could treat this situation with a test in the main loop, but
that adds overhead to each iteration, so we keep it separate */
omp_set_lock(&counter_lock);
(*pcounter1)++;
#ifdef VERBOSE
oldcounter=*pcounter1;
#endif
(*pcounter2)++;
omp_unset_lock(&counter_lock);
#pragma omp for
/* start with iteration nthread to take into account pre-loop iter */
for (iter=nthread; iter<iterations; iter++) {
omp_set_lock(&counter_lock);
/* keep stuff within lock region as brief as possible */
#ifdef VERBOSE
distance = ((*pcounter1)++)-oldcounter;
oldcounter = (*pcounter1);
#else
(*pcounter1)++;
#endif
(*pcounter2)++;
omp_unset_lock(&counter_lock);
#ifdef VERBOSE
sum_distance += distance;
sum_distance2 += distance*distance;
#endif
}
#pragma omp master
{
refcount_time = wtime() - refcount_time;
}
} /* end of OpenMP parallel region */
#ifdef VERBOSE
if (iterations > 1) {
avg_distance = (double) sum_distance/(iterations-1);
avg_distance2 = (double) sum_distance2/(iterations-1);
}
#endif
if ((*pcounter1) != iterations || (*pcounter1) != (*pcounter2)) {
printf("ERROR: Incorrect or inconsistent counter values "FSTR64U,
(*pcounter1));
printf(FSTR64U"; should be %d\n", (*pcounter2), iterations);
}
else {
#ifdef VERBOSE
printf("Solution validates; Correct counter value of "FSTR64"\n", (*pcounter1));
#else
printf("Solution validates\n");
#endif
printf("Rate (CPUPs/s): %d, time (s): %lf\n",
(int)(iterations/refcount_time), refcount_time);
#ifdef VERBOSE
if (iterations > 1) {
printf("Average update distance: %lf\n", avg_distance);
printf("Standard deviation of update distance: %lf\n",
sqrt(avg_distance2-avg_distance*avg_distance));
printf("Mean and standard deviation of update distance for random locks: ");
printf("%lf, %lf\n", (double)(nthread-1), sqrt((double)(nthread*(nthread-1))));
printf(" fair locks: ");
printf("%lf, %lf\n", (double)(nthread-1), 0.0);
}
#endif
}
exit(EXIT_SUCCESS);
}
|
device_utilities.h | /**
*
* OHIO STATE UNIVERSITY SOFTWARE DISTRIBUTION LICENSE
*
* Parallel CCD++ on GPU (the “Software”) Copyright (c) 2017, The Ohio State
* University. All rights reserved.
*
* The Software is available for download and use subject to the terms and
* conditions of this License. Access or use of the Software constitutes acceptance
* and agreement to the terms and conditions of this License. Redistribution and
* use of the Software in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the capitalized paragraph below.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the capitalized paragraph below in the documentation
* and/or other materials provided with the distribution.
*
* 3. The names of Ohio State University, or its faculty, staff or students may not
* be used to endorse or promote products derived from the Software without
* specific prior written permission.
*
* This software was produced with support from the National Science Foundation
* (NSF) through Award 1629548. Nothing in this work should be construed as
* reflecting the official policy or position of the Defense Department, the United
* States government, Ohio State University.
*
* THIS SOFTWARE HAS BEEN APPROVED FOR PUBLIC RELEASE, UNLIMITED DISTRIBUTION. THE
* SOFTWARE IS PROVIDED “AS IS” AND WITHOUT ANY EXPRESS, IMPLIED OR STATUTORY
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF ACCURACY, COMPLETENESS,
* NONINFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. ACCESS OR USE OF THE SOFTWARE IS ENTIRELY AT THE USER’S RISK. IN
* NO EVENT SHALL OHIO STATE UNIVERSITY OR ITS FACULTY, STAFF OR STUDENTS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THE SOFTWARE
* USER SHALL INDEMNIFY, DEFEND AND HOLD HARMLESS OHIO STATE UNIVERSITY AND ITS
* FACULTY, STAFF AND STUDENTS FROM ANY AND ALL CLAIMS, ACTIONS, DAMAGES, LOSSES,
* LIABILITIES, COSTS AND EXPENSES, INCLUDING ATTORNEYS’ FEES AND COURT COSTS,
* DIRECTLY OR INDIRECTLY ARISING OUT OF OR IN CONNECTION WITH ACCESS OR USE OF THE
* SOFTWARE.
*
*/
/**
*
* Author:
* Israt (nisa.1@osu.edu)
*
* Contacts:
* Israt (nisa.1@osu.edu)
* Aravind Sukumaran-Rajam (sukumaranrajam.1@osu.edu)
* P. (Saday) Sadayappan (sadayappan.1@osu.edu)
*
*/
#include "util.h"
const int THREADLOAD = 2;
int NUM_THRDS = 10;
/* Record `start` on stream `streamT` to mark the beginning of a timed
 * region; pair with cuda_timerEnd() to obtain the elapsed time in ms. */
void cuda_timerStart(cudaEvent_t start, cudaStream_t streamT) {
cudaEventRecord(start, streamT);
}
/* Close the timed region opened by cuda_timerStart().
 * Drains the whole device, records `stop` on `streamT`, waits for it, and
 * returns the elapsed time between `start` and `stop` in milliseconds. */
float cuda_timerEnd(cudaEvent_t start, cudaEvent_t stop, cudaStream_t streamT) {
    float elapsed_ms = 0;
    cudaDeviceSynchronize();          /* make sure all pending work is done */
    cudaEventRecord(stop, streamT);
    cudaEventSynchronize(stop);       /* block until `stop` has been reached */
    cudaEventElapsedTime(&elapsed_ms, start, stop);
    return elapsed_ms;
}
/* Copy the nonzero values of R into the flat buffer copy_R, walking the
 * CSC column pointers in parallel over columns.
 * NOTE(review): values come from get_csr_val() while offsets come from
 * get_csc_col_ptr(); assumes both views index one shared value array --
 * confirm against SparseMatrix. */
void copy_R(SparseMatrix &R, DTYPE *copy_R) //R to R copy
{
    auto val_ptr = R.get_csr_val();
#pragma omp parallel for
    for (int c = 0; c < R.cols_; ++c) {
        /* use long for nnz offsets to avoid int overflow on large matrices
           (consistent with make_tile / make_tile_odd) */
        for (long idx = R.get_csc_col_ptr()[c]; idx < R.get_csc_col_ptr()[c + 1];
                ++idx)
            copy_R[idx] = val_ptr[idx];
    }
}
/* Inverse of copy_R: write the values stored in copy_R back into R's
 * value array, walking the CSC column pointers in parallel over columns. */
void copy_R1(DTYPE *copy_R, SparseMatrix &R) {
    auto val_ptr = R.get_csr_val();
#pragma omp parallel for
    for (int c = 0; c < R.cols_; ++c) {
        /* long nnz offset avoids int overflow on large matrices
           (consistent with make_tile / make_tile_odd) */
        for (long idx = R.get_csc_col_ptr()[c]; idx < R.get_csc_col_ptr()[c + 1];
                ++idx)
            val_ptr[idx] = copy_R[idx];
    }
}
/* For each column c, record in tiled_bin[t][c] the offset of the first
 * nonzero whose row index falls in row-tile t (tiles of TS rows).
 * tiled_bin[0][c] is the column's start offset.
 *
 * BUG FIX: the original while condition read
 * R.get_csc_row_indx()[idx] BEFORE checking idx against the column end,
 * reading one element past the column's nonzeros (and past the array for
 * the last column). The bounds check must come first. */
void make_tile(SparseMatrix &R, MatInt &tiled_bin, const int TS) {
#pragma omp parallel for
    for (int c = 0; c < R.cols_; ++c) {
        long idx = R.get_csc_col_ptr()[c];
        tiled_bin[0][c] = idx;
        for (int tile = TS; tile < (R.rows_ + TS - 1); tile += TS) {
            int tile_no = tile / TS; // - 1;
            /* advance to the first entry at or beyond this tile boundary;
               check the offset bound before dereferencing row_indx */
            while (idx < R.get_csc_col_ptr()[c + 1]
                    && R.get_csc_row_indx()[idx] < tile) {
                idx++;
            }
            tiled_bin[tile_no][c] = idx;
        }
    }
}
/* Same as make_tile but with tile boundaries shifted by TS/2 (boundaries at
 * TS+TS/2, 2*TS+TS/2, ...), used for the "odd" half-shifted tiling pass.
 *
 * BUG FIX: as in make_tile, the original dereferenced
 * R.get_csc_row_indx()[idx] before checking idx against the column end --
 * an out-of-bounds read. The bounds check must come first. */
void make_tile_odd(SparseMatrix &R, MatInt &tiled_bin, const int TS) {
#pragma omp parallel for
    for (int c = 0; c < R.cols_; ++c) {
        long idx = R.get_csc_col_ptr()[c];
        tiled_bin[0][c] = idx;
        for (int tile = TS + (TS / 2); tile < (R.rows_ + (TS + (TS / 2)) - 1);
                tile += TS) {
            int tile_no = tile / TS; // - 1;
            /* check the offset bound before dereferencing row_indx */
            while (idx < R.get_csc_col_ptr()[c + 1]
                    && R.get_csc_row_indx()[idx] < tile) {
                idx++;
            }
            tiled_bin[tile_no][c] = idx;
        }
    }
}
/* Build per-bin column groups for the tiled kernels.
 * Bin b nominally holds columns with nnz in [LB[b], UB[b]); the bounds are
 * powers of two scaled by THREADLOAD. The binned path is currently
 * disabled: every column is placed, in order, into bin 6 ("non-binned"),
 * exactly as in the original. tiled_bin and tile_no are accepted for the
 * (disabled) binned variant and are otherwise unused. */
void tiled_binning(SparseMatrix &R, int *host_rowGroupPtr, int *LB, int *UB,
        int *count, MatInt &tiled_bin, const int tile_no) {
    /* set up the geometric bin boundaries (kept for the binned variant) */
    for (int b = 0; b < NUM_THRDS; b++) {
        count[b] = 0;
        UB[b] = (1 << b) * THREADLOAD;
        LB[b] = UB[b] >> 1;
    }
    LB[0] = 0;
    UB[NUM_THRDS - 1] = R.max_col_nnz_ + 1;
    /* non-binned path: all columns go into bin 6 in column order */
    const int bin = 6;
    count[bin] = 0;
    for (int col = 0; col < R.cols_; col++) {
        host_rowGroupPtr[R.cols_ * bin + count[bin]++] = col;
    }
}
/* Group columns of R into NUM_THRDS bins by nonzero count.
 * Bin b accepts columns with LB[b] < nnz < UB[b] where UB grows
 * geometrically with THREADLOAD; the last bin is widened to catch the
 * densest column. One OpenMP thread scans all columns for its own bin, so
 * each count[b] is written by exactly one thread.
 * NOTE(review): assumes the runtime actually grants NUM_THRDS threads --
 * with fewer threads some bins would stay empty; confirm. */
void binning(SparseMatrix &R, int *host_rowGroupPtr, int *LB, int *UB,
        int *count) {
    for (int b = 0; b < NUM_THRDS; b++) {
        count[b] = 0;
        UB[b] = (1 << b) * THREADLOAD + 1;
        LB[b] = UB[b] >> 1;
    }
    LB[0] = 0;
    UB[NUM_THRDS - 1] = R.max_col_nnz_ + 1;
    omp_set_num_threads(NUM_THRDS); // one CPU thread per bin
#pragma omp parallel
    {
        const int bin = omp_get_thread_num();
        for (int col = 0; col < R.cols_; col++) {
            const int nnz = R.get_csc_col_ptr()[col + 1] - R.get_csc_col_ptr()[col];
            if (nnz > LB[bin] && nnz < UB[bin]) {
                host_rowGroupPtr[R.cols_ * bin + count[bin]++] = col;
            }
        }
    }
}
/* Kernel: H[c][t] = temp_H[c][t] / nnz(c) for every column c with at least
 * one nonzero, where nnz(c) comes from the CSC column pointers. One thread
 * per column; columns with nnz == 0 are left untouched. */
__global__ void weighted_H_all(int const* __restrict__ R_colPtr,
        DTYPE * __restrict__ H, DTYPE * __restrict__ temp_H, int m, int k) {
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col >= m)
        return;
    const int nnz = R_colPtr[col + 1] - R_colPtr[col];
    if (nnz == 0)
        return;                       /* avoid division by zero */
    for (int t = 0; t < k; ++t)
        H[col * k + t] = temp_H[col * k + t] / nnz;
}
/* Kernel: same scaling as weighted_H_all, but the per-column nonzero count
 * is bounded by R_rowLim[c] (a tile-limited end offset) instead of the full
 * column end R_colPtr[c+1]. One thread per column. */
__global__ void weighted_H(int const* __restrict__ R_colPtr,
        int const* __restrict__ R_rowLim, DTYPE * __restrict__ H,
        DTYPE * __restrict__ temp_H, int m, int k) {
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col >= m)
        return;
    const int nnz = R_rowLim[col] - R_colPtr[col];
    if (nnz == 0)
        return;                       /* avoid division by zero */
    for (int t = 0; t < k; ++t)
        H[col * k + t] = temp_H[col * k + t] / nnz;
}
/* Kernel: v[c] = g[c] / h[c], with v[c] = 0 when h[c] == 0.
 * One thread per element. R_colPtr and lambda are accepted to match the
 * shared launch signature but are not used here. */
__global__ void assignment(int const* __restrict__ R_colPtr,
        DTYPE * __restrict__ v, DTYPE * __restrict__ g, DTYPE *__restrict__ h,
        DTYPE lambda, int m) {
    const int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (c < m) {
        const DTYPE numer = g[c];
        const DTYPE denom = h[c];
        v[c] = (denom == 0) ? 0 : numer / denom;
    }
}
/* Kernel: for each test entry c, accumulate the rank-k prediction
 * pred_v[c] += sum_t W[t][i-1] * H[t][j-1] (W, H stored feature-major with
 * leading dimensions `rows` / `cols`) and store the squared error against
 * test_val[c] in rmse[c]. Indices in test_row/test_col are 1-based.
 *
 * Improvement: the original re-loaded test_row[c] and test_col[c] from
 * global memory on every iteration of the k-loop; they are loop-invariant,
 * so load them once, and accumulate into a register before the single
 * write-back of pred_v[c]. Result is identical. */
__global__ void GPU_rmse(int const* __restrict__ test_row,
        int const * __restrict__ test_col, DTYPE const * __restrict__ test_val,
        DTYPE * __restrict__ pred_v, DTYPE * __restrict__ rmse,
        DTYPE const * __restrict__ W, DTYPE const * __restrict__ H, int m,
        int k, int rows, int cols) {
    const int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (c < m) {
        const int i = test_row[c];     /* 1-based row of this test entry */
        const int j = test_col[c];     /* 1-based column of this test entry */
        DTYPE pred = pred_v[c];        /* keep the += semantics of the original */
        for (int t = 0; t < k; t++) {
            pred += W[t * rows + (i - 1)] * H[t * cols + (j - 1)]; //W[i-1][t] * H[j-1][t];
        }
        pred_v[c] = pred;
        rmse[c] = (pred - test_val[c]) * (pred - test_val[c]);
    }
}
|
owl_ndarray_maths_map_omp.h | /*
* OWL - OCaml Scientific Computing
* Copyright (c) 2016-2022 Liang Wang <liang@ocaml.xyz>
*/
#ifdef OWL_ENABLE_TEMPLATE
#ifndef INIT // Because some functions do not really utilise this,
#define INIT // so define an empty string as default.
#endif
#include "owl_core_engine.h"
#include "owl_omp_parameters.h"
// function to perform mapping of elements from x to y
#ifdef FUN4
#undef OWL_OMP_THRESHOLD
#define OWL_OMP_THRESHOLD OWL_OMP_THRESHOLD_FUN(FUN4)
/* Template: element-wise map y[i] = MAPFN(x[i]) over N bigarray elements.
 * NUMBER/NUMBER1/MAPFN/INIT are supplied by the including translation unit.
 * Uses OpenMP when N >= OWL_OMP_THRESHOLD; the GIL is released while the
 * raw buffers are processed (no OCaml allocation may happen in MAPFN). */
CAMLprim value FUN4(value vN, value vX, value vY)
{
CAMLparam3(vN, vX, vY);
int N = Long_val(vN);
INIT;
struct caml_ba_array *X = Caml_ba_array_val(vX);
NUMBER *X_data = (NUMBER *) X->data;
struct caml_ba_array *Y = Caml_ba_array_val(vY);
NUMBER1 *Y_data = (NUMBER1 *) Y->data;
NUMBER *start_x, *stop_x;
NUMBER1 *start_y;
caml_release_runtime_system(); /* Allow other threads */
start_x = X_data;
stop_x = start_x + N;
start_y = Y_data;
if (N >= OWL_OMP_THRESHOLD) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < N; i++) {
NUMBER x = *(start_x + i);
*(start_y + i) = (MAPFN(x));
}
}
else {
/* small-N path: plain pointer walk avoids OpenMP startup overhead */
while (start_x != stop_x) {
NUMBER x = *start_x;
*start_y = (MAPFN(x));
start_x += 1;
start_y += 1;
};
}
caml_acquire_runtime_system(); /* Disallow other threads */
CAMLreturn(Val_unit);
}
#endif /* FUN4 */
// function to map elements in [x] w.r.t scalar values, linspace and etc.
#ifdef FUN12
/* Template: in-place map over x w.r.t. scalars a and b (e.g. linspace).
 * MAPFN receives an lvalue element; the 1-based loop index lets MAPFN
 * reference position i when it needs it (e.g. linspace step).
 * NOTE(review): only vX is registered via CAMLparam1; vN/vA/vB are read
 * before any OCaml allocation could move them -- confirm MAPFN/INIT never
 * allocate on the OCaml heap. */
CAMLprim value FUN12(value vN, value vA, value vB, value vX)
{
CAMLparam1(vX);
int N = Long_val(vN);
INIT;
struct caml_ba_array *X = Caml_ba_array_val(vX);
NUMBER *X_data = (NUMBER *) X->data;
caml_release_runtime_system(); /* Allow other threads */
if (N >= OWL_OMP_THRESHOLD_DEFAULT) {
#pragma omp parallel for schedule(static)
for (int i = 1; i <= N; i++) {
MAPFN(*(X_data + i - 1));
}
}
else {
for (int i = 1; i <= N; i++) {
MAPFN(*X_data);
X_data++;
}
}
caml_acquire_runtime_system(); /* Disallow other threads */
CAMLreturn(Val_unit);
}
#endif /* FUN12 */
// function to calculate logspace_base function
#ifdef FUN13
/* Template: logspace with an explicit base; serial in-place walk over x.
 * MAPFN here takes a POINTER to the element (unlike FUN12's lvalue form). */
CAMLprim value FUN13(value vN, value vBase, value vA, value vB, value vX)
{
CAMLparam1(vX);
int N = Long_val(vN);
INIT;
struct caml_ba_array *X = Caml_ba_array_val(vX);
NUMBER *X_data = (NUMBER *) X->data;
caml_release_runtime_system(); /* Allow other threads */
for (int i = 1; i <= N; i++) {
MAPFN(X_data);
X_data++;
}
caml_acquire_runtime_system(); /* Disallow other threads */
CAMLreturn(Val_unit);
}
#endif /* FUN13 */
// TODO: this needs to be unified with FUN4 in future
// similar to FUN4, but mostly for complex numbers
#ifdef FUN14
/* Template: like FUN4 but MAPFN takes source/destination POINTERS, which
 * suits complex-number types; x -> y over N elements, OpenMP above the
 * default threshold. */
CAMLprim value FUN14(value vN, value vX, value vY)
{
CAMLparam3(vN, vX, vY);
int N = Long_val(vN);
struct caml_ba_array *X = Caml_ba_array_val(vX);
NUMBER *X_data = (NUMBER *) X->data;
struct caml_ba_array *Y = Caml_ba_array_val(vY);
NUMBER1 *Y_data = (NUMBER1 *) Y->data;
NUMBER *start_x;
NUMBER1 *start_y;
caml_release_runtime_system(); /* Allow other threads */
start_x = X_data;
start_y = Y_data;
if (N >= OWL_OMP_THRESHOLD_DEFAULT) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < N; i++) {
MAPFN((start_x + i), (start_y + i));
}
}
else {
for (int i = 0; i < N; i++) {
MAPFN(start_x, start_y);
start_x += 1;
start_y += 1;
}
}
caml_acquire_runtime_system(); /* Disallow other threads */
CAMLreturn(Val_unit);
}
#endif /* FUN14 */
// function to map pairwise elements in x and y then save results to z
#ifdef FUN15
/* Template: pairwise map z[i] = MAPFN(x[i], y[i]) over N elements, with
 * pointer-style MAPFN; OpenMP above the default threshold. */
CAMLprim value FUN15(value vN, value vX, value vY, value vZ)
{
CAMLparam4(vN, vX, vY, vZ);
int N = Long_val(vN);
struct caml_ba_array *X = Caml_ba_array_val(vX);
NUMBER *X_data = (NUMBER *) X->data;
struct caml_ba_array *Y = Caml_ba_array_val(vY);
NUMBER1 *Y_data = (NUMBER1 *) Y->data;
struct caml_ba_array *Z = Caml_ba_array_val(vZ);
NUMBER2 *Z_data = (NUMBER2 *) Z->data;
NUMBER *start_x, *stop_x;
NUMBER1 *start_y;
NUMBER2 *start_z;
caml_release_runtime_system(); /* Allow other threads */
start_x = X_data;
stop_x = start_x + N;
start_y = Y_data;
start_z = Z_data;
if (N >= OWL_OMP_THRESHOLD_DEFAULT) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < N; i++) {
MAPFN((start_x + i), (start_y + i), (start_z + i));
}
}
else {
/* serial path walks all three cursors in lock-step */
while (start_x != stop_x) {
MAPFN(start_x, start_y, start_z);
start_x += 1;
start_y += 1;
start_z += 1;
}
}
caml_acquire_runtime_system(); /* Disallow other threads */
CAMLreturn(Val_unit);
}
#endif /* FUN15 */
// function to map all elements in [x] w.r.t to [a] then save results to [y]
#ifdef FUN17
/* Template: map x -> y with respect to scalar a (a is consumed by INIT /
 * MAPFN, not read here); pointer-style MAPFN, OpenMP above the default
 * threshold. */
CAMLprim value FUN17(value vN, value vX, value vY, value vA)
{
CAMLparam4(vN, vX, vY, vA);
int N = Long_val(vN);
INIT;
struct caml_ba_array *X = Caml_ba_array_val(vX);
NUMBER *X_data = (NUMBER *) X->data;
struct caml_ba_array *Y = Caml_ba_array_val(vY);
NUMBER1 *Y_data = (NUMBER1 *) Y->data;
NUMBER *start_x;
NUMBER1 *start_y;
caml_release_runtime_system(); /* Allow other threads */
start_x = X_data;
start_y = Y_data;
if (N >= OWL_OMP_THRESHOLD_DEFAULT) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < N; i++) {
MAPFN((start_x + i), (start_y + i));
}
}
else {
for (int i = 0; i < N; i++) {
MAPFN(start_x, start_y);
start_x += 1;
start_y += 1;
}
}
caml_acquire_runtime_system(); /* Disallow other threads */
CAMLreturn(Val_unit);
}
#endif /* FUN17 */
// function to map elements in [x] w.r.t [a] and [b] then save results to [x]
#ifdef FUN18
/* Template: in-place map over x w.r.t. scalars a and b; serial pointer
 * walk (no OpenMP variant for this family). */
CAMLprim value FUN18(value vN, value vX, value vA, value vB)
{
CAMLparam4(vN, vX, vA, vB);
int N = Long_val(vN);
INIT;
struct caml_ba_array *X = Caml_ba_array_val(vX);
NUMBER *X_data = (NUMBER *) X->data;
NUMBER *start_x, *stop_x;
caml_release_runtime_system(); /* Allow other threads */
start_x = X_data;
stop_x = start_x + N;
while (start_x != stop_x) {
MAPFN(start_x);
start_x += 1;
};
caml_acquire_runtime_system(); /* Disallow other threads */
CAMLreturn(Val_unit);
}
#endif /* FUN18 */
// function to map x to y with explicit offset, step size, number of ops
#ifdef FUN19
/* Template: strided map x -> y with explicit offsets (ofsx/ofsy) and
 * strides (incx/incy); N applications of MAPFN. The OpenMP path indexes
 * i*inc from the base so iterations are independent. */
CAMLprim value FUN19_IMPL(
value vN,
value vX, value vOFSX, value vINCX,
value vY, value vOFSY, value vINCY
)
{
CAMLparam5(vN, vX, vOFSX, vINCX, vY);
CAMLxparam2(vOFSY, vINCY);
int N = Long_val(vN);
int ofsx = Long_val(vOFSX);
int incx = Long_val(vINCX);
int ofsy = Long_val(vOFSY);
int incy = Long_val(vINCY);
INIT;
struct caml_ba_array *X = Caml_ba_array_val(vX);
NUMBER *X_data = (NUMBER *) X->data;
struct caml_ba_array *Y = Caml_ba_array_val(vY);
NUMBER1 *Y_data = (NUMBER1 *) Y->data;
NUMBER *start_x;
NUMBER1 *start_y;
caml_release_runtime_system(); /* Allow other threads */
start_x = X_data + ofsx;
start_y = Y_data + ofsy;
if (N >= OWL_OMP_THRESHOLD_DEFAULT) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < N; i++) {
MAPFN((start_x + i * incx), (start_y + i * incy));
}
}
else {
for (int i = 0; i < N; i++) {
MAPFN(start_x, start_y);
start_x += incx;
start_y += incy;
}
}
caml_acquire_runtime_system(); /* Disallow other threads */
CAMLreturn(Val_unit);
}
/* Bytecode stub: unpack the 7-argument array for the native FUN19_IMPL. */
CAMLprim value FUN19(value *argv, int __unused_argn)
{
return FUN19_IMPL(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
}
#endif /* FUN19 */
// function to map x to y with explicit offset, step size, number of ops
// more general version of FUN19, so more control over the access pattern to
// the data with two embedded loops.
#ifdef FUN20
/* Template: doubly-strided map x -> y. The outer loop runs M rows with
 * stride inc*_m, the inner loop N elements with stride inc*_n -- a more
 * general access pattern than FUN19.
 *
 * BUG FIX: in the OpenMP branch the original advanced the SHARED cursors
 * start_x_n/start_y_n inside the parallel-for, so concurrent iterations
 * raced on them and MAPFN was applied to garbage addresses. Each iteration
 * now derives private, loop-local cursors from its index i. The serial
 * branch is unchanged.
 * NOTE(review): the threshold tests N but the parallel loop is over M --
 * looks inherited from FUN19; confirm intended. */
CAMLprim value FUN20_IMPL(
value vM, value vN,
value vX, value vOFSX, value vINCX_M, value vINCX_N,
value vY, value vOFSY, value vINCY_M, value vINCY_N
)
{
CAMLparam2(vM, vN);
CAMLxparam4(vX, vOFSX, vINCX_M, vINCX_N);
CAMLxparam4(vY, vOFSY, vINCY_M, vINCY_N);
int M = Long_val(vM);
int N = Long_val(vN);
int ofsx = Long_val(vOFSX);
int incx_m = Long_val(vINCX_M);
int incx_n = Long_val(vINCX_N);
int ofsy = Long_val(vOFSY);
int incy_m = Long_val(vINCY_M);
int incy_n = Long_val(vINCY_N);
INIT;
struct caml_ba_array *X = Caml_ba_array_val(vX);
NUMBER *X_data = (NUMBER *) X->data;
struct caml_ba_array *Y = Caml_ba_array_val(vY);
NUMBER1 *Y_data = (NUMBER1 *) Y->data;
NUMBER *start_x_m;
NUMBER *start_x_n;
NUMBER1 *start_y_m;
NUMBER1 *start_y_n;
caml_release_runtime_system(); /* Allow other threads */
start_x_m = X_data + ofsx;
start_y_m = Y_data + ofsy;
if (N >= OWL_OMP_THRESHOLD_DEFAULT) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < M; i++) {
/* private per-iteration cursors: no shared-state race */
NUMBER *x_n = start_x_m + i * incx_m;
NUMBER1 *y_n = start_y_m + i * incy_m;
for (int j = 0; j < N; j++) {
MAPFN(x_n, y_n);
x_n += incx_n;
y_n += incy_n;
}
}
}
else {
for (int i = 0; i < M; i++) {
start_x_n = start_x_m;
start_y_n = start_y_m;
for (int j = 0; j < N; j++) {
MAPFN(start_x_n, start_y_n);
start_x_n += incx_n;
start_y_n += incy_n;
}
start_x_m += incx_m;
start_y_m += incy_m;
}
}
caml_acquire_runtime_system(); /* Disallow other threads */
CAMLreturn(Val_unit);
}
/* Bytecode stub: unpack the 10-argument array for the native FUN20_IMPL. */
CAMLprim value FUN20(value *argv, int __unused_argn)
{
return FUN20_IMPL(
argv[0], argv[1], argv[2], argv[3], argv[4],
argv[5], argv[6], argv[7], argv[8], argv[9]
);
}
#endif /* FUN20 */
// broadcast function of x and y then save the result to z
#ifdef FUN24
/* Recursive broadcasting worker for z = MAPFN(x, y), one dimension d per
 * call. A source dimension that equals z's dimension advances by its own
 * stride; a broadcast (size-1) dimension gets stride 0 so the same element
 * is reused. At the innermost dimension the elements are mapped directly;
 * otherwise recurse into d+1 for each index of dimension d. */
static OWL_INLINE void FUN24_CODE (
int d,
struct caml_ba_array *X, int64_t *stride_x, int ofs_x,
struct caml_ba_array *Y, int64_t *stride_y, int ofs_y,
struct caml_ba_array *Z, int64_t *stride_z, int ofs_z
)
{
int inc_x = X->dim[d] == Z->dim[d] ? stride_x[d] : 0;
int inc_y = Y->dim[d] == Z->dim[d] ? stride_y[d] : 0;
int inc_z = stride_z[d];
const int n = Z->dim[d];
if (d == X->num_dims - 1) {
/* innermost dimension: apply MAPFN element by element */
NUMBER *x = (NUMBER *) X->data + ofs_x;
NUMBER *y = (NUMBER *) Y->data + ofs_y;
NUMBER *z = (NUMBER *) Z->data + ofs_z;
for (int i = 0; i < n; i++) {
MAPFN(x, y, z);
x += inc_x;
y += inc_y;
z += inc_z;
}
}
else {
for (int i = 0; i < n; i++) {
FUN24_CODE (d+1, X, stride_x, ofs_x, Y, stride_y, ofs_y, Z, stride_z, ofs_z);
ofs_x += inc_x;
ofs_y += inc_y;
ofs_z += inc_z;
}
}
return;
}
/* Entry point for the FUN24 broadcast: unpack the three bigarrays and
 * their stride vectors, then run FUN24_CODE from dimension 0 with the
 * runtime lock released. */
CAMLprim value FUN24_IMPL(
value vX, value vSTRIDE_X,
value vY, value vSTRIDE_Y,
value vZ, value vSTRIDE_Z
)
{
CAMLparam4(vX, vSTRIDE_X, vY, vSTRIDE_Y);
CAMLxparam2(vZ, vSTRIDE_Z);
struct caml_ba_array *X = Caml_ba_array_val(vX);
struct caml_ba_array *Y = Caml_ba_array_val(vY);
struct caml_ba_array *Z = Caml_ba_array_val(vZ);
struct caml_ba_array *stride_X = Caml_ba_array_val(vSTRIDE_X);
int64_t *stride_x = (int64_t *) stride_X->data;
struct caml_ba_array *stride_Y = Caml_ba_array_val(vSTRIDE_Y);
int64_t *stride_y = (int64_t *) stride_Y->data;
struct caml_ba_array *stride_Z = Caml_ba_array_val(vSTRIDE_Z);
int64_t *stride_z = (int64_t *) stride_Z->data;
caml_release_runtime_system(); /* Allow other threads */
FUN24_CODE (0, X, stride_x, 0, Y, stride_y, 0, Z, stride_z, 0);
caml_acquire_runtime_system(); /* Disallow other threads */
CAMLreturn(Val_unit);
}
/* Bytecode stub: unpack the 6-argument array for the native FUN24_IMPL. */
CAMLprim value FUN24(value *argv, int __unused_argn)
{
return FUN24_IMPL(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
}
#endif /* FUN24 */
// broadcast function of x and y then save the result to z. Compared to FUN24,
// the difference of FUN25 is z has one extra dimension than max(dim_x, dim_y).
// This function is used in owl's distribution module and extra dimension is
// used as sample dimension.
#ifdef FUN25
/* Broadcasting worker like FUN24_CODE, except z carries one EXTRA leading
 * (sample) dimension, so source dimension d corresponds to z dimension
 * d+1. The caller iterates z's dimension 0 and passes ofs_z per sample. */
static OWL_INLINE void FUN25_CODE (
int d,
struct caml_ba_array *X, int64_t *stride_x, int ofs_x,
struct caml_ba_array *Y, int64_t *stride_y, int ofs_y,
struct caml_ba_array *Z, int64_t *stride_z, int ofs_z
)
{
int inc_x = X->dim[d] == Z->dim[d+1] ? stride_x[d] : 0;
int inc_y = Y->dim[d] == Z->dim[d+1] ? stride_y[d] : 0;
int inc_z = stride_z[d+1];
const int n = Z->dim[d+1];
if (d == X->num_dims - 1) {
/* innermost dimension: apply MAPFN element by element */
NUMBER *x = (NUMBER *) X->data + ofs_x;
NUMBER *y = (NUMBER *) Y->data + ofs_y;
NUMBER *z = (NUMBER *) Z->data + ofs_z;
for (int i = 0; i < n; i++) {
MAPFN(x, y, z);
x += inc_x;
y += inc_y;
z += inc_z;
}
}
else {
for (int i = 0; i < n; i++) {
FUN25_CODE (d+1, X, stride_x, ofs_x, Y, stride_y, ofs_y, Z, stride_z, ofs_z);
ofs_x += inc_x;
ofs_y += inc_y;
ofs_z += inc_z;
}
}
return;
}
/* Entry point for FUN25: for each sample along z's extra leading
 * dimension, broadcast x and y into that slice via FUN25_CODE. Used by
 * Owl's distribution module (dimension 0 of z = sample index). */
CAMLprim value FUN25_IMPL(
value vX, value vSTRIDE_X,
value vY, value vSTRIDE_Y,
value vZ, value vSTRIDE_Z
)
{
CAMLparam4(vX, vSTRIDE_X, vY, vSTRIDE_Y);
CAMLxparam2(vZ, vSTRIDE_Z);
struct caml_ba_array *X = Caml_ba_array_val(vX);
struct caml_ba_array *Y = Caml_ba_array_val(vY);
struct caml_ba_array *Z = Caml_ba_array_val(vZ);
struct caml_ba_array *stride_X = Caml_ba_array_val(vSTRIDE_X);
int64_t *stride_x = (int64_t *) stride_X->data;
struct caml_ba_array *stride_Y = Caml_ba_array_val(vSTRIDE_Y);
int64_t *stride_y = (int64_t *) stride_Y->data;
struct caml_ba_array *stride_Z = Caml_ba_array_val(vSTRIDE_Z);
int64_t *stride_z = (int64_t *) stride_Z->data;
caml_release_runtime_system(); /* Allow other threads */
int ofs_z = 0;
for (int i = 0; i < Z->dim[0]; i++) {
FUN25_CODE (0, X, stride_x, 0, Y, stride_y, 0, Z, stride_z, ofs_z);
ofs_z += stride_z[0];
}
caml_acquire_runtime_system(); /* Disallow other threads */
CAMLreturn(Val_unit);
}
/* Bytecode stub: unpack the 6-argument array for the native FUN25_IMPL. */
CAMLprim value FUN25(value *argv, int __unused_argn)
{
return FUN25_IMPL(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
}
#endif /* FUN25 */
// Similar to FUN25, but broadcast between w, x, y, then save the result to z
#ifdef FUN27
/* Three-source broadcasting worker: z = MAPFN(w, x, y) with z carrying one
 * extra leading (sample) dimension, as in FUN25_CODE. Recursion terminates
 * on X's innermost dimension (w, x, y are assumed to have equal rank). */
static OWL_INLINE void FUN27_CODE (
int d,
struct caml_ba_array *W, int64_t *stride_w, int ofs_w,
struct caml_ba_array *X, int64_t *stride_x, int ofs_x,
struct caml_ba_array *Y, int64_t *stride_y, int ofs_y,
struct caml_ba_array *Z, int64_t *stride_z, int ofs_z
)
{
int inc_w = W->dim[d] == Z->dim[d+1] ? stride_w[d] : 0;
int inc_x = X->dim[d] == Z->dim[d+1] ? stride_x[d] : 0;
int inc_y = Y->dim[d] == Z->dim[d+1] ? stride_y[d] : 0;
int inc_z = stride_z[d+1];
const int n = Z->dim[d+1];
if (d == X->num_dims - 1) {
/* innermost dimension: apply MAPFN element by element */
NUMBER *w = (NUMBER *) W->data + ofs_w;
NUMBER *x = (NUMBER *) X->data + ofs_x;
NUMBER *y = (NUMBER *) Y->data + ofs_y;
NUMBER *z = (NUMBER *) Z->data + ofs_z;
for (int i = 0; i < n; i++) {
MAPFN(w, x, y, z);
w += inc_w;
x += inc_x;
y += inc_y;
z += inc_z;
}
}
else {
for (int i = 0; i < n; i++) {
FUN27_CODE (d+1, W, stride_w, ofs_w, X, stride_x, ofs_x, Y, stride_y, ofs_y, Z, stride_z, ofs_z);
ofs_w += inc_w;
ofs_x += inc_x;
ofs_y += inc_y;
ofs_z += inc_z;
}
}
return;
}
/* Entry point for FUN27: per sample along z's extra leading dimension,
 * broadcast w, x, y into that slice via FUN27_CODE.
 *
 * BUG FIX: the original used CAMLparam4 TWICE. Only one CAMLparamN is
 * allowed per function (it declares the local caml__frame); additional
 * arguments must be registered with CAMLxparamN, exactly as the sibling
 * FUN24_IMPL/FUN25_IMPL do. The second group now uses CAMLxparam4. */
CAMLprim value FUN27_IMPL(
value vW, value vSTRIDE_W,
value vX, value vSTRIDE_X,
value vY, value vSTRIDE_Y,
value vZ, value vSTRIDE_Z
)
{
CAMLparam4(vW, vSTRIDE_W, vX, vSTRIDE_X);
CAMLxparam4(vY, vSTRIDE_Y, vZ, vSTRIDE_Z);
struct caml_ba_array *W = Caml_ba_array_val(vW);
struct caml_ba_array *X = Caml_ba_array_val(vX);
struct caml_ba_array *Y = Caml_ba_array_val(vY);
struct caml_ba_array *Z = Caml_ba_array_val(vZ);
struct caml_ba_array *stride_W = Caml_ba_array_val(vSTRIDE_W);
int64_t *stride_w = (int64_t *) stride_W->data;
struct caml_ba_array *stride_X = Caml_ba_array_val(vSTRIDE_X);
int64_t *stride_x = (int64_t *) stride_X->data;
struct caml_ba_array *stride_Y = Caml_ba_array_val(vSTRIDE_Y);
int64_t *stride_y = (int64_t *) stride_Y->data;
struct caml_ba_array *stride_Z = Caml_ba_array_val(vSTRIDE_Z);
int64_t *stride_z = (int64_t *) stride_Z->data;
caml_release_runtime_system(); /* Allow other threads */
int ofs_z = 0;
for (int i = 0; i < Z->dim[0]; i++) {
FUN27_CODE (0, W, stride_w, 0, X, stride_x, 0, Y, stride_y, 0, Z, stride_z, ofs_z);
ofs_z += stride_z[0];
}
caml_acquire_runtime_system(); /* Disallow other threads */
CAMLreturn(Val_unit);
}
/* Bytecode stub: unpack the 8-argument array for the native FUN27_IMPL. */
CAMLprim value FUN27(value *argv, int __unused_argn)
{
return FUN27_IMPL(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
}
#endif /* FUN27 */
// function to map x to y with explicit offset, step size, number of ops
// more general version of FUN20, so more control over the access pattern to
// the data with three embedded loops.
#ifdef FUN28
/* Template: triply-strided map x -> y with three nested loops (M x N x O),
 * each level with its own x/y stride -- the most general access pattern in
 * this family (generalizes FUN20). Serial only. */
CAMLprim value FUN28_IMPL(
value vM, value vN, value vO,
value vX, value vOFSX, value vINCX_M, value vINCX_N, value vINCX_O,
value vY, value vOFSY, value vINCY_M, value vINCY_N, value vINCY_O
)
{
CAMLparam3(vM, vN, vO);
CAMLxparam5(vX, vOFSX, vINCX_M, vINCX_N, vINCX_O);
CAMLxparam5(vY, vOFSY, vINCY_M, vINCY_N, vINCY_O);
int M = Long_val(vM);
int N = Long_val(vN);
int O = Long_val(vO);
int ofsx = Long_val(vOFSX);
int incx_m = Long_val(vINCX_M);
int incx_n = Long_val(vINCX_N);
int incx_o = Long_val(vINCX_O);
int ofsy = Long_val(vOFSY);
int incy_m = Long_val(vINCY_M);
int incy_n = Long_val(vINCY_N);
int incy_o = Long_val(vINCY_O);
INIT;
struct caml_ba_array *X = Caml_ba_array_val(vX);
NUMBER *X_data = (NUMBER *) X->data;
struct caml_ba_array *Y = Caml_ba_array_val(vY);
NUMBER1 *Y_data = (NUMBER1 *) Y->data;
NUMBER *start_x_m;
NUMBER *start_x_n;
NUMBER *start_x_o;
NUMBER1 *start_y_m;
NUMBER1 *start_y_n;
NUMBER1 *start_y_o;
caml_release_runtime_system(); /* Allow other threads */
start_x_m = X_data + ofsx;
start_y_m = Y_data + ofsy;
/* each loop level keeps its own cursor pair; inner levels restart from
   the enclosing level's cursor */
for (int i = 0; i < M; i++) {
start_x_n = start_x_m;
start_y_n = start_y_m;
for (int j = 0; j < N; j++) {
start_x_o = start_x_n;
start_y_o = start_y_n;
for (int k = 0; k < O; k++) {
MAPFN(start_x_o, start_y_o);
start_x_o += incx_o;
start_y_o += incy_o;
}
start_x_n += incx_n;
start_y_n += incy_n;
}
start_x_m += incx_m;
start_y_m += incy_m;
}
caml_acquire_runtime_system(); /* Disallow other threads */
CAMLreturn(Val_unit);
}
/* Bytecode stub: unpack the 13-argument array for the native FUN28_IMPL. */
CAMLprim value FUN28(value *argv, int __unused_argn)
{
return FUN28_IMPL(
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6],
argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]
);
}
#endif /* FUN28 */
// function to map [x] w.r.t scalar values, similar to FUN12 but saves to Y
#ifdef FUN29
/* Template: map x w.r.t. scalars a and b, writing to y (FUN12 variant that
 * does not modify x). MAPFN receives both elements as lvalues.
 * NOTE(review): Y_data is declared NUMBER* here while sibling templates use
 * NUMBER1* for the destination -- confirm intended for this family. */
CAMLprim value FUN29(value vN, value vA, value vB, value vX, value vY)
{
CAMLparam5(vN, vA, vB, vX, vY);
int N = Long_val(vN);
INIT;
struct caml_ba_array *X = Caml_ba_array_val(vX);
NUMBER *X_data = (NUMBER *) X->data;
struct caml_ba_array *Y = Caml_ba_array_val(vY);
NUMBER *Y_data = (NUMBER *) Y->data;
caml_release_runtime_system(); /* Allow other threads */
if (N >= OWL_OMP_THRESHOLD_DEFAULT) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < N; i++) {
MAPFN(*(X_data + i), *(Y_data + i));
}
}
else {
for (int i = 0; i < N; i++) {
MAPFN(*(X_data + i), *(Y_data + i));
}
}
caml_acquire_runtime_system(); /* Disallow other threads */
CAMLreturn(Val_unit);
}
#endif /* FUN29 */
#undef NUMBER
#undef NUMBER1
#undef NUMBER2
#undef MAPFN
#undef MAPFN1
#undef MAPFN2
#undef MAPFN3
#undef INIT
#undef FUN4
#undef FUN12
#undef FUN13
#undef FUN14
#undef FUN15
#undef FUN17
#undef FUN18
#undef FUN19
#undef FUN19_IMPL
#undef FUN20
#undef FUN20_IMPL
#undef FUN24
#undef FUN24_IMPL
#undef FUN24_CODE
#undef FUN25
#undef FUN25_IMPL
#undef FUN25_CODE
#undef FUN27
#undef FUN27_IMPL
#undef FUN27_CODE
#undef FUN28
#undef FUN28_IMPL
#undef FUN29
#undef OWL_OMP_THRESHOLD
#endif /* OWL_ENABLE_TEMPLATE */
|
dsgbsv.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zcgbsv.c, mixed zc -> ds, Fri Sep 28 17:38:17 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "core_lapack.h"
#include <math.h>
#include <omp.h>
/***************************************************************************//**
* TODO: adjust the documents for band matrix A.
* @ingroup plasma_gbsv
*
* Computes the solution to a system of linear equations A * X = B, where A is
* an n-by-n matrix and X and B are n-by-nrhs matrices.
*
* plasma_dsgbsv first factorizes the band matrix using plasma_sgbtrf and
* uses this factorization within an iterative refinement procedure to
* produce a solution with DOUBLE PRECISION normwise backward error quality
* (see below). If the approach fails the method falls back to a DOUBLE
* PRECISION factorization and solve.
*
* The iterative refinement is not going to be a winning strategy if
* the ratio COMPLEX performance over COMPLEX*16 performance is too
* small. A reasonable strategy should take the number of right-hand
* sides and the size of the matrix into account. This might be done
* with a call to ILAENV in the future. Up to now, we always try
* iterative refinement.
*
* The iterative refinement process is stopped if iter > itermax or
* for all the RHS we have: Rnorm < sqrt(n)*Xnorm*Anorm*eps*BWDmax
* where:
*
* - iter is the number of the current iteration in the iterative refinement
* process
* - Rnorm is the Infinity-norm of the residual
* - Xnorm is the Infinity-norm of the solution
* - Anorm is the Infinity-operator-norm of the matrix A
* - eps is the machine epsilon returned by DLAMCH('Epsilon').
* The values itermax and BWDmax are fixed to 30 and 1.0D+00 respectively.
*
*******************************************************************************
*
* @param[in] n
* The number of linear equations, i.e., the order of the matrix A.
* n >= 0.
*
* @param[in] kl
* The number of subdiagonals within the band of A. kl >= 0.
*
* @param[in] ku
* The number of superdiagonals within the band of A. ku >= 0.
* @param[in] nrhs
* The number of right hand sides, i.e., the number of columns of the
* matrix B. nrhs >= 0.
*
* @param[in] pAB
* The band matrix AB in LAPACK band matrix format.
*
* @param[in] ldab
* The leading dimension of the array AB.
*
* @param[out] ipiv
* The pivot indices; for 1 <= i <= min(m,n), row i of the
* matrix was interchanged with row ipiv(i).
*
* @param[in] pB
* The n-by-nrhs matrix of right hand side matrix B.
* This matrix remains unchanged.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,n).
*
* @param[out] pX
* If return value = 0, the n-by-nrhs solution matrix X.
*
* @param[in] ldx
* The leading dimension of the array X. ldx >= max(1,n).
*
* @param[out] iter
* The number of the iterations in the iterative refinement
* process, needed for the convergence. If failed, it is set
* to be -(1+itermax), where itermax = 30.
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_dsgbsv
* @sa plasma_dsgbsv
* @sa plasma_dgbsv
*
******************************************************************************/
int plasma_dsgbsv(int n, int kl, int ku, int nrhs,
                  double *pAB, int ldab, int *ipiv,
                  double *pB, int ldb,
                  double *pX, int ldx, int *iter)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    // NOTE(review): kl and ku are documented as >= 0 but are not validated
    // here; confirm whether negative values should be rejected explicitly.
    if (n < 0) {
        plasma_error("illegal value of n");
        return -1;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -2;
    }
    if (ldab < imax(1, 1+kl+ku)) {
        // Fixed message: the offending argument is ldab, not lda.
        plasma_error("illegal value of ldab");
        return -4;
    }
    if (ldb < imax(1, n)) {
        plasma_error("illegal value of ldb");
        return -7;
    }
    if (ldx < imax(1, n)) {
        plasma_error("illegal value of ldx");
        return -9;
    }

    // quick return
    *iter = 0;
    if (imin(n, nrhs) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_gbtrf(plasma, PlasmaRealDouble, n, kl+ku+1);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t AB;
    plasma_desc_t B;
    plasma_desc_t X;
    int tku = (ku+kl+nb-1)/nb; // number of tiles in upper band (not including diagonal)
    int tkl = (kl+nb-1)/nb;    // number of tiles in lower band (not including diagonal)
    int lm = (tku+tkl+1)*nb;   // since we use dgetrf on panel, we pivot back within panel.
                               // this could fill the last tile of the panel,
                               // and we need extra NB space on the bottom
    int retval;
    retval = plasma_desc_general_band_create(PlasmaRealDouble, PlasmaGeneral,
                                             nb, nb, lm, n, 0, 0, n, n, kl, ku, &AB);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_band_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        n, nrhs, 0, 0, n, nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&AB);
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        n, nrhs, 0, 0, n, nrhs, &X);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&AB);
        plasma_desc_destroy(&B);
        return retval;
    }

    // Create additional tile matrices: residual R, and single-precision
    // copies of the band matrix (ABs) and the solution (Xs).
    plasma_desc_t R, ABs, Xs;
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        B.m, B.n, 0, 0, B.m, B.n, &R);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&AB);
        plasma_desc_destroy(&B);
        plasma_desc_destroy(&X);
        return retval;
    }
    retval = plasma_desc_general_band_create(PlasmaRealFloat, PlasmaGeneral,
                                             nb, nb, lm, n, 0, 0, n, n, kl, ku, &ABs);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_band_create() failed");
        plasma_desc_destroy(&AB);
        plasma_desc_destroy(&B);
        plasma_desc_destroy(&X);
        plasma_desc_destroy(&R);
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
                                        X.m, X.n, 0, 0, X.m, X.n, &Xs);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&AB);
        plasma_desc_destroy(&B);
        plasma_desc_destroy(&X);
        plasma_desc_destroy(&R);
        plasma_desc_destroy(&ABs);
        return retval;
    }

    // Allocate tiled workspace for Infinity norm calculations.
    size_t lwork = imax(((size_t)AB.nt*AB.mt*AB.mb+AB.mb*AB.mt),
                        (size_t)X.mt*X.n+(size_t)R.mt*R.n);
    double *work = (double*)calloc(lwork, sizeof(double));
    double *Rnorm = (double*)malloc(((size_t)R.n)*sizeof(double));
    double *Xnorm = (double*)malloc(((size_t)X.n)*sizeof(double));
    if (work == NULL || Rnorm == NULL || Xnorm == NULL) {
        // Fixed: the original dereferenced these buffers without checking
        // the allocations, crashing on OOM instead of reporting it.
        plasma_error("malloc() failed");
        free(work);
        free(Rnorm);
        free(Xnorm);
        plasma_desc_destroy(&AB);
        plasma_desc_destroy(&B);
        plasma_desc_destroy(&X);
        plasma_desc_destroy(&R);
        plasma_desc_destroy(&ABs);
        plasma_desc_destroy(&Xs);
        return PlasmaErrorOutOfMemory;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);

    // Initialize barrier (once is sufficient; the original initialized it a
    // second, redundant time earlier in the routine).
    plasma_barrier_init(&plasma->barrier);

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate matrices to tile layout.
        plasma_omp_dpb2desc(pAB, ldab, AB, &sequence, &request);
        plasma_omp_dge2desc(pB, ldb, B, &sequence, &request);

        // Call tile async function.
        plasma_omp_dsgbsv(AB, ipiv, B, X, ABs, Xs, R, work, Rnorm, Xnorm, iter,
                          &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_ddesc2ge(X, pX, ldx, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&AB);
    plasma_desc_destroy(&B);
    plasma_desc_destroy(&X);
    plasma_desc_destroy(&R);
    plasma_desc_destroy(&ABs);
    plasma_desc_destroy(&Xs);
    free(work);
    free(Rnorm);
    free(Xnorm);

    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
* @ingroup plasma_gbsv
*
* Solves a general band linear system of equations using iterative
* refinement with the LU factor computed using plasma_sgbtrf.
* Non-blocking tile version of plasma_dsgbsv(). Operates on
* matrices stored by tiles. All matrices are passed through
* descriptors. All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] A
* Descriptor of matrix A.
*
* @param[out] ipiv
* The pivot indices; for 1 <= i <= min(m,n), row i of the
* matrix was interchanged with row ipiv(i).
*
* @param[in] B
* Descriptor of matrix B.
*
* @param[in,out] X
* Descriptor of matrix X.
*
 * @param[out] As
 *     Descriptor of auxiliary matrix A in single precision.
 *
 * @param[out] Xs
 *     Descriptor of auxiliary matrix X in single precision.
*
* @param[out] R
* Descriptor of auxiliary remainder matrix R.
*
* @param[out] work
* Workspace needed to compute infinity norm of the matrix A.
*
 * @param[out] Rnorm
 *     Workspace needed to store the max value in each of the residual
 *     vectors.
 *
 * @param[out] Xnorm
 *     Workspace needed to store the max value in each of the current
 *     solution vectors.
*
* @param[out] iter
* The number of the iterations in the iterative refinement
* process, needed for the convergence. If failed, it is set
* to be -(1+itermax), where itermax = 30.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PLASMA_SUCCESS (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_dsgbsv
* @sa plasma_omp_dsgbsv
* @sa plasma_omp_dgbsv
*
******************************************************************************/
void plasma_omp_dsgbsv(plasma_desc_t A, int *ipiv,
                       plasma_desc_t B, plasma_desc_t X,
                       plasma_desc_t As, plasma_desc_t Xs, plasma_desc_t R,
                       double *work, double *Rnorm, double *Xnorm, int *iter,
                       plasma_sequence_t *sequence,
                       plasma_request_t *request)
{
    // Mixed-precision band solver: LU-factor and solve in single precision,
    // then iteratively refine the solution in double precision. Falls back
    // to a full double-precision factorization and solve if refinement does
    // not converge within itermax sweeps.
    const int itermax = 30;     // maximum number of refinement iterations
    const double bwdmax = 1.0;  // BWDmax constant in the stopping criterion
    const double zmone = -1.0;  // alpha for the residual gemm (R = B - A*X)
    const double zone = 1.0;    // beta for the residual gemm / update
    *iter = 0;

    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(X) != PlasmaSuccess) {
        plasma_error("invalid X");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(As) != PlasmaSuccess) {
        plasma_error("invalid As");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(Xs) != PlasmaSuccess) {
        plasma_error("invalid Xs");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(R) != PlasmaSuccess) {
        plasma_error("invalid R");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // NOTE(review): sequence/request are passed to plasma_request_fail()
    // in the checks above before these NULL tests run — confirm callers
    // can never pass NULL, or hoist these two checks to the top.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.n == 0 || B.n == 0)
        return;

    // workspaces for damax (column maxima of X and R share `work`)
    double *workX = work;
    double *workR = &work[X.mt*X.n];

    // Compute some constants.
    double cte;  // convergence threshold: Anorm * eps * sqrt(n) * BWDmax
    double eps = LAPACKE_dlamch_work('E');
    double Anorm;
    // NOTE(review): the plasma_pz* calls in this routine look like leftovers
    // of z->d precision generation; confirm they resolve to the
    // double-precision band kernels in this build.
    plasma_pzlangb(PlasmaInfNorm, A, work, &Anorm, sequence, request);

    // Convert B from double to single precision, store result in Xs.
    plasma_pdlag2s(B, Xs, sequence, request);

    // Convert A from double to single precision, store result in As.
    plasma_pdlag2s(A, As, sequence, request);

    // Compute the LU factorization of As.
    plasma_psgbtrf(As, ipiv, sequence, request);

    // Solve the system As * Xs = Bs (forward then backward band solve).
    plasma_pstbsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaUnit,
                  1.0, As, Xs, ipiv, sequence, request);
    plasma_pstbsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans, PlasmaNonUnit,
                  1.0, As, Xs, ipiv, sequence, request);

    // Convert Xs to double precision
    plasma_pslag2d(Xs, X, sequence, request);

    // Compute R = B - A * X.
    plasma_pdlacpy(PlasmaGeneral, PlasmaNoTrans, B, R, sequence, request);
    plasma_pdgemm(PlasmaNoTrans, PlasmaNoTrans,
                  zmone, A, X, zone, R, sequence, request);

    // Check whether the nrhs normwise backward error satisfies the
    // stopping criterion. If yes, set iter=0 and return.
    plasma_pdamax(PlasmaColumnwise, X, workX, Xnorm, sequence, request);
    plasma_pdamax(PlasmaColumnwise, R, workR, Rnorm, sequence, request);

    // Wait for all tasks above, then evaluate convergence on this thread.
    #pragma omp taskwait
    {
        cte = Anorm * eps * sqrt((double)A.n) * bwdmax;
        // Converged only if every RHS satisfies Rnorm <= Xnorm * cte.
        int flag = 1;
        for (int n = 0; n < R.n && flag == 1; n++) {
            if (Rnorm[n] > Xnorm[n] * cte) {
                flag = 0;
            }
        }
        if (flag == 1) {
            *iter = 0;
            return;
        }
    }

    // iterative refinement
    for (int iiter = 0; iiter < itermax; iiter++) {
        // Convert R from double to single precision, store result in Xs.
        plasma_pdlag2s(R, Xs, sequence, request);

        // Solve the correction system As * Xs = Rs.
        plasma_pstbsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaUnit,
                      1.0, As, Xs, ipiv, sequence, request);
        plasma_pstbsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans, PlasmaNonUnit,
                      1.0, As, Xs, ipiv, sequence, request);

        // Convert Xs back to double precision and update the current
        // iterate: X += correction (held in R after conversion).
        plasma_pslag2d(Xs, R, sequence, request);
        plasma_pdgeadd(PlasmaNoTrans, zone, R, zone, X, sequence, request);

        // Compute R = B - A * X.
        plasma_pdlacpy(PlasmaGeneral, PlasmaNoTrans, B, R, sequence, request);
        plasma_pdgemm(PlasmaNoTrans, PlasmaNoTrans, zmone, A, X, zone, R,
                      sequence, request);

        // Check whether nrhs normwise backward error satisfies the
        // stopping criterion. If yes, set iter = iiter > 0 and return.
        plasma_pdamax(PlasmaColumnwise, X, workX, Xnorm, sequence, request);
        plasma_pdamax(PlasmaColumnwise, R, workR, Rnorm, sequence, request);

        #pragma omp taskwait
        {
            int flag = 1;
            for (int n = 0; n < R.n && flag == 1; n++) {
                if (Rnorm[n] > Xnorm[n] * cte) {
                    flag = 0;
                }
            }
            if (flag == 1) {
                // Converged after iiter+1 refinement sweeps.
                *iter = iiter+1;
                return;
            }
        }
    }

    // If we are at this place of the code, this is because we have performed
    // iter = itermax iterations and never satisfied the stopping criterion,
    // set up the iter flag accordingly and follow up with double precision
    // routine.
    *iter = -itermax - 1;

    //#if !defined(PLASMA_DSGESV_WORKAROUND)
    // Compute LU factorization of A.
    //#pragma omp taskwait
    plasma_pzgbtrf(A, ipiv, sequence, request);

    // Solve the system A * X = B.
    plasma_pdlacpy(PlasmaGeneral, PlasmaNoTrans, B, X, sequence, request);
    //#pragma omp taskwait
    //plasma_pdgeswp(PlasmaRowwise, X, ipiv, 1, sequence, request);
    plasma_pztbsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaUnit,
                  1.0, A, X, ipiv, sequence, request);
    plasma_pztbsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans, PlasmaNonUnit,
                  1.0, A, X, ipiv, sequence, request);
}
|
convolution_1x1_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
const int size = w * h;
Mat bottom_im2col = bottom_blob;
bottom_im2col.w = size;
bottom_im2col.h = 1;
im2col_sgemm_fp16sa_neon(bottom_im2col, top_blob, kernel, _bias, opt);
}
static void conv1x1s2_sgemm_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = w - 2 * outw + w;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const __fp16* r0 = bottom_blob.channel(p);
__fp16* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
outptr[0] = r0[0];
r0 += 2;
outptr += 1;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_fp16sa_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
image_random-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file image_random-inl.h
* \brief
* \author
*/
#ifndef MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
#define MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
#include <algorithm>
#include <cmath>
#include <cstring>
#include <limits>
#include <random>
#include <tuple>
#include <utility>
#include <vector>
#include "mxnet/base.h"
#include "../mxnet_op.h"
#include "../operator_common.h"
#if MXNET_USE_OPENCV
#include <opencv2/opencv.hpp>
#endif // MXNET_USE_OPENCV
namespace mxnet {
namespace op {
namespace image {
using namespace mshadow;
#if MXNET_USE_CUDA
// NOTE: Kernel launch/map was extremely costly.
// Hence, we use separate CUDA kernels for these operators.

// Forward declaration; defined in the companion .cu translation unit.
// GPU counterpart of ToTensor below (HWC/NHWC -> float CHW/NCHW with
// division by normalize_factor) — confirm against the .cu definition.
template<typename DType, typename T1, typename T2>
void ToTensorImplCUDA(mshadow::Stream<gpu> *s,
                      const T1 input,
                      const T2 output,
                      const int req,
                      const float normalize_factor);

// Forward declaration; defined in the companion .cu translation unit.
// Presumably mirrors the CPU Normalize kernel: per-channel
// (x - mean[c]) / std[c] over an N,C,H,W buffer — confirm in the .cu file.
template<typename DType>
void NormalizeImplCUDA(mshadow::Stream<gpu> *s,
                       const DType *input,
                       DType *output,
                       const int req,
                       const int N,
                       const int C,
                       const int H,
                       const int W,
                       const float mean_d0,
                       const float mean_d1,
                       const float mean_d2,
                       const float std_d0,
                       const float std_d1,
                       const float std_d2);

// Forward declaration; defined in the companion .cu translation unit.
// Presumably mirrors NormalizeBackward: scales out_grad by 1/std[c]
// per channel — confirm in the .cu file.
template<typename DType>
void NormalizeBackwardImplCUDA(mshadow::Stream<gpu> *s,
                               const DType *out_grad,
                               DType *in_grad,
                               const int req,
                               const int N,
                               const int C,
                               const int H,
                               const int W,
                               const float std_d0,
                               const float std_d1,
                               const float std_d2);
#endif  // MXNET_USE_CUDA
// Shape and Type inference for image to tensor operator
// Shape inference for `to_tensor`: the output is the channel-first
// permutation of the HWC (or NHWC) input shape.
inline bool ToTensorShape(const nnvm::NodeAttrs& attrs,
                          mxnet::ShapeVector *in_attrs,
                          mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);

  mxnet::TShape &ishape = (*in_attrs)[0];
  if (!shape_is_known(ishape)) return false;

  CHECK((ishape.ndim() == 3) || (ishape.ndim() == 4))
      << "Input image must have shape (height, width, channels), or "
      << "(N, height, width, channels) but got " << ishape;

  if (ishape.ndim() == 3) {
    // (h, w, c) -> (c, h, w)
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape({ishape[2], ishape[0], ishape[1]}));
  } else if (ishape.ndim() == 4) {
    // (n, h, w, c) -> (n, c, h, w)
    SHAPE_ASSIGN_CHECK(*out_attrs, 0,
                       mxnet::TShape({ishape[0], ishape[3], ishape[1], ishape[2]}));
  }
  return true;
}
// Type inference for `to_tensor`: the output dtype is always float32,
// whatever the input dtype is.
inline bool ToTensorType(const nnvm::NodeAttrs& attrs,
                         std::vector<int> *in_attrs,
                         std::vector<int> *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  TYPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::kFloat32);
  // Inference is complete once the input dtype is known (-1 = unknown).
  return in_attrs->at(0) != -1;
}
// Operator Implementation
// CPU kernel for `to_tensor`: HWC -> CHW transpose fused with scaling by
// 1/normalize_factor. `length` = H*W, `step` = flat offset of the current
// image inside a batched buffer (0 for a single image).
template<typename DType, int req>
inline void ToTensor(float* out_data, const DType* in_data,
                     const int length,
                     const int channels,
                     const float normalize_factor,
                     const int step) {
  // Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
  #pragma omp parallel for
#else
  #pragma omp parallel for collapse(2)
#endif  // _MSC_VER
  for (int ch = 0; ch < channels; ++ch) {
    for (int px = 0; px < length; ++px) {
      // Destination is channel-major, source is pixel-major.
      KERNEL_ASSIGN(out_data[step + ch*length + px], req,
                    (in_data[step + px*channels + ch]) / normalize_factor);
    }
  }
}
// Dispatches the CPU ToTensor kernel over the runtime input dtype and the
// write-request kind. `length` = H*W, `channel` = C, `step` = flat element
// offset of the current image within a batched buffer (0 for a 3-D input).
inline void ToTensorImpl(const std::vector<TBlob> &inputs,
                         const std::vector<TBlob> &outputs,
                         const std::vector<OpReqType> &req,
                         const int length,
                         const int channel,
                         const float normalize_factor,
                         const int step) {
  MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      // Output is always float32 (see ToTensorType).
      float* output = outputs[0].dptr<float>();
      DType* input = inputs[0].dptr<DType>();
      ToTensor<DType, req_type>(output, input, length, channel,
                                normalize_factor, step);
    });
  });
}
// Forward pass of the `to_tensor` operator: converts an HWC (or NHWC)
// image to a float32 CHW (or NCHW) tensor with values divided by 255.
template<typename xpu>
void ToTensorOpForward(const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const std::vector<TBlob> &inputs,
                       const std::vector<OpReqType> &req,
                       const std::vector<TBlob> &outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);

  // We do not use a temp buffer when performing the operation.
  // Hence, this check is necessary.
  CHECK_EQ(req[0], kWriteTo)
    << "`to_tensor` does not support inplace updates";

  // Pixel values are divided by this factor to land in [0, 1].
  const float normalize_factor = 255.0f;

  if (std::is_same<xpu, gpu>::value) {
#if MXNET_USE_CUDA
    // GPU path: dedicated CUDA kernel (see ToTensorImplCUDA declaration).
    mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
        if (inputs[0].ndim() == 3) {
          Tensor<gpu, 3, DType> input = inputs[0].get<gpu, 3, DType>(s);
          Tensor<gpu, 3, float> output = outputs[0].get<gpu, 3, float>(s);
          ToTensorImplCUDA<DType, Tensor<gpu, 3, DType>, Tensor<gpu, 3, float>>
            (s, input, output, req_type, normalize_factor);
        } else {
          Tensor<gpu, 4, DType> input = inputs[0].get<gpu, 4, DType>(s);
          Tensor<gpu, 4, float> output = outputs[0].get<gpu, 4, float>(s);
          ToTensorImplCUDA<DType, Tensor<gpu, 4, DType>, Tensor<gpu, 4, float>>
            (s, input, output, req_type, normalize_factor);
        }
      });
    });
#else
    LOG(FATAL) << "Compile with USE_CUDA=1 to use ToTensor operator on GPU.";
#endif  // MXNET_USE_CUDA
  } else if (inputs[0].ndim() == 3) {
    // 3D Input - (h, w, c)
    const int length = inputs[0].shape_[0] * inputs[0].shape_[1];
    const int channel = static_cast<int>(inputs[0].shape_[2]);
    const int step = 0;
    ToTensorImpl(inputs, outputs, req, length,
                 channel, normalize_factor, step);
  } else if (inputs[0].ndim() == 4) {
    // 4D input (n, h, w, c): process each image of the batch in parallel.
    const int batch_size = inputs[0].shape_[0];
    const int length = inputs[0].shape_[1] * inputs[0].shape_[2];
    const int channel = static_cast<int>(inputs[0].shape_[3]);
    const int step = channel * length;  // elements per image

#pragma omp parallel for
    for (auto n = 0; n < batch_size; ++n) {
      ToTensorImpl(inputs, outputs, req, length, channel,
                   normalize_factor, n*step);
    }
  }
}
// Parameters of the image Normalize operator: per-channel mean and
// standard deviation (a single value is broadcast across channels).
struct NormalizeParam : public dmlc::Parameter<NormalizeParam> {
  mxnet::Tuple<float> mean;  // per-channel means
  mxnet::Tuple<float> std;   // per-channel standard deviations
  DMLC_DECLARE_PARAMETER(NormalizeParam) {
    DMLC_DECLARE_FIELD(mean)
    .set_default(mxnet::Tuple<float> {0.0f, 0.0f, 0.0f, 0.0f})
    .describe("Sequence of means for each channel. "
              "Default value is 0.")
    DMLC_DECLARE_FIELD(std)
    .set_default(mxnet::Tuple<float> {1.0f, 1.0f, 1.0f, 1.0f})
    .describe("Sequence of standard deviations for each channel. "
              "Default value is 1.");
  }
};
// Shape and Type inference for image Normalize operator
// Shape inference
// Shape inference for Normalize: output shape equals input shape.
// Validates that the channel dimension is 1 or 3 and that mean/std have
// either one entry or one entry per channel.
inline bool NormalizeOpShape(const nnvm::NodeAttrs& attrs,
                             mxnet::ShapeVector *in_attrs,
                             mxnet::ShapeVector *out_attrs) {
  const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed);
  const auto& dshape = (*in_attrs)[0];
  if (!dshape.ndim()) return false;  // input shape not known yet

  CHECK((dshape.ndim() == 3) || (dshape.ndim() == 4))
      << "Input tensor must have shape (channels, height, width), or "
      << "(N, channels, height, width), but got " << dshape;

  int nchannels = 0;
  if (dshape.ndim() == 3) {
    // (c, h, w) layout: channels come first.
    nchannels = dshape[0];
    CHECK(nchannels == 3 || nchannels == 1)
      << "The first dimension of input tensor must be the channel dimension with "
      << "either 1 or 3 elements, but got input with shape " << dshape;
  } else if (dshape.ndim() == 4) {
    // (n, c, h, w) layout: channels come second.
    nchannels = dshape[1];
    CHECK(nchannels == 3 || nchannels == 1)
      << "The second dimension of input tensor must be the channel dimension with "
      << "either 1 or 3 elements, but got input with shape " << dshape;
  }
  CHECK((param.mean.ndim() == 1) || (param.mean.ndim() == nchannels))
    << "Invalid mean for input with shape " << dshape
    << ". mean must have either 1 or " << nchannels
    << " elements, but got " << param.mean;
  CHECK(param.std.ndim() == 1 || param.std.ndim() == nchannels)
    << "Invalid std for input with shape " << dshape
    << ". std must have either 1 or " << nchannels
    << " elements, but got " << param.std;

  // Normalize is elementwise: output shape mirrors the input.
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
  return true;
}
// Type Inference
// Type inference for Normalize: dtype-preserving, so the type is
// propagated in both directions (either side may seed inference).
inline bool NormalizeOpType(const nnvm::NodeAttrs& attrs,
                            std::vector<int>* in_attrs,
                            std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);

  TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
  TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  return out_attrs->at(0) != -1;  // -1 means still unknown
}
// CPU kernel for Normalize: per-channel affine transform
// out = (in - mean[c]) / std[c] over a CHW buffer. `length` = H*W and
// `step` = flat offset of the current image inside a batched buffer.
template<typename DType, int req>
inline void Normalize(DType* out_data,
                      const DType* in_data,
                      const int length,
                      const int channels,
                      const int step,
                      const std::vector<float> mean,
                      const std::vector<float> std) {
  // Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
  #pragma omp parallel for
#else
  #pragma omp parallel for collapse(2)
#endif  // _MSC_VER
  for (int ch = 0; ch < channels; ++ch) {
    for (int px = 0; px < length; ++px) {
      const int idx = step + ch*length + px;
      KERNEL_ASSIGN(out_data[idx], req, (in_data[idx] - mean[ch]) / std[ch]);
    }
  }
}
// Dispatches the CPU Normalize kernel over the runtime dtype and the
// write-request kind. `length` = H*W, `step` = flat offset of the current
// image within a batched buffer (0 for a 3-D input).
inline void NormalizeImpl(const std::vector<TBlob> &inputs,
                          const std::vector<TBlob> &outputs,
                          const std::vector<OpReqType> &req,
                          const int length,
                          const int channels,
                          const int step,
                          const std::vector<float> mean,
                          const std::vector<float> std) {
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      DType* input = inputs[0].dptr<DType>();
      DType* output = outputs[0].dptr<DType>();
      Normalize<DType, req_type>(output, input, length, channels, step,
                                 mean, std);
    });
  });
}
// Forward pass of Normalize: out = (in - mean[c]) / std[c] per channel,
// for (c, h, w) or (n, c, h, w) inputs, on CPU or GPU.
template<typename xpu>
void NormalizeOpForward(const nnvm::NodeAttrs &attrs,
                        const OpContext &ctx,
                        const std::vector<TBlob> &inputs,
                        const std::vector<OpReqType> &req,
                        const std::vector<TBlob> &outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);

  const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed);

  // Mean and Std can be 1 or 3D only. A single value is broadcast to all
  // three slots; shape inference guarantees ndim is 1 or nchannels.
  std::vector<float> mean(3);
  std::vector<float> std(3);
  if (param.mean.ndim() == 1) {
    mean[0] = mean[1] = mean[2] = param.mean[0];
  } else {
    mean[0] = param.mean[0];
    mean[1] = param.mean[1];
    mean[2] = param.mean[2];
  }

  if (param.std.ndim() == 1) {
    std[0] = std[1] = std[2] = param.std[0];
  } else {
    std[0] = param.std[0];
    std[1] = param.std[1];
    std[2] = param.std[2];
  }

  if (std::is_same<xpu, gpu>::value) {
#if MXNET_USE_CUDA
    // GPU path: dedicated CUDA kernel (see NormalizeImplCUDA declaration).
    mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
        int N, C, H, W;
        DType *input = nullptr;
        DType *output = nullptr;
        if (inputs[0].ndim() == 3) {
          // Single image: treat as a batch of one.
          N = 1;
          C = static_cast<int>(inputs[0].shape_[0]);
          H = static_cast<int>(inputs[0].shape_[1]);
          W = static_cast<int>(inputs[0].shape_[2]);
          input = (inputs[0].get<gpu, 3, DType>(s)).dptr_;
          output = (outputs[0].get<gpu, 3, DType>(s)).dptr_;
        } else {
          N = static_cast<int>(inputs[0].shape_[0]);
          C = static_cast<int>(inputs[0].shape_[1]);
          H = static_cast<int>(inputs[0].shape_[2]);
          W = static_cast<int>(inputs[0].shape_[3]);
          input = (inputs[0].get<gpu, 4, DType>(s)).dptr_;
          output = (outputs[0].get<gpu, 4, DType>(s)).dptr_;
        }
        NormalizeImplCUDA<DType>(s, input, output, req_type,
                                 N, C, H, W,
                                 mean[0], mean[1], mean[2],
                                 std[0], std[1], std[2]);
      });
    });
#else
    LOG(FATAL) << "Compile with USE_CUDA=1 to use Normalize operator on GPU.";
#endif  // MXNET_USE_CUDA
  } else if (inputs[0].ndim() == 3) {
    // 3D input (c, h, w)
    const int length = inputs[0].shape_[1] * inputs[0].shape_[2];
    const int channel = static_cast<int>(inputs[0].shape_[0]);
    const int step = 0;
    NormalizeImpl(inputs, outputs, req, length, channel, step, mean, std);
  } else if (inputs[0].ndim() == 4) {
    // 4D input (n, c, h, w): process each image of the batch in parallel.
    const int batch_size = inputs[0].shape_[0];
    const int length = inputs[0].shape_[2] * inputs[0].shape_[3];
    const int channel = static_cast<int>(inputs[0].shape_[1]);
    const int step = channel * length;  // elements per image

#pragma omp parallel for
    for (auto n = 0; n < batch_size; ++n) {
      NormalizeImpl(inputs, outputs, req, length, channel, n*step, mean, std);
    }
  }
}
// Backward function
// CPU backward kernel for Normalize. Since out = (in - mean)/std,
// d(out)/d(in) = 1/std[c]; the mean shift contributes no gradient.
template<typename DType, int req>
inline void NormalizeBackward(const DType* out_grad,
                              DType* in_grad,
                              const int length,
                              const int channels,
                              const int step,
                              const std::vector<float> std) {
  // Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
  #pragma omp parallel for
#else
  #pragma omp parallel for collapse(2)
#endif  // _MSC_VER
  for (int ch = 0; ch < channels; ++ch) {
    for (int px = 0; px < length; ++px) {
      const int idx = step + ch*length + px;
      KERNEL_ASSIGN(in_grad[idx], req, out_grad[idx] * (1.0 / std[ch]));
    }
  }
}
// Dispatches the CPU NormalizeBackward kernel over the runtime dtype and
// the write-request kind. inputs[0] holds out_grad; outputs[0] receives
// in_grad.
inline void NormalizeBackwardImpl(const std::vector<TBlob> &inputs,
                                  const std::vector<TBlob> &outputs,
                                  const std::vector<OpReqType> &req,
                                  const int length,
                                  const int channels,
                                  const int step,
                                  const std::vector<float> std
                                  ) {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
        DType* out_grad = inputs[0].dptr<DType>();
        DType* in_grad = outputs[0].dptr<DType>();
        NormalizeBackward<DType, req_type>(out_grad, in_grad, length,
                                           channels, step, std);
      });
    });
}
// Backward pass of Normalize: in_grad = out_grad / std[c] per channel.
// inputs[0] is out_grad, inputs[1] is the forward input (used only for
// its shape); outputs[0] receives in_grad.
template<typename xpu>
void NormalizeOpBackward(const nnvm::NodeAttrs &attrs,
                         const OpContext &ctx,
                         const std::vector<TBlob> &inputs,
                         const std::vector<OpReqType> &req,
                         const std::vector<TBlob> &outputs) {
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);

  const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed);
  // Std can be 1 or 3D only; a single value is broadcast to all channels.
  std::vector<float> std(3);
  if (param.std.ndim() == 1) {
    std[0] = std[1] = std[2] = param.std[0];
  } else {
    std[0] = param.std[0];
    std[1] = param.std[1];
    std[2] = param.std[2];
  }

  // Note: inputs[0] is out_grad
  const TBlob& in_data = inputs[1];

  if (std::is_same<xpu, gpu>::value) {
#if MXNET_USE_CUDA
    // GPU path: dedicated CUDA kernel (see NormalizeBackwardImplCUDA).
    mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
        int N, C, H, W;
        DType *in_grad = nullptr;
        DType *out_grad = nullptr;
        if (in_data.ndim() == 3) {
          // Single image: treat as a batch of one.
          N = 1;
          C = static_cast<int>(in_data.shape_[0]);
          H = static_cast<int>(in_data.shape_[1]);
          W = static_cast<int>(in_data.shape_[2]);
          out_grad = (inputs[0].get<gpu, 3, DType>(s)).dptr_;
          in_grad = (outputs[0].get<gpu, 3, DType>(s)).dptr_;
        } else {
          N = static_cast<int>(in_data.shape_[0]);
          C = static_cast<int>(in_data.shape_[1]);
          H = static_cast<int>(in_data.shape_[2]);
          W = static_cast<int>(in_data.shape_[3]);
          out_grad = (inputs[0].get<gpu, 4, DType>(s)).dptr_;
          in_grad = (outputs[0].get<gpu, 4, DType>(s)).dptr_;
        }
        NormalizeBackwardImplCUDA<DType>(s, out_grad, in_grad, req_type,
                                         N, C, H, W,
                                         std[0], std[1], std[2]);
      });
    });
#else
    LOG(FATAL) << "Compile with USE_CUDA=1 to use Normalize backward operator on GPU.";
#endif  // MXNET_USE_CUDA
  } else if (in_data.ndim() == 3) {
    // 3D input (c, h, w)
    const int length = in_data.shape_[1] * in_data.shape_[2];
    const int channel = static_cast<int>(in_data.shape_[0]);
    const int step = 0;
    NormalizeBackwardImpl(inputs, outputs, req, length, channel, step, std);
  } else if (in_data.ndim() == 4) {
    // 4D input (n, c, h, w): process each image of the batch in parallel.
    const int batch_size = in_data.shape_[0];
    const int length = in_data.shape_[2] * in_data.shape_[3];
    const int channel = static_cast<int>(in_data.shape_[1]);
    const int step = channel * length;  // elements per image

#pragma omp parallel for
    for (auto n = 0; n < batch_size; ++n) {
      NormalizeBackwardImpl(inputs, outputs, req, length, channel, n*step, std);
    }
  }
}
// Convert a float to DType; the generic version is a plain cast with the
// language's usual truncation/conversion semantics.
template<typename DType>
inline DType saturate_cast(const float& src) {
  return static_cast<DType>(src);
}

// uint8_t overload saturates to the representable range [0, 255] instead
// of wrapping on overflow.
template<>
inline uint8_t saturate_cast(const float& src) {
  const float clamped = std::max(std::min(src, 255.f), 0.f);
  return static_cast<uint8_t>(clamped);
}
// Shape inference shared by single-image operators: requires an
// (h, w, c) input with 1 or 3 channels; the output shape is unchanged.
inline bool ImageShape(const nnvm::NodeAttrs& attrs,
                       mxnet::ShapeVector *in_attrs,
                       mxnet::ShapeVector *out_attrs) {
  mxnet::TShape& dshape = (*in_attrs)[0];
  CHECK_EQ(dshape.ndim(), 3)
      << "Input image must have shape (height, width, channels), but got " << dshape;
  auto nchannels = dshape[dshape.ndim()-1];
  CHECK(nchannels == 3 || nchannels == 1)
    << "The last dimension of input image must be the channel dimension with "
    << "either 1 or 3 elements, but got input with shape " << dshape;
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
  return true;
}
// Reverse a tensor along `axis`, in-place (src == dst) or out-of-place.
// outer = product of dims before the axis, inner = product of dims after
// it, len = extent of the axis itself.
template<typename DType, int axis>
void FlipImpl(const mxnet::TShape &shape, DType *src, DType *dst) {
  int outer = 1, len = shape[axis], inner = 1;
  for (int i = 0; i < axis; ++i) outer *= shape[i];
  for (int i = axis+1; i < shape.ndim(); ++i) inner *= shape[i];

  for (int o = 0; o < outer; ++o) {
    // In-place flip swaps pairs and may skip the middle element of an odd
    // axis; out-of-place must also copy that middle element across.
    const int half = (src == dst) ? len >> 1 : (len + 1) >> 1;
    for (int j = 0; j < half; ++j) {
      int lo = (o*len + j) * inner;
      int hi = lo + (len-(j << 1)-1) * inner;
      for (int k = 0; k < inner; ++k, ++lo, ++hi) {
        const DType tmp = src[lo];
        dst[lo] = src[hi];
        dst[hi] = tmp;
      }
    }
  }
}
// Deterministic horizontal flip: reverses axis 1 (width) of an (h, w, c)
// image. Works in-place when input and output buffers alias; the req
// vector is not consulted.
inline void FlipLeftRight(const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const std::vector<TBlob> &inputs,
                          const std::vector<OpReqType> &req,
                          const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    FlipImpl<DType, 1>(inputs[0].shape_, inputs[0].dptr<DType>(),
                       outputs[0].dptr<DType>());
  });
}
// Deterministic vertical flip: reverses axis 0 (height) of an (h, w, c)
// image. Works in-place when input and output buffers alias; the req
// vector is not consulted.
inline void FlipTopBottom(const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const std::vector<TBlob> &inputs,
                          const std::vector<OpReqType> &req,
                          const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    FlipImpl<DType, 0>(inputs[0].shape_, inputs[0].dptr<DType>(),
                       outputs[0].dptr<DType>());
  });
}
// Parameter struct shared by the random flip operators.
struct RandomFlipParam : public dmlc::Parameter<RandomFlipParam> {
  float p;  // probability of applying the flip
  DMLC_DECLARE_PARAMETER(RandomFlipParam) {
    DMLC_DECLARE_FIELD(p)
    .set_default(0.5f)
    .describe("The probablity of flipping the image.");
  }
};
// Randomly flips an (h, w, c) image horizontally with probability param.p;
// otherwise passes the input through unchanged.
inline void RandomFlipLeftRight(
    const nnvm::NodeAttrs &attrs,
    const OpContext &ctx,
    const std::vector<TBlob> &inputs,
    const std::vector<OpReqType> &req,
    const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomFlipParam &param = nnvm::get<RandomFlipParam>(attrs.parsed);
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s);
  // BUG FIX: the draw must be uniform on [0, 1) for param.p to be the
  // actual flip probability. The previous std::normal_distribution(0, 1)
  // made the flip probability Phi(p) (e.g. ~0.69 for p = 0.5).
  std::uniform_real_distribution<float> dist(0, 1);
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    if (dist(prnd->GetRndEngine()) > param.p) {
      // No flip drawn: copy through (skip when operating in-place).
      if (outputs[0].dptr_ != inputs[0].dptr_) {
        std::memcpy(outputs[0].dptr_, inputs[0].dptr_, inputs[0].Size() * sizeof(DType));
      }
    } else {
      // Flip along axis 1 (width).
      FlipImpl<DType, 1>(inputs[0].shape_, inputs[0].dptr<DType>(),
                         outputs[0].dptr<DType>());
    }
  });
}
// Flip the image vertically with probability `p`; otherwise pass it through.
inline void RandomFlipTopBottom(
    const nnvm::NodeAttrs &attrs,
    const OpContext &ctx,
    const std::vector<TBlob> &inputs,
    const std::vector<OpReqType> &req,
    const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomFlipParam &param = nnvm::get<RandomFlipParam>(attrs.parsed);
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s);
  // BUGFIX: draw from U[0,1) instead of N(0,1) so the flip really happens
  // with probability `p` (see RandomFlipLeftRight for the same fix).
  std::uniform_real_distribution<float> dist(0.f, 1.f);
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    if (dist(prnd->GetRndEngine()) > param.p) {
      // No flip this time: copy through unless the op runs in place.
      if (outputs[0].dptr_ != inputs[0].dptr_) {
        std::memcpy(outputs[0].dptr_, inputs[0].dptr_, inputs[0].Size() * sizeof(DType));
      }
    } else {
      // Flip along axis 0 (height).
      FlipImpl<DType, 0>(inputs[0].shape_, inputs[0].dptr<DType>(),
                         outputs[0].dptr<DType>());
    }
  });
}
// Shared parameter block for the random enhancement ops (brightness,
// contrast, saturation, hue): a factor drawn uniformly from
// [min_factor, max_factor].
struct RandomEnhanceParam : public dmlc::Parameter<RandomEnhanceParam> {
  float min_factor;  // lower bound of the enhancement factor (>= 0)
  float max_factor;  // upper bound of the enhancement factor (>= 0)
  DMLC_DECLARE_PARAMETER(RandomEnhanceParam) {
    DMLC_DECLARE_FIELD(min_factor)
    .set_lower_bound(0.0)
    .describe("Minimum factor.");
    DMLC_DECLARE_FIELD(max_factor)
    .set_lower_bound(0.0)
    .describe("Maximum factor.");
  }
};
inline void AdjustBrightnessImpl(const float& alpha_b,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
int length = inputs[0].Size();
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
DType* output = outputs[0].dptr<DType>();
DType* input = inputs[0].dptr<DType>();
for (int l = 0; l < length; ++l) {
float val = static_cast<float>(input[l]) * alpha_b;
output[l] = saturate_cast<DType>(val);
}
});
}
// Draw a brightness factor uniformly from [min_factor, max_factor] and apply it.
inline void RandomBrightness(const nnvm::NodeAttrs &attrs,
                             const OpContext &ctx,
                             const std::vector<TBlob> &inputs,
                             const std::vector<OpReqType> &req,
                             const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomEnhanceParam &cfg = nnvm::get<RandomEnhanceParam>(attrs.parsed);
  Stream<cpu> *stream = ctx.get_stream<cpu>();
  Random<cpu> *rng = ctx.requested[0].get_random<cpu, float>(stream);
  std::uniform_real_distribution<float> factor(cfg.min_factor, cfg.max_factor);
  AdjustBrightnessImpl(factor(rng->GetRndEngine()), ctx, inputs, req, outputs);
}
inline void AdjustContrastImpl(const float& alpha_c,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
static const float coef[] = { 0.299f, 0.587f, 0.114f };
int length = inputs[0].shape_[0] * inputs[0].shape_[1];
int nchannels = inputs[0].shape_[2];
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
DType* output = outputs[0].dptr<DType>();
DType* input = inputs[0].dptr<DType>();
float sum = 0.f;
if (nchannels > 1) {
for (int l = 0; l < length; ++l) {
for (int c = 0; c < 3; ++c) sum += input[l*3 + c] * coef[c];
}
} else {
for (int l = 0; l < length; ++l) sum += input[l];
}
float gray_mean = sum / static_cast<float>(length);
float beta = (1 - alpha_c) * gray_mean;
for (int l = 0; l < length * nchannels; ++l) {
float val = input[l] * alpha_c + beta;
output[l] = saturate_cast<DType>(val);
}
});
}
// Draw a contrast factor uniformly from [min_factor, max_factor] and apply it.
inline void RandomContrast(const nnvm::NodeAttrs &attrs,
                           const OpContext &ctx,
                           const std::vector<TBlob> &inputs,
                           const std::vector<OpReqType> &req,
                           const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomEnhanceParam &cfg = nnvm::get<RandomEnhanceParam>(attrs.parsed);
  Stream<cpu> *stream = ctx.get_stream<cpu>();
  Random<cpu> *rng = ctx.requested[0].get_random<cpu, real_t>(stream);
  std::uniform_real_distribution<float> factor(cfg.min_factor, cfg.max_factor);
  AdjustContrastImpl(factor(rng->GetRndEngine()), ctx, inputs, req, outputs);
}
// Saturation adjustment: blend each pixel with its BT.601 luma value,
// out = gray * (1 - alpha_s) + in * alpha_s. alpha_s = 1 is identity,
// alpha_s = 0 yields grayscale. Single-channel input is copied through.
inline void AdjustSaturationImpl(const float& alpha_s,
                                 const OpContext &ctx,
                                 const std::vector<TBlob> &inputs,
                                 const std::vector<OpReqType> &req,
                                 const std::vector<TBlob> &outputs) {
  static const float coef[] = { 0.299f, 0.587f, 0.114f };
  int length = inputs[0].shape_[0] * inputs[0].shape_[1];
  int nchannels = inputs[0].shape_[2];
  float alpha_o = 1.f - alpha_s;
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    DType* output = outputs[0].dptr<DType>();
    DType* input = inputs[0].dptr<DType>();
    if (nchannels == 1) {
      // Saturation has no effect on grayscale; pass through.
      for (int l = 0; l < length; ++l) output[l] = input[l];
      return;
    }
    for (int l = 0; l < length; ++l) {
      float gray = 0.f;
      for (int c = 0; c < 3; ++c) {
        // BUGFIX: accumulate the weighted sum. The original used `=`, which
        // discarded the R and G terms and kept only the blue contribution.
        gray += input[l*3 + c] * coef[c];
      }
      gray *= alpha_o;
      for (int c = 0; c < 3; ++c) {
        float val = gray + input[l*3 + c] * alpha_s;
        output[l*3 + c] = saturate_cast<DType>(val);
      }
    }
  });
}
// Draw a saturation factor uniformly from [min_factor, max_factor] and apply it.
inline void RandomSaturation(const nnvm::NodeAttrs &attrs,
                             const OpContext &ctx,
                             const std::vector<TBlob> &inputs,
                             const std::vector<OpReqType> &req,
                             const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomEnhanceParam &cfg = nnvm::get<RandomEnhanceParam>(attrs.parsed);
  Stream<cpu> *stream = ctx.get_stream<cpu>();
  Random<cpu> *rng = ctx.requested[0].get_random<cpu, real_t>(stream);
  std::uniform_real_distribution<float> factor(cfg.min_factor, cfg.max_factor);
  AdjustSaturationImpl(factor(rng->GetRndEngine()), ctx, inputs, req, outputs);
}
// Convert one RGB pixel (each component in [0,255]) to HLS:
// hue in [0,360), lightness and saturation in [0,1].
// Near-gray pixels (max-min below float epsilon) get h = s = 0.
inline void RGB2HLSConvert(const float& src_r,
                           const float& src_g,
                           const float& src_b,
                           float *dst_h,
                           float *dst_l,
                           float *dst_s) {
  const float r = src_r / 255.f;
  const float g = src_g / 255.f;
  const float b = src_b / 255.f;
  const float vmax = std::fmax(r, std::fmax(g, b));
  const float vmin = std::fmin(r, std::fmin(g, b));
  float diff = vmax - vmin;
  const float l = (vmax + vmin) * 0.5f;
  float h = 0.f;
  float s = 0.f;
  if (diff > std::numeric_limits<float>::epsilon()) {
    s = (l < 0.5f) ? diff / (vmax + vmin)
                   : diff / (2.0f - vmax - vmin);
    diff = 60.f / diff;
    if (vmax == r) {
      h = (g - b) * diff;
    } else if (vmax == g) {
      h = (b - r) * diff + 120.f;
    } else {
      h = (r - g) * diff + 240.f;
    }
    if (h < 0.f) h += 360.f;
  }
  *dst_h = h;
  *dst_l = l;
  *dst_s = s;
}
// Convert one HLS pixel (h in degrees, l and s in [0,1]) back to RGB with
// components scaled to [0,255]. Zero saturation yields an achromatic gray.
inline void HLS2RGBConvert(const float& src_h,
                           const float& src_l,
                           const float& src_s,
                           float *dst_r,
                           float *dst_g,
                           float *dst_b) {
  // Per 60-degree sector: which tab[] entry feeds (b, g, r).
  static const int c_HlsSectorData[6][3] = {
    { 1, 3, 0 },
    { 1, 0, 2 },
    { 3, 0, 1 },
    { 0, 2, 1 },
    { 0, 1, 3 },
    { 2, 1, 0 }
  };
  float h = src_h;
  const float l = src_l;
  const float s = src_s;
  float b = l;
  float g = l;
  float r = l;
  if (s != 0) {
    float p2 = (l <= 0.5f) ? l * (1 + s) : l + s - l * s;
    const float p1 = 2 * l - p2;
    h *= 1.f / 60.f;
    while (h < 0) h += 6;
    while (h >= 6) h -= 6;  // h + 6 >= 6 holds true for some h < 0
    const int sector = static_cast<int>(h);
    h -= sector;
    float tab[4];
    tab[0] = p2;
    tab[1] = p1;
    tab[2] = p1 + (p2 - p1) * (1 - h);
    tab[3] = p1 + (p2 - p1) * h;
    b = tab[c_HlsSectorData[sector][0]];
    g = tab[c_HlsSectorData[sector][1]];
    r = tab[c_HlsSectorData[sector][2]];
  }
  *dst_b = b * 255.f;
  *dst_g = g * 255.f;
  *dst_r = r * 255.f;
}
// Rotate each pixel's hue by alpha * 360 degrees via an RGB->HLS->RGB round
// trip. Grayscale input (1 channel) has no hue and is copied through.
inline void AdjustHueImpl(float alpha,
                          const OpContext &ctx,
                          const std::vector<TBlob> &inputs,
                          const std::vector<OpReqType> &req,
                          const std::vector<TBlob> &outputs) {
  int length = inputs[0].shape_[0] * inputs[0].shape_[1];
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    DType* input = inputs[0].dptr<DType>();
    DType* output = outputs[0].dptr<DType>();
    if (inputs[0].shape_[2] == 1) {
      // CONSISTENCY FIX: mirror AdjustSaturationImpl -- copy the grayscale
      // input through instead of returning with outputs[0] possibly
      // unwritten when the op is not running in place.
      for (int l = 0; l < length; ++l) output[l] = input[l];
      return;
    }
    for (int i = 0; i < length; ++i) {
      float h, l, s;
      float r = static_cast<float>(*(input++));
      float g = static_cast<float>(*(input++));
      float b = static_cast<float>(*(input++));
      RGB2HLSConvert(r, g, b, &h, &l, &s);
      h += alpha * 360.f;
      HLS2RGBConvert(h, l, s, &r, &g, &b);
      *(output++) = saturate_cast<DType>(r);
      *(output++) = saturate_cast<DType>(g);
      *(output++) = saturate_cast<DType>(b);
    }
  });
}
// Draw a hue shift uniformly from [min_factor, max_factor] and apply it.
inline void RandomHue(const nnvm::NodeAttrs &attrs,
                      const OpContext &ctx,
                      const std::vector<TBlob> &inputs,
                      const std::vector<OpReqType> &req,
                      const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomEnhanceParam &cfg = nnvm::get<RandomEnhanceParam>(attrs.parsed);
  Stream<cpu> *stream = ctx.get_stream<cpu>();
  Random<cpu> *rng = ctx.requested[0].get_random<cpu, real_t>(stream);
  std::uniform_real_distribution<float> shift(cfg.min_factor, cfg.max_factor);
  AdjustHueImpl(shift(rng->GetRndEngine()), ctx, inputs, req, outputs);
}
// Parameter block for RandomColorJitter: per-property jitter magnitudes.
// A magnitude of 0 disables that property's jitter.
struct RandomColorJitterParam : public dmlc::Parameter<RandomColorJitterParam> {
  float brightness;  // factor drawn from 1 +/- brightness
  float contrast;    // factor drawn from 1 +/- contrast
  float saturation;  // factor drawn from 1 +/- saturation
  float hue;         // shift drawn from +/- hue
  DMLC_DECLARE_PARAMETER(RandomColorJitterParam) {
    DMLC_DECLARE_FIELD(brightness)
    .describe("How much to jitter brightness.");
    DMLC_DECLARE_FIELD(contrast)
    .describe("How much to jitter contrast.");
    DMLC_DECLARE_FIELD(saturation)
    .describe("How much to jitter saturation.");
    DMLC_DECLARE_FIELD(hue)
    .describe("How much to jitter hue.");
  }
};
// Apply brightness/contrast/saturation/hue jitter in a randomly shuffled
// order. Each enabled transform draws its own factor from the shared engine,
// so the RNG consumption order depends on the shuffle.
inline void RandomColorJitter(const nnvm::NodeAttrs &attrs,
                              const OpContext &ctx,
                              const std::vector<TBlob> &inputs,
                              const std::vector<OpReqType> &req,
                              const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomColorJitterParam &param = nnvm::get<RandomColorJitterParam>(attrs.parsed);
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s);
  int order[4] = {0, 1, 2, 3};
  std::shuffle(order, order + 4, prnd->GetRndEngine());
  // flag == true once outputs[0] holds the partial result; later transforms
  // then read from outputs[0] instead of the untouched inputs[0].
  bool flag = false;
  for (int i = 0; i < 4; ++i) {
    switch (order[i]) {
      case 0:
        if (param.brightness > 0) {
          float alpha_b = 1.0 + std::uniform_real_distribution<float>(
              -param.brightness, param.brightness)(prnd->GetRndEngine());
          AdjustBrightnessImpl(alpha_b, ctx, flag ? outputs : inputs, req, outputs);
          flag = true;
        }
        break;
      case 1:
        if (param.contrast > 0) {
          float alpha_c = 1.0 + std::uniform_real_distribution<float>(
              -param.contrast, param.contrast)(prnd->GetRndEngine());
          AdjustContrastImpl(alpha_c, ctx, flag ? outputs : inputs, req, outputs);
          flag = true;
        }
        break;
      case 2:
        if (param.saturation > 0) {
          float alpha_s = 1.f + std::uniform_real_distribution<float>(
              -param.saturation, param.saturation)(prnd->GetRndEngine());
          AdjustSaturationImpl(alpha_s, ctx, flag ? outputs : inputs, req, outputs);
          flag = true;
        }
        break;
      case 3:
        if (param.hue > 0) {
          float alpha_h = std::uniform_real_distribution<float>(
              -param.hue, param.hue)(prnd->GetRndEngine());
          AdjustHueImpl(alpha_h, ctx, flag ? outputs : inputs, req, outputs);
          flag = true;
        }
        break;
    }
  }
}
// Parameter block for the deterministic AdjustLighting op.
struct AdjustLightingParam : public dmlc::Parameter<AdjustLightingParam> {
  mxnet::Tuple<float> alpha;  // three PCA-noise coefficients (R, G, B)
  DMLC_DECLARE_PARAMETER(AdjustLightingParam) {
    DMLC_DECLARE_FIELD(alpha)
    .describe("The lighting alphas for the R, G, B channels.");
  }
};
// Parameter block for the RandomLighting op.
struct RandomLightingParam : public dmlc::Parameter<RandomLightingParam> {
  float alpha_std;  // std-dev of the normal noise used to draw the alphas
  DMLC_DECLARE_PARAMETER(RandomLightingParam) {
    DMLC_DECLARE_FIELD(alpha_std)
    .set_default(0.05)
    .describe("Level of the lighting noise.");
  }
};
// Add PCA-based lighting noise: a per-channel offset eig * alpha is added to
// every pixel. NOTE(review): the eig constants look like scaled eigenvectors
// of an RGB covariance matrix (AlexNet-style lighting augmentation) -- the
// exact provenance is not visible here; confirm before relying on it.
// Grayscale input (1 channel) is copied through unchanged.
inline void AdjustLightingImpl(const mxnet::Tuple<float>& alpha,
                               const OpContext &ctx,
                               const std::vector<TBlob> &inputs,
                               const std::vector<OpReqType> &req,
                               const std::vector<TBlob> &outputs) {
  static const float eig[3][3] = {
    { 55.46 * -0.5675, 4.794 * 0.7192, 1.148 * 0.4009 },
    { 55.46 * -0.5808, 4.794 * -0.0045, 1.148 * -0.8140 },
    { 55.46 * -0.5836, 4.794 * -0.6948, 1.148 * 0.4203 }
  };
  int length = inputs[0].shape_[0] * inputs[0].shape_[1];
  int channels = inputs[0].shape_[2];
  // Per-channel offsets: eig rows dotted with the alpha coefficients.
  float pca_r = eig[0][0] * alpha[0] + eig[0][1] * alpha[1] + eig[0][2] * alpha[2];
  float pca_g = eig[1][0] * alpha[0] + eig[1][1] * alpha[1] + eig[1][2] * alpha[2];
  float pca_b = eig[2][0] * alpha[0] + eig[2][1] * alpha[1] + eig[2][2] * alpha[2];
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    DType* output = outputs[0].dptr<DType>();
    DType* input = inputs[0].dptr<DType>();
    if (channels == 1) {
      // CONSISTENCY FIX: copy through like AdjustSaturationImpl instead of
      // returning with outputs[0] possibly unwritten when not in place.
      for (int i = 0; i < length; i++) output[i] = input[i];
      return;
    }
    for (int i = 0; i < length; i++) {
      int base_ind = 3 * i;
      float in_r = static_cast<float>(input[base_ind]);
      float in_g = static_cast<float>(input[base_ind + 1]);
      float in_b = static_cast<float>(input[base_ind + 2]);
      output[base_ind] = saturate_cast<DType>(in_r + pca_r);
      output[base_ind + 1] = saturate_cast<DType>(in_g + pca_g);
      output[base_ind + 2] = saturate_cast<DType>(in_b + pca_b);
    }
  });
}
// Deterministic lighting adjustment: alphas come straight from the parameter.
inline void AdjustLighting(const nnvm::NodeAttrs &attrs,
                           const OpContext &ctx,
                           const std::vector<TBlob> &inputs,
                           const std::vector<OpReqType> &req,
                           const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const AdjustLightingParam &cfg = nnvm::get<AdjustLightingParam>(attrs.parsed);
  AdjustLightingImpl(cfg.alpha, ctx, inputs, req, outputs);
}
// Draw independent N(0, alpha_std) alphas for R, G, B (in that order, so the
// RNG stream is consumed identically to before) and apply lighting noise.
inline void RandomLighting(const nnvm::NodeAttrs &attrs,
                           const OpContext &ctx,
                           const std::vector<TBlob> &inputs,
                           const std::vector<OpReqType> &req,
                           const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomLightingParam &cfg = nnvm::get<RandomLightingParam>(attrs.parsed);
  Stream<cpu> *stream = ctx.get_stream<cpu>();
  Random<cpu> *rng = ctx.requested[0].get_random<cpu, float>(stream);
  std::normal_distribution<float> noise(0, cfg.alpha_std);
  const float a_r = noise(rng->GetRndEngine());
  const float a_g = noise(rng->GetRndEngine());
  const float a_b = noise(rng->GetRndEngine());
  AdjustLightingImpl({a_r, a_g, a_b}, ctx, inputs, req, outputs);
}
// Registers a deterministic image-augmentation operator: one input, one
// output, in-place capable, shape/type pass-through, identity ("_copy")
// gradient.
#define MXNET_REGISTER_IMAGE_AUG_OP(name) \
  NNVM_REGISTER_OP(name) \
  .set_num_inputs(1) \
  .set_num_outputs(1) \
  .set_attr<nnvm::FInplaceOption>("FInplaceOption", \
    [](const NodeAttrs& attrs){ \
      return std::vector<std::pair<int, int> >{{0, 0}}; \
    }) \
  .set_attr<mxnet::FInferShape>("FInferShape", ImageShape) \
  .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>) \
  .set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseNone{ "_copy" }) \
  .add_argument("data", "NDArray-or-Symbol", "The input.")
// Same as above, plus a kRandom resource request for operators that draw
// random numbers from the per-op RNG.
#define MXNET_REGISTER_IMAGE_RND_AUG_OP(name) \
  MXNET_REGISTER_IMAGE_AUG_OP(name) \
  .set_attr<FResourceRequest>("FResourceRequest", \
    [](const NodeAttrs& attrs) { \
      return std::vector<ResourceRequest>{ResourceRequest::kRandom}; \
    })
} // namespace image
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
|
wyhash.h | /**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#ifndef wyhash_version_2
#define wyhash_version_2
#include <stdint.h>
#include <string.h>
#if defined(_MSC_VER) && defined(_M_X64)
#include <intrin.h>
#pragma intrinsic(_umul128)
#endif
/* The five 64-bit mixing constants of wyhash v2. */
static const uint64_t _wyp0 = 0xa0761d6478bd642full, _wyp1 = 0xe7037ed1a0b428dbull, _wyp2 = 0x8ebc6af09c88c6e3ull,
                      _wyp3 = 0x589965cc75374cc3ull, _wyp4 = 0x1d8e4e27c47d124full;
/* The wyhash "mum" primitive: 64x64 -> 128-bit multiply, folded to 64 bits
 * as (high ^ low). Three paths: native __uint128_t, the MSVC x64 intrinsic,
 * and a portable 32-bit-limb schoolbook fallback. */
static inline uint64_t _wymum(uint64_t A, uint64_t B)
{
#ifdef __SIZEOF_INT128__
  __uint128_t r = A;
  r *= B;
  return (r >> 64) ^ r;
#elif defined(_MSC_VER) && defined(_M_X64)
  A = _umul128(A, B, &B);
  return A ^ B;
#else
  /* Schoolbook partial products; `c` accumulates the carries that must
   * propagate into the high half. */
  uint64_t ha = A >> 32, hb = B >> 32, la = (uint32_t)A, lb = (uint32_t)B, hi, lo;
  uint64_t rh = ha * hb, rm0 = ha * lb, rm1 = hb * la, rl = la * lb, t = rl + (rm0 << 32), c = t < rl;
  lo = t + (rm1 << 32);
  c += lo < t;
  hi = rh + (rm0 >> 32) + (rm1 >> 32) + c;
  return hi ^ lo;
#endif
}
/* Mix two lanes with the seed and the first constant pair, then mum. */
static inline uint64_t _wymix0(uint64_t A, uint64_t B, uint64_t seed)
{
  const uint64_t x = A ^ seed ^ _wyp0;
  const uint64_t y = B ^ seed ^ _wyp1;
  return _wymum(x, y);
}
/* Mix two lanes with the seed and the second constant pair, then mum. */
static inline uint64_t _wymix1(uint64_t A, uint64_t B, uint64_t seed)
{
  const uint64_t x = A ^ seed ^ _wyp2;
  const uint64_t y = B ^ seed ^ _wyp3;
  return _wymum(x, y);
}
/* Read 1 byte via memcpy (alignment/aliasing safe) and widen to 64 bits. */
static inline uint64_t _wyr08(const uint8_t* p)
{
  uint8_t byte;
  memcpy(&byte, p, sizeof byte);
  return byte;
}
/* Read 2 bytes as a native-endian uint16 via memcpy and widen to 64 bits. */
static inline uint64_t _wyr16(const uint8_t* p)
{
  uint16_t half;
  memcpy(&half, p, sizeof half);
  return half;
}
/* Read 4 bytes as a native-endian uint32 via memcpy and widen to 64 bits. */
static inline uint64_t _wyr32(const uint8_t* p)
{
  uint32_t word;
  memcpy(&word, p, sizeof word);
  return word;
}
/* Read 8 bytes as a native-endian uint64 via memcpy. */
static inline uint64_t _wyr64(const uint8_t* p)
{
  uint64_t word;
  memcpy(&word, p, sizeof word);
  return word;
}
/* Compose a 64-bit value from two 32-bit reads, first 4 bytes in the high
 * half. NOTE(review): the double-underscore name is reserved for the
 * implementation in standard C, but it is this header's public-ish spelling,
 * so it is kept unchanged. */
static inline uint64_t __wyr64(const uint8_t* p)
{
  const uint64_t hi = _wyr32(p);
  const uint64_t lo = _wyr32(p + 4);
  return (hi << 32) | lo;
}
// to avoid attacks, seed should be initialized as a secret
/* wyhash v2: hash `len` bytes at `key`. Bulk phase consumes 32-byte blocks
 * with two independent mixes XORed together; the remaining 0..31 bytes are
 * handled by one switch case per residual length, each packing the leftover
 * bytes into at most four 64-bit lanes. `len1` carries the length into the
 * final mix so inputs of different lengths hash differently. Note the tail
 * uses __wyr64 (two 32-bit reads) while the bulk loop uses _wyr64 (native
 * 64-bit read). */
static inline uint64_t wyhash(const void* key, uint64_t len, uint64_t seed)
{
  const uint8_t* p = (const uint8_t*)key;
  uint64_t i, len1 = len;
  for (i = 0; i + 32 <= len; i += 32, p += 32) {
    seed = _wymix0(_wyr64(p), _wyr64(p + 8), seed) ^ _wymix1(_wyr64(p + 16), _wyr64(p + 24), seed);
  }
  /* Tail dispatch on the residual length. */
  switch (len & 31) {
    case 0:
      len1 = _wymix0(len1, 0, seed);
      break;
    case 1:
      seed = _wymix0(_wyr08(p), 0, seed);
      break;
    case 2:
      seed = _wymix0(_wyr16(p), 0, seed);
      break;
    case 3:
      seed = _wymix0((_wyr16(p) << 8) | _wyr08(p + 2), 0, seed);
      break;
    case 4:
      seed = _wymix0(_wyr32(p), 0, seed);
      break;
    case 5:
      seed = _wymix0((_wyr32(p) << 8) | _wyr08(p + 4), 0, seed);
      break;
    case 6:
      seed = _wymix0((_wyr32(p) << 16) | _wyr16(p + 4), 0, seed);
      break;
    case 7:
      seed = _wymix0((_wyr32(p) << 24) | (_wyr16(p + 4) << 8) | _wyr08(p + 6), 0, seed);
      break;
    case 8:
      seed = _wymix0(__wyr64(p), 0, seed);
      break;
    case 9:
      seed = _wymix0(__wyr64(p), _wyr08(p + 8), seed);
      break;
    case 10:
      seed = _wymix0(__wyr64(p), _wyr16(p + 8), seed);
      break;
    case 11:
      seed = _wymix0(__wyr64(p), (_wyr16(p + 8) << 8) | _wyr08(p + 8 + 2), seed);
      break;
    case 12:
      seed = _wymix0(__wyr64(p), _wyr32(p + 8), seed);
      break;
    case 13:
      seed = _wymix0(__wyr64(p), (_wyr32(p + 8) << 8) | _wyr08(p + 8 + 4), seed);
      break;
    case 14:
      seed = _wymix0(__wyr64(p), (_wyr32(p + 8) << 16) | _wyr16(p + 8 + 4), seed);
      break;
    case 15:
      seed = _wymix0(__wyr64(p), (_wyr32(p + 8) << 24) | (_wyr16(p + 8 + 4) << 8) | _wyr08(p + 8 + 6), seed);
      break;
    case 16:
      seed = _wymix0(__wyr64(p), __wyr64(p + 8), seed);
      break;
    case 17:
      seed = _wymix0(__wyr64(p), __wyr64(p + 8), seed) ^ _wymix1(_wyr08(p + 16), 0, seed);
      break;
    case 18:
      seed = _wymix0(__wyr64(p), __wyr64(p + 8), seed) ^ _wymix1(_wyr16(p + 16), 0, seed);
      break;
    case 19:
      seed = _wymix0(__wyr64(p), __wyr64(p + 8), seed) ^ _wymix1((_wyr16(p + 16) << 8) | _wyr08(p + 16 + 2), 0, seed);
      break;
    case 20:
      seed = _wymix0(__wyr64(p), __wyr64(p + 8), seed) ^ _wymix1(_wyr32(p + 16), 0, seed);
      break;
    case 21:
      seed = _wymix0(__wyr64(p), __wyr64(p + 8), seed) ^ _wymix1((_wyr32(p + 16) << 8) | _wyr08(p + 16 + 4), 0, seed);
      break;
    case 22:
      seed = _wymix0(__wyr64(p), __wyr64(p + 8), seed) ^ _wymix1((_wyr32(p + 16) << 16) | _wyr16(p + 16 + 4), 0, seed);
      break;
    case 23:
      seed = _wymix0(__wyr64(p), __wyr64(p + 8), seed) ^
             _wymix1((_wyr32(p + 16) << 24) | (_wyr16(p + 16 + 4) << 8) | _wyr08(p + 16 + 6), 0, seed);
      break;
    case 24:
      seed = _wymix0(__wyr64(p), __wyr64(p + 8), seed) ^ _wymix1(__wyr64(p + 16), 0, seed);
      break;
    case 25:
      seed = _wymix0(__wyr64(p), __wyr64(p + 8), seed) ^ _wymix1(__wyr64(p + 16), _wyr08(p + 24), seed);
      break;
    case 26:
      seed = _wymix0(__wyr64(p), __wyr64(p + 8), seed) ^ _wymix1(__wyr64(p + 16), _wyr16(p + 24), seed);
      break;
    case 27:
      seed = _wymix0(__wyr64(p), __wyr64(p + 8), seed) ^
             _wymix1(__wyr64(p + 16), (_wyr16(p + 24) << 8) | _wyr08(p + 24 + 2), seed);
      break;
    case 28:
      seed = _wymix0(__wyr64(p), __wyr64(p + 8), seed) ^ _wymix1(__wyr64(p + 16), _wyr32(p + 24), seed);
      break;
    case 29:
      seed = _wymix0(__wyr64(p), __wyr64(p + 8), seed) ^
             _wymix1(__wyr64(p + 16), (_wyr32(p + 24) << 8) | _wyr08(p + 24 + 4), seed);
      break;
    case 30:
      seed = _wymix0(__wyr64(p), __wyr64(p + 8), seed) ^
             _wymix1(__wyr64(p + 16), (_wyr32(p + 24) << 16) | _wyr16(p + 24 + 4), seed);
      break;
    case 31:
      seed = _wymix0(__wyr64(p), __wyr64(p + 8), seed) ^
             _wymix1(__wyr64(p + 16), (_wyr32(p + 24) << 24) | (_wyr16(p + 24 + 4) << 8) | _wyr08(p + 24 + 6), seed);
      break;
  }
  /* Final mix: fold in the (possibly pre-mixed) length. */
  return _wymum(seed ^ len1, _wyp4);
}
/* Hash a pair of 64-bit values: one salted mum, then a finalizing mum. */
static inline uint64_t wyhash64(uint64_t A, uint64_t B)
{
  const uint64_t mixed = _wymum(A ^ _wyp0, B ^ _wyp1);
  return _wymum(mixed, _wyp2);
}
/* Map the low 52 bits of r onto a double in [0, 1) with 2^-52 spacing. */
static inline double wy2u01(uint64_t r)
{
  const double scale = 1.0 / (1ull << 52);
  return scale * (double)(r & 0x000fffffffffffffull);
}
/* Sum of three 16-bit fields of r, scaled and centered: an Irwin-Hall-style
 * approximation of N(0,1) with range [-3, 3). */
static inline float wy2gau(uint64_t r)
{
  const float scale = 1.0f / (1ull << 15);
  const float sum = (float)(((r >> 16) & 0xffff) + ((r >> 32) & 0xffff) + (r >> 48));
  return sum * scale - 3.0f;
}
/* PRNG step: advance the state by a Weyl increment, then mum the new state
 * against a salted copy of itself. */
static inline uint64_t wyrand(uint64_t* seed)
{
  const uint64_t s = (*seed += _wyp0);
  return _wymum(s ^ _wyp1, s);
}
/* Global state for the non-reentrant wygrand()/wysrand() API. */
static uint64_t _wyrand_seed = 0;
#define WYRAND_MAX 0xffffffffffffffffull
/* Seed the global wygrand() generator. */
static inline void wysrand(uint64_t seed)
{
  _wyrand_seed = seed;
}
/* Global-state variant of wyrand(). When compiled with OpenMP, the seed
 * increment and read happen in one atomic capture so concurrent callers
 * observe distinct states; without OpenMP the pragma is absent and the
 * update is unsynchronized. */
static inline uint64_t wygrand(void)
{
  uint64_t s;
#if defined(_OPENMP)
#pragma omp atomic capture
#endif
  {
    _wyrand_seed += _wyp0;
    s = _wyrand_seed;
  }
  return _wymum(s ^ _wyp1, s);
}
#endif
|
algorithms.h | #ifndef __algorithms_h__
#define __algorithms_h__
#include <algorithm>
#include <vector>
#include <cstdint>
#include <cstring>
#include <omp.h>
#include <immintrin.h>
// One stable LSD counting-sort pass: orders [begin, end) into [output, ...)
// by digit k of each element cast to Key, where a digit is b bits wide
// (digit = (Key(x) >> (k*b)) & ((1<<b)-1)). `output_end` is kept for
// interface compatibility; the destination must hold end - begin elements.
template <typename Key, typename T>
static void counting_sort(
    typename std::vector<T>::iterator begin,
    typename std::vector<T>::iterator end,
    typename std::vector<T>::iterator output,
    typename std::vector<T>::iterator output_end,
    unsigned b,
    unsigned k)
{
    (void)output_end;  // unused; documents the destination range
    const unsigned mask = (1u << b) - 1u;
    // std::vector instead of `unsigned counts[1<<b]`: VLAs are not standard
    // C++ and a large b would overflow the stack.
    std::vector<unsigned> counts(std::size_t(1) << b, 0u);
    // Histogram of digit values.
    for (auto it = begin; it < end; it++) {
        unsigned idx = ((Key)(*it) >> (k*b)) & mask;
        counts[idx]++;
    }
    // Inclusive prefix sums: counts[i] becomes the end offset of bucket i.
    for (unsigned i = 1; i < (1u << b); i++) {
        counts[i] += counts[i-1];
    }
    // Walk backwards so equal digits keep their relative order (stability).
    // Loop shape avoids the original `it >= begin` comparison, which
    // evaluated begin-1 (UB) on an empty range.
    for (auto it = end; it != begin;) {
        --it;
        unsigned idx = ((Key)(*it) >> (k*b)) & mask;
        unsigned pos = --counts[idx];
        *(output + pos) = *it;
    }
}
// Parallel stable counting-sort pass over digit k (b bits per digit):
// per-thread histograms, a serial exclusive scan over (bucket, thread)
// pairs, then a parallel scatter using per-thread start offsets.
template <typename Key, typename T>
static void parallel_counting_sort(
    typename std::vector<T>::iterator begin,
    typename std::vector<T>::iterator end,
    typename std::vector<T>::iterator output,
    typename std::vector<T>::iterator output_end,
    unsigned b,
    unsigned k)
{
#define NUM_SORT_THREADS 4
    const unsigned mask = (1u << b) - 1u;
    // Heap-allocated tables: the original 2-D VLAs are non-standard C++ and
    // NUM_SORT_THREADS * (1<<b) unsigneds can overflow the stack.
    std::vector<std::vector<unsigned>> counts(
        NUM_SORT_THREADS, std::vector<unsigned>(std::size_t(1) << b, 0u));
    std::vector<std::vector<unsigned>> starts(
        NUM_SORT_THREADS, std::vector<unsigned>(std::size_t(1) << b, 0u));
    #pragma omp parallel num_threads(NUM_SORT_THREADS)
    {
        // BUGFIX: schedule(static) on BOTH worksharing loops. The scatter
        // loop below assumes each iteration runs on the same thread as in
        // the counting loop (the start offsets are per-thread); OpenMP only
        // guarantees identical iteration->thread assignment for matching
        // schedule(static) loops, not for the implementation-defined
        // default schedule.
        #pragma omp for schedule(static)
        for (auto it = begin; it < end; it++) {
            unsigned idx = ((Key)(*it) >> (k*b)) & mask;
            counts[omp_get_thread_num()][idx]++;
        }
        // Exclusive scan over (bucket, thread): thread t's slice of bucket i
        // starts after all earlier buckets and earlier threads' counts.
        #pragma omp single
        {
            unsigned previous_starts = 0;
            unsigned previous_counts = 0;
            for (unsigned i = 0; i < (unsigned)1 << b; i++) {
                for (unsigned tid = 0; tid < NUM_SORT_THREADS; tid++) {
                    starts[tid][i] = previous_starts + previous_counts;
                    previous_starts = starts[tid][i];
                    previous_counts = counts[tid][i];
                }
            }
        }
        #pragma omp for schedule(static)
        for (auto it = begin; it < end; it++) {
            unsigned idx = ((Key)(*it) >> (k*b)) & mask;
            unsigned pos = starts[omp_get_thread_num()][idx]++;
            *(output + pos) = *it;
        }
    }
#undef NUM_SORT_THREADS
}
// LSD radix sort of [begin, end) by Key value, using [scratch, scratch_end)
// as the ping-pong buffer. Generalized: the pass count is derived from the
// key width instead of hard-coding b == 8 (previously exactly 4 passes for
// 32-bit keys and 8 for 64-bit, which only covered the key when b == 8).
// For b == 8 the pass sequence is identical to the original. The sorted
// result always ends up back in [begin, end).
template <typename Key, typename T>
static void radix_sort(
    typename std::vector<T>::iterator begin,
    typename std::vector<T>::iterator end,
    typename std::vector<T>::iterator scratch,
    typename std::vector<T>::iterator scratch_end,
    unsigned b)
{
    const unsigned key_bits = (unsigned)(sizeof(Key) * 8);
    const unsigned passes = (key_bits + b - 1) / b;  // ceil(key_bits / b)
    auto src_begin = begin;
    auto src_end = end;
    auto dst_begin = scratch;
    auto dst_end = scratch_end;
    for (unsigned k = 0; k < passes; k++) {
        // Max shift is (passes-1)*b <= key_bits-1, so no UB-wide shifts.
        parallel_counting_sort<Key, T>(src_begin, src_end, dst_begin, dst_end, b, k);
        std::swap(src_begin, dst_begin);
        std::swap(src_end, dst_end);
    }
    // After an odd number of passes the sorted data sits in the scratch
    // buffer; copy it back so callers always find the result in [begin, end).
    if (passes & 1) {
        std::copy(scratch, scratch_end, begin);
    }
}
// Three-way bit interleaving of x, y, z -- presumably a 3-D Morton code;
// definitions live in the corresponding .cpp (TODO confirm semantics there).
uint32_t bit_interleave_32(const uint32_t &x, const uint32_t &y, const uint32_t &z);
uint64_t bit_interleave_64(const uint64_t &x, const uint64_t &y, const uint64_t &z);
#endif
|
compare.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP AAA RRRR EEEEE %
% C O O MM MM P P A A R R E %
% C O O M M M PPPP AAAAA RRRR EEE %
% C O O M M P A A R R E %
% CCCC OOO M M P A A R R EEEEE %
% %
% %
% MagickCore Image Comparison Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p a r e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompareImages() compares one or more pixel channels of an image to a
% reconstructed image and returns the difference image.
%
% The format of the CompareImages method is:
%
% Image *CompareImages(const Image *image,const Image *reconstruct_image,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static size_t GetImageChannels(const Image *image)
{
ssize_t
i;
size_t
channels;
channels=0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) != 0)
channels++;
}
return(channels == 0 ? (size_t) 1 : channels);
}
MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
const MetricType metric,double *distortion,ExceptionInfo *exception)
{
CacheView
*highlight_view,
*image_view,
*reconstruct_view;
const char
*artifact;
double
fuzz;
Image
*clone_image,
*difference_image,
*highlight_image;
MagickBooleanType
status;
PixelInfo
highlight,
lowlight,
masklight;
RectangleInfo
geometry;
size_t
columns,
rows;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(reconstruct_image != (const Image *) NULL);
assert(reconstruct_image->signature == MagickCoreSignature);
assert(distortion != (double *) NULL);
*distortion=0.0;
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=GetImageDistortion(image,reconstruct_image,metric,distortion,
exception);
if (status == MagickFalse)
return((Image *) NULL);
columns=MagickMax(image->columns,reconstruct_image->columns);
rows=MagickMax(image->rows,reconstruct_image->rows);
SetGeometry(image,&geometry);
geometry.width=columns;
geometry.height=rows;
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageMask(clone_image,ReadPixelMask,(Image *) NULL,exception);
difference_image=ExtentImage(clone_image,&geometry,exception);
clone_image=DestroyImage(clone_image);
if (difference_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel,exception);
highlight_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (highlight_image == (Image *) NULL)
{
difference_image=DestroyImage(difference_image);
return((Image *) NULL);
}
status=SetImageStorageClass(highlight_image,DirectClass,exception);
if (status == MagickFalse)
{
difference_image=DestroyImage(difference_image);
highlight_image=DestroyImage(highlight_image);
return((Image *) NULL);
}
(void) SetImageMask(highlight_image,ReadPixelMask,(Image *) NULL,exception);
(void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel,exception);
(void) QueryColorCompliance("#f1001ecc",AllCompliance,&highlight,exception);
artifact=GetImageArtifact(image,"compare:highlight-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&highlight,exception);
(void) QueryColorCompliance("#ffffffcc",AllCompliance,&lowlight,exception);
artifact=GetImageArtifact(image,"compare:lowlight-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&lowlight,exception);
(void) QueryColorCompliance("#888888cc",AllCompliance,&masklight,exception);
artifact=GetImageArtifact(image,"compare:masklight-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&masklight,exception);
/*
Generate difference image.
*/
status=MagickTrue;
fuzz=GetFuzzyColorDistance(image,reconstruct_image);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
highlight_view=AcquireAuthenticCacheView(highlight_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,highlight_image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
MagickBooleanType
sync;
const Quantum
*magick_restrict p,
*magick_restrict q;
Quantum
*magick_restrict r;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) ||
(r == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
MagickStatusType
difference;
ssize_t
i;
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
SetPixelViaPixelInfo(highlight_image,&masklight,r);
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
r+=GetPixelChannels(highlight_image);
continue;
}
difference=MagickFalse;
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance,
pixel;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
if (channel == AlphaPixelChannel)
pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
else
pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
distance=pixel*pixel;
if (distance >= fuzz)
{
difference=MagickTrue;
break;
}
}
if (difference == MagickFalse)
SetPixelViaPixelInfo(highlight_image,&lowlight,r);
else
SetPixelViaPixelInfo(highlight_image,&highlight,r);
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
r+=GetPixelChannels(highlight_image);
}
sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
highlight_view=DestroyCacheView(highlight_view);
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
(void) CompositeImage(difference_image,highlight_image,image->compose,
MagickTrue,0,0,exception);
highlight_image=DestroyImage(highlight_image);
if (status == MagickFalse)
difference_image=DestroyImage(difference_image);
return(difference_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortion() compares one or more pixel channels of an image to a
% reconstructed image and returns the specified distortion metric.
%
% The format of the GetImageDistortion method is:
%
%      MagickBooleanType GetImageDistortion(Image *image,
%        const Image *reconstruct_image,const MetricType metric,
%        double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetAbsoluteDistortion() implements the absolute error (AE) metric: for each
  pixel, every channel whose squared difference meets or exceeds the fuzz
  threshold increments that channel's count, and a pixel with any differing
  channel increments distortion[CompositePixelChannel].  Counts are
  accumulated into distortion[], which the caller is expected to have zeroed.
*/
static MagickBooleanType GetAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    fuzz;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Compute the absolute difference in pixels between two images.
  */
  status=MagickTrue;
  fuzz=GetFuzzyColorDistance(image,reconstruct_image);
  /*
    Iterate over the larger geometry; the virtual cache views supply pixels
    for coordinates outside the smaller image.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      j,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      MagickBooleanType
        difference;

      ssize_t
        i;

      /*
        Skip pixels masked out in either image.
      */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      difference=MagickFalse;
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance,
          pixel;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /*
          Alpha is compared directly; other channels are alpha-weighted
          before differencing.
        */
        if (channel == AlphaPixelChannel)
          pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
        else
          pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
        distance=pixel*pixel;
        if (distance >= fuzz)
          {
            channel_distortion[i]++;
            difference=MagickTrue;
          }
      }
      if (difference != MagickFalse)
        channel_distortion[CompositePixelChannel]++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetAbsoluteDistortion)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
  GetFuzzDistortion() computes the fuzz metric: the root mean of the
  normalized squared channel differences, averaged over the unmasked pixel
  area.  Per-channel sums accumulate into distortion[]; the composite value
  is additionally divided by the channel count and square-rooted.

  Fix: the null check for q previously cast to (Quantum *) instead of
  (const Quantum *); corrected for const-correctness and consistency with
  the sibling metric implementations.
*/
static MagickBooleanType GetFuzzDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /*
        Masked pixels do not contribute to the area or the sums.
      */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /*
          Normalize differences to [0,1]; alpha is compared directly, other
          channels are alpha-weighted.
        */
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
            channel,q));
        else
          distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
            channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetFuzzDistortion)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Convert sums to means; PerceptibleReciprocal() guards against a zero
    (fully masked) area.
  */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]);
  return(status);
}
/*
  GetMeanAbsoluteDistortion() computes the mean absolute error (MAE): the
  average of the normalized absolute channel differences over the unmasked
  pixel area.  Per-channel means are returned in distortion[]; the composite
  value is additionally averaged over the channel count.
*/
static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /*
        Masked pixels contribute neither to the area nor the sums.
      */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /*
          Normalized absolute difference; alpha directly, other channels
          alpha-weighted.
        */
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*fabs((double) (p[i]-(double)
            GetPixelChannel(reconstruct_image,channel,q)));
        else
          distance=QuantumScale*fabs((double) (Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q)));
        channel_distortion[i]+=distance;
        channel_distortion[CompositePixelChannel]+=distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanAbsoluteError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Convert sums to means; PerceptibleReciprocal() guards a zero area.
  */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  return(status);
}
/*
  GetMeanErrorPerPixel() computes the mean error per pixel metric and also
  stores the mean, normalized-mean, and normalized-maximum errors into
  image->error.  Accumulates absolute channel differences into distortion[].

  Fix: the final statistics divided by `area' without guarding against a
  zero area (fully masked images, or failed pixel reads on the first row),
  which produced NaN/Inf.  Use PerceptibleReciprocal(), consistent with the
  other metric implementations in this file.
*/
static MagickBooleanType GetMeanErrorPerPixel(Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  double
    area,
    maximum_error,
    mean_error;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  area=0.0;
  maximum_error=0.0;
  mean_error=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /*
        Masked pixels are excluded from the statistics.
      */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          distance=fabs((double) (p[i]-(double)
            GetPixelChannel(reconstruct_image,channel,q)));
        else
          distance=fabs((double) (Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q)));
        distortion[i]+=distance;
        distortion[CompositePixelChannel]+=distance;
        mean_error+=distance*distance;
        if (distance > maximum_error)
          maximum_error=distance;
        /*
          NOTE: area counts channel samples, not pixels, matching the
          original semantics of this metric.
        */
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Guard against a zero sample area to avoid NaN/Inf in image->error.
  */
  area=PerceptibleReciprocal(area);
  image->error.mean_error_per_pixel=area*distortion[CompositePixelChannel];
  image->error.normalized_mean_error=QuantumScale*QuantumScale*area*mean_error;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  return(status);
}
/*
  GetMeanSquaredDistortion() computes the mean squared error (MSE): the mean
  of the normalized squared channel differences over the unmasked pixel
  area.  Per-channel means are returned in distortion[]; the composite value
  is additionally averaged over the channel count.

  Fix: cast GetImageChannels() to double in the final composite division,
  consistent with the sibling metric implementations.
*/
static MagickBooleanType GetMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /*
        Masked pixels contribute neither to the area nor the sums.
      */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
            channel,q));
        else
          distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
            channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanSquaredError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Convert sums to means; PerceptibleReciprocal() guards a zero area.
  */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  return(status);
}
/*
  GetNormalizedCrossCorrelationDistortion() computes the NCC metric: the
  per-channel covariance of the two images (about their channel means),
  normalized by the product of their standard deviations.  Two serial passes
  are made: the first counts the unmasked pixel area, the second accumulates
  the mean-centered cross products scaled by 1/area.
*/
static MagickBooleanType GetNormalizedCrossCorrelationDistortion(
  const Image *image,const Image *reconstruct_image,double *distortion,
  ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *image_view,
    *reconstruct_view;

  ChannelStatistics
    *image_statistics,
    *reconstruct_statistics;

  double
    area;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Normalize to account for variation due to lighting and exposure condition.
  */
  image_statistics=GetImageStatistics(image,exception);
  reconstruct_statistics=GetImageStatistics(reconstruct_image,exception);
  if ((image_statistics == (ChannelStatistics *) NULL) ||
      (reconstruct_statistics == (ChannelStatistics *) NULL))
    {
      /*
        Release whichever statistics buffer was successfully acquired.
      */
      if (image_statistics != (ChannelStatistics *) NULL)
        image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          image_statistics);
      if (reconstruct_statistics != (ChannelStatistics *) NULL)
        reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          reconstruct_statistics);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  for (i=0; i <= MaxPixelChannels; i++)
    distortion[i]=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  /*
    Pass 1: count the unmasked pixel area.
  */
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  area=PerceptibleReciprocal(area);
  /*
    Pass 2: accumulate the area-scaled, mean-centered cross products.
  */
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          {
            distortion[i]+=area*QuantumScale*(p[i]-
              image_statistics[channel].mean)*(GetPixelChannel(
              reconstruct_image,channel,q)-
              reconstruct_statistics[channel].mean);
          }
        else
          {
            distortion[i]+=area*QuantumScale*(Sa*p[i]-
              image_statistics[channel].mean)*(Da*GetPixelChannel(
              reconstruct_image,channel,q)-
              reconstruct_statistics[channel].mean);
          }
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          NOTE(review): this loop is not inside an OpenMP parallel region,
          so the atomic appears redundant here — harmless, but confirm.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SimilarityImageTag,progress,rows);
        if (proceed == MagickFalse)
          {
            status=MagickFalse;
            break;
          }
      }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Divide by the standard deviation.
  */
  distortion[CompositePixelChannel]=0.0;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      gamma;

    PixelChannel channel = GetPixelChannelChannel(image,i);
    gamma=image_statistics[channel].standard_deviation*
      reconstruct_statistics[channel].standard_deviation;
    gamma=PerceptibleReciprocal(gamma);
    distortion[i]=QuantumRange*gamma*distortion[i];
    distortion[CompositePixelChannel]+=distortion[i]*distortion[i];
  }
  distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]/
    GetImageChannels(image));
  /*
    Free resources.
  */
  reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    reconstruct_statistics);
  image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    image_statistics);
  return(status);
}
/*
  GetPeakAbsoluteDistortion() computes the peak absolute error (PAE): the
  maximum normalized absolute channel difference over all unmasked pixels,
  per channel and overall (CompositePixelChannel).  Maxima are merged into
  distortion[], which the caller is expected to have zeroed.
*/
static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      j,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /*
        Masked pixels are excluded from the maxima.
      */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*fabs((double) (p[i]-(double)
            GetPixelChannel(reconstruct_image,channel,q)));
        else
          distance=QuantumScale*fabs((double) (Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q)));
        if (distance > channel_distortion[i])
          channel_distortion[i]=distance;
        if (distance > channel_distortion[CompositePixelChannel])
          channel_distortion[CompositePixelChannel]=distance;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPeakAbsoluteError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      if (channel_distortion[j] > distortion[j])
        distortion[j]=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
/*
  GetPeakSignalToNoiseRatio() derives PSNR from the per-channel mean squared
  error: channels with (near) zero MSE map to INFINITY, otherwise
  10*log10(1/MSE) decibels.
*/
static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    i;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
  for (i=0; i <= MaxPixelChannels; i++)
  {
    if (fabs(distortion[i]) < MagickEpsilon)
      {
        distortion[i]=INFINITY;
        continue;
      }
    distortion[i]=10.0*MagickLog10(1.0)-10.0*MagickLog10(distortion[i]);
  }
  return(status);
}
/*
  GetPerceptualHashDistortion() compares the perceptual hashes (image moment
  signatures per colorspace) of the two images and accumulates the squared
  moment differences per channel into distortion[].
*/
static MagickBooleanType GetPerceptualHashDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  ChannelPerceptualHash
    *channel_phash,
    *reconstruct_phash;

  const char
    *artifact;

  MagickBooleanType
    normalize;

  ssize_t
    channel;

  /*
    Compute perceptual hash in the sRGB colorspace.
  */
  channel_phash=GetImagePerceptualHash(image,exception);
  if (channel_phash == (ChannelPerceptualHash *) NULL)
    return(MagickFalse);
  reconstruct_phash=GetImagePerceptualHash(reconstruct_image,exception);
  if (reconstruct_phash == (ChannelPerceptualHash *) NULL)
    {
      channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        channel_phash);
      return(MagickFalse);
    }
  /*
    The "phash:normalize" artifact must be explicitly true to enable
    normalization.
  */
  artifact=GetImageArtifact(image,"phash:normalize");
  normalize=(artifact == (const char *) NULL) ||
    (IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
  for (channel=0; channel < MaxPixelChannels; channel++)
  {
    double
      difference;

    ssize_t
      i;

    difference=0.0;
    for (i=0; i < MaximumNumberOfImageMoments; i++)
    {
      double
        alpha,
        beta;

      ssize_t
        j;

      for (j=0; j < (ssize_t) channel_phash[0].number_colorspaces; j++)
      {
        alpha=channel_phash[channel].phash[j][i];
        beta=reconstruct_phash[channel].phash[j][i];
        if (normalize == MagickFalse)
          difference+=(beta-alpha)*(beta-alpha);
        else
          /*
            NOTE(review): this assigns rather than accumulates, so only the
            last moment/colorspace survives in the normalized case — looks
            suspicious; confirm intended behavior.
          */
          difference=sqrt((beta-alpha)*(beta-alpha)/
            channel_phash[0].number_channels);
      }
    }
    /*
      Each thread owns a distinct channel index, so this store is race-free;
      only the shared composite sum needs the critical section.
    */
    distortion[channel]+=difference;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPerceptualHashDistortion)
#endif
    distortion[CompositePixelChannel]+=difference;
  }
  /*
    Free resources.
  */
  reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
    reconstruct_phash);
  channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(channel_phash);
  return(MagickTrue);
}
/*
  GetRootMeanSquaredDistortion() computes RMSE by taking the square root of
  each channel's mean squared error.
*/
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    channel;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
  channel=0;
  while (channel <= MaxPixelChannels)
  {
    distortion[channel]=sqrt(distortion[channel]);
    channel++;
  }
  return(status);
}
/*
  GetStructuralSimilarityDistortion() computes the SSIM index over a sliding
  Gaussian-weighted window (see
  https://en.wikipedia.org/wiki/Structural_similarity): per window it
  accumulates weighted means, variances and covariance of the two images and
  combines them with the stabilizing constants c1/c2.  Radius, sigma, k1 and
  k2 are tunable via "compare:ssim-*" image artifacts.

  Fix: removed a stray duplicate memset that re-cleared x_pixel_sigma_squared
  using sizeof(y_pixel_sigma_squared) — a copy-paste slip (the arrays have
  equal size, so behavior is unchanged, but the line was dead and
  misleading).
*/
static MagickBooleanType GetStructuralSimilarityDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
#define SSIMRadius  5.0
#define SSIMSigma  1.5
#define SSIMBlocksize  8
#define SSIMK1  0.01
#define SSIMK2  0.03
#define SSIML  1.0

  CacheView
    *image_view,
    *reconstruct_view;

  char
    geometry[MagickPathExtent];

  const char
    *artifact;

  double
    c1,
    c2,
    radius,
    sigma;

  KernelInfo
    *kernel_info;

  MagickBooleanType
    status;

  ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Build the Gaussian weighting kernel; its radius/sigma are overridable
    via image artifacts.
  */
  radius=SSIMRadius;
  artifact=GetImageArtifact(image,"compare:ssim-radius");
  if (artifact != (const char *) NULL)
    radius=StringToDouble(artifact,(char **) NULL);
  sigma=SSIMSigma;
  artifact=GetImageArtifact(image,"compare:ssim-sigma");
  if (artifact != (const char *) NULL)
    sigma=StringToDouble(artifact,(char **) NULL);
  (void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g",
    radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Stabilizing constants c1=(k1*L)^2, c2=(k2*L)^2.
  */
  c1=pow(SSIMK1*SSIML,2.0);
  artifact=GetImageArtifact(image,"compare:ssim-k1");
  if (artifact != (const char *) NULL)
    c1=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
  c2=pow(SSIMK2*SSIML,2.0);
  artifact=GetImageArtifact(image,"compare:ssim-k2");
  if (artifact != (const char *) NULL)
    c2=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,reconstruct_image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fetch a row band tall/wide enough for the kernel window centered on
      each pixel of row y.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) kernel_info->width/2L),y-
      ((ssize_t) kernel_info->height/2L),columns+kernel_info->width,
      kernel_info->height,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,-((ssize_t) kernel_info->width/
      2L),y-((ssize_t) kernel_info->height/2L),columns+kernel_info->width,
      kernel_info->height,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        x_pixel_mu[MaxPixelChannels+1],
        x_pixel_sigma_squared[MaxPixelChannels+1],
        xy_sigma[MaxPixelChannels+1],
        y_pixel_mu[MaxPixelChannels+1],
        y_pixel_sigma_squared[MaxPixelChannels+1];

      const Quantum
        *magick_restrict reference,
        *magick_restrict target;

      MagickRealType
        *k;

      ssize_t
        v;

      (void) memset(x_pixel_mu,0,sizeof(x_pixel_mu));
      (void) memset(x_pixel_sigma_squared,0,sizeof(x_pixel_sigma_squared));
      (void) memset(xy_sigma,0,sizeof(xy_sigma));
      (void) memset(y_pixel_mu,0,sizeof(y_pixel_mu));
      (void) memset(y_pixel_sigma_squared,0,sizeof(y_pixel_sigma_squared));
      k=kernel_info->values;
      reference=p;
      target=q;
      /*
        Accumulate kernel-weighted first and second moments over the window.
      */
      for (v=0; v < (ssize_t) kernel_info->height; v++)
      {
        ssize_t
          u;

        for (u=0; u < (ssize_t) kernel_info->width; u++)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            double
              x_pixel,
              y_pixel;

            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait reconstruct_traits = GetPixelChannelTraits(
              reconstruct_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (reconstruct_traits == UndefinedPixelTrait) ||
                ((reconstruct_traits & UpdatePixelTrait) == 0))
              continue;
            x_pixel=QuantumScale*reference[i];
            x_pixel_mu[i]+=(*k)*x_pixel;
            x_pixel_sigma_squared[i]+=(*k)*x_pixel*x_pixel;
            y_pixel=QuantumScale*
              GetPixelChannel(reconstruct_image,channel,target);
            y_pixel_mu[i]+=(*k)*y_pixel;
            y_pixel_sigma_squared[i]+=(*k)*y_pixel*y_pixel;
            xy_sigma[i]+=(*k)*x_pixel*y_pixel;
          }
          k++;
          reference+=GetPixelChannels(image);
          target+=GetPixelChannels(reconstruct_image);
        }
        /*
          Row stride is columns+kernel width pixels; the inner loop already
          consumed kernel width of them.
        */
        reference+=GetPixelChannels(image)*columns;
        target+=GetPixelChannels(reconstruct_image)*columns;
      }
      /*
        Combine the window moments into the SSIM value for this pixel.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          ssim,
          x_pixel_mu_squared,
          x_pixel_sigmas_squared,
          xy_mu,
          xy_sigmas,
          y_pixel_mu_squared,
          y_pixel_sigmas_squared;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(
          reconstruct_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        x_pixel_mu_squared=x_pixel_mu[i]*x_pixel_mu[i];
        y_pixel_mu_squared=y_pixel_mu[i]*y_pixel_mu[i];
        xy_mu=x_pixel_mu[i]*y_pixel_mu[i];
        xy_sigmas=xy_sigma[i]-xy_mu;
        x_pixel_sigmas_squared=x_pixel_sigma_squared[i]-x_pixel_mu_squared;
        y_pixel_sigmas_squared=y_pixel_sigma_squared[i]-y_pixel_mu_squared;
        ssim=((2.0*xy_mu+c1)*(2.0*xy_sigmas+c2))/
          ((x_pixel_mu_squared+y_pixel_mu_squared+c1)*
           (x_pixel_sigmas_squared+y_pixel_sigmas_squared+c2));
        channel_distortion[i]+=ssim;
        channel_distortion[CompositePixelChannel]+=ssim;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetStructuralSimilarityDistortion)
#endif
    for (i=0; i <= MaxPixelChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  image_view=DestroyCacheView(image_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  /*
    Average the accumulated SSIM over the pixel area (and, for the
    composite, over the channel count).
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0))
      continue;
    distortion[i]/=((double) columns*rows);
  }
  distortion[CompositePixelChannel]/=((double) columns*rows);
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(status);
}
static MagickBooleanType GetStructuralDisimilarityDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    Structural dissimilarity (DSSIM) is derived from SSIM per channel:
    DSSIM = (1-SSIM)/2.
  */
  MagickBooleanType
    status;

  ssize_t
    channel;

  status=GetStructuralSimilarityDistortion(image,reconstruct_image,distortion,
    exception);
  for (channel=0; channel <= MaxPixelChannels; channel++)
    distortion[channel]=(1.0-distortion[channel])/2.0;
  return(status);
}
MagickExport MagickBooleanType GetImageDistortion(Image *image,
  const Image *reconstruct_image,const MetricType metric,double *distortion,
  ExceptionInfo *exception)
{
  /*
    Compute the requested distortion metric between image and
    reconstruct_image; the composite-channel result is returned in
    *distortion and recorded as the "distortion" image property.
    Fix: the debug trace was logged twice (identical LogMagickEvent calls);
    the duplicate is removed.
  */
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  /*
    Get image distortion.
  */
  length=MaxPixelChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_distortion,0,length*sizeof(*channel_distortion));
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralSimilarityErrorMetric:
    {
      status=GetStructuralSimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralDissimilarityErrorMetric:
    {
      status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  *distortion=channel_distortion[CompositePixelChannel];
  channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
  (void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(),
    *distortion);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortions() compares the pixel channels of an image to a
% reconstructed image and returns the specified distortion metric for each
% channel.
%
% The format of the GetImageDistortions method is:
%
% double *GetImageDistortions(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport double *GetImageDistortions(Image *image,
  const Image *reconstruct_image,const MetricType metric,
  ExceptionInfo *exception)
{
  /*
    Per-channel variant of GetImageDistortion(): returns a newly allocated
    array of MaxPixelChannels+1 distortion values (caller frees), or NULL on
    failure.
    Fix 1: the PerceptualHashErrorMetric case mistakenly dispatched to
    GetRootMeanSquaredDistortion (copy/paste from the case below); it now
    calls GetPerceptualHashDistortion, consistent with GetImageDistortion().
    Fix 2: removed a duplicated debug trace call.
  */
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  /*
    Get image distortion.
  */
  length=MaxPixelChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_distortion,0,length*sizeof(*channel_distortion));
  status=MagickTrue;
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralSimilarityErrorMetric:
    {
      status=GetStructuralSimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralDissimilarityErrorMetric:
    {
      status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  if (status == MagickFalse)
    {
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e s E q u a l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImagesEqual() compare the pixels of two images and returns immediately
% if any pixel is not identical.
%
% The format of the IsImagesEqual method is:
%
% MagickBooleanType IsImagesEqual(const Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsImagesEqual(const Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Pixel-exact comparison with early exit.  Each loop `break`s on the first
    mismatch; the indices are then tested after each loop (i < channels,
    x < columns, y < rows) so the "not equal" condition cascades outward
    without any extra flag variable.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  /* compare over the larger of the two geometries; virtual views supply
     pixels outside either image's actual bounds */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;  /* pixel cache failure: reported as "not equal" via y < rows */
    for (x=0; x < (ssize_t) columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        /* only compare channels both images define and update */
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs((double) (p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q)));
        if (distance >= MagickEpsilon)
          break;  /* first differing channel ends the comparison */
      }
      if (i < (ssize_t) GetPixelChannels(image))
        break;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    if (x < (ssize_t) columns)
      break;
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* y short of rows means some loop broke early => images differ */
  return(y < (ssize_t) rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r M e t r i c %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorMetric() measures the difference between colors at each pixel
% location of two images. A value other than 0 means the colors match
% exactly. Otherwise an error measure is computed by summing over all
% pixels in an image the distance squared in RGB space between each image
% pixel and its corresponding pixel in the reconstruct image. The error
% measure is assigned to these image members:
%
% o mean_error_per_pixel: The mean error for any single pixel in
% the image.
%
% o normalized_mean_error: The normalized mean quantization error for
% any single pixel in the image. This distance measure is normalized to
% a range between 0 and 1. It is independent of the range of red, green,
% and blue values in the image.
%
% o normalized_maximum_error: The normalized maximum quantization
% error for any single pixel in the image. This distance measure is
% normalized to a range between 0 and 1. It is independent of the range
% of red, green, and blue values in your image.
%
% A small normalized mean square error, accessed as
% image->normalized_mean_error, suggests the images are very similar in
% spatial layout and color.
%
% The format of the SetImageColorMetric method is:
%
% MagickBooleanType SetImageColorMetric(Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColorMetric(Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  /*
    Accumulate per-channel absolute differences between the two images and
    store the mean/normalized-mean/normalized-maximum errors in image->error.
    Returns MagickTrue when the images match exactly.
    Fix: `area` (the number of channel samples compared) could be zero when
    no rows were readable or no channel was updatable, making every division
    below 0/0 and propagating NaN into the error members; it is now guarded.
  */
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  area=0.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs((double) (p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q)));
        if (distance >= MagickEpsilon)
          {
            mean_error_per_pixel+=distance;
            mean_error+=distance*distance;
            if (distance > maximum_error)
              maximum_error=distance;
          }
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  if (area == 0.0)
    area=1.0;  /* nothing compared: accumulators are all 0.0, avoid 0/0 */
  image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area);
  image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale*
    mean_error/area);
  image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error);
  status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i m i l a r i t y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SimilarityImage() compares the reference image of the image and returns the
% best match offset. In addition, it returns a similarity image such that an
% exact match location is completely white and if none of the pixels match,
% black, otherwise some gray level in-between.
%
% The format of the SimilarityImageImage method is:
%
% Image *SimilarityImage(const Image *image,const Image *reference,
% const MetricType metric,const double similarity_threshold,
% RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reference: find an area of the image that closely resembles this image.
%
% o metric: the metric.
%
% o similarity_threshold: minimum distortion for (sub)image match.
%
% o offset: the best match offset of the reference image within the image.
%
% o similarity: the computed similarity between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  /*
    Crop a reference-sized window of `image` at (x_offset,y_offset) and
    return its distortion against `reference`; 0.0 on crop or metric failure.
  */
  double
    distortion = 0.0;

  Image
    *crop;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  SetGeometry(reference,&geometry);
  geometry.x=x_offset;
  geometry.y=y_offset;
  crop=CropImage(image,&geometry,exception);
  if (crop == (Image *) NULL)
    return(0.0);
  status=GetImageDistortion(crop,reference,metric,&distortion,exception);
  crop=DestroyImage(crop);
  return(status == MagickFalse ? 0.0 : distortion);
}
MagickExport Image *SimilarityImage(const Image *image,const Image *reference,
  const MetricType metric,const double similarity_threshold,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  /*
    Slide `reference` over `image`, score each offset with `metric`, record
    the best offset/score in *offset / *similarity_metric, and return a map
    image whose intensity encodes the per-offset similarity.
    Fix: the `omp critical` previously guarded only the local metric
    inversion, leaving the shared best-match update (offset->x/y and
    *similarity_metric) racy; the critical section now protects that
    compare-and-update instead.
  */
  CacheView
    *similarity_view;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(offset != (RectangleInfo *) NULL);
  SetGeometry(reference,offset);
  *similarity_metric=MagickMaximumValue;
  similarity_image=CloneImage(image,image->columns-reference->columns+1,
    image->rows-reference->rows+1,MagickTrue,exception);
  if (similarity_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(similarity_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      similarity_image=DestroyImage(similarity_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel,
    exception);
  /*
    Measure similarity of reference image against image.
  */
  status=MagickTrue;
  progress=0;
  similarity_view=AcquireAuthenticCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    shared(progress,status,similarity_metric) \
    magick_number_threads(image,image,image->rows-reference->rows+1,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
  {
    double
      similarity;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp flush(similarity_metric)
#endif
    if (*similarity_metric <= similarity_threshold)
      continue;  /* a good-enough match was already found; stop scanning */
    q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
      1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
    {
      ssize_t
        i;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp flush(similarity_metric)
#endif
      if (*similarity_metric <= similarity_threshold)
        break;
      similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
      /* NCC-style metrics report correlation; invert so lower == better.
         `similarity` is thread-local, so no synchronization is needed here. */
      if ((metric == NormalizedCrossCorrelationErrorMetric) ||
          (metric == UndefinedErrorMetric))
        similarity=1.0-similarity;
      /* compare-and-update of the shared best match must be atomic */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_SimilarityImage)
#endif
      if (similarity < *similarity_metric)
        {
          offset->x=x;
          offset->y=y;
          *similarity_metric=similarity;
        }
      if (metric == PerceptualHashErrorMetric)
        similarity=MagickMin(0.01*similarity,1.0);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait similarity_traits=GetPixelChannelTraits(similarity_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (similarity_traits == UndefinedPixelTrait) ||
            ((similarity_traits & UpdatePixelTrait) == 0))
          continue;
        SetPixelChannel(similarity_image,channel,ClampToQuantum(QuantumRange-
          QuantumRange*similarity),q);
      }
      q+=GetPixelChannels(similarity_image);
    }
    if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SimilarityImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  similarity_view=DestroyCacheView(similarity_view);
  if (status == MagickFalse)
    similarity_image=DestroyImage(similarity_image);
  return(similarity_image);
}
|
pclange.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/pzlange.c, normal z -> c, Fri Sep 28 17:38:12 2018
*
**/
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
#include <plasma_core_blas.h>
#define A(m, n) (plasma_complex32_t*)plasma_tile_addr(A, m, n)
/***************************************************************************//**
* Parallel tile calculation of max, one, infinity or Frobenius matrix norm
* for a general matrix.
******************************************************************************/
void plasma_pclange(plasma_enum_t norm,
                    plasma_desc_t A, float *work, float *value,
                    plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Scratch declarations hoisted out of the switch body.  Declaring them
    // between "switch (norm) {" and the first "case" label is legal C, but
    // the switch jump bypasses that region, which draws compiler warnings
    // and obscures the code.  Behavior is unchanged.
    float stub;        // dummy slot for kernels that require an output we discard
    float *workspace;  // scratch area following the per-tile partials in work[]
    float *scale;      // per-tile scale factors (Frobenius norm)
    float *sumsq;      // per-tile scaled sums of squares (Frobenius norm)

    // Return if failed sequence.
    if (sequence->status != PlasmaSuccess)
        return;

    switch (norm) {
    //================
    // PlasmaMaxNorm
    //================
    case PlasmaMaxNorm:
        // Per-tile max into work[] (column-major A.mt x A.nt), then reduce.
        for (int m = 0; m < A.mt; m++) {
            int mvam = plasma_tile_mview(A, m);
            int ldam = plasma_tile_mmain(A, m);
            for (int n = 0; n < A.nt; n++) {
                int nvan = plasma_tile_nview(A, n);
                plasma_core_omp_clange(PlasmaMaxNorm,
                                       mvam, nvan,
                                       A(m, n), ldam,
                                       &stub, &work[A.mt*n+m],
                                       sequence, request);
            }
        }
        #pragma omp taskwait
        plasma_core_omp_slange(PlasmaMaxNorm,
                               A.mt, A.nt,
                               work, A.mt,
                               &stub, value,
                               sequence, request);
        break;
    //================
    // PlasmaOneNorm
    //================
    case PlasmaOneNorm:
        // Per-tile column sums, then an inf-norm reduction across tile rows.
        for (int m = 0; m < A.mt; m++) {
            int mvam = plasma_tile_mview(A, m);
            int ldam = plasma_tile_mmain(A, m);
            for (int n = 0; n < A.nt; n++) {
                int nvan = plasma_tile_nview(A, n);
                plasma_core_omp_clange_aux(PlasmaOneNorm,
                                           mvam, nvan,
                                           A(m, n), ldam,
                                           &work[A.n*m+n*A.nb],
                                           sequence, request);
            }
        }
        #pragma omp taskwait
        workspace = work + A.mt*A.n;
        plasma_core_omp_slange(PlasmaInfNorm,
                               A.n, A.mt,
                               work, A.n,
                               workspace, value,
                               sequence, request);
        break;
    //================
    // PlasmaInfNorm
    //================
    case PlasmaInfNorm:
        // Per-tile row sums, then an inf-norm reduction across tile columns.
        for (int m = 0; m < A.mt; m++) {
            int mvam = plasma_tile_mview(A, m);
            int ldam = plasma_tile_mmain(A, m);
            for (int n = 0; n < A.nt; n++) {
                int nvan = plasma_tile_nview(A, n);
                plasma_core_omp_clange_aux(PlasmaInfNorm,
                                           mvam, nvan,
                                           A(m, n), ldam,
                                           &work[A.m*n+m*A.mb],
                                           sequence, request);
            }
        }
        #pragma omp taskwait
        workspace = work + A.nt*A.m;
        plasma_core_omp_slange(PlasmaInfNorm,
                               A.m, A.nt,
                               work, A.m,
                               workspace, value,
                               sequence, request);
        break;
    //======================
    // PlasmaFrobeniusNorm
    //======================
    case PlasmaFrobeniusNorm:
        // Per-tile (scale, sumsq) pairs, combined in a final reduction.
        scale = work;
        sumsq = work + A.mt*A.nt;
        for (int m = 0; m < A.mt; m++) {
            int mvam = plasma_tile_mview(A, m);
            int ldam = plasma_tile_mmain(A, m);
            for (int n = 0; n < A.nt; n++) {
                int nvan = plasma_tile_nview(A, n);
                plasma_core_omp_cgessq(mvam, nvan,
                                       A(m, n), ldam,
                                       &scale[A.mt*n+m], &sumsq[A.mt*n+m],
                                       sequence, request);
            }
        }
        #pragma omp taskwait
        plasma_core_omp_sgessq_aux(A.mt*A.nt,
                                   scale, sumsq,
                                   value,
                                   sequence, request);
        break;
    }
}
|
interpolation_pc.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
static inline void interpolation_pc_block(level_type *level_f, int id_f, double prescale_f, level_type *level_c, int id_c, blockCopy_type *block){
  // Piecewise-constant interpolation of one block: every coarse-grid value is
  // replicated into its 2x2x2 fine-grid children (fine index >>1 == coarse index).
  int fdim_i = block->dim.i<<1; // fine block spans twice the coarse extent per axis
  int fdim_j = block->dim.j<<1;
  int fdim_k = block->dim.k<<1;

  int src_i       = block->read.i;
  int src_j       = block->read.j;
  int src_k       = block->read.k;
  int src_jStride = block->read.jStride;
  int src_kStride = block->read.kStride;

  int dst_i       = block->write.i;
  int dst_j       = block->write.j;
  int dst_k       = block->write.k;
  int dst_jStride = block->write.jStride;
  int dst_kStride = block->write.kStride;

  double * __restrict__ src = block->read.ptr;
  double * __restrict__ dst = block->write.ptr;
  if(block->read.box >=0){
    // read side is a coarse-level box: point past the ghost-zone shell
    src         = level_c->my_boxes[ block->read.box].vectors[id_c] + level_c->my_boxes[ block->read.box].ghosts*(1+level_c->my_boxes[ block->read.box].jStride+level_c->my_boxes[ block->read.box].kStride);
    src_jStride = level_c->my_boxes[ block->read.box].jStride;
    src_kStride = level_c->my_boxes[ block->read.box].kStride;
  }
  if(block->write.box>=0){
    // write side is a fine-level box: point past the ghost-zone shell
    dst         = level_f->my_boxes[block->write.box].vectors[id_f] + level_f->my_boxes[block->write.box].ghosts*(1+level_f->my_boxes[block->write.box].jStride+level_f->my_boxes[block->write.box].kStride);
    dst_jStride = level_f->my_boxes[block->write.box].jStride;
    dst_kStride = level_f->my_boxes[block->write.box].kStride;
  }

  int fi,fj,fk;
  for(fk=0;fk<fdim_k;fk++){
  for(fj=0;fj<fdim_j;fj++){
  for(fi=0;fi<fdim_i;fi++){
    int dst_ijk = ((fi   )+dst_i) + (((fj   )+dst_j)*dst_jStride) + (((fk   )+dst_k)*dst_kStride);
    int src_ijk = ((fi>>1)+src_i) + (((fj>>1)+src_j)*src_jStride) + (((fk>>1)+src_k)*src_kStride);
    // CAREFUL !!! you must guarantee you zero'd the MPI buffers (dst[]) and
    // destination boxes at some point to avoid 0.0*NaN or 0.0*inf
    dst[dst_ijk] = prescale_f*dst[dst_ijk] + src[src_ijk];
  }}}
}
//------------------------------------------------------------------------------------------------------------------------------
// perform a (inter-level) piecewise constant interpolation
void interpolation_pc(level_type * level_f, int id_f, double prescale_f, level_type *level_c, int id_c){
  // Inter-level piecewise-constant interpolation from coarse level_c (vector
  // id_c) into fine level_f (vector id_f); fine values are scaled by
  // prescale_f then incremented.  Sequence: prepost Irecvs, pack + Isend
  // coarse data bound for remote fine boxes, perform the local coarse->fine
  // interpolation while sends are in flight, then wait and unpack.  Each
  // phase is timed into level_f->cycles.
  uint64_t _timeCommunicationStart = CycleTime();
  uint64_t _timeStart,_timeEnd;
  int my_tag = (level_f->tag<<4) | 0x6; // tag distinguishing this exchange from other traffic
  int buffer=0;
  int n;

  #ifdef USE_MPI
  // by convention, level_f allocates a combined array of requests for both level_f recvs and level_c sends...
  int nMessages = level_c->interpolation.num_sends + level_f->interpolation.num_recvs;
  MPI_Request *recv_requests = level_f->interpolation.requests;
  MPI_Request *send_requests = level_f->interpolation.requests + level_f->interpolation.num_recvs;

  // loop through packed list of MPI receives and prepost Irecv's...
  _timeStart = CycleTime();
  #ifdef USE_MPI_THREAD_MULTIPLE
  #pragma omp parallel for schedule(dynamic,1)
  #endif
  for(n=0;n<level_f->interpolation.num_recvs;n++){
    MPI_Irecv(level_f->interpolation.recv_buffers[n],
              level_f->interpolation.recv_sizes[n],
              MPI_DOUBLE,
              level_f->interpolation.recv_ranks[n],
              my_tag,
              MPI_COMM_WORLD,
              &recv_requests[n]
    );
  }
  _timeEnd = CycleTime();
  level_f->cycles.interpolation_recv += (_timeEnd-_timeStart);

  // pack MPI send buffers...
  _timeStart = CycleTime();
  PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->interpolation.num_blocks[0])
  for(buffer=0;buffer<level_c->interpolation.num_blocks[0];buffer++){
    // !!! prescale==0 because you don't want to increment the MPI buffer
    interpolation_pc_block(level_f,id_f,0.0,level_c,id_c,&level_c->interpolation.blocks[0][buffer]);
  }
  _timeEnd = CycleTime();
  level_f->cycles.interpolation_pack += (_timeEnd-_timeStart);

  // loop through MPI send buffers and post Isend's...
  _timeStart = CycleTime();
  #ifdef USE_MPI_THREAD_MULTIPLE
  #pragma omp parallel for schedule(dynamic,1)
  #endif
  for(n=0;n<level_c->interpolation.num_sends;n++){
    MPI_Isend(level_c->interpolation.send_buffers[n],
              level_c->interpolation.send_sizes[n],
              MPI_DOUBLE,
              level_c->interpolation.send_ranks[n],
              my_tag,
              MPI_COMM_WORLD,
              &send_requests[n]
    );
  }
  _timeEnd = CycleTime();
  level_f->cycles.interpolation_send += (_timeEnd-_timeStart);
  #endif

  // perform local interpolation... try and hide within Isend latency...
  // blocks[1] holds the intra-process coarse->fine copies
  _timeStart = CycleTime();
  PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->interpolation.num_blocks[1])
  for(buffer=0;buffer<level_c->interpolation.num_blocks[1];buffer++){
    interpolation_pc_block(level_f,id_f,prescale_f,level_c,id_c,&level_c->interpolation.blocks[1][buffer]);
  }
  _timeEnd = CycleTime();
  level_f->cycles.interpolation_local += (_timeEnd-_timeStart);

  // wait for MPI to finish...
  #ifdef USE_MPI
  _timeStart = CycleTime();
  if(nMessages)MPI_Waitall(nMessages,level_f->interpolation.requests,level_f->interpolation.status);
  _timeEnd = CycleTime();
  level_f->cycles.interpolation_wait += (_timeEnd-_timeStart);

  // unpack MPI receive buffers
  // blocks[2] describes where received fine-level data lands locally
  _timeStart = CycleTime();
  PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_f->interpolation.num_blocks[2])
  for(buffer=0;buffer<level_f->interpolation.num_blocks[2];buffer++){
    IncrementBlock(level_f,id_f,prescale_f,&level_f->interpolation.blocks[2][buffer]);
  }
  _timeEnd = CycleTime();
  level_f->cycles.interpolation_unpack += (_timeEnd-_timeStart);
  #endif

  level_f->cycles.interpolation_total += (uint64_t)(CycleTime()-_timeCommunicationStart);
}
|
strsm.c | #include "blas.h"
#include "error.h"
#include <stdio.h>
#include "handle.h"
#include "config.h"
#include "strsm.fatbin.c"
/* Return the smaller of two size_t values. */
static inline size_t min(size_t a, size_t b) {
  return (b < a) ? b : a;
}
/* Return the larger of two size_t values. */
static inline size_t max(size_t a, size_t b) {
  return (b > a) ? b : a;
}
static inline CUresult cuMemcpyHtoD2DAsync(CUdeviceptr A, size_t lda, size_t ai, size_t aj,
                                           const void * B, size_t ldb, size_t bi, size_t bj,
                                           size_t m, size_t n, size_t elemSize, CUstream stream) {
  /*
   * Asynchronously copy an m x n element sub-matrix from host matrix B
   * (starting at element (bi,bj), leading dimension ldb) into device matrix
   * A (starting at element (ai,aj), leading dimension lda).  Offsets and
   * pitches passed to CUDA are in bytes.
   * Fixes: the call argument "&copy" had been mangled into a "(c)" symbol by
   * an HTML-entity conversion, and the positional CUDA_MEMCPY2D initializer
   * is replaced with designated initializers so the field mapping is
   * explicit (remaining fields are zero-initialized).
   */
  CUDA_MEMCPY2D copy = {
    .srcXInBytes   = bi * elemSize,
    .srcY          = bj,
    .srcMemoryType = CU_MEMORYTYPE_HOST,
    .srcHost       = B,
    .srcPitch      = ldb * elemSize,
    .dstXInBytes   = ai * elemSize,
    .dstY          = aj,
    .dstMemoryType = CU_MEMORYTYPE_DEVICE,
    .dstDevice     = A,
    .dstPitch      = lda * elemSize,
    .WidthInBytes  = m * elemSize,
    .Height        = n };
  return cuMemcpy2DAsync(&copy, stream);
}
static inline CUresult cuMemcpyDtoH2DAsync(void * A, size_t lda, size_t ai, size_t aj,
                                           CUdeviceptr B, size_t ldb, size_t bi, size_t bj,
                                           size_t m, size_t n, size_t elemSize, CUstream stream) {
  /*
   * Asynchronously copy an m x n element sub-matrix from device matrix B
   * (starting at element (bi,bj), leading dimension ldb) into host matrix A
   * (starting at element (ai,aj), leading dimension lda).  Offsets and
   * pitches passed to CUDA are in bytes.
   * Fixes: the call argument "&copy" had been mangled into a "(c)" symbol by
   * an HTML-entity conversion, and the positional CUDA_MEMCPY2D initializer
   * is replaced with designated initializers so the field mapping is
   * explicit (remaining fields are zero-initialized).
   */
  CUDA_MEMCPY2D copy = {
    .srcXInBytes   = bi * elemSize,
    .srcY          = bj,
    .srcMemoryType = CU_MEMORYTYPE_DEVICE,
    .srcDevice     = B,
    .srcPitch      = ldb * elemSize,
    .dstXInBytes   = ai * elemSize,
    .dstY          = aj,
    .dstMemoryType = CU_MEMORYTYPE_HOST,
    .dstHost       = A,
    .dstPitch      = lda * elemSize,
    .WidthInBytes  = m * elemSize,
    .Height        = n };
  return cuMemcpy2DAsync(&copy, stream);
}
/* Scalar constants used for alpha/element comparisons in the routines below. */
static const float zero = 0.0f;
static const float one = 1.0f;
// Reference (host) implementation of the BLAS operation STRSM:
// solve op(A)*X = alpha*B (side == CBlasLeft) or
// X*op(A) = alpha*B (side == CBlasRight)
// for X, where op(A) = A or A', A is triangular, and the solution X
// overwrites B (m rows, n columns, column-major, leading dims lda/ldb).
// Columns of B are independent, so the left-side cases parallelise over j.
void strsm(CBlasSide side, CBlasUplo uplo, CBlasTranspose transA, CBlasDiag diag,
size_t m, size_t n,
float alpha, const float * restrict A, size_t lda,
float * restrict B, size_t ldb) {
// A is m-by-m when applied from the left, n-by-n from the right
const size_t nRowA = (side == CBlasLeft) ? m : n;
// BLAS-style argument checks; info is the 1-based index of the bad argument
// (NOTE(review): the enum arguments themselves are not range-checked here)
int info = 0;
if (lda < nRowA)
info = 9;
else if (ldb < m)
info = 11;
if (info != 0) {
XERBLA(info);
return;
}
// quick return for empty matrices
if (m == 0 || n == 0)
return;
// alpha == 0: the result is simply B = 0, no solve needed
if (alpha == zero) {
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
for (size_t i = 0; i < m; i++)
B[j * ldb + i] = zero;
}
return;
}
if (side == CBlasLeft) {
if (transA == CBlasNoTrans) {
if (uplo == CBlasUpper) {
// B := alpha*inv(A)*B, A upper triangular: backward substitution,
// eliminating row k of B from rows 0..k-1
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
if (alpha != one) {
for (size_t i = 0; i < m; i++)
B[j * ldb + i] *= alpha;
}
// k counts down from m-1 to 0 (post-decrement test is size_t-safe)
size_t k = m - 1;
do {
if (B[j * ldb + k] != zero) {
if (diag == CBlasNonUnit) B[j * ldb + k] /= A[k * lda + k];
register float temp = B[j * ldb + k];
for (size_t i = 0; i < k; i++)
B[j * ldb + i] -= temp * A[k * lda + i];
}
} while (k-- > 0);
}
}
else {
// B := alpha*inv(A)*B, A lower triangular: forward substitution
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
if (alpha != one) {
for (size_t i = 0; i < m; i++)
B[j * ldb + i] *= alpha;
}
for (size_t k = 0; k < m; k++) {
if (B[j * ldb + k] != zero) {
if (diag == CBlasNonUnit) B[j * ldb + k] /= A[k * lda + k];
register float temp = B[j * ldb + k];
for (size_t i = k + 1; i < m; i++)
B[j * ldb + i] -= temp * A[k * lda + i];
}
}
}
}
}
else {
if (uplo == CBlasUpper) {
// B := alpha*inv(A')*B, A upper triangular: dot-product form,
// row i is solved from previously-computed rows 0..i-1
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
for (size_t i = 0; i < m; i++) {
register float temp = alpha * B[j * ldb + i];
for (size_t k = 0; k < i; k++)
temp -= A[i * lda + k] * B[j * ldb + k];
if (diag == CBlasNonUnit) temp /= A[i * lda + i];
B[j * ldb + i] = temp;
}
}
}
else {
// B := alpha*inv(A')*B, A lower triangular: dot-product form,
// iterating rows from m-1 down to 0
#pragma omp parallel for
for (size_t j = 0; j < n; j++) {
size_t i = m - 1;
do {
register float temp = alpha * B[j * ldb + i];
for (size_t k = i + 1; k < m; k++)
temp -= A[i * lda + k] * B[j * ldb + k];
if (diag == CBlasNonUnit) temp /= A[i * lda + i];
B[j * ldb + i] = temp;
} while (i-- > 0);
}
}
}
}
else {
// side == CBlasRight: each column update depends on other columns of B,
// so the column loop carries a dependency and is not parallelised
if (transA == CBlasNoTrans) {
if (uplo == CBlasUpper) {
// B := alpha*B*inv(A), A upper triangular: forward column sweep
for (size_t j = 0; j < n; j++) {
if (alpha != one) {
for (size_t i = 0; i < m; i++)
B[j * ldb + i] *= alpha;
}
for (size_t k = 0; k < j; k++) {
if (A[j * lda + k] != zero) {
register float temp = A[j * lda + k];
for (size_t i = 0; i < m; i++)
B[j * ldb + i] -= temp * B[k * ldb + i];
}
}
if (diag == CBlasNonUnit) {
register float temp = one / A[j * lda + j];
for (size_t i = 0; i < m; i++)
B[j * ldb + i] *= temp;
}
}
}
else {
// B := alpha*B*inv(A), A lower triangular: backward column sweep
size_t j = n - 1;
do {
if (alpha != one) {
for (size_t i = 0; i < m; i++)
B[j * ldb + i] *= alpha;
}
for (size_t k = j + 1; k < n; k++) {
if (A[j * lda + k] != zero) {
register float temp = A[j * lda + k];
for (size_t i = 0; i < m; i++)
B[j * ldb + i] -= temp * B[k * ldb + i];
}
}
if (diag == CBlasNonUnit) {
register float temp = one / A[j * lda + j];
for (size_t i = 0; i < m; i++)
B[j * ldb + i] *= temp;
}
} while (j-- > 0);
}
}
else {
if (uplo == CBlasUpper) {
// B := alpha*B*inv(A'), A upper triangular: backward column sweep
size_t k = n - 1;
do {
if (diag == CBlasNonUnit) {
register float temp = one / A[k * lda + k];
for (size_t i = 0; i < m; i++)
B[k * ldb + i] *= temp;
}
for (size_t j = 0; j < k; j++) {
if (A[k * lda + j] != zero) {
register float temp = A[k * lda + j];
for (size_t i = 0; i < m; i++)
B[j * ldb + i] -= temp * B[k * ldb + i];
}
}
if (alpha != one) {
for (size_t i = 0; i < m; i++)
B[k * ldb + i] *= alpha;
}
} while (k-- > 0);
}
else {
// B := alpha*B*inv(A'), A lower triangular: forward column sweep
for (size_t k = 0; k < n; k++) {
if (diag == CBlasNonUnit) {
register float temp = one / A[k * lda + k];
for (size_t i = 0; i < m; i++)
B[k * ldb + i] *= temp;
}
for (size_t j = k + 1; j < n; j++) {
if (A[k * lda + j] != zero) {
register float temp = A[k * lda + j];
for (size_t i = 0; i < m; i++)
B[j * ldb + i] -= temp * B[k * ldb + i];
}
}
if (alpha != one) {
for (size_t i = 0; i < m; i++)
B[k * ldb + i] *= alpha;
}
}
}
}
}
}
// Single-GPU STRSM via the CUDA driver API: looks up one instantiation of
// the templated device kernel by its Itanium-ABI mangled name and launches
// it on the given stream.  A and B are device pointers; the solve is done
// in place in B.  Returns CUDA_ERROR_INVALID_VALUE on bad dimensions.
CUresult cuStrsm(CUBLAShandle handle,
CBlasSide side, CBlasUplo uplo, CBlasTranspose transA, CBlasDiag diag,
size_t m, size_t n,
float alpha, CUdeviceptr A, size_t lda,
CUdeviceptr B, size_t ldb, CUstream stream) {
const size_t nRowA = (side == CBlasLeft) ? m : n;
// BLAS-style argument checks (info = 1-based index of the bad argument)
int info = 0;
if (lda < nRowA)
info = 9;
else if (ldb < m)
info = 11;
if (info != 0) {
XERBLA(info);
return CUDA_ERROR_INVALID_VALUE;
}
if (m == 0 || n == 0)
return CUDA_SUCCESS;
CU_ERROR_CHECK(cuCtxPushCurrent(handle->context));
// lazily load the module holding the strsm kernels on first use
if (handle->strsm == NULL)
CU_ERROR_CHECK(cuModuleLoadData(&handle->strsm, imageBytes));
// thread-block shape, and per-launch tile sizes: the triangular dimension
// gets the small block (8), the free dimension the large one (64)
const unsigned int bx = 8;
const unsigned int by = 8;
const unsigned int mb = (side == CBlasLeft) ? 8 : 64;
const unsigned int nb = (side == CBlasLeft) ? 64 : 8;
// build the mangled name of the kernel template instantiation
// strsm<side, uplo, transA, diag, mb, nb, bx, by>
char name[102];
snprintf(name, 102,
"_Z5strsmIL9CBlasSide%dEL9CBlasUplo%dEL14CBlasTranspose%dEL9CBlasDiag%dELj%uELj%uELj%uELj%uEEvPKfPffiiii",
side, uplo, transA, diag, mb, nb, bx, by);
CUfunction function;
CU_ERROR_CHECK(cuModuleGetFunction(&function, handle->strsm, name));
void * params[] = { &A, &B, &alpha, &lda, &ldb, &m, &n };
// one block along the triangular dimension; tiles rounded up elsewhere
const unsigned int gx = (side == CBlasLeft) ? 1 : (unsigned int)(m + mb - 1) / mb;
const unsigned int gy = (side == CBlasLeft) ? (unsigned int)(n + nb - 1) / nb : 1;
CU_ERROR_CHECK(cuLaunchKernel(function, gx, gy, 1, bx, by, 1, 0, stream, params, NULL));
CU_ERROR_CHECK(cuCtxPopCurrent(&handle->context));
return CUDA_SUCCESS;
}
// Multi-GPU blocked STRSM: trailing-matrix updates are dispatched to the
// GPUs with cuMultiGPUSgemm, while each small diagonal block is solved on
// the host with the reference strsm above.  alpha is folded into the gemm
// that first touches each block (beta = alpha), so the host solves always
// use alpha = one.  A and B are host pointers.
CUresult cuMultiGPUStrsm(CUmultiGPUBLAShandle handle,
CBlasSide side, CBlasUplo uplo, CBlasTranspose transA, CBlasDiag diag,
size_t m, size_t n,
float alpha, const float * restrict A, size_t lda,
float * restrict B, size_t ldb) {
const size_t nRowA = (side == CBlasLeft) ? m : n;
// BLAS-style argument checks
int info = 0;
if (lda < nRowA)
info = 9;
else if (ldb < m)
info = 11;
if (info != 0) {
XERBLA(info);
return CUDA_ERROR_INVALID_VALUE;
}
if (m == 0 || n == 0)
return CUDA_SUCCESS;
// alpha == 0: a k = 0, beta = 0 sgemm simply zeroes B in place
if (alpha == zero) {
sgemm(CBlasNoTrans, CBlasNoTrans, m, n, 0, zero, A, lda, B, ldb, zero, B, ldb);
return CUDA_SUCCESS;
}
// block sizes matched to the multi-GPU SGEMM tile configuration
const size_t mb = (transA == CBlasNoTrans) ? SGEMM_N_MB : SGEMM_T_MB;
const size_t nb = SGEMM_N_NB;
if (side == CBlasLeft) {
if (transA == CBlasNoTrans) {
if (uplo == CBlasUpper) {
// backward sweep over row blocks (upper triangular, no transpose);
// the start index is rounded up to a multiple of mb
size_t r = m % mb;
size_t i = (r == 0) ? m : m + mb - r;
do {
i -= mb;
const size_t ib = min(mb, m - i);
CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasNoTrans, CBlasNoTrans, ib, n, m - i - ib, -one, &A[(i + ib) * lda + i], lda, &B[i + ib], ldb, alpha, &B[i], ldb));
// the update must finish before the host solves the diagonal block
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
strsm(CBlasLeft, CBlasUpper, CBlasNoTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
} while (i > 0);
}
else {
// forward sweep over row blocks (lower triangular, no transpose)
for (size_t i = 0; i < m; i += mb) {
const size_t ib = min(mb, m - i);
CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasNoTrans, CBlasNoTrans, ib, n, i, -one, &A[i], lda, B, ldb, alpha, &B[i], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
strsm(CBlasLeft, CBlasLower, CBlasNoTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
}
}
}
else {
if (uplo == CBlasUpper) {
// forward sweep over row blocks (upper triangular, transposed)
for (size_t i = 0; i < m; i += mb) {
const size_t ib = min(mb, m - i);
CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasTrans, CBlasNoTrans, ib, n, i, -one, &A[i * lda], lda, B, ldb, alpha, &B[i], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
strsm(CBlasLeft, CBlasUpper, CBlasTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
}
}
else {
// backward sweep over row blocks (lower triangular, transposed)
size_t r = m % mb;
size_t i = (r == 0) ? m : m + mb - r;
do {
i -= mb;
const size_t ib = min(mb, m - i);
CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasTrans, CBlasNoTrans, ib, n, m - i - ib, -one, &A[i * lda + i + ib], lda, &B[i + ib], ldb, alpha, &B[i], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
strsm(CBlasLeft, CBlasLower, CBlasTrans, diag, ib, n, one, &A[i * lda + i], lda, &B[i], ldb);
} while (i > 0);
}
}
}
else {
// side == CBlasRight: sweep over column blocks of B instead
if (transA == CBlasNoTrans) {
if (uplo == CBlasUpper) {
for (size_t j = 0; j < n; j += nb) {
const size_t jb = min(nb, n - j);
CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasNoTrans, CBlasNoTrans, m, jb, j, -one, B, ldb, &A[j * lda], lda, alpha, &B[j * ldb], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
strsm(CBlasRight, CBlasUpper, CBlasNoTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
}
}
else {
size_t r = n % nb;
size_t j = (r == 0) ? n : n + nb - r;
do {
j -= nb;
const size_t jb = min(nb, n - j);
CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasNoTrans, CBlasNoTrans, m, jb, n - j - jb, -one, &B[(j + jb) * ldb], ldb, &A[j * lda + j + jb], lda, alpha, &B[j * ldb], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
strsm(CBlasRight, CBlasLower, CBlasNoTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
} while (j > 0);
}
}
else {
if (uplo == CBlasUpper) {
size_t r = n % nb;
size_t j = (r == 0) ? n : n + nb - r;
do {
j -= nb;
const size_t jb = min(nb, n - j);
CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasNoTrans, CBlasTrans, m, jb, n - j - jb, -one, &B[(j + jb) * ldb], ldb, &A[(j + jb) * lda + j], lda, alpha, &B[j * ldb], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
strsm(CBlasRight, CBlasUpper, CBlasTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
} while (j > 0);
}
else {
for (size_t j = 0; j < n; j += nb) {
const size_t jb = min(nb, n - j);
CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasNoTrans, CBlasTrans, m, jb, j, -one, B, ldb, &A[j], lda, alpha, &B[j * ldb], ldb));
CU_ERROR_CHECK(cuMultiGPUBLASSynchronize(handle));
strsm(CBlasRight, CBlasLower, CBlasTrans, diag, m, jb, one, &A[j * lda + j], lda, &B[j * ldb], ldb);
}
}
}
}
return CUDA_SUCCESS;
}
|
pooling_layer.h | //Tencent is pleased to support the open source community by making FeatherCNN available.
//Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
//Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
//in compliance with the License. You may obtain a copy of the License at
//
//https://opensource.org/licenses/BSD-3-Clause
//
//Unless required by applicable law or agreed to in writing, software distributed
//under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
//CONDITIONS OF ANY KIND, either express or implied. See the License for the
//specific language governing permissions and limitations under the License.
#pragma once
#include "../feather_simple_generated.h"
#include "../layer.h"
#include <math.h>
#include <limits>
// Fully parenthesized min/max: the previous definitions left the whole
// expansion unparenthesized, so e.g. `2 * MAX(a,b)` parsed as
// `(2 * (a > b)) ? a : b`.  Arguments are still evaluated twice, so do
// not pass expressions with side effects.
#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
namespace feather
{
void ave_pool_inner_kernel(float* out, const float* in, const size_t ldin, const size_t kernel_h, const size_t kernel_w)
{
float total = 0.0;
for (size_t m = 0; m != kernel_h; ++m)
{
for (size_t n = 0; n != kernel_w; ++n)
{
size_t pos = m * ldin + n;
total += in[pos];
}
}
*out = total / kernel_h / kernel_w;
}
// Max-pool one kernel_h x kernel_w window of `in` (rows are ldin elements
// apart) and store the maximum in *out.
void max_pool_inner_kernel(float* out, const float* in, const size_t ldin, const size_t kernel_h, const size_t kernel_w)
{
    // Degenerate (empty) window: keep the old behaviour of writing 0.
    if (kernel_h == 0 || kernel_w == 0)
    {
        *out = 0.0f;
        return;
    }
    // Bug fix: seed the running maximum with the first window element.
    // The previous `float max = 0.0;` silently clamped all-negative
    // windows to 0 instead of returning their true maximum.
    float max = in[0];
    for (size_t m = 0; m != kernel_h; ++m)
    {
        for (size_t n = 0; n != kernel_w; ++n)
        {
            size_t pos = m * ldin + n;
            max = (in[pos] > max) ? in[pos] : max;
        }
    }
    *out = max;
}
// Max / average pooling layer.  Reads one bottom blob and writes one top
// blob of shape (1, C, H_out, W_out); geometry (kernel, pad, stride,
// global flag) comes from the flatbuffer PoolingParameter.
class PoolingLayer : public Layer
{
public:
PoolingLayer(const LayerParameter *layer_param, const RuntimeParameter<float>* rt_param)
: stride_height(1),
stride_width(1),
Layer(layer_param, rt_param)
// NOTE(review): C++ runs the base-class initializer first and members in
// declaration order regardless of the order listed here, so this list is
// misleading (triggers -Wreorder) but harmless.
{
const PoolingParameter *pooling_param = layer_param->pooling_param();
kernel_height = pooling_param->kernel_h();
kernel_width = pooling_param->kernel_w();
pad_height = pooling_param->pad_h();
pad_width = pooling_param->pad_w();
stride_height = pooling_param->stride_h();
stride_width = pooling_param->stride_w();
// guard against zero strides in the model file
// (stride_* are size_t, so "<= 0" effectively tests for 0)
stride_height = (stride_height <= 0) ? 1 : stride_height;
stride_width = (stride_width <= 0) ? 1 : stride_width;
global_pooling = pooling_param->global_pooling();
this->method = pooling_param->pool();
// select the scalar inner kernel; note Forward() below inlines the
// pooling loops itself and does not currently call this pointer
switch (this->method)
{
case PoolingParameter_::PoolMethod_MAX_:
_pool_inner_kernel = max_pool_inner_kernel;
break;
case PoolingParameter_::PoolMethod_AVE:
_pool_inner_kernel = ave_pool_inner_kernel;
break;
default:
fprintf(stderr, "Unsupported pool method\n");
}
//printf("kernel (%ld %ld) pad (%ld %ld) stride (%ld %ld) global_pooling %d\n",
// kernel_height, kernel_width, pad_height, pad_width, stride_height, stride_width, global_pooling);
}
// Compute the pooled output for the current top/bottom blobs.
// Channels are processed in parallel; each output row is initialised to
// the identity of the pooling op (0 for AVE, -FLT_MAX for MAX) and then
// reduced over the clipped input window.
int Forward()
{
// NOTE(review): debug logging runs on every forward pass, and %ld/%d are
// paired with size_t arguments here (should be %zu) -- formally undefined
// format/argument mismatch; confirm and clean up.
fprintf(stderr, "Pooling layer %s\ninput shape %ld %ld %ld kernel shape %ld %ld stride %ld %ld\n", this->name().c_str(), input_channels, input_height, input_width, kernel_height, kernel_width, stride_height, stride_width);
fprintf(stderr, "output (%d %d)\n", output_height, output_width);
const float *input = _bottom_blobs[_bottom[0]]->data();
float *output = _top_blobs[_top[0]]->data();
float *p = output;  // shadowed by the per-row pointer inside the loop
// leftover from the commented-out collapsed loop below; currently unused
int slot = input_channels * output_height;
// the pragma binds to the next statement: the channel loop below
#pragma omp parallel for schedule(static) num_threads(num_threads)
// for (int u=0;u<slot;u++)
// {
for (int i = 0; i < input_channels; ++i)
{
for (int j = 0; j < output_height; j ++)
{
// int i=slot/output_height, j=slot%output_height;
float *p = output + i * output_height * output_width + j * output_width;
// initialise the output row with the reduction identity
for (int l = 0; l < output_width; l++) p[l] = (this->method != PoolingParameter_::PoolMethod_MAX_ ? 0 : -1 * std::numeric_limits<float>::max()) ;
// vertical extent of the input window, clipped to the image
int tmp_pos = j * (int)stride_height - (int)pad_height;
int x_min = MAX(tmp_pos, 0);
int x_max = MIN((int)(tmp_pos + kernel_height), (int) input_height);
for (int k = 0; k < output_width; k ++)
{
int counter = 0;
float total = (this->method != PoolingParameter_::PoolMethod_MAX_ ? 0 : -1 * std::numeric_limits<float>::max());
for (int x = x_min; x < x_max; ++x)
{
int xpos = i * input_height * input_width + x * input_width;
// horizontal extent of the window, clipped to the image
int local_pos = k * (int)stride_width - (int)pad_width;
int y_min = MAX(local_pos, 0);
int y_max = MIN((int)(local_pos + kernel_width), (int) input_width);
for (int y = y_min; y < y_max; ++y)
{
float value = input[xpos + y];
if (this->method != PoolingParameter_::PoolMethod_MAX_) total += value, counter++;
else total = total > value ? total : value;
}
}
// AVE divides by the number of in-bounds samples (Caffe-style).
// NOTE(review): counter is 0 when a window lies entirely in the
// padding, which divides by zero -- presumably the layer configs
// guarantee window/image overlap; confirm.
if (this->method != PoolingParameter_::PoolMethod_MAX_)
p[k] += total / (counter);
else p[k] = (p[k] > total) ? p[k] : total;
}
}
}
return 0;
}
// Re-read the bottom blob shape, recompute the output shape, reallocate
// the top blob and run Forward().
int ForwardReshape()
{
const Blob<float> *bottom_blob = _bottom_blobs[_bottom[0]];
input_height = bottom_blob->height();
input_width = bottom_blob->width();
input_channels = bottom_blob->channels();
//printf("layer %s\n", _name.c_str());
//printf("input %lu %lu %lu\n", input_channels, input_height, input_width);
if (global_pooling)
{
// global pooling collapses each channel to a single value
kernel_height = input_height;
kernel_width = input_width;
output_height = 1;
output_width = 1;
output_channels = input_channels;
}
else
{
//General pooling.
// Caffe-style output-size formula (ceil division of the padded extent)
output_channels = input_channels;
output_height = static_cast<int>(ceil(static_cast<float>(input_height + 2 * pad_height - kernel_height) / stride_height)) + 1;
output_width = static_cast<int>(ceil(static_cast<float>(input_width + 2 * pad_width - kernel_width) / stride_width)) + 1;
}
_top_blobs[_top[0]]->ReshapeWithRealloc(1, output_channels, output_height, output_width);
return Forward();
}
// Compute the output shape from the bottom blob and allocate the top blob.
int GenerateTopBlobs()
{
//Only accept a single bottom blob.
const Blob<float> *bottom_blob = _bottom_blobs[_bottom[0]];
input_height = bottom_blob->height();
input_width = bottom_blob->width();
input_channels = bottom_blob->channels();
//printf("layer %s\n", _name.c_str());
//printf("input %lu %lu %lu\n", input_channels, input_height, input_width);
if (global_pooling)
{
kernel_height = input_height;
kernel_width = input_width;
output_height = 1;
output_width = 1;
output_channels = input_channels;
}
else
{
//General pooling.
// same Caffe-style output-size formula as ForwardReshape
output_channels = input_channels;
output_height = static_cast<int>(ceil(static_cast<float>(input_height + 2 * pad_height - kernel_height) / stride_height)) + 1;
output_width = static_cast<int>(ceil(static_cast<float>(input_width + 2 * pad_width - kernel_width) / stride_width)) + 1;
}
_top_blobs[_top[0]] = new Blob<float>(1, output_channels, output_height, output_width);
_top_blobs[_top[0]]->Alloc();
//_top_blobs[_top[0]]->PrintBlobInfo();
return 0;
}
private:
// cached input/output geometry (set by GenerateTopBlobs/ForwardReshape)
size_t input_height;
size_t input_width;
size_t input_channels;
size_t output_height;
size_t output_width;
size_t output_channels;
// pooling geometry from the model parameters
size_t pad_height;
size_t pad_width;
size_t kernel_height;
size_t kernel_width;
size_t stride_height;
size_t stride_width;
bool global_pooling;
PoolingParameter_::PoolMethod method;
// scalar per-window kernel selected in the constructor (currently unused)
void (*_pool_inner_kernel)(float* out, const float* in, const size_t ldin, const size_t kernel_h, const size_t kernel_w);
};
};
|
LAGraph_tricount.c | //------------------------------------------------------------------------------
// LAGraph_tricount: count the number of triangles in a graph
//------------------------------------------------------------------------------
/*
LAGraph: graph algorithms based on GraphBLAS
Copyright 2020 LAGraph Contributors.
(see Contributors.txt for a full list of Contributors; see
ContributionInstructions.txt for information on how you can Contribute to
this project).
All Rights Reserved.
NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
Released under a BSD license, please see the LICENSE file distributed with
this Software or contact permission@sei.cmu.edu for full terms.
Created, in part, with funding and support from the United States
Government. (see Acknowledgments.txt file).
This program includes and/or can make use of certain third party source
code, object code, documentation and other files ("Third Party Software").
See LICENSE file for more details.
*/
//------------------------------------------------------------------------------
// LAGraph_tricount: count the number of triangles in a graph,
// Contributed by Tim Davis, Texas A&M.
// Given a symmetric graph A with no-self edges, LAGraph_tricount counts the
// number of triangles in the graph. A triangle is a clique of size three,
// that is, 3 nodes that are all pairwise connected.
// One of 6 methods are used, defined below where L and U are the strictly
// lower and strictly upper triangular parts of the symmetric matrix A,
// respectively. Each method computes the same result, ntri:
// 1: Burkhardt: ntri = sum (sum ((A^2) .* A)) / 6
// 2: Cohen: ntri = sum (sum ((L * U) .* A)) / 2
// 3: Sandia: ntri = sum (sum ((L * L) .* L))
// 4: Sandia2: ntri = sum (sum ((U * U) .* U))
// 5: SandiaDot: ntri = sum (sum ((L * U') .* L)). Note that L=U'.
// 6: SandiaDot2: ntri = sum (sum ((U * L') .* U)). Note that U=L'.
// A is a square symmetric matrix, of any type. Its values are ignored,
// (assuming v3.2.0 of SuiteSparse:GraphBLAS is used); otherwise, A must be
// binary. Results are undefined for methods 1 and 2 if self-edges exist in A.
// Results are undefined for all methods if A is unsymmetric.
// TODO use an enum for the above methods.
// All matrices are assumed to be in CSR format (GxB_BY_ROW in
// SuiteSparse:GraphBLAS). The 6 methods work fine if the matrices are in CSC
// format; just the underlying algorithms employed inside SuiteSparse:GraphBLAS
// will differ (dot product vs saxpy, for example). If L and U are in CSC
// format, then the "Dot" methods would use an outer product approach, which is
// slow in SuiteSparse:GraphBLAS (requiring an explicit transpose). The
// auto-sort rule probably needs to be reversed, if A is in CSC format (this is
// not yet tested).
// Methods 1 and 2 are much slower than methods 3 to 6 and take more memory.
// Methods 3 to 6 take a little less memory than methods 1 and 2, are by far
// the fastest methods in general. The methods 3 and 5 compute the same
// intermediate matrix (L*L), and differ only in the way the matrix
// multiplication is done. Method 3 uses an outer-product method (Gustavson's
// method). Method 5 uses dot products (assuming both matrices are in CSR
// format) and does not explicitly transpose U. They are called the "Sandia"
// method since matrices in the KokkosKernels are stored in compressed-sparse
// row form, so (L*L).*L in the KokkosKernel method is equivalent to (L*L).*L
// in SuiteSparse:GraphBLAS when the matrices in SuiteSparse:GraphBLAS are in
// their default format (also by row).
// The new GxB_PAIR_INT64 binary operator in SuiteSparse:GraphBLAS v3.2.0 is
// used in the semiring, if available. This is the function f(x,y)=1, so the
// values of A are not accessed; they can have any values and any type —
// only the structure of A is used. Otherwise, without this operator, the input matrix A
// must be binary.
// Reference: Wolf, Deveci, Berry, Hammond, Rajamanickam, 'Fast linear algebra-
// based triangle counting with KokkosKernels', IEEE HPEC'17,
// https://dx.doi.org/10.1109/HPEC.2017.8091043,
#include "LAGraph_internal.h"
#include "GB_msort_2.h"
//------------------------------------------------------------------------------
// tricount_prep: construct L and U
//------------------------------------------------------------------------------
#undef LAGRAPH_FREE_ALL
#define LAGRAPH_FREE_ALL \
GrB_free (&thunk) ; \
GrB_free (L) ; \
GrB_free (U) ;
// tricount_prep: construct L = tril (A,-1) and/or U = triu (A,1).
// Either of L or U may be NULL if that factor is not needed; on success the
// newly created matrices are owned by the caller.  Fixes over the prior
// version: 'info' and 'nedges' are now declared, and the function returns
// GrB_SUCCESS instead of falling off the end (undefined return value).
static GrB_Info tricount_prep
(
    GrB_Matrix *L,      // if non-NULL: strictly lower triangular part of A
    GrB_Matrix *U,      // if non-NULL: strictly upper triangular part of A
    GrB_Matrix A        // input matrix; only its structure is used
)
{
    GrB_Info info ;     // required by the LAGr_* / LAGRAPH_OK check macros
    GrB_Index n, *I = NULL, *J = NULL ;
    bool *X = NULL ;

#if defined ( GxB_SUITESPARSE_GRAPHBLAS ) \
    && ( GxB_IMPLEMENTATION >= GxB_VERSION (3,0,1) )

    //--------------------------------------------------------------------------
    // build L and/or U with GxB_select
    //--------------------------------------------------------------------------

    GxB_Scalar thunk ;
    LAGr_Matrix_nrows (&n, A) ;
    LAGr_Scalar_new (&thunk, GrB_INT64) ;

    if (L != NULL)
    {
        // L = tril (A,-1)
        LAGr_Matrix_new (L, GrB_BOOL, n, n) ;
        LAGr_Scalar_setElement (thunk, -1) ;
        LAGr_select (*L, NULL, NULL, GxB_TRIL, A, thunk, NULL) ;
    }

    if (U != NULL)
    {
        // U = triu (A,1)
        LAGr_Matrix_new (U, GrB_BOOL, n, n) ;
        LAGr_Scalar_setElement (thunk, 1) ;
        LAGr_select (*U, NULL, NULL, GxB_TRIU, A, thunk, NULL) ;
    }

    LAGr_free (&thunk) ;

#else

    //--------------------------------------------------------------------------
    // build L and U with extractTuples (slower than GxB_select)
    //--------------------------------------------------------------------------

    GrB_Vector thunk ;  // unused here; referenced by LAGRAPH_FREE_ALL on error
    LAGr_Matrix_nrows (&n, A) ;
    if (L != NULL || U != NULL)
    {
        GrB_Index nvals ;
        LAGr_Matrix_nvals (&nvals, A) ;
        I = LAGraph_malloc (nvals, sizeof (GrB_Index)) ;
        J = LAGraph_malloc (nvals, sizeof (GrB_Index)) ;
        X = LAGraph_malloc (nvals, sizeof (bool)) ;
        if (I == NULL || J == NULL || X == NULL)
        {
            LAGRAPH_ERROR ("out of memory") ;
        }
        LAGr_Matrix_extractTuples (I, J, X, &nvals, A) ;

        // compact the tuple lists, keeping only entries strictly below the
        // diagonal (I > J)
        GrB_Index nedges = 0 ;      // fix: was used without a declaration
        for (int64_t k = 0 ; k < nvals ; k++)
        {
            if (I [k] > J [k])
            {
                // keep this entry
                I [nedges] = I [k] ;
                J [nedges] = J [k] ;
                X [nedges] = X [k] ;
                nedges++ ;
            }
        }

        if (L != NULL)
        {
            LAGr_Matrix_new (L, GrB_BOOL, n, n) ;
            LAGr_Matrix_build (*L, I, J, X, nedges, GrB_LOR) ;
        }
        if (U != NULL)
        {
            // U is the transpose of the kept lower entries (swap I and J)
            LAGr_Matrix_new (U, GrB_BOOL, n, n) ;
            LAGr_Matrix_build (*U, J, I, X, nedges, GrB_LOR) ;
        }

        LAGRAPH_FREE (I) ;
        LAGRAPH_FREE (J) ;
        LAGRAPH_FREE (X) ;
    }
#endif

    return (GrB_SUCCESS) ;          // fix: function previously had no return
}
//------------------------------------------------------------------------------
// LAGraph_tricount: count the number of triangles in a graph
//------------------------------------------------------------------------------
#undef LAGRAPH_FREE_ALL
#define LAGRAPH_FREE_ALL \
GrB_free (&C) ; \
GrB_free (&L) ; \
GrB_free (&T) ; \
GrB_free (&U) ; \
LAGRAPH_FREE (W0) ; \
LAGRAPH_FREE (W1) ; \
LAGRAPH_FREE (P) ; \
LAGRAPH_FREE (D) ;
#if 0
// easy mode:
LAGr_info LAGraph_tricount
(
uint64_t *ntriangles, // # of triangles
LAGr_Graph G, // a graph
LAGr_descriptor d
) ;
LAGr_info LAGraph_tricount
(
uint64_t *ntriangles, // # of triangles
bool directed,
LAGr_Matrix A // adj matrix of an directed graph
) ;
#endif
// Count the triangles of the symmetric matrix A_in using one of six
// linear-algebra formulations (see the file header).  Optionally permutes
// the matrix by sorted node degree first, which can greatly speed up the
// "Sandia" methods on skewed-degree graphs.
GrB_Info LAGraph_tricount // count # of triangles
(
int64_t *ntri, // # of triangles
const int method, // 1 to 6, see above
int sorting, // 0: no sort
// 1: sort by degree, ascending order
// -1: sort by degree, descending order
// 2: auto selection: no sort if rule is not
// triggered. Otherwise: sort in ascending order
// for methods 3 and 5, descending ordering for
// methods 4 and 6.
const int64_t *degree, // degree of each node, may be NULL if sorting==0.
// of size n, unmodified.
const GrB_Matrix A_in // input matrix, must be symmetric, no diag entries
)
{
//--------------------------------------------------------------------------
// check inputs and initialize
//--------------------------------------------------------------------------
GrB_Info info ;
GrB_Index n ;
GrB_Matrix C = NULL, L = NULL, U = NULL, T = NULL, A = NULL ;
int64_t *P = NULL, *D = NULL, *W0 = NULL, *W1 = NULL ;
LAGr_Matrix_nrows (&n, A_in) ;
#if defined ( GxB_SUITESPARSE_GRAPHBLAS ) \
&& ( GxB_IMPLEMENTATION >= GxB_VERSION (3,2,0) )
// the PAIR function is f(x,y)=1, ignoring input values and type
GrB_Descriptor desc_s = GrB_DESC_S ;
GrB_Descriptor desc_st1 = GrB_DESC_ST1 ;
GrB_Semiring semiring = GxB_PLUS_PAIR_INT64 ;
// GrB_Semiring semiring = GxB_PLUS_PAIR_INT32 ;
#else
// f(x,y)=x*y, so x and y must be 1 to compute the correct count, and
// thus the input matrix A must be binary.
GrB_Descriptor desc_s = NULL ;
GrB_Descriptor desc_st1 = LAGraph_desc_otoo ;
GrB_Semiring semiring = LAGraph_PLUS_TIMES_INT64 ;
#endif
GrB_Monoid sum = LAGraph_PLUS_INT64_MONOID ;
LAGr_Matrix_new (&C, GrB_INT64, n, n) ;
// LAGr_Matrix_new (&C, GrB_INT32, n, n) ;
//--------------------------------------------------------------------------
// heuristic sort rule
//--------------------------------------------------------------------------
if (sorting == 2)
{
// auto selection of sorting method
sorting = 0 ; // default is not to sort
if (method >= 3 && method <= 6)
{
// This rule is very similar to Scott Beamer's rule in the GAP TC
// benchmark, except that it is extended to handle the ascending
// sort needed by methods 3 and 5. It also uses a stricter rule,
// since the performance of triangle counting in GraphBLAS is less
// sensitive to the sorting as compared to the GAP algorithm. This
// is because the dot products in GraphBLAS use binary search if
// one vector is very sparse compared to the other. As a result,
// GraphBLAS needs the sort for fewer matrices, as compared to the
// GAP algorithm.
// With this rule, the GAP-kron and GAP-twitter matrices are
// sorted, and the others remain unsorted. With the rule in the
// GAP tc.cc benchmark, GAP-web is also sorted, but it is not
// sorted here.
#define NSAMPLES 1000
GrB_Index nvals ;
LAGr_Matrix_nvals (&nvals, A_in) ;
if (n > NSAMPLES && ((double) nvals / ((double) n)) >= 10)
{
// pick 1000 nodes at random and determine their degree
// struct drand48_data buffer ;
// srand48_r ((long int) n, &buffer) ;
uint64_t seed = n ;
int64_t samples [NSAMPLES] ;
int64_t dsum = 0 ;
for (int k = 0 ; k < NSAMPLES ; k++)
{
uint64_t result = LAGraph_rand64 (&seed) ;
// lrand48_r (&buffer, &result) ;
int64_t i = result % n ;
int64_t d = degree [i] ;
samples [k] = d ;
dsum += d ;
}
// find the average degree
double sample_average = ((double) dsum) / NSAMPLES ;
// find the median degree
GB_qsort_1a (samples, NSAMPLES) ;
double sample_median = (double) samples [NSAMPLES/2] ;
printf ("average degree: %g\n", sample_average) ;
printf ("median degree: %g\n", sample_median) ;
// sort if the average degree is very high compared to the
// median
if (sample_average > 4 * sample_median)
{
switch (method)
{
case 3: sorting = 1 ; break ; // sort ascending
case 4: sorting = -1 ; break ; // sort descending
case 5: sorting = 1 ; break ; // sort ascending
case 6: sorting = -1 ; break ; // sort descending
default: sorting = 0 ; break ; // no sort
}
}
}
}
printf ("auto sorting: %d: ", sorting) ;
if (sorting == 0) printf ("none") ;
else if (sorting == -1) printf ("descending") ;
else if (sorting == 1) printf ("ascending") ;
printf ("\n") ;
}
//--------------------------------------------------------------------------
// sort the input matrix, if requested
//--------------------------------------------------------------------------
if (sorting != 0)
{
// decide how many threads to use
#define CHUNK (64*1024)
int nthreads = LAGraph_get_nthreads ( ) ;
nthreads = LAGRAPH_MIN (nthreads, n/CHUNK) ;
nthreads = LAGRAPH_MAX (nthreads, 1) ;
// allocate workspace
P = LAGraph_malloc (n, sizeof (int64_t)) ;
D = LAGraph_malloc (n, sizeof (int64_t)) ;
W0 = LAGraph_malloc (n, sizeof (int64_t)) ;
W1 = LAGraph_malloc (n, sizeof (int64_t)) ;
if (P == NULL || D == NULL || W0 == NULL || W1 == NULL)
{
// out of memory
LAGRAPH_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
// construct the pair [D,P] to sort
if (sorting > 0)
{
printf ("sort ascending\n") ;
// sort [D,P] in ascending order of degree, tie-breaking on P
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t k = 0 ; k < n ; k++)
{
D [k] = degree [k] ;
P [k] = k ;
}
}
else
{
printf ("sort descending\n") ;
// sort [D,P] in descending order of degree, tie-breaking on P
// (negating the degree turns the ascending sort into a descending one)
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t k = 0 ; k < n ; k++)
{
D [k] = -degree [k] ;
P [k] = k ;
}
}
// for (int64_t k = 0 ; k < n ; k++)
// {
// printf ("before [%3ld %3ld]\n", D [k], P [k]) ;
// }
// parallel merge sort of the [D,P] pairs; P becomes the permutation
GB_msort_2 (D, P, W0, W1, n, nthreads) ;
// printf ("\n") ;
// for (int64_t k = 0 ; k < n ; k++)
// {
// printf ("after [%3ld %3ld]\n", D [k], P [k]) ;
// }
// T = A_in (P,P) and typecast to boolean
LAGr_Matrix_new (&T, GrB_BOOL, n, n) ;
LAGr_extract (T, NULL, NULL, A_in, P, n, P, n, NULL) ;
A = T ;
}
else
{
// use the input matrix as-is
A = A_in ;
}
#if 0
printf ("permuted:\n") ;
GrB_Index ignore ;
GrB_Matrix_nvals (&ignore, A) ;
GxB_print (A, 3) ;
// compute the degree of each node (TODO: make this an LAGraph utility)
GrB_Vector X, D2 ;
LAGr_Vector_new (&X, GrB_BOOL, n) ;
LAGr_Vector_new (&D2, GrB_INT64, n) ;
LAGr_assign (X, NULL, NULL, 0, GrB_ALL, n, NULL) ;
LAGr_assign (D2, NULL, NULL, 0, GrB_ALL, n, NULL) ;
LAGr_vxm (D2, NULL, GrB_PLUS_INT64, GxB_PLUS_PAIR_INT64, X, A, NULL) ;
GxB_print (D2, 3) ;
GrB_free (&X) ;
GrB_Type type ;
GrB_Index n2, nvals2, *Di ;
int64_t *deg ;
// NOTE(review): the "°" below is a mojibake of "&deg" from an earlier
// encoding pass (dead code, kept verbatim)
LAGr_Vector_export (&D2, &type, &n2, &nvals2, &Di, (void **) °, NULL) ;
if (n != n2 || n != nvals2) { printf ("??\n") ; abort ( ) ; }
printf ("\nNew: sorting %d\n", sorting) ;
for (int i = 0 ; i < 67 ; i++)
{
printf ("node: %d degree %ld\n", i, deg [i]) ;
}
#endif
// free workspace
LAGRAPH_FREE (W0) ;
LAGRAPH_FREE (W1) ;
LAGRAPH_FREE (D) ;
LAGRAPH_FREE (P) ;
//--------------------------------------------------------------------------
// count triangles
//--------------------------------------------------------------------------
switch (method)
{
#if 0
// case 0: // minitri: ntri = nnz (A*E == 2) / 3
// This method requires the incidence matrix E. It is very slow
// compared to the other methods. The construction of E was done
// in the Test/Tricount/*.c driver, and it hasn't been added here.
LAGr_Matrix_ncols (&ne, E) ;
LAGr_free (&C) ;
LAGr_Matrix_new (&C, GrB_INT64, n, ne) ;
LAGr_mxm (C, NULL, NULL, semiring, A, E, NULL) ;
LAGr_Matrix_new (&S, GrB_BOOL, n, ne) ;
LAGr_apply (S, NULL, NULL, LAGraph_ISTWO_INT64, C, NULL) ;
LAGr_reduce (ntri, NULL, sum, S, NULL) ;
(*ntri) /= 3 ;
break ;
#endif
case 1: // Burkhardt: ntri = sum (sum ((A^2) .* A)) / 6
LAGr_mxm (C, A, NULL, semiring, A, A, desc_s) ;
LAGr_reduce (ntri, NULL, sum, C, NULL) ;
(*ntri) /= 6 ;
break ;
case 2: // Cohen: ntri = sum (sum ((L * U) .* A)) / 2
LAGRAPH_OK (tricount_prep (&L, &U, A)) ;
LAGr_mxm (C, A, NULL, semiring, L, U, desc_s) ;
LAGr_reduce (ntri, NULL, sum, C, NULL) ;
(*ntri) /= 2 ;
break ;
case 3: // Sandia: ntri = sum (sum ((L * L) .* L))
// using the masked saxpy3 method
LAGRAPH_OK (tricount_prep (&L, NULL, A)) ;
LAGr_mxm (C, L, NULL, semiring, L, L, desc_s) ;
LAGr_reduce (ntri, NULL, sum, C, NULL) ;
break ;
case 4: // Sandia2: ntri = sum (sum ((U * U) .* U))
// using the masked saxpy3 method
LAGRAPH_OK (tricount_prep (NULL, &U, A)) ;
LAGr_mxm (C, U, NULL, semiring, U, U, desc_s) ;
LAGr_reduce (ntri, NULL, sum, C, NULL) ;
break ;
case 5: // SandiaDot: ntri = sum (sum ((L * U') .* L))
// This tends to be the fastest method, for most matrices, but the
// Dot2 method is also very fast.
// using the masked dot product
LAGRAPH_OK (tricount_prep (&L, &U, A)) ;
LAGr_mxm (C, L, NULL, semiring, L, U, desc_st1) ;
LAGr_reduce (ntri, NULL, sum, C, NULL) ;
break ;
case 6: // SandiaDot2: ntri = sum (sum ((U * L') .* U))
// using the masked dot product
LAGRAPH_OK (tricount_prep (&L, &U, A)) ;
LAGr_mxm (C, U, NULL, semiring, U, L, desc_st1) ;
LAGr_reduce (ntri, NULL, sum, C, NULL) ;
break ;
default: // invalid method
LAGRAPH_FREE_ALL ;
return (GrB_INVALID_VALUE) ;
break ;
}
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
LAGRAPH_FREE_ALL ;
return (GrB_SUCCESS) ;
}
|
j3d27pt.gold.h | #include <cstdio>
#include <cstring>
// Reference ("gold") implementation of two applications of a 27-point
// Jacobi-style stencil: temp = S(in), then out = S(temp).  h2inv scales
// the coupling coefficient b; a weights the centre point; L, M, N are the
// grid extents and the one-cell boundary layer is left untouched.
void jacobi_gold(double *fout, double *ftemp, const double *fin, double h2inv, double a, double b, int L, int M, int N) {
// view the flat buffers as 3-D arrays via VLA-typed pointers
// (the cast on fin drops const for the array view)
double (*out)[M][N] = (double (*)[M][N]) fout;
double (*in)[M][N] = (double (*)[M][N]) fin;
double (*temp)[M][N] = (double (*)[M][N]) ftemp;
// d, e, f weight the 8 corner, 12 edge and 6 face neighbours respectively
double c = b * h2inv;
double d = c * 0.5;
double e = c * 0.125;
double f = c * 0.3;
// first stencil pass: temp = S(in)
#pragma omp parallel for
for (int k = 1; k < L - 1; ++k) {
for (int j = 1; j < M - 1; ++j) {
for (int i = 1; i < N - 1; ++i) {
temp[k][j][i] = a*in[k][j][i] -
d*(in[k-1][j-1][i-1] +
in[k-1][j-1][i+1] +
in[k-1][j+1][i-1] +
in[k-1][j+1][i+1] +
in[k+1][j-1][i-1] +
in[k+1][j-1][i+1] +
in[k+1][j+1][i-1] +
in[k+1][j+1][i+1]) +
e*(in[k-1][j-1][i] +
in[k-1][j][i-1] +
in[k-1][j][i+1] +
in[k-1][j+1][i] +
in[k][j-1][i-1] +
in[k][j-1][i+1] +
in[k][j+1][i-1] +
in[k][j+1][i+1] +
in[k+1][j-1][i] +
in[k+1][j][i-1] +
in[k+1][j][i+1] +
// NOTE(review): in[k][j+1][i] duplicates a face term from the
// f-group below while the edge neighbour in[k+1][j+1][i] is
// absent -- looks like a typo, but any optimized kernel
// validated against this gold must match it bit-for-bit, so it
// is left as-is; confirm against the benchmark kernels.
in[k][j+1][i]) +
f*(in[k-1][j][i] +
in[k][j-1][i] +
in[k][j][i-1] +
in[k][j][i+1] +
in[k][j+1][i] +
in[k+1][j][i]) +
0.13*in[k][j][i];
}
}
}
// second stencil pass: out = S(temp)
#pragma omp parallel for
for (int k = 1; k < L - 1; ++k) {
for (int j = 1; j < M - 1; ++j) {
for (int i = 1; i < N - 1; ++i) {
out[k][j][i] = a*temp[k][j][i] -
d*(temp[k-1][j-1][i-1] +
temp[k-1][j-1][i+1] +
temp[k-1][j+1][i-1] +
temp[k-1][j+1][i+1] +
temp[k+1][j-1][i-1] +
temp[k+1][j-1][i+1] +
temp[k+1][j+1][i-1] +
temp[k+1][j+1][i+1]) +
e*(temp[k-1][j-1][i] +
temp[k-1][j][i-1] +
temp[k-1][j][i+1] +
temp[k-1][j+1][i] +
temp[k][j-1][i-1] +
temp[k][j-1][i+1] +
temp[k][j+1][i-1] +
temp[k][j+1][i+1] +
temp[k+1][j-1][i] +
temp[k+1][j][i-1] +
temp[k+1][j][i+1] +
// NOTE(review): same suspected typo as the first pass (kept so
// both passes stay consistent)
temp[k][j+1][i]) +
f*(temp[k-1][j][i] +
temp[k][j-1][i] +
temp[k][j][i-1] +
temp[k][j][i+1] +
temp[k][j+1][i] +
temp[k+1][j][i]) +
0.13*temp[k][j][i];
}
}
}
}
|
simd1.c | /* { dg-do compile } */
/* { dg-options "-fopenmp" } */
/* { dg-additional-options "-std=c99" { target c } } */
extern int a[1024], b[1024], k, l, m;
void
foo ()
{
int i;
/* Plain 'omp simd': 16-lane dependence-safety guarantee, with the global
   arrays a and b asserted 32-byte aligned.  */
#pragma omp simd safelen(16) aligned(a, b : 32)
for (i = 0; i < 1024; i++)
a[i] *= b[i];
}
void
bar ()
{
int i;
/* Adds a pointer (p) to the aligned list and a linear clause whose step
   is the run-time expression m + 1; k is updated by that step per
   iteration.  l is listed linear but never modified in the body.  */
#pragma omp simd safelen(16) aligned(a, p : 32) linear(k, l : m + 1)
for (i = 0; i < 1024; i++)
a[i] *= p[i], k += m + 1;
}
void
baz (int *p)
{
/* Same as bar(), but with the loop iterator declared inside the for
   statement (C99 form) -- exercises a distinct parsing path.  */
#pragma omp simd safelen(16) aligned(a, p : 32) linear(k, l : m + 1)
for (int i = 0; i < 1024; i++)
a[i] *= p[i], k += m + 1;
}
|
test.c | #include <stdlib.h>
#include <stdio.h>
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define N 100
int main ()
{
/* Conformance smoke tests for '#pragma omp for simd' on an offload device.
 * Each sub-test re-initializes a/b/c, runs the same vector update on the
 * target under a different clause/schedule combination, verifies on the
 * host, and prints one "Succeeded"/"Failed" line. Known-hanging schedule
 * modifier combinations are disabled with #if 0. */
int a[N], b[N], c[N];
check_offloading();
long cpuExec = 0;
#pragma omp target map(tofrom: cpuExec)
{
/* NOTE(review): omp_is_initial_device() is used but <omp.h> is not
 * included directly -- presumably it comes via the utilities headers;
 * confirm. */
cpuExec = omp_is_initial_device();
}
if (!cpuExec) {
// Test: no clauses
int fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: private, firstprivate, lastprivate, linear
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
int q = -5;
int p = -3;
int r = 0;
int l = 10;
#pragma omp target teams num_teams(1) thread_limit(1024) map(tofrom: a, r) map(to: b,c)
#pragma omp parallel
#pragma omp for simd private(q) firstprivate(r) lastprivate(r) linear(l:2)
for (int i = 0 ; i < N ; i++) {
q = i + 5;
p += i + 2;
a[i] += p*b[i] + c[i]*q +l;
r = i;
}
for (int i = 0 ; i < N ; i++) {
int expected = (-1 + (-3 + i + 2)*i + (2*i)*(i + 5) + 10+(2*i));
if (a[i] != expected) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], expected);
fail = 1;
}
}
if (r != N-1) {
printf("Error for lastprivate: device = %d, host = %d\n", r, N-1);
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule static no chunk
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(static)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule static no chunk, monotonic
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target teams num_teams(1) thread_limit(1024) map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(monotonic: static)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule static no chunk, simd
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(simd: static)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule static chunk
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
int ch = 10;
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(static, ch)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule static chunk, monotonic
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
ch = 10;
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(monotonic: static, ch)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule static chunk, simd
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
ch = 10;
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(simd: static, ch)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule dynamic no chunk
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(dynamic)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// hangs
#if 0
// Test: schedule dynamic no chunk, monotonic
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(monotonic: dynamic)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule dynamic no chunk, nonmonotonic
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(nonmonotonic: dynamic)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
#endif
// Test: schedule dynamic no chunk, simd
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(simd: dynamic)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule dynamic chunk
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
ch = 10;
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(dynamic, ch)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
#if 0
// Test: schedule dynamic chunk, monotonic
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
ch = 10;
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(monotonic: dynamic, ch)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule dynamic chunk, nonmonotonic
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
ch = 10;
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(nonmonotonic: dynamic, ch)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
#endif
// Test: schedule dynamic chunk, simd
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
ch = 10;
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(simd: dynamic, ch)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule guided no chunk
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(guided)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
#if 0
// Test: schedule guided no chunk, monotonic
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(monotonic: guided)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule guided no chunk, nonmonotonic
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(nonmonotonic: guided)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
#endif
// Test: schedule guided no chunk, simd
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(simd: guided)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule guided chunk
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
ch = 10;
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(guided, ch)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
#if 0
// Test: schedule guided chunk, monotonic
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
ch = 10;
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(monotonic: guided, ch)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule guided chunk, nonmonotonic
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
ch = 10;
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(nonmonotonic: guided, ch)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
#endif
// Test: schedule guided chunk, simd
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
ch = 10;
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(simd: guided, ch)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule auto
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(auto)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule auto, monotonic
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(monotonic: auto)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule auto, simd
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(simd: auto)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule runtime
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(runtime)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
#if 0
// Test: schedule runtime, monotonic
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(monotonic: runtime)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
#endif
// Test: schedule runtime, simd
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(simd: runtime)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: collapse
fail = 0;
int ma[N][N], mb[N][N], mc[N][N];
for (int i = 0 ; i < N ; i++)
for (int j = 0 ; j < N ; j++) {
ma[i][j] = -1;
mb[i][j] = i;
mc[i][j] = 2*i;
}
#pragma omp target map(tofrom: ma) map(to: mb,mc)
#pragma omp parallel
#pragma omp for simd collapse(2)
for (int i = 0 ; i < N ; i++)
for (int j = 0 ; j < N ; j++)
ma[i][j] += mb[i][j] + mc[i][j];
for (int i = 0 ; i < N ; i++)
for (int j = 0 ; j < N ; j++)
if (ma[i][j] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, ma[i][j], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: ordered
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd ordered
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: nowait
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd nowait
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: safelen
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd safelen(16)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: simdlen
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd simdlen(16)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: aligned
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd aligned(a,b,c:8)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
} else {
// No device: emit the expected count of success lines (27 tests).
DUMP_SUCCESS(27);
}
return 0;
}
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two 'struct timeval' values.
 *
 * Returns 1 when the difference is negative, 0 otherwise. RESULT->tv_usec
 * always ends up in a valid (non-negative) range for X >= Y.
 *
 * NOTE: following the classic glibc manual example, *y is normalized in
 * place, so the caller's Y may be modified. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Borrow seconds into Y's microseconds so x->tv_usec >= y->tv_usec. */
if (x->tv_usec < y->tv_usec) {
int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * borrow;
y->tv_sec += borrow;
}
/* Carry whole seconds out of an oversized microsecond difference. */
if (x->tv_usec - y->tv_usec > 1000000) {
int carry = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * carry;
y->tv_sec -= carry;
}
/* After normalization tv_usec is certainly positive. */
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
/* Driver for the time-tiled order-2 3D 25-point stencil (PLUTO/CLooG
 * generated loop nest). Usage: prog Nx Ny Nz Nt. Runs TESTS repetitions
 * and reports the per-run and best wall-clock time. */
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
/* NOTE(review): Nx/Ny/Nz (and Nt) remain uninitialized when fewer
 * arguments are supplied -- UB downstream; confirm callers always pass
 * all four sizes. */
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
/* Double-buffered field A[2][Nz][Ny][Nx] plus coefficient grid roc2. */
double ****A = (double ****) malloc(sizeof(double***)*2);
double ***roc2 = (double ***) malloc(sizeof(double**));
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
/* NOTE(review): the one-element allocation assigned to roc2 above is
 * leaked by this reassignment. */
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 8;
tile_size[3] = 256;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
/* NOTE(review): initialization starts at index 1, so plane/row/column 0
 * is left as malloc garbage even though the shifted stencil indices can
 * reach it -- presumably benign for timing purposes; confirm. */
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
/* Order-2 (radius-4) finite-difference coefficients. */
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
/* Auto-generated diamond/time-tiled loop nest; t5 is the time step,
 * t6/t7/t8 index z/y/x shifted by 4*t5; the innermost loop is the
 * vectorizable x sweep. Do not hand-edit. */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=Nt-1;t1++) {
lbp=ceild(t1+1,2);
ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(ceild(t1,2),ceild(8*t2-Nz+5,8));t3<=min(floord(4*Nt+Ny-9,8),floord(4*t1+Ny-1,8));t3++) {
for (t4=max(max(ceild(t1-62,64),ceild(8*t2-Nz-243,256)),ceild(8*t3-Ny-243,256));t4<=min(min(floord(4*Nt+Nx-9,256),floord(4*t1+Nx-1,256)),floord(8*t3+Nx-5,256));t4++) {
for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(256*t4-Nx+5,4)),t1);t5<=min(min(min(2*t3,Nt-1),t1+1),64*t4+62);t5++) {
for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) {
lbv=max(256*t4,4*t5+4);
ubv=min(256*t4+255,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
BinaryLutN.h | // --------------------------------------------------------------------------
// Binary Brain -- binary neural net framework
//
// Copyright (C) 2018 by Ryuji Fuchikami
// https://github.com/ryuz
// ryuji.fuchikami@nifty.com
// --------------------------------------------------------------------------
#pragma once
#include <array>
#include <vector>
#include "bb/BinaryLutModel.h"
namespace bb {
// テーブルサイズ固定LUT
template <int N = 6, typename FT = Bit, typename BT = float>
class BinaryLutN : public BinaryLutModel
{
using _super = BinaryLutModel;
public:
// Naming helpers used for model identification and serialization:
// ClassName encodes the LUT arity N; ObjectName also encodes the
// frontend/backend data types.
static inline std::string ClassName(void) { return "BinaryLut" + std::to_string(N); }
static inline std::string ObjectName(void){ return ClassName() + "_" + DataType<FT>::Name() + "_" + DataType<BT>::Name(); }
std::string GetModelName(void) const override { return ClassName(); }
std::string GetObjectName(void) const override { return ObjectName(); }
protected:
bool m_host_only = false;
bool m_host_simd = true;
std::string m_connection;
indices_t m_input_shape;
indices_t m_output_shape;
static int const m_table_size = (1 << N);
static int const m_table_bits = sizeof(std::int32_t) * 8;
static int const m_table_unit = (m_table_size + (m_table_bits - 1)) / m_table_bits;
Tensor_<std::int32_t> m_table;
Tensor_<std::int32_t> m_input_index;
std::mt19937_64 m_mt;
public:
struct create_t
{
indices_t output_shape;
std::string connection="";
std::uint64_t seed = 1;
};
protected:
// Construct from a create_t descriptor: seeds the RNG used for random
// connection/table initialization and sizes the per-node storage
// (N input indices and m_table_unit packed 32-bit table words per node).
BinaryLutN(create_t const &create)
{
BB_ASSERT(!create.output_shape.empty());
m_mt.seed(create.seed);
m_output_shape = create.output_shape;
m_connection = create.connection;
m_input_index.Resize(CalcShapeSize(m_output_shape), (index_t)N);
m_table.Resize(CalcShapeSize(m_output_shape), (index_t)m_table_unit);
}
// Runtime command interface: accepts "host_only <bool>" and
// "host_simd <bool>"; unknown commands are silently ignored.
void CommandProc(std::vector<std::string> args) override
{
// Configure host-only mode
if (args.size() == 2 && args[0] == "host_only")
{
m_host_only = EvalBool(args[1]);
}
// Configure host SIMD mode
if (args.size() == 2 && args[0] == "host_simd")
{
m_host_simd = EvalBool(args[1]);
}
}
public:
~BinaryLutN() {}
// Factory overloads: construct a shared_ptr-managed instance from a full
// descriptor, an output shape, a flat node count, or defaults.
static std::shared_ptr<BinaryLutN> Create(create_t const &create)
{
return std::shared_ptr<BinaryLutN>(new BinaryLutN(create));
}
static std::shared_ptr<BinaryLutN> Create(indices_t const &output_shape, std::uint64_t seed = 1)
{
create_t create;
create.output_shape = output_shape;
create.seed = seed;
return Create(create);
}
static std::shared_ptr<BinaryLutN> Create(index_t output_node_size, std::uint64_t seed = 1)
{
// 1-D output shape holding the flat node count.
create_t create;
create.output_shape.resize(1);
create.output_shape[0] = output_node_size;
create.seed = seed;
return Create(create);
}
static std::shared_ptr<BinaryLutN> Create(void)
{
return Create(create_t());
}
#ifdef BB_PYBIND11 // for Python bindings
// Python-facing factory: same as Create(create_t) but with flat
// keyword-style arguments for pybind11.
static std::shared_ptr<BinaryLutN> CreatePy(
indices_t output_shape,
std::string connection="",
std::uint64_t seed = 1)
{
create_t create;
create.output_shape = output_shape;
create.connection = connection;
create.seed = seed;
return Create(create);
}
#endif
// Raw accessors for the per-node input-index tensor.
auto lock_InputIndex(void) { return m_input_index.Lock(); }
auto lock_InputIndex_const(void) const { return m_input_index.LockConst(); }
// Sparse-connection management: each output node has exactly N inputs.
index_t GetNodeConnectionSize(index_t node) const override
{
return N;
}
// Wire input slot 'input_index' of 'node' to source node 'input_node'.
void SetNodeConnectionIndex(index_t node, index_t input_index, index_t input_node) override
{
BB_ASSERT(node >= 0 && node < CalcShapeSize(m_output_shape));
BB_ASSERT(input_index >= 0 && input_index < N);
BB_DEBUG_ASSERT(input_node >= 0 && input_node < GetInputNodeSize());
auto ptr = lock_InputIndex();
ptr(node, input_index) = (std::int32_t)input_node;
}
// Read back which source node feeds input slot 'input_index' of 'node'.
index_t GetNodeConnectionIndex(index_t node, index_t input_index) const override
{
BB_ASSERT(node >= 0 && node < CalcShapeSize(m_output_shape));
BB_ASSERT(input_index >= 0 && input_index < N);
auto ptr = lock_InputIndex_const();
return (index_t)ptr(node, input_index);
}
// LUT操作の定義
int GetLutTableSize(index_t node) const
{
return m_table_size;
}
// Set truth-table bit 'bitpos' of 'node' to 'value'.
// The table is stored as packed words of m_table_bits bits each.
void SetLutTable(index_t node, int bitpos, bool value) override
{
    BB_ASSERT(node >= 0 && node < CalcShapeSize(m_output_shape));
    BB_ASSERT(bitpos >= 0 && bitpos < m_table_size);
    int word = bitpos / m_table_bits;
    int mask = 1 << (bitpos % m_table_bits);
    auto table = m_table.Lock();
    if (value) {
        table(node, word) |= mask;
    }
    else {
        table(node, word) &= ~mask;
    }
}
// Read truth-table bit 'bitpos' of 'node'.
//
// node   : output node index
// bitpos : bit position within the node's LUT (0 .. m_table_size-1)
// return : the stored boolean value
bool GetLutTable(index_t node, int bitpos) const override
{
    BB_ASSERT(node >= 0 && node < CalcShapeSize(m_output_shape));
    // Bound the check with m_table_size for consistency with SetLutTable
    // (it was previously hard-coded as (1 << N)).
    BB_ASSERT(bitpos >= 0 && bitpos < m_table_size);
    int idx = bitpos / m_table_bits;
    int bit = bitpos % m_table_bits;
    auto ptr = m_table.LockConst();
    return ((ptr(node, idx) & (1 << bit)) != 0);
}
/**
 * @brief  Set the input shape
 * @detail Stores the new shape and re-initializes the node connections
 *         and LUT tables.  A no-op when the shape is unchanged.
 * @param  shape  new input shape
 * @return the resulting output shape
 */
indices_t SetInputShape(indices_t shape) override
{
    // Nothing to do if this shape is already set
    if ( shape == this->GetInputShape() ) {
        return this->GetOutputShape();
    }
    // Store the new shape
    m_input_shape = shape;
    // Re-initialize the input connections
    this->InitializeNodeInput(m_mt(), m_connection);
    // Re-initialize the LUT tables
    this->InitializeLutTable(m_mt());
    return m_output_shape;
}
/**
 * @brief  Set the output shape
 * @detail The shape may be changed freely as long as the total number
 *         of output nodes stays the same.
 * @param  shape  new output shape
 */
void SetOutputShape(indices_t const &shape)
{
    BB_ASSERT(CalcShapeSize(shape) == this->m_output_node_size);
    m_output_shape = shape;
}
/**
 * @brief  Get the input shape
 * @return the current input shape
 */
indices_t GetInputShape(void) const override
{
    return m_input_shape;
}
/**
 * @brief  Get the output shape
 * @return the current output shape
 */
indices_t GetOutputShape(void) const override
{
    return m_output_shape;
}
private:
// One step of the LUT-entry lane selection: if bit VAL of table index LUT
// is set, keep the lanes where the input bit is 1 (and); otherwise keep
// the lanes where it is 0 (andnot).  LUT and VAL are compile-time, so the
// branch folds away.
template<int LUT, int VAL>
inline __m256i lut_mask_unit(__m256i& val, __m256i& lut)
{
    return ((LUT >> VAL) & 1) ? _mm256_and_si256(val, lut)
                              : _mm256_andnot_si256(val, lut);
}
// OR into 'msk' the lanes whose six input bits (val[0..5]) exactly select
// truth-table entry 'LUT'.  'lut' starts as the broadcast table value for
// this entry and is progressively narrowed by each input bit.
template<int LUT>
inline void lut6_mask(__m256i& msk, __m256i lut, __m256i val[6])
{
    lut = lut_mask_unit<LUT, 0>(val[0], lut);
    lut = lut_mask_unit<LUT, 1>(val[1], lut);
    lut = lut_mask_unit<LUT, 2>(val[2], lut);
    lut = lut_mask_unit<LUT, 3>(val[3], lut);
    lut = lut_mask_unit<LUT, 4>(val[4], lut);
    lut = lut_mask_unit<LUT, 5>(val[5], lut);
    msk = _mm256_or_si256(msk, lut);
}
// Read LUT bit 'index' of 'node' through an already-acquired const pointer
// (avoids re-locking the table inside hot loops).
inline bool GetLutTableFromPtr(Tensor_<std::int32_t>::ConstPtr ptr, index_t node, int index)
{
    auto word = index / m_table_bits;
    auto bit  = index % m_table_bits;
    std::int32_t bits = ptr(node, word);
    return ((bits >> bit) & 1) == 1;
}
public:
/**
 * @brief  Forward evaluation of the binary LUT layer.
 * @detail Each output node reads its N connected input bits, uses them as
 *         an index into the node's truth table, and emits the stored bit.
 *         Three execution paths: CUDA kernel (N==6, Bit frames, device
 *         available), AVX2 host SIMD (N==6, Bit frames, m_host_simd),
 *         and a generic scalar fallback.
 * @param  x_buf  input frame buffer (type must match FT)
 * @param  train  unused here; kept for the common layer interface
 * @return output frame buffer
 */
FrameBuffer Forward(FrameBuffer x_buf, bool train = true) override
{
    BB_ASSERT(x_buf.GetType() == DataType<FT>::type);
    // Adopt the input shape on first call if SetInputShape was never invoked
    if (x_buf.GetShape() != m_input_shape) {
        SetInputShape(x_buf.GetShape());
    }
    // Allocate the output buffer
    FrameBuffer y_buf(x_buf.GetFrameSize(), m_output_shape, DataType<FT>::type);
#ifdef BB_WITH_CUDA
    // CUDA path: bit-packed 6-input LUT evaluation on the device
    if ( N == 6 && DataType<FT>::type == BB_TYPE_BIT && !m_host_only
        && x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
        auto x_ptr           = x_buf.LockDeviceMemoryConst();
        auto y_ptr           = y_buf.LockDeviceMemory(true);
        auto input_index_ptr = m_input_index.LockDeviceMemoryConst();
        auto table_ptr       = m_table.LockDeviceMemoryConst();
        bbcu_bit_BinatyLut6_Forward
            (
                (int const *)x_ptr.GetAddr(),
                (int       *)y_ptr.GetAddr(),
                (int const *)input_index_ptr.GetAddr(),
                (int const *)table_ptr.GetAddr(),
                (int        )y_buf.GetNodeSize(),
                (int        )y_buf.GetFrameSize(),
                (int        )(y_buf.GetFrameStride() / sizeof(int))
            );
        return y_buf;
    }
#endif
    // AVX2 host path: evaluate 256 frames per iteration for 6-input LUTs
    if ( N == 6 && DataType<FT>::type == BB_TYPE_BIT && m_host_simd ) {
        auto x_ptr           = x_buf.LockConst<Bit>();
        auto y_ptr           = y_buf.Lock<Bit>(true);
        auto input_index_ptr = m_input_index.LockConst();
        auto table_ptr       = m_table.LockConst();
        index_t node_size  = y_buf.GetNodeSize();
        index_t frame_size = y_buf.GetFrameStride() / sizeof(__m256i);
        #pragma omp parallel for
        for (index_t node = 0; node < node_size; ++node) {
            __m256i*    x_addr[6];
            __m256i*    y_addr;
            __m256i     x[6];
            // Resolve the six input bit-streams for this node
            x_addr[0] = (__m256i*)x_ptr.GetAddr(input_index_ptr(node, 0));
            x_addr[1] = (__m256i*)x_ptr.GetAddr(input_index_ptr(node, 1));
            x_addr[2] = (__m256i*)x_ptr.GetAddr(input_index_ptr(node, 2));
            x_addr[3] = (__m256i*)x_ptr.GetAddr(input_index_ptr(node, 3));
            x_addr[4] = (__m256i*)x_ptr.GetAddr(input_index_ptr(node, 4));
            x_addr[5] = (__m256i*)x_ptr.GetAddr(input_index_ptr(node, 5));
            y_addr = (__m256i*)y_ptr.GetAddr(node);
            // Expand the 64 packed table bits into per-entry byte masks
            // (-1 = 0xFF for a 1-bit, 0 for a 0-bit) so they can be
            // broadcast into vector lanes below.
            char    table[64];
            std::int32_t t0 = table_ptr(node, 0);
            std::int32_t t1 = table_ptr(node, 1);
            for (int i = 0; i < 32; ++i) { table[i]    = (t0 & (1 << i)) ? -1 : 0; }
            for (int i = 0; i < 32; ++i) { table[i+32] = (t1 & (1 << i)) ? -1 : 0; }
            for (index_t frame = 0; frame < frame_size; ++frame) {
                // input: 256 bit-frames per input pin
                x[0] = _mm256_loadu_si256(&x_addr[0][frame]);
                x[1] = _mm256_loadu_si256(&x_addr[1][frame]);
                x[2] = _mm256_loadu_si256(&x_addr[2][frame]);
                x[3] = _mm256_loadu_si256(&x_addr[3][frame]);
                x[4] = _mm256_loadu_si256(&x_addr[4][frame]);
                x[5] = _mm256_loadu_si256(&x_addr[5][frame]);
                // LUT: for each of the 64 table entries, accumulate the
                // lanes whose input bits select that entry
                __m256i y = _mm256_set1_epi8(0);
                lut6_mask< 0>(y, _mm256_set1_epi8(table[0]),  x);
                lut6_mask< 1>(y, _mm256_set1_epi8(table[1]),  x);
                lut6_mask< 2>(y, _mm256_set1_epi8(table[2]),  x);
                lut6_mask< 3>(y, _mm256_set1_epi8(table[3]),  x);
                lut6_mask< 4>(y, _mm256_set1_epi8(table[4]),  x);
                lut6_mask< 5>(y, _mm256_set1_epi8(table[5]),  x);
                lut6_mask< 6>(y, _mm256_set1_epi8(table[6]),  x);
                lut6_mask< 7>(y, _mm256_set1_epi8(table[7]),  x);
                lut6_mask< 8>(y, _mm256_set1_epi8(table[8]),  x);
                lut6_mask< 9>(y, _mm256_set1_epi8(table[9]),  x);
                lut6_mask<10>(y, _mm256_set1_epi8(table[10]), x);
                lut6_mask<11>(y, _mm256_set1_epi8(table[11]), x);
                lut6_mask<12>(y, _mm256_set1_epi8(table[12]), x);
                lut6_mask<13>(y, _mm256_set1_epi8(table[13]), x);
                lut6_mask<14>(y, _mm256_set1_epi8(table[14]), x);
                lut6_mask<15>(y, _mm256_set1_epi8(table[15]), x);
                lut6_mask<16>(y, _mm256_set1_epi8(table[16]), x);
                lut6_mask<17>(y, _mm256_set1_epi8(table[17]), x);
                lut6_mask<18>(y, _mm256_set1_epi8(table[18]), x);
                lut6_mask<19>(y, _mm256_set1_epi8(table[19]), x);
                lut6_mask<20>(y, _mm256_set1_epi8(table[20]), x);
                lut6_mask<21>(y, _mm256_set1_epi8(table[21]), x);
                lut6_mask<22>(y, _mm256_set1_epi8(table[22]), x);
                lut6_mask<23>(y, _mm256_set1_epi8(table[23]), x);
                lut6_mask<24>(y, _mm256_set1_epi8(table[24]), x);
                lut6_mask<25>(y, _mm256_set1_epi8(table[25]), x);
                lut6_mask<26>(y, _mm256_set1_epi8(table[26]), x);
                lut6_mask<27>(y, _mm256_set1_epi8(table[27]), x);
                lut6_mask<28>(y, _mm256_set1_epi8(table[28]), x);
                lut6_mask<29>(y, _mm256_set1_epi8(table[29]), x);
                lut6_mask<30>(y, _mm256_set1_epi8(table[30]), x);
                lut6_mask<31>(y, _mm256_set1_epi8(table[31]), x);
                lut6_mask<32>(y, _mm256_set1_epi8(table[32]), x);
                lut6_mask<33>(y, _mm256_set1_epi8(table[33]), x);
                lut6_mask<34>(y, _mm256_set1_epi8(table[34]), x);
                lut6_mask<35>(y, _mm256_set1_epi8(table[35]), x);
                lut6_mask<36>(y, _mm256_set1_epi8(table[36]), x);
                lut6_mask<37>(y, _mm256_set1_epi8(table[37]), x);
                lut6_mask<38>(y, _mm256_set1_epi8(table[38]), x);
                lut6_mask<39>(y, _mm256_set1_epi8(table[39]), x);
                lut6_mask<40>(y, _mm256_set1_epi8(table[40]), x);
                lut6_mask<41>(y, _mm256_set1_epi8(table[41]), x);
                lut6_mask<42>(y, _mm256_set1_epi8(table[42]), x);
                lut6_mask<43>(y, _mm256_set1_epi8(table[43]), x);
                lut6_mask<44>(y, _mm256_set1_epi8(table[44]), x);
                lut6_mask<45>(y, _mm256_set1_epi8(table[45]), x);
                lut6_mask<46>(y, _mm256_set1_epi8(table[46]), x);
                lut6_mask<47>(y, _mm256_set1_epi8(table[47]), x);
                lut6_mask<48>(y, _mm256_set1_epi8(table[48]), x);
                lut6_mask<49>(y, _mm256_set1_epi8(table[49]), x);
                lut6_mask<50>(y, _mm256_set1_epi8(table[50]), x);
                lut6_mask<51>(y, _mm256_set1_epi8(table[51]), x);
                lut6_mask<52>(y, _mm256_set1_epi8(table[52]), x);
                lut6_mask<53>(y, _mm256_set1_epi8(table[53]), x);
                lut6_mask<54>(y, _mm256_set1_epi8(table[54]), x);
                lut6_mask<55>(y, _mm256_set1_epi8(table[55]), x);
                lut6_mask<56>(y, _mm256_set1_epi8(table[56]), x);
                lut6_mask<57>(y, _mm256_set1_epi8(table[57]), x);
                lut6_mask<58>(y, _mm256_set1_epi8(table[58]), x);
                lut6_mask<59>(y, _mm256_set1_epi8(table[59]), x);
                lut6_mask<60>(y, _mm256_set1_epi8(table[60]), x);
                lut6_mask<61>(y, _mm256_set1_epi8(table[61]), x);
                lut6_mask<62>(y, _mm256_set1_epi8(table[62]), x);
                lut6_mask<63>(y, _mm256_set1_epi8(table[63]), x);
                _mm256_storeu_si256(&y_addr[frame], y);
            }
        }
        return y_buf;
    }
    {
        // generic (scalar) fallback for any N / frame type
        // NOTE(review): y_buf is locked without the 'true' (fresh buffer)
        // flag used by the SIMD path above — confirm Lock<FT>() defaults.
        auto x_ptr           = x_buf.LockConst<FT>();
        auto y_ptr           = y_buf.Lock<FT>();
        auto input_index_ptr = m_input_index.LockConst();
        auto table_ptr       = m_table.LockConst();
        index_t frame_size = x_buf.GetFrameSize();
        index_t node_size  = this->GetOutputNodeSize();
        #pragma omp parallel for
        for (index_t node = 0; node < node_size; ++node) {
            for (index_t frame = 0; frame < frame_size; ++frame) {
                // Assemble the LUT index from the N input bits
                int index = 0;
                int mask  = 1;
                for (index_t i = 0; i < N; i++) {
                    index_t input_node = input_index_ptr(node, i);
                    bool x = (x_ptr.Get(frame, input_node) != 0);
                    index |= x ? mask : 0;
                    mask <<= 1;
                }
                auto y = GetLutTableFromPtr(table_ptr, node, index);
                y_ptr.Set(frame, node, y);
            }
        }
        return y_buf;
    }
}
// No meaningful backward pass exists for a binary LUT; a gradient buffer of
// the input shape is returned to keep the layer interface uniform.
// NOTE(review): dx_buf is returned without being explicitly zero-filled —
// confirm FrameBuffer's constructor semantics if callers read its contents.
FrameBuffer Backward(FrameBuffer dy_buf) override
{
    if (dy_buf.Empty()) {
        return dy_buf;
    }
    FrameBuffer dx_buf(dy_buf.GetFrameSize(), m_input_shape, DataType<BT>::type);
    return dx_buf;
}
// Serialization
protected:
// Write this object's state to 'os': format version, base-class data,
// then the members (flags, connection policy, shapes, tables).
void DumpObjectData(std::ostream &os) const override
{
    // format version
    std::int64_t ver = 1;
    bb::SaveValue(os, ver);
    // base class
    _super::DumpObjectData(os);
    // members (order must match LoadObjectData)
    bb::SaveValue(os, m_host_only);
    bb::SaveValue(os, m_host_simd);
    bb::SaveValue(os, m_connection);
    bb::SaveValue(os, m_input_shape);
    bb::SaveValue(os, m_output_shape);
    m_table.DumpObject(os);
    m_input_index.DumpObject(os);
}
// Restore this object's state from 'is' (inverse of DumpObjectData).
// Only format version 1 is accepted.
void LoadObjectData(std::istream &is) override
{
    // format version
    std::int64_t ver;
    bb::LoadValue(is, ver);
    BB_ASSERT(ver == 1);
    // base class
    _super::LoadObjectData(is);
    // members (order must match DumpObjectData)
    bb::LoadValue(is, m_host_only);
    bb::LoadValue(is, m_host_simd);
    bb::LoadValue(is, m_connection);
    bb::LoadValue(is, m_input_shape);
    bb::LoadValue(is, m_output_shape);
    m_table.LoadObject(is);
    m_input_index.LoadObject(is);
}
};
} |
a.10.1.c | /* { dg-do compile } */
#include <stdio.h>
/* Placeholder task body for phase 1 of the example. */
void
work1 ()
{
}
/* Placeholder task body for phase 2 of the example. */
void
work2 ()
{
}
/* OpenMP 'single' example (compile-only test): each message prints exactly
   once; the implicit barriers after the first two single constructs order
   the phases, while 'nowait' lets threads begin work2 without waiting for
   the last message to print. */
void
a10 ()
{
#pragma omp parallel
  {
#pragma omp single
    printf ("Beginning work1.\n");
    work1 ();
#pragma omp single
    printf ("Finishing work1.\n");
#pragma omp single nowait
    printf ("Finished work1 and beginning work2.\n");
    work2 ();
  }
}
|
MixedSolverSchurMP.h | /**
* This file is part of the Eigen Recursive Matrix Extension (ERME).
*
* Copyright (c) 2019 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#pragma once
#include "../Cholesky.h"
#include "../Core.h"
#include "MixedSolver.h"
#include <memory>
namespace Eigen::Recursive
{
// Schur Solver for the following format:
//
// | U W | |x1| |a|
// | WT V | |x2| = |b|
//
// U: Sparse Block
// V: Block Diagonal
// W: Sparse Block
//
// Solution: (https://en.wikipedia.org/wiki/Schur_complement)
//
// Schur Matrix
// S = U - W * V^-1 * WT
// Right Hand Side of Schur System
// r1 = a - W * V^-1 * b
// Solve for x1
// S * x1 = r1
// Solve for x2
// r2 = b - WT * x1
// x2 = V^-1 * r2
//
//
// Specialization for: U sparse-block, V block-diagonal, W sparse-block.
// Solves the symmetric 2x2 block system via the Schur complement scheme
// documented above (direct LDLT or preconditioned CG on S).
template <typename UBlock, typename VBlock, typename WBlock, typename XType>
class MixedSymmetricRecursiveSolver<
    SymmetricMixedMatrix2<Eigen::SparseMatrix<UBlock, Eigen::RowMajor>, Eigen::DiagonalMatrix<VBlock, -1>,
                          Eigen::SparseMatrix<WBlock, Eigen::RowMajor>>,
    XType>
{
   public:
    using AType = SymmetricMixedMatrix2<Eigen::SparseMatrix<UBlock, Eigen::RowMajor>, Eigen::DiagonalMatrix<VBlock, -1>,
                                        Eigen::SparseMatrix<WBlock, Eigen::RowMajor>>;
    using AUType  = typename AType::UType;
    using AVType  = typename AType::VType;
    using AWType  = typename AType::WType;
    using AWTType = typename TransposeType<AWType>::Type;
    using XUType  = typename XType::UType;
    using XVType  = typename XType::VType;
    using S1Type = Eigen::SparseMatrix<UBlock, Eigen::RowMajor>;
    using S2Type = Eigen::SparseMatrix<VBlock, Eigen::RowMajor>;
    using LDLT = Eigen::RecursiveSimplicialLDLT<S1Type, Eigen::Upper>;
    using InnerSolver1 = MixedSymmetricRecursiveSolver<S1Type, XUType>;
    // Allocate every solver temporary for an n (U side) x m (V side) system.
    void resize(int n, int m)
    {
        this->n = n;
        this->m = m;
        Vinv.resize(m);
        Y.resize(n, m);
        ej.resize(n);
        q.resize(m);
        S1.resize(n, n);
        P.resize(n);
        tmp.resize(n);
    }
    // Choose the solve strategy and precompute the sparsity structure of W^T.
    void analyzePattern(const AType& A, const LinearSolverOptions& solverOptions)
    {
        resize(A.u.rows(), A.v.rows());
        if (solverOptions.solverType == LinearSolverOptions::SolverType::Direct)
        {
            hasWT         = true;
            explizitSchur = true;
            ldlt          = nullptr;
        }
        else
        {
            // TODO: add heurisitc here
            hasWT = true;
            if (solverOptions.buildExplizitSchur)
                explizitSchur = true;
            else
                explizitSchur = false;
        }
        if (hasWT)
        {
            transposeStructureOnly(A.w, WT);
        }
        patternAnalyzed = true;
    }
    // Solve A [x.u; x.v] = [b.u; b.v] using the Schur complement of V.
    void solve(AType& A, XType& x, XType& b, const LinearSolverOptions& solverOptions = LinearSolverOptions())
    {
        // Some references for easier access
        const AUType& U = A.u;
        const AVType& V = A.v;
        const AWType& W = A.w;
        XUType& da      = x.u;
        XVType& db      = x.v;
        const XUType& ea = b.u;
        const XVType& eb = b.v;
        if (!patternAnalyzed) analyzePattern(A, solverOptions);
        if (hasWT)
        {
            transposeValueOnly(A.w, WT);
        }
        // Invert V block-wise (Cholesky-based inverse of each diagonal block)
        for (int i = 0; i < m; ++i)
        {
            // Vinv.diagonal()(i) = V.diagonal()(i).get().inverse();
            Vinv.diagonal()(i) = Recursive::inverseCholesky(V.diagonal()(i));
        }
        // Y = W * V^-1
        multSparseDiag(W, Vinv, Y);
        // NOTE(review): this specialization always builds the explicit Schur
        // complement; the explizitSchur flag set in analyzePattern is not
        // consulted here (unlike the diagonal-U specialization below).
        eigen_assert(hasWT);
        // S = U - W * V^-1 * WT
        S1 = (Y * WT).template triangularView<Eigen::Upper>();
        S1 = U - S1;
        // r = a - W * V^-1 * b
        ej = ea + -(Y * eb);
        if (solverOptions.solverType == LinearSolverOptions::SolverType::Direct)
        {
            // Direct recusive ldlt solver
            if (!ldlt)
            {
                ldlt = std::make_unique<LDLT>();
                ldlt->compute(S1);
            }
            else
            {
                ldlt->factorize(S1);
            }
            da = ldlt->solve(ej);
        }
        else
        {
            P.compute(S1);
            da.setZero();
            // Iterative CG solver (diagonal-preconditioned)
            Eigen::Index iters = solverOptions.maxIterativeIterations;
            double tol         = solverOptions.iterativeTolerance;
            // XUType tmp(n);
            recursive_conjugate_gradient(
                [&](const XUType& v, XUType& result) {
                    // x = U * p - Y * WT * p
                    result = S1.template selfadjointView<Eigen::Upper>() * v;
                },
                ej, da, P, iters, tol);
        }
        // finalize: back-substitute for x2 = V^-1 * (b - WT * x1)
        if (hasWT)
        {
            q = WT * da;
        }
        else
        {
            multSparseRowTransposedVector(W, da, q);
        }
        q  = eb - q;
        db = multDiagVector(Vinv, q);
    }
   private:
    int n, m;
    // ==== Solver tmps ====
    XVType q;
    AVType Vinv;
    AWType Y;
    XUType ej;
    XUType tmp;  // NOTE(review): resized but unused in this specialization
    std::vector<int> transposeTargets;
    AWTType WT;
    RecursiveDiagonalPreconditioner<UBlock> P;
    S1Type S1;
    // InnerSolver1 solver1;
    std::unique_ptr<LDLT> ldlt;
    bool patternAnalyzed = false;
    bool hasWT           = true;
    bool explizitSchur   = true;
};
// Schur Solver for the following format:
//
// | U W | |x1| |a|
// | WT V | |x2| = |b|
//
// U: Block Diagonal
// V: Block Diagonal
// W: Sparse Block
//
// Solution: (https://en.wikipedia.org/wiki/Schur_complement)
//
// Schur Matrix
// S = U - W * V^-1 * WT
// Right Hand Side of Schur System
// r1 = a - W * V^-1 * b
// Solve for x1
// S * x1 = r1
// Solve for x2
// r2 = b - WT * x1
// x2 = V^-1 * r2
//
//
// Specialization for: U block-diagonal, V block-diagonal, W sparse-block.
// Supports an explicit Schur complement or an implicit (matrix-free) CG,
// plus OpenMP team variants (analyzePattern_omp / solve_omp) that must be
// called from inside an existing parallel region.
template <typename UBlock, typename VBlock, typename WBlock, typename XType>
class MixedSymmetricRecursiveSolver<
    SymmetricMixedMatrix2<Eigen::DiagonalMatrix<UBlock, -1>, Eigen::DiagonalMatrix<VBlock, -1>,
                          Eigen::SparseMatrix<WBlock, Eigen::RowMajor>>,
    XType>
{
   public:
    using AType = SymmetricMixedMatrix2<Eigen::DiagonalMatrix<UBlock, -1>, Eigen::DiagonalMatrix<VBlock, -1>,
                                        Eigen::SparseMatrix<WBlock, Eigen::RowMajor>>;
    using AUType  = typename AType::UType;
    using AVType  = typename AType::VType;
    using AWType  = typename AType::WType;
    using AWTType = typename TransposeType<AWType>::Type;
    using XUType  = typename XType::UType;
    using XVType  = typename XType::VType;
    using S1Type = Eigen::SparseMatrix<UBlock, Eigen::RowMajor>;
    using S2Type = Eigen::SparseMatrix<VBlock, Eigen::RowMajor>;
    using LDLT = Eigen::RecursiveSimplicialLDLT<S1Type, Eigen::Upper>;
    using InnerSolver1 = MixedSymmetricRecursiveSolver<S1Type, XUType>;
    // Allocate every solver temporary for an n (U side) x m (V side) system.
    void resize(int n, int m)
    {
        this->n = n;
        this->m = m;
        Vinv.resize(m);
        Y.resize(n, m);
        Sdiag.resize(n);
        ej.resize(n);
        q.resize(m);
        S1.resize(n, n);
        P.resize(n);
        tmp.resize(n);
    }
    // Choose the solve strategy and precompute the sparsity structure of W^T.
    void analyzePattern(const AType& A, const LinearSolverOptions& solverOptions)
    {
        resize(A.u.rows(), A.v.rows());
        if (solverOptions.solverType == LinearSolverOptions::SolverType::Direct)
        {
            hasWT         = true;
            explizitSchur = true;
            ldlt          = nullptr;
        }
        else
        {
            // TODO: add heurisitc here
            hasWT = true;
            if (solverOptions.buildExplizitSchur)
                explizitSchur = true;
            else
                explizitSchur = false;
        }
        if (hasWT)
        {
            transposeStructureOnly(A.w, WT);
        }
        patternAnalyzed = true;
    }
    // Solve A [x.u; x.v] = [b.u; b.v] using the Schur complement of V.
    void solve(AType& A, XType& x, XType& b, const LinearSolverOptions& solverOptions = LinearSolverOptions())
    {
        // Some references for easier access
        const AUType& U = A.u;
        const AVType& V = A.v;
        const AWType& W = A.w;
        XUType& da      = x.u;
        XVType& db      = x.v;
        const XUType& ea = b.u;
        const XVType& eb = b.v;
        if (!patternAnalyzed) analyzePattern(A, solverOptions);
        if (hasWT)
        {
            transposeValueOnly(A.w, WT);
        }
        // Invert V block-wise
        for (int i = 0; i < m; ++i) Vinv.diagonal()(i) = V.diagonal()(i).get().inverse();
        // Y = W * V^-1
        multSparseDiag(W, Vinv, Y);
        if (explizitSchur)
        {
            eigen_assert(hasWT);
            // S = U - W * V^-1 * WT  (U is diagonal, so only S1's diagonal
            // needs the U contribution)
            S1 = (Y * WT).template triangularView<Eigen::Upper>();
            S1 = -S1;
            S1.diagonal() = U.diagonal() + S1.diagonal();
        }
        else
        {
            // Implicit mode: only the diagonal of S, used as preconditioner
            diagInnerProductTransposed(Y, W, Sdiag);
            Sdiag.diagonal() = U.diagonal() - Sdiag.diagonal();
        }
        // r = a - W * V^-1 * b
        ej = ea + -(Y * eb);
        if (solverOptions.solverType == LinearSolverOptions::SolverType::Direct)
        {
            // Direct recusive ldlt solver
            if (!ldlt)
            {
                ldlt = std::make_unique<LDLT>();
                ldlt->compute(S1);
            }
            else
            {
                ldlt->factorize(S1);
            }
            da = ldlt->solve(ej);
        }
        else
        {
            if (explizitSchur)
            {
                P.compute(S1);
            }
            else
            {
                P.compute(Sdiag);
            }
            da.setZero();
            // Iterative CG solver
            Eigen::Index iters = solverOptions.maxIterativeIterations;
            double tol         = solverOptions.iterativeTolerance;
            // XUType tmp(n);
            recursive_conjugate_gradient(
                [&](const XUType& v, XUType& result) {
                    // x = U * p - Y * WT * p
                    if (explizitSchur)
                    {
                        //                        if constexpr (denseSchur)
                        //                            denseMV(S1, v, result);
                        //                        else
                        result = S1.template selfadjointView<Eigen::Upper>() * v;
                        //                        std::cout << expand(result) << std::endl << std::endl;
                    }
                    else
                    {
                        // Matrix-free application of S = U - Y * WT
                        if (hasWT)
                        {
                            tmp = Y * (WT * v);
                        }
                        else
                        {
                            multSparseRowTransposedVector(W, v, q);
                            tmp = Y * q;
                        }
                        result = (U.diagonal().array() * v.array()) - tmp.array();
                        //                        std::cout << expand(result) << std::endl << std::endl;
                    }
                },
                ej, da, P, iters, tol);
        }
        // finalize: back-substitute for x2 = V^-1 * (b - WT * x1)
        if (hasWT)
        {
            q = WT * da;
        }
        else
        {
            multSparseRowTransposedVector(W, da, q);
        }
        q  = eb - q;
        db = multDiagVector(Vinv, q);
    }
    // Team variant of analyzePattern: must run inside an omp parallel
    // region; the single construct makes one thread do the setup.
    void analyzePattern_omp(const AType& A, const LinearSolverOptions& solverOptions)
    {
#pragma omp single
        {
            resize(A.u.rows(), A.v.rows());
            if (solverOptions.solverType == LinearSolverOptions::SolverType::Direct)
            {
                // Direct solve is unsupported in the OMP path
                std::terminate();
                hasWT         = true;
                explizitSchur = true;
            }
            else
            {
                // TODO: add heurisitc here
                hasWT         = true;
                explizitSchur = true;
            }
            if (hasWT)
            {
                transposeStructureOnly_omp(A.w, WT, transposeTargets);
            }
            patternAnalyzed = true;
        }
    }
    // Team variant of solve: implicit-Schur CG only, work shared via omp for.
    void solve_omp(AType& A, XType& x, XType& b, const LinearSolverOptions& solverOptions = LinearSolverOptions())
    {
        // Some references for easier access
        const AUType& U = A.u;
        const AVType& V = A.v;
        const AWType& W = A.w;
        XUType& da      = x.u;
        XVType& db      = x.v;
        const XUType& ea = b.u;
        const XVType& eb = b.v;
        if (!patternAnalyzed) analyzePattern_omp(A, solverOptions);
        transposeValueOnly_omp(A.w, WT, transposeTargets);
        // U schur (S1)
#pragma omp for
        for (int i = 0; i < m; ++i) Vinv.diagonal()(i) = V.diagonal()(i).get().inverse();
        multSparseDiag_omp(W, Vinv, Y);
        diagInnerProductTransposed_omp(Y, W, Sdiag);
#pragma omp for
        for (int i = 0; i < n; ++i) Sdiag.diagonal()(i).get() = U.diagonal()(i).get() - Sdiag.diagonal()(i).get();
        sparse_mv_omp(Y, eb, ej);
#pragma omp for
        for (int i = 0; i < n; ++i)
        {
            ej(i).get() = ea(i).get() - ej(i).get();
            da(i).get().setZero();
        }
        {
            // A special implicit schur solver.
            // We cannot use the recursive inner solver here.
            // (Maybe a todo for the future)
            //            da.setZero();
        }
        Eigen::Index iters = solverOptions.maxIterativeIterations;
        double tol         = solverOptions.iterativeTolerance;
        P.compute(Sdiag);
        recursive_conjugate_gradient_OMP(
            [&](const XUType& v, XUType& result) {
                // x = U * p - Y * WT * p
                sparse_mv_omp(WT, v, q);
                sparse_mv_omp(Y, q, tmp);
#pragma omp for
                for (int i = 0; i < v.rows(); ++i)
                {
                    result(i).get() = (U.diagonal()(i).get() * v(i).get()) - tmp(i).get();
                }
            },
            ej, da, P, iters, tol);
        sparse_mv_omp(WT, da, q);
        {
#pragma omp for
            for (int i = 0; i < m; ++i)
            {
                q(i).get() = eb(i).get() - q(i).get();
            }
        }
        multDiagVector_omp(Vinv, q, db);
    }
   private:
    int n, m;
    // ==== Solver tmps ====
    XVType q;
    AVType Vinv;
    AWType Y;
    Eigen::DiagonalMatrix<UBlock, -1> Sdiag;
    XUType ej;
    XUType tmp;
    std::vector<int> transposeTargets;
    AWTType WT;
    RecursiveDiagonalPreconditioner<UBlock> P;
    S1Type S1;
    // InnerSolver1 solver1;
    std::unique_ptr<LDLT> ldlt;
    bool patternAnalyzed = false;
    bool hasWT           = true;
    bool explizitSchur   = true;
};
} // namespace Eigen::Recursive
|
GB_unop__atanh_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__atanh_fp32_fp32
// op(A') function: GB_unop_tran__atanh_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = atanhf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = atanhf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = atanhf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ATANH || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Computes Cx [p] = atanhf (Ax [p]) for 0 <= p < anz, in parallel.
// Cx and Ax may alias: each entry is read once and written once at the
// same index p.
GrB_Info GB_unop_apply__atanh_fp32_fp32
(
    float *Cx,              // Cx and Ax may be aliased
    const float *Ax,
    int64_t anz,            // number of entries to process
    int nthreads            // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        float aij = Ax [p] ;
        float z = aij ;
        Cx [p] = atanhf (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The actual work is done by the shared template GB_unop_transpose.c,
// instantiated via the GB_* macros defined at the top of this file.
GrB_Info GB_unop_tran__atanh_fp32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
graph.h | /**
* Author: Kartik Lakhotia
Sourav Pati
* Email id: klakhoti@usc.edu
spati@usc.edu
* Date: 27-Feb-2018
*
* This code implements work optimized propagation blocking with
* transposed bin graph to reduce cache misses in scatter
*/
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <atomic>
#include <boost/dynamic_bitset.hpp>
#if defined (HUGE_EDGE) || defined (HUGE_VERTEX)
typedef unsigned long long int intE;
#else
typedef unsigned int intE;
#endif
//typedef unsigned long long int intE;
//typedef unsigned int intV;
#ifdef HUGE_VERTEX
typedef unsigned long long int intV;
#else
typedef unsigned int intV;
#endif
//////////////////////////////////////////
//partition centric programming data types
//////////////////////////////////////////
// CSR representation of the subgraph belonging to one partition.
typedef struct partitionGraph
{
    intV numVertex;
    intE numEdges;
    intE* VI;               // vertex offsets (CSR)
    intV* EI;               // edge destinations (CSR)
    unsigned int* EW;       // edge weights (only used when WEIGHTED)
    intV* outDeg;           // per-vertex out-degree
} partitionGraph;
// Per-partition state for partition-centric scatter/gather processing.
typedef struct partitionData
{
    intV tid; //partition ID
    intV startVertex;        // first vertex of this partition's range
    intV endVertex;          // end of this partition's range
    partitionGraph* PNG; //for dense scatter
    partitionGraph* IPG; //for intra-partition asynch processing
    intV* frontier; //active vertex list within the partition
    intV frontierSize;
    bool isDense; //true if partition should be scattered in dense mode
    intE totalEdges;
    intE activeEdges;        // edges incident to the current frontier
    std::atomic<intV> binListPtr; //number of bins with messages to be gathered??
}partitionData;
// Global graph state shared across all partitions: CSR arrays, the active
// frontier, the message bins, and the scatter/gather coordination flags.
// 'type' is the per-update payload stored in the bins.
template<class type>
struct graph
{
    intV numBins; // # partitions
    intV numVertex;
    intE numEdges;
    intE* VI; //vertex offset in CSR
    intV* EI; //edge array in CSR
    unsigned int* EW; //edge weights
    intV* frontier; //
    intV start; //starting vertex??
    unsigned int rounds;
    bool* inFrontier; //array to indicate if a vertex is in frontier
    std::atomic<intV> frontierSize;
    partitionData* TD; //array of partitions
    intV* outDeg;
    intV* inDeg;
    bool* scatterDone; // to indicate if a partition is done scattering
    bool* flag; //to indicate if a partition has a message or not
    bool** binFlag; //to indicate if a bin has any message or not
    intE** updateBinAddrSize; //2D array for no. of updates
    intE** destIdBinAddrSize; //2D array for no. of dest IDs
    intE** updateBinPointers; //running address (counters) pointing to next write location
    intE** destIdBinPointers;
    unsigned int*** indWeightBins; //weight bins
    intV*** indDestIdBins; //dest ID bins (dense, fixed order)
    intV*** sparseDestIdBins; //sparse dest ID bins (empty, to be filled on scatter)
    type*** indUpdateBins; //update bins (same for dense and sparse)
    intV* activeScatter; //list of partitions with active vertices
    intV* activeGather; //list of partitions who have received a message
    std::atomic<intV> partListPtr; //# partitions to be processed (scatter or gather)
    intV** activeBins; //array of bins with messages
};
// Read a graph in binary CSR format from 'filename' into G.
// Layout: numVertex, numEdges, VI[numVertex], EI[numEdges][, EW[numEdges]].
// Returns 1 on success, -1 on any I/O error.  On error, arrays allocated
// so far are freed and the file handle is closed (it previously leaked on
// every error path, and the two header reads were unchecked).
template<class graph>
int read_csr (char* filename, graph* G)
{
    FILE* graphFile = fopen(filename, "rb");
    if (graphFile == NULL)
    {
        fputs("file error", stderr);
        return -1;
    }
    // Header: vertex and edge counts. A short file would otherwise leave
    // numVertex/numEdges uninitialized.
    if (fread (&(G->numVertex), sizeof(intV), 1, graphFile) != 1)
    {
        printf("error reading vertex count\n");
        fclose(graphFile);
        return -1;
    }
    std::cout << G->numVertex << std::endl;
    if (fread (&(G->numEdges), sizeof(intE), 1, graphFile) != 1)
    {
        printf("error reading edge count\n");
        fclose(graphFile);
        return -1;
    }
    std::cout << G->numEdges << std::endl;
    // Vertex offsets; the sentinel VI[numVertex] is filled in afterwards.
    G->VI = new intE[G->numVertex+1];
    fread (G->VI, sizeof(intE), G->numVertex, graphFile);
    std::cout << "read offsets" << std::endl;
    if (feof(graphFile))
    {
        delete[] G->VI;
        printf("unexpected end of file while reading vertices\n");
        fclose(graphFile);
        return -1;
    }
    else if (ferror(graphFile))
    {
        delete[] G->VI;
        printf("error reading file\n");
        fclose(graphFile);
        return -1;
    }
    G->VI[G->numVertex] = G->numEdges;
    // Edge destinations.
    G->EI = new intV[G->numEdges];
    fread (G->EI, sizeof(intV), G->numEdges, graphFile);
    std::cout << "read edges" << std::endl;
    if (feof(graphFile))
    {
        delete[] G->EI;
        delete[] G->VI;
        printf("unexpected end of file while reading edges\n");
        fclose(graphFile);
        return -1;
    }
    else if (ferror(graphFile))
    {
        delete[] G->EI;
        delete[] G->VI;
        printf("error reading file\n");
        fclose(graphFile);
        return -1;
    }
#ifdef WEIGHTED
    // Optional edge weights.
    G->EW = new unsigned int[G->numEdges];
    fread (G->EW, sizeof(unsigned int), G->numEdges, graphFile);
    if (feof(graphFile))
    {
        delete[] G->EW;
        delete[] G->EI;
        delete[] G->VI;
        printf("unexpected end of file while reading edge weights\n");
        fclose(graphFile);
        return -1;
    }
    else if (ferror(graphFile))
    {
        delete[] G->EW;
        delete[] G->EI;
        delete[] G->VI;
        printf("error reading file\n");
        fclose(graphFile);
        return -1;
    }
    std::cout << "read weights" << std::endl;
#endif
    fclose(graphFile);
    return 1;
}
// Write G to 'filename' in binary CSR format (weights are not written).
// Each fwrite and the final fclose are now checked; a full disk or closed
// pipe previously failed silently.
template<class graph>
void write_csr (char* filename, graph* G)
{
    FILE* fp = fopen(filename, "wb");
    if (fp == NULL)
    {
        fputs("file error", stderr);
        return;
    }
    bool ok = true;
    ok = ok && (fwrite(&G->numVertex, sizeof(intV), 1, fp) == 1);
    ok = ok && (fwrite(&G->numEdges, sizeof(intE), 1, fp) == 1);
    ok = ok && (fwrite(G->VI, sizeof(intE), G->numVertex, fp) == (size_t)G->numVertex);
    ok = ok && (fwrite(G->EI, sizeof(intV), G->numEdges, fp) == (size_t)G->numEdges);
    // fclose flushes buffered data, so it can also fail on write errors.
    if (fclose(fp) != 0 || !ok)
        fputs("file write error", stderr);
}
// Print the graph's size followed by every edge as a "src, dst" pair.
// Fixed an off-by-one: the outer loop previously ran i <= numVertex, so the
// inner bound read VI[numVertex+1] — one element past the end of the offset
// array (VI has numVertex+1 entries, indices 0..numVertex).
template<class graph>
void printGraph(graph* G)
{
    printf("num vertices = %d\n numEdges = %d\n", G->numVertex, G->numEdges);
    for (intV i=0; i<G->numVertex; i++)
    {
        for (intE j=G->VI[i]; j<G->VI[i+1]; j++)
            printf("%d, %d\n", i, G->EI[j]);
    }
}
// Transpose G1's CSR structure in place (counting sort of edges by
// destination): after the call VI/EI describe the reversed edge set.
// NOTE(review): if WEIGHTED is defined, EW is NOT reordered alongside EI,
// so weights no longer correspond to their edges after transposition —
// confirm callers only transpose unweighted graphs.
template<class graph>
void transposeCSR(graph* G1)
{
    // Count the in-degree of each vertex into newVI[v+1]
    intE* newVI = new intE[G1->numVertex+1]();
    intV* newEI = new intV[G1->numEdges];
    for (intE i=0; i<G1->numEdges; i++)
    {
        newVI[G1->EI[i]+1]++;
    }
    // Prefix-sum the counts to obtain the transposed offsets
    for (intV i=0; i<G1->numVertex; i++)
        newVI[i+1] += newVI[i];
    // Scatter each edge (i -> EI[j]) into its transposed slot; tempId
    // tracks how many edges have been placed per destination so far
    intV* tempId = new intV [G1->numVertex]();
    for (intV i=0; i<G1->numVertex; i++)
    {
        for (intE j=G1->VI[i]; j<G1->VI[i+1]; j++)
        {
            newEI[newVI[G1->EI[j]] + tempId[G1->EI[j]]] = i;
            tempId[G1->EI[j]]++;
        }
    }
    delete[] G1->VI;
    delete[] G1->EI;
    delete[] tempId;
    G1->VI = newVI;
    G1->EI = newEI;
}
// Compute per-vertex out-degrees and in-degrees from the CSR arrays.
// Out-degrees are independent per vertex; in-degree increments can race
// across threads, hence the atomic.
template<class graph>
void findOutDeg(graph* G)
{
    #pragma omp parallel for
    for (intV i=0; i<G->numVertex; i++)
    {
        intE outDeg = G->VI[i+1] - G->VI[i];
        G->outDeg[i] = outDeg;
        for (intE j=G->VI[i]; j<G->VI[i+1]; j++)
        {
            #pragma omp atomic
            G->inDeg[G->EI[j]]++;
        }
    }
    return;
}
// Allocate the per-vertex and per-partition bookkeeping arrays and compute
// degrees.  Requires numVertex, numBins, VI and EI to be set already.
// The '()' initializers zero-fill; G->frontier is left uninitialized.
template<class graph>
void initGraph (graph* G)
{
    G->inFrontier = new bool [G->numVertex]();
    G->outDeg = new intV [G->numVertex]();
    G->inDeg = new intV [G->numVertex]();
    findOutDeg(G);
    G->frontierSize = 0;
    G->frontier = new intV [G->numVertex];
    G->flag = new bool [G->numBins]();
    G->scatterDone = new bool [G->numBins]();
    return;
}
// Release the graph's heap arrays and null the pointers.
// Previously only VI/EI/EW/outDeg were freed; the arrays allocated in
// initGraph (inDeg, frontier, inFrontier, flag, scatterDone) leaked.
// NOTE(review): assumes no other code frees these members — confirm if
// ownership is shared elsewhere.
template<class graph>
void freeMem (graph* G)
{
    if (G->VI != NULL)
    {
        delete[] G->VI;
        G->VI = NULL;
    }
    if (G->EI != NULL)
    {
        delete[] G->EI;
        G->EI = NULL;
    }
#ifdef WEIGHTED
    if (G->EW != NULL)
    {
        delete[] G->EW;
        G->EW = NULL;
    }
#endif
    if (G->outDeg != NULL)
    {
        delete[] G->outDeg;
        G->outDeg = NULL;
    }
    if (G->inDeg != NULL)
    {
        delete[] G->inDeg;
        G->inDeg = NULL;
    }
    if (G->frontier != NULL)
    {
        delete[] G->frontier;
        G->frontier = NULL;
    }
    if (G->inFrontier != NULL)
    {
        delete[] G->inFrontier;
        G->inFrontier = NULL;
    }
    if (G->flag != NULL)
    {
        delete[] G->flag;
        G->flag = NULL;
    }
    if (G->scatterDone != NULL)
    {
        delete[] G->scatterDone;
        G->scatterDone = NULL;
    }
}
// Snapshot of the current frontier size (atomic load).
template<class graph>
intV findFrontierSize(graph* G)
{
    return G->frontierSize.load();
}
|
GB_unaryop__abs_uint64_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint64_uint32
// op(A') function: GB_tran__abs_uint64_uint32
// C type: uint64_t
// A type: uint32_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Computes Cx [p] = (uint64_t) Ax [p] for 0 <= p < anz, in parallel.
// ABS of an unsigned value is the identity (GB_OP is z = x), so only the
// widening cast remains.
GrB_Info GB_unop__abs_uint64_uint32
(
    uint64_t *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,            // number of entries to process
    int nthreads            // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The actual work is done by the shared template GB_unaryop_transpose.c,
// instantiated via the GB_* macros defined at the top of this file.
GrB_Info GB_tran__abs_uint64_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__band_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__band_int8
// A.*B function (eWiseMult): GB_AemultB__band_int8
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__band_int8
// C+=b function (dense accum): GB_Cdense_accumb__band_int8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__band_int8
// C=scalar+B GB_bind1st__band_int8
// C=scalar+B' GB_bind1st_tran__band_int8
// C=A+scalar GB_bind2nd__band_int8
// C=A'+scalar GB_bind2nd_tran__band_int8
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij) & (bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x) & (y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BAND || GxB_NO_INT8 || GxB_NO_BAND_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense.  The "+" is this file's
// binary op: cij = aij & bij (bitwise AND on int8_t).  No accumulator.
GrB_Info GB_Cdense_ewise3_noaccum__band_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller falls back to generic code
    return (GrB_NO_VALUE) ;
    #else
    // the template does all the work, using the GB_* macros defined above
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with the BAND
// operator.  Work is partitioned by the ek_slice scheme: kfirst/klast give
// each task its vector range, pstart_slice its entry offsets.
GrB_Info GB_Cdense_accumB__band_int8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller falls back to generic code
    return (GrB_NO_VALUE) ;
    #else
    {
        // template performs Cx [p] &= Bx [p] over the entries of B
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with the BAND
// operator.  p_bwork points to the scalar, already typecast to int8_t.
// Fix: the original had two `return (GrB_SUCCESS)` statements on this path
// (one inside the block, one after it); the second was unreachable dead
// code.  Only the statement after the block is kept.
GrB_Info GB_Cdense_accumb__band_int8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller falls back to generic code
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        // template performs Cx [p] &= bwork over all entries of C
        #include "GB_dense_subassign_22_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (node)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, where "+" is cij = aij & bij.  The
// pattern of C is the set union of A and B; the TaskList describes the
// parallel slicing computed in the symbolic phase.
GrB_Info GB_AaddB__band_int8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller falls back to generic code
    return (GrB_NO_VALUE) ;
    #else
    // numeric phase (GB_PHASE_2_OF_2 is defined above)
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, where ".*" is cij = aij & bij.  The
// pattern of C is the set intersection of A and B.
GrB_Info GB_AemultB__band_int8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller falls back to generic code
    return (GrB_NO_VALUE) ;
    #else
    // numeric phase (GB_PHASE_2_OF_2 is defined above)
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the binary op with the scalar bound to the first
// argument, i.e. Cx [p] = x & Bx [p] for all anz entries.
GrB_Info GB_bind1st__band_int8
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Bx = (int8_t *) Bx_input ;
    const int8_t x = (*((int8_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = (x) & (Bx [p]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the binary op with the scalar bound to the second
// argument, i.e. Cx [p] = Ax [p] & y for all anz entries.
GrB_Info GB_bind2nd__band_int8
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    const int8_t y = (*((int8_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = (Ax [p]) & (y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (x) & (aij) ; \
}
// C = op (x, A'): transpose A and apply cij = x & aij, via the transpose
// template and the GB_CAST_OP macro redefined just above this function.
GrB_Info GB_bind1st_tran__band_int8
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    // operator disabled at compile time; caller falls back to generic code
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for code that follows (same value for this file)
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (aij) & (y) ; \
}
// C = op (A', y): transpose A and apply cij = aij & y, via the transpose
// template and the GB_CAST_OP macro redefined just above this function.
GrB_Info GB_bind2nd_tran__band_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller falls back to generic code
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
sw-gapless.c | #include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <math.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <zlib.h>
#include <sys/time.h>
#include "../common/util.h"
#include "../common/sw-gapless.h"
#include "../common/stats.h"
static int initialised;
static int match, mismatch;
/* statistics */
static count_t ticks, cells, invocs;
#pragma omp threadprivate(initialised, match, mismatch, ticks, cells, invocs)
/* Record scoring parameters for subsequent sw_gapless() calls on this
 * thread (all state here is threadprivate).  Optionally reset the
 * statistics counters.  Always returns 0. */
int
sw_gapless_setup(int _match, int _mismatch, bool reset_stats)
{
	match = _match;
	mismatch = _mismatch;
	initialised = 1;
	if (reset_stats) {
		/* zero the per-thread counters */
		count_init(&invocs);
		count_init(&cells);
		count_init(&ticks);
	}
	return 0;
}
/* Report this thread's accumulated statistics.  Each out-pointer is
 * optional; pass NULL for any value that is not wanted. */
void
sw_gapless_stats(uint64_t * _invocs, uint64_t * _cells, uint64_t * _ticks)
{
	if (_invocs)
		*_invocs = (uint64_t)count_get_count(&invocs);
	if (_cells)
		*_cells = (uint64_t)count_get_count(&cells);
	if (_ticks)
		*_ticks = (uint64_t)count_get_count(&ticks);
}
/*
 * Gapless local alignment of a read against the genome, anchored so that
 * genome position g_idx pairs with read position r_idx.  Each aligned pair
 * scores `match` or `mismatch`; the running score resets to zero whenever
 * it goes negative (local-alignment reset); the maximum score seen is
 * returned.
 */
int
sw_gapless(uint32_t * genome, int glen, uint32_t * read, int rlen, int g_idx, int r_idx,
    uint32_t * genome_ls, int init_bp, bool is_rna)
{
	int score;
	int g_left, g_right, r_left, r_right;
	int max_score;
	llint before = rdtsc(), after;	/* cycle counter for the ticks stat */
	if (!initialised)
		abort();	/* sw_gapless_setup() must be called first */
	count_increment(&invocs);
	/* Slide the anchor back to position 0 of whichever sequence starts later. */
	if (g_idx < r_idx) {
		g_left = 0;
		r_left = r_idx - g_idx;
	} else {
		g_left = g_idx - r_idx;
		r_left = 0;
	}
	g_right = g_left;
	r_right = r_left;
	score = 0;
	if (genome_ls != NULL && r_left == 0) { // forcefully match first colour in read
		/* NOTE(review): assumes lstocs() converts the letter-space base at
		 * g_right (given primer base init_bp) to colour space so it can be
		 * compared with the read's first colour -- confirm against header. */
		int real_colour = lstocs(EXTRACT(genome_ls, g_right), init_bp, is_rna);
		if (real_colour == (int)EXTRACT(read, 0)) {
			score = match;
		} else {
			/* first colour disagrees: skip it instead of penalising */
			r_left++;
			g_left++;
		}
		r_right++;
		g_right++;
	}
	max_score = score;
	/* Extend rightwards until either sequence is exhausted. */
	while (g_right < glen && r_right < rlen) {
		score += (EXTRACT(genome, g_right) == EXTRACT(read, r_right)? match : mismatch);
		if (score > max_score)
			max_score = score;
		g_right++;
		r_right++;
		if (score < 0) {
			/* a negative-scoring prefix can never help a local alignment */
			g_left = g_right;
			r_left = r_right;
			score = 0;
		}
	}
	/* NOTE(review): counts rlen cells even when the genome boundary ends
	 * the scan early -- confirm this is the intended accounting. */
	count_add(&cells, rlen);
	after = rdtsc();
	count_add(&ticks, MAX(after - before, 0));	/* clamp against timer skew */
	return max_score;
}
|
GB_unaryop__identity_uint8_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint8_fp64
// op(A') function: GB_tran__identity_uint8_fp64
// C type: uint8_t
// A type: double
// cast: uint8_t cij ; GB_CAST_UNSIGNED(cij,aij,8)
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint8_t z ; GB_CAST_UNSIGNED(z,x,8) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): identity op, so the work is the typecast itself:
// each double Ax [p] is converted to uint8_t via GB_CAST_UNSIGNED(...,8).
GrB_Info GB_unop__identity_uint8_fp64
(
    uint8_t *restrict Cx,
    const double *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller falls back to generic code
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // aij = Ax [p] ; uint8_t z ; GB_CAST_UNSIGNED (z, aij, 8) ; Cx [p] = z
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A and typecast double -> uint8_t.  All work
// is done by the included template, driven by the GB_* macros defined above.
GrB_Info GB_tran__identity_uint8_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller falls back to generic code
    return (GrB_NO_VALUE) ;
    #else
    // numeric phase of the transpose (phase 2 of 2)
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_split_sparse_template.c | //------------------------------------------------------------------------------
// GB_split_sparse_template: split a single tile from a sparse matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
{
    //--------------------------------------------------------------------------
    // get A and C, and the slicing of C
    //--------------------------------------------------------------------------
    // All identifiers not declared here (A, C, Wp, Cp, Ci, Ai, akstart,
    // aistart, cvlen, C_ntasks, C_nthreads, *_Cslice, done, GB_COPY, ...)
    // are supplied by the file that #includes this template.
    const GB_CTYPE *restrict Ax = (GB_CTYPE *) A->x ;
    GB_CTYPE *restrict Cx = (GB_CTYPE *) C->x ;
    //--------------------------------------------------------------------------
    // copy the tile from A to C
    //--------------------------------------------------------------------------
    int tid ;
    #pragma omp parallel for num_threads(C_nthreads) schedule(dynamic,1)
    for (tid = 0 ; tid < C_ntasks ; tid++)
    {
        // this task owns vectors kfirst..klast of C (possibly partially)
        int64_t kfirst = kfirst_Cslice [tid] ;
        int64_t klast = klast_Cslice [tid] ;
        for (int64_t k = kfirst ; k <= klast ; k++)
        {
            // int64_t jA = GBH (Ah, k+akstart) ; not needed
            // entries of C(:,k) handled by this task: [pC_start, pC_end)
            int64_t pC_start, pC_end ;
            GB_get_pA (&pC_start, &pC_end, tid, k,
                kfirst, klast, pstart_Cslice, Cp, cvlen) ;
            int64_t p0 = Cp [k] ;
            // Wp locates the start of this tile's vector k inside A
            int64_t pA_offset = Wp [k + akstart] ;
            // copy the vector from A to C
            for (int64_t pC = pC_start ; pC < pC_end ; pC++)
            {
                // get the index of A(iA,jA)
                int64_t pA = pA_offset + pC - p0 ;
                int64_t iA = Ai [pA] ;
                // shift the index and copy into C(i,j)
                Ci [pC] = iA - aistart ;
                GB_COPY (pC, pA) ;
            }
        }
    }
    done = true ;
}
#undef GB_CTYPE
|
zlaebz2.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
*
* @precisions normal z -> s d
*
**/
#include "plasma.h"
#include "plasma_internal.h" /* needed for imin, imax. */
#include "plasma_zlaebz2_work.h" /* work areas. */
#include <string.h>
#include <omp.h>
#include <math.h>
#include <core_lapack.h>
/***************************************************************************//**
*
* @ingroup plasma_gemm
*
*
* This file is a z-template to generate s and d code.
* Only s and d are compiled; not c or z.
* This code is not designed to be called directly by users; it is a subroutine
* for zstevx2.c.
*
* Specifically, this is a task-based parallel algorithm, the parameters are
* contained in the already initialized and populated zlaebz2_Control_t; For
* example, from zstevx2:
*
* #pragma omp parallel
* {
* #pragma omp single
* {
* plasma_zlaebz2(&Control, ...etc...);
* }
* }
*
*
*******************************************************************************
*
* @param[in] *Control
* A pointer to the global variables needed.
*
* @param[in] Control->N
* int number of rows in the matrix.
*
* @param[in] Control->diag
* real array of [N] diagonal elements of the matrix.
*
* @param[in] Control->offd
* real array of [N-1] sub-diagonal elements of the matrix.
*
* @param[in] Control->range
* int enum.
* PlasmaRangeI if user is finding eigenvalues by index range.
* PlasmaRangeV if user is finding eigenvalues by value range.
*
* @param[in] Control->jobtype
* int enum.
* PlasmaNoVec if user does not want eigenvectors computed.
* PlasmaVec if user desires eigenvectors computed.
*
* @param[in] Control->il
* int enum. The lowerBound of an index range if range is
* PlasmaRangeI.
*
* @param[in] Control->iu
* int enum. The upperBound of an index range, if range is
* PlasmaRangeI.
*
* @param[in] Control->stein_arrays
* array of [max_threads], type zlaebz2_Stein_Array_t, contains work
* areas per thread for invoking _stein (inverse iteration to find
* eigenvectors).
*
* @param[in] Control->baseIdx
* The index of the least eigenvalue to be found in the bracket,
* used to calculate the offset into the return vectors/arrays.
*
* @param[out] Control->error
* If non-zero, the first error we encountered in the operation.
*
* @param[out] Control->pVal
* real vector of [eigenvaues] to store the eigenvalues discovered,
* these are returned in ascending sorted order.
*
* @param[out] Control->pVec
* real array of [N x eigenvalues] to store the eigenvectors, not
* references unless jobtype==PlasmaVec. Stored in the same order as
* their corresponding eigenvalue. Only referenced if jobtype is
* PlasmaVec.
*
* @param[out] Control->pMul
* int vector of [eigenvalues], the corresponding ULP-multiplicity of
* each eigenvalue, typically == 1.
*
* @param[in] lowerBound
* Real lowerBound (inclusive) for range of eigenvalues to find.
*
* @param[in] upperBound
* Real upperBound (non-inclusive) of the range of eigenvalues to find.
*
* @param[in] nLT_low
* int number of eigenvalues less than lowerBound. Computed if < 0.
*
* @param[in] nLT_hi
* int number of eigenvalues less than upperBound. Computed if < 0.
*
* @param[in] numEV
* int number of eigenvalues in [lowerBound, upperBound). Computed if
* either nLT_low or nLT_hi were computed.
*
* A 'bracket' is a range of either real eigenvalues, or eigenvalue indices,
* that this code is given to discover. It is provided in the arguments. Upon
* entry, the number of theoretical eigenvalues in this range has already been
* determined, but the actual number may be less, due to ULP-multiplicity. (ULP
* is the Unit of Least Precision, the magnitude of the smallest change
* possible to a given real number). To explain: A real symmetric matrix in NxN
* should have N distinct real eigenvalues; however, if eigenvalues are closely
* packed either absolutely (their difference is close to zero) or relatively
* (their ratio is close to 1.0) then in real arithmetic two such eigenvalues
* may be within ULP of each other, and thus represented by the same real
* number. Thus we have ULP-multiplicity, two theoretically distinct
* eigenvalues represented by the same real number.
*
*
* This algorithm uses Bisection by the Scaled Sturm Sequence, implemented in
* plasma_zlaebz2, followed by the LAPACK routine _STEIN, which uses inverse
* iteration to find the eigenvalue. The initial 'bracket' parameters should
* contain the full range for the eigenvalues we are to discover. The algorithm
* is recursively task based, at each division the bracket is divided into two
* brackets. If either is empty (no eigenvalues) we discard it, otherwise a new
* task is created to further subdivide the right-hand bracket while the
* current task continues dividing the left-hand side, until it can no longer
* divide it, and proceeds to store the eigenvalue and compute the eigenvector
* if needed. Thus the discovery process is complete when all tasks are
* completed. We then proceed to orthogonalizing any eigenvectors discovered;
* because inverse iteration does not inherently ensure orthogonal
* eigenvectors.
*
* The most comparable serial LAPACK routine is DLAEBZ.
*
* Once all thread work is complete, the code will condense these arrays to
* just the actual number of unique eigenvalues found, if any ULP-multiplicity
* is present.
*****************************************************************************/
/*******************************************************************************
* Use LAPACK zstein to find a single eigenvector. We may use this routine
* multiple times, so instead of allocating/freeing the work spaces repeatedly,
* we have an array of pointers, per thread, to workspaces we allocate if not
* already allocated for this thread. So we don't allocate more than once per
* thread. These are freed by the main program before exit. Returns INFO.
* 0=success. <0, |INFO| is invalid argument index. >0, if eigenvector failed
* to converge.
*******************************************************************************/
/* Compute the single eigenvector v for eigenvalue u of the tridiagonal
 * matrix (diag, offd) of order N, using LAPACK inverse iteration (zstein).
 * Workspaces are allocated lazily, once per thread, and are freed by the
 * main program before exit.  Returns the LAPACK INFO code: 0 on success,
 * <0 for an invalid argument (|INFO| is its index), >0 if the eigenvector
 * failed to converge; PlasmaErrorOutOfMemory if allocation failed. */
int plasma_zstein( plasma_complex64_t *diag, plasma_complex64_t *offd,
                   plasma_complex64_t u, plasma_complex64_t *v, int N,
                   zlaebz2_Stein_Array_t *myArrays) {
    zlaebz2_Stein_Array_t *ws = &myArrays[omp_get_thread_num()];
    if (ws->IBLOCK == NULL) {
        ws->IBLOCK = (int*) calloc(N, sizeof(int));
        if (ws->IBLOCK != NULL) ws->IBLOCK[0] = 1;  /* one block */
    }
    if (ws->ISPLIT == NULL) {
        ws->ISPLIT = (int*) calloc(N, sizeof(int));
        if (ws->ISPLIT != NULL) ws->ISPLIT[0] = N;  /* split at N */
    }
    if (ws->WORK  == NULL) ws->WORK  = (plasma_complex64_t*) calloc(5*N, sizeof(plasma_complex64_t));
    if (ws->IWORK == NULL) ws->IWORK = (int*) calloc(N, sizeof(int));
    if (ws->IFAIL == NULL) ws->IFAIL = (int*) calloc(N, sizeof(int));
    if (ws->IBLOCK == NULL || ws->ISPLIT == NULL || ws->WORK == NULL ||
        ws->IWORK == NULL || ws->IFAIL == NULL) {
        return(PlasmaErrorOutOfMemory);
    }
    int M = 1, LDZ = N;
    plasma_complex64_t W = u;
    /* The 'work' variant lets us reuse our work arrays; LAPACKE_zstein()
     * would allocate and release work areas on every call. */
    return LAPACKE_zstein_work(LAPACK_COL_MAJOR, N, diag, offd, M, &W,
                               ws->IBLOCK, ws->ISPLIT, v, LDZ,
                               ws->WORK, ws->IWORK, ws->IFAIL);
}
/******************************************************************************
* This a task that subdivides a bracket, throwing off other tasks like this
* if necessary, until the bracket zeroes in on a single eigenvalue, which it
* then stores and possibly finds the corresponding eigenvector.
* Parameters:
* Control: Global variables.
* lowerBound: of bracket to subdivide.
* upperBound: of bracket to subdivide.
* nLT_low: number of eigenvalues less than lower bound.
* -1 if it needs to be found.
* nLT_hi: number of eigevalues less than the upper bound.
* -1 if it needs to be found.
* numEV: number of eigenvalues within bracket. Computed if either
* nLT_Low or nLT_hi is computed.
* ***************************************************************************/
/* Task body: bisect [lowerBound, upperBound) until it isolates a single
 * eigenvalue (to ULP), spawning a sibling OpenMP task for the right half
 * whenever both halves contain eigenvalues, then store the eigenvalue (and
 * compute its eigenvector if requested).  See the block comment above for
 * the parameter contract. */
void plasma_zlaebz2(zlaebz2_Control_t *Control, plasma_complex64_t lowerBound,
    plasma_complex64_t upperBound, int nLT_low, int nLT_hi, int numEV) {
    plasma_complex64_t *diag = Control->diag;
    plasma_complex64_t *offd = Control->offd;
    int N = Control->N;
    plasma_complex64_t cp;  /* bisection cutpoint */
    int flag=0, evLess;
    /* Compute any Sturm-sequence counts the caller did not supply. */
    if (nLT_low < 0) {
        nLT_low = plasma_zlaneg2(diag, offd, N, lowerBound);
        flag=1;
    }
    if (nLT_hi < 0) {
        nLT_hi = plasma_zlaneg2(diag, offd, N, upperBound);
        flag=1;
    }
    if (flag) {
        /* counts were (re)computed, so the bracket population changes too */
        numEV = (nLT_hi - nLT_low);
    }
    /* If there are no eigenvalues in the supplied range, we are done. */
    if (numEV < 1) return;
    if (Control->range == PlasmaRangeI) {
        if (nLT_hi < Control->il || /* e.g if il=500, and nLT_hi=499, this bracket is under range of interest. */
            nLT_low > Control->iu) { /* e.g if iu=1000, and nLT_low=1001, this bracket is above range of interest. */
            return;
        }
    }
    /* Bisect the bracket until we can't anymore. */
    flag = 0;
    for (;;) {
        cp = (lowerBound+upperBound)*0.5;
        if (cp == lowerBound || cp == upperBound) {
            /* Our bracket has been narrowed to machine epsilon for this magnitude (=ulp).
             * We are done; the bracket is always [low,high). 'high' is not included, so
             * we have numEV eigenvalues at low, whether it == 1 or is > 1. We find
             * the eigenvector. (We can test multiplicity with GluedWilk).
             */
            break; /* exit for(;;). */
        } else {
            /* we have a new cutpoint. */
            evLess = plasma_zlaneg2(diag, offd, N, cp);
            if (evLess < 0) {
                /* We could not compute the Sturm sequence for it. */
                flag = -1; /* indicate an error. */
                break; /* exit for (;;). */
            }
            /* Discard empty halves in both PlasmaRangeV and PlasmaRangeI.
             * If #EV < cutpoint is the same as the #EV < high, it means
             * no EV are in [cutpoint, hi]. We can discard that range.
             */
            if (evLess == nLT_hi) {
                upperBound = cp;
                continue;
            }
            /* If #EV < cutpoint is the same as #EV < low, it means no
             * EV are in [low, cutpoint]. We can discard that range.
             */
            if (evLess == nLT_low) {
                lowerBound = cp;
                continue;
            }
            /* Note: If we were PlasmaRangeV, the initial bounds given by the user are the ranges,
             * so we have nothing further to do. In PlasmaRangeI; the initial bounds are Gerschgorin
             * limits and not enough: We must further narrow to the desired indices.
             */
            if (Control->range == PlasmaRangeI) {
                /* For PlasmaRangeI:
                 * Recall that il, iu are 1-relative; while evLess is zero-relative; i.e.
                 * if [il,iu]=[1,2], evless must be 0, or 1.
                 * when evLess<cp == il-1, or just <il, cp is a good boundary and
                 * we can discard the lower half.
                 *
                 * To judge the upper half, the cutpoint must be < iu, so if it is >= iu,
                 * cannot contain eigenvalue[iu-1].
                 * if evLess >= iu, we can discard upper half.
                 */
                if (evLess < Control->il) {
                    /* The lower half [lowerBound, cp) is not needed, it has no indices >= il. */
                    lowerBound = cp;
                    nLT_low = evLess;
                    numEV = (nLT_hi-nLT_low);
                    continue;
                }
                if (evLess >= Control->iu) {
                    /* The upper half [cp, upperBound) is not needed, it has no indices > iu; */
                    upperBound = cp;
                    nLT_hi = evLess;
                    numEV = (nLT_hi-nLT_low);
                    continue;
                }
            } /*end if index search. */
            /* Here, the cutpoint has EV on both left right. We push off the right bracket.
             * The new lowerBound is the cp, the upperBound is unchanged, the number of
             * eigenvalues changes. */
            #pragma omp task
            plasma_zlaebz2(Control, cp, upperBound, evLess, nLT_hi, (nLT_hi-evLess));
            /* Update the Left side I kept. The new number of EV less than upperBound
             * is evLess, recompute number of EV in the bracket. */
            upperBound = cp;
            nLT_hi = evLess;
            numEV =( evLess - nLT_low);
            continue;
        }
    } /* end for (;;) for Bisection. */
    /* Okay, count this eigenpair done, add to the Done list.
     * NOTE: nLT_low is the global zero-relative index of
     * this set of mpcity eigenvalues.
     * No other brackets can change our entry, so we
     * don't need any thread block or atomicity.
     */
    int myIdx;
    if (Control->range == PlasmaRangeI) {
        myIdx = nLT_low - (Control->il-1);
    } else { /* range == PlasmaRangeV */
        myIdx = nLT_low - Control->baseIdx;
    }
    if (Control->jobtype == PlasmaVec) {
        /* get the eigenvector. */
        int ret=plasma_zstein(diag, offd, lowerBound, &(Control->pVec[myIdx*N]), N, Control->stein_arrays);
        if (ret != 0) {
            #pragma omp critical (UpdateStack)
            {
                /* Only store first error we encounter */
                if (Control->error == 0) Control->error = ret;
            }
        }
    }
    /* Add eigenvalue and multiplicity. */
    Control->pVal[myIdx]=lowerBound;
    Control->pMul[myIdx]=numEV;
    // #pragma omp atomic
    // Control->finished += numEV;
}
|
CartesianHDF.h | // File : CartesianHDF.h
// Created : Wed Jan 29 2020 10:25:26 AM (+0100)
// Author : Fabian Wermelinger
// Description: HDF IO routines for Cartesian grid types
// Copyright 2020 ETH Zurich. All Rights Reserved.
#ifndef CARTESIANHDF_H_GQ8CNN6W
#define CARTESIANHDF_H_GQ8CNN6W
#include "Cubism/Common.h"
#include "Cubism/IO/FieldAOS.h"
#include "Cubism/IO/HDFDriver.h"
#include <cstdio>
#include <fstream>
#include <string>
#include <vector>
NAMESPACE_BEGIN(Cubism)
NAMESPACE_BEGIN(IO)
DISABLE_WARNING_PUSH
DISABLE_WARNING_UNREFERENCED_FORMAL_PARAMETER
/**
* @ingroup IO
* @brief Write Cartesian grid data to HDF file
* @tparam FileDataType HDF file data type
* @tparam Grid Grid type
* @tparam Mesh Mesh type
* @tparam Dir Special type that defines a cast to ``size_t``
* @param fname Output full filename without file extension
* @param aname Name of quantity in ``grid``
* @param grid Input grid
* @param mesh Input mesh corresponding to the extracted data
* @param time Current time
* @param face_dir Face direction (relevant for ``Cubism::EntityType::Face``)
* @param create_xdmf Flag for XDMF wrapper
*
* @rst
* Write the data carried by ``grid`` to an HDF5 container file. The data that
* is written to the file is specified by the index space described in ``mesh``.
* @endrst
*/
template <typename FileDataType,
          typename Grid,
          typename Mesh,
          typename Dir = size_t>
void CartesianWriteHDF(const std::string &fname,
                       const std::string &aname,
                       const Grid &grid,
                       const Mesh &mesh,
                       const double time,
                       const Dir face_dir = 0,
                       const bool create_xdmf = true)
{
#ifdef CUBISM_USE_HDF
    static_assert(Grid::BaseType::Class == Cubism::FieldClass::Scalar ||
                      Grid::BaseType::Class == Cubism::FieldClass::Tensor ||
                      Grid::BaseType::Class ==
                          Cubism::FieldClass::FaceContainer,
                  "CartesianWriteHDF: Unsupported Cubism::FieldClass");
    using IRange = typename Mesh::IndexRangeType;
    using MIndex = typename IRange::MultiIndex;
    constexpr typename Cubism::EntityType entity = Grid::EntityType;
    constexpr size_t NComp = Grid::NComponents;
    const size_t dface = static_cast<size_t>(face_dir);
    // clip the requested mesh against the index range carried by the grid
    const auto clip = mesh.getSubMesh(
        grid.getMesh().getIndexRange(entity, dface), entity, dface);
    const IRange file_span = clip->getIndexRange(entity, dface);
    const MIndex file_extent = file_span.getExtent();
    if (create_xdmf) {
        std::printf("CartesianWriteHDF: Allocating %.1f MB file buffer (%s)\n",
                    file_extent.prod() * NComp * sizeof(FileDataType) / 1024. /
                        1024.,
                    fname.c_str());
    }
    // RAII buffer: the original used new[]/delete[], which leaked the
    // allocation if Field2AOS or hdf_driver.write threw an exception.
    std::vector<FileDataType> buf(file_extent.prod() * NComp);
#pragma omp parallel for
    for (size_t i = 0; i < grid.size(); ++i) {
        const auto &bf = grid[i]; // block field
        // each block writes its disjoint slice of the AOS buffer
        Field2AOS(bf, file_span, buf.data(), dface);
    }
    HDFDriver<FileDataType, typename Mesh::BaseMesh, Mesh::Class> hdf_driver;
    hdf_driver.file_span = file_span;
    hdf_driver.write(fname,
                     aname,
                     buf.data(),
                     *clip,
                     entity,
                     NComp,
                     time,
                     create_xdmf);
#else
    std::fprintf(
        stderr, "CartesianWriteHDF: HDF not supported (%s)\n", fname.c_str());
#endif /* CUBISM_USE_HDF */
}
/**
 * @ingroup IO
 * @brief Write Cartesian grid data to HDF file
 * @tparam FileDataType HDF file data type
 * @tparam Grid Grid type
 * @tparam Dir Special type that defines a cast to ``size_t``
 * @param fname Output full filename without file extension
 * @param aname Name of quantity in ``grid``
 * @param grid Input grid
 * @param time Current time
 * @param face_dir Face direction (relevant for ``Cubism::EntityType::Face``)
 * @param create_xdmf Flag for XDMF wrapper
 *
 * Convenience wrapper that dumps the full grid to an HDF container file by
 * forwarding the grid's global mesh to the mesh-aware overload.
 */
template <typename FileDataType, typename Grid, typename Dir = size_t>
void CartesianWriteHDF(const std::string &fname,
                       const std::string &aname,
                       const Grid &grid,
                       const double time,
                       const Dir face_dir = 0,
                       const bool create_xdmf = true)
{
    Cubism::IO::CartesianWriteHDF<FileDataType>(
        fname,
        aname,
        grid,
        grid.getGlobalMesh(),
        time,
        static_cast<size_t>(face_dir),
        create_xdmf);
}
/**
 * @ingroup IO
 * @brief Read Cartesian grid data from HDF file
 * @tparam FileDataType HDF file data type
 * @tparam Grid Grid type
 * @tparam Mesh Mesh type
 * @tparam Dir Special type that defines a cast to ``size_t``
 * @param fname Input full filename without file extension
 * @param grid Grid populated with file data
 * @param mesh Grid (sub)mesh
 * @param face_dir Face direction (relevant for ``Cubism::EntityType::Face``)
 *
 * @rst
 * Load the data of an HDF5 container file into ``grid``.  The index space
 * described by ``mesh`` selects the portion of the file that is read.
 * @endrst
 */
template <typename FileDataType,
          typename Grid,
          typename Mesh,
          typename Dir = size_t>
void CartesianReadHDF(const std::string &fname,
                      Grid &grid,
                      const Mesh &mesh,
                      const Dir face_dir = 0)
{
#ifdef CUBISM_USE_HDF
    static_assert(Grid::BaseType::Class == Cubism::FieldClass::Scalar ||
                      Grid::BaseType::Class == Cubism::FieldClass::Tensor ||
                      Grid::BaseType::Class ==
                          Cubism::FieldClass::FaceContainer,
                  "CartesianReadHDF: Unsupported Cubism::FieldClass");
    {
        // Fail early with a clear message when the container is missing.
        std::ifstream file(fname + ".h5");
        if (!file.good()) {
            throw std::runtime_error("CartesianReadHDF: File '" + fname +
                                     "' does not exist");
        }
    }
    using IRange = typename Mesh::IndexRangeType;
    using MIndex = typename IRange::MultiIndex;
    constexpr Cubism::EntityType entity = Grid::EntityType;
    constexpr size_t NComp = Grid::NComponents;
    const size_t dface = static_cast<size_t>(face_dir);
    // Clip the requested mesh region against the index space the grid
    // actually carries for this entity and face direction.
    const auto sub_mesh = mesh.getSubMesh(
        grid.getMesh().getIndexRange(entity, dface), entity, dface);
    const IRange file_span = sub_mesh->getIndexRange(entity, dface);
    const MIndex file_extent = file_span.getExtent();
    FileDataType *aos_buf = new FileDataType[file_extent.prod() * NComp];
    HDFDriver<FileDataType, typename Mesh::BaseMesh, Mesh::Class> driver;
    driver.file_span = file_span;
    driver.read(fname, aos_buf, NComp);
    // Scatter the AOS file buffer back into the individual block fields.
#pragma omp parallel for
    for (size_t b = 0; b < grid.size(); ++b) {
        auto &bf = grid[b]; // block field
        AOS2Field(aos_buf, file_span, bf, dface);
    }
    delete[] aos_buf;
#else
    std::fprintf(
        stderr, "CartesianReadHDF: HDF not supported (%s)\n", fname.c_str());
#endif /* CUBISM_USE_HDF */
}
/**
 * @ingroup IO
 * @brief Read Cartesian grid data from HDF file
 * @tparam FileDataType HDF file data type
 * @tparam Grid Grid type
 * @tparam Dir Special type that defines a cast to ``size_t``
 * @param fname Input full filename without file extension
 * @param grid Grid populated with file data
 * @param face_dir Face direction (relevant for ``Cubism::EntityType::Face``)
 *
 * Convenience wrapper that reads a full grid from an HDF container file by
 * forwarding the grid's global mesh to the mesh-aware overload.
 */
template <typename FileDataType, typename Grid, typename Dir = size_t>
void CartesianReadHDF(const std::string &fname,
                      Grid &grid,
                      const Dir face_dir = 0)
{
    Cubism::IO::CartesianReadHDF<FileDataType>(fname,
                                               grid,
                                               grid.getGlobalMesh(),
                                               static_cast<size_t>(face_dir));
}
DISABLE_WARNING_POP
NAMESPACE_END(IO)
NAMESPACE_END(Cubism)
#endif /* CARTESIANHDF_H_GQ8CNN6W */
|
3d25pt_var.c | /*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Compute RESULT = X - Y for two `struct timeval' values.
 *
 * Y is normalized in place during the computation (callers must not rely
 * on its value afterwards).  RESULT->tv_usec is always non-negative.
 *
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow one or more seconds from x when its microsecond field is
     * smaller than y's, so the usec subtraction below cannot go negative. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry excess microseconds (more than one second's worth) into the
     * seconds field. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* After normalization the per-field subtraction is exact and
     * tv_usec is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* Negative iff x predates y. */
    return x->tv_sec < y->tv_sec;
}
/* Guarded allocator: the benchmark performs thousands of row allocations,
 * and dereferencing a failed malloc would crash far from the cause. */
static void *xmalloc(size_t size)
{
  void *p = malloc(size);
  if (p == NULL) {
    fprintf(stderr, "Out of memory allocating %zu bytes\n", size);
    exit(EXIT_FAILURE);
  }
  return p;
}

/* Benchmark driver: times TESTS runs of the order-4 (25-point) variable-
 * coefficient stencil on an Nx x Ny x Nz grid for Nt time steps.
 * Usage: ./prog Nx Ny Nz Nt  (interior sizes; 8 halo points are added). */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;

  /* All four arguments are required: the original code left Nx/Ny/Nz/Nt
   * uninitialized (undefined behavior) when arguments were missing. */
  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return EXIT_FAILURE;
  }
  Nx = atoi(argv[1])+8; /* +8: 4-point halo on each side */
  Ny = atoi(argv[2])+8;
  Nz = atoi(argv[3])+8;
  Nt = atoi(argv[4]);

  // allocate the two time levels of the solution array
  double ****A = (double ****) xmalloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) xmalloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) xmalloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) xmalloc(sizeof(double)*Nx);
      }
    }
  }

  // allocate the 13 axis-symmetric coefficient arrays
  double ****coef = (double ****) xmalloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) xmalloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) xmalloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) xmalloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) xmalloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations.
  // Keep the original pointer until realloc succeeds (avoids a leak and a
  // NULL dereference on failure).
  {
    int *resized = (int*) realloc((void *)tile_size, sizeof(int)*5);
    if (resized == NULL) {
      fprintf(stderr, "Out of memory resizing tile_size\n");
      free(tile_size);
      return EXIT_FAILURE;
    }
    tile_size = resized;
  }
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 4;
  tile_size[3] = 64;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  /* Initialize ALL cells (index 0 included) of BOTH time levels: the
   * stencil reads a 4-wide halo (e.g. A[t%2][i-4][j][k] with i==4 touches
   * index 0) and reads A[1]'s halo from the second time step on; the
   * original init started at index 1 and filled only A[0], leaving those
   * reads uninitialized. */
  srand(42);
  for (m = 0; m < 2; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          A[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  /* num_threads is kept under this name: the PRINT_RESULTS macro from
   * print_utils.h may reference it — TODO confirm against that header. */
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] =
              coef[0][i][j][k] * A[(t)%2][i  ][j  ][k  ] +
              coef[1][i][j][k] * (A[(t)%2][i-1][j  ][k  ] + A[(t)%2][i+1][j  ][k  ]) +
              coef[2][i][j][k] * (A[(t)%2][i  ][j-1][k  ] + A[(t)%2][i  ][j+1][k  ]) +
              coef[3][i][j][k] * (A[(t)%2][i  ][j  ][k-1] + A[(t)%2][i  ][j  ][k+1]) +
              coef[4][i][j][k] * (A[(t)%2][i-2][j  ][k  ] + A[(t)%2][i+2][j  ][k  ]) +
              coef[5][i][j][k] * (A[(t)%2][i  ][j-2][k  ] + A[(t)%2][i  ][j+2][k  ]) +
              coef[6][i][j][k] * (A[(t)%2][i  ][j  ][k-2] + A[(t)%2][i  ][j  ][k+2]) +
              coef[7][i][j][k] * (A[(t)%2][i-3][j  ][k  ] + A[(t)%2][i+3][j  ][k  ]) +
              coef[8][i][j][k] * (A[(t)%2][i  ][j-3][k  ] + A[(t)%2][i  ][j+3][k  ]) +
              coef[9][i][j][k] * (A[(t)%2][i  ][j  ][k-3] + A[(t)%2][i  ][j  ][k+3]) +
              coef[10][i][j][k]* (A[(t)%2][i-4][j  ][k  ] + A[(t)%2][i+4][j  ][k  ]) +
              coef[11][i][j][k]* (A[(t)%2][i  ][j-4][k  ] + A[(t)%2][i  ][j+4][k  ]) +
              coef[12][i][j][k]* (A[(t)%2][i  ][j  ][k-4] + A[(t)%2][i  ][j  ][k+4]) ;
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* Fixed: the original called lowercase min(), which is undeclared —
     * only the MIN macro is defined in this file. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  (void) ts_return; /* negative-interval flag; recorded but not reported */

  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays, including the top-level pointers and tile_size
  // (all three were leaked by the original).
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);

  return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.