source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
GB_binop__le_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__le_int32)
// A.*B function (eWiseMult): GB (_AemultB_01__le_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__le_int32)
// A.*B function (eWiseMult): GB (_AemultB_03__le_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__le_int32)
// A*D function (colscale): GB (_AxD__le_int32)
// D*A function (rowscale): GB (_DxB__le_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__le_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__le_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_int32)
// C=scalar+B GB (_bind1st__le_int32)
// C=scalar+B' GB (_bind1st_tran__le_int32)
// C=A+scalar GB (_bind2nd__le_int32)
// C=A'+scalar GB (_bind2nd_tran__le_int32)
// C type: bool
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_INT32 || GxB_NO_LE_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled kernel: C += A+B with all three matrices dense.  The code
// generator emits this only for accumulator-capable ops (listed below);
// LE is not one of them, so this stub is compiled out.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense: cij = (aij <= bij), bool result.
GrB_Info GB (_Cdense_ewise3_noaccum__le_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was disabled via GB_control.h
return (GrB_NO_VALUE) ;
#else
// the template applies GB_BINOP (defined above) to every entry
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.  The template is
// compiled out (#if 0) — presumably because LE is not usable as an
// accumulator here (note GB_CTYPE_IS_ATYPE is 0 above) — so this is a
// stub that reports success without modifying C.
GrB_Info GB (_Cdense_accumB__le_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.  As with the C += B
// kernel above, the template is compiled out (#if 0), leaving a stub
// that returns success without touching C.
GrB_Info GB (_Cdense_accumb__le_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale with a diagonal matrix D: cij = (aij <= djj).
GrB_Info GB (_AxD__le_int32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C shares A's pattern; only the bool values of C are computed here
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale with a diagonal matrix D: cij = (dii <= bij).
GrB_Info GB (_DxB__le_int32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C shares B's pattern; only the bool values of C are computed here
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, with cij = (aij <= bij) where both
// entries are present (the "add" refers to the set union of patterns).
GrB_Info GB (_AaddB__le_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// ek_slicing workspaces for M, A, and B; released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B, cij = (aij <= bij)
// on the set intersection of the patterns of A and B.
GrB_Info GB (_AemultB_01__le_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  Since GB_BINOP_FLIP is 0 for LE (the flipped form was
// handled by the caller), only the unflipped template is compiled in.
GrB_Info GB (_AemultB_02__le_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B where M is sparse/hyper and both
// A and B are bitmap/full.
GrB_Info GB (_AemultB_03__le_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C: C=A.*B, C<M>=A.*B, or C<!M>=A.*B.
GrB_Info GB (_AemultB_bitmap__le_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x <= Bx [p]) for every entry present in B, binding the
// scalar x as the first operand of the LE operator.
GrB_Info GB (_bind1st__le_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped inputs
bool *Cx = (bool *) Cx_output ;
const int32_t scalar = (*((int32_t *) x_input)) ;
const int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// only entries present in B's bitmap produce a result
if (GBB (Bb, p))
{
Cx [p] = (scalar <= Bx [p]) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] <= y) for every entry present in A, binding the
// scalar y as the second operand of the LE operator.
GrB_Info GB (_bind2nd__le_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped inputs
bool *Cx = (bool *) Cx_output ;
const int32_t *Ax = (int32_t *) Ax_input ;
const int32_t scalar = (*((int32_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// only entries present in A's bitmap produce a result
if (GBB (Ab, p))
{
Cx [p] = (Ax [p] <= scalar) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (x <= aij) ; \
}
// C = op (x, A'): transpose A and apply LE with the scalar bound first,
// using the GB_CAST_OP macro just defined.
GrB_Info GB (_bind1st_tran__le_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (aij <= y) ; \
}
// C = op (A', y): transpose A and apply LE with the scalar bound second,
// using the GB_CAST_OP macro just defined.
GrB_Info GB (_bind2nd_tran__le_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
test_openmp.c | #include "config.h"
#include <ctype.h>
#include <errno.h>
#include <limits.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "kseq.h"
KSEQ_INIT(int, read)
#if HAVE_SSE2
#include "ssw.h"
#endif
#include "parasail.h"
#include "parasail/memory.h"
#include "parasail/stats.h"
//#include "timer.h"
#include "timer_real.h"
#if HAVE_SSE2
/* Run the SSW (striped Smith-Waterman) aligner on two protein sequences
 * and repackage its score into a parasail result.
 *   s1, s2     : amino-acid strings (letters mapped via the table below)
 *   open, gap  : parasail-style positive penalties (negated for ssw)
 *   pmatrix    : 24x24 substitution matrix
 *   score_size : forwarded to ssw_init
 * Exits the process on allocation failure, matching the error handling
 * style used elsewhere in this driver. */
parasail_result_t* parasail_ssw_(
        const char * const restrict s1, const int s1_len,
        const char * const restrict s2, const int s2_len,
        const int open, const int gap, const parasail_matrix_t * pmatrix,
        int score_size)
{
    parasail_result_t *result = parasail_result_new();
    s_profile *profile = NULL;
    int8_t *s1_num = (int8_t*)malloc(sizeof(int8_t) * s1_len);
    int8_t *s2_num = (int8_t*)malloc(sizeof(int8_t) * s2_len);
    int8_t *matrix = (int8_t*)malloc(sizeof(int8_t) * 24 * 24);
    s_align *ssw_result = NULL;
    int m = 0;

    /* bug fix: the original dereferenced these without checking */
    if (NULL == s1_num || NULL == s2_num || NULL == matrix) {
        perror("malloc");
        exit(1);
    }

    /* This table is used to transform amino acid letters into numbers. */
    static const int8_t table[128] = {
        23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
        23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
        23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
        23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
        23, 0, 20, 4, 3, 6, 13, 7, 8, 9, 23, 11, 10, 12, 2, 23,
        14, 5, 1, 15, 16, 23, 19, 17, 22, 18, 21, 23, 23, 23, 23, 23,
        23, 0, 20, 4, 3, 6, 13, 7, 8, 9, 23, 11, 10, 12, 2, 23,
        14, 5, 1, 15, 16, 23, 19, 17, 22, 18, 21, 23, 23, 23, 23, 23
    };

    /* numeric-encode both sequences and copy the substitution matrix */
    for (m = 0; m < s1_len; ++m) s1_num[m] = table[(int)s1[m]];
    for (m = 0; m < s2_len; ++m) s2_num[m] = table[(int)s2[m]];
    for (m = 0; m < 24*24; ++m) matrix[m] = pmatrix->matrix[m];

    /* ssw takes negative penalties as positive magnitudes */
    profile = ssw_init(s1_num, s1_len, matrix, 24, score_size);
    ssw_result = ssw_align(profile, s2_num, s2_len, -open, -gap, 2, 0, 0, s1_len/2);
    result->score = ssw_result->score1;
    result->saturated = ssw_result->saturated;

    align_destroy(ssw_result);
    init_destroy(profile);
    free(s1_num);
    free(s2_num);
    free(matrix);
    return result;
}
/* Convenience wrapper: SSW alignment via parasail_ssw_ with score_size
 * fixed at 2. */
parasail_result_t* parasail_ssw(
        const char * const restrict s1, const int s1_len,
        const char * const restrict s2, const int s2_len,
        const int open, const int gap, const parasail_matrix_t *matrix)
{
    const int score_size = 2;
    parasail_result_t *result = parasail_ssw_(
            s1, s1_len, s2, s2_len, open, gap, matrix, score_size);
    return result;
}
/* Convenience wrapper: SSW alignment via parasail_ssw_ with score_size
 * fixed at 1. */
parasail_result_t* parasail_ssw_16(
        const char * const restrict s1, const int s1_len,
        const char * const restrict s2, const int s2_len,
        const int open, const int gap, const parasail_matrix_t *matrix)
{
    const int score_size = 1;
    parasail_result_t *result = parasail_ssw_(
            s1, s1_len, s2, s2_len, open, gap, matrix, score_size);
    return result;
}
#endif
/* Smith-Waterman with automatic precision escalation: try 8-bit scores
 * first, then rerun in 16-bit and, if needed, 32-bit when the narrower
 * computation saturates.  The returned result's saturated flag records
 * whether the initial 8-bit attempt saturated (i.e. a rerun happened),
 * not the saturation state of the final run. */
parasail_result_t* parasail_sw(
const char * const restrict s1, const int s1Len,
const char * const restrict s2, const int s2Len,
const int open, const int gap, const parasail_matrix_t *matrix)
{
int saturated = 0;
parasail_result_t *result;
result = parasail_sw_scan_8(s1, s1Len, s2, s2Len, open, gap, matrix);
if (result->saturated) {
/* 8-bit overflowed: remember that and retry with 16-bit scores */
saturated = 1;
parasail_result_free(result);
result = parasail_sw_scan_16(s1, s1Len, s2, s2Len, open, gap, matrix);
}
if (result->saturated) {
/* 16-bit overflowed too: fall back to 32-bit */
parasail_result_free(result);
result = parasail_sw_scan_32(s1, s1Len, s2, s2Len, open, gap, matrix);
}
result->saturated = saturated;
return result;
}
/* Read every sequence from a FASTA/FASTQ file into heap-allocated arrays.
 * On return, *strings_ holds *count_ strdup'd sequence strings and
 * *sizes_ their lengths; the caller owns both arrays.  Exits the process
 * on any I/O or allocation failure (consistent with this test driver). */
static inline void parse_sequences(
        const char *filename, char ***strings_, size_t **sizes_, size_t *count_)
{
    FILE* fp;
    kseq_t *seq = NULL;
    int l = 0;
    char **strings = NULL;
    size_t *sizes = NULL;
    size_t count = 0;
    size_t memory = 1000;

    fp = fopen(filename, "r");
    if(fp == NULL) {
        perror("fopen");
        exit(1);
    }
    strings = malloc(sizeof(char*) * memory);
    if (NULL == strings) {
        /* bug fix: original wrote through a potentially NULL pointer */
        perror("malloc");
        exit(1);
    }
    sizes = malloc(sizeof(size_t) * memory);
    if (NULL == sizes) {
        perror("malloc");
        exit(1);
    }
    seq = kseq_init(fileno(fp));
    while ((l = kseq_read(seq)) >= 0) {
        strings[count] = strdup(seq->seq.s);
        if (NULL == strings[count]) {
            perror("strdup");
            exit(1);
        }
        sizes[count] = seq->seq.l;
        ++count;
        if (count >= memory) {
            /* grow both arrays geometrically */
            char **new_strings = NULL;
            size_t *new_sizes = NULL;
            memory *= 2;
            new_strings = realloc(strings, sizeof(char*) * memory);
            if (NULL == new_strings) {
                perror("realloc");
                exit(1);
            }
            strings = new_strings;
            new_sizes = realloc(sizes, sizeof(size_t) * memory);
            if (NULL == new_sizes) {
                perror("realloc");
                exit(1);
            }
            sizes = new_sizes;
        }
    }
    kseq_destroy(seq);
    fclose(fp);
    *strings_ = strings;
    *sizes_ = sizes;
    *count_ = count;
}
/* n choose k, computed with the multiplicative formula.
 * Division is interleaved with multiplication so intermediates stay
 * exact: a running product of d consecutive integers is divisible by d!.
 * Returns 0 when k > n.
 * (after http://blog.plover.com/math/choose.html) */
static inline unsigned long binomial_coefficient(unsigned long n, unsigned long k)
{
    unsigned long result = 1;
    unsigned long step;
    if (k > n) {
        return 0;
    }
    for (step = 1; step <= k; step++) {
        result *= n;
        n -= 1;
        result /= step;
    }
    return result;
}
static inline void k_combination2(unsigned long pos, unsigned long *a, unsigned long *b)
{
double s;
double i = floor(sqrt(2.0 * pos)) - 1.0;
if (i <= 1.0) {
i = 1.0;
}
s = i * (i - 1.0) / 2.0;
while (pos - s >= i) {
s += i;
i += 1;
}
*a = (unsigned long)(pos - s);
*b = (unsigned long)(i);
}
/* Benchmark driver: all-against-all (or queries-vs-database) pairwise
 * alignment, parallelized with OpenMP.  Options:
 *   -a/-A  primary/secondary alignment function names
 *   -b     substitution matrix name (default blosum62)
 *   -c     query-length cutoff above which the -A function is used
 *   -f/-q  database / query FASTA filenames
 *   -o/-e  gap open / extend penalties
 *   -s/-l  order each pair smallest- or biggest-first
 *   -t     truncate query length
 *   -i     number of timing iterations (all-against-all mode only) */
int main(int argc, char **argv)
{
double timer_clock = 0.0;
unsigned long i = 0;
unsigned long j = 0;
size_t limit = 0;
char *filename_database = NULL;
char **sequences_database = NULL;
size_t *sizes_database = NULL;
size_t seq_count_database = 0;
char *filename_queries = NULL;
char **sequences_queries = NULL;
size_t *sizes_queries = NULL;
size_t seq_count_queries = 0;
char *endptr = NULL;
char *funcname1 = NULL;
char *funcname2 = NULL;
parasail_function_t *function1 = NULL;
parasail_function_t *function2 = NULL;
int c = 0;
char *matrixname = "blosum62";
const parasail_matrix_t *matrix = NULL;
int gap_open = 10;
int gap_extend = 1;
int N = 1;
int saturated = 0;
int smallest_first = 0;
int biggest_first = 0;
int truncate = 0;
int iterations = 1;
int func_cutoff = 0;
int iter = 0;
stats_t stats_time;
stats_clear(&stats_time);
/* parse command-line options */
while ((c = getopt(argc, argv, "a:A:c:b:f:q:o:e:slt:i:")) != -1) {
switch (c) {
case 'a':
funcname1 = optarg;
break;
case 'A':
funcname2 = optarg;
break;
case 'b':
matrixname = optarg;
break;
case 'c':
errno = 0;
func_cutoff = strtol(optarg, &endptr, 10);
if (errno) {
perror("strtol");
exit(1);
}
break;
case 'f':
filename_database = optarg;
break;
case 'q':
filename_queries = optarg;
break;
case 'i':
errno = 0;
iterations = strtol(optarg, &endptr, 10);
if (errno) {
perror("strtol");
exit(1);
}
break;
case 'o':
errno = 0;
gap_open = strtol(optarg, &endptr, 10);
if (errno) {
perror("strtol");
exit(1);
}
break;
case 'e':
errno = 0;
gap_extend = strtol(optarg, &endptr, 10);
if (errno) {
perror("strtol");
exit(1);
}
break;
case 's':
smallest_first = 1;
break;
case 'l':
biggest_first = 1;
break;
case 't':
errno = 0;
truncate = strtol(optarg, &endptr, 10);
if (errno) {
perror("strtol");
exit(1);
}
break;
case '?':
if (optopt == 'a'
|| optopt == 'b'
|| optopt == 'e'
|| optopt == 'f'
|| optopt == 'i'
|| optopt == 'n'
|| optopt == 'o'
|| optopt == 't')
{
fprintf(stderr,
"Option -%c requires an argument.\n",
optopt);
}
/* NOTE: isprint() requires <ctype.h> */
else if (isprint(optopt)) {
fprintf(stderr, "Unknown option `-%c'.\n",
optopt);
}
else {
fprintf(stderr,
"Unknown option character `\\x%x'.\n",
optopt);
}
exit(1);
default:
fprintf(stderr, "default case in getopt\n");
exit(1);
}
}
if (smallest_first && biggest_first) {
fprintf(stderr, "cannot choose both smallest and biggest first\n");
exit(1);
}
/* select the function */
if (funcname1) {
function1 = parasail_lookup_function(funcname1);
#if HAVE_SSE2
/* the two ssw wrappers are not in the lookup table; match by name */
if (NULL == function1) {
if (0 == strcmp(funcname1, "ssw_16")) {
function1 = parasail_ssw_16;
}
else if (0 == strcmp(funcname1, "ssw_8")) {
function1 = parasail_ssw;
}
}
#endif
if (NULL == function1) {
fprintf(stderr, "Specified function1 not found.\n");
exit(1);
}
}
else {
fprintf(stderr, "No alignment function1 specified.\n");
exit(1);
}
if (funcname2) {
function2 = parasail_lookup_function(funcname2);
if (NULL == function2) {
fprintf(stderr, "Specified function2 not found.\n");
exit(1);
}
}
/* select the substitution matrix */
if (matrixname) {
matrix = parasail_matrix_lookup(matrixname);
if (NULL == matrix) {
fprintf(stderr, "Specified substitution matrix not found.\n");
exit(1);
}
}
if (filename_database) {
parse_sequences(filename_database, &sequences_database, &sizes_database, &seq_count_database);
}
else {
fprintf(stderr, "missing database filename\n");
exit(1);
}
/* number of unordered database pairs for all-against-all mode */
limit = binomial_coefficient(seq_count_database, 2);
//printf("%lu choose 2 is %lu\n", seq_count_database, limit);
#if defined(_OPENMP)
/* record the thread count for the final report */
#pragma omp parallel
{
#pragma omp single
{
N = omp_get_max_threads();
//printf("omp_get_max_threads()=%d\n", N);
}
}
#endif
if (filename_queries) {
/* queries-vs-database mode: each query is timed against the whole
 * database; the inner loop is parallelized (j is private per the
 * omp for), and saturation counts are accumulated atomically */
parse_sequences(filename_queries,
&sequences_queries, &sizes_queries, &seq_count_queries);
double total_timer = 0.0;
for (i=0; i<seq_count_queries; ++i) {
int saturated_query = 0;
double local_timer = 0.0;
parasail_function_t *function = function1;
if (func_cutoff > 0) {
/* long queries use the secondary function */
if (sizes_queries[i] > (unsigned long)func_cutoff) {
function = function2;
}
}
local_timer = timer_real();
#pragma omp parallel
{
#pragma omp for schedule(guided)
for (j=0; j<seq_count_database; ++j) {
parasail_result_t *result = function(
sequences_queries[i], sizes_queries[i],
sequences_database[j], sizes_database[j],
gap_open, gap_extend, matrix);
#pragma omp atomic
saturated_query += result->saturated;
parasail_result_free(result);
}
}
local_timer = timer_real() - local_timer;
total_timer += local_timer;
printf("%lu\t %lu\t %d\t %f\n",
i, (unsigned long)sizes_queries[i],
saturated_query, local_timer);
fflush(stdout);
}
printf("total_time=%f\n", total_timer);
fflush(stdout);
}
else {
/* all-against-all mode: every unordered pair (a,b) of database
 * sequences, decoded from the flat index i by k_combination2 */
for (iter=0; iter<iterations; ++iter) {
timer_clock = timer_real();
#pragma omp parallel
{
unsigned long a=0;
unsigned long b=1;
unsigned long swap=0;
#pragma omp for schedule(guided)
for (i=0; i<limit; ++i) {
parasail_function_t *function = function1;
parasail_result_t *result = NULL;
unsigned long query_size;
k_combination2(i, &a, &b);
if (smallest_first) {
if (sizes_database[a] > sizes_database[b]) {
swap = a;
a = b;
b = swap;
}
}
else if (biggest_first) {
if (sizes_database[a] < sizes_database[b]) {
swap = a;
a = b;
b = swap;
}
}
query_size = sizes_database[a];
if (truncate > 0) {
if (query_size > (unsigned long)truncate) {
query_size = truncate;
}
}
if (func_cutoff > 0) {
if (query_size > (unsigned long)func_cutoff) {
function = function2;
}
}
result = function(
sequences_database[a], query_size,
sequences_database[b], sizes_database[b],
gap_open, gap_extend, matrix);
#pragma omp atomic
saturated += result->saturated;
parasail_result_free(result);
}
}
timer_clock = timer_real() - timer_clock;
stats_sample_value(&stats_time, timer_clock);
}
/* summary: function, matrix, penalties, threads, saturation count,
 * and per-iteration timing statistics */
printf("%s\t %s\t %d\t %d\t %d\t %d\t %f\t %f\t %f\t %f\n",
funcname1, matrixname, gap_open, gap_extend, N,
saturated,
stats_time._mean, stats_stddev(&stats_time),
stats_time._min, stats_time._max);
fflush(stdout);
}
return 0;
}
|
GB_unaryop__identity_fp64_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_fp64_int32
// op(A') function: GB_tran__identity_fp64_int32
// C type: double
// A type: int32_t
// cast: double cij = (double) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
double z = (double) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): cast each int32_t entry of A to double, applying
// the identity unary operator, in parallel across anz entries.
GrB_Info GB_unop__identity_fp64_int32
(
double *Cx, // Cx and Ax may be aliased
int32_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_CAST_OP (defined above) performs Cx [p] = (double) Ax [p]
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t k = 0 ; k < anz ; k++)
{
GB_CAST_OP (k, k) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, casting int32_t to double via the
// GB_CAST_OP macro defined above.
GrB_Info GB_tran__identity_fp64_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// select the numeric phase of the transpose template
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
TaskEndLink.c | int x;
// Appears to be a minimal OpenMP compiler/analysis test fixture:
// two sibling untied-of-nothing tasks inside main — TODO confirm intent.
int main() {
// task whose body is a single side-effect-free expression statement
#pragma omp task
{
11;
}
// task declaring a local x that shadows the file-scope x
#pragma omp task
{
int x;
}
}
|
threshold.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD %
% T H H R R E SS H H O O L D D %
% T HHHHH RRRR EEE SSS HHHHH O O L D D %
% T H H R R E SS H H O O L D D %
% T H H R R EEEEE SSSSS H H OOO LLLLL DDDD %
% %
% %
% MagickCore Image Threshold Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/property.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/configure.h"
#include "MagickCore/constitute.h"
#include "MagickCore/decorate.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/montage.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/shear.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"
/*
Define declarations.
*/
#define ThresholdsFilename "thresholds.xml"
/*
Typedef declarations.
*/
/*
  A threshold/dither map, as suggested by the XML shape in
  MinimalThresholdMap below: a width x height matrix of levels with a
  common divisor, identified by map_id.
*/
struct _ThresholdMap
{
char
*map_id,        /* value of the map= attribute */
*description;   /* text of the <description> element */
size_t
width,          /* dimensions of the levels matrix */
height;
ssize_t
divisor,        /* divisor= attribute of <levels> */
*levels;        /* width*height level values */
};
/*
Static declarations.
*/
/*
  Built-in threshold maps in thresholds.xml format — presumably the
  fallback when the ThresholdsFilename file cannot be loaded (confirm
  against GetThresholdMapFile).  NOTE(review): the "checks" map is
  aliased "2x1" but its levels are declared 2x2 — confirm intended.
*/
static const char
*MinimalThresholdMap =
"<?xml version=\"1.0\"?>"
"<thresholds>"
" <threshold map=\"threshold\" alias=\"1x1\">"
" <description>Threshold 1x1 (non-dither)</description>"
" <levels width=\"1\" height=\"1\" divisor=\"2\">"
" 1"
" </levels>"
" </threshold>"
" <threshold map=\"checks\" alias=\"2x1\">"
" <description>Checkerboard 2x1 (dither)</description>"
" <levels width=\"2\" height=\"2\" divisor=\"3\">"
" 1 2"
" 2 1"
" </levels>"
" </threshold>"
"</thresholds>";
/*
Forward declarations.
*/
static ThresholdMap
*GetThresholdMapFile(const char *,const char *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveThresholdImage() selects an individual threshold for each pixel
% based on the range of intensity values in its local neighborhood. This
% allows for thresholding of an image whose global intensity histogram
% doesn't contain distinctive peaks.
%
% The format of the AdaptiveThresholdImage method is:
%
% Image *AdaptiveThresholdImage(const Image *image,const size_t width,
% const size_t height,const double bias,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the width of the local neighborhood.
%
% o height: the height of the local neighborhood.
%
% o bias: the mean bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveThresholdImage(const Image *image,
  const size_t width,const size_t height,const double bias,
  ExceptionInfo *exception)
{
#define AdaptiveThresholdImageTag "AdaptiveThreshold/Image"

  CacheView
    *image_view,
    *threshold_view;

  Image
    *threshold_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickSizeType
    number_pixels;

  ssize_t
    y;

  /*
    Initialize threshold image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  threshold_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (threshold_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(threshold_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      threshold_image=DestroyImage(threshold_image);
      return((Image *) NULL);
    }
  /*
    Threshold image.
  */
  status=MagickTrue;
  progress=0;
  number_pixels=(MagickSizeType) width*height;  /* pixels per local window */
  image_view=AcquireVirtualCacheView(image,exception);
  threshold_view=AcquireAuthenticCacheView(threshold_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,threshold_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      channel_bias[MaxPixelChannels],
      channel_sum[MaxPixelChannels];

    register const Quantum
      *magick_restrict p,
      *magick_restrict pixels;

    register Quantum
      *magick_restrict q;

    register ssize_t
      i,
      x;

    ssize_t
      center,
      u,
      v;

    if (status == MagickFalse)
      continue;
    /*
      Fetch the source rows padded by half the window size on every side;
      the virtual cache view synthesizes the out-of-bounds pixels.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
      (height/2L),image->columns+width,height,exception);
    q=QueueCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Offset (in Quantum units) from the window origin p to the center pixel
      of the first (x == 0) window.
    */
    center=(ssize_t) GetPixelChannels(image)*(image->columns+width)*(height/2L)+
      GetPixelChannels(image)*(width/2);
    for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
    {
      PixelChannel channel=GetPixelChannelChannel(image,i);
      PixelTrait traits=GetPixelChannelTraits(image,channel);
      PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
        channel);
      if ((traits == UndefinedPixelTrait) ||
          (threshold_traits == UndefinedPixelTrait))
        continue;
      if (((threshold_traits & CopyPixelTrait) != 0) ||
          (GetPixelWriteMask(image,p) == 0))
        {
          SetPixelChannel(threshold_image,channel,p[center+i],q);
          continue;
        }
      /*
        Prime the running window sum for x == 0.  channel_bias records the
        sum of the window's right-most column so that the first iteration of
        the x loop below (which subtracts channel_bias and then re-adds the
        right-most column) leaves channel_sum unchanged.
      */
      pixels=p;
      channel_bias[channel]=0.0;
      channel_sum[channel]=0.0;
      for (v=0; v < (ssize_t) height; v++)
      {
        for (u=0; u < (ssize_t) width; u++)
        {
          if (u == (ssize_t) (width-1))
            channel_bias[channel]+=pixels[i];
          channel_sum[channel]+=pixels[i];
          pixels+=GetPixelChannels(image);
        }
        pixels+=GetPixelChannels(image)*image->columns;  /* next window row */
      }
    }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          mean;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (threshold_traits == UndefinedPixelTrait))
          continue;
        if (((threshold_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) == 0))
          {
            SetPixelChannel(threshold_image,channel,p[center+i],q);
            continue;
          }
        /*
          Slide the window one pixel to the right: drop the left-most column
          of the previous window (channel_bias) and add the right-most
          column of the current one.
        */
        channel_sum[channel]-=channel_bias[channel];
        channel_bias[channel]=0.0;
        pixels=p;
        for (v=0; v < (ssize_t) height; v++)
        {
          channel_bias[channel]+=pixels[i];  /* left-most column */
          pixels+=(width-1)*GetPixelChannels(image);
          channel_sum[channel]+=pixels[i];   /* right-most column */
          pixels+=GetPixelChannels(image)*(image->columns+1);
        }
        /*
          Threshold against the local mean offset by the user-supplied bias.
        */
        mean=(double) (channel_sum[channel]/number_pixels+bias);
        SetPixelChannel(threshold_image,channel,(Quantum) ((double)
          p[center+i] <= mean ? 0 : QuantumRange),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(threshold_image);
    }
    if (SyncCacheViewAuthenticPixels(threshold_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AdaptiveThresholdImage)
#endif
        proceed=SetImageProgress(image,AdaptiveThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  threshold_image->type=image->type;
  threshold_view=DestroyCacheView(threshold_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    threshold_image=DestroyImage(threshold_image);
  return(threshold_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B i l e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BilevelImage() changes the value of individual pixels based on the
% intensity of each pixel channel. The result is a high-contrast image.
%
% More precisely, each channel value of the image is 'thresholded' so that if
% it is equal to or less than the given value it is set to zero, while any
% value greater than that given is set to its maximum, or QuantumRange.
%
% This function is what is used to implement the "-threshold" operator for
% the command line API.
%
% If the default channel setting is given the image is thresholded using just
% the gray 'intensity' of the image, rather than the individual channels.
%
% The format of the BilevelImage method is:
%
% MagickBooleanType BilevelImage(Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: define the threshold values.
%
% o exception: return any errors or warnings in this structure.
%
% Aside: You can get the same results as operator using LevelImages()
% with the 'threshold' value for both the black_point and the white_point.
%
*/
MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Hard-threshold each updatable channel: values at or below the threshold
    become 0, values above it become QuantumRange.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelWriteMask(image,q) != 0)
        {
          double
            value;

          register ssize_t
            j;

          /*
            By default compare against the pixel intensity; when an explicit
            channel mask is set, compare each channel's own value instead.
          */
          value=GetPixelIntensity(image,q);
          for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
          {
            PixelChannel channel=GetPixelChannelChannel(image,j);
            PixelTrait traits=GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            if (image->channel_mask != DefaultChannels)
              value=(double) q[j];
            if (value <= threshold)
              q[j]=(Quantum) 0;
            else
              q[j]=(Quantum) QuantumRange;
          }
        }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BilevelImage)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l a c k T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlackThresholdImage() is like ThresholdImage() but forces all pixels below
% the threshold into black while leaving all pixels at or above the threshold
% unchanged.
%
% The format of the BlackThresholdImage method is:
%
% MagickBooleanType BlackThresholdImage(Image *image,
% const char *threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: define the threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    A NULL threshold string is a no-op.
  */
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Parse the threshold string in geometry syntax: rho seeds every channel;
    sigma/xi/psi (and chi for CMYK) override individual channels when given.
  */
  GetPixelInfo(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  if (threshold.colorspace == CMYKColorspace)
    {
      /*
        In CMYK the fourth value (psi) is the black channel and the fifth
        (chi) is alpha.
      */
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  if ((flags & PercentValue) != 0)
    {
      /*
        Thresholds were given as percentages; scale them to quantum range.
      */
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    Black threshold image: force channel values strictly below the threshold
    to black, leaving values at or above it unchanged.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      /*
        By default compare against the pixel intensity; when an explicit
        channel mask is set, compare each channel's own value instead.
      */
      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        if (pixel < GetPixelInfoChannel(&threshold,channel))
          q[i]=(Quantum) 0;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BlackThresholdImage)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l a m p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClampImage() sets each pixel whose value is below zero to zero and any
% pixel whose value is above the quantum range to the quantum range (e.g.
% 65535); otherwise the pixel value remains unchanged.
%
% The format of the ClampImage method is:
%
% MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
{
#define ClampImageTag "Clamp/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /*
        Palette image: clamping each colormap entry is sufficient.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        register PixelInfo
          *magick_restrict entry;

        entry=image->colormap+i;
        entry->red=(double) ClampPixel(entry->red);
        entry->green=(double) ClampPixel(entry->green);
        entry->blue=(double) ClampPixel(entry->blue);
        entry->alpha=(double) ClampPixel(entry->alpha);
      }
      return(SyncImage(image,exception));
    }
  /*
    Clamp each updatable channel of every pixel to the quantum range.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelWriteMask(image,q) != 0)
        {
          register ssize_t
            j;

          for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
          {
            PixelChannel channel=GetPixelChannelChannel(image,j);
            PixelTrait traits=GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) != 0)
              q[j]=ClampPixel(q[j]);
          }
        }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ClampImage)
#endif
        proceed=SetImageProgress(image,ClampImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyThresholdMap() de-allocates the given ThresholdMap.
%
% The format of the DestroyThresholdMap method is:
%
% ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
%
% A description of each parameter follows.
%
% o map: Pointer to the Threshold map to destroy
%
*/
MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
{
  /*
    Release the map's owned strings and level table, then the map itself.
    The members are independent, so the release order is arbitrary.
  */
  assert(map != (ThresholdMap *) NULL);
  if (map->levels != (ssize_t *) NULL)
    map->levels=(ssize_t *) RelinquishMagickMemory(map->levels);
  if (map->description != (char *) NULL)
    map->description=DestroyString(map->description);
  if (map->map_id != (char *) NULL)
    map->map_id=DestroyString(map->map_id);
  return((ThresholdMap *) RelinquishMagickMemory(map));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMap() loads and searches one or more threshold map files for the
% map matching the given name or alias.
%
% The format of the GetThresholdMap method is:
%
% ThresholdMap *GetThresholdMap(const char *map_id,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o map_id: ID of the map to look for.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ThresholdMap *GetThresholdMap(const char *map_id,
  ExceptionInfo *exception)
{
  ThresholdMap
    *map;

  /*
    Search the built-in minimal map first, then each configured threshold
    file until a match for map_id is found.
  */
  map=GetThresholdMapFile(MinimalThresholdMap,"built-in",map_id,exception);
#if !defined(MAGICKCORE_ZERO_CONFIGURATION_SUPPORT)
  if (map == (ThresholdMap *) NULL)
    {
      const StringInfo
        *option;

      LinkedListInfo
        *options;

      options=GetConfigureOptions(ThresholdsFilename,exception);
      for (option=(const StringInfo *) GetNextValueInLinkedList(options);
           option != (const StringInfo *) NULL;
           option=(const StringInfo *) GetNextValueInLinkedList(options))
      {
        map=GetThresholdMapFile((const char *) GetStringInfoDatum(option),
          GetStringInfoPath(option),map_id,exception);
        if (map != (ThresholdMap *) NULL)
          break;
      }
      options=DestroyConfigureOptions(options);
    }
#endif
  return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMapFile() looks for a given threshold map name or alias in the
% given XML file data, and returns the allocated map when found.
%
% The format of the ListThresholdMaps method is:
%
% ThresholdMap *GetThresholdMap(const char *xml,const char *filename,
% const char *map_id,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o map_id: ID of the map to look for in XML list.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
  const char *map_id,ExceptionInfo *exception)
{
  char
    *p;

  const char
    *attribute,
    *content;

  double
    value;

  register ssize_t
    i;

  ThresholdMap
    *map;

  XMLTreeInfo
    *description,
    *levels,
    *threshold,
    *thresholds;

  /*
    Parse the XML and locate the <threshold> element whose "map" or "alias"
    attribute matches map_id.
  */
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  map=(ThresholdMap *) NULL;
  thresholds=NewXMLTree(xml,exception);
  if (thresholds == (XMLTreeInfo *) NULL)
    return(map);
  for (threshold=GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold=GetNextXMLTreeTag(threshold))
  {
    attribute=GetXMLTreeAttribute(threshold,"map");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
    attribute=GetXMLTreeAttribute(threshold,"alias");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
  }
  if (threshold == (XMLTreeInfo *) NULL)
    {
      /*
        No matching map in this file; not an error (caller tries others).
      */
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /*
    A valid map must carry a <description> and a <levels> element.
  */
  description=GetXMLTreeChild(threshold,"description");
  if (description == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<description>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  levels=GetXMLTreeChild(threshold,"levels");
  if (levels == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /*
    Allocate the map and copy its identity strings.
  */
  map=(ThresholdMap *) AcquireMagickMemory(sizeof(ThresholdMap));
  if (map == (ThresholdMap *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  map->map_id=(char *) NULL;
  map->description=(char *) NULL;
  map->levels=(ssize_t *) NULL;
  attribute=GetXMLTreeAttribute(threshold,"map");
  if (attribute != (char *) NULL)
    map->map_id=ConstantString(attribute);
  content=GetXMLTreeContent(description);
  if (content != (char *) NULL)
    map->description=ConstantString(content);
  /*
    Validate the required <levels> attributes: width and height give the
    tile geometry; divisor is the threshold quantization (must be >= 2).
  */
  attribute=GetXMLTreeAttribute(levels,"width");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->width=StringToUnsignedLong(attribute);
  if (map->width == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"height");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels height>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->height=StringToUnsignedLong(attribute);
  if (map->height == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels height>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"divisor");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels divisor>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->divisor=(ssize_t) StringToLong(attribute);
  if (map->divisor < 2)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels divisor>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  /*
    Parse exactly width*height level values from the element content; each
    value must lie in [0, divisor].
  */
  content=GetXMLTreeContent(levels);
  if (content == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingContent", "<levels>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height*
    sizeof(*map->levels));
  if (map->levels == (ssize_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  for (i=0; i < (ssize_t) (map->width*map->height); i++)
  {
    map->levels[i]=(ssize_t) strtol(content,&p,10);
    if (p == content)  /* content exhausted before the grid was filled */
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> too few values, map \"%s\"",map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    if ((map->levels[i] < 0) || (map->levels[i] > map->divisor))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"",
          (double) map->levels[i],map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    content=p;
  }
  /*
    This parse is only a probe for surplus values: it must fail (p ==
    content) or the content held more than width*height entries.
  */
  value=(double) strtol(content,&p,10);
  (void) value;
  if (p != content)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidContent", "<level> too many values, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  thresholds=DestroyXMLTree(thresholds);
  return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ L i s t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMapFile() lists the threshold maps and their descriptions
% in the given XML file data.
%
% The format of the ListThresholdMaps method is:
%
% MagickBooleanType ListThresholdMaps(FILE *file,const char*xml,
% const char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o file: An pointer to the output FILE.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
  const char *filename,ExceptionInfo *exception)
{
  const char
    *alias,
    *content,
    *map;

  XMLTreeInfo
    *description,
    *threshold,
    *thresholds;

  /*
    Parse the XML and emit one table row per <threshold> element.
  */
  assert(xml != (char *) NULL);
  assert(file != (FILE *) NULL);
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if (thresholds == (XMLTreeInfo *) NULL)
    return(MagickFalse);
  (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
  (void) FormatLocaleFile(file,
    "----------------------------------------------------\n");
  for (threshold=GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold=GetNextXMLTreeTag(threshold))
  {
    map=GetXMLTreeAttribute(threshold,"map");
    if (map == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingAttribute", "<map>");
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    alias=GetXMLTreeAttribute(threshold,"alias");  /* alias is optional */
    description=GetXMLTreeChild(threshold,"description");
    if (description == (XMLTreeInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingElement", "<description>, map \"%s\"",map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    content=GetXMLTreeContent(description);
    if (content == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingContent", "<description>, map \"%s\"", map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,
      alias != (const char *) NULL ? alias : "",content);
  }
  thresholds=DestroyXMLTree(thresholds);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i s t T h r e s h o l d M a p s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMaps() lists the threshold maps and their descriptions
% as defined by "threshold.xml" to a file.
%
% The format of the ListThresholdMaps method is:
%
% MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o file: An pointer to the output FILE.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  MagickStatusType
    status;

  /*
    List the maps from every configured threshold file; default to stdout.
  */
  if (file == (FILE *) NULL)
    file=stdout;
  status=MagickTrue;
  options=GetConfigureOptions(ThresholdsFilename,exception);
  (void) FormatLocaleFile(file,
    "\n Threshold Maps for Ordered Dither Operations\n");
  for (option=(const StringInfo *) GetNextValueInLinkedList(options);
       option != (const StringInfo *) NULL;
       option=(const StringInfo *) GetNextValueInLinkedList(options))
  {
    (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option));
    status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),exception);
  }
  options=DestroyConfigureOptions(options);
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O r d e r e d D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OrderedDitherImage() performs an ordered dither based on a number
% of pre-defined dithering threshold maps, but over multiple intensity
% levels, which can be different for different channels, according to the
% input argument.
%
% The format of the OrderedDitherImage method is:
%
% MagickBooleanType OrderedDitherImage(Image *image,
% const char *threshold_map,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold_map: A string containing the name of the threshold dither
% map to use, followed by zero or more numbers representing the number
% of color levels tho dither between.
%
% Any level number less than 2 will be equivalent to 2, and means only
% binary dithering will be applied to each color channel.
%
% No numbers also means a 2 level (bitmap) dither will be applied to all
% channels, while a single number is the number of levels applied to each
% channel in sequence. More numbers will be applied in turn to each of
% the color channels.
%
% For example: "o3x3,6" will generate a 6 level posterization of the
% image with an ordered 3x3 diffused pixel dither being applied between
% each level. While checker,8,8,4 will produce a 332 colormapped image
% with only a single checkerboard hash pattern (50% grey) between each
% color level, to basically double the number of color levels with
% a bare minimum of dithering.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType OrderedDitherImage(Image *image,
  const char *threshold_map,ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  CacheView
    *image_view;

  char
    token[MagickPathExtent];

  const char
    *p;

  double
    levels[CompositePixelChannel];

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  ThresholdMap
    *map;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (threshold_map == (const char *) NULL)
    return(MagickTrue);
  /*
    Extract the map name (the leading token) from the threshold_map argument.
  */
  p=(char *) threshold_map;
  while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) &&
         (*p != '\0'))
    p++;
  threshold_map=p;
  while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) &&
         (*p != '\0'))
  {
    if ((p-threshold_map) >= (MagickPathExtent-1))
      break;
    token[p-threshold_map]=(*p);
    p++;
  }
  token[p-threshold_map]='\0';
  map=GetThresholdMap(token,exception);
  if (map == (ThresholdMap *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "InvalidArgument","%s : '%s'","ordered-dither",threshold_map);
      return(MagickFalse);
    }
  /*
    Parse the optional level counts: a single number applies to every
    channel; additional numbers apply to successive channels in turn.
  */
  for (i=0; i < MaxPixelChannels; i++)
    levels[i]=2.0;
  p=strchr((char *) threshold_map,',');
  if ((p != (char *) NULL) && (isdigit((int) ((unsigned char) *(++p))) != 0))
    {
      GetNextToken(p,&p,MagickPathExtent,token);
      for (i=0; (i < MaxPixelChannels); i++)
        levels[i]=StringToDouble(token,(char **) NULL);
      for (i=0; (*p != '\0') && (i < MaxPixelChannels); i++)
      {
        GetNextToken(p,&p,MagickPathExtent,token);
        if (*token == ',')
          GetNextToken(p,&p,MagickPathExtent,token);
        levels[i]=StringToDouble(token,(char **) NULL);
      }
    }
  /*
    Convert level counts into the number of quantization intervals.
  */
  for (i=0; i < MaxPixelChannels; i++)
    if (fabs(levels[i]) >= 1)
      levels[i]-=1.0;
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    {
      map=DestroyThresholdMap(map);  /* was leaked on this error path */
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      ssize_t
        n;

      n=0;
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        ssize_t
          level,
          threshold;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (fabs(levels[n]) < MagickEpsilon)
          {
            n++;  /* a zero level count disables dithering for this channel */
            continue;
          }
        /*
          Posterize the channel value; the threshold map tile decides
          whether the residue rounds up or down to a neighboring level.
        */
        threshold=(ssize_t) (QuantumScale*q[i]*(levels[n]*(map->divisor-1)+1));
        level=threshold/(map->divisor-1);
        threshold-=level*(map->divisor-1);
        q[i]=ClampToQuantum((double) (level+(threshold >=
          map->levels[(x % map->width)+map->width*(y % map->height)]))*
          QuantumRange/levels[n]);
        n++;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_OrderedDitherImage)
#endif
        proceed=SetImageProgress(image,DitherImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  map=DestroyThresholdMap(map);
  /*
    Was return(MagickTrue): propagate pixel-cache failures to the caller,
    matching every other threshold method in this file.
  */
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P e r c e p t i b l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PerceptibleImage() set each pixel whose value is less than |epsilon| to
% epsilon or -epsilon (whichever is closer) otherwise the pixel value remains
% unchanged.
%
% The format of the PerceptibleImage method is:
%
% MagickBooleanType PerceptibleImage(Image *image,const double epsilon,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum PerceptibleThreshold(const Quantum quantum,
const double epsilon)
{
double
sign;
sign=(double) quantum < 0.0 ? -1.0 : 1.0;
if ((sign*quantum) >= epsilon)
return(quantum);
return((Quantum) (sign*epsilon));
}
MagickExport MagickBooleanType PerceptibleImage(Image *image,
  const double epsilon,ExceptionInfo *exception)
{
#define PerceptibleImageTag "Perceptible/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;  /* rows completed, for the progress monitor */

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      register PixelInfo
        *magick_restrict q;

      /*
        PseudoClass image: threshold each colormap entry instead of
        visiting every pixel, then synchronize the pixel cache with the
        updated colormap.
      */
      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        q->red=(double) PerceptibleThreshold(ClampToQuantum(q->red),
          epsilon);
        q->green=(double) PerceptibleThreshold(ClampToQuantum(q->green),
          epsilon);
        q->blue=(double) PerceptibleThreshold(ClampToQuantum(q->blue),
          epsilon);
        q->alpha=(double) PerceptibleThreshold(ClampToQuantum(q->alpha),
          epsilon);
        q++;
      }
      return(SyncImage(image,exception));
    }
  /*
    Perceptible image: apply the threshold to every updatable channel of
    every authentic pixel, one row per parallel iteration.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    /* Another row may have already failed; skip remaining work. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /* A zero write mask means this pixel must not be modified. */
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PerceptibleThreshold(q[i],epsilon);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress updates across the parallel rows. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_PerceptibleImage)
#endif
        proceed=SetImageProgress(image,PerceptibleImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a n d o m T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RandomThresholdImage() changes the value of individual pixels based on the
% intensity of each pixel compared to a random threshold. The result is a
% low-contrast, two color image.
%
% The format of the RandomThresholdImage method is:
%
%      MagickBooleanType RandomThresholdImage(Image *image,
%        const double min_threshold,const double max_threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o min_threshold, max_threshold: Specify the low and high thresholds.
%      These values range from 0 to QuantumRange.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Binarize each updatable channel against a per-pixel random threshold:
  values at or below min_threshold go to 0, values above max_threshold go
  to QuantumRange, and values in between are compared to a uniformly
  random threshold in [0,QuantumRange].

  Fix: removed the unused local `PixelInfo threshold` and its
  `GetPixelInfo` initialization — the per-channel loop declares its own
  `double threshold`, so the outer variable was dead code.
*/
MagickExport MagickBooleanType RandomThresholdImage(Image *image,
  const double min_threshold, const double max_threshold,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;  /* rows completed, for the progress monitor */

  RandomInfo
    **magick_restrict random_info;  /* one generator per thread */

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Random threshold image.
  */
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* Only parallelize when the generator is not externally seeded, so a
     user-specified seed yields reproducible output. */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /* A zero write mask means this pixel must not be modified. */
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          threshold;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* Clamp the effective threshold into [min,max]; randomize only
           inside that band. */
        if ((double) q[i] < min_threshold)
          threshold=min_threshold;
        else
          if ((double) q[i] > max_threshold)
            threshold=max_threshold;
          else
            threshold=(double) (QuantumRange*
              GetPseudoRandomValue(random_info[id]));
        q[i]=(double) q[i] <= threshold ? 0 : QuantumRange;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress updates across the parallel rows. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RandomThresholdImage)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W h i t e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WhiteThresholdImage() is like ThresholdImage() but forces all pixels above
% the threshold into white while leaving all pixels at or below the threshold
% unchanged.
%
% The format of the WhiteThresholdImage method is:
%
% MagickBooleanType WhiteThresholdImage(Image *image,
% const char *threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: Define the threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WhiteThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;  /* rows completed, for the progress monitor */

  PixelInfo
    threshold;  /* per-channel threshold parsed from the string */

  MagickStatusType
    flags;  /* which geometry fields the string supplied */

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* A null threshold string is a no-op, not an error. */
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* NOTE(review): gray images are converted to sRGB before thresholding;
     the condition tests IsGrayColorspace (not its negation) — confirm
     intended against upstream. */
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) TransformImageColorspace(image,sRGBColorspace,exception);
  GetPixelInfo(image,&threshold);
  /*
    Parse thresholds, e.g. "50%" or "red{,green{,blue{,alpha}}}": rho
    seeds every channel; sigma/xi/psi (and chi for CMYK) override
    individual channels.
  */
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  if (threshold.colorspace == CMYKColorspace)
    {
      /* CMYK images: psi addresses black and chi addresses alpha. */
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  if ((flags & PercentValue) != 0)
    {
      /* A '%' suffix scales the values into the quantum range. */
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    White threshold image: force pixels above the threshold to white,
    leaving pixels at or below it unchanged.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      /* A zero write mask means this pixel must not be modified. */
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* With a restricted channel mask, compare the channel value
           itself rather than the overall pixel intensity. */
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        if (pixel > GetPixelInfoChannel(&threshold,channel))
          q[i]=QuantumRange;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress updates across the parallel rows. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_WhiteThresholdImage)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
kmeans.c | /** @file kmeans.c
** @brief K-means - Declaration
** @author Andrea Vedaldi, David Novotny
**/
/*
Copyright (C) 2007-12 Andrea Vedaldi and Brian Fulkerson.
Copyright (C) 2013 Andrea Vedaldi and David Novotny.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
/**
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page kmeans K-means clustering
@author Andrea Vedaldi
@author David Novotny
@tableofcontents
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@ref kmeans.h implements a number of algorithm for **K-means
quantization**: Lloyd @cite{lloyd82least}, an accelerated version by
Elkan @cite{elkan03using}, and a large scale algorithm based on
Approximate Nearest Neighbors (ANN). All algorithms support @c float
or @c double data and can use the $l^1$ or the $l^2$ distance for
clustering. Furthermore, all algorithms can take advantage of multiple
CPU cores.
Please see @subpage kmeans-fundamentals for a technical description of
K-means and of the algorithms implemented here.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section kmeans-starting Getting started
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
The goal of K-means is to partition a dataset into $K$
“compact” clusters. The following example demonstrates
using @ref kmeans.h in the C programming language to partition @c
numData @c float vectors into compute @c numCenters clusters using
Lloyd's algorithm:
@code
#include <vl/kmeans.h>
double energy ;
double * centers ;
// Use float data and the L2 distance for clustering
VlKMeans * kmeans = vl_kmeans_new (VL_TYPE_FLOAT, VlDistanceL2) ;
// Use Lloyd algorithm
vl_kmeans_set_algorithm (kmeans, VlKMeansLloyd) ;
// Initialize the cluster centers by randomly sampling the data
vl_kmeans_init_centers_with_rand_data (kmeans, data, dimension, numData, numCenters) ;
// Run at most 100 iterations of cluster refinement using Lloyd algorithm
vl_kmeans_set_max_num_iterations (kmeans, 100) ;
vl_kmeans_refine_centers (kmeans, data, numData) ;
// Obtain the energy of the solution
energy = vl_kmeans_get_energy(kmeans) ;
// Obtain the cluster centers
centers = vl_kmeans_get_centers(kmeans) ;
@endcode
Once the centers have been obtained, new data points can be assigned
to clusters by using the ::vl_kmeans_quantize function:
@code
vl_uint32 * assignments = vl_malloc(sizeof(vl_uint32) * numData) ;
float * distances = vl_malloc(sizeof(float) * numData) ;
vl_kmeans_quantize(kmeans, assignments, distances, data, numData) ;
@endcode
Alternatively, one can directly assign new pointers to the closest
centers, without bothering with a ::VlKMeans object.
There are several considerations that may impact the performance of
KMeans. First, since K-means is usually based on a local optimization
algorithm, the **initialization method** is important. The following
initialization methods are supported:
Method | Function | Description
---------------|-----------------------------------------|-----------------------------------------------
Random samples | ::vl_kmeans_init_centers_with_rand_data | Random data points
K-means++ | ::vl_kmeans_init_centers_plus_plus | Random selection biased towards diversity
Custom | ::vl_kmeans_set_centers | Choose centers (useful to run quantization only)
See @ref kmeans-init for further details. The initialization methods
use a randomized selection of the data points; the random number
generator init is controlled by ::vl_rand_init.
The second important choice is the **optimization algorithm**. The
following optimization algorithms are supported:
Algorithm | Symbol | See | Description
------------|------------------|-------------------|-----------------------------------------------
Lloyd | ::VlKMeansLloyd | @ref kmeans-lloyd | Alternate EM-style optimization
Elkan | ::VlKMeansElkan | @ref kmeans-elkan | A speedup using triangular inequalities
ANN | ::VlKMeansANN | @ref kmeans-ann | A speedup using approximated nearest neighbors
See the relative sections for further details. These algorithm are
iterative, and stop when either a **maximum number of iterations**
(::vl_kmeans_set_max_num_iterations) is reached, or when the energy
changes sufficiently slowly in one iteration (::vl_kmeans_set_min_energy_variation).
All the three algorithms support multithreaded computations. The number
of threads used is usually controlled globally by ::vl_set_num_threads.
**/
/**
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page kmeans-fundamentals K-means fundamentals
@tableofcontents
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
Given $n$ points $\bx_1,\dots,\bx_n \in \real^d$, the goal of K-means
is to find $K$ `centers` $\bc_1,\dots,\bc_K \in \real^d$ and
`assignments` $q_1,\dots,q_n \in \{1,\dots,K\}$ of the points to the
centers such that the sum of distances
\[
E(\bc_1,\dots,\bc_k,q_1,\dots,q_n)
= \sum_{i=1}^n \|\bx_i - \bc_{q_i} \|_p^p
\]
is minimized. $K$-means is obtained for the case $p=2$ ($l^2$ norm),
because in this case the optimal centers are the means of the input
vectors assigned to them. Here the generalization $p=1$ ($l^1$ norm)
will also be considered.
Up to normalization, the K-means objective $E$ is also the average
reconstruction error if the original points are approximated with the
cluster centers. Thus K-means is used not only to group the input
points into cluster, but also to `quantize` their values.
K-means is widely used in computer vision, for example in the
construction of vocabularies of visual features (visual words). In
these applications the number $n$ of points to cluster and/or the
number $K$ of clusters is often large. Unfortunately, minimizing the
objective $E$ is in general a difficult combinatorial problem, so
locally optimal or approximated solutions are sought instead.
The basic K-means algorithm alternate between re-estimating the
centers and the assignments (@ref kmeans-lloyd). Combined with a good
initialization strategy (@ref kmeans-init) and, potentially, by
re-running the optimization from a number of randomized starting
states, this algorithm may attain satisfactory solutions in practice.
However, despite its simplicity, Lloyd's algorithm is often too slow.
A good replacement is Elkan's algorithm (@ref kmeans-elkan), which
uses the triangular inequality to cut down significantly the cost of
Lloyd's algorithm. Since this algorithm is otherwise equivalent, it
should often be preferred.
For very large problems (millions of point to clusters and hundreds,
thousands, or more clusters to find), even Elkan's algorithm is not
sufficiently fast. In these cases, one can resort to a variant of
Lloyd's algorithm that uses an approximated nearest neighbors routine
(@ref kmeans-ann).
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section kmeans-init Initialization methods
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
All the $K$-means algorithms considered here find locally optimal
solutions; as such the way they are initialized is important. @ref
kmeans.h supports the following initialization algorithms:
@par Random data samples
The simplest initialization method is to sample $K$ points at random
from the input data and use them as initial values for the cluster
centers.
@par K-means++
@cite{arthur07k-means} proposes a randomized initialization of the
centers which improves upon random selection. The first center $\bc_1$
is selected at random from the data points $\bx_1, \dots, \bx_n $ and
the distance from this center to all points $\|\bx_i - \bc_1\|_p^p$ is
computed. Then the second center $\bc_2$ is selected at random from
the data points with probability proportional to the distance. The
procedure is repeated to obtain the other centers by using the minimum
distance to the centers collected so far.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section kmeans-lloyd Lloyd's algorithm
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
The most common K-means method is Lloyd's algorithm
@cite{lloyd82least}. This algorithm is based on the observation that,
while jointly optimizing clusters and assignment is difficult,
optimizing one given the other is easy. Lloyd's algorithm alternates
the steps:
1. **Quantization.** Each point $\bx_i$ is reassigned to the center
$\bc_{q_j}$ closer to it. This requires finding for each point the
closest among $K$ other points, which is potentially slow.
2. **Center estimation.** Each center $\bc_q$ is updated to minimize
its average distances to the points assigned to it. It is easy to
show that the best center is the mean or median of the points,
respectively if the $l^2$ or $l^1$ norm is considered.
A naive implementation of the assignment step requires $O(dnK)$
operations, where $d$ is the dimensionality of the data, $n$ the
number of data points, and $K$ the number of centers. Updating the
centers is much cheaper: $O(dn)$ operations suffice to compute the $K$
means and a slightly higher cost is required for the medians. Clearly,
the bottleneck is the assignment computation, and this is what the
other K-means algorithm try to improve.
During the iterations, it can happen that a cluster becomes empty. In
this case, K-means automatically **“restarts” the
cluster** center by selecting a training point at random.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section kmeans-elkan Elkan's algorithm
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
Elkan's algorithm @cite{elkan03using} is a variation of Lloyd
alternate optimization algorithm (@ref kmeans-lloyd) that uses the
triangular inequality to avoid many distance calculations when
assigning points to clusters. While much faster than Lloyd, Elkan's
method uses storage proportional to the number of clusters by data
points, which makes it unpractical for a very large number of
clusters.
The idea of this algorithm is that, if a center update does not move
them much, then most of the point-to-center computations can be
avoided when the point-to-center assignments are recomputed. To detect
which distances need evaluation, the triangular inequality is used to
lower and upper bound distances after a center update.
Elkan algorithms uses two key observations. First, one has
\[
\|\bx_i - \bc_{q_i}\|_p \leq \|\bc - \bc_{q_i}\|_p / 2
\quad\Rightarrow\quad
\|\bx_i - \bc_{q_i}\|_p \leq \|\bx_i - \bc\|_p.
\]
Thus if the distance between $\bx_i$ and its current center
$\bc_{q_i}$ is less than half the distance of the center $\bc_{q_i}$
to another center $\bc$, then $\bc$ can be skipped when the new
assignment for $\bx_i$ is searched. Checking this requires keeping
track of all the inter-center distances, but centers are typically a
small fraction of the training data, so overall this can be a
significant saving. In particular, if this condition is satisfied for
all the centers $\bc \not= \bc_{q_i}$, the point $\bx_i$ can be
skipped completely. Furthermore, the condition can be tested also
based on an upper bound $UB_i$ of $\|\bx_i - \bc_{q_i}\|_p$.
Second, if a center $\bc$ is updated to $\hat{\bc}$, then the new
distance from $\bx$ to $\hat{\bc}$ is bounded from below and above by
\[
\|\bx - \bc\|_p - \|\bc - \hat{\bc}\|_p
\leq
\|\bx - \hat{\bc}\|_p
\leq
\|\bx - \bc\|_p + \|\bc - \hat{\bc}\|_p.
\]
This allows to maintain an upper bound on the distance of $\bx_i$ to
its current center $\bc_{q_i}$ and a lower bound to any other center
$\bc$:
@f{align*}
UB_i & \leftarrow UB_i + \|\bc_{q_i} - \hat{\bc}_{q_i} \|_p \\
LB_i(\bc) & \leftarrow LB_i(\bc) - \|\bc -\hat \bc\|_p.
@f}
Thus the K-means algorithm becomes:
1. **Initialization.** Compute $LB_i(\bc) = \|\bx_i -\hat \bc\|_p$ for
all points and centers. Find the current assignments $q_i$ and
bounds $UB_i$ by finding the closest centers to each point: $UB_i =
\min_{\bc} LB_i(\bc)$.
2. **Center estimation.**
1. Recompute all the centers based on the new means; call the updated
version $\hat{\bc}$.
2. Update all the bounds based on the distance $\|\bc - \hat\bc\|_p$
as explained above.
3. Set $\bc \leftarrow \hat\bc$ for all the centers and go to the next
iteration.
3. **Quantization.**
1. Skip any point $\bx_i$ such that $UB_i \leq \frac{1}{2} \|\bc_{q_i} - \bc\|_p$
for all centers $\bc \not= \bc_{q_i}$.
2. For each remaining point $\bx_i$ and center $\bc \not= \bc_{q_i}$:
1. Skip $\bc$ if
\[
UB_i \leq \frac{1}{2} \| \bc_{q_i} - \bc \|
\quad\text{or}\quad
UB_i \leq LB_i(\bc).
\]
The first condition reflects the first observation above; the
second uses the bounds to decide if $\bc$ can be closer than the
current center $\bc_{q_i}$ to the point $\bx_i$. If the center
cannot be skipped, continue as follows.
3. Skip $\bc$ if the condition above is satisfied after making the
upper bound tight:
\[
UB_i = LB_i(\bc_{q_i}) = \| \bx_i - \bc_{q_i} \|_p.
\]
Note that the latter calculation can be done only once for $\bx_i$.
If the center cannot be skipped still, continue as follows.
4. Tighten the lower bound too:
\[
LB_i(\bc) = \| \bx_i - \bc \|_p.
\]
At this point both $UB_i$ and $LB_i(\bc)$ are tight. If $LB_i <
UB_i$, then the point $\bx_i$ should be reassigned to
$\bc$. Update $q_i$ to the index of center $\bc$ and reset $UB_i
= LB_i(\bc)$.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section kmeans-ann ANN algorithm
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
The *Approximate Nearest Neighbor* (ANN) K-means algorithm
@cite{beis97shape} @cite{silpa-anan08optimised} @cite{muja09fast} is a
variant of Lloyd's algorithm (@ref kmeans-lloyd) uses a best-bin-first
randomized KD-tree algorithm to approximately (and quickly) find the
closest cluster center to each point. The KD-tree implementation is
based on @ref kdtree.
The algorithm can be summarized as follows:
1. **Quantization.** Each point $\bx_i$ is reassigned to the center
$\bc_{q_j}$ closer to it. This starts by indexing the $K$ centers
by a KD-tree and then using the latter to quickly find the closest
center for every training point. The search is approximated to
further improve speed. This opens up the possibility that a data
point may receive an assignment that is *worse* than the current
one. This is avoided by checking that the new assignment estimated
by using ANN is an improvement; otherwise the old assignment is
kept.
2. **Center estimation.** Each center $\bc_q$ is updated to minimize
its average distances to the points assigned to it. It is easy to
show that the best center is the mean or median of the points,
respectively if the $l^2$ or $l^1$ norm is considered.
The key is to trade-off carefully the speedup obtained by using the
ANN algorithm and the loss in accuracy when retrieving neighbors. Due
to the curse of dimensionality, KD-trees become less effective for
higher dimensional data, so that the search cost, which in the best
case is logarithmic with this data structure, may become effectively
linear. This is somehow mitigated by the fact that a new KD-tree
is computed at each iteration, reducing the likelihood that points may
get stuck with sub-optimal assignments.
Experiments with the quantization of 128-dimensional SIFT features
show that the ANN algorithm may use one quarter of the comparisons of
Elkan's while retaining a similar solution accuracy.
*/
#include "kmeans.h"
#include "generic.h"
#include "mathop.h"
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* ================================================================ */
#ifndef VL_KMEANS_INSTANTIATING
/** ------------------------------------------------------------------
** @brief Reset state
**
** The function reset the state of the KMeans object. It deletes
** any stored centers, releasing the corresponding memory. This
** cancels the effect of seeding or setting the centers, but
** does not change the other configuration parameters.
**/
VL_EXPORT void
vl_kmeans_reset (VlKMeans * self)
{
  /* Forget the model geometry. */
  self->dimension = 0 ;
  self->numCenters = 0 ;

  /* Release any stored centers and inter-center distances, then clear
     the pointers so the object can be reset again or deleted safely. */
  if (self->centers != NULL) {
    vl_free (self->centers) ;
  }
  if (self->centerDistances != NULL) {
    vl_free (self->centerDistances) ;
  }
  self->centers = NULL ;
  self->centerDistances = NULL ;
}
/** ------------------------------------------------------------------
** @brief Create a new KMeans object
** @param dataType type of data (::VL_TYPE_FLOAT or ::VL_TYPE_DOUBLE)
** @param distance distance.
** @return new KMeans object instance.
**/
VL_EXPORT VlKMeans *
vl_kmeans_new (vl_type dataType,
               VlVectorComparisonType distance)
{
  VlKMeans * self = vl_calloc(1, sizeof(VlKMeans)) ;

  /* Data configuration. */
  self->dataType = dataType ;
  self->distance = distance ;

  /* Default optimization parameters. */
  self->algorithm = VlKMeansLloyd ;
  self->verbosity = 0 ;
  self->maxNumIterations = 100 ;
  self->minEnergyVariation = 1e-4 ;
  self->numRepetitions = 1 ;

  /* ANN (KD-tree) defaults. */
  self->numTrees = 3 ;
  self->maxNumComparisons = 100 ;

  /* No centers are stored yet; reset puts the object in a clean state. */
  self->centers = NULL ;
  self->centerDistances = NULL ;
  vl_kmeans_reset (self) ;

  return self ;
}
/** ------------------------------------------------------------------
** @brief Create a new KMeans object by copy
** @param kmeans KMeans object to copy.
** @return new copy.
**/
/** ------------------------------------------------------------------
 ** @brief Create a new KMeans object by copy
 ** @param kmeans KMeans object to copy.
 ** @return new copy.
 **
 ** All configuration parameters are copied; the centers and the
 ** inter-center distances, if present, are deep-copied.
 **/

VL_EXPORT VlKMeans *
vl_kmeans_new_copy (VlKMeans const * kmeans)
{
  VlKMeans * self = vl_malloc(sizeof(VlKMeans)) ;
  self->algorithm = kmeans->algorithm ;
  self->distance = kmeans->distance ;
  self->dataType = kmeans->dataType ;

  self->verbosity = kmeans->verbosity ;
  self->maxNumIterations = kmeans->maxNumIterations ;
  /* Fix: minEnergyVariation was not copied before; since vl_malloc does
     not zero the memory, the copy's stopping criterion was read from
     uninitialized storage. */
  self->minEnergyVariation = kmeans->minEnergyVariation ;
  self->numRepetitions = kmeans->numRepetitions ;

  self->dimension = kmeans->dimension ;
  self->numCenters = kmeans->numCenters ;
  self->centers = NULL ;
  self->centerDistances = NULL ;

  self->numTrees = kmeans->numTrees;
  self->maxNumComparisons = kmeans->maxNumComparisons;

  /* Deep-copy the centers, if any. */
  if (kmeans->centers) {
    vl_size dataSize = vl_get_type_size(self->dataType) * self->dimension * self->numCenters ;
    self->centers = vl_malloc(dataSize) ;
    memcpy (self->centers, kmeans->centers, dataSize) ;
  }

  /* Deep-copy the cached inter-center distances, if any. */
  if (kmeans->centerDistances) {
    vl_size dataSize = vl_get_type_size(self->dataType) * self->numCenters * self->numCenters ;
    self->centerDistances = vl_malloc(dataSize) ;
    memcpy (self->centerDistances, kmeans->centerDistances, dataSize) ;
  }

  return self ;
}
/** ------------------------------------------------------------------
** @brief Deletes a KMeans object
** @param self KMeans object instance.
**
** The function deletes the KMeans object instance created
** by ::vl_kmeans_new.
**/
VL_EXPORT void
vl_kmeans_delete (VlKMeans * self)
{
  /* Release the owned centers/center-distance buffers first, then the
     object itself. */
  vl_kmeans_reset (self) ;
  vl_free (self) ;
}
/* A helper structure bundling the state needed by the sorting/shuffle
   template instantiated below. NOTE(review): field semantics inferred
   from names — the code that consumes this wrapper is outside this
   chunk; confirm against shuffle-def.h / qsort-def.h usage. */
typedef struct _VlKMeansSortWrapper {
  vl_uint32 * permutation ;  /* permutation of data indexes being sorted */
  void const * data ;        /* pointer to the underlying data */
  vl_size stride ;           /* byte stride between consecutive data */
} VlKMeansSortWrapper ;
/* ---------------------------------------------------------------- */
/* Instantiate shuffle algorithm */
#define VL_SHUFFLE_type vl_uindex
#define VL_SHUFFLE_prefix _vl_kmeans
#include "shuffle-def.h"
/* #ifdef VL_KMEANS_INSTANTITATING */
#endif
/* ================================================================ */
#ifdef VL_KMEANS_INSTANTIATING
/* ---------------------------------------------------------------- */
/* Set centers */
/* ---------------------------------------------------------------- */
/* Store a user-supplied set of centers into the KMeans object.
   The centers are deep-copied, so the caller retains ownership of the
   input buffer. */
static void
VL_XCAT(_vl_kmeans_set_centers_, SFX)
(VlKMeans * self,
 TYPE const * centers,
 vl_size dimension,
 vl_size numCenters)
{
  self->dimension = dimension ;
  self->numCenters = numCenters ;
  /* Fix: free any previously stored centers before allocating the new
     buffer; the old code leaked the existing allocation when centers
     were set (or seeded) more than once. */
  if (self->centers) vl_free(self->centers) ;
  self->centers = vl_malloc (sizeof(TYPE) * dimension * numCenters) ;
  memcpy ((TYPE*)self->centers, centers,
          sizeof(TYPE) * dimension * numCenters) ;
}
/* ---------------------------------------------------------------- */
/* Random seeding */
/* ---------------------------------------------------------------- */
/* Seed the centers with numCenters distinct data points chosen through a
   random permutation of the data, skipping exact duplicates while enough
   candidates remain. Assumes numData >= numCenters — TODO confirm;
   otherwise perm[i] can run past the permutation. NOTE(review): a
   previously allocated self->centers is overwritten without being freed. */
static void
VL_XCAT(_vl_kmeans_init_centers_with_rand_data_, SFX)
(VlKMeans * self,
 TYPE const * data,
 vl_size dimension,
 vl_size numData,
 vl_size numCenters)
{
  vl_uindex i, j, k ;
  VlRand * rand = vl_get_rand () ;

  self->dimension = dimension ;
  self->numCenters = numCenters ;
  self->centers = vl_malloc (sizeof(TYPE) * dimension * numCenters) ;

  {
    vl_uindex * perm = vl_malloc (sizeof(vl_uindex) * numData) ;
#if (FLT == VL_TYPE_FLOAT)
    VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
    VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif
    TYPE * distances = vl_malloc (sizeof(TYPE) * numCenters) ;

    /* get a random permutation of the data point */
    for (i = 0 ; i < numData ; ++i) perm[i] = i ;
    _vl_kmeans_shuffle (perm, numData, rand) ;

    /* Walk the permutation: k counts centers accepted so far, i counts
       candidates examined. */
    for (k = 0, i = 0 ; k < numCenters ; ++ i) {

      /* compare the next data point to all centers collected so far
         to detect duplicates (if there are enough left)
       */
      if (numCenters - k < numData - i) {
        vl_bool duplicateDetected = VL_FALSE ;
        VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)(distances,
                                                              dimension,
                                                              data + dimension * perm[i], 1,
                                                              (TYPE*)self->centers, k,
                                                              distFn) ;
        /* Zero distance to an accepted center means an exact duplicate. */
        for (j = 0 ; j < k ; ++j) {
          duplicateDetected |= (distances[j] == 0) ;
        }
        if (duplicateDetected) continue ;
      }

      /* ok, it is not a duplicate so we can accept it! */
      memcpy ((TYPE*)self->centers + dimension * k,
              data + dimension * perm[i],
              sizeof(TYPE) * dimension) ;
      k ++ ;
    }
    vl_free(distances) ;
    vl_free(perm) ;
  }
}
/* ---------------------------------------------------------------- */
/* kmeans++ seeding */
/* ---------------------------------------------------------------- */
/* Seed the centers with the k-means++ strategy: the first center is a
   uniformly random data point; each subsequent center is a data point
   sampled with probability proportional to its distance from the closest
   center selected so far (inverse-CDF sampling over minDistances). */
static void
VL_XCAT(_vl_kmeans_init_centers_plus_plus_, SFX)
(VlKMeans * self,
 TYPE const * data,
 vl_size dimension,
 vl_size numData,
 vl_size numCenters)
{
  vl_uindex x, c ;
  VlRand * rand = vl_get_rand () ;
  TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ;
  TYPE * minDistances = vl_malloc (sizeof(TYPE) * numData) ;
#if (FLT == VL_TYPE_FLOAT)
  VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
  VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif

  self->dimension = dimension ;
  self->numCenters = numCenters ;
  self->centers = vl_malloc (sizeof(TYPE) * dimension * numCenters) ;

  /* Every point starts at infinite distance from the (empty) center set. */
  for (x = 0 ; x < numData ; ++x) {
    minDistances[x] = (TYPE) VL_INFINITY_D ;
  }

  /* select the first point at random */
  x = vl_rand_uindex (rand, numData) ;
  c = 0 ;
  while (1) {
    TYPE energy = 0 ;
    TYPE acc = 0 ;
    TYPE thresh = (TYPE) vl_rand_real1 (rand) ;

    /* Commit data point x as center number c. */
    memcpy ((TYPE*)self->centers + c * dimension,
            data + x * dimension,
            sizeof(TYPE) * dimension) ;

    c ++ ;
    if (c == numCenters) break ;

    /* Distances of every data point to the newly added center. */
    VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)
      (distances,
       dimension,
       (TYPE*)self->centers + (c - 1) * dimension, 1,
       data, numData,
       distFn) ;

    /* Update each point's distance to its closest center; energy is the
       normalizer of the sampling distribution. */
    for (x = 0 ; x < numData ; ++x) {
      minDistances[x] = VL_MIN(minDistances[x], distances[x]) ;
      energy += minDistances[x] ;
    }

    /* Sample the next center index x by walking the cumulative
       distribution until it crosses thresh * energy; the loop bound
       guarantees x <= numData - 1 even if no break occurs. */
    for (x = 0 ; x < numData - 1 ; ++x) {
      acc += minDistances[x] ;
      if (acc >= thresh * energy) break ;
    }
  }

  vl_free(distances) ;
  vl_free(minDistances) ;
}
/* ---------------------------------------------------------------- */
/* Quantization */
/* ---------------------------------------------------------------- */
/* Assign each data point to its closest center by exhaustive search.
   Writes assignments[i] in [0, numCenters) and, if distances is
   non-NULL, the corresponding point-to-center distance. Parallelized
   over data points with OpenMP when available. */
static void
VL_XCAT(_vl_kmeans_quantize_, SFX)
(VlKMeans * self,
 vl_uint32 * assignments,  /* out: numData entries */
 TYPE * distances,         /* out: numData entries; may be NULL */
 TYPE const * data,        /* numData x dimension samples, row major */
 vl_size numData)
{
  vl_index i ;
#if (FLT == VL_TYPE_FLOAT)
  VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
  VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif
#ifdef _OPENMP
#pragma omp parallel \
            shared(self, distances, assignments, numData, distFn, data) \
            num_threads(vl_get_max_threads())
#endif
  {
    /* vl_malloc cannot be used here if mapped to MATLAB malloc
       (not thread safe inside the parallel region) */
    TYPE * distanceToCenters = malloc(sizeof(TYPE) * self->numCenters) ;
#ifdef _OPENMP
#pragma omp for
#endif
    for (i = 0 ; i < (signed)numData ; ++i) {
      vl_uindex k ;
      TYPE bestDistance = (TYPE) VL_INFINITY_D ;
      /* distances from point i to all centers in one call */
      VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)(distanceToCenters,
                                                            self->dimension,
                                                            data + self->dimension * i, 1,
                                                            (TYPE*)self->centers, self->numCenters,
                                                            distFn) ;
      /* argmin scan; NOTE(review): assignments[i] is left unwritten only
         if every distance is NaN/infinite -- assumed not to occur */
      for (k = 0 ; k < self->numCenters ; ++k) {
        if (distanceToCenters[k] < bestDistance) {
          bestDistance = distanceToCenters[k] ;
          assignments[i] = (vl_uint32)k ;
        }
      }
      if (distances) distances[i] = bestDistance ;
    }
    free(distanceToCenters) ;
  }
}
/* ---------------------------------------------------------------- */
/* ANN quantization */
/* ---------------------------------------------------------------- */
/* Approximate quantization: assign data points to centers using a
   kd-forest (ANN search) instead of exhaustive comparison. When update
   is true, a point keeps its previous assignment if that center is
   still closer than the ANN candidate (previous distance is recomputed
   exactly with distFn). */
static void
VL_XCAT(_vl_kmeans_quantize_ann_, SFX)
(VlKMeans * self,
 vl_uint32 * assignments,  /* in/out: numData entries (input when update) */
 TYPE * distances,         /* out: numData entries; may be NULL */
 TYPE const * data,        /* numData x dimension samples, row major */
 vl_size numData,
 vl_bool update)           /* compare against previous assignments */
{
#if (FLT == VL_TYPE_FLOAT)
  VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
  VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif
  /* index the current centers for nearest-neighbor queries */
  VlKDForest * forest = vl_kdforest_new(self->dataType,self->dimension,self->numTrees, self->distance) ;
  vl_kdforest_set_max_num_comparisons(forest,self->maxNumComparisons);
  vl_kdforest_set_thresholding_method(forest,VL_KDTREE_MEDIAN);
  vl_kdforest_build(forest,self->numCenters,self->centers);
  //note #pragma omp parallel default(none)
#ifdef _OPENMP
#pragma omp parallel \
            num_threads(vl_get_max_threads()) \
            shared(self, forest, update, assignments, distances, data, numData, distFn)
#endif
  {
    VlKDForestNeighbor neighbor ;
    VlKDForestSearcher * searcher ;
    vl_index x;
    /* searcher creation mutates the forest, hence the critical section */
#ifdef _OPENMP
#pragma omp critical
#endif
    searcher = vl_kdforest_new_searcher (forest) ;
#ifdef _OPENMP
#pragma omp for
#endif
    for(x = 0 ; x < (signed)numData ; ++x) {
      vl_kdforestsearcher_query (searcher, &neighbor, 1, (TYPE const *) (data + x*self->dimension));
      if (distances) {
        if(!update) {
          distances[x] = (TYPE) neighbor.distance;
          assignments[x] = (vl_uint32) neighbor.index ;
        } else {
          /* exact distance to the previously assigned center */
          TYPE prevDist = (TYPE) distFn(self->dimension,
                                        data + self->dimension * x,
                                        (TYPE*)self->centers + self->dimension *assignments[x]);
          if (prevDist > (TYPE) neighbor.distance) {
            distances[x] = (TYPE) neighbor.distance ;
            assignments[x] = (vl_uint32) neighbor.index ;
          } else {
            distances[x] = prevDist ;
          }
        }
      } else {
        /* NOTE(review): when distances == NULL the update flag is
           ignored and the ANN result always overwrites the previous
           assignment -- confirm this is intended at the call sites */
        assignments[x] = (vl_uint32) neighbor.index ;
      }
    } /* end for */
  } /* end of parallel region */
  vl_kdforest_delete(forest);
}
/* ---------------------------------------------------------------- */
/* Helper functions */
/* ---------------------------------------------------------------- */
/* The sorting routine is used to find increasing permutation of each
* data dimension. This is used to quickly find the median for l1
* distance clustering. */
/* Comparator for the per-dimension permutation sort: orders two
   permutation slots by the data values they point at (ascending). */
VL_INLINE TYPE
VL_XCAT3(_vl_kmeans_, SFX, _qsort_cmp)
(VlKMeansSortWrapper * array, vl_uindex indexA, vl_uindex indexB)
{
  TYPE const * values = (TYPE const *) array->data ;
  TYPE valueA = values [array->permutation[indexA] * array->stride] ;
  TYPE valueB = values [array->permutation[indexB] * array->stride] ;
  return valueA - valueB ;
}
/* Swap two slots of the permutation array (the data itself is never
   moved; only the permutation is sorted). */
VL_INLINE void
VL_XCAT3(_vl_kmeans_, SFX, _qsort_swap)
(VlKMeansSortWrapper * array, vl_uindex indexA, vl_uindex indexB)
{
  vl_uint32 * perm = array->permutation ;
  vl_uint32 saved = perm[indexA] ;
  perm[indexA] = perm[indexB] ;
  perm[indexB] = saved ;
}
#define VL_QSORT_prefix VL_XCAT3(_vl_kmeans_, SFX, _qsort)
#define VL_QSORT_array VlKMeansSortWrapper*
#define VL_QSORT_cmp VL_XCAT3(_vl_kmeans_, SFX, _qsort_cmp)
#define VL_QSORT_swap VL_XCAT3(_vl_kmeans_, SFX, _qsort_swap)
#include "qsort-def.h"
/* For each dimension d, store in permutations + d * numData the
   permutation that sorts the data along that dimension. Used to find
   medians quickly in the l1 center update. */
static void
VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)
(VlKMeans * self, vl_uint32 * permutations, TYPE const * data, vl_size numData)
{
  vl_uindex dim ;
  for (dim = 0 ; dim < self->dimension ; ++dim) {
    vl_uindex i ;
    VlKMeansSortWrapper wrapper ;
    wrapper.permutation = permutations + dim * numData ;
    wrapper.data = data + dim ;
    wrapper.stride = self->dimension ;
    /* start from the identity permutation */
    for (i = 0 ; i < numData ; ++i) {
      wrapper.permutation[i] = (vl_uint32)i ;
    }
    VL_XCAT3(_vl_kmeans_, SFX, _qsort_sort)(&wrapper, numData) ;
  }
}
/* ---------------------------------------------------------------- */
/* Lloyd refinement */
/* ---------------------------------------------------------------- */
/* Lloyd (exact) k-means refinement: alternate exhaustive assignment and
   center re-estimation until full convergence, the relative energy
   variation drops below minEnergyVariation, or maxNumIterations is hit.
   Empty clusters are restarted from a random data point. Returns the
   final energy (sum of point-to-assigned-center distances).

   Fix: the energy-variation termination message wrongly said "ANN";
   it now says "Lloyd" to match the refinement actually running. */
static double
VL_XCAT(_vl_kmeans_refine_centers_lloyd_, SFX)
(VlKMeans * self,
 TYPE const * data,     /* numData x dimension samples, row major */
 vl_size numData)
{
  vl_size c, d, x, iteration ;
  double previousEnergy = VL_INFINITY_D ;
  double initialEnergy = VL_INFINITY_D ;
  double energy ;
  TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ;
  vl_uint32 * assignments = vl_malloc (sizeof(vl_uint32) * numData) ;
  /* NOTE(review): sized by numData although only numCenters entries are
     used -- relies on numCenters <= numData; kept as is */
  vl_size * clusterMasses = vl_malloc (sizeof(vl_size) * numData) ;
  vl_uint32 * permutations = NULL ;
  vl_size * numSeenSoFar = NULL ;
  VlRand * rand = vl_get_rand () ;
  vl_size totNumRestartedCenters = 0 ;
  vl_size numRestartedCenters = 0 ;
  if (self->distance == VlDistanceL1) {
    /* l1 centers are medians: pre-sort each dimension once */
    permutations = vl_malloc(sizeof(vl_uint32) * numData * self->dimension) ;
    numSeenSoFar = vl_malloc(sizeof(vl_size) * self->numCenters) ;
    VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)(self, permutations, data, numData) ;
  }
  for (energy = VL_INFINITY_D,
       iteration = 0;
       1 ;
       ++ iteration) {
    /* assign data to clusters */
    VL_XCAT(_vl_kmeans_quantize_, SFX)(self, assignments, distances, data, numData) ;
    /* compute energy */
    energy = 0 ;
    for (x = 0 ; x < numData ; ++x) energy += distances[x] ;
    if (self->verbosity) {
      VL_PRINTF("kmeans: Lloyd iter %d: energy = %g\n", iteration,
                energy) ;
    }
    /* check termination conditions */
    if (iteration >= self->maxNumIterations) {
      if (self->verbosity) {
        VL_PRINTF("kmeans: Lloyd terminating because maximum number of iterations reached\n") ;
      }
      break ;
    }
    if (energy == previousEnergy) {
      if (self->verbosity) {
        VL_PRINTF("kmeans: Lloyd terminating because the algorithm fully converged\n") ;
      }
      break ;
    }
    if (iteration == 0) {
      initialEnergy = energy ;
    } else {
      /* relative improvement w.r.t. the total improvement so far */
      double eps = (previousEnergy - energy) / (initialEnergy - energy) ;
      if (eps < self->minEnergyVariation) {
        if (self->verbosity) {
          VL_PRINTF("kmeans: Lloyd terminating because the energy relative variation was less than %f\n", self->minEnergyVariation) ;
        }
        break ;
      }
    }
    /* begin next iteration */
    previousEnergy = energy ;
    /* update clusters */
    memset(clusterMasses, 0, sizeof(vl_size) * numData) ;
    for (x = 0 ; x < numData ; ++x) {
      clusterMasses[assignments[x]] ++ ;
    }
    numRestartedCenters = 0 ;
    switch (self->distance) {
      case VlDistanceL2:
        /* l2 centers are cluster means */
        memset(self->centers, 0, sizeof(TYPE) * self->dimension * self->numCenters) ;
        for (x = 0 ; x < numData ; ++x) {
          TYPE * cpt = (TYPE*)self->centers + assignments[x] * self->dimension ;
          TYPE const * xpt = data + x * self->dimension ;
          for (d = 0 ; d < self->dimension ; ++d) {
            cpt[d] += xpt[d] ;
          }
        }
        for (c = 0 ; c < self->numCenters ; ++c) {
          TYPE * cpt = (TYPE*)self->centers + c * self->dimension ;
          if (clusterMasses[c] > 0) {
            TYPE mass = clusterMasses[c] ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] /= mass ;
            }
          } else {
            /* empty cluster: restart from a random data point */
            vl_uindex x = vl_rand_uindex(rand, numData) ;
            numRestartedCenters ++ ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] = data[x * self->dimension + d] ;
            }
          }
        }
        break ;
      case VlDistanceL1:
        /* l1 centers are per-dimension cluster medians, found by
           scanning each dimension in sorted order */
        for (d = 0 ; d < self->dimension ; ++d) {
          vl_uint32 * perm = permutations + d * numData ;
          memset(numSeenSoFar, 0, sizeof(vl_size) * self->numCenters) ;
          for (x = 0; x < numData ; ++x) {
            c = assignments[perm[x]] ;
            if (2 * numSeenSoFar[c] < clusterMasses[c]) {
              ((TYPE*)self->centers) [d + c * self->dimension] =
              data [d + perm[x] * self->dimension] ;
            }
            numSeenSoFar[c] ++ ;
          }
          /* restart the centers as required */
          for (c = 0 ; c < self->numCenters ; ++c) {
            if (clusterMasses[c] == 0) {
              TYPE * cpt = (TYPE*)self->centers + c * self->dimension ;
              vl_uindex x = vl_rand_uindex(rand, numData) ;
              numRestartedCenters ++ ;
              for (d = 0 ; d < self->dimension ; ++d) {
                cpt[d] = data[x * self->dimension + d] ;
              }
            }
          }
        }
        break ;
      default:
        abort();
    } /* done compute centers */
    totNumRestartedCenters += numRestartedCenters ;
    if (self->verbosity && numRestartedCenters) {
      VL_PRINTF("kmeans: Lloyd iter %d: restarted %d centers\n", iteration,
                numRestartedCenters) ;
    }
  } /* next Lloyd iteration */
  if (permutations) {
    vl_free(permutations) ;
  }
  if (numSeenSoFar) {
    vl_free(numSeenSoFar) ;
  }
  vl_free(distances) ;
  vl_free(assignments) ;
  vl_free(clusterMasses) ;
  return energy ;
}
/* Refresh the cached numCenters x numCenters matrix of inter-center
   distances (allocated lazily on first use). Returns the number of
   distinct center pairs, i.e. the distance computations performed. */
static double
VL_XCAT(_vl_kmeans_update_center_distances_, SFX)
(VlKMeans * self)
{
#if (FLT == VL_TYPE_FLOAT)
  VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
  VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif
  vl_size const numCenters = self->numCenters ;
  if (self->centerDistances == NULL) {
    self->centerDistances = vl_malloc (sizeof(TYPE) * numCenters * numCenters) ;
  }
  VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)(self->centerDistances,
                                                        self->dimension,
                                                        self->centers, numCenters,
                                                        NULL, 0,
                                                        distFn) ;
  return numCenters * (numCenters - 1) / 2 ;
}
/* ANN k-means refinement: same alternation as Lloyd, but assignments
   are computed approximately with a kd-forest (quantize_ann). From the
   second iteration on the previous assignment is kept when still
   closer (update flag). Returns the final (approximate) energy. */
static double
VL_XCAT(_vl_kmeans_refine_centers_ann_, SFX)
(VlKMeans * self,
 TYPE const * data,     /* numData x dimension samples, row major */
 vl_size numData)
{
  vl_size c, d, x, iteration ;
  double initialEnergy = VL_INFINITY_D ;
  double previousEnergy = VL_INFINITY_D ;
  double energy ;
  vl_uint32 * permutations = NULL ;
  vl_size * numSeenSoFar = NULL ;
  VlRand * rand = vl_get_rand () ;
  vl_size totNumRestartedCenters = 0 ;
  vl_size numRestartedCenters = 0 ;
  vl_uint32 * assignments = vl_malloc (sizeof(vl_uint32) * numData) ;
  /* NOTE(review): sized by numData although only numCenters entries are
     used -- relies on numCenters <= numData; kept as is */
  vl_size * clusterMasses = vl_malloc (sizeof(vl_size) * numData) ;
  TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ;
  if (self->distance == VlDistanceL1) {
    /* l1 centers are medians: pre-sort each dimension once */
    permutations = vl_malloc(sizeof(vl_uint32) * numData * self->dimension) ;
    numSeenSoFar = vl_malloc(sizeof(vl_size) * self->numCenters) ;
    VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)(self, permutations, data, numData) ;
  }
  for (energy = VL_INFINITY_D,
       iteration = 0;
       1 ;
       ++ iteration) {
    /* assign data to clusters (update mode after the first iteration) */
    VL_XCAT(_vl_kmeans_quantize_ann_, SFX)(self, assignments, distances, data, numData, iteration > 0) ;
    /* compute energy */
    energy = 0 ;
    for (x = 0 ; x < numData ; ++x) energy += distances[x] ;
    if (self->verbosity) {
      VL_PRINTF("kmeans: ANN iter %d: energy = %g\n", iteration,
                energy) ;
    }
    /* check termination conditions */
    if (iteration >= self->maxNumIterations) {
      if (self->verbosity) {
        VL_PRINTF("kmeans: ANN terminating because the maximum number of iterations has been reached\n") ;
      }
      break ;
    }
    if (energy == previousEnergy) {
      if (self->verbosity) {
        VL_PRINTF("kmeans: ANN terminating because the algorithm fully converged\n") ;
      }
      break ;
    }
    if (iteration == 0) {
      initialEnergy = energy ;
    } else {
      /* relative improvement w.r.t. the total improvement so far */
      double eps = (previousEnergy - energy) / (initialEnergy - energy) ;
      if (eps < self->minEnergyVariation) {
        if (self->verbosity) {
          VL_PRINTF("kmeans: ANN terminating because the energy relative variation was less than %f\n", self->minEnergyVariation) ;
        }
        break ;
      }
    }
    /* begin next iteration */
    previousEnergy = energy ;
    /* update clusters */
    memset(clusterMasses, 0, sizeof(vl_size) * numData) ;
    for (x = 0 ; x < numData ; ++x) {
      clusterMasses[assignments[x]] ++ ;
    }
    numRestartedCenters = 0 ;
    switch (self->distance) {
      case VlDistanceL2:
        /* l2 centers are cluster means */
        memset(self->centers, 0, sizeof(TYPE) * self->dimension * self->numCenters) ;
        for (x = 0 ; x < numData ; ++x) {
          TYPE * cpt = (TYPE*)self->centers + assignments[x] * self->dimension ;
          TYPE const * xpt = data + x * self->dimension ;
          for (d = 0 ; d < self->dimension ; ++d) {
            cpt[d] += xpt[d] ;
          }
        }
        for (c = 0 ; c < self->numCenters ; ++c) {
          TYPE * cpt = (TYPE*)self->centers + c * self->dimension ;
          if (clusterMasses[c] > 0) {
            TYPE mass = clusterMasses[c] ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] /= mass ;
            }
          } else {
            /* empty cluster: restart from a random data point */
            vl_uindex x = vl_rand_uindex(rand, numData) ;
            numRestartedCenters ++ ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] = data[x * self->dimension + d] ;
            }
          }
        }
        break ;
      case VlDistanceL1:
        /* l1 centers are per-dimension cluster medians, found by
           scanning each dimension in sorted order */
        for (d = 0 ; d < self->dimension ; ++d) {
          vl_uint32 * perm = permutations + d * numData ;
          memset(numSeenSoFar, 0, sizeof(vl_size) * self->numCenters) ;
          for (x = 0; x < numData ; ++x) {
            c = assignments[perm[x]] ;
            if (2 * numSeenSoFar[c] < clusterMasses[c]) {
              ((TYPE*)self->centers) [d + c * self->dimension] =
              data [d + perm[x] * self->dimension] ;
            }
            numSeenSoFar[c] ++ ;
          }
          /* restart the centers as required */
          for (c = 0 ; c < self->numCenters ; ++c) {
            if (clusterMasses[c] == 0) {
              TYPE * cpt = (TYPE*)self->centers + c * self->dimension ;
              vl_uindex x = vl_rand_uindex(rand, numData) ;
              numRestartedCenters ++ ;
              for (d = 0 ; d < self->dimension ; ++d) {
                cpt[d] = data[x * self->dimension + d] ;
              }
            }
          }
        }
        break ;
      default:
        VL_PRINT("bad distance set: %d\n",self->distance);
        abort();
    } /* done compute centers */
    totNumRestartedCenters += numRestartedCenters ;
    if (self->verbosity && numRestartedCenters) {
      VL_PRINTF("kmeans: ANN iter %d: restarted %d centers\n", iteration,
                numRestartedCenters) ;
    }
  }
  if (permutations) {
    vl_free(permutations) ;
  }
  if (numSeenSoFar) {
    vl_free(numSeenSoFar) ;
  }
  vl_free(distances) ;
  vl_free(assignments) ;
  vl_free(clusterMasses) ;
  return energy ;
}
/* ---------------------------------------------------------------- */
/* Elkan refinement */
/* ---------------------------------------------------------------- */
/* Elkan k-means refinement: uses the triangle inequality to maintain
   upper/lower bounds on point-to-center distances and skip most exact
   distance computations (Elkan, ICML 2003). Returns the final exact
   energy.

   Fix: the "restarted %d centers" diagnostic passed three arguments
   (iteration, energy, numRestartedCenters) for two %d conversions --
   undefined behavior and the restart count was never printed; the
   spurious energy argument is removed. */
static double
VL_XCAT(_vl_kmeans_refine_centers_elkan_, SFX)
(VlKMeans * self,
 TYPE const * data,     /* numData x dimension samples, row major */
 vl_size numData)
{
  vl_size d, iteration ;
  vl_index x ;
  vl_uint32 c, j ;
  vl_bool allDone ;
  TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ;
  vl_uint32 * assignments = vl_malloc (sizeof(vl_uint32) * numData) ;
  /* NOTE(review): sized by numData although only numCenters entries are
     used -- relies on numCenters <= numData; kept as is */
  vl_size * clusterMasses = vl_malloc (sizeof(vl_size) * numData) ;
  VlRand * rand = vl_get_rand () ;
#if (FLT == VL_TYPE_FLOAT)
  VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
  VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif
  /* per-center: distance to its closest other center */
  TYPE * nextCenterDistances = vl_malloc (sizeof(TYPE) * self->numCenters) ;
  /* per-point: upper bound on the distance to its assigned center */
  TYPE * pointToClosestCenterUB = vl_malloc (sizeof(TYPE) * numData) ;
  /* per-point: whether the UB is exact (strict) rather than relaxed */
  vl_bool * pointToClosestCenterUBIsStrict = vl_malloc (sizeof(vl_bool) * numData) ;
  /* per point/center: lower bound on the distance */
  TYPE * pointToCenterLB = vl_malloc (sizeof(TYPE) * numData * self->numCenters) ;
  TYPE * newCenters = vl_malloc(sizeof(TYPE) * self->dimension * self->numCenters) ;
  TYPE * centerToNewCenterDistances = vl_malloc (sizeof(TYPE) * self->numCenters) ;
  vl_uint32 * permutations = NULL ;
  vl_size * numSeenSoFar = NULL ;
  double energy ;
  vl_size totDistanceComputationsToInit = 0 ;
  vl_size totDistanceComputationsToRefreshUB = 0 ;
  vl_size totDistanceComputationsToRefreshLB = 0 ;
  vl_size totDistanceComputationsToRefreshCenterDistances = 0 ;
  vl_size totDistanceComputationsToNewCenters = 0 ;
  vl_size totDistanceComputationsToFinalize = 0 ;
  vl_size totNumRestartedCenters = 0 ;
  if (self->distance == VlDistanceL1) {
    /* l1 centers are medians: pre-sort each dimension once */
    permutations = vl_malloc(sizeof(vl_uint32) * numData * self->dimension) ;
    numSeenSoFar = vl_malloc(sizeof(vl_size) * self->numCenters) ;
    VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)(self, permutations, data, numData) ;
  }
  /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
  /*                          Initialization                        */
  /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
  /* An iteration is: get_new_centers + reassign + get_energy.
     This counts as iteration 0, where get_new_centers is assumed
     to be performed before calling the train function by
     the initialization function */
  /* update distances between centers */
  totDistanceComputationsToInit +=
  VL_XCAT(_vl_kmeans_update_center_distances_, SFX)(self) ;
  /* assign points to the initial centers and initialize bounds */
  memset(pointToCenterLB, 0, sizeof(TYPE) * self->numCenters *  numData) ;
  for (x = 0 ; x < (signed)numData ; ++x) {
    TYPE distance ;
    /* do the first center */
    assignments[x] = 0 ;
    distance = distFn(self->dimension,
                      data + x * self->dimension,
                      (TYPE*)self->centers + 0) ;
    pointToClosestCenterUB[x] = distance ;
    pointToClosestCenterUBIsStrict[x] = VL_TRUE ;
    pointToCenterLB[0 + x * self->numCenters] = distance ;
    totDistanceComputationsToInit += 1 ;
    /* do other centers */
    for (c = 1 ; c < self->numCenters ; ++c) {
      /* Can skip if the center assigned so far is twice as close
         as its distance to the center under consideration */
      if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) *
          pointToClosestCenterUB[x] <=
          ((TYPE*)self->centerDistances)
          [c + assignments[x] * self->numCenters]) {
        continue ;
      }
      distance = distFn(self->dimension,
                        data + x * self->dimension,
                        (TYPE*)self->centers + c * self->dimension) ;
      pointToCenterLB[c + x * self->numCenters] = distance ;
      totDistanceComputationsToInit += 1 ;
      if (distance < pointToClosestCenterUB[x]) {
        pointToClosestCenterUB[x] = distance ;
        assignments[x] = c ;
      }
    }
  }
  /* compute UB on energy */
  energy = 0 ;
  for (x = 0 ; x < (signed)numData ; ++x) {
    energy += pointToClosestCenterUB[x] ;
  }
  if (self->verbosity) {
    VL_PRINTF("kmeans: Elkan iter 0: energy = %g, dist. calc. = %d\n",
              energy, totDistanceComputationsToInit) ;
  }
  /* #define SANITY*/
#ifdef SANITY
  {
    int xx ;
    int cc ;
    TYPE tol = 1e-5 ;
    VL_PRINTF("inconsistencies after initial assignments:\n");
    for (xx = 0 ; xx < numData ; ++xx) {
      for (cc = 0 ; cc < self->numCenters ; ++cc) {
        TYPE a = pointToCenterLB[cc + xx * self->numCenters] ;
        TYPE b = distFn(self->dimension,
                        data + self->dimension * xx,
                        (TYPE*)self->centers + self->dimension * cc) ;
        if (cc == assignments[xx]) {
          TYPE z = pointToClosestCenterUB[xx] ;
          if (z+tol<b) VL_PRINTF("UB %d %d = %f < %f\n",
                                 cc, xx, z, b) ;
        }
        if (a>b+tol) VL_PRINTF("LB %d %d = %f > %f\n",
                               cc, xx, a, b) ;
      }
    }
  }
#endif
  /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
  /*                          Iterations                            */
  /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
  for (iteration = 1 ; 1; ++iteration) {
    vl_size numDistanceComputationsToRefreshUB = 0 ;
    vl_size numDistanceComputationsToRefreshLB = 0 ;
    vl_size numDistanceComputationsToRefreshCenterDistances = 0 ;
    vl_size numDistanceComputationsToNewCenters = 0 ;
    vl_size numRestartedCenters = 0 ;
    /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    /*                         Compute new centers                  */
    /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    memset(clusterMasses, 0, sizeof(vl_size) * numData) ;
    for (x = 0 ; x < (signed)numData ; ++x) {
      clusterMasses[assignments[x]] ++ ;
    }
    switch (self->distance) {
      case VlDistanceL2:
        /* l2 centers are cluster means */
        memset(newCenters, 0, sizeof(TYPE) * self->dimension * self->numCenters) ;
        for (x = 0 ; x < (signed)numData ; ++x) {
          TYPE * cpt = newCenters + assignments[x] * self->dimension ;
          TYPE const * xpt = data + x * self->dimension ;
          for (d = 0 ; d < self->dimension ; ++d) {
            cpt[d] += xpt[d] ;
          }
        }
        for (c = 0 ; c < self->numCenters ; ++c) {
          TYPE * cpt = newCenters + c * self->dimension ;
          if (clusterMasses[c] > 0) {
            TYPE mass = clusterMasses[c] ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] /= mass ;
            }
          } else {
            /* restart the center */
            vl_uindex x = vl_rand_uindex(rand, numData) ;
            numRestartedCenters ++ ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] = data[x * self->dimension + d] ;
            }
          }
        }
        break ;
      case VlDistanceL1:
        /* l1 centers are per-dimension cluster medians */
        for (d = 0 ; d < self->dimension ; ++d) {
          vl_uint32 * perm = permutations + d * numData ;
          memset(numSeenSoFar, 0, sizeof(vl_size) * self->numCenters) ;
          for (x = 0; x < (signed)numData ; ++x) {
            c = assignments[perm[x]] ;
            if (2 * numSeenSoFar[c] < clusterMasses[c]) {
              newCenters [d + c * self->dimension] =
              data [d + perm[x] * self->dimension] ;
            }
            numSeenSoFar[c] ++ ;
          }
        }
        /* restart the centers as required */
        for (c = 0 ; c < self->numCenters ; ++c) {
          if (clusterMasses[c] == 0) {
            TYPE * cpt = newCenters + c * self->dimension ;
            vl_uindex x = vl_rand_uindex(rand, numData) ;
            numRestartedCenters ++ ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] = data[x * self->dimension + d] ;
            }
          }
        }
        break ;
      default:
        abort();
    } /* done compute centers */
    /* compute the distance from the old centers to the new centers */
    for (c = 0 ; c < self->numCenters ; ++c) {
      TYPE distance = distFn(self->dimension,
                             newCenters + c * self->dimension,
                             (TYPE*)self->centers + c * self->dimension) ;
      centerToNewCenterDistances[c] = distance ;
      numDistanceComputationsToNewCenters += 1 ;
    }
    /* make the new centers current */
    {
      TYPE * tmp = self->centers ;
      self->centers = newCenters ;
      newCenters = tmp ;
    }
    /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    /*                Reassign points to a centers                  */
    /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    /*
     Update distances between centers.
     */
    numDistanceComputationsToRefreshCenterDistances
    += VL_XCAT(_vl_kmeans_update_center_distances_, SFX)(self) ;
    for (c = 0 ; c < self->numCenters ; ++c) {
      nextCenterDistances[c] = (TYPE) VL_INFINITY_D ;
      for (j = 0 ; j < self->numCenters ; ++j) {
        if (j == c) continue ;
        nextCenterDistances[c] = VL_MIN(nextCenterDistances[c],
                                        ((TYPE*)self->centerDistances)
                                        [j + c * self->numCenters]) ;
      }
    }
    /*
     Update upper bounds on point-to-closest-center distances
     based on the center variation.
     */
    for (x = 0 ; x < (signed)numData ; ++x) {
      TYPE a = pointToClosestCenterUB[x] ;
      TYPE b = centerToNewCenterDistances[assignments[x]] ;
      if (self->distance == VlDistanceL1) {
        pointToClosestCenterUB[x] = a + b ;
      } else {
        /* squared l2: (sqrt(a) + sqrt(b))^2 expanded */
#if (FLT == VL_TYPE_FLOAT)
        TYPE sqrtab =  sqrtf (a * b) ;
#else
        TYPE sqrtab =  sqrt (a * b) ;
#endif
        pointToClosestCenterUB[x] = a + b + 2.0 * sqrtab ;
      }
      pointToClosestCenterUBIsStrict[x] = VL_FALSE ;
    }
    /*
     Update lower bounds on point-to-center distances
     based on the center variation.
     */
#if defined(_OPENMP)
#pragma omp parallel for default(shared) private(x,c) num_threads(vl_get_max_threads())
#endif
    for (x = 0 ; x < (signed)numData ; ++x) {
      for (c = 0 ; c < self->numCenters ; ++c) {
        TYPE a = pointToCenterLB[c + x * self->numCenters] ;
        TYPE b = centerToNewCenterDistances[c] ;
        if (a < b) {
          pointToCenterLB[c + x * self->numCenters] = 0 ;
        } else {
          if (self->distance == VlDistanceL1) {
            pointToCenterLB[c + x * self->numCenters]  = a - b ;
          } else {
            /* squared l2: (sqrt(a) - sqrt(b))^2 expanded */
#if (FLT == VL_TYPE_FLOAT)
            TYPE sqrtab =  sqrtf (a * b) ;
#else
            TYPE sqrtab =  sqrt (a * b) ;
#endif
            pointToCenterLB[c + x * self->numCenters]  = a + b - 2.0 * sqrtab ;
          }
        }
      }
    }
#ifdef SANITY
    {
      int xx ;
      int cc ;
      TYPE tol = 1e-5 ;
      VL_PRINTF("inconsistencies before assignments:\n");
      for (xx = 0 ; xx < numData ; ++xx) {
        for (cc = 0 ; cc < self->numCenters ; ++cc) {
          TYPE a = pointToCenterLB[cc + xx * self->numCenters] ;
          TYPE b = distFn(self->dimension,
                          data + self->dimension * xx,
                          (TYPE*)self->centers + self->dimension * cc) ;
          if (cc == assignments[xx]) {
            TYPE z = pointToClosestCenterUB[xx] ;
            if (z+tol<b) VL_PRINTF("UB %d %d = %f < %f\n",
                                   cc, xx, z, b) ;
          }
          if (a>b+tol) VL_PRINTF("LB %d %d = %f > %f (assign = %d)\n",
                                 cc, xx, a, b, assignments[xx]) ;
        }
      }
    }
#endif
    /*
     Scan the data and do the reassignments. Use the bounds to
     skip as many point-to-center distance calculations as possible.
     */
    allDone = VL_TRUE ;
#if defined(_OPENMP)
#pragma omp parallel for \
            shared(self,numData, \
              pointToClosestCenterUB,pointToCenterLB, \
              nextCenterDistances,pointToClosestCenterUBIsStrict, \
              assignments,data,distFn,allDone) \
            private(c,x) \
            reduction(+:numDistanceComputationsToRefreshUB,numDistanceComputationsToRefreshLB) \
            num_threads(vl_get_max_threads())
#endif
    for (x = 0 ; x < (signed)numData ; ++ x) {
      /*
       A point x sticks with its current center assignmets[x]
       the UB to d(x, c[assigmnets[x]]) is not larger than half
       the distance of c[assigments[x]] to any other center c.
       */
      if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) *
          pointToClosestCenterUB[x] <= nextCenterDistances[assignments[x]]) {
        continue ;
      }
      for (c = 0 ; c < self->numCenters ; ++c) {
        vl_uint32 cx = assignments[x] ;
        TYPE distance ;
        /* The point is not reassigned to a given center c
         if either:
         0 - c is already the assigned center
         1 - The UB of d(x, c[assignments[x]]) is smaller than half
         the distance of c[assigments[x]] to c, OR
         2 - The UB of d(x, c[assignmets[x]]) is smaller than the
         LB of the distance of x to c.
         */
        if (cx == c) {
          continue ;
        }
        if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) *
            pointToClosestCenterUB[x] <= ((TYPE*)self->centerDistances)
            [c + cx * self->numCenters]) {
          continue ;
        }
        if (pointToClosestCenterUB[x] <= pointToCenterLB
            [c + x * self->numCenters]) {
          continue ;
        }
        /* If the UB is loose, try recomputing it and test again */
        if (! pointToClosestCenterUBIsStrict[x]) {
          distance = distFn(self->dimension,
                            data + self->dimension * x,
                            (TYPE*)self->centers + self->dimension * cx) ;
          pointToClosestCenterUB[x] = distance ;
          pointToClosestCenterUBIsStrict[x] = VL_TRUE ;
          pointToCenterLB[cx + x * self->numCenters] = distance ;
          numDistanceComputationsToRefreshUB += 1 ;
          if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) *
              pointToClosestCenterUB[x] <= ((TYPE*)self->centerDistances)
              [c + cx * self->numCenters]) {
            continue ;
          }
          if (pointToClosestCenterUB[x] <= pointToCenterLB
              [c + x * self->numCenters]) {
            continue ;
          }
        }
        /*
         Now the UB is strict (equal to d(x, assignments[x])), but
         we still could not exclude that x should be reassigned to
         c. We therefore compute the distance, update the LB,
         and check if a reassigmnet must be made
         */
        distance = distFn(self->dimension,
                          data + x * self->dimension,
                          (TYPE*)self->centers + c *  self->dimension) ;
        numDistanceComputationsToRefreshLB += 1 ;
        pointToCenterLB[c + x * self->numCenters] = distance ;
        if (distance < pointToClosestCenterUB[x]) {
          assignments[x] = c ;
          pointToClosestCenterUB[x] = distance ;
          allDone = VL_FALSE ;
          /* the UB strict flag is already set here */
        }
      } /* assign center */
    } /* next data point */
    totDistanceComputationsToRefreshUB
    += numDistanceComputationsToRefreshUB ;
    totDistanceComputationsToRefreshLB
    += numDistanceComputationsToRefreshLB ;
    totDistanceComputationsToRefreshCenterDistances
    += numDistanceComputationsToRefreshCenterDistances ;
    totDistanceComputationsToNewCenters
    += numDistanceComputationsToNewCenters ;
    totNumRestartedCenters
    += numRestartedCenters ;
#ifdef SANITY
    {
      int xx ;
      int cc ;
      TYPE tol = 1e-5 ;
      VL_PRINTF("inconsistencies after assignments:\n");
      for (xx = 0 ; xx < numData ; ++xx) {
        for (cc = 0 ; cc < self->numCenters ; ++cc) {
          TYPE a = pointToCenterLB[cc + xx * self->numCenters] ;
          TYPE b = distFn(self->dimension,
                          data + self->dimension * xx,
                          (TYPE*)self->centers + self->dimension * cc) ;
          if (cc == assignments[xx]) {
            TYPE z = pointToClosestCenterUB[xx] ;
            if (z+tol<b) VL_PRINTF("UB %d %d = %f < %f\n",
                                   cc, xx, z, b) ;
          }
          if (a>b+tol) VL_PRINTF("LB %d %d = %f > %f (assign = %d)\n",
                                 cc, xx, a, b, assignments[xx]) ;
        }
      }
    }
#endif
    /* compute UB on energy */
    energy = 0 ;
    for (x = 0 ; x < (signed)numData ; ++x) {
      energy += pointToClosestCenterUB[x] ;
    }
    if (self->verbosity) {
      vl_size numDistanceComputations =
      numDistanceComputationsToRefreshUB +
      numDistanceComputationsToRefreshLB +
      numDistanceComputationsToRefreshCenterDistances +
      numDistanceComputationsToNewCenters ;
      VL_PRINTF("kmeans: Elkan iter %d: energy <= %g, dist. calc. = %d\n",
                iteration,
                energy,
                numDistanceComputations) ;
      if (numRestartedCenters) {
        /* fixed: spurious energy argument removed (two %d, two args) */
        VL_PRINTF("kmeans: Elkan iter %d: restarted %d centers\n",
                  iteration,
                  numRestartedCenters) ;
      }
      if (self->verbosity > 1) {
        VL_PRINTF("kmeans: Elkan iter %d: total dist. calc. per type: "
                  "UB: %.1f%% (%d), LB: %.1f%% (%d), "
                  "intra_center: %.1f%% (%d), "
                  "new_center: %.1f%% (%d)\n",
                  iteration,
                  100.0 * numDistanceComputationsToRefreshUB / numDistanceComputations,
                  numDistanceComputationsToRefreshUB,
                  100.0 *numDistanceComputationsToRefreshLB / numDistanceComputations,
                  numDistanceComputationsToRefreshLB,
                  100.0 * numDistanceComputationsToRefreshCenterDistances / numDistanceComputations,
                  numDistanceComputationsToRefreshCenterDistances,
                  100.0 * numDistanceComputationsToNewCenters / numDistanceComputations,
                  numDistanceComputationsToNewCenters) ;
      }
    }
    /* check termination conditions */
    if (iteration >= self->maxNumIterations) {
      if (self->verbosity) {
        VL_PRINTF("kmeans: Elkan terminating because maximum number of iterations reached\n") ;
      }
      break ;
    }
    if (allDone) {
      if (self->verbosity) {
        VL_PRINTF("kmeans: Elkan terminating because the algorithm fully converged\n") ;
      }
      break ;
    }
  } /* next Elkan iteration */
  /* compute true energy */
  energy = 0 ;
  for (x = 0 ; x < (signed)numData ; ++ x) {
    vl_uindex cx = assignments [x] ;
    energy += distFn(self->dimension,
                     data + self->dimension * x,
                     (TYPE*)self->centers + self->dimension * cx) ;
    totDistanceComputationsToFinalize += 1 ;
  }
  {
    vl_size totDistanceComputations =
    totDistanceComputationsToInit +
    totDistanceComputationsToRefreshUB +
    totDistanceComputationsToRefreshLB +
    totDistanceComputationsToRefreshCenterDistances +
    totDistanceComputationsToNewCenters +
    totDistanceComputationsToFinalize ;
    double saving = (double)totDistanceComputations
    / (iteration * self->numCenters * numData) ;
    if (self->verbosity) {
      VL_PRINTF("kmeans: Elkan: total dist. calc.: %d (%.2f %% of Lloyd)\n",
                totDistanceComputations, saving * 100.0) ;
      if (totNumRestartedCenters) {
        VL_PRINTF("kmeans: Elkan: there have been %d restarts\n",
                  totNumRestartedCenters) ;
      }
    }
    if (self->verbosity > 1) {
      VL_PRINTF("kmeans: Elkan: total dist. calc. per type: "
                "init: %.1f%% (%d), UB: %.1f%% (%d), LB: %.1f%% (%d), "
                "intra_center: %.1f%% (%d), "
                "new_center: %.1f%% (%d), "
                "finalize: %.1f%% (%d)\n",
                100.0 * totDistanceComputationsToInit / totDistanceComputations,
                totDistanceComputationsToInit,
                100.0 * totDistanceComputationsToRefreshUB / totDistanceComputations,
                totDistanceComputationsToRefreshUB,
                100.0 *totDistanceComputationsToRefreshLB / totDistanceComputations,
                totDistanceComputationsToRefreshLB,
                100.0 * totDistanceComputationsToRefreshCenterDistances / totDistanceComputations,
                totDistanceComputationsToRefreshCenterDistances,
                100.0 * totDistanceComputationsToNewCenters / totDistanceComputations,
                totDistanceComputationsToNewCenters,
                100.0 * totDistanceComputationsToFinalize / totDistanceComputations,
                totDistanceComputationsToFinalize) ;
    }
  }
  if (permutations) {
    vl_free(permutations) ;
  }
  if (numSeenSoFar) {
    vl_free(numSeenSoFar) ;
  }
  vl_free(distances) ;
  vl_free(assignments) ;
  vl_free(clusterMasses) ;
  vl_free(nextCenterDistances) ;
  vl_free(pointToClosestCenterUB) ;
  vl_free(pointToClosestCenterUBIsStrict) ;
  vl_free(pointToCenterLB) ;
  vl_free(newCenters) ;
  vl_free(centerToNewCenterDistances) ;
  return energy ;
}
/* ---------------------------------------------------------------- */
/* Refine the centers with the optimization algorithm configured on the
 * object.  The SFX token expands to the float (f) or double (d)
 * instantiation of each typed implementation. */
static double
VL_XCAT(_vl_kmeans_refine_centers_, SFX)
(VlKMeans * self,
 TYPE const * data,
 vl_size numData)
{
  if (self->algorithm == VlKMeansLloyd) {
    return VL_XCAT(_vl_kmeans_refine_centers_lloyd_, SFX)(self, data, numData) ;
  }
  if (self->algorithm == VlKMeansElkan) {
    return VL_XCAT(_vl_kmeans_refine_centers_elkan_, SFX)(self, data, numData) ;
  }
  if (self->algorithm == VlKMeansANN) {
    return VL_XCAT(_vl_kmeans_refine_centers_ann_, SFX)(self, data, numData) ;
  }
  /* unknown algorithm value: programming error */
  abort() ;
}
/* VL_KMEANS_INSTANTIATING */
#else
#ifndef __DOXYGEN__
#define FLT VL_TYPE_FLOAT
#define TYPE float
#define SFX f
#define VL_KMEANS_INSTANTIATING
#include "kmeans.c"
#define FLT VL_TYPE_DOUBLE
#define TYPE double
#define SFX d
#define VL_KMEANS_INSTANTIATING
#include "kmeans.c"
#endif
/* VL_KMEANS_INSTANTIATING */
#endif
/* ================================================================ */
#ifndef VL_KMEANS_INSTANTIATING
/** ------------------------------------------------------------------
** @brief Set centers
** @param self KMeans object.
** @param centers centers to copy.
** @param dimension data dimension.
** @param numCenters number of centers.
**/
VL_EXPORT void
vl_kmeans_set_centers
(VlKMeans * self,
 void const * centers,
 vl_size dimension,
 vl_size numCenters)
{
  /* Drop any previous state, then forward to the implementation matching
   * the object's stored data type. */
  vl_kmeans_reset (self) ;
  if (self->dataType == VL_TYPE_FLOAT) {
    _vl_kmeans_set_centers_f
      (self, (float const *)centers, dimension, numCenters) ;
  } else if (self->dataType == VL_TYPE_DOUBLE) {
    _vl_kmeans_set_centers_d
      (self, (double const *)centers, dimension, numCenters) ;
  } else {
    /* unsupported data type */
    abort() ;
  }
}
/** ------------------------------------------------------------------
** @brief init centers by randomly sampling data
** @param self KMeans object.
** @param data data to sample from.
** @param dimension data dimension.
** @param numData number of data points.
** @param numCenters number of centers.
**
** The function inits the KMeans centers by randomly sampling
** the data @a data.
**/
VL_EXPORT void
vl_kmeans_init_centers_with_rand_data
(VlKMeans * self,
 void const * data,
 vl_size dimension,
 vl_size numData,
 vl_size numCenters)
{
  /* Clear previous state, then dispatch on the stored data type. */
  vl_kmeans_reset (self) ;
  if (self->dataType == VL_TYPE_FLOAT) {
    _vl_kmeans_init_centers_with_rand_data_f
      (self, (float const *)data, dimension, numData, numCenters) ;
  } else if (self->dataType == VL_TYPE_DOUBLE) {
    _vl_kmeans_init_centers_with_rand_data_d
      (self, (double const *)data, dimension, numData, numCenters) ;
  } else {
    /* unsupported data type */
    abort() ;
  }
}
/** ------------------------------------------------------------------
** @brief Seed centers by the KMeans++ algorithm
** @param self KMeans object.
** @param data data to sample from.
** @param dimension data dimension.
** @param numData number of data points.
** @param numCenters number of centers.
**/
VL_EXPORT void
vl_kmeans_init_centers_plus_plus
(VlKMeans * self,
 void const * data,
 vl_size dimension,
 vl_size numData,
 vl_size numCenters)
{
  /* Clear previous state, then dispatch on the stored data type. */
  vl_kmeans_reset (self) ;
  if (self->dataType == VL_TYPE_FLOAT) {
    _vl_kmeans_init_centers_plus_plus_f
      (self, (float const *)data, dimension, numData, numCenters) ;
  } else if (self->dataType == VL_TYPE_DOUBLE) {
    _vl_kmeans_init_centers_plus_plus_d
      (self, (double const *)data, dimension, numData, numCenters) ;
  } else {
    /* unsupported data type */
    abort() ;
  }
}
/** ------------------------------------------------------------------
** @brief Quantize data
** @param self KMeans object.
** @param assignments data to closest center assignments (output).
** @param distances data to closest center distance (output).
** @param data data to quantize.
** @param numData number of data points to quantize.
**/
VL_EXPORT void
vl_kmeans_quantize
(VlKMeans * self,
 vl_uint32 * assignments,
 void * distances,
 void const * data,
 vl_size numData)
{
  /* Dispatch on the stored data type to the typed implementation. */
  if (self->dataType == VL_TYPE_FLOAT) {
    _vl_kmeans_quantize_f
      (self, assignments, distances, (float const *)data, numData) ;
  } else if (self->dataType == VL_TYPE_DOUBLE) {
    _vl_kmeans_quantize_d
      (self, assignments, distances, (double const *)data, numData) ;
  } else {
    /* unsupported data type */
    abort() ;
  }
}
/** ------------------------------------------------------------------
** @brief Quantize data using approximate nearest neighbours (ANN).
** @param self KMeans object.
** @param assignments data to centers assignments (output).
** @param distances data to closest center distance (output).
** @param data data to quantize.
** @param numData number of data points.
** @param update choose whether to update current assignments.
**
** The function uses an ANN procedure to compute the approximate
** nearest neighbours of the input data point.
**
** Setting @a update to ::VL_TRUE will cause the algorithm
** to *update existing assignments*. This means that each
** element of @a assignments and @a distances is updated only if the
** ANN procedure can find a better assignment than the existing one.
**/
VL_EXPORT void
vl_kmeans_quantize_ann
(VlKMeans * self,
 vl_uint32 * assignments,
 void * distances,
 void const * data,
 vl_size numData,
 vl_bool update)
{
  /* Dispatch on the stored data type to the typed ANN implementation. */
  if (self->dataType == VL_TYPE_FLOAT) {
    _vl_kmeans_quantize_ann_f
      (self, assignments, distances, (float const *)data, numData, update) ;
  } else if (self->dataType == VL_TYPE_DOUBLE) {
    _vl_kmeans_quantize_ann_d
      (self, assignments, distances, (double const *)data, numData, update) ;
  } else {
    /* unsupported data type */
    abort() ;
  }
}
/** ------------------------------------------------------------------
** @brief Refine center locations.
** @param self KMeans object.
** @param data data to quantize.
** @param numData number of data points.
** @return K-means energy at the end of optimization.
**
** The function calls the underlying K-means quantization algorithm
** (@ref VlKMeansAlgorithm) to quantize the specified data @a data.
** The function assumes that the cluster centers have already
** been assigned by using one of the seeding functions, or by
** setting them.
**/
VL_EXPORT double
vl_kmeans_refine_centers
(VlKMeans * self,
 void const * data,
 vl_size numData)
{
  /* Centers must have been seeded (or set) before refinement. */
  assert (self->centers) ;
  if (self->dataType == VL_TYPE_FLOAT) {
    return _vl_kmeans_refine_centers_f (self, (float const *)data, numData) ;
  }
  if (self->dataType == VL_TYPE_DOUBLE) {
    return _vl_kmeans_refine_centers_d (self, (double const *)data, numData) ;
  }
  /* unsupported data type */
  abort() ;
}
/** ------------------------------------------------------------------
** @brief Cluster data.
** @param self KMeans object.
** @param data data to quantize.
** @param dimension data dimension.
** @param numData number of data points.
** @param numCenters number of clusters.
** @return K-means energy at the end of optimization.
**
** The function initializes the centers by using the initialization
** algorithm set by ::vl_kmeans_set_initialization and refines them
** by the quantization algorithm set by ::vl_kmeans_set_algorithm.
** The process is repeated one or more times (see
** ::vl_kmeans_set_num_repetitions) and the result with smaller
** energy is retained.
**/
VL_EXPORT double
vl_kmeans_cluster (VlKMeans * self,
                   void const * data,
                   vl_size dimension,
                   vl_size numData,
                   vl_size numCenters)
{
  vl_uindex repetition ;
  double bestEnergy = VL_INFINITY_D ;
  /* Best centers over all repetitions; lazily allocated on the first
   * improvement and kept by swapping buffers with self->centers.
   * NOTE(review): if self->numRepetitions == 0 the loop never runs,
   * bestCenters stays NULL, and self->centers ends up freed and NULL --
   * presumably numRepetitions >= 1 is an invariant; confirm. */
  void * bestCenters = NULL ;
  for (repetition = 0 ; repetition < self->numRepetitions ; ++ repetition) {
    double energy ;
    double timeRef ;
    if (self->verbosity) {
      /* NOTE(review): %d with vl_uindex/vl_size arguments assumes they fit
       * an int -- verify against VL_PRINTF's format handling. */
      VL_PRINTF("kmeans: repetition %d of %d\n", repetition + 1, self->numRepetitions) ;
    }
    /* Seed the centers with the configured initialization method. */
    timeRef = vl_get_cpu_time() ;
    switch (self->initialization) {
      case VlKMeansRandomSelection :
        vl_kmeans_init_centers_with_rand_data (self,
                                               data, dimension, numData,
                                               numCenters) ;
        break ;
      case VlKMeansPlusPlus :
        vl_kmeans_init_centers_plus_plus (self,
                                          data, dimension, numData,
                                          numCenters) ;
        break ;
      default:
        abort() ;
    }
    if (self->verbosity) {
      VL_PRINTF("kmeans: K-means initialized in %.2f s\n",
                vl_get_cpu_time() - timeRef) ;
    }
    /* Refine the seeded centers and measure the resulting energy. */
    timeRef = vl_get_cpu_time () ;
    energy = vl_kmeans_refine_centers (self, data, numData) ;
    if (self->verbosity) {
      VL_PRINTF("kmeans: K-means terminated in %.2f s with energy %g\n",
                vl_get_cpu_time() - timeRef, energy) ;
    }
    /* copy centers to output if current solution is optimal */
    /* check repetition == 0 as well in case energy = NaN, which */
    /* can happen if the data contain NaNs */
    if (energy < bestEnergy || repetition == 0) {
      void * temp ;
      bestEnergy = energy ;
      if (bestCenters == NULL) {
        bestCenters = vl_malloc(vl_get_type_size(self->dataType) *
                                self->dimension *
                                self->numCenters) ;
      }
      /* swap buffers: keep the current centers as the best-so-far and hand
       * the old best buffer back to the object for the next repetition */
      temp = bestCenters ;
      bestCenters = self->centers ;
      self->centers = temp ;
    } /* better energy */
  } /* next repetition */
  /* Install the best centers on the object and release the scratch buffer
   * (which, after the swaps, is the last repetition's non-best centers). */
  vl_free (self->centers) ;
  self->centers = bestCenters ;
  return bestEnergy ;
}
/* VL_KMEANS_INSTANTIATING */
#endif
#undef SFX
#undef TYPE
#undef FLT
#undef VL_KMEANS_INSTANTIATING
|
GB_unop__acosh_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__acosh_fp64_fp64)
// op(A') function: GB (_unop_tran__acosh_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = acosh (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = acosh (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = acosh (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ACOSH || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply cij = acosh (aij) to every entry of A, writing into Cx.
GrB_Info GB (_unop_apply__acosh_fp64_fp64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out via GxB_NO_ACOSH / GxB_NO_FP64
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // full/sparse case: all anz entries are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = acosh (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = acosh (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = acosh (A'): transpose and apply; the shared template in
// GB_unop_transpose.c expands using the GB_* macros defined above
// for this operator (GB_CAST_OP, GB_ATYPE, GB_CTYPE, ...).
GrB_Info GB (_unop_tran__acosh_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out via GxB_NO_ACOSH / GxB_NO_FP64
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` values.
 * Normalizes *y in place (documented side effect of the classic GNU
 * recipe) so that the usec field of the difference is non-negative.
 * Returns 1 when the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow seconds into y until x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec)
    {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_sec += carry;
        y->tv_usec -= 1000000 * carry;
    }
    /* Carry excess microseconds from y back into whole seconds. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_sec -= carry;
        y->tv_usec += 1000000 * carry;
    }
    /* tv_usec is now certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Driver: allocates the grid and coefficient arrays, runs the order-1
 * 3D 7-point variable-coefficient stencil TESTS times, and reports the
 * best wall-clock time.  Expects Nx Ny Nz [Nt] on the command line. */
int main(int argc, char *argv[])
{
    int t, i, j, k, m, test;
    /* Interior sizes from the command line plus a one-point halo on each
     * side.  Zero-initialized and argc-validated so a missing argument can
     * no longer be read as an indeterminate value (previously UB). */
    int Nx = 0, Ny = 0, Nz = 0, Nt = 0;
    if (argc > 3) {
        Nx = atoi(argv[1])+2;
        Ny = atoi(argv[2])+2;
        Nz = atoi(argv[3])+2;
    } else {
        fprintf(stderr, "Usage: %s <Nx> <Ny> <Nz> [Nt]\n", argv[0]);
        return EXIT_FAILURE;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    /* allocate the two time buffers A[0..1][Nz][Ny][Nx] */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    for(m=0; m<2;m++){
        A[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            A[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }

    /* one variable-coefficient array per stencil point */
    double ****coef = (double ****) malloc(sizeof(double***)*7);
    for(m=0; m<7;m++){
        coef[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }

    /* tile size information, including extra element to decide the list length */
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    /* The list is modified here before source-to-source transformations */
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 32;
    tile_size[1] = 32;
    tile_size[2] = 32;
    tile_size[3] = 32;
    tile_size[4] = -1;

    /* for timekeeping */
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;

    /* deterministic pseudo-random initialization of field and coefficients */
    const int BASE = 1024;
    srand(42);
    for (i = 1; i < Nz; i++) {
        for (j = 1; j < Ny; j++) {
            for (k = 1; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
            }
        }
    }
    for (m=0; m<7; m++) {
        for (i=1; i<Nz; i++) {
            for (j=1; j<Ny; j++) {
                for (k=1; k<Nx; k++) {
                    coef[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    /* num_threads is consumed by PRINT_RESULTS (see print_utils.h) */
    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        /* serial execution - Addition: 6 && Multiplication: 2 */
#pragma scop
        for (t = 0; t < Nt-1; t++) {
            for (i = 1; i < Nz-1; i++) {
                for (j = 1; j < Ny-1; j++) {
                    for (k = 1; k < Nx-1; k++) {
                        A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                              coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                              coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                              coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                              coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                              coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                              coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        /* keep the fastest run; use this file's MIN macro -- the previous
         * lower-case min() is not defined anywhere in this file */
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }

    PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    /* Release every allocation, including the outer pointer arrays and the
     * tile-size list, which previously leaked. */
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    free(A);
    for(m=0; m<7;m++){
        for(i=0; i<Nz; i++){
            for(j=0;j<Ny;j++){
                free(coef[m][i][j]);
            }
            free(coef[m][i]);
        }
        free(coef[m]);
    }
    free(coef);
    free(tile_size);
    (void) ts_return;  /* subtraction sign flag is not used by this driver */
    return 0;
}
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` values.
 * Normalizes *y in place (documented side effect of the classic GNU
 * recipe) so that the usec field of the difference is non-negative.
 * Returns 1 when the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow seconds into y until x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec)
    {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_sec += carry;
        y->tv_usec -= 1000000 * carry;
    }
    /* Carry excess microseconds from y back into whole seconds. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_sec -= carry;
        y->tv_usec += 1000000 * carry;
    }
    /* tv_usec is now certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Driver for the PLUTO/CLooG-tiled variant of the 7-point variable
 * coefficient stencil.  Expects Nx Ny Nz [Nt] on the command line. */
int main(int argc, char *argv[])
{
    int t, i, j, k, m, test;
    /* Interior sizes plus a one-point halo per side.  Zero-initialized and
     * argc-validated so missing arguments can no longer be read as
     * indeterminate values (previously UB). */
    int Nx = 0, Ny = 0, Nz = 0, Nt = 0;
    if (argc > 3) {
        Nx = atoi(argv[1])+2;
        Ny = atoi(argv[2])+2;
        Nz = atoi(argv[3])+2;
    } else {
        fprintf(stderr, "Usage: %s <Nx> <Ny> <Nz> [Nt]\n", argv[0]);
        return EXIT_FAILURE;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    /* allocate the two time buffers A[0..1][Nz][Ny][Nx] */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    for(m=0; m<2;m++){
        A[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            A[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }

    /* one variable-coefficient array per stencil point */
    double ****coef = (double ****) malloc(sizeof(double***)*7);
    for(m=0; m<7;m++){
        coef[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }

    /* tile size information, including extra element to decide the list length */
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    /* The list is modified here before source-to-source transformations */
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 8;
    tile_size[1] = 8;
    tile_size[2] = 32;
    tile_size[3] = 64;
    tile_size[4] = -1;

    /* for timekeeping */
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;

    /* deterministic pseudo-random initialization of field and coefficients */
    const int BASE = 1024;
    srand(42);
    for (i = 1; i < Nz; i++) {
        for (j = 1; j < Ny; j++) {
            for (k = 1; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
            }
        }
    }
    for (m=0; m<7; m++) {
        for (i=1; i<Nz; i++) {
            for (j=1; j<Ny; j++) {
                for (k=1; k<Nx; k++) {
                    coef[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    /* num_threads is consumed by PRINT_RESULTS (see print_utils.h) */
    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        /* Time-tiled, OpenMP-parallel sweep generated by PLUTO/CLooG.
         * (A stray glibc license header the generator had pasted into this
         * function was removed; unused locals lb, ub, lb2, ub2 dropped.) */
        int t1, t2, t3, t4, t5, t6, t7, t8;
        int lbp, ubp;
        register int lbv, ubv;
        /* Start of CLooG code */
        if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
            for (t1=-1;t1<=floord(Nt-2,4);t1++) {
                lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8));
                ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
                for (t2=lbp;t2<=ubp;t2++) {
                    for (t3=max(max(0,ceild(t1-7,8)),ceild(8*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(4*t1+Ny+5,32)),floord(8*t2+Ny+4,32)),floord(8*t1-8*t2+Nz+Ny+3,32));t3++) {
                        for (t4=max(max(max(0,ceild(t1-15,16)),ceild(8*t2-Nz-60,64)),ceild(32*t3-Ny-60,64));t4<=min(min(min(min(floord(Nt+Nx-4,64),floord(4*t1+Nx+5,64)),floord(8*t2+Nx+4,64)),floord(32*t3+Nx+28,64)),floord(8*t1-8*t2+Nz+Nx+3,64));t4++) {
                            for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),32*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),32*t3+30),64*t4+62),8*t1-8*t2+Nz+5);t5++) {
                                for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) {
                                    for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) {
                                        lbv=max(64*t4,t5+1);
                                        ubv=min(64*t4+63,t5+Nx-2);
#pragma ivdep
#pragma vector always
                                        for (t8=lbv;t8<=ubv;t8++) {
                                            /* (t6,t7,t8) are skewed by t5; (-t5+t*) recovers grid coordinates */
                                            A[(t5 + 1) % 2][(-t5+t6)][(-t5+t7)][(-t5+t8)] =
                                                coef[0][(-t5+t6)][(-t5+t7)][(-t5+t8)] * A[t5 % 2][(-t5+t6)][(-t5+t7)][(-t5+t8)]
                                              + coef[1][(-t5+t6)][(-t5+t7)][(-t5+t8)] * A[t5 % 2][(-t5+t6) - 1][(-t5+t7)][(-t5+t8)]
                                              + coef[2][(-t5+t6)][(-t5+t7)][(-t5+t8)] * A[t5 % 2][(-t5+t6)][(-t5+t7) - 1][(-t5+t8)]
                                              + coef[3][(-t5+t6)][(-t5+t7)][(-t5+t8)] * A[t5 % 2][(-t5+t6)][(-t5+t7)][(-t5+t8) - 1]
                                              + coef[4][(-t5+t6)][(-t5+t7)][(-t5+t8)] * A[t5 % 2][(-t5+t6) + 1][(-t5+t7)][(-t5+t8)]
                                              + coef[5][(-t5+t6)][(-t5+t7)][(-t5+t8)] * A[t5 % 2][(-t5+t6)][(-t5+t7) + 1][(-t5+t8)]
                                              + coef[6][(-t5+t6)][(-t5+t7)][(-t5+t8)] * A[t5 % 2][(-t5+t6)][(-t5+t7)][(-t5+t8) + 1];
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        /* End of CLooG code */
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = min(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }

    PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    /* Release every allocation, including the outer pointer arrays and the
     * tile-size list, which previously leaked. */
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    free(A);
    for(m=0; m<7;m++){
        for(i=0; i<Nz; i++){
            for(j=0;j<Ny;j++){
                free(coef[m][i][j]);
            }
            free(coef[m][i]);
        }
        free(coef[m]);
    }
    free(coef);
    free(tile_size);
    (void) ts_return;  /* subtraction sign flag is not used by this driver */
    return 0;
}
|
VolumetricAdaptiveMaxPooling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/VolumetricAdaptiveMaxPooling.c"
#else
#define START_IND(a,b,c) (int)floor((float)(a * c) / b)
#define END_IND(a,b,c) (int)ceil((float)((a + 1) * c) / b)
// #define START_IND(a,b,c) a * c / b
// #define END_IND(a,b,c) (a + 1) * c / b + ((a + 1) * c % b > 0)?1:0
// 5d tensor B x D x T x H x W
// Forward pass over one frame: for each plane d and each output cell
// (ot,oh,ow), scan the adaptive input window given by START_IND/END_IND,
// write the max value to output_p and its flattened input index to ind_p.
// Strides are passed explicitly so a non-contiguous input works.
static void THNN_(VolumetricAdaptiveMaxPooling_updateOutput_frame)(
          real *input_p,
          real *output_p,
          THIndex_t *ind_p,
          int64_t sizeD,
          int64_t isizeT,
          int64_t isizeH,
          int64_t isizeW,
          int64_t osizeT,
          int64_t osizeH,
          int64_t osizeW,
          int64_t istrideD,
          int64_t istrideT,
          int64_t istrideH,
          int64_t istrideW)
{
  int64_t d;
#pragma omp parallel for private(d)
  for (d = 0; d < sizeD; d++)
  {
    /* loop over output */
    int64_t ot, oh, ow;
    for(ot = 0; ot < osizeT; ot++)
    {
      /* adaptive window bounds along T for this output index */
      int64_t istartT = START_IND(ot, osizeT, isizeT);
      int64_t iendT = END_IND(ot, osizeT, isizeT);
      int64_t kT = iendT - istartT;
      for(oh = 0; oh < osizeH; oh++)
      {
        int64_t istartH = START_IND(oh, osizeH, isizeH);
        int64_t iendH = END_IND(oh, osizeH, isizeH);
        int64_t kH = iendH - istartH;
        for(ow = 0; ow < osizeW; ow++)
        {
          int64_t istartW = START_IND(ow, osizeW, isizeW);
          int64_t iendW = END_IND(ow, osizeW, isizeW);
          int64_t kW = iendW - istartW;
          /* local pointers */
          real *ip = input_p + d*istrideD + istartT *istrideT + istartH*istrideH + istartW*istrideW;
          real *op = output_p + d*osizeT*osizeH*osizeW + ot*osizeH*osizeW + oh*osizeW + ow;
          THIndex_t *indp = ind_p + d*osizeT*osizeH*osizeW + ot*osizeH*osizeW + oh*osizeW + ow;
          /* compute local max: */
          int64_t maxindex = -1;
          real maxval = -FLT_MAX;
          int64_t it, ih, iw;
          for(it = 0; it < kT; it++)
          {
            for(ih = 0; ih < kH; ih++)
            {
              for(iw = 0; iw < kW; iw++)
              {
                real val = *(ip + it*istrideT + ih*istrideH + iw*istrideW);
                /* NaNs compare as the maximum so they propagate to the
                 * output (std::isnan: this generic .c is compiled as C++) */
                if ((val > maxval) || std::isnan(val))
                {
                  maxval = val;
                  /* index flattened over the (contiguous) T*H*W input volume */
                  maxindex = (it+istartT)*isizeH*isizeW + (ih+istartH)*isizeW + (iw+istartW);
                }
              }
            }
          }
          /* set output to local max */
          *op = maxval;
          /* store location of max */
          *indp = maxindex + TH_INDEX_BASE;
        }
      }
    }
  }
}
// Adaptive 3D max pooling forward: resizes output/indices to the requested
// output sizes and fills them via the per-frame worker above.  Accepts a
// 4D (D,T,H,W) or 5D (B,D,T,H,W) input.
// NOTE(review): the output sizes arrive in (osizeT, osizeW, osizeH) order in
// this signature, while internal use is consistently T,H,W -- callers must
// match this ordering; confirm against the dispatching code.
void THNN_(VolumetricAdaptiveMaxPooling_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          THIndexTensor *indices,
          int osizeT,
          int osizeW,
          int osizeH)
{
  /* dimension positions; shifted by one in batch mode */
  int dimD = 0;
  int dimT = 1;
  int dimH = 2;
  int dimW = 3;
  int64_t sizeB = 1;
  int64_t sizeD = 0;
  int64_t isizeT = 0;
  int64_t isizeH = 0;
  int64_t isizeW = 0;
  int64_t istrideB = 0;
  int64_t istrideD = 0;
  int64_t istrideT = 0;
  int64_t istrideH = 0;
  int64_t istrideW = 0;
  real *input_data = nullptr;
  real *output_data = nullptr;
  THIndex_t *indices_data = nullptr;
  THNN_ARGCHECK(!input->is_empty() && (input->dim() == 4 || input->dim() == 5), 2, input,
                "non-empty 4D or 5D (batch mode) tensor expected for input, but got: %s");
  if (input->dim() == 5)
  {
    /* batch mode: remember the batch stride and shift all dims right */
    istrideB = input->stride(0);
    sizeB = input->size(0);
    dimD++;
    dimT++;
    dimH++;
    dimW++;
  }
  /* sizes */
  sizeD = input->size(dimD);
  isizeT = input->size(dimT);
  isizeH = input->size(dimH);
  isizeW = input->size(dimW);
  /* strides */
  istrideD = input->stride(dimD);
  istrideT = input->stride(dimT);
  istrideH = input->stride(dimH);
  istrideW = input->stride(dimW);
  /* resize output */
  if (input->dim() == 4)
  {
    THTensor_(resize4d)(output, sizeD, osizeT, osizeH, osizeW);
    /* indices will contain max input locations for each output point */
    THIndexTensor_(resize4d)(indices, sizeD, osizeT, osizeH, osizeW);
    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);
    indices_data = THIndexTensor_(data)(indices);
    THNN_(VolumetricAdaptiveMaxPooling_updateOutput_frame)(input_data, output_data,
                                                           indices_data,
                                                           sizeD,
                                                           isizeT, isizeH, isizeW,
                                                           osizeT, osizeH, osizeW,
                                                           istrideD, istrideT,
                                                           istrideH, istrideW);
  }
  else
  {
    /* batch mode: one frame per batch element, parallelized over the batch */
    int64_t b;
    THTensor_(resize5d)(output, sizeB, sizeD, osizeT, osizeH, osizeW);
    /* indices will contain max input locations for each output point */
    THIndexTensor_(resize5d)(indices, sizeB, sizeD, osizeT, osizeH, osizeW);
    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);
    indices_data = THIndexTensor_(data)(indices);
#pragma omp parallel for private(b)
    for (b = 0; b < sizeB; b++)
    {
      THNN_(VolumetricAdaptiveMaxPooling_updateOutput_frame)(input_data+b*istrideB, output_data+b*sizeD*osizeT*osizeH*osizeW,
                                                             indices_data+b*sizeD*osizeT*osizeH*osizeW,
                                                             sizeD,
                                                             isizeT, isizeH, isizeW,
                                                             osizeT, osizeH, osizeW,
                                                             istrideD, istrideT,
                                                             istrideH, istrideW);
    }
  }
}
/* Backward pass over one frame: route each output gradient back to the
 * input cell whose (flattened, TH_INDEX_BASE-offset) index was recorded in
 * ind_p during the forward pass.  gradInput_p is assumed already zeroed. */
static void THNN_(VolumetricAdaptiveMaxPooling_updateGradInput_frame)(
          real *gradInput_p,
          real *gradOutput_p,
          THIndex_t *ind_p,
          int64_t sizeD,
          int64_t isizeT,
          int64_t isizeH,
          int64_t isizeW,
          int64_t osizeT,
          int64_t osizeH,
          int64_t osizeW)
{
  int64_t d;
#pragma omp parallel for private(d)
  for (d = 0; d < sizeD; d++)
  {
    /* per-plane base pointers */
    real *gradInputPlane = gradInput_p + d*isizeT*isizeH*isizeW;
    real *gradOutputPlane = gradOutput_p + d*osizeT*osizeH*osizeW;
    THIndex_t *indPlane = ind_p + d*osizeT*osizeH*osizeW;
    /* flattened walk over (ot, oh, ow) -- same order as nested loops */
    const int64_t outputVolume = osizeT*osizeH*osizeW;
    int64_t o;
    for (o = 0; o < outputVolume; o++)
    {
      /* position of the forward-pass maximum for this output cell */
      int64_t maxp = indPlane[o] - TH_INDEX_BASE;
      /* accumulate the gradient there */
      gradInputPlane[maxp] += gradOutputPlane[o];
    }
  }
}
// Adaptive 3D max pooling backward: zeroes gradInput and scatters the
// gradOutput values to the argmax positions saved in `indices`.
void THNN_(VolumetricAdaptiveMaxPooling_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          THIndexTensor *indices)
{
  /* dimension positions; shifted by one in batch mode */
  int dimD = 0;
  int dimT = 1;
  int dimH = 2;
  int dimW = 3;
  int64_t sizeB = 1;
  int64_t sizeD;
  int64_t isizeT;
  int64_t isizeH;
  int64_t isizeW;
  int64_t osizeT;
  int64_t osizeH;
  int64_t osizeW;
  real *gradInput_data;
  real *gradOutput_data;
  THIndex_t *indices_data;
  /* get contiguous gradOutput (freed at the end of this function) */
  gradOutput = THTensor_(newContiguous)(gradOutput);
  /* resize and zero so the scatter below can accumulate with += */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);
  if (input->dim() == 5) {
    /* batch mode */
    sizeB = input->size(0);
    dimD++;
    dimT++;
    dimH++;
    dimW++;
  }
  /* sizes */
  sizeD = input->size(dimD);
  isizeT = input->size(dimT);
  isizeH = input->size(dimH);
  isizeW = input->size(dimW);
  osizeT = gradOutput->size(dimT);
  osizeH = gradOutput->size(dimH);
  osizeW = gradOutput->size(dimW);
  /* get raw pointers */
  gradInput_data = THTensor_(data)(gradInput);
  gradOutput_data = THTensor_(data)(gradOutput);
  indices_data = THIndexTensor_(data)(indices);
  /* backprop */
  if (input->dim() == 4)
  {
    THNN_(VolumetricAdaptiveMaxPooling_updateGradInput_frame)(gradInput_data, gradOutput_data,
                                                              indices_data,
                                                              sizeD,
                                                              isizeT, isizeH, isizeW,
                                                              osizeT, osizeH, osizeW);
  }
  else
  {
    /* batch mode: one frame per batch element, parallelized over the batch */
    int64_t b;
#pragma omp parallel for private(b)
    for (b = 0; b < sizeB; b++)
    {
      THNN_(VolumetricAdaptiveMaxPooling_updateGradInput_frame)(gradInput_data+b*sizeD*isizeT*isizeH*isizeW, gradOutput_data+b*sizeD*osizeT*osizeH*osizeW,
                                                                indices_data+b*sizeD*osizeT*osizeH*osizeW,
                                                                sizeD,
                                                                isizeT, isizeH, isizeW,
                                                                osizeT, osizeH, osizeW);
    }
  }
  /* cleanup */
  THTensor_(free)(gradOutput);
}
#endif
|
omp_loop3.c | /* vim: set ts=4 sw=4: */
/* Filename : omp_loop3.c
* Description : simple OpenMP model
* Author : SunYoung Kim <sunyzero@gmail.com>
* Notes : omp_get_thread_num
*/
#include <stdio.h>
#include <omp.h>
int main()
{
    int idx;
    /* Parallelize the loop; each iteration reports which worker thread
     * executed it via omp_get_thread_num(). */
#pragma omp parallel for
    for (idx = 0; idx < 8; idx++) {
        printf("[%d] Hello OpenMP (%d)\n", idx, omp_get_thread_num());
    }
    /* threads rejoin at the loop's implicit barrier */
    return 0;
}
|
SpatialSubSampling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialSubSampling.c"
#else
// Validate input/weight shapes for spatial sub-sampling: 3D (C,H,W) or
// 4D (B,C,H,W) input, plane count matching weight's first dimension, and an
// image at least as large as the kernel.
// NOTE(review): gradOutput is accepted but never examined here -- presumably
// kept for signature symmetry with other shapeCheck helpers; confirm.
static inline void THNN_(SpatialSubSampling_shapeCheck)(
                         THTensor *input,
                         THTensor *gradOutput,
                         THTensor *weight,
                         int kW, int kH) {
  THNN_ARGCHECK(input->nDimension == 3 || input->nDimension == 4, 2, input,
                "3D or 4D input tensor expected but got: %s");
  THArgCheck(THTensor_(isContiguous)(weight), 4, "weight must be contiguous");
  /* one weight entry per input plane */
  int nInputPlane = THTensor_(size)(weight, 0);
  int dimw = 2;
  int dimh = 1;
  int64_t inputWidth;
  int64_t inputHeight;
  if (input->nDimension == 4) {
    /* batch mode: spatial dims shift right by one */
    dimw++;
    dimh++;
  }
  inputWidth = input->size[dimw];
  inputHeight = input->size[dimh];
  THArgCheck(input->size[dimh-1] == nInputPlane, 2, "invalid number of input planes");
  THArgCheck(inputWidth >= kW && inputHeight >= kH, 2, "input image smaller than kernel size");
}
// Spatial sub-sampling forward: each output pixel is
//   bias[k] + weight[k] * sum(kW x kH input window),
// with one scalar weight and bias per plane, stride (dW, dH).
void THNN_(SpatialSubSampling_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          THTensor *weight,
          THTensor *bias,
          int kW, int kH,
          int dW, int dH)
{
  THArgCheck(!bias || THTensor_(isContiguous)(bias), 5, "bias must be contiguous");
  real *weight_data = THTensor_(data)(weight);
  /* NOTE(review): the check above permits bias == NULL, yet bias_data[k] is
   * read unconditionally below -- presumably callers always supply a bias;
   * confirm. */
  real *bias_data = THTensor_(data)(bias);
  real *output_data;
  real *input_data;
  /* dimension positions; shifted by one in batch mode */
  int dimw = 2;
  int dimh = 1;
  int64_t nbatch = 1;
  int64_t inputWidth;
  int64_t inputHeight;
  int64_t outputWidth;
  int64_t outputHeight;
  int nInputPlane = THTensor_(size)(weight,0);
  int64_t k;
  THNN_(SpatialSubSampling_shapeCheck)(input, NULL, weight, kW, kH);
  if (input->nDimension == 4) {
    nbatch = input->size[0];
    dimw++;
    dimh++;
  }
  inputWidth = input->size[dimw];
  inputHeight = input->size[dimh];
  /* valid-mode window count along each axis */
  outputWidth = (inputWidth - kW) / dW + 1;
  outputHeight = (inputHeight - kH) / dH + 1;
  if (input->nDimension == 3)
    THTensor_(resize3d)(output, nInputPlane, outputHeight, outputWidth);
  else
    THTensor_(resize4d)(output, input->size[0], nInputPlane, outputHeight, outputWidth);
  /* work on a contiguous copy; freed at the end */
  input = THTensor_(newContiguous)(input);
  input_data = THTensor_(data)(input);
  output_data = THTensor_(data)(output);
#pragma omp parallel for private(k)
  for(k = 0; k < nInputPlane; k++)
  {
    int64_t p;
    for(p = 0; p < nbatch; p++)
    {
      int64_t xx, yy;
      /* For all output pixels... */
      real *ptr_output = output_data + p*nInputPlane*outputWidth*outputHeight + k*outputWidth*outputHeight;
      /* Get the good mask for (k,i) (k out, i in) */
      real the_weight = weight_data[k];
      /* Initialize to the bias */
      real z = bias_data[k];
      int64_t i;
      for(i = 0; i < outputWidth*outputHeight; i++)
        ptr_output[i] = z;
      for(yy = 0; yy < outputHeight; yy++)
      {
        for(xx = 0; xx < outputWidth; xx++)
        {
          /* Compute the mean of the input image... */
          real *ptr_input = input_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight + yy*dH*inputWidth+xx*dW;
          real sum = 0;
          int64_t kx, ky;
          for(ky = 0; ky < kH; ky++)
          {
            for(kx = 0; kx < kW; kx++)
              sum += ptr_input[kx];
            ptr_input += inputWidth; /* next input line */
          }
          /* Update output */
          *ptr_output++ += the_weight*sum;
        }
      }
    }
  }
  THTensor_(free)(input);
}
void THNN_(SpatialSubSampling_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          THTensor *weight,
          int kW, int kH,
          int dW, int dH)
{
  /* Backward pass w.r.t. the input: each gradOutput element, scaled by the
     plane's scalar weight, is scattered uniformly over its kW x kH input
     window. Overlapping windows (dW < kW or dH < kH) accumulate via +=,
     so gradInput is zeroed first. */
  THNN_(SpatialSubSampling_shapeCheck)(input, gradOutput, weight, kW, kH);
  int dimw = 2;          /* width dim for 3D input; bumped to 3 for 4D */
  int dimh = 1;          /* height dim for 3D input; bumped to 2 for 4D */
  int64_t nbatch = 1;
  int64_t inputWidth;
  int64_t inputHeight;
  int64_t outputWidth;
  int64_t outputHeight;
  int nInputPlane = THTensor_(size)(weight,0);
  real *weight_data;
  real *gradOutput_data;
  real *gradInput_data;
  int64_t k;
  if (input->nDimension == 4) {
    nbatch = input->size[0];
    dimw++;
    dimh++;
  }
  inputWidth = input->size[dimw];
  inputHeight = input->size[dimh];
  outputWidth = (inputWidth - kW) / dW + 1;
  outputHeight = (inputHeight - kH) / dH + 1;
  weight_data = THTensor_(data)(weight);
  gradOutput = THTensor_(newContiguous)(gradOutput);
  gradOutput_data = THTensor_(data)(gradOutput);
  THTensor_(resizeAs)(gradInput, input);
  gradInput_data = THTensor_(data)(gradInput);
  /* One thread per plane; each (k, p) gradInput slice is private to it. */
  #pragma omp parallel for private(k)
  for(k = 0; k < nInputPlane; k++)
  {
    int64_t p;
    for(p = 0; p < nbatch; p++)
    {
      real the_weight = weight_data[k];
      real *ptr_gradOutput = gradOutput_data + p*nInputPlane*outputHeight*outputWidth + k*outputWidth*outputHeight;
      int64_t xx, yy;
      real* ptr_gi = gradInput_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight;
      int64_t i;
      /* Clear the slice before accumulating window contributions. */
      for(i=0; i<inputWidth*inputHeight; i++)
        ptr_gi[i] = 0.0;
      for(yy = 0; yy < outputHeight; yy++)
      {
        for(xx = 0; xx < outputWidth; xx++)
        {
          real *ptr_gradInput = gradInput_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight + yy*dH*inputWidth+xx*dW;
          /* Scaled output gradient spread over the whole window. */
          real z = *ptr_gradOutput++ * the_weight;
          int64_t kx, ky;
          for(ky = 0; ky < kH; ky++)
          {
            for(kx = 0; kx < kW; kx++)
              ptr_gradInput[kx] += z;
            ptr_gradInput += inputWidth;   /* next input row of the window */
          }
        }
      }
    }
  }
  /* Release the contiguous copy made above. */
  THTensor_(free)(gradOutput);
}
void THNN_(SpatialSubSampling_accGradParameters)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradWeight,
          THTensor *gradBias,
          int kW, int kH,
          int dW, int dH,
          accreal scale_)
{
  /* Accumulate gradients of the two per-plane parameters:
       gradBias[k]   += scale * sum of gradOutput over plane k
       gradWeight[k] += scale * sum of (gradOutput * window-sum of input)
     over all batches. */
  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
  THNN_(SpatialSubSampling_shapeCheck)(input, gradOutput, gradWeight, kW, kH);
  int64_t nbatch = 1;
  int64_t dimw = 2;      /* width dim for 3D input; bumped to 3 for 4D */
  int64_t dimh = 1;      /* height dim for 3D input; bumped to 2 for 4D */
  int64_t inputWidth;
  int64_t inputHeight;
  int64_t outputWidth;
  int64_t outputHeight;
  int nInputPlane = THTensor_(size)(gradWeight,0);
  real *gradWeight_data;
  real *gradBias_data;
  real *gradOutput_data;
  real *input_data;
  int64_t k;
  if (input->nDimension == 4) {
    dimw++;
    dimh++;
    nbatch = input->size[0];
  }
  inputWidth = input->size[dimw];
  inputHeight = input->size[dimh];
  outputWidth = (inputWidth - kW) / dW + 1;
  outputHeight = (inputHeight - kH) / dH + 1;
  gradWeight_data = THTensor_(data)(gradWeight);
  gradBias_data = THTensor_(data)(gradBias);
  gradOutput = THTensor_(newContiguous)(gradOutput);
  gradOutput_data = THTensor_(data)(gradOutput);
  input = THTensor_(newContiguous)(input);
  input_data = THTensor_(data)(input);
  /* One thread per plane; gradWeight[k]/gradBias[k] are only touched by
     the thread owning plane k. */
  #pragma omp parallel for private(k)
  for(k = 0; k < nInputPlane; k++)
  {
    int64_t p;
    for(p = 0; p < nbatch; p++)
    {
      real *ptr_gradOutput = gradOutput_data + p*nInputPlane*outputHeight*outputWidth + k*outputWidth*outputHeight;
      real sum;
      int64_t xx, yy;
      int64_t i;
      /* Bias gradient: plain sum of the plane's output gradients. */
      sum = 0;
      for(i = 0; i < outputWidth*outputHeight; i++)
        sum += ptr_gradOutput[i];
      gradBias_data[k] += scale*sum;
      /* Weight gradient: output gradient times the window sum of input. */
      sum = 0;
      for(yy = 0; yy < outputHeight; yy++)
      {
        for(xx = 0; xx < outputWidth; xx++)
        {
          real *ptr_input = input_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight + yy*dH*inputWidth+xx*dW;
          real z = *ptr_gradOutput++;
          int64_t kx, ky;
          for(ky = 0; ky < kH; ky++)
          {
            for(kx = 0; kx < kW; kx++)
              sum += z * ptr_input[kx];
            ptr_input += inputWidth;   /* next input row of the window */
          }
        }
      }
      gradWeight_data[k] += scale*sum;
    }
  }
  /* Release the contiguous copies made above. */
  THTensor_(free)(input);
  THTensor_(free)(gradOutput);
}
#endif
|
lister.c | /*
* `Restless reachability in temporal graphs: algebraic methods and applications`
*
* This experimental source code is supplied to accompany the
* aforementioned paper.
*
* The source code is configured for a gcc build to a native
* microarchitecture that must support the AVX2 and PCLMULQDQ
* instruction set extensions. Other builds are possible but
* require manual configuration of 'Makefile' and 'builds.h'.
*
* The source code is subject to the following license.
*
* The MIT License (MIT)
*
* Copyright (c) 2020 Anonymous authors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
#include<stdio.h>
#include<stdlib.h>
#include<assert.h>
#include<time.h>
#include<sys/utsname.h>
#include<string.h>
#include<stdarg.h>
#include<assert.h>
#include<ctype.h>
#include<omp.h>
/************************************************************* Configuration. */
#define MAX_K 32
#define MAX_SHADES 32
#define PREFETCH_PAD 32
#define MAX_THREADS 128
#define UNDEFINED -1
#define MATH_INF ((index_t)0x3FFFFFFF)
#include"builds.h" // get build config
typedef long int index_t; // default to 64-bit indexing
#include"gf.h" // finite fields
#include"ffprng.h" // fast-forward pseudorandom number generator
/* Fully parenthesized min/max: the previous definitions expanded without
 * outer parentheses, so e.g. `2*MIN(a,b)` parsed as `(2*a)<(b) ? a : b`.
 * Arguments are still evaluated twice; avoid side effects in them. */
#define MIN(x,y) (((x)<(y)) ? (x) : (y))
#define MAX(x,y) (((x)>(y)) ? (x) : (y))
/********************************************************************* Flags. */
/************************************************************* Common macros. */
/* Linked list navigation macros. */
/* Doubly-linked circular list helpers. Nodes expose ->prev / ->next;
 * an empty list is a sentinel node linked to itself. */
/* Insert el immediately AFTER node to. */
#define pnlinknext(to,el) { (el)->next = (to)->next; (el)->prev = (to); (to)->next->prev = (el); (to)->next = (el); }
/* Insert el immediately BEFORE node to. */
#define pnlinkprev(to,el) { (el)->prev = (to)->prev; (el)->next = (to); (to)->prev->next = (el); (to)->prev = (el); }
/* Unlink el from its list; el's own prev/next pointers are left intact. */
#define pnunlink(el) { (el)->next->prev = (el)->prev; (el)->prev->next = (el)->next; }
/* Re-insert a previously pnunlink'ed el at its remembered position. */
#define pnrelink(el) { (el)->next->prev = (el); (el)->prev->next = (el); }
/*********************************************************** Error reporting. */
#define ERROR(...) error(__FILE__,__LINE__,__func__,__VA_ARGS__);
/* Print a formatted diagnostic (file, line, function, then the caller's
 * printf-style message) to stderr and abort. Never returns. */
static void error(const char *fn, int line, const char *func,
                  const char *format, ...)
{
    fprintf(stderr,
            "ERROR [file = %s, line = %d]\n"
            "%s: ",
            fn,
            line,
            func);
    va_list ap;
    va_start(ap, format);
    vfprintf(stderr, format, ap);
    va_end(ap);
    fprintf(stderr, "\n");
    abort();
}
/********************************************************* Get the host name. */
#define MAX_HOSTNAME 256
/* Return the host's node name. The result lives in static storage and
 * stays valid for the whole run (not thread-safe). */
const char *sysdep_hostname(void)
{
    static char cached_name[MAX_HOSTNAME];
    struct utsname info;
    uname(&info);
    strcpy(cached_name, info.nodename);
    return cached_name;
}
/********************************************************* Available threads. */
/* Number of worker threads: the OpenMP thread limit in parallel builds,
 * otherwise always a single thread. */
index_t num_threads(void)
{
    index_t nt = 1;
#ifdef BUILD_PARALLEL
    nt = omp_get_max_threads();
#endif
    return nt;
}
/********************************************** Memory allocation & tracking. */
/* Tracked allocation entry points; use these instead of malloc/free so
 * that current and peak memory usage can be reported. */
#define MALLOC(x) malloc_wrapper(x)
#define FREE(x) free_wrapper(x)
/* Number of live tracked allocations (MALLOCs minus FREEs). */
index_t malloc_balance = 0;
/* One tracking node per live allocation, kept in a circular list rooted
 * at malloc_track_root. */
struct malloc_track_struct
{
    void *p;                            /* the user pointer returned by malloc */
    size_t size;                        /* requested size in bytes */
    struct malloc_track_struct *prev;
    struct malloc_track_struct *next;
};
typedef struct malloc_track_struct malloc_track_t;
malloc_track_t malloc_track_root;       /* list sentinel (see malloc_wrapper) */
size_t malloc_total = 0;                /* bytes currently allocated */
/* Stack of peak-usage measurements (see push_memtrack/pop_memtrack). */
#define MEMTRACK_STACK_CAPACITY 256
size_t memtrack_stack[MEMTRACK_STACK_CAPACITY];
index_t memtrack_stack_top = -1;        /* -1 == empty */
/* Allocate `size` bytes, register the block in the tracking list, and
 * update every active peak-memory measurement. Aborts on allocation
 * failure. Returns the user pointer. */
void *malloc_wrapper(size_t size)
{
    if(malloc_balance == 0) {
        /* No live allocations: (re)initialize the sentinel ring. */
        malloc_track_root.prev = &malloc_track_root;
        malloc_track_root.next = &malloc_track_root;
    }
    void *p = malloc(size);
    if(p == NULL)
        ERROR("malloc fails");
    malloc_track_t *t = (malloc_track_t *) malloc(sizeof(malloc_track_t));
    if(t == NULL)
        ERROR("malloc fails");  /* fix: tracker-node allocation was unchecked */
    malloc_balance++;
    t->p = p;
    t->size = size;
    pnlinkprev(&malloc_track_root, t);
    malloc_total += size;
    /* Raise the recorded peak of every open memtrack scope. */
    for(index_t i = 0; i <= memtrack_stack_top; i++)
        if(memtrack_stack[i] < malloc_total)
            memtrack_stack[i] = malloc_total;
    return p;
}
/* Release a pointer obtained from MALLOC: locate its tracking node,
 * subtract its size from the running total, and free both. Aborts if p
 * was never tracked. */
void free_wrapper(void *p)
{
    malloc_track_t *node;
    for(node = malloc_track_root.next;
        node != &malloc_track_root && node->p != p;
        node = node->next)
        ;
    if(node == &malloc_track_root)
        ERROR("FREE issued on a non-tracked pointer %p", p);
    malloc_total -= node->size;
    pnunlink(node);
    free(node);
    free(p);
    malloc_balance--;
}
/* Tracked allocation of an n-entry index table. */
index_t *alloc_idxtab(index_t n)
{
    return (index_t *) MALLOC(n*sizeof(index_t));
}
/* Open a new peak-memory measurement scope, seeded with the current
 * total (malloc_wrapper raises it as allocations occur). */
void push_memtrack(void)
{
    assert(memtrack_stack_top + 1 < MEMTRACK_STACK_CAPACITY);
    memtrack_stack_top++;
    memtrack_stack[memtrack_stack_top] = malloc_total;
}
/* Close the innermost measurement scope and return its peak usage. */
size_t pop_memtrack(void)
{
    assert(memtrack_stack_top >= 0);
    size_t peak = memtrack_stack[memtrack_stack_top];
    memtrack_stack_top--;
    return peak;
}
/* Bytes currently allocated through MALLOC (tracked total). */
size_t current_mem(void)
{
    return malloc_total;
}
/* Convert a byte count to (binary) gibibytes. */
double inGiB(size_t s)
{
    const double bytes_per_gib = (double) (1 << 30);
    return ((double) s) / bytes_per_gib;
}
/* Emit the current tracked allocation total, e.g. "{curr: 1.23GiB}". */
void print_current_mem(void)
{
    fprintf(stdout, "{curr: %.2lfGiB}", inGiB(current_mem()));
    fflush(stdout);
}
/* Close the innermost memtrack scope and emit its peak usage,
 * e.g. "{peak: 1.23GiB}". */
void print_pop_memtrack(void)
{
    fprintf(stdout, "{peak: %.2lfGiB}", inGiB(pop_memtrack()));
    fflush(stdout);
}
/******************************************************** Timing subroutines. */
#define TIME_STACK_CAPACITY 256
double start_stack[TIME_STACK_CAPACITY];   /* nested start times (wall clock) */
index_t start_stack_top = -1;              /* -1 == empty */
/* Begin a nested wall-clock measurement. */
void push_time(void)
{
    assert(start_stack_top + 1 < TIME_STACK_CAPACITY);
    start_stack_top++;
    start_stack[start_stack_top] = omp_get_wtime();
}
/* End the innermost measurement; returns the elapsed time in
 * milliseconds. */
double pop_time(void)
{
    double now = omp_get_wtime();
    assert(start_stack_top >= 0);
    double begun = start_stack[start_stack_top];
    start_stack_top--;
    return 1000.0*(now-begun);
}
/******************************************************************* Sorting. */
/* In-place ascending shellsort of a[0..n-1] using the Knuth gap
 * sequence 1, 4, 13, 40, ... */
void shellsort(index_t n, index_t *a)
{
    index_t gap = 1;
    while(gap < n/3)
        gap = 3*gap + 1;
    while(gap > 0) {
        /* Gapped insertion sort. */
        for(index_t idx = gap; idx < n; idx++) {
            index_t key = a[idx];
            index_t pos = idx;
            while(pos >= gap && a[pos-gap] > key) {
                a[pos] = a[pos-gap];
                pos -= gap;
            }
            a[pos] = key;
        }
        gap /= 3;
    }
}
/* Heap navigation for 1-based indexing. Fully parenthesized: the old
 * definitions expanded bare (e.g. `LEFT(2)+1` parsed as `2<<(1+1)`). */
#define LEFT(x) ((x)<<1)
#define RIGHT(x) (((x)<<1)+1)
#define PARENT(x) ((x)>>1)
/* In-place ascending heapsort of a[0..n-1] via a 1-based max-heap. */
void heapsort_indext(index_t n, index_t *a)
{
    /* Shift index origin from 0 to 1 for convenience. */
    a--;
    /* Build heap: sift each new element up toward the root. */
    for(index_t i = 2; i <= n; i++) {
        index_t x = i;
        while(x > 1) {
            index_t y = PARENT(x);
            if(a[x] <= a[y]) {
                /* heap property ok */
                break;
            }
            /* Exchange a[x] and a[y] to enforce heap property */
            index_t t = a[x];
            a[x] = a[y];
            a[y] = t;
            x = y;
        }
    }
    /* Repeat delete max and insert: move the max to the shrinking tail
       and sift the displaced element back down into the heap. */
    for(index_t i = n; i > 1; i--) {
        index_t t = a[i];
        /* Delete max */
        a[i] = a[1];
        /* Insert t */
        index_t x = 1;
        index_t y, z;
        while((y = LEFT(x)) < i) {
            z = RIGHT(x);
            /* Make y the larger child. */
            if(z < i && a[y] < a[z]) {
                index_t s = z;
                z = y;
                y = s;
            }
            /* Invariant: a[y] >= a[z] */
            if(t >= a[y]) {
                /* ok to insert here without violating heap property */
                break;
            }
            /* Move a[y] up the heap */
            a[x] = a[y];
            x = y;
        }
        /* Insert here */
        a[x] = t;
    }
}
/*************************************************** Random numbers and such. */
/* Pseudorandom index built from two rand() draws (31 bits each on
 * typical platforms) XOR-combined across a 31-bit shift. Note the
 * evaluation order of the two rand() calls is unspecified. */
index_t irand(void)
{
    return (((index_t) rand())<<31)^((index_t) rand());
}
/***************************************************** (Parallel) prefix sum. */
/* In-place exclusive prefix sum over a[0..n-1] where every element
 * additionally contributes the constant k (i.e. a[u] becomes
 * sum_{v<u} (a_old[v] + k)); returns the grand total. The parallel
 * build uses a two-pass blocked scan: per-block totals, a serial scan
 * of the totals, then a rescan of each block. */
index_t prefixsum(index_t n, index_t *a, index_t k)
{
#ifdef BUILD_PARALLEL
    index_t s[MAX_THREADS];
    index_t nt = num_threads();
    assert(nt < MAX_THREADS);    /* the s[nt] write below must stay in bounds */
    index_t length = n;
    index_t block_size = length/nt;
    /* Pass 1: each thread totals its block (the last block absorbs the
       remainder). */
#pragma omp parallel for
    for(index_t t = 0; t < nt; t++) {
        index_t start = t*block_size;
        index_t stop = (t == nt-1) ? length-1 : (start+block_size-1);
        index_t tsum = (stop-start+1)*k;
        for(index_t u = start; u <= stop; u++)
            tsum += a[u];
        s[t] = tsum;
    }
    /* Serial scan of the block totals: s[t] becomes the offset of block t. */
    index_t run = 0;
    for(index_t t = 1; t <= nt; t++) {
        index_t v = s[t-1];
        s[t-1] = run;
        run += v;
    }
    s[nt] = run;
    /* Pass 2: rescan each block starting from its offset. */
#pragma omp parallel for
    for(index_t t = 0; t < nt; t++) {
        index_t start = t*block_size;
        index_t stop = (t == nt-1) ? length-1 : (start+block_size-1);
        index_t trun = s[t];
        for(index_t u = start; u <= stop; u++) {
            index_t tv = a[u];
            a[u] = trun;
            trun += tv + k;
        }
        assert(trun == s[t+1]);
    }
#else
    index_t run = 0;
    for(index_t u = 0; u < n; u++) {
        index_t tv = a[u];
        a[u] = run;
        run += tv + k;
    }
#endif
    return run;
}
/************************************************************* Parallel sum. */
/* Sum of a[0..n-1]; blocked over threads in parallel builds, with a
 * serial reduction of the per-thread partial sums. */
index_t parallelsum(index_t n, index_t *a)
{
    index_t sum = 0;
#ifdef BUILD_PARALLEL
    index_t s[MAX_THREADS];
    index_t nt = num_threads();
    assert(nt < MAX_THREADS);
    index_t length = n;
    index_t block_size = length/nt;
#pragma omp parallel for
    for(index_t t = 0; t < nt; t++) {
        index_t start = t*block_size;
        /* The last block absorbs the division remainder. */
        index_t stop = (t == nt-1) ? length-1 : (start+block_size-1);
        index_t tsum = 0;
        for(index_t u = start; u <= stop; u++)
            tsum += a[u];
        s[t] = tsum;
    }
    for(index_t t = 0; t < nt; t++)
        sum += s[t];
#else
    for(index_t i = 0; i < n; i++) {
        sum += a[i];
    }
#endif
    return sum;
}
// count number of non-zero values in an array
/* Returns the number of nonzero entries of a[0..n-1]; blocked over
 * threads in parallel builds. */
index_t parallelcount(index_t n, index_t *a)
{
    index_t total_cnt = 0;
#ifdef BUILD_PARALLEL
    index_t nt = num_threads();
    index_t block_size = n/nt;
    index_t *cnt_nt = alloc_idxtab(nt);
#pragma omp parallel for
    for(index_t th = 0; th <nt; th++) {
        index_t start = th*block_size;
        index_t stop = (th == nt-1) ? n-1 : (start+block_size-1);
        index_t cnt = 0;
        for(index_t i = start; i <= stop; i++)
            cnt += (a[i] ? 1 : 0);
        cnt_nt[th] = cnt;
    }
    for(index_t th = 0; th < nt; th++)
        total_cnt += cnt_nt[th];
    FREE(cnt_nt);   /* fix: the per-thread counter table was leaked */
#else
    for(index_t i = 0; i < n; i++)
        total_cnt += (a[i] ? 1 : 0);
#endif
    return total_cnt;
}
/************************ Search for an interval of values in a sorted array. */
/* Locate the (contiguous) run of values in the sorted array a[0..n-1]
 * that lie in [lo_val, hi_val]. On success, sets *iv_start/*iv_end to
 * the first/last index of the run and returns its length. Returns 0
 * when no value falls in the interval (for n == 0 only *iv_start is
 * set; otherwise *iv_start/*iv_end are set from the failed search). */
index_t get_interval(index_t n, index_t *a,
                     index_t lo_val, index_t hi_val,
                     index_t *iv_start, index_t *iv_end)
{
    assert(n >= 0);
    if(n == 0) {
        *iv_start = 0;
        return 0;
    }
    assert(lo_val <= hi_val);
    // find first element in interval (if any) with binary search
    index_t lo = 0;
    index_t hi = n-1;
    // at or above lo, and at or below hi (if any)
    while(lo < hi) {
        index_t mid = (lo+hi)/2;    // lo <= mid < hi
        index_t v = a[mid];
        if(hi_val < v) {
            hi = mid-1;             // at or below hi (if any)
        } else {
            if(v < lo_val)
                lo = mid+1;         // at or above lo (if any), lo <= hi
            else
                hi = mid;           // at or below hi (exists)
        }
        // 0 <= lo <= n-1
    }
    if(a[lo] < lo_val || a[lo] > hi_val) {
        // array contains no values in interval
        if(a[lo] < lo_val) {
            lo++;
            assert(lo == n || a[lo+1] > hi_val);
        } else {
            assert(lo == 0 || a[lo-1] < lo_val);
        }
        *iv_start = lo;
        *iv_end = hi;
        return 0;
    }
    assert(lo_val <= a[lo] && a[lo] <= hi_val);
    *iv_start = lo;
    // find interval end (last index in interval) with binary search
    lo = 0;
    hi = n-1;
    // last index (if any) is at or above lo, and at or below hi
    while(lo < hi) {
        index_t mid = (lo+hi+1)/2;  // lo < mid <= hi
        index_t v = a[mid];
        if(hi_val < v) {
            hi = mid-1;             // at or below hi, lo <= hi
        } else {
            if(v < lo_val)
                lo = mid+1;         // at or above lo
            else
                lo = mid;           // at or above lo, lo <= hi
        }
    }
    assert(lo == hi);
    *iv_end = lo;                   // lo == hi
    return 1+*iv_end-*iv_start;     // return cut size
}
/******************************************************************** Stack. */
/* Node of the DFS-style traversal stack. Field names per the getters
 * below: u is a vertex and t a timestamp; l is presumably the path
 * length/level — TODO confirm with callers outside this view. */
typedef struct stack_node {
    index_t u;
    index_t l;
    index_t t;
} stack_node_t;
/* Fixed-capacity stack of stack_node_t. */
typedef struct stack {
    index_t size; // size of stack
    index_t n; // number of elements
    stack_node_t *a;
}stk_t;
/* Allocate an empty stack with room for `size` nodes (untracked
 * malloc, matching stack_free). Aborts on allocation failure. */
stk_t * stack_alloc(index_t size)
{
    stk_t *s = (stk_t *) malloc(sizeof(stk_t));
    if(s == NULL)
        ERROR("malloc fails");      /* fix: was unchecked */
    s->size = size;
    s->n = 0;
    s->a = (stack_node_t *) malloc(s->size*sizeof(stack_node_t));
    if(s->a == NULL)
        ERROR("malloc fails");      /* fix: was unchecked */
#ifdef DEBUG
    /* Poison the full capacity. The original loop ran to s->n, which is
       always 0 here, so it never executed. */
    for(index_t i = 0; i < s->size; i++) {
        stack_node_t *e = s->a + i;
        e->u = UNDEFINED;
        e->l = UNDEFINED;
        e->t = UNDEFINED;
    }
#endif
    return s;
}
/* Release the node array and the stack header. */
void stack_free(stk_t *s)
{
    free(s->a);
    free(s);
}
/* Copy *e_in onto the top of the stack; capacity is fixed. */
void stack_push(stk_t *s, stack_node_t *e_in)
{
    assert(s->n < s->size);
    s->a[s->n] = *e_in;
    s->n++;
}
/* Remove the top node, copying it into *e_out. Debug builds poison
 * the vacated slot. */
void stack_pop(stk_t *s, stack_node_t *e_out)
{
    assert(s->n > 0);
    s->n--;
    stack_node_t *top = s->a + s->n;
    *e_out = *top;
#ifdef DEBUG
    top->u = UNDEFINED;
    top->l = UNDEFINED;
    top->t = UNDEFINED;
#endif
}
/* Copy the top node into *e_out without removing it. */
void stack_top(stk_t *s, stack_node_t *e_out)
{
    /* fix: was `assert(s->n >= 0)`, which is vacuous and allowed an
       out-of-bounds read of a[-1] on an empty stack. */
    assert(s->n > 0);
    stack_node_t *e = s->a + s->n-1;
    e_out->u = e->u;
    e_out->l = e->l;
    e_out->t = e->t;
}
/* Discard all elements (capacity and storage are kept). */
void stack_empty(stk_t *s)
{
    s->n = 0;
}
/* Copy the vertex field of every stacked node into uu[0..n-1],
 * bottom to top. */
void stack_get_vertices(stk_t *s, index_t *uu)
{
    for(index_t idx = 0; idx < s->n; idx++)
        uu[idx] = s->a[idx].u;
}
/* Copy the timestamp field of every stacked node into tt[0..n-1],
 * bottom to top. */
void stack_get_timestamps(stk_t *s, index_t *tt)
{
    for(index_t idx = 0; idx < s->n; idx++)
        tt[idx] = s->a[idx].t;
}
#ifdef DEBUG
/* Debug dump of the whole stack: capacity, fill, and each node as
 * [u, l, t] from bottom to top. */
void print_stack(stk_t *s)
{
    fprintf(stdout, "-----------------------------------------------\n");
    fprintf(stdout, "print stack\n");
    fprintf(stdout, "-----------------------------------------------\n");
    fprintf(stdout, "size: %ld\n", s->size);
    fprintf(stdout, "n: %ld\n", s->n);
    fprintf(stdout, "a: ");
    for(index_t i = 0; i < s->n; i++) {
        stack_node_t *e = s->a + i;
        fprintf(stdout, "[%ld, %ld, %ld]%s", e->u, e->l, e->t, (i==s->n-1)?"\n":" ");
    }
    fprintf(stdout, "-----------------------------------------------\n");
}
/* Debug dump of a single node as [u, l, t]. */
void print_stacknode(stack_node_t *e)
{
    fprintf(stdout, "print stack-node: [%ld, %ld, %ld]\n", e->u, e->l, e->t);
}
#endif
/****************************************************************** Sieving. */
/* Instrumentation counters updated by the sieve/path routines. */
long long int num_muls;      /* finite-field multiplications performed */
long long int trans_bytes;   /* bytes transferred (bandwidth accounting) */
/* SIMD lines needed to hold one scalar per shade. */
#define SHADE_LINES ((MAX_SHADES+SCALARS_IN_LINE-1)/SCALARS_IN_LINE)
typedef unsigned int shade_map_t;   /* bit i set => the vertex carries shade i */
/* Precompute the per-vertex sieve lines d_x[u] used by the
 * inclusion--exclusion sieve (Eq. (3) of the accompanying paper; see
 * temppath_oracle). For each vertex u and each of the g lanes a — lane
 * a standing for the label subset with bit pattern pfx+a — a random
 * field element is accumulated per label j, gated by u's shade map.
 * Deterministic for a fixed `seed`. */
void constrained_sieve_pre(index_t n,
                           index_t k,
                           index_t g,
                           index_t pfx,
                           index_t num_shades,
                           shade_map_t *d_s,
                           ffprng_scalar_t seed,
                           line_array_t *d_x)
{
    assert(g == SCALARS_IN_LINE);
    assert(num_shades <= MAX_SHADES);
    /* Shared random weight lines w[j][shade], drawn once from `base`. */
    line_t wdj[SHADE_LINES*MAX_K];
    ffprng_t base;
    FFPRNG_INIT(base, seed);
    for(index_t j = 0; j < k; j++) {
        for(index_t dl = 0; dl < SHADE_LINES; dl++) {
            index_t jsdl = j*SHADE_LINES+dl;
            LINE_SET_ZERO(wdj[jsdl]);
            for(index_t a = 0; a < SCALARS_IN_LINE; a++) {
                ffprng_scalar_t rnd;
                FFPRNG_RAND(rnd, base);
                scalar_t rs = (scalar_t) rnd;
                LINE_STORE_SCALAR(wdj[jsdl], a, rs); // W: [cached]
            }
        }
    }
    index_t nt = num_threads();
    index_t length = n;
    index_t block_size = length/nt;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t t = 0; t < nt; t++) {
        ffprng_t gen;
        index_t start = t*block_size;
        index_t stop = (t == nt-1) ? length-1 : (start+block_size-1);
        /* Fast-forward the generator so each thread's stream matches the
           serial draw order for its vertex range. */
        FFPRNG_FWD(gen, SHADE_LINES*SCALARS_IN_LINE*start, base);
        line_t vd[SHADE_LINES];
        for(index_t j = 0; j < SHADE_LINES; j++) {
            LINE_SET_ZERO(vd[j]); // to cure an annoying compiler warning
        }
        for(index_t u = start; u <= stop; u++) {
            scalar_t uu[MAX_K];
            shade_map_t shades_u = d_s[u]; // R: n shade_map_t
            /* Per-vertex random values, zero-masked for shades u lacks. */
            for(index_t dl = 0; dl < SHADE_LINES; dl++) {
                for(index_t a = 0; a < SCALARS_IN_LINE; a++) {
                    index_t d = dl*SCALARS_IN_LINE + a;
                    ffprng_scalar_t rnd;
                    FFPRNG_RAND(rnd, gen);
                    scalar_t rs = (scalar_t) rnd;
                    rs = rs & (-((scalar_t)((shades_u >> d)&(d < num_shades))));
                    LINE_STORE_SCALAR(vd[dl], a, rs); // W: [cached]
                }
            }
            /* uu[j] = <w[j], v> over all shade lines. */
            for(index_t j = 0; j < k; j++) {
                scalar_t uj;
                SCALAR_SET_ZERO(uj);
                for(index_t dl = 0; dl < SHADE_LINES; dl++) {
                    index_t jsdl = j*SHADE_LINES+dl;
                    line_t ln;
                    LINE_MUL(ln, wdj[jsdl], vd[dl]); // R: [cached]
                                                     // MUL: n*SHADE_LINES*g*k
                    scalar_t lns;
                    LINE_SUM(lns, ln);
                    SCALAR_ADD(uj, uj, lns);
                }
                uu[j] = uj;
            }
            /* Lane a of the output: sum of uu[j] over labels j present in
               the bit pattern ap = pfx+a (lanes beyond 2^k map to 0). */
            line_t ln;
            LINE_SET_ZERO(ln);
            for(index_t a = 0; a < SCALARS_IN_LINE; a++) {
                index_t ap = a < (1L << k) ? pfx+a : 0;
                scalar_t xua;
                SCALAR_SET_ZERO(xua);
                for(index_t j = 0; j < k; j++) {
                    scalar_t z_uj = uu[j]; // R: [cached]
                    z_uj = z_uj & (-((scalar_t)(((ap) >> j)&1)));
                    SCALAR_ADD(xua, xua, z_uj);
                }
                LINE_STORE_SCALAR(ln, a, xua);
            }
            LINE_STORE(d_x, u, ln); // W: ng scalar_t
        }
    }
    num_muls += n*SHADE_LINES*g*k;
    trans_bytes += sizeof(scalar_t)*n*g + sizeof(shade_map_t)*n;
}
/***************************************************************** Line sum. */
/* Field-sum of every scalar in the first l lines of d_s; blocked over
 * threads in parallel builds, with a serial reduction of the
 * per-thread partials. */
scalar_t line_sum(index_t l,
                  index_t g,
                  line_array_t *d_s)
{
    index_t nt = num_threads();
    index_t block_size = l/nt;
    assert(nt < MAX_THREADS);
    scalar_t ts[MAX_THREADS];
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t t = 0; t < nt; t++) {
        SCALAR_SET_ZERO(ts[t]);
        index_t start = t*block_size;
        index_t stop = (t == nt-1) ? l-1 : (start+block_size-1);
        line_t ln;
        line_t acc;
        LINE_SET_ZERO(acc);
        for(index_t i = start; i <= stop; i++) {
            LINE_LOAD(ln, d_s, i); // R: lg scalar_t
            LINE_ADD(acc, acc, ln);
        }
        scalar_t lsum;
        LINE_SUM(lsum, acc);
        ts[t] = lsum;
    }
    scalar_t sum;
    SCALAR_SET_ZERO(sum);
    for(index_t t = 0; t < nt; t++) {
        SCALAR_ADD(sum, sum, ts[t]);
    }
    trans_bytes += sizeof(scalar_t)*l*g;
    return sum;
}
/* XOR each line's lane-sum into the per-vertex accumulator out[i],
 * for i in 0..l-1; blocked over threads in parallel builds. */
void vertex_acc(index_t l, // n
                index_t g, // g
                index_t stride, // k
                line_array_t *d_s,
                scalar_t *out)
{
    index_t nt = num_threads();
    index_t block_size = l/nt;
    assert(nt < MAX_THREADS);
    //scalar_t ts[MAX_THREADS];
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t th = 0; th < nt; th++) {
        //SCALAR_SET_ZERO(ts[t]);
        index_t start = th*block_size;
        index_t stop = (th == nt-1) ? l-1 : (start+block_size-1);
        line_t ln;
        scalar_t lsum;
        for(index_t i = start; i <= stop; i++) {
            LINE_LOAD(ln, d_s, i); // R: lg scalar_t
            LINE_SUM(lsum, ln);
            out[i] ^= lsum; // R: scalar_t, W: scalar_t
        }
    }
    //scalar_t sum;
    //SCALAR_SET_ZERO(sum);
    //for(index_t t = 0; t < nt; t++) {
    //    SCALAR_ADD(sum, sum, ts[t]);
    //}
    trans_bytes += sizeof(scalar_t)*(l*g+2);
}
/* As line_sum, but sums line i*stride for i in 0..l-1 (i.e. every
 * stride-th line of d_s). */
scalar_t line_sum_stride(index_t l,
                         index_t g,
                         index_t stride,
                         line_array_t *d_s)
{
    index_t nt = num_threads();
    index_t block_size = l/nt;
    assert(nt < MAX_THREADS);
    scalar_t ts[MAX_THREADS];
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t th = 0; th < nt; th++) {
        SCALAR_SET_ZERO(ts[th]);
        index_t start = th*block_size;
        index_t stop = (th == nt-1) ? l-1 : (start+block_size-1);
        line_t ln;
        line_t acc;
        LINE_SET_ZERO(acc);
        for(index_t i = start; i <= stop; i++) {
            index_t ii = i*stride;
            LINE_LOAD(ln, d_s, ii); // R: lg scalar_t
            LINE_ADD(acc, acc, ln);
        }
        scalar_t lsum;
        LINE_SUM(lsum, acc);
        ts[th] = lsum;
    }
    scalar_t sum;
    SCALAR_SET_ZERO(sum);
    for(index_t th = 0; th < nt; th++) {
        SCALAR_ADD(sum, sum, ts[th]);
    }
    trans_bytes += sizeof(scalar_t)*l*g;
    return sum;
}
/* As vertex_acc, but reads line i*stride for each accumulator out[i]. */
void vertex_acc_stride(index_t l,
                       index_t g,
                       index_t stride,
                       line_array_t *d_s,
                       scalar_t *out)
{
    index_t nt = num_threads();
    index_t block_size = l/nt;
    assert(nt < MAX_THREADS);
    //scalar_t ts[MAX_THREADS];
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t th = 0; th < nt; th++) {
        //SCALAR_SET_ZERO(ts[th]);
        index_t start = th*block_size;
        index_t stop = (th == nt-1) ? l-1 : (start+block_size-1);
        line_t ln;
        scalar_t lsum;
        for(index_t i = start; i <= stop; i++) {
            index_t ii = i*stride;
            LINE_LOAD(ln, d_s, ii); // R: lg scalar_t
            LINE_SUM(lsum, ln);
            out[i] ^= lsum; // R: scalar_t, W: scalar_t
        }
    }
    //scalar_t sum;
    //SCALAR_SET_ZERO(sum);
    //for(index_t th = 0; th < nt; th++) {
    //    SCALAR_ADD(sum, sum, ts[th]);
    //}
    trans_bytes += sizeof(scalar_t)*(l*g+2);
}
/***************************************** k-temppath generating function. */
#ifdef DEBUG
/* Debug helper: print the SCALARS_IN_LINE lanes of a SIMD line. */
#define PRINT_LINE(source) \
{ \
    scalar_t *s = (scalar_t *)&source; \
    for(index_t i = 0; i < SCALARS_IN_LINE; i++) { \
        fprintf(stdout, SCALAR_FORMAT_STRING"%s", \
                (long) s[i], \
                i==SCALARS_IN_LINE-1 ? "\n":" "); \
    } \
}
#endif
#if BUILD_GENF == 2
/* Flat line index of cell (vertex u, time i) in a layout that stores
 * tmax+1 consecutive lines per vertex. The n, k, and l arguments are
 * unused but kept for a uniform call shape across index macros. */
#define TEMP_PATH_LINE_IDX2(n, k, tmax, u, l, i) (((u)*(tmax+1))+(i))
#ifdef DEBUG
/* Debug dump of the per-vertex generating-function lines in d_s:
 * for every vertex u, print its tmax+1 lines and their lane sums. */
void print_ds(index_t n,
              index_t tmax,
              line_array_t *d_s)
{
    for(index_t u = 0; u < n; u++) {
        fprintf(stdout, "--------------------------------------------------\n");
        fprintf(stdout, "u: %ld\n", u+1);
        fprintf(stdout, "--------------------------------------------------\n");
        for(index_t i = 0; i <= tmax; i++) {
            fprintf(stdout, "%ld: ", i);
            /* fix: the arguments were passed as (..., 1, i, u), which made
               the macro index vertex 1 at offset u; the intended cell is
               (vertex u, time i). The k/l slots are ignored by the macro. */
            index_t i_uli = TEMP_PATH_LINE_IDX2(n, k, tmax, u, 1, i);
            line_t p_uli;
            LINE_LOAD(p_uli, d_s, i_uli);
            PRINT_LINE(p_uli);
            scalar_t sum;
            LINE_SUM(sum, p_uli);
            fprintf(stdout, "line sum: "SCALAR_FORMAT_STRING"\n",sum);
        }
    }
}
#endif
/* Per-(vertex, time) localization: XOR the lane-sum of line (u, i) at
 * level k into out[u*(tmax+1)+i], and return the field-sum of all these
 * lane sums. Blocked over threads in parallel builds. */
scalar_t vloc_finegrain(index_t n,
                        index_t g,
                        index_t k,
                        index_t tmax,
                        line_array_t *d_s,
                        scalar_t *out)
{
    index_t nt = num_threads();
    index_t block_size = n/nt;
    assert(nt < MAX_THREADS);
    scalar_t tsum[MAX_THREADS];
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t th = 0; th < nt; th++) {
        SCALAR_SET_ZERO(tsum[th]);
        index_t start = th*block_size;
        index_t stop = (th == nt-1) ? n-1 : (start+block_size-1);
        line_t ln;
        scalar_t lsum;
        scalar_t acc;
        SCALAR_SET_ZERO(acc);
        for(index_t u = start; u <= stop; u++) {
            index_t i_ul0 = TEMP_PATH_LINE_IDX2(n, k, tmax, u, k, 0);
            index_t i_u0 = (u*(tmax+1));
            for(index_t i = 0; i <= tmax; i++) {
                index_t i_uli = i_ul0 + i;
                index_t i_ui = i_u0 + i;
                LINE_LOAD(ln, d_s, i_uli);
                LINE_SUM(lsum, ln);
                out[i_ui] ^= lsum;
                acc ^= lsum;
            }
        }
        tsum[th] = acc;
    }
    scalar_t sum;
    SCALAR_SET_ZERO(sum);
    for(index_t th = 0; th < nt; th++)
        SCALAR_ADD(sum, sum, tsum[th]);
    //TODO: update bandwidth computation
    trans_bytes += LINE_ARRAY_SIZE((tmax+1)*n*g);
    return sum;
}
/* Zero all (tmax+1) lines of every vertex in d_s. */
void init_ds(index_t n,
             index_t k,
             index_t tmax,
             line_array_t *d_s)
{
    line_t zero_line;
    LINE_SET_ZERO(zero_line);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++) {
        index_t base = TEMP_PATH_LINE_IDX2(n, k, tmax, u, 1, 0);
        for(index_t off = 0; off <= tmax; off++)
            LINE_STORE(d_s, base + off, zero_line);
    }
}
/* One (level l, time i=t) round of the k-temporal-path recurrence:
 * for every vertex u, combine the level-(l-1) lines of its in-neighbors
 * v over the admissible waiting times j (bounded by rtime[v]), weight
 * each by a fresh random field element, multiply by u's sieve line, and
 * store the result at level l. d_l1 holds level l-1, d_l receives
 * level l. */
void k_temp_path_round(index_t n,
                       index_t m,
                       index_t k,
                       index_t tmax,
                       index_t rtmax,
                       index_t t,
                       index_t g,
                       index_t l,
                       index_t *d_pos,
                       index_t *d_adj,
                       index_t yl_seed,
                       index_t *rtime,
                       line_array_t *d_x,
                       line_array_t *d_l1,
                       line_array_t *d_l)
{
    assert(g == SCALARS_IN_LINE);
    index_t nt = num_threads();
    index_t length = n;
    index_t block_size = length/nt;
    index_t i = t;
    ffprng_t y_base;
    FFPRNG_INIT(y_base, yl_seed);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t th = 0; th < nt; th++) {
        index_t start = th*block_size;
        index_t stop = (th == nt-1) ? length-1 : (start+block_size-1);
        ffprng_t y_gen;
        // forward the pseudo-random number generator
        index_t y_pos = (d_pos[start] - start) * rtmax;
        FFPRNG_FWD(y_gen, y_pos, y_base);
        for(index_t u = start; u <= stop; u++) {
            index_t pu = d_pos[n*(i-1)+u];
            index_t deg = d_adj[pu];
            line_t p_uli;
            LINE_SET_ZERO(p_uli);
            for(index_t d = 1; d <= deg; d++) {
                index_t v = d_adj[pu+d];
                /* Admissible waiting span at v, capped by its rest time. */
                index_t dv = (i > rtime[v]) ? rtime[v] : i-1; // i-j > 0
                index_t i_vl1idv = TEMP_PATH_LINE_IDX2(n, k, tmax, v, l-1, i-dv);
                for(index_t j = 0; j <= dv; j++) {
                    line_t p_vl1ij;
                    index_t i_vl1ij = i_vl1idv + j;
                    LINE_LOAD(p_vl1ij, d_l1, i_vl1ij);
#ifdef BUILD_PREFETCH
                    // prefetch next line P_{v,l-1,i-j+1}
                    /* fix: `i_vl1idv + (j==dv) ? dv : j+1` parsed as
                       `(i_vl1idv + (j==dv)) ? dv : j+1` because `+` binds
                       tighter than `?:`, prefetching the wrong address. */
                    index_t i_vl1ij1 = i_vl1idv + ((j==dv) ? dv : j+1);
                    LINE_PREFETCH(d_l1, i_vl1ij1);
#endif
                    ffprng_scalar_t rnd;
                    FFPRNG_RAND(rnd, y_gen);
                    scalar_t y_uvlij = (scalar_t) rnd;
                    line_t sy;
                    LINE_MUL_SCALAR(sy, p_vl1ij, y_uvlij);
                    LINE_ADD(p_uli, p_uli, sy);
                }
            }
            /* Gate by u's sieve line and commit at level l. */
            line_t xu;
            LINE_LOAD(xu, d_x, u);
            LINE_MUL(p_uli, p_uli, xu);
            index_t i_uli = TEMP_PATH_LINE_IDX2(n, k, tmax, u, l, i);
            LINE_STORE(d_l, i_uli, p_uli); // W: ng scalar_t
        }
    }
    //TODO: update bandwidth computation
    // total edges at time `i`
    index_t m_i = d_pos[n*(i-1) + n-1] - d_pos[n*(i-1)] - (n-1) +
                  d_adj[d_pos[n*(i-1)+(n-1)]];
    trans_bytes += ((2*n*tmax)+m_i)*sizeof(index_t) + (2*n+m_i)*g*sizeof(scalar_t);
    num_muls += (n*g+m_i);
}
/* Evaluate the k-temporal-path generating function (Eq. (4)): seed
 * level 1 with the sieve lines d_x, run the round recurrence for
 * levels 2..k over all admissible times, and return the localized sum
 * (also accumulated per (vertex, time) into vsum). Uses two ping-pong
 * line arrays of (tmax+1)*n lines each. */
scalar_t k_temp_path(index_t n,
                     index_t m,
                     index_t k,
                     index_t tmax,
                     index_t rtmax,
                     index_t g,
                     index_t vert_loc,
                     index_t *d_pos,
                     index_t *d_adj,
                     ffprng_scalar_t y_seed,
                     index_t *rtime,
                     line_array_t *d_x,
                     scalar_t *vsum)
{
    assert( g == SCALARS_IN_LINE);
    assert( k >= 1);
    line_array_t *d_l1 = (line_array_t *) MALLOC(LINE_ARRAY_SIZE((tmax+1)*n*g));
    line_array_t *d_l = (line_array_t *) MALLOC(LINE_ARRAY_SIZE((tmax+1)*n*g));
    init_ds(n, 1, tmax, d_l);
    // initialise: l = 1
    /* Level 1 is just the sieve line of each vertex, replicated over
       all tmax+1 time slots. */
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++) {
        index_t i_u10 = TEMP_PATH_LINE_IDX2(n, k, tmax, u, 1, 0);
        for(index_t i = 0; i <= tmax; i++) {
            line_t xu;
            LINE_LOAD(xu, d_x, u);
            index_t i_u1i = i_u10 + i;
            LINE_STORE(d_l1, i_u1i, xu);
        }
    }
    srand(y_seed);
    for(index_t l = 2; l <= k; l++) {
        /* A length-l path needs at least l-1 time steps. */
        for(index_t i = l-1; i <= tmax; i++) {
            ffprng_scalar_t yl_seed = irand(); // new seed for each l
            k_temp_path_round(n, m, k, tmax, rtmax, i, g, l,
                              d_pos, d_adj, yl_seed, rtime, d_x,
                              d_l1, d_l);
        }
        // swap and initialise
        line_array_t *d_temp = d_l1;
        d_l1 = d_l;
        d_l = d_temp;
        init_ds(n, 1, tmax, d_l);
    }
    // sum up
    //index_t ii = TEMP_PATH_LINE_IDX2(n, k, tmax, 1, tmax, 0);
    scalar_t sum = vloc_finegrain(n, g, k, tmax, d_l1, vsum);
    // free memory
    FREE(d_l1);
    FREE(d_l);
    return sum;
}
#endif
/************************************************************ The oracle(s). */
/* Randomized decision oracle for the restless temporal path problem:
 * runs the constrained sieve (Eq. (3)) and the generating-function
 * evaluation (Eq. (4)) over `outer` lane batches covering all 2^k label
 * subsets, accumulating per-(vertex,time) sums into master_vsum.
 * Prints the master sum, wall time, verdict, and memory figures to
 * stdout; returns nonzero iff the master sum is nonzero (i.e. a
 * witness was detected). */
index_t temppath_oracle(index_t n,
                        index_t k,
                        index_t tmax,
                        index_t rtmax,
                        index_t *h_pos,
                        index_t *h_adj,
                        index_t num_shades,
                        index_t *rtime,
                        shade_map_t *h_s,
                        ffprng_scalar_t y_seed,
                        ffprng_scalar_t z_seed,
                        index_t vert_loc,
                        scalar_t *master_vsum)
{
    push_memtrack();
    assert(k >= 1 && k < 31);
    //index_t m = h_pos[n-1]+h_adj[h_pos[n-1]]+1-n;
    /* Total edge count recovered from the last slot of the
       time-sliced adjacency structure. */
    index_t m = h_pos[n*(tmax-1)+n-1]+h_adj[h_pos[n*(tmax-1)+n-1]]+1-(n*tmax);
    index_t sum_size = 1 << k;
    index_t g = SCALARS_IN_LINE;
    index_t outer = (sum_size + g-1) / g;
    // number of iterations for outer loop
    num_muls = 0;
    trans_bytes = 0;
    index_t *d_pos = h_pos;
    index_t *d_adj = h_adj;
    line_array_t *d_x = (line_array_t *) MALLOC(LINE_ARRAY_SIZE(n*g));
    /* Run the work & time it. */
    push_time();
    scalar_t master_sum;
    SCALAR_SET_ZERO(master_sum);
    /* Clear the per-(vertex,time) accumulators. */
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++) {
        index_t i_u0 = (u*(tmax+1));
        for(index_t i = 0; i <= tmax; i++) {
            index_t i_ui = i_u0 + i;
            SCALAR_SET_ZERO(master_vsum[i_ui]);
        }
    }
    for(index_t out = 0; out < outer; out++) {
        // Eq. (3)
        constrained_sieve_pre(n, k, g, g*out, num_shades, h_s, z_seed, d_x);
#define GENF_TYPE "restless_path_genf"
        // Eq. (4)
        scalar_t sum = k_temp_path(n, m, k, tmax, rtmax, g, vert_loc,
                                   d_pos, d_adj, y_seed, rtime, d_x,
                                   master_vsum);
        SCALAR_ADD(master_sum, master_sum, sum);
    }
    double time = pop_time();
    //double trans_rate = trans_bytes / (time/1000.0);
    //double mul_rate = num_muls / time;
    FREE(d_x);
    fprintf(stdout,
            SCALAR_FORMAT_STRING
            " %.2lf ms"
            //" [%.2lfGiB/s, %.2lfGHz]"
            " %d",
            (long) master_sum,
            time,
            //trans_rate/((double) (1 << 30)),
            //mul_rate/((double) 1e6),
            master_sum != 0);
    fprintf(stdout, " ");
    print_pop_memtrack();
    fprintf(stdout, " ");
    print_current_mem();
    fflush(stdout);
    return master_sum != 0;
}
/************************************************* Rudimentary graph builder. */
// A rudimentary edge-list graph with per-edge timestamps and
// per-vertex resting times.
typedef struct
{
    index_t is_directed;   // 0 = undirected (default), 1 = directed
    index_t num_vertices;
    index_t num_edges;     // number of (u,v,t) triples currently stored
    index_t max_time;      // largest timestamp; set via graph_set_max_time
    index_t max_resttime;  // largest resting time; set via graph_set_max_resttime
    index_t edge_capacity; // allocated capacity in edges (3 index_t words each)
    index_t *edges;        // flat triples: edges[3*i]=u, [3*i+1]=v, [3*i+2]=t
    index_t *rest_time;    // per-vertex resting time, UNDEFINED until set
} graph_t;
/* Grow (or create) an index array: allocate room for m entries,
 * copy over the first m_was entries of `was`, and release the old
 * buffer. Passing was == NULL performs a plain allocation. */
static index_t *enlarge(index_t m, index_t m_was, index_t *was)
{
    assert(m >= 0 && m_was >= 0);
    index_t *grown = (index_t *) MALLOC(sizeof(index_t)*m);
    if(was != (void *) 0) {
        for(index_t j = 0; j < m_was; j++)
            grown[j] = was[j];
        FREE(was);
    }
    return grown;
}
/* Allocate an empty graph on n vertices with a small initial edge
 * capacity. All resting times start UNDEFINED.
 *
 * FIX: max_time and max_resttime were previously left uninitialized;
 * graph_set_resttime asserts rt <= g->max_resttime, so calling it
 * before graph_set_max_resttime read indeterminate memory. Both are
 * now zero-initialized (the setters must still be called before the
 * limits are meaningful). */
graph_t *graph_alloc(index_t n)
{
    assert(n >= 0);
    graph_t *g = (graph_t *) MALLOC(sizeof(graph_t));
    g->is_directed   = 0; // default: undirected graph
    g->num_vertices  = n;
    g->num_edges     = 0;
    g->max_time      = 0; // set via graph_set_max_time before use
    g->max_resttime  = 0; // set via graph_set_max_resttime before use
    g->edge_capacity = 100;
    g->edges = enlarge(3*g->edge_capacity, 0, (void *) 0);
    g->rest_time = (index_t *) MALLOC(sizeof(index_t)*n);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++)
        g->rest_time[u] = UNDEFINED;
    return g;
}
// Release all storage owned by g, then g itself.
void graph_free(graph_t *g)
{
    FREE(g->rest_time);
    FREE(g->edges);
    FREE(g);
}
// Append the timestamped edge (u,v,t), doubling the edge buffer when
// it is full. Timestamps are only checked for non-negativity here.
void graph_add_edge(graph_t *g, index_t u, index_t v, index_t t)
{
    assert(u >= 0 &&
           v >= 0 &&
           u < g->num_vertices &&
           v < g->num_vertices);
    assert(t>=0);
    //assert(t>=0 && t < g->max_time);
    if(g->num_edges == g->edge_capacity) {
        // enlarge() takes word counts: 3 index_t words per edge
        g->edges = enlarge(6*g->edge_capacity, 3*g->edge_capacity, g->edges);
        g->edge_capacity *= 2;
    }
    assert(g->num_edges < g->edge_capacity);
    index_t *slot = &g->edges[3*g->num_edges++];
    slot[0] = u;
    slot[1] = v;
    slot[2] = t;
}
// Reserve room for `cap` additional edges and return a pointer to the
// first reserved triple; the caller fills in 3*cap index_t words.
// Both num_edges and edge_capacity are advanced by cap.
index_t *graph_edgebuf(graph_t *g, index_t cap)
{
    g->edges = enlarge(3*(g->edge_capacity+cap), 3*g->edge_capacity, g->edges);
    index_t *buf = g->edges + 3*g->num_edges;
    g->edge_capacity += cap;
    g->num_edges += cap;
    return buf;
}
//void graph_set_color(graph_t *g, index_t u, index_t c)
//{
// assert(u >= 0 && u < g->num_vertices && c >= 0);
// g->colors[u] = c;
//}
// Mark the graph as directed (1) or undirected (0).
void graph_set_is_directed(graph_t *g, index_t is_dir)
{
    assert(is_dir == 0 || is_dir == 1);
    g->is_directed = is_dir;
}
// Set the number of timesteps (must be positive).
void graph_set_max_time(graph_t *g, index_t tmax)
{
    assert(tmax > 0);
    g->max_time = tmax;
}
// Set vertex u's resting time. NOTE: the assert reads g->max_resttime,
// so graph_set_max_resttime must be called before any resting time is set.
void graph_set_resttime(graph_t *g, index_t u, index_t rt)
{
    assert(u >= 0 && u < g->num_vertices && rt >= 0 && rt <= g->max_resttime);
    g->rest_time[u] = rt;
}
// Set the maximum admissible resting time (must be positive).
void graph_set_max_resttime(graph_t *g, index_t rtmax)
{
    assert(rtmax > 0);
    g->max_resttime = rtmax;
}
#ifdef DEBUG
// Dump the graph in the ASCII input format (1-based vertices and
// timestamps): the "p motif" header, "e" edge lines, "r" rest-time lines.
//
// FIX: the previous version read g->colors, a member that does not
// exist in graph_t (it is commented out), so this DEBUG build did not
// compile. The "n" color lines are no longer emitted.
void print_graph(graph_t *g)
{
    index_t n = g->num_vertices;
    index_t m = g->num_edges;
    index_t tmax = g->max_time;
    index_t rtmax = g->max_resttime;
    index_t is_dir = g->is_directed;
    fprintf(stdout, "p motif %ld %ld %ld %ld %ld\n", n, m, tmax, rtmax, is_dir);
    index_t *e = g->edges;
    for(index_t i = 0; i < 3*m; i+=3) {
        fprintf(stdout, "e %ld %ld %ld\n",
                e[i]+1, e[i+1]+1, e[i+2]+1);
    }
    index_t *rt = g->rest_time;
    for(index_t i = 0; i < n; i++)
        fprintf(stdout, "r %ld %ld\n", i+1, rt[i]);
}
#endif
/************************************* Basic motif query processing routines. */
// A temporal-path query over the time-expanded adjacency structure.
// Built by build_temppathq[_dir]; k/ns are filled in later by
// temppathq_update_shades. NOTE(review): rtmax is not assigned by the
// builders in this file although temppathq_execute passes it to the
// oracle -- verify it is set before execution.
struct temppathq_struct
{
    index_t is_stub;    // nonzero: placeholder that owns none of the arrays
    index_t n;          // number of vertices
    index_t k;          // number of vertices on the sought path
    index_t tmax;       // number of timesteps
    index_t *pos;       // n*tmax bucket offsets: vertex u at time t -> pos[n*t+u]
    index_t *adj;       // bucketed adjacency: adj[pos[..]] = degree, then neighbours
    index_t nl;         // number of entries in l
    index_t *l;         // auxiliary list (allocated empty by the builders)
    index_t ns;         // number of shades (motif colors)
    shade_map_t *shade; // per-vertex shade bitmap
    index_t rtmax;      // maximum resting time
    index_t *rtime;     // per-vertex resting times
    index_t vert_loc;   // nonzero: enable per-vertex localisation in the oracle
    scalar_t *vsum;     // per-vertex, per-timestep sums, n*(tmax+1) entries
};
typedef struct temppathq_struct temppathq_t;
// Sort each of the n adjacency buckets in place so that neighbour
// lists can be scanned deterministically. Bucket layout: adj[pos[v]]
// holds the degree, followed by the neighbours.
void adjsort(index_t n, index_t *pos, index_t *adj)
{
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t v = 0; v < n; v++) {
        index_t base = pos[v];
        heapsort_indext(adj[base], adj + base + 1);
    }
}
// Release a query object; stub queries own none of the member arrays,
// so only the struct itself is freed in that case.
void temppathq_free(temppathq_t *q)
{
    if(!q->is_stub) {
        FREE(q->vsum);
        FREE(q->rtime);
        FREE(q->shade);
        FREE(q->l);
        FREE(q->adj);
        FREE(q->pos);
    }
    FREE(q);
}
// Run the decision oracle on q with two fresh random seeds; a stub
// query trivially reports "no".
index_t temppathq_execute(temppathq_t *q)
{
    if(q->is_stub)
        return 0;
    return temppath_oracle(q->n, q->k, q->tmax, q->rtmax,
                           q->pos, q->adj, q->ns, q->rtime, q->shade,
                           irand(), irand(), q->vert_loc, q->vsum);
}
#ifdef DEBUG
// Dump the full contents of a temppathq_t (DEBUG builds only).
//
// FIXES: (1) the label list printed the list length `nl` on every row
// instead of the row index; (2) the rtmax line was missing its newline,
// gluing "rest time:" onto it.
void print_temppathq(temppathq_t *q)
{
    index_t n = q->n;
    index_t k = q->k;
    index_t tmax = q->tmax;
    index_t *pos = q->pos;
    index_t *adj = q->adj;
    fprintf(stdout, "-----------------------------------------------\n");
    fprintf(stdout, "printing temppathq\n");
    fprintf(stdout, "is_stub = %ld\n", q->is_stub);
    fprintf(stdout, "n = %ld\n", n);
    fprintf(stdout, "k = %ld\n", k);
    fprintf(stdout, "tmax = %ld\n", tmax);
    fprintf(stdout, "pos\n");
    fprintf(stdout, "----\n ");
    for(index_t i = 0; i < n*tmax; i++) {
        // one row of n offsets per timestep
        fprintf(stdout, "%ld%s", pos[i], i%n==n-1 ? "\n ":" ");
    }
    fprintf(stdout, "adjacency list:\n");
    fprintf(stdout, "---------------\n");
    for(index_t t = 0; t < tmax; t++) {
        fprintf(stdout, "t: %ld\n", t+1);
        fprintf(stdout, "---------------\n");
        index_t *pos_t = pos + n*t;
        for(index_t u = 0; u < n; u++) {
            index_t pu = pos_t[u];
            index_t nu = adj[pu];
            index_t *adj_u = adj + pu + 1;
            fprintf(stdout, "%4ld:", u+1);
            for(index_t i = 0; i < nu; i++) {
                fprintf(stdout, " %4ld", adj_u[i]+1);
            }
            fprintf(stdout, "\n");
        }
    }
    index_t nl = q->nl;
    index_t *l = q->l;
    fprintf(stdout, "nl = %ld\n", nl);
    fprintf(stdout, "l:\n");
    for(index_t i = 0; i < nl; i++)
        fprintf(stdout, "%8ld : %8ld\n", i, l[i]); // was: printed nl for every row
    index_t ns = q ->ns;
    shade_map_t *shade = q->shade;
    fprintf(stdout, "ns : %ld\n", ns);
    fprintf(stdout, "shades:\n");
    for(index_t u = 0; u < n; u++)
        fprintf(stdout, "%10ld : 0x%08X\n", u+1, shade[u]);
    index_t rtmax = q->rtmax;
    index_t *rtime = q->rtime;
    fprintf(stdout, "rtmax: %ld\n", rtmax); // was: missing newline
    fprintf(stdout, "rest time:\n");
    for(index_t u = 0; u < n; u++)
        fprintf(stdout, "%10ld : %8ld\n", u+1, rtime[u]);
    scalar_t *vsum = q->vsum;
    fprintf(stdout, "vert_loc: %ld\n", q->vert_loc);
    fprintf(stdout, "vsum:\n");
    for(index_t u = 0; u < n; u++)
        fprintf(stdout, "%10ld : "SCALAR_FORMAT_STRING"\n", u+1, vsum[u]);
    fprintf(stdout, "-----------------------------------------------\n");
}
// Print n entries of a[] on one line, prefixed by `name`; every entry
// except the -1 sentinel is shifted by `offset` (e.g. to 1-based ids).
void print_array(const char *name, index_t n, index_t *a, index_t offset)
{
    fprintf(stdout, "%s (%ld):", name, n);
    for(index_t j = 0; j < n; j++) {
        index_t val = a[j];
        fprintf(stdout, " %ld", val == -1 ? -1 : val+offset);
    }
    fprintf(stdout, "\n");
}
#endif
/******************************************************** Root query builder. */
// Query builder for directed graphs
/* Build the time-expanded REVERSE adjacency structure for a directed
 * temporal graph g and wrap it in a fresh temppathq_t.
 *
 * Layout: for timestep t (0-based) and vertex v, the bucket at
 * pos[n*t+v] in adj[] holds the in-degree of v at time t followed by
 * the tails u of all edges (u,v,t); each bucket is then sorted.
 * Resting times are copied from g. The caller owns the result
 * (release with temppathq_free). k and ns are NOT set here; callers
 * fill them via temppathq_update_shades (or directly) before running
 * the oracle.
 *
 * FIX: root->rtmax was never assigned although temppathq_execute
 * passes it to the oracle; it is now copied from g->max_resttime. */
temppathq_t *build_temppathq_dir(graph_t *g)
{
    push_memtrack();
    index_t n = g->num_vertices;
    index_t m = g->num_edges;
    index_t tmax = g->max_time;
    index_t *pos = alloc_idxtab(n*tmax);
    index_t *adj = alloc_idxtab(n*tmax+2*m);
    index_t *rtime = (index_t *) MALLOC(sizeof(index_t)*n);
    //index_t ns = k;
    shade_map_t *shade = (shade_map_t *) MALLOC(sizeof(shade_map_t)*n);
    temppathq_t *root = (temppathq_t *) MALLOC(sizeof(temppathq_t));
    root->is_stub = 0;
    root->n = g->num_vertices;
    //root->k = k;
    root->tmax = tmax;
    root->pos = pos;
    root->adj = adj;
    root->nl = 0;
    root->l = (index_t *) MALLOC(sizeof(index_t)*root->nl);
    //root->ns = ns;
    root->shade = shade;
    root->rtmax = g->max_resttime; // FIX: was left uninitialized
    root->rtime = rtime;
    root->vert_loc = 1;
    root->vsum = (scalar_t *) MALLOC(sizeof(scalar_t)*(root->n)*(root->tmax+1));
    //assert(tmax >= k-1);
    push_time();
    fprintf(stdout, "build query: ");
    fflush(stdout);
    push_time();
    // zero the occurrence counters
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n*tmax; u++)
        pos[u] = 0;
    double time = pop_time();
    fprintf(stdout, "[zero: %.2lf ms] ", time);
    fflush(stdout);
    push_time();
    index_t *e = g->edges;
#ifdef BUILD_PARALLEL
    // Parallel occurrence count
    // -- each thread is responsible for a group of bins,
    //    all threads scan the entire list of edges
    index_t nt = num_threads();
    index_t block_size = n/nt;
#pragma omp parallel for
    for(index_t th = 0; th < nt; th++) {
        index_t start = th*block_size;
        index_t stop = (th == nt-1) ? n-1 : (start+block_size-1);
        for(index_t j = 0; j < 3*m; j+=3) {
            //index_t u = e[j];
            index_t v = e[j+1];
            index_t t = e[j+2];
            index_t *pos_t = (pos + (n*t));
            //if(start <= u && u <= stop) {
            //    // I am responsible for u, record adjacency to u
            //    pos_t[u]++;
            //}
            if(start <= v && v <= stop) {
                // I am responsible for v, record adjacency to v
                pos_t[v]++;
            }
        }
    }
#else
    for(index_t j = 0; j < 3*m; j+=3) {
        //index_t u = e[j];
        index_t v = e[j+1];
        index_t t = e[j+2];
        index_t *pos_t = pos + n*t;
        //pos_t[u]++;
        pos_t[v]++;
    }
#endif
    // turn counts into bucket offsets (one extra length slot per bucket)
    index_t run = prefixsum(n*tmax, pos, 1);
    assert(run == (n*tmax+m));
    time = pop_time();
    fprintf(stdout, "[pos: %.2lf ms] ", time);
    fflush(stdout);
    push_time();
    // reset the per-bucket length slots before aggregation
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n*tmax; u++)
        adj[pos[u]] = 0;
    e = g->edges;
#ifdef BUILD_PARALLEL
    // Parallel aggregation to bins
    // -- each thread is responsible for a group of bins,
    //    all threads scan the entire list of edges
    nt = num_threads();
    block_size = n/nt;
#pragma omp parallel for
    for(index_t th = 0; th < nt; th++) {
        index_t start = th*block_size;
        index_t stop = (th == nt-1) ? n-1 : (start+block_size-1);
        for(index_t j = 0; j < 3*m; j+=3) {
            index_t u = e[j+0];
            index_t v = e[j+1];
            index_t t = e[j+2];
            //if(start <= u && u <= stop) {
            //    // I am responsible for u, record adjacency to u
            //    index_t pu = pos[n*t+u];
            //    adj[pu + 1 + adj[pu]++] = v;
            //}
            if(start <= v && v <= stop) {
                // I am responsible for v, record adjacency to v
                index_t pv = pos[n*t+v];
                adj[pv + 1 + adj[pv]++] = u;
            }
        }
    }
#else
    for(index_t j = 0; j < 3*m; j+=3) {
        index_t u = e[j+0];
        index_t v = e[j+1];
        index_t t = e[j+2];
        //index_t pu = pos[n*t+u];
        index_t pv = pos[n*t+v];
        //adj[pu + 1 + adj[pu]++] = v;
        adj[pv + 1 + adj[pv]++] = u;
    }
#endif
    time = pop_time();
    fprintf(stdout, "[adj: %.2lf ms] ", time);
    fflush(stdout);
    //print_temppathq(root);
    push_time();
    adjsort(n*tmax, pos, adj);
    time = pop_time();
    fprintf(stdout, "[adjsort: %.2lf ms] ", time);
    fflush(stdout);
    // copy shades
    //push_time();
    //#ifdef BUILD_PARALLEL
    //#pragma omp parallel for
    //#endif
    //for(index_t u = 0; u < n; u++) {
    //    shade_map_t s = 0;
    //    for(index_t j = 0; j < k; j++)
    //        if(colors[u] == kk[j])
    //            s |= 1UL << j;
    //    shade[u] = s;
    //    //fprintf(stdout, "%4ld: 0x%08X\n", u, shade[u]);
    //}
    //time = pop_time();
    //fprintf(stdout, "[shade: %.2lf ms] ", time);
    //fflush(stdout);
    // copy resting time
    push_time();
    index_t *rest_time = g->rest_time;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++)
        rtime[u] = rest_time[u];
    time = pop_time();
    fprintf(stdout, "[rtime: %.2lf ms] ", time);
    fflush(stdout);
    time = pop_time();
    fprintf(stdout, "done. [%.2lf ms] ", time);
    print_pop_memtrack();
    fprintf(stdout, " ");
    print_current_mem();
    fprintf(stdout, "\n");
    fflush(stdout);
    return root;
}
// Query builder for undirected graphs
//
/* Build the time-expanded adjacency structure for an UNDIRECTED
 * temporal graph g (each edge (u,v,t) is recorded in both endpoints'
 * buckets) and wrap it in a fresh temppathq_t. See build_temppathq_dir
 * for the bucket layout. The caller owns the result.
 *
 * FIXES: (1) vsum was allocated with sizeof(index_t) instead of
 * sizeof(scalar_t) -- a heap overflow whenever scalar_t is wider than
 * index_t (the directed builder already used sizeof(scalar_t));
 * (2) root->rtmax was never assigned although temppathq_execute
 * passes it to the oracle; it is now copied from g->max_resttime. */
temppathq_t *build_temppathq(graph_t *g)
{
    push_memtrack();
    index_t n = g->num_vertices;
    index_t m = g->num_edges;
    index_t tmax = g->max_time;
    index_t *pos = alloc_idxtab(n*tmax);
    index_t *adj = alloc_idxtab(n*tmax+2*m);
    index_t *rtime = (index_t *) MALLOC(sizeof(index_t)*n);
    //index_t ns = k;
    shade_map_t *shade = (shade_map_t *) MALLOC(sizeof(shade_map_t)*n);
    temppathq_t *root = (temppathq_t *) MALLOC(sizeof(temppathq_t));
    root->is_stub = 0;
    root->n = g->num_vertices;
    //root->k = k;
    root->tmax = tmax;
    root->pos = pos;
    root->adj = adj;
    root->nl = 0;
    root->l = (index_t *) MALLOC(sizeof(index_t)*root->nl);
    //root->ns = ns;
    root->shade = shade;
    root->rtmax = g->max_resttime; // FIX: was left uninitialized
    root->rtime = rtime;
    root->vert_loc = 0;
    // FIX: was sizeof(index_t) -- undersized when scalar_t is wider
    root->vsum = (scalar_t *) MALLOC(sizeof(scalar_t)*(root->n)*(root->tmax+1));
    //assert(tmax >= k-1);
    push_time();
    fprintf(stdout, "build query: ");
    fflush(stdout);
    push_time();
    // zero the occurrence counters
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n*tmax; u++)
        pos[u] = 0;
    double time = pop_time();
    fprintf(stdout, "[zero: %.2lf ms] ", time);
    fflush(stdout);
    push_time();
    index_t *e = g->edges;
#ifdef BUILD_PARALLEL
    // Parallel occurrence count
    // -- each thread is responsible for a group of bins,
    //    all threads scan the entire list of edges
    index_t nt = num_threads();
    index_t block_size = n/nt;
#pragma omp parallel for
    for(index_t th = 0; th < nt; th++) {
        index_t start = th*block_size;
        index_t stop = (th == nt-1) ? n-1 : (start+block_size-1);
        for(index_t j = 0; j < 3*m; j+=3) {
            index_t u = e[j];
            index_t v = e[j+1];
            index_t t = e[j+2];
            index_t *pos_t = (pos + (n*t));
            if(start <= u && u <= stop) {
                // I am responsible for u, record adjacency to u
                pos_t[u]++;
            }
            if(start <= v && v <= stop) {
                // I am responsible for v, record adjacency to v
                pos_t[v]++;
            }
        }
    }
#else
    for(index_t j = 0; j < 3*m; j+=3) {
        index_t u = e[j];
        index_t v = e[j+1];
        index_t t = e[j+2];
        index_t *pos_t = pos + n*t;
        pos_t[u]++;
        pos_t[v]++;
    }
#endif
    // turn counts into bucket offsets (one extra length slot per bucket)
    index_t run = prefixsum(n*tmax, pos, 1);
    assert(run == (n*tmax+2*m));
    time = pop_time();
    fprintf(stdout, "[pos: %.2lf ms] ", time);
    fflush(stdout);
    push_time();
    // reset the per-bucket length slots before aggregation
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n*tmax; u++) {
        adj[pos[u]] = 0;
    }
    e = g->edges;
#ifdef BUILD_PARALLEL
    // Parallel aggregation to bins
    // -- each thread is responsible for a group of bins,
    //    all threads scan the entire list of edges
    nt = num_threads();
    block_size = n/nt;
#pragma omp parallel for
    for(index_t th = 0; th < nt; th++) {
        index_t start = th*block_size;
        index_t stop = (th == nt-1) ? n-1 : (start+block_size-1);
        for(index_t j = 0; j < 3*m; j+=3) {
            index_t u = e[j+0];
            index_t v = e[j+1];
            index_t t = e[j+2];
            if(start <= u && u <= stop) {
                // I am responsible for u, record adjacency to u
                index_t pu = pos[n*t+u];
                adj[pu + 1 + adj[pu]++] = v;
            }
            if(start <= v && v <= stop) {
                // I am responsible for v, record adjacency to v
                index_t pv = pos[n*t+v];
                adj[pv + 1 + adj[pv]++] = u;
            }
        }
    }
#else
    for(index_t j = 0; j < 3*m; j+=3) {
        index_t u = e[j+0];
        index_t v = e[j+1];
        index_t t = e[j+2];
        index_t pu = pos[n*t+u];
        index_t pv = pos[n*t+v];
        adj[pu + 1 + adj[pu]++] = v;
        adj[pv + 1 + adj[pv]++] = u;
    }
#endif
    /*
    // TODO: works only for single source
    // update this part later
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t t = 0; t < tmax; t++) {
        index_t *pos_t = (pos + (n*t));
        for(index_t i = 0; i < num_srcs; i++) {
            index_t s = sources[i];
            index_t ps = pos_t[s];
            adj[ps] = 0;
        }
    }
    */
    time = pop_time();
    fprintf(stdout, "[adj: %.2lf ms] ", time);
    fflush(stdout);
    push_time();
    adjsort(n*tmax, pos, adj);
    time = pop_time();
    fprintf(stdout, "[adjsort: %.2lf ms] ", time);
    fflush(stdout);
    // copy shades
    //push_time();
    //#ifdef BUILD_PARALLEL
    //#pragma omp parallel for
    //#endif
    // for(index_t u = 0; u < n; u++) {
    //     shade_map_t s = 0;
    //     for(index_t j = 0; j < k; j++)
    //         if(colors[u] == kk[j])
    //             s |= 1UL << j;
    //     shade[u] = s;
    //     fprintf(stdout, "%4ld: 0x%08X\n", u, shade[u]);
    //     //
    // }
    // time = pop_time();
    // fprintf(stdout, "[shade: %.2lf ms] ", time);
    // fflush(stdout);
    // copy resting time
    push_time();
    index_t *rest_time = g->rest_time;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++)
        rtime[u] = rest_time[u];
    time = pop_time();
    fprintf(stdout, "[rtime: %.2lf ms] ", time);
    fflush(stdout);
    time = pop_time();
    fprintf(stdout, "done. [%.2lf ms] ", time);
    print_pop_memtrack();
    fprintf(stdout, " ");
    print_current_mem();
    fprintf(stdout, "\n");
    fflush(stdout);
    //print_temppathq(root);
    return root;
}
// Zero the adjacency-bucket length of every source vertex at every
// timestep, effectively deleting all edges recorded into the sources.
void update_sources_adj(index_t n, index_t tmax, index_t num_srcs,
                        index_t *sources, index_t *pos, index_t *adj)
{
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t t = 0; t < tmax; t++) {
        index_t *pos_t = pos + n*t;
        for(index_t j = 0; j < num_srcs; j++)
            adj[pos_t[sources[j]]] = 0;
    }
}
// Assign localisation colors: every vertex starts with color 2,
// sources are then recolored 1 and separators 3 (separators win if a
// vertex is both).
void update_colors(index_t n, index_t k, index_t num_srcs, index_t *sources,
                   index_t num_seps, index_t *separators, index_t *color)
{
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t v = 0; v < n; v++)
        color[v] = 2;
    // currently handling single source
    for(index_t j = 0; j < num_srcs; j++)
        color[sources[j]] = 1;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t j = 0; j < num_seps; j++)
        color[separators[j]] = 3;
}
// Motif color multiset for a k-vertex path query: one vertex of
// color 1 (the source end) followed by k-1 vertices of color 2.
void get_motif_colors(index_t k, index_t *kk)
{
    // not worth parallelising
    kk[0] = 1;
    for(index_t j = 1; j < k; j++)
        kk[j] = 2;
}
// Refresh root's per-vertex shade bitmaps for a k-color motif:
// bit j of shade[u] is set iff color[u] equals motif color kk[j].
// Also records k and ns (= k) on the query.
void temppathq_update_shades(index_t k, index_t *kk, index_t *color, temppathq_t *root)
{
    shade_map_t *shade = root->shade;
    index_t n = root->n;
    root->k = k;
    root->ns = k;
    //update shades
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t v = 0; v < n; v++) {
        shade_map_t bits = 0;
        for(index_t j = 0; j < k; j++) {
            if(color[v] == kk[j])
                bits |= 1UL << j;
        }
        shade[v] = bits;
        //fprintf(stdout, "%4ld: 0x%08X\n", u, shade[u]);
    }
}
/****************************************************** Input reader (ASCII). */
// Consume whitespace and '#'-to-end-of-line comments from `in`,
// leaving the stream positioned at the next significant character
// (the character is pushed back with ungetc), or at EOF.
void skipws(FILE *in)
{
    int c = fgetc(in);
    while(c != EOF) {
        if(c == '#') {
            // discard the rest of the comment line (the trailing '\n',
            // if any, is handled as whitespace below)
            do {
                c = fgetc(in);
            } while(c != EOF && c != '\n');
        }
        if(c == EOF || !isspace(c))
            break;
        c = fgetc(in);
    }
    if(c != EOF)
        ungetc(c, in);
}
// Command identifiers selected from the command line; the gap 1--5 is
// presumably left by commands removed in an earlier version -- confirm.
#define CMD_NOP 0
#define CMD_RUN_ORACLE 6
#define CMD_VLOC 7
#define CMD_VLOC_FINEGRAIN 8
#define CMD_EXHAUST_SEARCH 9
// Human-readable command names. NOTE(review): the five entries do not
// line up with the CMD_* values above (codes 0, 6..9) -- do not index
// this table directly with a CMD_* constant without remapping.
char *cmd_legend[] = { "no operation", "run oracle", "localised", "localised (fine-grained)", "exhaustive search"};
/* Parse the ASCII problem format from `in`:
 *   p motif <n> <m> <tmax> <rtmax> <is_dir>   -- exactly once, first
 *   e <u> <v> <t>                             -- one edge (1-based)
 *   r <u> <rt>                                -- resting time (1-based)
 *   s <count> <u...>                          -- sources (single source only)
 *   t <count> <u...>                          -- separators
 * '#' starts a comment (see skipws). Outputs the built graph and the
 * (0-based) source/separator lists; the caller owns all of them.
 *
 * FIXES: two ERROR format strings printed index_t values with %d
 * (undefined behavior when index_t is long -- everywhere else %ld is
 * used), and the separator-parse error message said "sources". */
void reader_ascii(FILE *in,
                  graph_t **g_out,
                  index_t *num_srcs_out, index_t **sources_out,
                  index_t *num_seps_out, index_t **separators_out)
{
    push_time();
    push_memtrack();
    index_t n = 0;
    index_t m = 0;
    index_t tmax = 0;
    index_t rtmax = 0;
    index_t is_dir = 0;
    graph_t *g = (graph_t *) 0;
    index_t num_srcs = 0;
    index_t *sources = (index_t *) 0;
    index_t num_seps = 0;
    index_t *separators = (index_t *) 0;
    index_t i, j, t, rt;
    skipws(in);
    while(!feof(in)) {
        skipws(in);
        int c = fgetc(in);
        switch(c) {
        case 'p':
            if(g != (graph_t *) 0)
                ERROR("duplicate parameter line");
            skipws(in);
            if(fscanf(in, "motif %ld %ld %ld %ld %ld", &n, &m, &tmax, &rtmax, &is_dir) != 5)
                ERROR("invalid parameter line");
            if(n <= 0 || m < 0 || tmax < 1 || rtmax < 1) {
                ERROR("invalid input parameters (n = %ld, m = %ld, tmax = %ld, rtmax = %ld)",
                      n, m, tmax, rtmax);
            }
            g = graph_alloc(n);
            graph_set_is_directed(g, is_dir);
            graph_set_max_time(g, tmax);
            graph_set_max_resttime(g, rtmax);
            break;
        case 'e':
            if(g == (graph_t *) 0)
                ERROR("parameter line must be given before edges");
            skipws(in);
            if(fscanf(in, "%ld %ld %ld", &i, &j, &t) != 3)
                ERROR("invalid edge line");
            graph_add_edge(g, i-1, j-1, t-1); // convert to 0-based
            break;
        case 'r':
            if(g == (graph_t *) 0)
                ERROR("parameter line must be given before motif");
            skipws(in);
            if(fscanf(in, "%ld %ld", &i, &rt) != 2)
                ERROR("invalid rest time line");
            if(i < 1 || i > n || rt < 1 || rt > rtmax)
                ERROR("invalid rest time line (u = %ld, rt = %ld with n = %ld and rtmax = %ld)",
                      i, rt, n, rtmax);
            graph_set_resttime(g, i-1, rt);
            break;
        case 's':
            if(g == (graph_t *) 0)
                ERROR("parameter line must be given before sources");
            skipws(in);
            if(fscanf(in, "%ld", &num_srcs) != 1)
                ERROR("invalid sources line");
            if(num_srcs < 1 || num_srcs > n)
                ERROR("invalid sources line (num-sources = %ld with n = %ld)", num_srcs, n); // FIX: was %d for n
            if(num_srcs > 1)
                ERROR("current implementation only support single source (num-sources = %ld)", num_srcs);
            sources = alloc_idxtab(num_srcs);
            for(index_t i = 0; i < num_srcs; i++) {
                index_t s;
                skipws(in);
                if(fscanf(in, "%ld", &s) != 1)
                    ERROR("error parsing sources line");
                if(s < 1 || s > n)
                    ERROR("invalid sources line (s = %ld)", s);
                sources[i] = s-1;
            }
            break;
        case 't':
            if(g == (graph_t *) 0)
                ERROR("parameter line must be given before separators");
            skipws(in);
            if(fscanf(in, "%ld", &num_seps) != 1)
                ERROR("invalid separators line");
            if(num_seps < 1 || num_seps > n)
                ERROR("invalid separators line (num-separators = %ld with n = %ld)", num_seps, n); // FIX: was %d for n
            separators = alloc_idxtab(num_seps);
            for(index_t i = 0; i < num_seps; i++) {
                index_t s;
                skipws(in);
                if(fscanf(in, "%ld", &s) != 1)
                    ERROR("error parsing separators line"); // FIX: said "sources"
                if(s < 1 || s > n)
                    ERROR("invalid separator (s = %ld)", s);
                separators[i] = s-1;
            }
            break;
        case EOF:
            break;
        default:
            ERROR("parse error");
        }
    }
    if(g == (graph_t *) 0)
        ERROR("no graph given in input");
    double time = pop_time();
    fprintf(stdout,
            "input: n = %ld, m = %ld, t = %ld, rt = %ld [%.2lf ms] ",
            g->num_vertices,
            g->num_edges,
            g->max_time,
            g->max_resttime,
            time);
    print_pop_memtrack();
    fprintf(stdout, " ");
    print_current_mem();
    fprintf(stdout, "\n");
    fprintf(stdout, "sources [%ld]: ", num_srcs);
    for(index_t i = 0; i < num_srcs; i++)
        fprintf(stdout, " %ld", sources[i]+1);
    fprintf(stdout, "\n");
    fprintf(stdout, "separators [%ld]: ", num_seps);
    for(index_t i = 0; i < num_seps; i++)
        fprintf(stdout, "%ld", separators[i]+1);
    fprintf(stdout, "\n");
    *g_out = g;
    *num_srcs_out = num_srcs;
    *sources_out = sources;
    *num_seps_out = num_seps;
    *separators_out = separators;
}
/************************************************************ Temporal DFS. */
/* Recursive depth-first enumeration of restless temporal walks.
 *
 * Extends the partial path on stack s: from the top vertex u, entered
 * at time e.t, tries every departure time t in [e.t, e.t + rtime[u]]
 * (capped at tmax) and every neighbour of u active at t that is not
 * already on the stack. reach_time[v] is lowered to the earliest t at
 * which v is reached. Returns 1 when the stack holds k vertices.
 *
 * NOTE(review): the result of the recursive call below is discarded,
 * so a depth-k hit does NOT propagate and the function enumerates the
 * whole search space -- its only caller (exhaustive_search) ignores
 * the return value and consumes the reach_time side effects, so this
 * appears intentional for reachability, but the stale TODO on the
 * depth check suggests unfinished decision-mode support; confirm.
 */
index_t temp_dfs(index_t n, index_t k, index_t tmax, index_t *pos,
                 index_t *adj, index_t *in_stack, index_t *rtime,
                 index_t *reach_time, stk_t *s)
{
    // reached depth 'k'
    if(s->n == k) // TODO: fix this to s->n == k
        return 1;
    stack_node_t e;
    stack_top(s, &e);
    index_t u = e.u;
    index_t l = e.l;           // depth of u on the path
    index_t min_t = e.t;       // time u was entered
    // u may rest at most rtime[u] steps before departing
    index_t max_t = MIN(tmax, e.t + rtime[u]);
    for(index_t t = min_t; t <= max_t; t++) {
        // bucket of u at (1-based) time t in the time-expanded structure
        index_t *pos_t = pos + (t-1)*n;
        index_t pu = pos_t[u];
        index_t nu = adj[pu];  // degree slot precedes the neighbour list
        if(nu == 0) continue;
        index_t *adj_u = adj + pu;
        for(index_t i = 1; i <= nu; i++) {
            index_t v = adj_u[i];
            if(in_stack[v]) continue;  // keep the walk vertex-simple
            stack_node_t e;
            e.u = v;
            e.l = l+1;
            e.t = t;
            stack_push(s, &e);
            in_stack[v] = 1;
            reach_time[v] = MIN(reach_time[v], t);
            temp_dfs(n, k, tmax, pos, adj, in_stack, rtime, reach_time, s);
            stack_pop(s, &e);
            in_stack[v] = 0;
        }
    }
    return 0; // not found
}
/* Baseline: exhaustively enumerate restless temporal walks of up to k
 * vertices from `src` via temp_dfs, filling reach_time[u] (length n,
 * caller-allocated) with the earliest time each vertex is reached
 * (MATH_INF if never). Prints preprocessing/DFS/total timings.
 *
 * SIDE EFFECT: overwrites root->rtime[src] with tmax so the source
 * may depart at any time -- root is mutated and not restored.
 */
void exhaustive_search(temppathq_t *root, index_t src, index_t *reach_time)
{
    push_time();               // total time (popped last in the fprintf below)
    index_t n = root->n;
    index_t k = root->k;
    index_t tmax = root->tmax;
    index_t *pos = root->pos;
    index_t *adj = root->adj;
    index_t *rtime = root->rtime;
    index_t *in_stack = alloc_idxtab(n);
    push_time();               // preprocessing time
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++)
        in_stack[u] = 0;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++)
        reach_time[u] = MATH_INF;
    rtime[src] = tmax;         // let the source wait arbitrarily long
    stk_t *s = stack_alloc(k);
    stack_node_t es;
    es.u = src;
    es.l = 0;
    es.t = 1;                  // timestamps are 1-based here
    stack_push(s, &es);
    in_stack[src] = 1;
    double preproc_time = pop_time();
    push_time();               // DFS time
    temp_dfs(n, k, tmax, pos, adj, in_stack, rtime, reach_time, s);
    double dfs_time = pop_time();
    FREE(in_stack);
    stack_free(s);
    // the remaining pop_time() closes the push at function entry
    fprintf(stdout, " [%.2lfms %.2lfms %.2lfms]", preproc_time, dfs_time, pop_time());
}
/**************************************************** get minimum timestamp. */
// Reduce each vertex's per-level localisation timestamps (levels
// 2..k, stored as k-1 consecutive entries per vertex) to their
// minimum; MATH_INF when no level localised the vertex.
void vloc_min_timestamp(index_t n, index_t k, index_t *vloc_time,
                        index_t *vloc_min_time)
{
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t v = 0; v < n; v++) {
        const index_t *row = vloc_time + v*(k-1);
        index_t best = MATH_INF;
        for(index_t j = 0; j < k-1; j++) {
            if(row[j] < best)
                best = row[j];
        }
        vloc_min_time[v] = best;
    }
}
// Emit the per-vertex localisation table. out_type "csv" writes
// semicolon-separated rows (per-level times, then the minimum); any
// other value writes an aligned, 1-based human-readable listing.
// MATH_INF entries are rendered as UNDEFINED; one flush per row.
void vloc_table_out(FILE *out, index_t n, index_t k, index_t *vloc_time,
                    index_t *vloc_min_time, char *out_type)
{
    const index_t csv = !strcmp(out_type, "csv");
    for(index_t u = 0; u < n; u++) {
        if(!csv)
            fprintf(out, "%5ld:", u+1);
        for(index_t l = 2; l <= k; l++) {
            index_t t_vloc = vloc_time[u*(k-1) + (l-2)];
            index_t shown = t_vloc==MATH_INF ? UNDEFINED : t_vloc;
            fprintf(out, csv ? "%ld;" : " %6ld", shown);
        }
        index_t mn = vloc_min_time[u];
        fprintf(out, csv ? "%ld\n" : " %6ld\n", mn==MATH_INF ? UNDEFINED : mn);
        fflush(out);
    }
}
/****************************************************** program entry point. */
/* Program entry point: parse options, read the graph, build the query,
 * dispatch the selected command, and print accounting.
 *
 * FIXES:
 *  - opening the file given with -in successfully used to print
 *    "no input file specified, defaulting to stdin";
 *  - two fprintf calls passed pop_time() twice as sibling arguments;
 *    C does not sequence argument evaluation, so the two timers could
 *    land in swapped columns -- they are now popped into locals first;
 *  - fopen("reachability.out", "w") was not checked for NULL;
 *  - the default output file was only fclose'd when -out was given.
 */
int main(int argc, char **argv)
{
    GF_PRECOMPUTE;
    push_time();
    push_memtrack();
    index_t arg_cmd = CMD_NOP;
    index_t flag_help = 0;
    index_t flag_test = 0;
    index_t have_seed = 0;
    index_t have_input = 0;
    index_t have_output = 0;
    index_t have_karg = 0;
    index_t k_arg = 0;
    index_t k = 0;
    index_t seed = 123456789;
    char *filename = (char *) 0;
    char *filename_out = (char *) 0;
    // command-line parsing
    for(index_t f = 1; f < argc; f++) {
        if(argv[f][0] == '-') {
            if(!strcmp(argv[f], "-h") || !strcmp(argv[f], "-help")) {
                flag_help = 1;
                break;
            }
            if(!strcmp(argv[f], "-oracle")) {
                arg_cmd = CMD_RUN_ORACLE;
            }
            if(!strcmp(argv[f], "-vloc")) {
                arg_cmd = CMD_VLOC;
            }
            if(!strcmp(argv[f], "-vloc-finegrain")) {
                arg_cmd = CMD_VLOC_FINEGRAIN;
            }
            if(!strcmp(argv[f], "-baseline")) {
                arg_cmd = CMD_EXHAUST_SEARCH;
            }
            if(!strcmp(argv[f], "-seed")) {
                if(f == argc - 1)
                    ERROR("random seed missing from command line");
                seed = atol(argv[++f]);
                have_seed = 1;
            }
            if(!strcmp(argv[f], "-k")) {
                if(f == argc -1)
                    ERROR("path length missing from command line");
                k_arg = atol(argv[++f]);
                have_karg = 1;
            }
            if(!strcmp(argv[f], "-in")) {
                if(f == argc - 1)
                    ERROR("input file missing from command line");
                have_input = 1;
                filename = argv[++f];
            }
            if(!strcmp(argv[f], "-out")) {
                if(f == argc - 1)
                    ERROR("output file missing from command line");
                have_output = 1;
                filename_out = argv[++f];
            }
            if(!strcmp(argv[f], "-test")) {
                flag_test = 1;
            }
        }
    }
    fprintf(stdout, "invoked as:");
    for(index_t f = 0; f < argc; f++)
        fprintf(stdout, " %s", argv[f]);
    fprintf(stdout, "\n");
    if(flag_help) {
        fprintf(stdout,
                "usage: %s -pre <value> -optimal -<command-type> -seed <value> -in <input-file> -<file-type> \n"
                "       %s -h/help\n"
                "\n"
                " -<command-type>     : oracle - decide existence of a solution\n"
                "                       vloc - single run of vertex localisation\n"
                "                       vloc-finegrain - fine-grained evaluation of the oracle\n"
                "                       baseline - exhaustive-search algorithm\n"
                " -k <value>          : integer value in range 1 to n-1\n"
                " -seed <value>       : integer value in range 1 to 2^32 -1\n"
                "                       default value `%ld`\n"
                " -in <input-file>    : path to input file, `stdin` by default \n"
                " -out <output-file>  : path to output file, `reachability.out` by default \n"
                " -min                : reports minimum reachable time for each vertex to `output-file`\n"
                " -h or -help         : help\n"
                "\n"
                , argv[0], argv[0], seed);
        return 0;
    }
    if(have_seed == 0) {
        fprintf(stdout,
                "no random seed given, defaulting to %ld\n", seed);
    }
    fprintf(stdout, "random seed = %ld\n", seed);
    // open input (FIX: success no longer prints the "defaulting to
    // stdin" message; that message now only appears when -in is absent)
    FILE *in = stdin;
    if(have_input) {
        in = fopen(filename, "r");
        if(in == NULL)
            ERROR("unable to open file '%s'", filename);
    } else {
        fprintf(stdout, "no input file specified, defaulting to stdin\n");
    }
    fflush(stdout);
    // open output (FIX: NULL check on the default file)
    FILE *out = stdout;
    if(have_output) {
        out = fopen(filename_out, "w");
        if(out == NULL)
            ERROR("unable to open file '%s'", filename_out);
    } else {
        out = fopen("reachability.out", "w");
        if(out == NULL)
            ERROR("unable to open file '%s'", "reachability.out");
        fprintf(stdout, "no output file specified, defaulting to `reachability.out`\n");
    }
    fflush(stdout);
    if(have_karg) {
        k = k_arg;
        fprintf(stdout, "path length specified in command line, changing to `k = %ld`\n", k);
    } else {
        k = 2;
        fprintf(stdout, "no path length specified, defaulting to `k = %ld`\n", k);
    }
    fflush(stdout);
    // initialize random number generator
    srand(seed);
    index_t num_srcs;
    index_t num_seps;
    graph_t *g = (graph_t *) 0;
    index_t *kk = (index_t *) 0;
    index_t *sources = (index_t *) 0;
    index_t *separators = (index_t *) 0;
    index_t *color = (index_t *) 0;
    index_t cmd = arg_cmd; // by default execute command in input stream
    // read input graph : current implementation supports only ascii inputs
    reader_ascii(in, &g, &num_srcs, &sources, &num_seps, &separators);
    if(have_input) fclose(in); // close file descriptor
    // build root query
    temppathq_t *root = (temppathq_t *) 0;
    if(g->is_directed) {
        root = build_temppathq_dir(g);
    } else {
        root = build_temppathq(g);
    }
    graph_free(g); // free graph
    if(arg_cmd != CMD_EXHAUST_SEARCH) {
        // update adjacency list
        update_sources_adj(root->n, root->tmax, num_srcs, sources, root->pos, root->adj);
        // update vertex colors
        color = alloc_idxtab(root->n);
        update_colors(root->n, k, num_srcs, sources, num_seps, separators, color);
    }
    push_time();
    // execute command
    switch(cmd) {
    case CMD_NOP:
        {
            // no operation
            temppathq_free(root);
            break;
        }
    case CMD_RUN_ORACLE:
        {
            // --- run oracle ---
            fprintf(stdout, "oracle [temppath]: ");
            fflush(stdout);
            if(temppathq_execute(root))
                fprintf(stdout, " -- true\n");
            else
                fprintf(stdout, " -- false\n");
            temppathq_free(root);
        }
        break;
    case CMD_VLOC:
        {
            // --- single vertex-localisation run ---
            fprintf(stdout, "oracle [temppath]: ");
            fflush(stdout);
            kk = alloc_idxtab(k);
            get_motif_colors(k, kk);
            temppathq_update_shades(k, kk, color, root);
            if(temppathq_execute(root))
                fprintf(stdout, " -- true\n");
            else
                fprintf(stdout, " -- false\n");
            // earliest timestep with a nonzero localisation sum per vertex
            scalar_t *vsum = root->vsum;
            index_t n = root->n;
            index_t tmax = root->tmax;
            index_t *vloc_time = (index_t *) MALLOC(sizeof(index_t)*n);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
            for(index_t u = 0; u < n; u++) {
                vloc_time[u] = UNDEFINED;
                index_t i_u0 = (u*(tmax+1));
                for(index_t i = 0; i <= tmax; i++) {
                    index_t i_ui = i_u0 + i;
                    if(vsum[i_ui]) {
                        vloc_time[u] = i;
                        break;
                    }
                }
            }
            fprintf(stdout, "time:\n");
            for(index_t u = 0; u < n; u++) {
                if(vloc_time[u] != UNDEFINED)
                    fprintf(stdout,"%10ld: %4ld\n", u+1, vloc_time[u]);
            }
            fflush(stdout);
            FREE(vloc_time);
            temppathq_free(root);
        }
        break;
    case CMD_VLOC_FINEGRAIN:
        {
            // --- run the oracle once per path length l = 2..k ---
            index_t n = root->n;
            //index_t k = root->k;
            index_t tmax = root->tmax;
            index_t *vloc_time = (index_t *) MALLOC(sizeof(index_t)*n*(k-1));
            kk = alloc_idxtab(k);
            for(index_t l = 2; l <= k; l++) {
                push_time();        // whole iteration
                push_time();        // time update shades
                get_motif_colors(l, kk);
                root->k = l;
                temppathq_update_shades(l, kk, color, root);
                fprintf(stdout, "finegrained-oracle [%ld, shade:%.2fms]: ", l, pop_time());
                push_time();        // run oracle and time it
                //execute oracle
                if(temppathq_execute(root)) {
                    fprintf(stdout, " [%.2lf ms]-- true", pop_time());
                } else {
                    fprintf(stdout, " [%.2lf ms]-- false", pop_time());
                }
                push_time();        // vsum scan
                scalar_t *vsum = root->vsum;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
                for(index_t u = 0; u < n; u++) {
                    vloc_time[u*(k-1) + l-2] = MATH_INF;
                    index_t i_u0 = (u*(tmax+1));
                    for(index_t i = 0; i <= tmax; i++) {
                        index_t i_ui = i_u0 + i;
                        if(vsum[i_ui]) {
                            vloc_time[u*(k-1) + l-2] = i;
                            break;
                        }
                    }
                }
                // FIX: pop into locals -- argument evaluation order is
                // unsequenced, so two pop_time() calls in one fprintf
                // could report the timers in swapped columns
                double scan_time = pop_time();
                double iter_time = pop_time();
                fprintf(stdout, " [%.2lfms, %.2lfms]\n", scan_time, iter_time);
                fflush(stdout);
            }
            index_t *vloc_min_time = alloc_idxtab(n);
            vloc_min_timestamp(n, k, vloc_time, vloc_min_time);
            if(flag_test) {
                for(index_t u = 0; u < n; u++) {
                    fprintf(out, "%ld\n", vloc_min_time[u]==MATH_INF?UNDEFINED:vloc_min_time[u]);
                }
                fflush(out);
            } else {
                if(have_output) {
                    vloc_table_out(out, n, k, vloc_time, vloc_min_time, "csv");
                } else {
                    vloc_table_out(out, n, k, vloc_time, vloc_min_time, "default");
                }
            }
            FREE(vloc_time);
            FREE(vloc_min_time);
            temppathq_free(root);
        }
        break;
    case CMD_EXHAUST_SEARCH:
        {
            // --- exhaustive-search baseline ---
            push_time();        // whole case
            index_t n = root->n;
            index_t *reach_time = alloc_idxtab(n);
            kk = alloc_idxtab(k);
            get_motif_colors(k, kk);
            root->k = k;
            fprintf(stdout, "exhaustive-search [%ld]:", k);
            exhaustive_search(root, sources[0], reach_time);
            push_time();        // result output
            if(have_output) {
                for(index_t u = 0; u < n; u++) {
                    fprintf(out, "%ld\n", reach_time[u]==MATH_INF?UNDEFINED:reach_time[u]);
                }
                fflush(out);
            } else {
                for(index_t u = 0; u < n; u++) {
                    fprintf(out, "%6ld\n", reach_time[u]==MATH_INF?UNDEFINED:reach_time[u]);
                }
                fflush(out);
            }
            // free memory
            FREE(reach_time);
            temppathq_free(root);
            // FIX: pop into locals (see CMD_VLOC_FINEGRAIN)
            double output_time = pop_time();
            double case_time = pop_time();
            fprintf(stdout, " [%.2lfms %.2lfms]\n", output_time, case_time);
            fflush(stdout);
        }
        break;
    default:
        assert(0);
        break;
    }
    if(kk != (index_t *) 0) { FREE(kk); }
    if(sources != (index_t *) 0) { FREE(sources); }
    if(separators != (index_t *) 0) { FREE(separators); }
    if(color != (index_t *) 0) { FREE(color); }
    if(out != stdout)
        fclose(out); // FIX: close the default reachability.out too
    double cmd_time = pop_time();
    double time = pop_time();
    fprintf(stdout, "command done [ %.2lf ms %.2lf ms]\n",
            cmd_time, time);
    //if(input_cmd != CMD_NOP)
    //    FREE(cmd_args);
    //time = pop_time();
    fprintf(stdout, "grand total [%.2lf ms] ", time);
    print_pop_memtrack();
    fprintf(stdout, "\n");
    fprintf(stdout, "host: %s\n", sysdep_hostname());
    fprintf(stdout,
            "build: %s, %s, %s, %ld x %s\n",
#ifdef BUILD_PARALLEL
            "multithreaded",
#else
            "single thread",
#endif
#ifdef BUILD_PREFETCH
            "prefetch",
#else
            "no prefetch",
#endif
            GENF_TYPE,
            LIMBS_IN_LINE,
            LIMB_TYPE);
    fprintf(stdout,
            "compiler: gcc %d.%d.%d\n",
            __GNUC__,
            __GNUC_MINOR__,
            __GNUC_PATCHLEVEL__);
    fflush(stdout);
    assert(malloc_balance == 0);
    assert(memtrack_stack_top < 0);
    assert(start_stack_top < 0);
    return 0;
}
|
GB_unaryop__ainv_int8_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int8_uint16
// op(A') function: GB_tran__ainv_int8_uint16
// C type: int8_t
// A type: uint16_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
int8_t z = (int8_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT8 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply z = -(int8_t)aij to every entry of Ax, writing the result to Cx.
// The cast (uint16_t -> int8_t) happens before negation, per GB_CAST_OP.
GrB_Info GB_unop__ainv_int8_uint16
(
    int8_t *Cx,         // Cx and Ax may be aliased
    uint16_t *Ax,       // input values, anz entries
    int64_t anz,        // number of entries in Ax and Cx
    int nthreads        // number of OpenMP threads to use
)
{
    // GB_DISABLE is set when this operator/type combination was compiled out
    // via GB_control.h; the caller then falls back to the generic kernel.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // each entry is independent, so a static schedule is ideal
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;   // Cx [p] = -(int8_t) Ax [p]
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = -(int8_t) A': transpose A, typecast, and apply the AINV operator.
// The actual loop lives in the shared template GB_unaryop_transpose.c,
// which expands using the GB_* macros defined at the top of this file.
GrB_Info GB_tran__ainv_int8_uint16
(
    GrB_Matrix C,                         // output, already allocated
    const GrB_Matrix A,                   // input matrix to transpose
    int64_t *GB_RESTRICT *Rowcounts,      // row counts per slice (workspace)
    GBI_single_iterator Iter,             // iterator over A's vectors
    const int64_t *GB_RESTRICT A_slice,   // how A is partitioned across tasks
    int naslice                           // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // phase 2 of 2: phase 1 (symbolic count) was done by the caller
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
rnd.c | #include "rnd.h"
static const float saub = FLT_MAX / 16;
static const float caub = FLT_MAX / 16;
static const double daub = DBL_MAX / 16;
static const double zaub = DBL_MAX / 16;
/* Fill r[0..n-1] with random bytes.  Each uwrand() call yields 16 random
   bits which are split across two consecutive output bytes; the loop
   strides by 2 so each iteration owns a disjoint byte pair, making the
   OpenMP parallelisation race-free. */
static void gen_rand8(const size_t n, uint8_t r[static restrict 1])
{
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,r)
#endif /* _OPENMP */
	for (size_t i = (size_t)0u; i < n; i += (size_t)2u) {
		const uint16_t w = uwrand();
		r[i] = (uint8_t)w;           /* low byte */
		const size_t j = i + (size_t)1u;
		if (j < n)                   /* guard the final odd byte */
			r[j] = (uint8_t)(w >> 8u);   /* high byte */
	}
}
/* Fill r[0..n-1] with random 16-bit words, one uwrand() call each. */
static void gen_rand16(const size_t n, uint16_t r[static restrict 1])
{
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,r)
#endif /* _OPENMP */
	for (size_t i = (size_t)0u; i < n; ++i)
		r[i] = uwrand();
}
/* Fill r[0..n-1] with random 32-bit words, one udrand() call each. */
static void gen_rand32(const size_t n, uint32_t r[static restrict 1])
{
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,r)
#endif /* _OPENMP */
	for (size_t i = (size_t)0u; i < n; ++i)
		r[i] = udrand();
}
/* Fill r[0..n-1] with random 64-bit words, one uqrand() call each. */
static void gen_rand64(const size_t n, uint64_t r[static restrict 1])
{
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,r)
#endif /* _OPENMP */
	for (size_t i = (size_t)0u; i < n; ++i)
		r[i] = uqrand();
}
/* Fill the buffer r, holding *n elements of *s bytes each, with random
   bits.  Fortran-callable (hence the trailing underscore and the
   pointer-to-scalar arguments).
   Returns: 0 on success (including the *n == 0 no-op),
            -2 if *s is zero, -3 if r is NULL,
            -1 should never happen (defensive default in the switch).
   Strategy: determine the widest power-of-two alignment of r (capped at
   8 bytes), emit as many words of that width as fit, then mop up the
   remaining bytes with successively narrower generators.  Advancing by
   whole words preserves the alignment for the narrower passes. */
fint gen_rand_(const size_t n[static restrict 1], const size_t s[static restrict 1], void *restrict r)
{
	if (!*n)
		return 0;
	if (!*s)
		return -2;
	if (!r)
		return -3;
	const uintptr_t p = (uintptr_t)r;
	/* a = lowest set bit of the address = usable alignment; if the low
	   three bits are clear we fall out of the loop with a == 8 */
	unsigned a = 1u;
	do {
		if (p & (uintptr_t)a)
			break;
		a <<= 1u;
	} while (a <= 4u);
	/* b = bytes still to fill; each pass writes e words of width a */
	for (size_t b = ((*n) * (*s)); a && b; a >>= 1u) {
		const size_t e = b / a;
		if (e) {
			switch (a) {
			case 8u:
				gen_rand64(e, (uint64_t*)r);
				r = ((uint64_t*)r + e);
				break;
			case 4u:
				gen_rand32(e, (uint32_t*)r);
				r = ((uint32_t*)r + e);
				break;
			case 2u:
				gen_rand16(e, (uint16_t*)r);
				r = ((uint16_t*)r + e);
				break;
			case 1u:
				gen_rand8(e, (uint8_t*)r);
				r = ((uint8_t*)r + e);
				break;
			default: /* should never happen */
				return -1;
			}
			b -= (e * a);
		}
	}
	return 0;
}
/* Fill r[0..*n-1] with random floats of magnitude bounded by *aub
   (one sfrand() draw per element).  Fortran-callable. */
void gensfrand_(const size_t n[static restrict 1], const float aub[static restrict 1], float r[static restrict 1])
{
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,aub,r)
#endif /* _OPENMP */
	for (size_t i = (size_t)0u; i < *n; ++i)
		r[i] = sfrand(*aub);
}
/* Fill r[0..*n-1] with random doubles of magnitude bounded by *aub
   (one dfrand() draw per element).  Fortran-callable. */
void gendfrand_(const size_t n[static restrict 1], const double aub[static restrict 1], double r[static restrict 1])
{
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,aub,r)
#endif /* _OPENMP */
	for (size_t i = (size_t)0u; i < *n; ++i)
		r[i] = dfrand(*aub);
}
/* f = c^2 * (l1 + l2 * t^2) */
/* g = c^2 * (l1 * t^2 + l2) */
/* h = c^2 * exp(-alpha * I) * t * (l1 - l2) */
/* Generate *n random single-precision symmetric 2x2 test matrices with
   prescribed random eigenvalues.  For each i: l1[i], l2[i] are the random
   eigenvalues, and f, g, h are the matrix entries built from them via the
   f/g/h formulas in the comment above (t and c presumably encode a random
   rotation from wo2rand -- confirm against its definition).  Intermediate
   arithmetic is done in the wider 'wide' type for accuracy. */
void ssym2rand_(const size_t n[static restrict 1], float l1[static restrict 1], float l2[static restrict 1], float f[static restrict 1], float g[static restrict 1], float h[static restrict 1])
{
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,saub,l1,l2,f,g,h)
#endif /* _OPENMP */
	for (size_t i = (size_t)0u; i < *n; ++i) {
		wide w1, w2, t, c;
		w1 = l1[i] = sfrand(saub);
		w2 = l2[i] = sfrand(saub);
		wo2rand(&t, &c);
		h[i] = (float)((t * (w1 - w2)) / c);
		t *= t;     /* from here on t holds t^2 */
		f[i] = (float)(fmaw(w2, t, w1) / c);   /* (w1 + w2*t^2)/c */
		g[i] = (float)(fmaw(w1, t, w2) / c);   /* (w2 + w1*t^2)/c */
	}
}
/* Double-precision counterpart of ssym2rand_: random symmetric 2x2 test
   matrices with prescribed random eigenvalues l1, l2; entries f, g, h per
   the formulas in the comment above ssym2rand_. */
void dsym2rand_(const size_t n[static restrict 1], double l1[static restrict 1], double l2[static restrict 1], double f[static restrict 1], double g[static restrict 1], double h[static restrict 1])
{
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,daub,l1,l2,f,g,h)
#endif /* _OPENMP */
	for (size_t i = (size_t)0u; i < *n; ++i) {
		wide w1, w2, t, c;
		w1 = l1[i] = dfrand(daub);
		w2 = l2[i] = dfrand(daub);
		wo2rand(&t, &c);
		h[i] = (double)((t * (w1 - w2)) / c);
		t *= t;     /* from here on t holds t^2 */
		f[i] = (double)(fmaw(w2, t, w1) / c);
		g[i] = (double)(fmaw(w1, t, w2) / c);
	}
}
/* Generate *n random single-precision Hermitian 2x2 test matrices with
   prescribed random eigenvalues l1, l2: f and g are the real diagonal
   entries, (hr, hi) the complex off-diagonal entry, and hs its signed
   magnitude (sign chosen by the flag m).  Single-precision counterpart
   of zher2rand_. */
void cher2rand_(const size_t n[static restrict 1], float l1[static restrict 1], float l2[static restrict 1], float f[static restrict 1], float g[static restrict 1], float hr[static restrict 1], float hi[static restrict 1], float hs[static restrict 1])
{
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,caub,l1,l2,f,g,hr,hi,hs)
#endif /* _OPENMP */
	for (size_t i = (size_t)0u; i < *n; ++i) {
		wide t, c, r, j;
		const wide w1 = l1[i] = sfrand(caub);
		const wide w2 = l2[i] = sfrand(caub);
		/* bool, not double: the return of wu2rand is only ever used as a
		   truth value below -- this matches zher2rand_ (was 'double',
		   which truth-tested identically but obscured the intent) */
		const bool m = wu2rand(&t, &c, &r, &j);
		const wide h = (t * (w1 - w2)) / c;
		hr[i] = (float)(r * h);
		hi[i] = (float)(j * h);
		t *= t;     /* from here on t holds t^2 */
		f[i] = (float)(fmaw(w2, t, w1) / c);
		g[i] = (float)(fmaw(w1, t, w2) / c);
		hs[i] = (m ? -(float)h : (float)h);
	}
}
/* Double-precision counterpart of cher2rand_: random Hermitian 2x2 test
   matrices with prescribed random eigenvalues l1, l2; f and g are the
   real diagonal entries, (hr, hi) the complex off-diagonal entry, and hd
   its signed magnitude (sign chosen by the flag m). */
void zher2rand_(const size_t n[static restrict 1], double l1[static restrict 1], double l2[static restrict 1], double f[static restrict 1], double g[static restrict 1], double hr[static restrict 1], double hi[static restrict 1], double hd[static restrict 1])
{
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,zaub,l1,l2,f,g,hr,hi,hd)
#endif /* _OPENMP */
	for (size_t i = (size_t)0u; i < *n; ++i) {
		wide t, c, r, j;
		const wide w1 = l1[i] = dfrand(zaub);
		const wide w2 = l2[i] = dfrand(zaub);
		const bool m = wu2rand(&t, &c, &r, &j);
		const wide h = (t * (w1 - w2)) / c;
		hr[i] = (double)(r * h);
		hi[i] = (double)(j * h);
		t *= t;     /* from here on t holds t^2 */
		f[i] = (double)(fmaw(w2, t, w1) / c);
		g[i] = (double)(fmaw(w1, t, w2) / c);
		hd[i] = (m ? -(double)h : (double)h);
	}
}
|
64.h | /*
This file is part of Primer Pooler (c) Silas S. Brown. For Wen.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/* Anything in sixty four dot h calls sixty four bit funcs
   and 128.h is auto-generated: replace w/ 128 throughout
(similarly for 32.h for 32-bit architectures).
These files should be included ONLY by bit-common.h.
The general interface is in all-primers.h.
*/
/* One primer packed into bit-parallel form, one bit per base.
   Per-base encoding (see printBases64): A = AorT:1,GorT:0;  T = 1,1;
   G = 0,1;  C = 0,0.  'valid' marks which bit positions hold a base
   (right-aligned: the 3' end is bit 0). */
typedef struct { bit64 AorT, GorT, valid; } Primer64;
typedef struct { bit64 MaybeA, MaybeC, MaybeG, MaybeT; } DegeneratePrimer64; /* more than one possibility for each base */
/* Tagged union: a primer that may or may not be degenerate; isD selects
   which member of p is live. */
typedef struct {
  union {
    DegeneratePrimer64 D;
    Primer64 notD;
  } p;
  int isD;
} MaybeDegeneratePrimer64;
/* Box a non-degenerate primer into the MaybeDegenerate tagged union. */
static MaybeDegeneratePrimer64 wrapPrimer64(Primer64 p) {
  MaybeDegeneratePrimer64 wrapped;
  wrapped.isD = 0;
  wrapped.p.notD = p;
  return wrapped;
}
/* Box a degenerate primer into the MaybeDegenerate tagged union. */
static MaybeDegeneratePrimer64 wrapDegeneratePrimer64(DegeneratePrimer64 p) {
  MaybeDegeneratePrimer64 wrapped;
  wrapped.isD = 1;
  wrapped.p.D = p;
  return wrapped;
}
/* Convert to the degenerate representation: if already degenerate just
   unwrap; otherwise expand the 2-bit base encoding (AorT/GorT) into the
   four one-hot Maybe masks, restricted to valid positions. */
static inline DegeneratePrimer64 upgradeToDegenerate64(MaybeDegeneratePrimer64 p) {
  if(p.isD) return p.p.D;
  DegeneratePrimer64 d;
  Primer64 notD = p.p.notD;
  d.MaybeA = notD.valid & notD.AorT & ~notD.GorT;   /* A = 1,0 */
  d.MaybeC = notD.valid & ~(notD.AorT | notD.GorT); /* C = 0,0 */
  d.MaybeG = notD.valid & ~notD.AorT & notD.GorT;   /* G = 0,1 */
  d.MaybeT = notD.valid & notD.AorT & notD.GorT;    /* T = 1,1 */
  return d;
}
static inline bit64 DegenerateValid64(DegeneratePrimer64 p) {
  /* A position holds a base iff at least one of the four Maybe bits is
     set there, so the valid mask is simply their union.  We recompute it
     on demand rather than storing it: the redundancy isn't worth growing
     the struct and hurting cacheability. */
  bit64 valid = p.MaybeA;
  valid |= p.MaybeC;
  valid |= p.MaybeG;
  valid |= p.MaybeT;
  return valid;
}
/* Parse a NUL-terminated A/C/G/T string (case-insensitive) into packed
   form, shifting each new base in from the right, so the string's last
   base ends up in bit 0.  Unrecognised characters are reported via
   reportUnrecognisedBase and skipped.  NOTE(review): a sequence longer
   than the word width silently shifts its earliest bases out the top --
   presumably callers enforce a length limit; confirm. */
static Primer64 parsePrimer64(const char *ACGT) {
  Primer64 r={0,0,0};
  while(1) {
    char l=fast_toUpper(*ACGT++);
    if(!l) return r;
    if(strchr("AGCT",l)) {
      r.AorT = (r.AorT<<1) | (l=='A'||l=='T');
      r.GorT = (r.GorT<<1) | (l=='G'||l=='T');
      r.valid = (r.valid<<1) | 1;
    } else reportUnrecognisedBase(l);
  }
}
/* Parse a string that may contain IUPAC degenerate base codes.  Each
   letter's index in degenerateCombos (+1) is a 4-bit mask of which bases
   are possible: bit 8 = A, 4 = C, 2 = G, 1 = T (printBases64D inverts
   this mapping).  Bases shift in from the right, as in parsePrimer64. */
static DegeneratePrimer64 parseDegeneratePrimer64(const char *ABC) {
  DegeneratePrimer64 r={0,0,0,0};
  while(1) {
    char l=fast_toUpper(*ABC++);
    if(!l) return r;
    const char *combo = strchr(degenerateCombos,l);
    if(combo) {
      int c=combo-degenerateCombos + 1;
      r.MaybeA = (r.MaybeA<<1) | ((c&8)!=0);
      r.MaybeC = (r.MaybeC<<1) | ((c&4)!=0);
      r.MaybeG = (r.MaybeG<<1) | ((c&2)!=0);
      r.MaybeT = (r.MaybeT<<1) | ((c&1)!=0);
    } else reportUnrecognisedBase(l);
  }
}
/* Parse a primer string, choosing the cheaper non-degenerate
   representation unless a degenerate IUPAC letter is present. */
static MaybeDegeneratePrimer64 parseMaybeDegeneratePrimer64(const char *ABC) {
  size_t plainLen=strcspn(ABC,"BDHKMNRSVWYbdhkmnrsvwy");
  return ABC[plainLen]
    ? wrapDegeneratePrimer64(parseDegeneratePrimer64(ABC))
    : wrapPrimer64(parsePrimer64(ABC));
}
/* Complement every base in place. */
static inline void PrimerComplement64(Primer64 *p) {
  // A->T, T->A, C->G, G->C. So AorT stays, GorT flipped.
  // (Bits outside 'valid' flip too; harmless, since every consumer masks
  // against 'valid' before using them.)
  p->GorT = ~(p->GorT);
}
/* Complement a degenerate primer in place: complementing exchanges the
   A and T possibility masks, and likewise C and G. */
static inline void PrimerComplement64D(DegeneratePrimer64 *p) {
  bit64 oldA = p->MaybeA, oldG = p->MaybeG;
  p->MaybeA = p->MaybeT; p->MaybeT = oldA;
  p->MaybeG = p->MaybeC; p->MaybeC = oldG;
}
/* Complement in place, dispatching on the live union member. */
static inline void PrimerComplement64MaybeD(MaybeDegeneratePrimer64 *p) {
  if(!p->isD) { PrimerComplement64(&(p->p.notD)); return; }
  PrimerComplement64D(&(p->p.D));
}
static int NumPossibilities64D_32bases(DegeneratePrimer64 d) {
  /* number of possible primers this degenerate primer might be equal to (assumes aligned right) */
  int r = 1, sR, poss;
  for(sR=0; sR<32; sR++) { /* 32, not sixty four etc, because we want the return value to work with Make2bitFrom64D */
    /* poss = how many bases are possible at this position (0 = past the
       end of the primer, since valid positions have at least one) */
    poss = ((d.MaybeA >> sR) & 1) + ((d.MaybeC >> sR) & 1)
      + ((d.MaybeG >> sR) & 1) + ((d.MaybeT >> sR) & 1);
    if (poss) r *= poss; else break;
  }
  return r;
}
/* A non-degenerate primer has exactly one possible realisation. */
static int NumPossibilities64MaybeD_32bases(MaybeDegeneratePrimer64 d) {
  return d.isD ? NumPossibilities64D_32bases(d.p.D) : 1;
}
/* Materialise realisation number possNo (0-based, of nPoss total; see
   NumPossibilities64D_32bases) of a degenerate primer as a 2-bit-per-base
   word (T=0, C=1, A=2, G=3) plus a validity mask. */
static int Make2bitFrom64D(DegeneratePrimer64 d,ULL *out,ULL *outValid,int possNo,int nPoss) {
  /* note: ULL across all 3 .h variants
     - more than 32 bases here will be truncated
     (return value is 1 if it got truncated).
     TODO: we could make a "not degenerate" version of
     this which just shifts bits around, but I'd be very
     surprised if this function is anywhere near the top
     of a profile trace, so for now I'll leave it as you
     have to call upgradeToDegenerate64 from the Maybe.
     Note: for ease of binary search (see amplicons.c),
     bases are shifted in from the LEFT (not from the
     right as in the other functions), and the result
     reads backwards from the 'end' cursor at left.
  */
  int sR, poss;
  ULL toOut=0,toOutValid=0;
  for(sR=0; sR<32; sR++) { /* 32, not sixty four etc */
    int MaybeA = ((d.MaybeA >> sR) & 1),
      MaybeC = ((d.MaybeC >> sR) & 1),
      MaybeG = ((d.MaybeG >> sR) & 1),
      MaybeT = ((d.MaybeT >> sR) & 1);
    poss = MaybeA + MaybeC + MaybeG + MaybeT;
    if (poss) {
      /* mixed-radix digit extraction: split the possNo index evenly
         among this position's poss choices, ordered T, C, A, G */
      int partitionSize = nPoss / poss;
      int possToTake = possNo / partitionSize;
      possNo %= partitionSize; nPoss /= poss;
      ULL bits = 0; /* we set it to a value to stop the "might be uninitialised" warning */
      if (MaybeT) possToTake--; /* if(!possToTake--) bits=0; but it's at 0 anyway */
      if (MaybeC && !possToTake--) bits=1;
      if (MaybeA && !possToTake--) bits=2;
      if (MaybeG && !possToTake) bits=3;
      int sL = 62-2*sR; /* IMPORTANT: don't write (64-2) as it'll be changed to 32-2 in 32.h; this is ULL */
      toOut |= (bits << sL);
      toOutValid |= ((ULL)3 << sL);
    } else break;
  } *out=toOut; *outValid = toOutValid;
#define Is_64bit /* will change to Is_32bit in 32.h */
#ifdef Is_32bit
  return 0; // avoid compiler warnings
#else
  /* truncated iff any possibility bit lives above base 32 */
  return ((d.MaybeA >> 32) | (d.MaybeC >> 32) | (d.MaybeG >> 32) | (d.MaybeT >> 32)) != 0;
#endif
}
/* Reverse the base order of a primer by shifting bits out of the bottom
   of the inputs into the bottom of the outputs until 'valid' is
   exhausted; the result is again right-aligned. */
static inline Primer64 PrimerReverse64(Primer64 p) {
  /* assumes 'valid' is right-aligned */
  bit64 v=p.valid,i1=p.AorT,i2=p.GorT,o1=0,o2=0;
  while(v) {
    o1=(o1<<1)|(i1&1); o2=(o2<<1)|(i2&1);
    i1>>=1; i2>>=1; v>>=1;
  }
  p.AorT = o1; p.GorT = o2; return p;
}
/* Degenerate counterpart of PrimerReverse64: reverse all four Maybe
   masks in lockstep; the loop runs while any mask still has bits. */
static inline DegeneratePrimer64 DegeneratePrimerReverse64(DegeneratePrimer64 p) {
  /* assumes right-aligned */
  bit64 i1=p.MaybeA, i2=p.MaybeC, i3=p.MaybeG, i4=p.MaybeT, o1=0, o2=0, o3=0, o4=0;
  while(i1 || i2 || i3 || i4) {
    o1=(o1<<1)|(i1&1); o2=(o2<<1)|(i2&1);
    o3=(o3<<1)|(i3&1); o4=(o4<<1)|(i4&1);
    i1>>=1; i2>>=1; i3>>=1; i4>>=1;
  }
  p.MaybeA = o1; p.MaybeC = o2; p.MaybeG = o3; p.MaybeT = o4; return p;
}
/* Reverse base order, dispatching on the live union member. */
static inline MaybeDegeneratePrimer64 MaybeDegeneratePrimerReverse64(MaybeDegeneratePrimer64 p) {
  return p.isD
    ? wrapDegeneratePrimer64(DegeneratePrimerReverse64(p.p.D))
    : wrapPrimer64(PrimerReverse64(p.p.notD));
}
/* Prepend a tag at the 5' (most-significant) end of a forward primer:
   the tag is shifted left past the primer's existing bases and OR'd in.
   popcount of 'valid' equals the primer's length in bases. */
static inline void PrimerTag64(Primer64 *p,Primer64 tag) {
  /* assumes 'valid' is right-aligned in both p and tag */
  int sL = popcount64(p->valid); /* = 64-leading0_64 */
  p->valid |= (tag.valid << sL);
  p->AorT |= (tag.AorT << sL); p->GorT|=(tag.GorT << sL);
}
/* Degenerate counterpart of PrimerTag64: prepend tag above the primer's
   current length (popcount of the union of the Maybe masks). */
static inline void DegeneratePrimerTag64(DegeneratePrimer64 *p,DegeneratePrimer64 tag) {
  int sL = popcount64(p->MaybeA | p->MaybeC | p->MaybeG | p->MaybeT);
  p->MaybeA |= (tag.MaybeA << sL);
  p->MaybeC |= (tag.MaybeC << sL);
  p->MaybeG |= (tag.MaybeG << sL);
  p->MaybeT |= (tag.MaybeT << sL);
}
/* Tag dispatcher: if only the tag is degenerate, first promote the
   primer so both sides share a representation (upgrade must read *p
   BEFORE isD is set), then delegate. */
static inline void MaybeDegeneratePrimerTag64(MaybeDegeneratePrimer64 *p,MaybeDegeneratePrimer64 tag) {
  if (tag.isD && !(p->isD)) { /* a degenerate tag (is this possible? well if it is, we're ready...) */
    p->p.D = upgradeToDegenerate64(*p); p->isD = 1;
  }
  if(p->isD) DegeneratePrimerTag64(&(p->p.D),upgradeToDegenerate64(tag));
  else PrimerTag64(&(p->p.notD),tag.p.notD);
}
static inline void PrimerTag64B(Primer64 *p,Primer64 tag) {
  /* for backward primers: reverse the tag first, and add it to the lsb of the primer rather than the msb */
  tag = PrimerReverse64(tag);
  int sL = popcount64(tag.valid);  /* tag length in bases */
  /* shift primer up to make room, then OR the reversed tag in below */
  p->valid = ((p->valid) << sL) | tag.valid;
  p->AorT = ((p->AorT) << sL) | tag.AorT;
  p->GorT = ((p->GorT) << sL) | tag.GorT;
}
/* Degenerate counterpart of PrimerTag64B: reverse the tag, shift the
   primer up by the tag's length, and splice the tag in at the bottom. */
static inline void DegeneratePrimerTag64B(DegeneratePrimer64 *p,DegeneratePrimer64 tag) {
  tag = DegeneratePrimerReverse64(tag);
  int sL = popcount64(tag.MaybeA | tag.MaybeC | tag.MaybeG | tag.MaybeT);
  p->MaybeA = ((p->MaybeA) << sL) | tag.MaybeA;
  p->MaybeC = ((p->MaybeC) << sL) | tag.MaybeC;
  p->MaybeG = ((p->MaybeG) << sL) | tag.MaybeG;
  p->MaybeT = ((p->MaybeT) << sL) | tag.MaybeT;
}
/* Backward-primer tag dispatcher; see MaybeDegeneratePrimerTag64 for
   the promotion rule (upgrade must read *p before isD is set). */
static inline void MaybeDegeneratePrimerTag64B(MaybeDegeneratePrimer64 *p,MaybeDegeneratePrimer64 tag) {
  if (tag.isD && !(p->isD)) {
    p->p.D = upgradeToDegenerate64(*p); p->isD = 1;
  }
  if(p->isD) DegeneratePrimerTag64B(&(p->p.D),upgradeToDegenerate64(tag));
  else PrimerTag64B(&(p->p.notD),tag.p.notD);
}
/* Remove a forward tag previously added by PrimerTag64: clear the top
   'tag length' bases by masking (the tag sits at the high end, at offset
   primer-length minus tag-length). */
static inline void PrimerRmTag64(Primer64 *p,Primer64 tag) {
  int toRM = popcount64(tag.valid);
  bit64 mask = ~(tag.valid << (popcount64(p->valid) - toRM));
  p->valid &= mask; p->AorT &= mask; p->GorT &= mask;
}
/* Degenerate counterpart of PrimerRmTag64: lengths come from popcounts
   of the unions of the Maybe masks. */
static inline void DegeneratePrimerRmTag64(DegeneratePrimer64 *p,DegeneratePrimer64 tag) {
  bit64 tValid = tag.MaybeA | tag.MaybeC | tag.MaybeG | tag.MaybeT,
    pValid = p->MaybeA | p->MaybeC | p->MaybeG | p->MaybeT;
  int toRM = popcount64(tValid);
  bit64 mask = ~(tValid << (popcount64(pValid) - toRM));
  p->MaybeA &= mask; p->MaybeC &= mask; p->MaybeG &= mask;
  p->MaybeT &= mask;
}
/* Forward tag-removal dispatcher; see MaybeDegeneratePrimerTag64 for
   the promotion rule (upgrade must read *p before isD is set). */
static inline void MaybeDegeneratePrimerRmTag64(MaybeDegeneratePrimer64 *p,MaybeDegeneratePrimer64 tag) {
  if (tag.isD && !(p->isD)) {
    p->p.D = upgradeToDegenerate64(*p); p->isD = 1;
  }
  if(p->isD) DegeneratePrimerRmTag64(&(p->p.D),upgradeToDegenerate64(tag));
  else PrimerRmTag64(&(p->p.notD),tag.p.notD);
}
/* Remove a backward tag added by PrimerTag64B: the tag occupies the low
   bits, so simply shift everything down by the tag's length. */
static inline void PrimerRmTag64B(Primer64 *p,Primer64 tag) {
  int sR = popcount64(tag.valid);
  p->valid >>= sR; p->AorT >>= sR; p->GorT >>= sR;
}
/* Degenerate counterpart of PrimerRmTag64B: shift all four Maybe masks
   down by the tag's length. */
static inline void DegeneratePrimerRmTag64B(DegeneratePrimer64 *p,DegeneratePrimer64 tag) {
  int sR = popcount64(tag.MaybeA | tag.MaybeC | tag.MaybeG | tag.MaybeT);
  p->MaybeA >>= sR; p->MaybeC >>= sR; p->MaybeG >>= sR;
  p->MaybeT >>= sR;
}
/* Backward tag-removal dispatcher; see MaybeDegeneratePrimerTag64 for
   the promotion rule (upgrade must read *p before isD is set). */
static inline void MaybeDegeneratePrimerRmTag64B(MaybeDegeneratePrimer64 *p,MaybeDegeneratePrimer64 tag) {
  if (tag.isD && !(p->isD)) {
    p->p.D = upgradeToDegenerate64(*p); p->isD = 1;
  }
  if(p->isD) DegeneratePrimerRmTag64B(&(p->p.D),upgradeToDegenerate64(tag));
  else PrimerRmTag64B(&(p->p.notD),tag.p.notD);
}
static inline int score64(Primer64 p1,Primer64 p2) {
  /* score the interaction between p1 and p2, fast.
     p2 must have been PrimerReverse64'd by the caller.
     Slides p1 across p2 one base at a time, and for each offset counts
     Watson-Crick bonds with bit tricks: aligned bases pair iff their
     AorT bits agree and their GorT bits differ (A-T: 1,0 vs 1,1;
     C-G: 0,0 vs 0,1).  Score per offset = bonds - mismatches
     = 2*bonds - overlap; the best offset's score is returned. */
  int sL=(64 - 1/*threshold*/) - leading0_64(p2.valid),
    maxScore = 0; /* this initial value of maxScore is also the minimum score that can be returned. Do not make it negative without reviewing code that assumes it's >=0 */
  Primer64 p1B; /* we start with p1 shifted left */
  p1B.AorT = p1.AorT << sL; p1B.GorT = p1.GorT << sL;
  p1B.valid = p1.valid << sL;
  /* while reload > 0 the left shift has pushed real bases off the top,
     so each step re-derives p1B from p1 at the next smaller shift
     (recovering those bits); once aligned, plain right shifts suffice */
  int reload = sL - leading0_64(p1.valid);
  if(reload<0) reload=0;
  /* TODO: if rewritten into 2 loops, can reload only once: load in the middle, shift to the right until gone, reload in the middle <<1, while overlap test + shift left. Of course if the above reload <= 0 then do just the 1 loop as below because it'll be faster in that case. */
  while(1) {
    bit64 overlap = p1B.valid & p2.valid;
    if(!overlap) return maxScore; /* all done */
    bit64 bonds = (~(p1B.AorT ^ p2.AorT)) & (p1B.GorT ^ p2.GorT) & overlap;
    int score = 2*popcount64(bonds) - popcount64(overlap);
    maxScore = (score > maxScore ? score : maxScore);
    if(reload) {
      --sL;
      p1B.AorT = p1.AorT << sL; p1B.GorT = p1.GorT << sL;
      p1B.valid = p1.valid << sL; --reload;
    } else {
      p1B.AorT >>=1; p1B.GorT >>=1; p1B.valid >>=1;
    }
  }
}
/* Degenerate counterpart of score64 (see its comments for the sliding /
   reload scheme).  A position bonds if ANY possible base of p1 can pair
   with ANY possible base of p2 (A-T, C-G, G-C, T-A mask intersections);
   p2 must already be reversed. */
static inline int score64D(DegeneratePrimer64 p1,DegeneratePrimer64 p2) {
  bit64 p1Valid = DegenerateValid64(p1),
    p2Valid = DegenerateValid64(p2);
  int sL=(64 - 1/*threshold*/) - leading0_64(p2Valid),
    maxScore = 0;
  DegeneratePrimer64 p1B;
  p1B.MaybeA = p1.MaybeA << sL;
  p1B.MaybeC = p1.MaybeC << sL;
  p1B.MaybeG = p1.MaybeG << sL;
  p1B.MaybeT = p1.MaybeT << sL;
  bit64 p1Bvalid = p1Valid << sL;
  int reload = sL - leading0_64(p1Valid);
  if(reload<0) reload=0;
  while(1) {
    bit64 overlap = p1Bvalid & p2Valid;
    if(!overlap) return maxScore;
    bit64 bonds = (p1B.MaybeA & p2.MaybeT) |
      (p1B.MaybeC & p2.MaybeG) |
      (p1B.MaybeG & p2.MaybeC) |
      (p1B.MaybeT & p2.MaybeA);
    int score = 2*popcount64(bonds) - popcount64(overlap);
    maxScore = (score > maxScore ? score : maxScore);
    if(reload) {
      --sL;
      p1B.MaybeA = p1.MaybeA << sL;
      p1B.MaybeC = p1.MaybeC << sL;
      p1B.MaybeG = p1.MaybeG << sL;
      p1B.MaybeT = p1.MaybeT << sL;
      p1Bvalid = p1Valid << sL; --reload;
    } else {
      p1B.MaybeA >>=1; p1B.MaybeC >>=1; p1B.MaybeG >>=1;
      p1B.MaybeT >>=1; p1Bvalid >>=1;
    }
  }
}
/* Score dispatcher: take the fast non-degenerate path when neither
   primer is degenerate, otherwise promote both and use the degenerate
   scorer. */
static inline int score64MaybeD(MaybeDegeneratePrimer64 p1,MaybeDegeneratePrimer64 p2) {
  if(!p1.isD && !p2.isD) return score64(p1.p.notD,p2.p.notD);
  return score64D(upgradeToDegenerate64(p1),upgradeToDegenerate64(p2));
}
static inline int count64(Primer64 p1,Primer64 p2,
                          int *tried) {
  /* count the number of alignments of >0 bonds,
     for information only. Similar to score64, but called
     only when outputting interaction data.
     *tried is incremented once per alignment examined.
     (TODO: could make this return maxScore or minDG as
     well, to save having to call that func separately,
     but low priority because this is called only when
     printing out bonds in excess of threshold) */
  int sL=(64 - 1/*threshold*/) - leading0_64(p2.valid),
    count = 0;
  Primer64 p1B; /* we start with p1 shifted left */
  p1B.AorT = p1.AorT << sL; p1B.GorT = p1.GorT << sL;
  p1B.valid = p1.valid << sL;
  int reload = sL - leading0_64(p1.valid);  /* see score64 */
  if(reload<0) reload=0;
  while(1) {
    bit64 overlap = p1B.valid & p2.valid;
    if(!overlap) return count;
    (*tried)++;
    /* same bond test as score64; any nonzero bond mask counts */
    if((~(p1B.AorT ^ p2.AorT)) & (p1B.GorT ^ p2.GorT) & overlap) count++;
    if(reload) {
      --sL;
      p1B.AorT = p1.AorT << sL; p1B.GorT = p1.GorT << sL;
      p1B.valid = p1.valid << sL; --reload;
    } else {
      p1B.AorT >>=1; p1B.GorT >>=1; p1B.valid >>=1;
    }
  }
}
/* Degenerate counterpart of count64: number of alignments with at least
   one possible bond; *tried counts all alignments examined. */
static inline int count64D(DegeneratePrimer64 p1,DegeneratePrimer64 p2,int *tried) {
  bit64 p1Valid = DegenerateValid64(p1),
    p2Valid = DegenerateValid64(p2);
  int sL=(64 - 1/*threshold*/) - leading0_64(p2Valid),
    count = 0;
  DegeneratePrimer64 p1B;
  p1B.MaybeA = p1.MaybeA << sL;
  p1B.MaybeC = p1.MaybeC << sL;
  p1B.MaybeG = p1.MaybeG << sL;
  p1B.MaybeT = p1.MaybeT << sL;
  bit64 p1Bvalid = p1Valid << sL;
  int reload = sL - leading0_64(p1Valid);  /* see score64 */
  if(reload<0) reload=0;
  while(1) {
    bit64 overlap = p1Bvalid & p2Valid;
    if(!overlap) return count;
    (*tried)++;
    if ((p1B.MaybeA & p2.MaybeT) |
        (p1B.MaybeC & p2.MaybeG) |
        (p1B.MaybeG & p2.MaybeC) |
        (p1B.MaybeT & p2.MaybeA)) count++;
    if(reload) {
      --sL;
      p1B.MaybeA = p1.MaybeA << sL;
      p1B.MaybeC = p1.MaybeC << sL;
      p1B.MaybeG = p1.MaybeG << sL;
      p1B.MaybeT = p1.MaybeT << sL;
      p1Bvalid = p1Valid << sL; --reload;
    } else {
      p1B.MaybeA >>=1; p1B.MaybeC >>=1; p1B.MaybeG >>=1;
      p1B.MaybeT >>=1; p1Bvalid >>=1;
    }
  }
}
static inline void count64MaybeD(MaybeDegeneratePrimer64 p1,MaybeDegeneratePrimer64 p2,FILE *f) {
  /* Put clarification for any beginner users who haven't
     been informed that we automatically try all positions
     and print only the worst case */
  int tried = 0;
  /* dispatch to the degenerate counter if either side is degenerate */
  int c = ((p1.isD || p2.isD) ? count64D(upgradeToDegenerate64(p1),upgradeToDegenerate64(p2),&tried) : count64(p1.p.notD,p2.p.notD,&tried));
  fprintf(f,"Positions tried: %d\nBonding positions: %d%s\n",tried,c,(c>1)?" (worst one shown here)":"");
}
/* Print a primer as A/C/G/T letters, most-significant valid bit first,
   decoding the 2-bit AorT/GorT encoding (see the Primer64 typedef). */
static void printBases64(Primer64 p,FILE *f) {
  bit64 i = (bit64)1 << (64-1-leading0_64(p.valid));   /* top valid bit */
  for(; i&p.valid; i>>=1)
    fputc(
      (p.AorT & i) ?
      ((p.GorT & i)?'T':'A') :
      ((p.GorT & i)?'G':'C'), f);
}
/* Print a degenerate primer as IUPAC letters: each position's 4-bit
   possibility mask (A=8, C=4, G=2, T=1) indexes degenerateCombos, the
   inverse of the mapping used by parseDegeneratePrimer64. */
static void printBases64D(DegeneratePrimer64 p,FILE *f) {
  bit64 valid = DegenerateValid64(p);
  bit64 i = (bit64)1 << (64-1-leading0_64(valid));   /* top valid bit */
  for(; i&valid; i>>=1) {
    int j = (((p.MaybeA & i)!=0)<<3) |
      (((p.MaybeC & i)!=0)<<2) |
      (((p.MaybeG & i)!=0)<<1) |
      ((p.MaybeT & i)!=0);
    fputc(degenerateCombos[j-1],f);   /* j>=1 on any valid position */
  }
}
/* Print the base letters, dispatching on the live union member. */
static void printBases64MaybeD(MaybeDegeneratePrimer64 p,FILE *f) {
  if(!p.isD) { printBases64(p.p.notD,f); return; }
  printBases64D(p.p.D,f);
}
/* Render one alignment (p1 shifted left by sL over reversed p2) as
   three text rows: the 5'-3' strand, a '|'/'x' bond row over the
   overlap, and the 3'-5' strand, with indents computed so the strands
   line up column-wise. */
static void print64_inner(Primer64 p1,int sL,Primer64 p2,bit64 overlap,bit64 bonds,FILE* f) {
  /* code common to print64 and dGprint64 */
  /* i1/i2/iMid: column offsets of p1, p2 and the bond row, normalised so
     the leftmost of the two strands starts at column 0 */
  int i1 = leading0_64(p1.valid)-sL, i2 = leading0_64(p2.valid), iMid = leading0_64(overlap); if(i1<i2) { i2-=i1; iMid-=i1; i1=0; } else { i1-=i2; iMid-=i2; i2=0; }
  indent(i1,f); fputs("5'-",f); printBases64(p1,f); fputs("-3'\n",f);
  indent(iMid+(sizeof("5'-")-1), f);   /* skip the "5'-" prefix width */
  bit64 bond = (bit64)1 << (64-1-leading0_64(overlap));
  for(; bond&overlap; bond>>=1) fputc((bond&bonds)?'|':'x',f);
  fputc('\n',f); indent(i2,f);
  fputs("3'-",f); printBases64(p2,f);fputs("-5'\n",f);
}
/* Degenerate counterpart of print64_inner: same three-row rendering,
   with validity taken from the union of the Maybe masks. */
static void print64D_inner(DegeneratePrimer64 p1,int sL,DegeneratePrimer64 p2,bit64 overlap,bit64 bonds,FILE* f) {
  int i1 = leading0_64(DegenerateValid64(p1))-sL, i2 = leading0_64(DegenerateValid64(p2)), iMid = leading0_64(overlap); if(i1<i2) { i2-=i1; iMid-=i1; i1=0; } else { i1-=i2; iMid-=i2; i2=0; }
  indent(i1,f); fputs("5'-",f); printBases64D(p1,f); fputs("-3'\n",f);
  indent(iMid+(sizeof("5'-")-1), f);
  bit64 bond = (bit64)1 << (64-1-leading0_64(overlap));
  for(; bond&overlap; bond>>=1) fputc((bond&bonds)?'|':'x',f);
  fputc('\n',f); indent(i2,f);
  fputs("3'-",f); printBases64D(p2,f); fputs("-5'\n",f);
}
static void print64(Primer64 p1,Primer64 p2,int maxScore,FILE *f) {
  /* maxScore has been found by score64; print a representation
     of the interaction, along with the score.
     Re-walks all alignments (left shift sL, then right shift sR, same
     order as score64 but without the lossy reload shortcut) and prints
     every alignment whose score equals maxScore. */
  int sL=(64 - 1) - leading0_64(p2.valid);
  int sR = 0; Primer64 p1B;
  while(1) {
    if(sL) {
      p1B.AorT = p1.AorT << sL; p1B.GorT = p1.GorT << sL;
      p1B.valid = p1.valid << sL;
    } else {
      /* this function is allowed to be a bit slower than
         score64, and we need to keep all bits */
      p1B.AorT = p1.AorT >> sR; p1B.GorT = p1.GorT >> sR;
      p1B.valid = p1.valid >> sR;
    }
    bit64 overlap = p1B.valid & p2.valid;
    bit64 bonds = (~(p1B.AorT ^ p2.AorT)) & (p1B.GorT ^ p2.GorT) & overlap;
    int score = 2*popcount64(bonds) - popcount64(overlap);
    if(score == maxScore) {
      /* TODO: if more than one ==maxScore, prioritise
         any that has more C-G links, stronger than A-T */
      fprintf(f,"Matches = %d\n",popcount64(bonds));
      fprintf(f,"Score = %d\n",maxScore);
      print64_inner(p1,sL-sR,p2,overlap,bonds,f);
      //return; /* comment out to print ALL maxScore matches */
    }
    if(!overlap) return; /* needed if not returning above */
    if(sL) sL--; else sR++;
  }
}
/* Degenerate counterpart of print64: re-walk all alignments and print
   each one whose score equals maxScore (found by score64D). */
static void print64D(DegeneratePrimer64 p1,DegeneratePrimer64 p2,int maxScore,FILE *f) {
  int sL=(64 - 1) - leading0_64(DegenerateValid64(p2));
  int sR = 0; DegeneratePrimer64 p1B;
  while(1) {
    if(sL) {
      p1B.MaybeA = p1.MaybeA << sL;
      p1B.MaybeC = p1.MaybeC << sL;
      p1B.MaybeG = p1.MaybeG << sL;
      p1B.MaybeT = p1.MaybeT << sL;
    } else {
      /* right-shift phase keeps all bits; slower but exhaustive */
      p1B.MaybeA = p1.MaybeA >> sR;
      p1B.MaybeC = p1.MaybeC >> sR;
      p1B.MaybeG = p1.MaybeG >> sR;
      p1B.MaybeT = p1.MaybeT >> sR;
    }
    bit64 overlap = DegenerateValid64(p1B) & DegenerateValid64(p2);
    bit64 bonds = (p1B.MaybeA & p2.MaybeT) |
      (p1B.MaybeC & p2.MaybeG) |
      (p1B.MaybeG & p2.MaybeC) |
      (p1B.MaybeT & p2.MaybeA);
    int score = 2*popcount64(bonds) - popcount64(overlap);
    if(score == maxScore) {
      /* TODO: if more than one ==maxScore, how to
         prioritise the links in the degenerate case? */
      fprintf(f,"Matches = %d\n",popcount64(bonds));
      fprintf(f,"Score = %d\n",maxScore);
      print64D_inner(p1,sL-sR,p2,overlap,bonds,f);
      //return; /* comment out to print ALL maxScore matches */
    }
    if(!overlap) return; /* needed if not returning above */
    if(sL) sL--; else sR++;
  }
}
/* Print a full interaction report for one primer pair: header with the
   two names, the tried/bonding-position counts, then the best
   alignment(s) at maxScore, dispatching on degeneracy. */
static void print64MaybeD(MaybeDegeneratePrimer64 p1,MaybeDegeneratePrimer64 p2,const char *name1,const char *name2,int maxScore,FILE *f) {
  if(!name1 || !*name1) name1="(no name)";
  if(!name2 || !*name2) name2="(no name)";
  fprintf(f,"%s versus %s\n",name1,name2);
  count64MaybeD(p1,p2,f);
  if(p1.isD || p2.isD) print64D(upgradeToDegenerate64(p1),upgradeToDegenerate64(p2),maxScore,f);
  else print64(p1.p.notD,p2.p.notD,maxScore,f);
  fputc('\n',f);
}
/* Parse FASTA text IN PLACE: primers go to buf (names to names), tag
   sequences (records named ">tagX") go to tags, and whichTag[i] records
   which tag applies to primer i, matched by the last letter of the
   primer's name versus X.  A tag seen for the first time also applies
   retroactively to earlier primers.  Mutates fileData by writing NUL
   terminators; finally warns on stderr about ">tagX" records that are
   not followed by any matching primer. */
static void parseFASTA64(char *fileData,MaybeDegeneratePrimer64 *buf,MaybeDegeneratePrimer64 *tags,int *whichTag,char* *names) {
  /* (note: adds extra 0 bytes to fileData) */
  char *seqName=NULL; int p=0;   /* p = number of primers stored so far */
  /* lastByte_to_whichTag[c] = index of the most recent ">tag<c>"; -1 (0xFF fill) = none yet */
  int lastByte_to_whichTag[256]; memset(&lastByte_to_whichTag,0xFF,sizeof(lastByte_to_whichTag));
  char check_not_last[256]={0};  /* tag types redefined but not yet used again */
  int nextTag = 0;
  fileData += strspn(fileData,"\r\n\xef\xbb\xbf");
  while(*fileData) {
    size_t lineEnd,start=0; do { // multiline seq?
      lineEnd = strcspn(fileData+start,"\r\n\xef\xbb\xbf") + start; /* see comment in load-common.c re stray BOMs */
      start = strspn(fileData+lineEnd,"\r\n\xef\xbb\xbf") + lineEnd;
    } while(*fileData!='>' && fileData[start] && fileData[start]!='>');
    char o=fileData[lineEnd];   /* remember the char we'll overwrite with NUL */
    if (*fileData == '>') {
      seqName = fileData+1;
      while(*seqName==' ') seqName++; /* ignore spaces between > and label */
      while(fileData[--lineEnd]==' '); /* and after end of label */
      fileData[++lineEnd]=0;
    } else if (lineEnd) {
      fileData[lineEnd] = 0;
      MaybeDegeneratePrimer64 mdp =
        parseMaybeDegeneratePrimer64(fileData);
      if(!strncmp(seqName,"tag",3) && strlen(seqName)==4){
        unsigned char tagType=fast_toUpper(seqName[3]);
        if (lastByte_to_whichTag[tagType] == -1) {
          /* This tag type hasn't been set before, so
             retroactively apply it to previous primers */
          int p2;
          for(p2=0; p2<p; p2++)
            if(tagType==(unsigned char)fast_toUpper(names[p2][strlen(names[p2])-1]))
              whichTag[p2] = nextTag;
        } else check_not_last[tagType] = 1;
        lastByte_to_whichTag[tagType] = nextTag;
        tags[nextTag++] = mdp;
      } else {
        /* ordinary primer: associate it with the most recent matching tag */
        names[p] = seqName;
        unsigned char tagType=fast_toUpper(seqName[strlen(seqName)-1]);
        whichTag[p] = lastByte_to_whichTag[tagType];
        check_not_last[tagType] = 0;
        buf[p++] = mdp;
      }
    }
    fileData += start; if(!o) break; /* no \n at end ?? */
  }
  /* warn about redefined tags whose last definition never got used
     (p is reused here as a "first warning already printed" flag) */
  p = 0;
  for(nextTag=0; nextTag<256; nextTag++)
    if(check_not_last[nextTag]) {
      if(p) fprintf(stderr,"WARNING: Same applies to >tag%c\n",nextTag);
      else {
        p = 1;
        fprintf(stderr,"\nWARNING: You have multiple >tag%c sequences\n and the last one does not precede a >...%c primer.\n This probably means you've made a mistake.\n Apart from the first >tag%c, all >tag%c tags will apply to\n >...%c primers AFTER the >tag%c (not before it).\n",nextTag,nextTag,nextTag,nextTag,nextTag,nextTag);
      }
    }
}
/* Print a histogram of pairwise interaction scores (one "score\tcount"
   line per score from 0 up to the maximum observed) over all unordered
   primer pairs, including self-pairs, then close f unless it is stdout. */
static void counts64(const MaybeDegeneratePrimer64 *forward,const MaybeDegeneratePrimer64 *backward,int np,FILE *f) {
  int i,j;
  /* 64+1 slots, not 64: score64MaybeD can return up to 64 itself (a
     full-width perfect complement scores 2*64 - 64), which would index
     one past the end of a 64-entry histogram.  (Written as 64+1 so the
     textual 64->32/128 generation of the sibling headers scales it.) */
  int counts[64+1]={0},maxS = 0;
  for(i=0; i<np; i++) for(j=i; j<np; j++) {
    int score = score64MaybeD(forward[i],backward[j]);
    counts[score]++; if(score>maxS) maxS=score;
  }
  for(i=0; i<=maxS; i++) fprintf(f,"%d\t%d\n",i,counts[i]);
  if(f!=stdout) fclose(f);
}
static int pCounts64(const MaybeDegeneratePrimer64 *forward,const MaybeDegeneratePrimer64 *backward,int np,const int *pools,const int *precalcScores) {
  /* like counts64 but includes combinations only if they're in the same pool + count invalid/overlap scores */
  /* precalcScores, if non-NULL, is the triangular cache from triangle64
     and is consumed in the same i<=j order; scores >=64 are sentinel
     "overlap" values and are tallied separately.  Histogram goes to
     stderr; returns the overlap count. */
  int i,j;
  int counts[64]={0},maxS = 0, other=0;
  for(i=0; i<np; i++) for(j=i; j<np; j++) if(pools[i]==pools[j]) {
    int score = precalcScores ? *precalcScores++ : score64MaybeD(forward[i],backward[j]);
    if(score<64) {
      counts[score]++; if(score>maxS) maxS=score;
    } else other++;
  } else if(precalcScores) precalcScores++;  /* keep the cache cursor in step */
  int first = 1;
  for(i=0; i<=maxS; i++)
    fprintf(stderr,"%s%d\t%d",first?((first=0),""):"\n",i,counts[i]);
  if(other) fprintf(stderr,"%sOverlaps\t%d",first?"":"\n",other);
  return other;
}
/* Print full interaction reports for every (same-pool, if pools given)
   primer pair scoring >= threshold.  Normally collects the hits and
   sorts them highest score first; if the collection buffer can't be
   allocated, falls back to streaming them out unsorted. */
static void printBonds64(const MaybeDegeneratePrimer64 *forward,const MaybeDegeneratePrimer64 *backward,int np,FILE *f,int threshold,char* *names,const int *pools) {
  int i,j;
  ScoreRecord* sr=malloc(t_Nitems(np)*sizeof(ScoreRecord));  /* may be NULL */
  ScoreRecord* sr2 = sr;   /* write cursor */
  for(i=0; i<np; i++) for(j=i; j<np; j++) {
    if(!pools || pools[i]==pools[j]) {
      int score = score64MaybeD(forward[i],backward[j]);
      if (score >= threshold) {
        if(sr) {
          sr2->score = score; sr2->i = i; (sr2++)->j = j;
        } else print64MaybeD(forward[i],backward[j],names[i],names[j],score,f); /* fallback: print in any order if can't sort by highest score 1st */
      }
    }
  }
  if(sr) {
    qsort(sr,sr2-sr,sizeof(ScoreRecord),highestScore1st);
    ScoreRecord *s;
    for(s=sr; s<sr2; s++)
      print64MaybeD(forward[s->i],backward[s->j],names[s->i],names[s->j],s->score,f);
    free(sr);
  }
}
/* Build the triangular score cache used by the pool splitter.
   Entry order matches the (row, col>=row) iteration used by pCounts64 and
   friends; diagonal entries (a primer against itself) are recorded as 0.
   Returns a malloc'd array of t_Nitems(np) ints, or NULL on allocation
   failure (caller frees). */
static int* triangle64(const MaybeDegeneratePrimer64 *forward,const MaybeDegeneratePrimer64 *backward,int np) {
    int *cache = malloc(t_Nitems(np)*sizeof(int));
    if(!cache) return NULL;
    int *out = cache;
    int row, col;
    for(row=0; row<np; row++) {
        for(col=row; col<np; col++) {
            if(row == col)
                *out++ = 0; /* ignore self-interaction */
            else
                *out++ = score64MaybeD(forward[row],backward[col]);
        }
    }
    return cache;
}
/* Clear "lone" bond bits: a set bit whose two neighbours are both clear.
   MPprimer_dimer_check.pl's "primer_dimer" function does the same before
   its deltaG calculation — a single matched base surrounded by mismatches
   is considered unstable.  The overlap mask is shifted so the leftmost
   overlap position is never removed. */
static inline bit64 rm_unstable_bonds64(bit64 bonds,bit64 overlap) {
    const bit64 noBond = ~bonds;
    bit64 isolated = bonds & (noBond<<1) & (noBond>>1);
    isolated &= overlap>>1; /* left-hand bit is never got rid of */
    return bonds & ~isolated;
}
/* Slide p1 across p2 (same alignment walk as score64) and return the
   minimum delta-G over all alignments, or +INFINITY if the primers never
   overlap.  For each alignment, unstable lone bonds are stripped and the
   remaining bonded stretch is summed pair-by-pair from the table.
   NOTE(review): table appears to hold nearest-neighbour dG terms indexed
   by the 8 bits of two stacked positions, with initiation terms at offset
   256 selected by the AorT bit — confirm against the table builder. */
static inline float deltaG64(Primer64 p1,Primer64 p2,const float* table) {
/* like score64 but does deltaG instead */
int sL=(64 - 1/*threshold*/) - leading0_64(p2.valid);
float minDG = INFINITY;
Primer64 p1B;
/* start with p1 shifted fully to the left of p2's valid region */
p1B.AorT = p1.AorT << sL; p1B.GorT = p1.GorT << sL;
p1B.valid = p1.valid << sL;
/* number of left-shift alignments to regenerate before switching to right shifts */
int reload = sL - leading0_64(p1.valid);
if(reload<0) reload=0;
while(1) {
bit64 overlap = p1B.valid & p2.valid;
if(!overlap) return minDG; /* primers have slid past each other: done */
bit64 bonds = rm_unstable_bonds64((~(p1B.AorT ^ p2.AorT)) & (p1B.GorT ^ p2.GorT) & overlap, overlap);
int shift = 64-2-leading0_64(bonds); bit64 mask=(bit64)3 << shift,
maskEnd = (bit64)3 << trail0_64(bonds);
float dG = table[256+((p1B.AorT & mask)>>(shift+1))]; // init
for(; mask>=maskEnd; mask>>=1,shift--)
dG += table[(((p1B.AorT & mask)>>shift)<<6) | (((p1B.GorT & mask)>>shift)<<4) | (((p2.AorT & mask)>>shift)<<2) | ((p2.GorT & mask)>>shift)];
dG += table[256+((p1B.AorT & mask)>>(shift+1))]; // init at end
minDG = (dG < minDG ? dG : minDG);
/* advance to the next alignment: regenerate from p1 while left shifts
   remain, then shift the working copy right one position at a time */
if(reload) {
--sL;
p1B.AorT = p1.AorT << sL; p1B.GorT = p1.GorT << sL;
p1B.valid = p1.valid << sL; --reload;
} else {
p1B.AorT >>=1; p1B.GorT >>=1; p1B.valid >>=1;
}
}
}
/* Degenerate-base version of deltaG64: same sliding-alignment walk, but
   bonds are any base in p1 that can pair with any base in p2, and each
   stacked pair's contribution is the worst case over the degenerate
   possibilities (via minDGdegenerate).  Returns the minimum dG over all
   alignments, or +INFINITY if the primers never overlap. */
static inline float deltaG64D(DegeneratePrimer64 p1,DegeneratePrimer64 p2,const float* table) {
bit64 p1Valid = DegenerateValid64(p1),
p2Valid = DegenerateValid64(p2);
int sL=(64 - 1/*threshold*/) - leading0_64(p2Valid);
float minDG = INFINITY;
DegeneratePrimer64 p1B;
/* working copy of p1, shifted fully left to start */
p1B.MaybeA = p1.MaybeA << sL;
p1B.MaybeC = p1.MaybeC << sL;
p1B.MaybeG = p1.MaybeG << sL;
p1B.MaybeT = p1.MaybeT << sL;
bit64 p1Bvalid = p1Valid << sL;
int reload = sL - leading0_64(p1Valid);
if(reload<0) reload=0;
while(1) {
bit64 overlap = p1Bvalid & p2Valid;
if(!overlap) return minDG; /* slid past each other: done */
/* a bond exists where any possible base of p1 pairs with any of p2 */
bit64 bonds = rm_unstable_bonds64((p1B.MaybeA & p2.MaybeT) |
(p1B.MaybeC & p2.MaybeG) |
(p1B.MaybeG & p2.MaybeC) |
(p1B.MaybeT & p2.MaybeA),overlap);
int shift = 64-2-leading0_64(bonds); bit64 mask=(bit64)3 << shift,
maskEnd = (bit64)3 << trail0_64(bonds);
float dG = table[256+!(((p1B.MaybeC|p1B.MaybeG) & mask)>>(shift+1))]; // init (worst-case scenario is C or G)
for(; mask>=maskEnd; mask>>=1,shift--)
dG += minDGdegenerate((p1B.MaybeA & mask)>>shift,(p1B.MaybeC & mask)>>shift,(p1B.MaybeG & mask)>>shift,(p1B.MaybeT & mask)>>shift,(p2.MaybeA & mask)>>shift,(p2.MaybeC & mask)>>shift,(p2.MaybeG & mask)>>shift,(p2.MaybeT & mask)>>shift,table);
dG += table[256+!(((p1B.MaybeC|p1B.MaybeG) & mask)>>(shift+1))];
minDG = (dG < minDG ? dG : minDG);
/* next alignment: regenerate left-shifted copies while any remain,
   then shift the working copy right */
if(reload) {
--sL;
p1B.MaybeA = p1.MaybeA << sL;
p1B.MaybeC = p1.MaybeC << sL;
p1B.MaybeG = p1.MaybeG << sL;
p1B.MaybeT = p1.MaybeT << sL;
p1Bvalid = p1Valid << sL; --reload;
} else {
p1B.MaybeA >>=1; p1B.MaybeC >>=1; p1B.MaybeG >>=1;
p1B.MaybeT >>=1; p1Bvalid >>=1;
}
}
}
/* Delta-G of a primer pair that may or may not contain degenerate bases:
   route to the degenerate-aware routine when either primer is degenerate,
   otherwise use the cheaper exact routine. */
static inline float deltaG64MaybeD(MaybeDegeneratePrimer64 p1,MaybeDegeneratePrimer64 p2,const float* table) {
    const int anyDegenerate = p1.isD || p2.isD;
    return anyDegenerate
        ? deltaG64D(upgradeToDegenerate64(p1),upgradeToDegenerate64(p2),table)
        : deltaG64(p1.p.notD,p2.p.notD,table);
}
/* Re-walk the alignments of p1 against p2 (mirroring deltaG64) and print
   the first alignment whose dG matches minDG (to nearlyEqual tolerance),
   as a "dG = ..." line followed by the rendered alignment.
   Unlike deltaG64 this tracks an explicit left-shift sL then right-shift
   sR so the offset (sL-sR) can be passed to print64_inner. */
static void dGprint64(Primer64 p1,Primer64 p2,float minDG,FILE *f,const float *table) {
int sL=(64 - 1) - leading0_64(p2.valid);
int sR = 0; Primer64 p1B;
while(1) {
/* rebuild the shifted copy of p1 for this alignment */
if(sL) {
p1B.AorT = p1.AorT << sL; p1B.GorT = p1.GorT << sL;
p1B.valid = p1.valid << sL;
} else {
p1B.AorT = p1.AorT >> sR; p1B.GorT = p1.GorT >> sR;
p1B.valid = p1.valid >> sR;
assert(p1B.valid); /* if this breaks, check the range of nearlyEqual */
}
bit64 overlap = p1B.valid & p2.valid;
bit64 bonds0 = (~(p1B.AorT ^ p2.AorT)) & (p1B.GorT ^ p2.GorT) & overlap,
bonds = rm_unstable_bonds64(bonds0, overlap);
/* same dG accumulation as deltaG64 */
int shift = 64-2-leading0_64(bonds); bit64 mask=(bit64)3 << shift,
maskEnd = (bit64)3 << trail0_64(bonds);
float dG = table[256+((p1B.AorT & mask)>>(shift+1))]; // init
for(; mask>=maskEnd; mask>>=1,shift--)
dG += table[(((p1B.AorT & mask)>>shift)<<6) | (((p1B.GorT & mask)>>shift)<<4) | (((p2.AorT & mask)>>shift)<<2) | ((p2.GorT & mask)>>shift)];
dG += table[256+((p1B.AorT & mask)>>(shift+1))]; // init at end
if(nearlyEqual(dG,minDG)) {
fprintf(f,"dG = %.3g\n",dG);
print64_inner(p1,sL-sR,p2,overlap,bonds0,f); /* bonds0: show all bonds, incl. unstable ones */
return;
}
if(sL) sL--; else sR++;
}
}
/* Degenerate-base version of dGprint64: re-walk the alignments (mirroring
   deltaG64D) and print the first one whose dG matches minDG (to
   nearlyEqual tolerance), as a "dG = ..." line plus rendered alignment. */
static void dGprint64D(DegeneratePrimer64 p1,DegeneratePrimer64 p2,float minDG,FILE *f,const float *table) {
int sL=(64 - 1) - leading0_64(DegenerateValid64(p2));
int sR = 0; DegeneratePrimer64 p1B;
while(1) {
/* rebuild the shifted copy of p1 for this alignment */
if(sL) {
p1B.MaybeA = p1.MaybeA << sL;
p1B.MaybeC = p1.MaybeC << sL;
p1B.MaybeG = p1.MaybeG << sL;
p1B.MaybeT = p1.MaybeT << sL;
} else {
p1B.MaybeA = p1.MaybeA >> sR;
p1B.MaybeC = p1.MaybeC >> sR;
p1B.MaybeG = p1.MaybeG >> sR;
p1B.MaybeT = p1.MaybeT >> sR;
assert(sR < 64); /* if this breaks, check the range of nearlyEqual */
}
bit64 overlap = DegenerateValid64(p1B) & DegenerateValid64(p2);
bit64 bonds0 = (p1B.MaybeA & p2.MaybeT) |
(p1B.MaybeC & p2.MaybeG) |
(p1B.MaybeG & p2.MaybeC) |
(p1B.MaybeT & p2.MaybeA),
bonds = rm_unstable_bonds64(bonds0,overlap);
/* same dG accumulation as deltaG64D */
int shift = 64-2-leading0_64(bonds); bit64 mask=(bit64)3 << shift,
maskEnd = (bit64)3 << trail0_64(bonds);
float dG = table[256+!(((p1B.MaybeC|p1B.MaybeG) & mask)>>(shift+1))]; // init (worst-case scenario is C or G)
for(; mask>=maskEnd; mask>>=1,shift--)
dG += minDGdegenerate((p1B.MaybeA & mask)>>shift,(p1B.MaybeC & mask)>>shift,(p1B.MaybeG & mask)>>shift,(p1B.MaybeT & mask)>>shift,(p2.MaybeA & mask)>>shift,(p2.MaybeC & mask)>>shift,(p2.MaybeG & mask)>>shift,(p2.MaybeT & mask)>>shift,table);
dG += table[256+!(((p1B.MaybeC|p1B.MaybeG) & mask)>>(shift+1))];
if(nearlyEqual(dG,minDG)) {
fprintf(f,"dG = %.3g\n",dG);
print64D_inner(p1,sL-sR,p2,overlap,bonds0,f); /* bonds0: show all bonds, incl. unstable ones */
return;
}
if(sL) sL--; else sR++;
}
}
/* Print one primer-pair report: a "name versus name" header, the score
   summary, and the alignment whose dG matches minDG, followed by a blank
   separator line.  Missing or empty names become "(no name)". */
static void dGprint64MaybeD(MaybeDegeneratePrimer64 p1,MaybeDegeneratePrimer64 p2,const char *name1,const char *name2,float minDG,FILE *f,const float *table) {
    const char *n1 = (name1 && *name1) ? name1 : "(no name)";
    const char *n2 = (name2 && *name2) ? name2 : "(no name)";
    fprintf(f,"%s versus %s\n",n1,n2);
    count64MaybeD(p1,p2,f);
    if(p1.isD || p2.isD) {
        dGprint64D(upgradeToDegenerate64(p1),upgradeToDegenerate64(p2),minDG,f,table);
    } else {
        dGprint64(p1.p.notD,p2.p.notD,minDG,f,table);
    }
    fputc('\n',f);
}
/* Print every same-pool primer pair whose dG is at or below threshold,
   lowest (most stable) dG first.  The pair scan is parallelised with
   OpenMP using t_iBounds to balance the triangular workload; appends to
   the shared DG_ScoreRecord buffer are serialised with a critical
   section.  If the buffer allocation failed, pairs are printed unsorted
   (still inside the critical section, so output is not interleaved). */
static void dGprintBonds64(const MaybeDegeneratePrimer64 *forward,const MaybeDegeneratePrimer64 *backward,int np,FILE *f,float threshold,char* *names,const int *pools,const float *table) {
DG_ScoreRecord* sr=malloc(t_Nitems(np)*sizeof(DG_ScoreRecord));
DG_ScoreRecord* sr2 = sr; time_t start=time(NULL);
time_t next = sr?t_ProgressStart("Sorting... "):0;
#if defined(_OPENMP)
#pragma omp parallel
#endif
{
TwoRanges tr=t_iBounds(np); /* two row ranges per thread, balancing the triangle */
int r,i,j,done=0;
for(r=0; r<2; r++)
for(i=tr.r[r].start; i<tr.r[r].end; i++,sr?t_Progress("Sorting... ",tr,np,done,&next):0,done+=np-i)
for(j=i; j<np; j++) {
if(!pools || pools[i]==pools[j]) {
float dG = deltaG64MaybeD(forward[i],backward[j],table);
if (dG <= threshold) {
#if defined(_OPENMP)
#pragma omp critical
#endif
if(sr) {
sr2->dG = dG; sr2->i = i; (sr2++)->j = j;
} else dGprint64MaybeD(forward[i],backward[j],names[i],names[j],dG,f,table);
}
}
}
}
if(sr) {
qsort(sr,sr2-sr,sizeof(DG_ScoreRecord),dGhighestScore1st);
fputs("\rSorting... done",stderr);
prnSeconds((long)(time(NULL)-start)); fputs("\n",stderr);
if(f!=stdout) { fputs("Outputting... ",stderr); start = time(NULL); next = start + 2; }
fflush(stderr);
DG_ScoreRecord *s;
for(s=sr; s<sr2; s++) {
/* progress line at most every 2 seconds while writing to a file */
if(f!=stdout && time(NULL) > next) {
fprintf(stderr,"\rOutputting... (%d%%) ",100*(int)(s-sr)/(int)(sr2-sr)); fflush(stderr);
next = time(NULL) + 2;
}
dGprint64MaybeD(forward[s->i],backward[s->j],names[s->i],names[s->j],s->dG,f,table);
}
free(sr);
if(f!=stdout) { fputs("\rOutputting... done",stderr); prnSeconds((long)(time(NULL)-start)); fputs("\n",stderr); fflush(stderr); }
}
}
/* Triangular delta-G cache for pooling, parallelised with OpenMP.  Each
   thread writes its own rows (p is positioned with t_offset per row), so
   no synchronisation is needed.  Entries are integer "buckets" rather
   than floats, so the integer pool splitter can be reused; 0x4000-1 caps
   the bucket index.  Returns malloc'd array (caller frees), or NULL. */
static int* dGtriangle64(const MaybeDegeneratePrimer64 *forward,const MaybeDegeneratePrimer64 *backward,int np,const float *table) {
/* To save doing a float version of the pool splitter, emulate 'score' by
using -dG*2, as bins of size 0.5 should be enough (*10 creates too many empty ones and split_into_pools would need changing) */
time_t next = t_ProgressStart("Precalculating dG... ");
time_t start = next;
int* scores = malloc(t_Nitems(np)*sizeof(int));
if(scores)
#if defined(_OPENMP)
#pragma omp parallel
#endif
{
TwoRanges tr=t_iBounds(np);
int r,i,j,*p,done=0;
for(r=0; r<2; r++)
for(i=tr.r[r].start, p=scores+t_offset(np,i,i); i<tr.r[r].end; i++,t_Progress("Precalculating dG... ",tr,np,done,&next),done+=np-i)
for(j=i; j<np; j++)
*p++ = (i==j ? 0 : dGbucket(deltaG64MaybeD(forward[i],backward[j],table),0x4000-1));
} fputs("\rPrecalculating dG... done",stderr); prnSeconds((long)(time(NULL)-start)); fputs("\n",stderr); fflush(stderr);
return scores;
}
/* Delta-G analogue of counts64/pCounts64 driven entirely from the
   precalculated bucket cache (precalcScores must be non-NULL; pools may
   be NULL to include all pairs).  Bucket i represents dG = -i/2; bucket
   0x4000 counts overlap/invalid pairs.  Only the last 20 non-empty
   buckets (most negative dG) are printed, to keep the output short.
   Closes f unless it is stdout/stderr; returns the overlap count. */
static int dGpCounts64(int np,const int *pools,const int *precalcScores,FILE *f) {
/* this is a combination of counts64 and pCounts64, for the delta-G variant. pools can be NULL, but not precalcScores */
int i,j; int*counts=calloc(0x4000+1,sizeof(int)); if(!counts) return 0;
for(i=0; i<np; i++) for(j=i; j<np; j++) if(!pools || pools[i]==pools[j]) {
counts[*precalcScores++]++; /* this version has no precalcScores==NULL fallback; could put one in if don't mind passing around the extra parameters */
} else precalcScores++;
/* find the bucket from which the last 20 non-empty lines start */
int lines=0; for(i=0x4000-1; i>0; i--)
if(counts[i] && ++lines==20) break;
int first = 1;
for(; i<0x4000; i++) if(counts[i]) {
fprintf(f,"%s%.3g\t%d",first?((first=0),""):"\n",((float)(-i))/2.0,counts[i]);
}
int other=counts[0x4000]; free(counts);
if(other) fprintf(f,"%sOverlaps\t%d",first?"":"\n",other);
if(f!=stdout && f!=stderr) fclose(f);
return other;
}
/* For every primer pair, bucket its delta-G (bucket i = dG of -i/2) and
   record the count plus the min/max integer score seen in that bucket,
   to show how much the score can vary within a dG range (in case anyone
   thinks the score is more accurate than it is).  The pair scan is
   OpenMP-parallelised; updates to the shared tallies are serialised with
   a critical section.  Closes f unless it is stdout/stderr.
   Fix: counts/maxScore/minScore were heap-allocated but never freed —
   they are now released before returning.  (On the memFail early-return
   path, memFail presumably handles the failed set — TODO confirm.) */
static void dGsCounts64(const MaybeDegeneratePrimer64 *forward,const MaybeDegeneratePrimer64 *backward,int np,const float *table,FILE *f) {
int *counts=calloc(0x4000,sizeof(int)),
*maxScore=calloc(0x4000,sizeof(int)),
*minScore=malloc(0x4000*sizeof(int));
if(memFail(counts,minScore,maxScore,_memFail)) return;
memset(minScore,0xFF,0x4000*sizeof(int)); /* all-ones == -1 == "unset" */
time_t next = t_ProgressStart("Precalculating dG... ");
time_t start = next;
#if defined(_OPENMP)
#pragma omp parallel
#endif
{
TwoRanges tr=t_iBounds(np);
int r,i,j,done=0;
for(r=0; r<2; r++)
for(i=tr.r[r].start; i<tr.r[r].end; i++,t_Progress("Precalculating dG... ",tr,np,done,&next),done+=np-i)
for(j=i; j<np; j++) {
float dG = deltaG64MaybeD(forward[i],backward[j],table);
int score = score64MaybeD(forward[i],backward[j]);
int bucket = dGbucket(dG,0x4000-1);
#if defined(_OPENMP)
#pragma omp critical
#endif
{
counts[bucket]++;
if(minScore[bucket]<0 || score<minScore[bucket])
minScore[bucket] = score;
if(score>maxScore[bucket]) maxScore[bucket] = score;
}}} fputs("\rPrecalculating dG... done",stderr); prnSeconds((long)(time(NULL)-start)); fputs("\n",stderr); fflush(stderr); int i;
for(i=0; i<0x4000; i++) if(counts[i]) {
fprintf(f,"%c%d.%d\t%d\t (score ",i?'-':' ',i/2,(i%2)?5:0,counts[i]);
if(minScore[i]==maxScore[i]) fprintf(f,"%d)\n",minScore[i]);
else fprintf(f,"%d-%d)\n",minScore[i],maxScore[i]);
}
/* release the tally arrays (previously leaked) */
free(counts); free(maxScore); free(minScore);
if(f!=stdout && f!=stderr) fclose(f);
}
/* Like pCounts64 but prints one histogram per pool.  The full pair scan
   is repeated for each pool (O(nPools * np^2)), restarting the
   precalcScores cursor from precalcScores2 each time so it stays aligned
   with the (i, j>=i) order.  Closes f unless it is stdout/stderr. */
static void pStats64(const MaybeDegeneratePrimer64 *forward,const MaybeDegeneratePrimer64 *backward,int np,const int *pools,const int *precalcScores,FILE *f) {
/* like pCounts64 but outputs per-pool */
int i,j,nPools=0,pool;
for(i=0; i<np; i++) if(pools[i]>nPools) nPools=pools[i];
const int *precalcScores2 = precalcScores; nPools++; /* 1 higher than max */
for(pool=0; pool<nPools; pool++) {
fprintf(f,"Pool %d:\n",pool+1);
precalcScores = precalcScores2; /* rewind the cache cursor for this pool's pass */
int counts[64]={0},maxS = 0, other=0;
for(i=0; i<np; i++) for(j=i; j<np; j++) if(pools[i]==pools[j] && pools[i]==pool) {
int score = precalcScores ? *precalcScores++ : score64MaybeD(forward[i],backward[j]);
if(score<64) {
counts[score]++; if(score>maxS) maxS=score;
} else other++;
} else if(precalcScores) precalcScores++; /* keep cursor in sync for skipped pairs */
for(i=0; i<=maxS; i++)
fprintf(f,"%d\t%d\n",i,counts[i]);
if(other) fprintf(f,"Overlaps\t%d\n",other);
}
if(f!=stdout && f!=stderr) fclose(f);
}
/* Delta-G analogue of pStats64: per-pool histograms from the
   precalculated bucket cache (bucket i = dG of -i/2, bucket 0x4000 =
   overlaps).  Only the last 20 non-empty buckets are printed per pool.
   The cursor is rewound for each pool and advanced even for skipped
   pairs.  Closes f unless it is stdout/stderr. */
static void pStats64dG(int np,const int *pools,const int *precalcScores,FILE *f) {
int*counts=malloc((0x4000+1)*sizeof(int)); if(memFail(counts,_memFail)) return;
int i,j,nPools=0,pool;
for(i=0; i<np; i++) if(pools[i]>nPools) nPools=pools[i];
const int *precalcScores2 = precalcScores; nPools++;
for(pool=0; pool<nPools; pool++) {
fprintf(f,"Pool %d:\n",pool+1);
memset(counts,0,(0x4000+1)*sizeof(int));
precalcScores = precalcScores2; /* rewind cache cursor for this pool's pass */
for(i=0; i<np; i++) for(j=i; j<np; j++) if(pools[i]==pools[j] && pools[i]==pool) {
counts[*precalcScores++]++;
} else precalcScores++;
/* find where the last 20 non-empty buckets start */
int lines=0; for(i=0x4000-1; i>0; i--)
if(counts[i] && ++lines==20) break;
for(; i<0x4000; i++) if(counts[i]) {
fprintf(f,"%.3g\t%d\n",((float)(-i))/2.0,counts[i]);
}
int other=counts[0x4000];
if(other) fprintf(f,"Overlaps\t%d\n",other);
} free(counts);
if(f!=stdout && f!=stderr) fclose(f);
}
|
scalar_multiply.c | #include "main.h"
/* Multiply every stored element of a COO matrix by *scalar, in place,
 * without threading.  When an integer matrix is scaled by a long-double
 * scalar, the elements and the matrix type are promoted to MAT_LDOUBLE.
 * Returns the dense conversion of the product; rv.t_process holds the
 * compute time only (conversion excluded).
 * Improvement: the type tests are loop-invariant, so they are evaluated
 * once before the loop instead of per element. */
mat_rv scalar_multiply_coo_nothreading(coo matrix, SCALAR *scalar)
{
    mat_rv rv;
    struct timespec start, end;
    get_utc_time(&start);
    const int mat_is_int = (matrix.type == MAT_INT);
    const int scal_is_int = (scalar->type == MAT_INT);
    const int promote = (mat_is_int && scalar->type == MAT_LDOUBLE);
    for(int i = 0; i < matrix.length; ++i){
        if(mat_is_int){
            if(scal_is_int)
                matrix.elems[i].val.i = matrix.elems[i].val.i * scalar->val.i;
            else
                matrix.elems[i].val.f = (long double)matrix.elems[i].val.i * scalar->val.f;
        }
        else{
            if(scal_is_int)
                matrix.elems[i].val.f = matrix.elems[i].val.f * (long double)scalar->val.i;
            else
                matrix.elems[i].val.f = matrix.elems[i].val.f * scalar->val.f;
        }
        if(promote)
            matrix.elems[i].type = MAT_LDOUBLE;
    }
    get_utc_time(&end);
    //change matrix type if necessary
    if(promote)
        matrix.type = MAT_LDOUBLE;
    rv = coo_to_mat_nothreading(matrix);
    rv.t_process = time_delta(end, start);
    return rv;
}
/* Threaded variant of scalar_multiply_coo_nothreading: scales every COO
 * element in place using an OpenMP work-shared loop over thread_count
 * threads.  Each iteration touches only its own element, so no
 * synchronisation is needed.  The matrix type promotion (int matrix *
 * long-double scalar -> MAT_LDOUBLE) is applied per element inside the
 * loop and to matrix.type afterwards.  rv.t_process covers the parallel
 * compute phase only. */
mat_rv scalar_multiply_coo(coo matrix, SCALAR *scalar, int thread_count)
{
mat_rv rv;
struct timespec start, end;
get_utc_time(&start);
int i;
#pragma omp parallel num_threads(thread_count) shared(scalar, matrix)
{
MAT_TYPE local_mat_type = matrix.type; /* per-thread copy avoids repeated shared reads */
//store scalar on the local stack
SCALAR local_scalar;
local_scalar.type = scalar->type;
local_scalar.val = scalar->val;
#pragma omp for private(i)
for(i = 0; i < matrix.length; ++i){
if(local_mat_type == MAT_INT){
if(local_scalar.type == MAT_INT)
matrix.elems[i].val.i = matrix.elems[i].val.i * local_scalar.val.i;
else
matrix.elems[i].val.f = (long double)matrix.elems[i].val.i * local_scalar.val.f;
}
else{
if(local_scalar.type == MAT_INT)
matrix.elems[i].val.f = matrix.elems[i].val.f * (long double)local_scalar.val.i;
else
matrix.elems[i].val.f = matrix.elems[i].val.f * local_scalar.val.f;
}
if((local_mat_type == MAT_INT && local_scalar.type == MAT_LDOUBLE))
matrix.elems[i].type = MAT_LDOUBLE;
}
}
get_utc_time(&end);
//change matrix type if necessary
if((matrix.type == MAT_INT && scalar->type == MAT_LDOUBLE))
matrix.type = MAT_LDOUBLE;
rv = coo_to_mat(matrix, thread_count);
rv.t_process = time_delta(end, start);
return rv;
}
/* Multiply every stored value of a CSR matrix by *scalar, without
 * threading.  The result shares the input's sparsity pattern (ia/ja are
 * copied); its value type is promoted to MAT_LDOUBLE when an integer
 * matrix meets a long-double scalar.  Allocation failure prints an error
 * and exits.  rv.t_process covers the compute phase only.
 * Improvements: loop-invariant type tests hoisted out of the element
 * loop; row pointers copied with memcpy, consistent with the CSC
 * variant. */
mat_rv scalar_multiply_csr_nothreading(csr matrix, SCALAR *scalar)
{
    mat_rv rv;
    struct timespec start, end;
    get_utc_time(&start);
    csr result;
    result.type = matrix.type;
    if(matrix.type == MAT_INT && scalar->type == MAT_LDOUBLE)
        result.type = MAT_LDOUBLE;
    result.cols = matrix.cols;
    result.rows = matrix.rows;
    result.num_vals = matrix.num_vals;
    if(!(result.ia = (int*)malloc((result.rows + 1) * sizeof(int)))){
        fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
        exit(EXIT_FAILURE);
    }
    if(!(result.ja = (int*)malloc(matrix.num_vals * sizeof(int)))){
        fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
        exit(EXIT_FAILURE);
    }
    if(result.type == MAT_INT){
        if(!(result.nnz.i = (int*)malloc(matrix.num_vals * sizeof(int)))){
            fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
            exit(EXIT_FAILURE);
        }
    }
    else{
        if(!(result.nnz.f = (long double*)malloc(matrix.num_vals * sizeof(long double)))){
            fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
            exit(EXIT_FAILURE);
        }
    }
    /* type tests are loop-invariant: evaluate once */
    const int mat_is_int = (matrix.type == MAT_INT);
    const int scal_is_int = (scalar->type == MAT_INT);
    for(int i = 0; i < matrix.num_vals; ++i){
        if(mat_is_int){
            if(scal_is_int)
                result.nnz.i[i] = matrix.nnz.i[i] * scalar->val.i;
            else
                result.nnz.f[i] = (long double)matrix.nnz.i[i] * scalar->val.f;
        }
        else{
            if(scal_is_int)
                result.nnz.f[i] = matrix.nnz.f[i] * (long double)scalar->val.i;
            else
                result.nnz.f[i] = matrix.nnz.f[i] * scalar->val.f;
        }
        result.ja[i] = matrix.ja[i];
    }
    /* row pointers are unchanged by scaling; bulk-copy them */
    memcpy(result.ia, matrix.ia, (matrix.rows + 1) * sizeof(int));
    get_utc_time(&end);
    rv = csr_to_mat_nothreading(result);
    free_csr(result);
    rv.t_process = time_delta(end, start);
    return rv;
}
/* Threaded variant of scalar_multiply_csr_nothreading: builds a result
 * CSR matrix with the same sparsity pattern, scaling the values and
 * copying ja/ia with OpenMP work-shared loops.  Each iteration writes a
 * distinct index, so no synchronisation is needed.  Allocation failure
 * prints an error and exits.  rv.t_process covers the parallel compute
 * phase only (conversion excluded). */
mat_rv scalar_multiply_csr(csr matrix, SCALAR *scalar, int thread_count)
{
mat_rv rv;
struct timespec start, end;
get_utc_time(&start);
csr result;
result.type = matrix.type;
if(matrix.type == MAT_INT && scalar->type == MAT_LDOUBLE)
result.type = MAT_LDOUBLE; /* int * long double promotes the result */
result.cols = matrix.cols;
result.rows = matrix.rows;
result.num_vals = matrix.num_vals;
if(!(result.ia = (int*)malloc((result.rows + 1) * sizeof(int)))){
fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
exit(EXIT_FAILURE);
}
if(!(result.ja = (int*)malloc(matrix.num_vals * sizeof(int)))){
fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
exit(EXIT_FAILURE);
}
if(result.type == MAT_INT){
if(!(result.nnz.i = (int*)malloc(matrix.num_vals * sizeof(int)))){
fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
exit(EXIT_FAILURE);
}
}
else{
if(!(result.nnz.f = (long double*)malloc(matrix.num_vals * sizeof(long double)))){
fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
exit(EXIT_FAILURE);
}
}
int i;
#pragma omp parallel num_threads(thread_count) shared(matrix, result, scalar)
{
MAT_TYPE local_mat_type = matrix.type; /* per-thread copies avoid repeated shared reads */
//store scalar on the local stack
SCALAR local_scalar;
local_scalar.type = scalar->type;
local_scalar.val = scalar->val;
#pragma omp for private(i)
for(i = 0; i < matrix.num_vals; ++i){
if(local_mat_type == MAT_INT){
if(local_scalar.type == MAT_INT)
result.nnz.i[i] = matrix.nnz.i[i] * local_scalar.val.i;
else{
result.nnz.f[i] = (long double)matrix.nnz.i[i] * local_scalar.val.f;
}
}
else{
if(local_scalar.type == MAT_INT)
result.nnz.f[i] = matrix.nnz.f[i] * (long double)local_scalar.val.i;
else
result.nnz.f[i] = matrix.nnz.f[i] * local_scalar.val.f;
}
result.ja[i] = matrix.ja[i];
}
/* row pointers are unchanged by scaling */
#pragma omp for private(i)
for(i = 0; i < matrix.rows + 1; ++i)
result.ia[i] = matrix.ia[i];
}
get_utc_time(&end);
rv = csr_to_mat(result, thread_count);
free_csr(result);
rv.t_process = time_delta(end, start);
return rv;
}
/* Multiply every stored value of a CSC matrix by *scalar, without
 * threading.  The result shares the input's sparsity pattern (ia holds
 * column pointers here, hence cols+1 entries); its value type is
 * promoted to MAT_LDOUBLE when an integer matrix meets a long-double
 * scalar.  Allocation failure prints an error and exits.  rv.t_process
 * covers the compute phase only.
 * Fix: the result was declared as `csr` even though it is built as a CSC
 * matrix and passed to csc_to_mat_nothreading()/free_csc(); that only
 * compiled because csr and csc evidently alias the same struct.  It is
 * now declared with the correct `csc` type. */
mat_rv scalar_multiply_csc_nothreading(csc matrix, SCALAR *scalar)
{
    mat_rv rv;
    struct timespec start, end;
    get_utc_time(&start);
    csc result;
    result.type = matrix.type;
    if(matrix.type == MAT_INT && scalar->type == MAT_LDOUBLE)
        result.type = MAT_LDOUBLE;
    result.cols = matrix.cols;
    result.rows = matrix.rows;
    result.num_vals = matrix.num_vals;
    if(!(result.ia = (int*)malloc((result.cols + 1) * sizeof(int)))){
        fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
        exit(EXIT_FAILURE);
    }
    if(!(result.ja = (int*)malloc(matrix.num_vals * sizeof(int)))){
        fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
        exit(EXIT_FAILURE);
    }
    if(result.type == MAT_INT){
        if(!(result.nnz.i = (int*)malloc(matrix.num_vals * sizeof(int)))){
            fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
            exit(EXIT_FAILURE);
        }
    }
    else{
        if(!(result.nnz.f = (long double*)malloc(matrix.num_vals * sizeof(long double)))){
            fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
            exit(EXIT_FAILURE);
        }
    }
    /* type tests are loop-invariant: evaluate once */
    const int mat_is_int = (matrix.type == MAT_INT);
    const int scal_is_int = (scalar->type == MAT_INT);
    for(int i = 0; i < matrix.num_vals; ++i){
        if(mat_is_int){
            if(scal_is_int)
                result.nnz.i[i] = matrix.nnz.i[i] * scalar->val.i;
            else
                result.nnz.f[i] = (long double)matrix.nnz.i[i] * scalar->val.f;
        }
        else{
            if(scal_is_int)
                result.nnz.f[i] = matrix.nnz.f[i] * (long double)scalar->val.i;
            else
                result.nnz.f[i] = matrix.nnz.f[i] * scalar->val.f;
        }
        result.ja[i] = matrix.ja[i];
    }
    memcpy(result.ia, matrix.ia, (result.cols + 1)* sizeof(int));
    get_utc_time(&end);
    rv = csc_to_mat_nothreading(result);
    free_csc(result);
    rv.t_process = time_delta(end, start);
    return rv;
}
/* Threaded variant of scalar_multiply_csc_nothreading: builds a result
 * CSC matrix with the same sparsity pattern, scaling values and copying
 * ja/ia (column pointers, cols+1 entries) with OpenMP work-shared loops.
 * Each iteration writes a distinct index, so no synchronisation is
 * needed.  Allocation failure prints an error and exits.  rv.t_process
 * covers the parallel compute phase only.
 * Fix: the result was declared as `csr` even though it is built as a CSC
 * matrix and passed to csc_to_mat()/free_csc(); declared with the
 * correct `csc` type now. */
mat_rv scalar_multiply_csc(csc matrix, SCALAR *scalar, int thread_count)
{
    mat_rv rv;
    struct timespec start, end;
    get_utc_time(&start);
    csc result;
    result.type = matrix.type;
    if(matrix.type == MAT_INT && scalar->type == MAT_LDOUBLE)
        result.type = MAT_LDOUBLE;
    result.cols = matrix.cols;
    result.rows = matrix.rows;
    result.num_vals = matrix.num_vals;
    if(!(result.ia = (int*)malloc((result.cols + 1) * sizeof(int)))){
        fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
        exit(EXIT_FAILURE);
    }
    if(!(result.ja = (int*)malloc(matrix.num_vals * sizeof(int)))){
        fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
        exit(EXIT_FAILURE);
    }
    if(result.type == MAT_INT){
        if(!(result.nnz.i = (int*)malloc(matrix.num_vals * sizeof(int)))){
            fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
            exit(EXIT_FAILURE);
        }
    }
    else{
        if(!(result.nnz.f = (long double*)malloc(matrix.num_vals * sizeof(long double)))){
            fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
            exit(EXIT_FAILURE);
        }
    }
    int i;
    #pragma omp parallel num_threads(thread_count) shared(matrix, result, scalar)
    {
        MAT_TYPE local_mat_type = matrix.type; /* per-thread copies avoid repeated shared reads */
        //store scalar on the local stack
        SCALAR local_scalar;
        local_scalar.type = scalar->type;
        local_scalar.val = scalar->val;
        #pragma omp for private(i)
        for(i = 0; i < matrix.num_vals; ++i){
            if(local_mat_type == MAT_INT){
                if(local_scalar.type == MAT_INT)
                    result.nnz.i[i] = matrix.nnz.i[i] * local_scalar.val.i;
                else
                    result.nnz.f[i] = (long double)matrix.nnz.i[i] * local_scalar.val.f;
            }
            else{
                if(local_scalar.type == MAT_INT)
                    result.nnz.f[i] = matrix.nnz.f[i] * (long double)local_scalar.val.i;
                else
                    result.nnz.f[i] = matrix.nnz.f[i] * local_scalar.val.f;
            }
            result.ja[i] = matrix.ja[i];
        }
        /* column pointers are unchanged by scaling */
        #pragma omp for private(i)
        for(i = 0; i < matrix.cols + 1; ++i)
            result.ia[i] = matrix.ia[i];
    }
    get_utc_time(&end);
    rv = csc_to_mat(result, thread_count);
    free_csc(result);
    rv.t_process = time_delta(end, start);
    return rv;
}
//will send address of scalar to functions to avoid
//incompatibility with versions of GCC earlier than 4.4
/* Dispatch a scalar-multiply operation according to args->format:
 * read the matrix from args->file1 in the chosen sparse format, run the
 * threaded or non-threaded kernel (args->nothreading), then accumulate
 * the construction and file-I/O timings into the returned mat_rv.
 * The scalar is passed by address (see comment above the function).
 * Unimplemented formats abort the program. */
mat_rv scalar_multiply(OPERATIONARGS *args)
{
mat_rv rv;
//default = COO
if (args->format == FORM_DEFAULT)
args->format = COO;
switch(args->format){
case COO:{
struct timespec construct;
struct timespec fileio_timer;
coo matrix = read_coo(args->file1, &construct, &fileio_timer);
if(args->nothreading)
rv = scalar_multiply_coo_nothreading(matrix, &(args->scalar));
else
rv = scalar_multiply_coo(matrix, &(args->scalar), args->num_threads);
rv.t_construct = time_sum(rv.t_construct, construct); /* add read-time construction cost */
rv.t_fileio = fileio_timer;
free_coo(matrix);
return rv;
break;
}
case CSR:{
struct timespec construct;
struct timespec fileio_timer;
csr matrix = read_csr(args->file1, &construct, &fileio_timer);
if(args->nothreading)
rv = scalar_multiply_csr_nothreading(matrix, &(args->scalar));
else
rv = scalar_multiply_csr(matrix, &(args->scalar), args->num_threads);
rv.t_construct = time_sum(rv.t_construct, construct);
rv.t_fileio = fileio_timer;
free_csr(matrix);
return rv;
break;
}
case CSC:{
struct timespec construct;
struct timespec fileio_timer;
csc matrix = read_csc(args->file1, &construct, &fileio_timer);
if(args->nothreading)
rv = scalar_multiply_csc_nothreading(matrix, &(args->scalar));
else
rv = scalar_multiply_csc(matrix, &(args->scalar), args->num_threads);
rv.t_construct = time_sum(rv.t_construct, construct);
rv.t_fileio = fileio_timer;
free_csc(matrix);
return rv;
break;
}
default:
fprintf(stderr, "format not implemented\n");
exit(EXIT_FAILURE);
break;
}
//execution should never reach here
rv.error = ERR_NOT_SET;
return rv;
}
|
single.c | #include <omp.h>
#define N 10
/* Toy OpenMP example: computes a[i] = b[i] + c[i] over N elements with a
 * work-shared parallel loop.  All three arrays are zero-initialized
 * first.
 * Improvements: the three identical init loops are merged into one; the
 * separate `parallel` + `for` directives are combined into the
 * equivalent `parallel for`; unused argc/argv are explicitly voided and
 * main returns 0 explicitly. */
int main (int argc, char * argv[]){
    (void)argc; (void)argv; /* unused */
    double a[N], b[N], c[N];
    for(int i = 0; i < N; i++){
        a[i] = 0;
        b[i] = 0;
        c[i] = 0;
    }
    #pragma omp parallel for
    for(int i = 0; i < N; i++)
        a[i] = b[i] + c[i];
    return 0;
}
|
sht_func.c | /*
* Copyright (c) 2010-2015 Centre National de la Recherche Scientifique.
* written by Nathanael Schaeffer (CNRS, ISTerre, Grenoble, France).
*
* nathanael.schaeffer@ujf-grenoble.fr
*
* This software is governed by the CeCILL license under French law and
* abiding by the rules of distribution of free software. You can use,
* modify and/or redistribute the software under the terms of the CeCILL
* license as circulated by CEA, CNRS and INRIA at the following URL
* "http://www.cecill.info".
*
* The fact that you are presently reading this means that you have had
* knowledge of the CeCILL license and that you accept its terms.
*
*/
/** \internal \file sht_rot.c
* \brief Rotation of Spherical Harmonics.
*/
/** \addtogroup rotation Rotation of SH fields.
Rotation around axis other than Z should be considered of beta quality (they have been tested but may still contain bugs).
They also require \c mmax = \c lmax. They use an Algorithm inspired by the pseudospectral rotation described in
Gimbutas Z. and Greengard L. 2009 "A fast and stable method for rotating spherical harmonic expansions" <i>Journal of Computational Physics</i>.
doi:<a href="http://dx.doi.org/10.1016/j.jcp.2009.05.014">10.1016/j.jcp.2009.05.014</a>
These functions do only require a call to \ref shtns_create, but not to \ref shtns_set_grid.
*/
//@{
/// Rotate a SH representation Qlm around the z-axis by angle alpha (in radians),
/// which is the same as rotating the reference frame by angle -alpha.
/// Result is stored in Rlm (which can be the same array as Qlm).
/// Rotate the SH representation Qlm around the z-axis by angle alpha (in
/// radians) — equivalently, rotate the reference frame by -alpha.
/// The m=0 coefficients are invariant and are copied only when the output
/// array Rlm differs from Qlm; each m>0 mode is multiplied by the phase
/// factor exp(-i*m*mres*alpha).
void SH_Zrotate(shtns_cfg shtns, cplx *Qlm, double alpha, cplx *Rlm)
{
    const int lmax = shtns->lmax;
    const int mmax = shtns->mmax;
    const int mres = shtns->mres;
    int im, l;
    if (Rlm != Qlm) {            // copy m=0 which does not change.
        for (l=0; l<=lmax; ++l) Rlm[l] = Qlm[l];
    }
    for (im=1; im<=mmax; ++im) {
        const double phi = im*mres*alpha;
        const cplx eima = cos(phi) - I*sin(phi);    // rotate reference frame by angle -alpha
        for (l=im*mres; l<=lmax; ++l)
            Rlm[LiM(shtns, l, im)] = Qlm[LiM(shtns, l, im)] * eima;
    }
}
//@}
/** \internal rotation kernel used by SH_Yrotate90(), SH_Xrotate90() and SH_rotate().
Algorithm based on the pseudospectral rotation[1] :
- rotate around Z by angle dphi0.
- synthesize for each l the spatial description for phi=0 and phi=pi on an equispaced latitudinal grid.
- Fourier analyze the data on the equator to recover the m in the 90 degrees rotated frame.
- rotate around new Z by angle dphi1.
[1] Gimbutas Z. and Greengard L. 2009 "A fast and stable method for rotating spherical harmonic expansions" Journal of Computational Physics. **/
/* 90-degree pseudospectral rotation kernel: Zrotate(dphi0), rotate 90
   degrees about X via synthesis on two meridians + FFT along the
   equator, then Zrotate(dphi1).  Qlm may equal Rlm.  Requires mmax=lmax,
   mres=1 (checked by callers). */
static void SH_rotK90(shtns_cfg shtns, cplx *Qlm, cplx *Rlm, double dphi0, double dphi1)
{
fftw_plan fft;
cplx *q;
double *q0;
long int k, m, l;
int lmax, ntheta;
int nrembed, ncembed;
lmax = shtns->lmax;
ntheta = ((lmax+2)>>1)*2; /* even number of latitudes, >= lmax+1 */
m = 2* sizeof(double)*(2*ntheta+2)*lmax;
q0 = VMALLOC(m); memset(q0, 0, m); // alloc & zero out.
// rotate around Z by dphi0
if (dphi0 != 0.0) {
SH_Zrotate(shtns, Qlm, dphi0, Rlm);
Qlm = Rlm;
} else {
Rlm[0] = Qlm[0]; // l=0 is rotation invariant.
}
#pragma omp parallel private(k,m,l) num_threads(shtns->nthreads)
{
double yl[lmax+1];
// compute q(l) on the meridian phi=0 and phi=pi. (rotate around X)
/* Each k handles 4 latitudes (symmetric pairs on both meridians);
   iterations write disjoint rows of q0, so no synchronisation needed. */
#pragma omp for schedule(static)
for (k=0; k<ntheta/2; ++k) {
double cost= cos(((0.5*M_PI)*(2*k+1))/ntheta);
double sint_1 = 1.0/sqrt((1.0-cost)*(1.0+cost));
m=0;
legendre_sphPlm_array(shtns, lmax, m, cost, yl+m);
double sgnt = -1.0; /* parity factor (-1)^l for the mirrored latitudes */
for (l=1; l<=lmax; ++l) {
double qr = creal(Qlm[LiM(shtns, l, m)]) * yl[l];
q0[k*2*lmax +2*(l-1)] = qr;
q0[(ntheta-1-k)*2*lmax +2*(l-1)] = sgnt*qr;
q0[(ntheta+k)*2*lmax +2*(l-1)] = sgnt*qr;
q0[(2*ntheta-1-k)*2*lmax +2*(l-1)] = qr;
sgnt *= -1.0;
}
#if _GCC_VEC_ && __SSE2__
/* SSE2 path: process (q0, dq0) pairs with sign masks instead of
   multiplications; mirrors the scalar path below. */
s2d sgnm = SIGN_MASK_HI;
s2d sgnflip = SIGN_MASK_2;
for (m=1; m<=lmax; ++m) {
legendre_sphPlm_array(shtns, lmax, m, cost, yl+m);
s2d sgnt = vdup(0.0);
s2d m_st = vset(2.0, -2*m*sint_1); // x2 for m>0
sgnm = _mm_xor_pd(sgnm, sgnflip); // (-1)^m
for (l=m; l<=lmax; ++l) {
v2d qc = ((v2d*)Qlm)[LiM(shtns, l, m)] * vdup(yl[l]) * m_st; // (q0, dq0)
((v2d*)q0)[k*lmax +(l-1)] += qc;
((v2d*)q0)[(ntheta-1-k)*lmax +(l-1)] += (v2d)_mm_xor_pd(sgnt, qc);
qc = _mm_xor_pd(sgnm, qc);
((v2d*)q0)[(ntheta+k)*lmax +(l-1)] += (v2d)_mm_xor_pd( sgnt, qc );
((v2d*)q0)[(2*ntheta-1-k)*lmax +(l-1)] += qc;
sgnt = _mm_xor_pd(sgnt, sgnflip); // (-1)^(l+m)
}
}
#else
/* portable scalar path */
double sgnm = 1.0;
for (m=1; m<=lmax; ++m) {
legendre_sphPlm_array(shtns, lmax, m, cost, yl+m);
double sgnt = 1.0;
sgnm *= -1.0;
for (l=m; l<=lmax; ++l) {
double qr = creal(Qlm[LiM(shtns, l, m)]) * yl[l];
double qi = cimag(Qlm[LiM(shtns, l, m)]) * m*yl[l]*sint_1;
qr += qr; qi += qi; // x2 for m>0
q0[k*2*lmax +2*(l-1)] += qr; // q0
q0[k*2*lmax +2*(l-1)+1] -= qi; // dq0
q0[(ntheta-1-k)*2*lmax +2*(l-1)] += sgnt*qr;
q0[(ntheta-1-k)*2*lmax +2*(l-1)+1] -= sgnt*qi;
q0[(ntheta+k)*2*lmax +2*(l-1)] += (sgnm*sgnt)*qr;
q0[(ntheta+k)*2*lmax +2*(l-1)+1] += (sgnm*sgnt)*qi;
q0[(2*ntheta-1-k)*2*lmax +2*(l-1)] += sgnm*qr;
q0[(2*ntheta-1-k)*2*lmax +2*(l-1)+1] += sgnm*qi;
sgnt *= -1.0;
}
}
#endif
}
}
// perform FFT
#ifdef OMP_FFTW
k = (lmax < 63) ? 1 : shtns->nthreads; /* FFTW threading only pays off for larger transforms */
fftw_plan_with_nthreads(k);
#endif
q = (cplx*) q0;
ntheta*=2; nrembed = ntheta+2; ncembed = nrembed/2;
fft = fftw_plan_many_dft_r2c(1, &ntheta, 2*lmax, q0, &nrembed, 2*lmax, 1, q, &ncembed, 2*lmax, 1, FFTW_ESTIMATE);
fftw_execute_dft_r2c(fft, q0, q);
fftw_destroy_plan(fft);
/* Fourier-analyzed equator data: recover the rotated coefficients.
   Even/odd l alternate between the value and derivative rows. */
double yl[lmax+1]; double dyl[lmax+1];
m=0;
//legendre_sphPlm_deriv_array(shtns, lmax, m, 0.0, 1.0, yl+m, dyl+m);
legendre_sphPlm_deriv_array_equ(shtns, lmax, m, yl+m, dyl+m);
for (l=1; l<lmax; l+=2) {
Rlm[LiM(shtns, l,m)] = -creal(q[m*2*lmax +2*(l-1)+1])/(dyl[l]*ntheta);
Rlm[LiM(shtns, l+1,m)] = creal(q[m*2*lmax +2*l])/(yl[l+1]*ntheta);
}
if (l==lmax) {
Rlm[LiM(shtns, l,m)] = -creal(q[m*2*lmax +2*(l-1)+1])/(dyl[l]*ntheta);
}
dphi1 += M_PI/ntheta; // shift rotation angle by angle of first synthesis latitude.
for (m=1; m<=lmax; ++m) {
//legendre_sphPlm_deriv_array(shtns, lmax, m, 0.0, 1.0, yl+m, dyl+m);
legendre_sphPlm_deriv_array_equ(shtns, lmax, m, yl+m, dyl+m);
cplx eimdp = (cos(m*dphi1) - I*sin(m*dphi1))/(ntheta); /* final Z rotation, folded into the normalisation */
for (l=m; l<lmax; l+=2) {
Rlm[LiM(shtns, l,m)] = eimdp*q[m*2*lmax +2*(l-1)]*(1./yl[l]);
Rlm[LiM(shtns, l+1,m)] = eimdp*q[m*2*lmax +2*l+1]*(-1./dyl[l+1]);
}
if (l==lmax) {
Rlm[LiM(shtns, l,m)] = eimdp*q[m*2*lmax +2*(l-1)]*(1./yl[l]);
}
}
VFREE(q0);
}
/// \addtogroup rotation
//@{
/// rotate Qlm by 90 degrees around X axis and store the result in Rlm.
/// shtns->mres MUST be 1, and lmax=mmax.
/* Rotate Qlm by 90 degrees around the X axis into Rlm.  Requires mres==1
   and mmax==lmax (otherwise the rotation is not closed under the
   truncation).  lmax==1 is handled by an explicit 3x3 rotation matrix;
   larger lmax goes through the pseudospectral kernel SH_rotK90. */
void SH_Xrotate90(shtns_cfg shtns, cplx *Qlm, cplx *Rlm)
{
int lmax= shtns->lmax;
if ((shtns->mres != 1) || (shtns->mmax < lmax)) shtns_runerr("truncature makes rotation not closed.");
if (lmax == 1) {
Rlm[0] = Qlm[0]; // l=0 is invariant.
int l=1; // rotation matrix for rotX(90), l=1 : m=[0, 1r, 1i]
double q0 = creal(Qlm[LiM(shtns, l, 0)]);
Rlm[LiM(shtns, l, 0)] = sqrt(2.0) * cimag(Qlm[LiM(shtns, l, 1)]); //[m=0] 0 0 sqrt(2)
Rlm[LiM(shtns, l ,1)] = creal(Qlm[LiM(shtns, l, 1)]) - I*(sqrt(0.5)*q0); //[m=1r] 0 1 0
return; //[m=1i] -sqrt(2)/2 0 0
}
SH_rotK90(shtns, Qlm, Rlm, 0.0, -M_PI/2);
}
/// rotate Qlm by 90 degrees around Y axis and store the result in Rlm.
/// shtns->mres MUST be 1, and lmax=mmax.
void SH_Yrotate90(shtns_cfg shtns, cplx *Qlm, cplx *Rlm)
{
const int lmax = shtns->lmax;
if ((shtns->mres != 1) || (shtns->mmax < lmax)) shtns_runerr("truncature makes rotation not closed.");
if (lmax != 1) {	// general case: use the K90 rotation pipeline.
SH_rotK90(shtns, Qlm, Rlm, -M_PI/2, 0.0);
return;
}
// lmax == 1: apply the tiny l=1 rotY(90) matrix directly; m=[0, 1r, 1i].
Rlm[0] = Qlm[0];	// l=0 is invariant under any rotation.
const double q10 = creal(Qlm[LiM(shtns, 1, 0)]);	// read inputs first (safe if Rlm aliases Qlm)
const cplx q11 = Qlm[LiM(shtns, 1, 1)];			//[m=0]    0         0  sqrt(2)
Rlm[LiM(shtns, 1, 0)] = sqrt(2.0) * creal(q11);		//[m=1r]  -sqrt(2)/2 0  0
Rlm[LiM(shtns, 1, 1)] = I*cimag(q11) - sqrt(0.5) * q10;	//[m=1i]   0         0  1
}
/// rotate Qlm around Y axis by arbitrary angle, using composition of rotations. Store the result in Rlm.
void SH_Yrotate(shtns_cfg shtns, cplx *Qlm, double alpha, cplx *Rlm)
{
const int truncated = (shtns->mres != 1) || (shtns->mmax < shtns->lmax);
if (truncated) shtns_runerr("truncature makes rotation not closed.");
// Y-rotation by alpha as a composition of two K90 passes:
SH_rotK90(shtns, Qlm, Rlm, 0.0, M_PI/2 + alpha);	// Zrotate(pi/2) + Yrotate90 + Zrotate(pi+alpha)
SH_rotK90(shtns, Rlm, Rlm, 0.0, M_PI/2);		// Yrotate90 + Zrotate(pi/2)
}
//@}
/** \addtogroup operators Special operators
* Apply special operators in spectral space: multiplication by cos(theta), sin(theta).d/dtheta.
*/
//@{
/// fill mx with the coefficients for multiplication by cos(theta)
/// \param mx : an array of 2*NLM double that will be filled with the matrix coefficients.
/// The operator couples each (l,m) mode only to its l-1 and l+1 neighbours (same m):
/// xq[lm] = mx[2*lm] * q[lm-1] + mx[2*lm+1] * q[lm+1];
/// The coefficients are derived from the recurrence coefficients stored in alm_im().
void mul_ct_matrix(shtns_cfg shtns, double* mx)
{
long int im,l,lm;
double a_1;
if (SHT_NORM == sht_schmidt) {		// Schmidt semi-normalized case
lm=0;
for (im=0; im<=MMAX; im++) {		// one band of coefficients per azimuthal order m
double* al = alm_im(shtns,im);		// recurrence coefficients for this m
long int m=im*MRES;
mx[2*lm] = 0.0;				// l==m: no l-1 neighbour within this m-band
a_1 = 1.0 / al[1];
l=m;
while(++l < LMAX) {
al+=2;
mx[2*lm+2] = a_1;			// lower coupling of the next row (to q[lm])
a_1 = 1.0 / al[1];
mx[2*lm+1] = -a_1*al[0];		// = -al[2*(lm+1)] / al[2*(lm+1)+1];
lm++;
}
if (l == LMAX) {	// the last one needs to be computed.
mx[2*lm+2] = a_1;
mx[2*lm+1] = sqrt((l+m)*(l-m))/(2*l+1);		// closed form for the truncated row
lm++;
}
mx[2*lm +1] = 0.0;			// l==LMAX: no l+1 neighbour (truncation)
lm++;
}
} else {				// orthonormal (and other) normalizations
lm=0;
for (im=0; im<=MMAX; im++) {
double* al = alm_im(shtns, im);
l=im*MRES;
mx[2*lm] = 0.0;				// l==m: no l-1 neighbour within this m-band
while(++l <= LMAX) {
a_1 = 1.0 / al[1];
mx[2*lm+1] = a_1;	// specific to orthonormal.
mx[2*lm+2] = a_1;	// symmetric coupling: same coefficient up and down
lm++; al+=2;
}
mx[2*lm +1] = 0.0;			// l==LMAX: no l+1 neighbour (truncation)
lm++;
}
}
}
/// fill mx with the coefficients of operator sin(theta).d/dtheta
/// \param mx : an array of 2*NLM double that will be filled with the matrix coefficients.
/// stdq[lm] = mx[2*lm] * q[lm-1] + mx[2*lm+1] * q[lm+1];
void st_dt_matrix(shtns_cfg shtns, double* mx)
{
mul_ct_matrix(shtns, mx);		// start from the cos(theta) coupling coefficients
for (int lm=0; lm<NLM; ++lm) {
const double lval = shtns->li[lm];	// degree l of this mode
mx[2*lm]   *= lval - 1.0;		// rescale lower coupling by (l-1)
mx[2*lm+1] *= -(lval + 2.0);		// rescale upper coupling by -(l+2)
}
}
/// Multiplication of Qlm by a matrix involving l+1 and l-1 only.
/// The result is stored in Rlm, which MUST be different from Qlm.
/// mx is an array of 2*NLM values as returned by \ref mul_ct_matrix or \ref st_dt_matrix
/// compute: Rlm[lm] = mx[2*lm] * Qlm[lm-1] + mx[2*lm+1] * Qlm[lm+1];
void SH_mul_mx(shtns_cfg shtns, double* mx, cplx *Qlm, cplx *Rlm)
{
v2d* vq = (v2d*) Qlm;			// process complex values as SIMD pairs
v2d* vr = (v2d*) Rlm;
const long int last = NLM-1;
// first row: only the upper (lm+1) neighbour contributes.
vr[0] = vdup(mx[1]) * vq[1];
// interior rows: tridiagonal-like product, lower then upper neighbour.
for (long int lm=1; lm<last; ++lm) {
const s2d lo = vdup(mx[2*lm]);
const s2d up = vdup(mx[2*lm+1]);
vr[lm] = lo*vq[lm-1] + up*vq[lm+1];
}
// last row: only the lower (lm-1) neighbour contributes.
vr[last] = vdup(mx[2*last]) * vq[last-1];
}
//@}
// truncation at LMAX and MMAX
#define LTR LMAX
#define MTR MMAX
/** \addtogroup local Local and partial evaluation of SH fields.
* These do only require a call to \ref shtns_create, but not to \ref shtns_set_grid.
* These functions are not optimized and can be relatively slow, but they provide good
* reference implementation for the transforms.
*/
//@{
/// Evaluate scalar SH representation \b Qlm at physical point defined by \b cost = cos(theta) and \b phi
double SH_to_point(shtns_cfg shtns, cplx *Qlm, double cost, double phi)
{
double yl[LMAX+1];		// Legendre values for the current m
double vr0, vr1;		// two accumulators: loop is unrolled by 2 over l
long int l,m,im;
vr0 = 0.0; vr1 = 0.0;
m=0; im=0;
// m=0 contribution: only the real part of Qlm matters.
legendre_sphPlm_array(shtns, LTR, im, cost, &yl[m]);
for (l=m; l<LTR; l+=2) {
vr0 += yl[l] * creal( Qlm[l] );
vr1 += yl[l+1] * creal( Qlm[l+1] );
}
if (l==LTR) {			// odd number of terms: fold in the last one.
vr0 += yl[l] * creal( Qlm[l] );
}
vr0 += vr1;
if (MTR>0) {
cplx eip, eimp;
// eimp starts at 2.0 to account for the +m/-m pair of each m>0 mode.
eip = cos(phi*MRES) + I*sin(phi*MRES); eimp = 2.0;
im = 1; do {
m = im*MRES;
legendre_sphPlm_array(shtns, LTR, im, cost, &yl[m]);
v2d* Ql = (v2d*) &Qlm[LiM(shtns, 0,im)];	// virtual pointer for l=0 and im
v2d vrm0 = vdup(0.0); v2d vrm1 = vdup(0.0);
for (l=m; l<LTR; l+=2) {		// unrolled by 2, SIMD accumulation of (re,im)
vrm0 += vdup(yl[l]) * Ql[l];
vrm1 += vdup(yl[l+1]) * Ql[l+1];
}
// eimp = 2.*(cos(m*phi) + I*sin(m*phi));  computed incrementally below:
eimp *= eip;		// not so accurate, but it should be enough for rendering uses.
vrm0 += vrm1;
if (l==LTR) {		// odd number of terms: fold in the last one.
vrm0 += vdup(yl[l]) * Ql[l];
}
// take the real part of (vrm0 * e^{i m phi}).
vr0 += vcplx_real(vrm0)*creal(eimp) - vcplx_imag(vrm0)*cimag(eimp);
} while(++im <= MTR);
}
return vr0;
}
/// Evaluate the gradient of a scalar SH field at the point (cost, phi).
/// DrSlm holds the radial-derivative coefficients, Slm the scalar coefficients.
/// Outputs: *gr = dS/dr, *gt = dS/dtheta, *gp = (I.m/sint)*S (azimuthal component).
void SH_to_grad_point(shtns_cfg shtns, cplx *DrSlm, cplx *Slm, double cost, double phi,
double *gr, double *gt, double *gp)
{
double yl[LMAX+1];		// Legendre values for the current m
double dtyl[LMAX+1];		// theta-derivatives of the Legendre values
double vtt, vpp, vr0, vrm;
long int l,m,im;
const double sint = sqrt((1.-cost)*(1.+cost));
vtt = 0.; vpp = 0.; vr0 = 0.; vrm = 0.;
m=0; im=0;
// m=0 contribution: only real parts; no azimuthal component for m=0.
legendre_sphPlm_deriv_array(shtns, LTR, im, cost, sint, &yl[m], &dtyl[m]);
for (l=m; l<=LTR; ++l) {
vr0 += yl[l] * creal( DrSlm[l] );
vtt += dtyl[l] * creal( Slm[l] );
}
if (MTR>0) {
cplx eip, eimp, imeimp;
// eimp starts at 2.0 to account for the +m/-m pair of each m>0 mode.
eip = cos(phi*MRES) + I*sin(phi*MRES); eimp = 2.0;
im=1; do {
m = im*MRES;
legendre_sphPlm_deriv_array(shtns, LTR, im, cost, sint, &yl[m], &dtyl[m]);
// eimp = 2.*(cos(m*phi) + I*sin(m*phi));  computed incrementally below:
eimp *= eip;		// not so accurate, but it should be enough for rendering uses.
imeimp = eimp*m*I;	// I.m factor for the d/dphi terms
l = LiM(shtns, 0,im);
v2d* Ql = (v2d*) &DrSlm[l]; v2d* Sl = (v2d*) &Slm[l];
v2d qm = vdup(0.0);
v2d dsdt = vdup(0.0); v2d dsdp = vdup(0.0);
for (l=m; l<=LTR; ++l) {	// SIMD accumulation of (re,im) pairs
qm += vdup(yl[l]) * Ql[l];
dsdt += vdup(dtyl[l]) * Sl[l];
dsdp += vdup(yl[l]) * Sl[l];
}
// real parts of (accumulator * e^{i m phi}):
vrm += vcplx_real(qm)*creal(eimp) - vcplx_imag(qm)*cimag(eimp);		// dS/dr
vtt += vcplx_real(dsdt)*creal(eimp) - vcplx_imag(dsdt)*cimag(eimp);	// dS/dt
vpp += vcplx_real(dsdp)*creal(imeimp) - vcplx_imag(dsdp)*cimag(imeimp);	// + I.m/sint *S
} while (++im <= MTR);
vr0 += vrm*sint;
}
*gr = vr0;	// Gr = dS/dr
*gt = vtt;	// Gt = dS/dt
*gp = vpp;	// Gp = I.m/sint *S
}
/// Evaluate vector SH representation \b Qlm at physical point defined by \b cost = cos(theta) and \b phi
/// Qlm/Slm/Tlm are the radial/spheroidal/toroidal coefficient sets.
/// Outputs: *vr, *vt = I.m/sint*T + dS/dt, *vp = I.m/sint*S - dT/dt.
void SHqst_to_point(shtns_cfg shtns, cplx *Qlm, cplx *Slm, cplx *Tlm, double cost, double phi,
double *vr, double *vt, double *vp)
{
double yl[LMAX+1];		// Legendre values for the current m
double dtyl[LMAX+1];		// theta-derivatives of the Legendre values
double vtt, vpp, vr0, vrm;
long int l,m,im;
const double sint = sqrt((1.-cost)*(1.+cost));
vtt = 0.; vpp = 0.; vr0 = 0.; vrm = 0.;
m=0; im=0;
// m=0 contribution: only real parts; no I.m terms for m=0.
legendre_sphPlm_deriv_array(shtns, LTR, im, cost, sint, &yl[m], &dtyl[m]);
for (l=m; l<=LTR; ++l) {
vr0 += yl[l] * creal( Qlm[l] );
vtt += dtyl[l] * creal( Slm[l] );
vpp -= dtyl[l] * creal( Tlm[l] );
}
if (MTR>0) {
cplx eip, eimp, imeimp;
// eimp starts at 2.0 to account for the +m/-m pair of each m>0 mode.
eip = cos(phi*MRES) + I*sin(phi*MRES); eimp = 2.0;
im=1; do {
m = im*MRES;
legendre_sphPlm_deriv_array(shtns, LTR, im, cost, sint, &yl[m], &dtyl[m]);
// eimp = 2.*(cos(m*phi) + I*sin(m*phi));  computed incrementally below:
eimp *= eip;		// not so accurate, but it should be enough for rendering uses.
imeimp = eimp*m*I;	// I.m factor for the d/dphi terms
l = LiM(shtns, 0,im);
v2d* Ql = (v2d*) &Qlm[l]; v2d* Sl = (v2d*) &Slm[l]; v2d* Tl = (v2d*) &Tlm[l];
v2d qm = vdup(0.0);
v2d dsdt = vdup(0.0); v2d dtdt = vdup(0.0);
v2d dsdp = vdup(0.0); v2d dtdp = vdup(0.0);
for (l=m; l<=LTR; ++l) {	// SIMD accumulation of (re,im) pairs
qm += vdup(yl[l]) * Ql[l];
dsdt += vdup(dtyl[l]) * Sl[l];
dtdt += vdup(dtyl[l]) * Tl[l];
dsdp += vdup(yl[l]) * Sl[l];
dtdp += vdup(yl[l]) * Tl[l];
}
// real parts of (accumulator * e^{i m phi}):
vrm += vcplx_real(qm)*creal(eimp) - vcplx_imag(qm)*cimag(eimp);
vtt += (vcplx_real(dtdp)*creal(imeimp) - vcplx_imag(dtdp)*cimag(imeimp))	// + I.m/sint *T
+ (vcplx_real(dsdt)*creal(eimp) - vcplx_imag(dsdt)*cimag(eimp));	// + dS/dt
vpp += (vcplx_real(dsdp)*creal(imeimp) - vcplx_imag(dsdp)*cimag(imeimp))	// + I.m/sint *S
- (vcplx_real(dtdt)*creal(eimp) - vcplx_imag(dtdt)*cimag(eimp));	// - dT/dt
} while (++im <= MTR);
vr0 += vrm*sint;
}
*vr = vr0;
*vt = vtt;	// Bt = I.m/sint *T + dS/dt
*vp = vpp;	// Bp = I.m/sint *S - dT/dt
}
//@}
#undef LTR
#undef MTR
/*
SYNTHESIS AT A GIVEN LATITUDE
(does not require a previous call to shtns_set_grid)
*/
// Cached state shared by SHqst_to_lat and SH_to_lat (plain globals: these
// functions are NOT thread-safe, as stated in their doc comments).
fftw_plan ifft_lat = NULL;		///< fftw plan reused by SHqst_to_lat / SH_to_lat; rebuilt when nphi changes
int nphi_lat = 0;		///< nphi of previous SHqst_to_lat; used to detect when the plan must be rebuilt
double* ylm_lat = NULL;		///< lazily allocated buffer of 2*NLM doubles; first half: Legendre values
double* dylm_lat;		///< second half of the ylm_lat buffer: theta-derivative values
double ct_lat = 2.0;		///< cache key for cos(theta); 2.0 is outside [-1,1] so the first call always recomputes. NOTE(review): never reassigned in this file, so the cache test always recomputes — confirm whether that is intentional (ltr/mtr may change between calls).
double st_lat;			///< sin(theta) matching the last computed latitude
/// synthesis at a given latitude, on nphi equispaced longitude points.
/// vr, vt, and vp arrays must have nphi+2 doubles allocated (fftw requirement).
/// It does not require a previous call to shtns_set_grid, but it is NOT thread-safe
/// (it uses the ifft_lat / ylm_lat global cache).
/// \ingroup local
void SHqst_to_lat(shtns_cfg shtns, cplx *Qlm, cplx *Slm, cplx *Tlm, double cost,
double *vr, double *vt, double *vp, int nphi, int ltr, int mtr)
{
cplx vst, vtt, vsp, vtp, vrr;
cplx *vrc, *vtc, *vpc;
long int m, l, j;
// clamp the truncation requests to what the config and nphi can represent.
if (ltr > LMAX) ltr=LMAX;
if (mtr > MMAX) mtr=MMAX;
if (mtr*MRES > ltr) mtr=ltr/MRES;
if (mtr*2*MRES >= nphi) mtr = (nphi-1)/(2*MRES);	// avoid aliasing on the nphi-point grid
// the output arrays are used in-place as complex Fourier coefficients.
vrc = (cplx *) vr;
vtc = (cplx *) vt;
vpc = (cplx *) vp;
if ((nphi != nphi_lat)||(ifft_lat == NULL)) {		// (re)build the cached fft plan
if (ifft_lat != NULL) fftw_destroy_plan(ifft_lat);
#ifdef OMP_FFTW
fftw_plan_with_nthreads(1);
#endif
ifft_lat = fftw_plan_dft_c2r_1d(nphi, vrc, vr, FFTW_ESTIMATE);
nphi_lat = nphi;
}
if (ylm_lat == NULL) {		// lazy allocation of the Legendre cache
ylm_lat = (double *) malloc(sizeof(double)* NLM*2);
dylm_lat = ylm_lat + NLM;
}
if (cost != ct_lat) {		// don't recompute if same latitude (ie equatorial disc rendering)
st_lat = sqrt((1.-cost)*(1.+cost));	// sin(theta)
for (m=0,j=0; m<=mtr; ++m) {
legendre_sphPlm_deriv_array(shtns, ltr, m, cost, st_lat, &ylm_lat[j], &dylm_lat[j]);
j += LMAX -m*MRES +1;	// stride of one m-band in the cache, independent of ltr
}
}
for (m = 0; m<nphi/2+1; ++m) {	// init with zeros
vrc[m] = 0.0; vtc[m] = 0.0; vpc[m] = 0.0;
}
j=0;
m=0;
// m=0 term: only real parts of the coefficients contribute.
vrr=0; vtt=0; vst=0;
for(l=m; l<=ltr; ++l, ++j) {
vrr += ylm_lat[j] * creal(Qlm[j]);
vst += dylm_lat[j] * creal(Slm[j]);
vtt += dylm_lat[j] * creal(Tlm[j]);
}
j += (LMAX-ltr);	// skip the rest of the m-band up to the cache stride
vrc[m] = vrr;
vtc[m] = vst;	// Vt = dS/dt
vpc[m] = -vtt;	// Vp = - dT/dt
for (m=MRES; m<=mtr*MRES; m+=MRES) {	// m>0 Fourier modes
vrr=0; vtt=0; vst=0; vsp=0; vtp=0;
for(l=m; l<=ltr; ++l, ++j) {
vrr += ylm_lat[j] * Qlm[j];
vst += dylm_lat[j] * Slm[j];
vtt += dylm_lat[j] * Tlm[j];
vsp += ylm_lat[j] * Slm[j];
vtp += ylm_lat[j] * Tlm[j];
}
j+=(LMAX-ltr);
vrc[m] = vrr*st_lat;
vtc[m] = I*m*vtp + vst;	// Vt = I.m/sint *T + dS/dt
vpc[m] = I*m*vsp - vtt;	// Vp = I.m/sint *S - dT/dt
}
// inverse FFT along longitude, in-place in the output arrays.
fftw_execute_dft_c2r(ifft_lat,vrc,vr);
fftw_execute_dft_c2r(ifft_lat,vtc,vt);
fftw_execute_dft_c2r(ifft_lat,vpc,vp);
// free(ylm_lat);
}
/// synthesis at a given latitude, on nphi equispaced longitude points.
/// vr arrays must have nphi+2 doubles allocated (fftw requirement).
/// It does not require a previous call to shtns_set_grid, but it is NOT thread-safe
/// (it shares the ifft_lat / ylm_lat global cache with SHqst_to_lat).
/// \ingroup local
void SH_to_lat(shtns_cfg shtns, cplx *Qlm, double cost,
double *vr, int nphi, int ltr, int mtr)
{
cplx vrr;
cplx *vrc;
long int m, l, j;
// clamp the truncation requests to what the config and nphi can represent.
if (ltr > LMAX) ltr=LMAX;
if (mtr > MMAX) mtr=MMAX;
if (mtr*MRES > ltr) mtr=ltr/MRES;
if (mtr*2*MRES >= nphi) mtr = (nphi-1)/(2*MRES);	// avoid aliasing on the nphi-point grid
// the output array is used in-place as complex Fourier coefficients.
vrc = (cplx *) vr;
if ((nphi != nphi_lat)||(ifft_lat == NULL)) {		// (re)build the cached fft plan
if (ifft_lat != NULL) fftw_destroy_plan(ifft_lat);
#ifdef OMP_FFTW
fftw_plan_with_nthreads(1);
#endif
ifft_lat = fftw_plan_dft_c2r_1d(nphi, vrc, vr, FFTW_ESTIMATE);
nphi_lat = nphi;
}
if (ylm_lat == NULL) {		// lazy allocation of the Legendre cache
ylm_lat = (double *) malloc(sizeof(double)* NLM*2);
dylm_lat = ylm_lat + NLM;
}
if (cost != ct_lat) {		// don't recompute if same latitude (ie equatorial disc rendering)
st_lat = sqrt((1.-cost)*(1.+cost));	// sin(theta)
for (m=0,j=0; m<=mtr; ++m) {
legendre_sphPlm_deriv_array(shtns, ltr, m, cost, st_lat, &ylm_lat[j], &dylm_lat[j]);
j += LMAX -m*MRES +1;	// stride of one m-band in the cache, independent of ltr
}
}
for (m = 0; m<nphi/2+1; ++m) {	// init with zeros
vrc[m] = 0.0;
}
j=0;
m=0;
// m=0 term: only real parts of the coefficients contribute.
vrr=0;
for(l=m; l<=ltr; ++l, ++j) {
vrr += ylm_lat[j] * creal(Qlm[j]);
}
j += (LMAX-ltr);	// skip the rest of the m-band up to the cache stride
vrc[m] = vrr;
for (m=MRES; m<=mtr*MRES; m+=MRES) {	// m>0 Fourier modes
vrr=0;
for(l=m; l<=ltr; ++l, ++j) {
vrr += ylm_lat[j] * Qlm[j];
}
j+=(LMAX-ltr);
vrc[m] = vrr*st_lat;
}
// inverse FFT along longitude, in-place in the output array.
fftw_execute_dft_c2r(ifft_lat,vrc,vr);
// free(ylm_lat);
}
/// complex scalar transform.
/// in: complex spatial field.
/// out: alm[l*(l+1)+m] is the SH coefficients of order l and degree m (with -l <= m <= l)
/// for a total of (LMAX+1)^2 coefficients.
/// Implemented as two real transforms (on the real and imaginary parts) whose
/// coefficients are then recombined into the full -l..l spectrum.
void spat_cplx_to_SH(shtns_cfg shtns, cplx *z, cplx *alm)
{
long int nspat = shtns->nspat;
double *re, *im;
cplx *rlm, *ilm;
if (MMAX != LMAX) shtns_runerr("complex SH requires lmax=mmax and mres=1.");
// alloc temporary fields (one buffer: re | im | rlm | ilm)
re = (double*) VMALLOC( 2*(nspat + NLM*2)*sizeof(double) );
im = re + nspat;
rlm = (cplx*) (re + 2*nspat);
ilm = rlm + NLM;
// split z into real and imag parts.
for (int k=0; k<nspat; k++) {
re[k] = creal(z[k]);	im[k] = cimag(z[k]);
}
// perform two real transforms:
spat_to_SH(shtns, re, rlm);
spat_to_SH(shtns, im, ilm);
// combine into complex coefficients
int ll = 0;
int lm = 0;
for (int l=0; l<=LMAX; l++) {
ll += 2*l;	// ll = l*(l+1)
// m=0: real-transform coefficients are real, so they combine directly.
alm[ll] = creal(rlm[lm]) + I*creal(ilm[lm]);	// m=0
lm++;
}
for (int m=1; m<=MMAX; m++) {
ll = (m-1)*m;
for (int l=m; l<=LMAX; l++) {
ll += 2*l;	// ll = l*(l+1)
cplx rr = rlm[lm];
cplx ii = ilm[lm];
alm[ll+m] = rr + I*ii;		// m>0
// m<0 coefficient follows from conjugate symmetry of the real transforms:
rr = conj(rr) + I*conj(ii);	// m<0, m even
if (m&1) rr = -rr;		// m<0, m odd: extra (-1)^m sign
alm[ll-m] = rr;
lm++;
}
}
VFREE(re);
}
/// complex scalar transform.
/// in: alm[l*(l+1)+m] is the SH coefficients of order l and degree m (with -l <= m <= l)
/// for a total of (LMAX+1)^2 coefficients.
/// out: complex spatial field.
/// Inverse of spat_cplx_to_SH: the -l..l spectrum is split into two real-transform
/// coefficient sets, synthesized separately, and recombined into z.
void SH_to_spat_cplx(shtns_cfg shtns, cplx *alm, cplx *z)
{
long int nspat = shtns->nspat;
double *re, *im;
cplx *rlm, *ilm;
if (MMAX != LMAX) shtns_runerr("complex SH requires lmax=mmax and mres=1.");
// alloc temporary fields (one buffer: re | im | rlm | ilm)
re = (double*) VMALLOC( 2*(nspat + NLM*2)*sizeof(double) );
im = re + nspat;
rlm = (cplx*) (re + 2*nspat);
ilm = rlm + NLM;
// extract complex coefficients corresponding to real and imag
int ll = 0;
int lm = 0;
for (int l=0; l<=LMAX; l++) {
ll += 2*l;	// ll = l*(l+1)
rlm[lm] = creal(alm[ll]);	// m=0
ilm[lm] = cimag(alm[ll]);
lm++;
}
double half_parity = 0.5;
for (int m=1; m<=MMAX; m++) {
ll = (m-1)*m;
half_parity = -half_parity;	// (-1)^m * 0.5
for (int l=m; l<=LMAX; l++) {
ll += 2*l;	// ll = l*(l+1)
// invert the combination done in spat_cplx_to_SH:
cplx b = alm[ll-m] * half_parity;	// (-1)^m for m negative.
cplx a = alm[ll+m] * 0.5;
rlm[lm] = (conj(b) + a);	// real part
ilm[lm] = (conj(b) - a)*I;	// imag part
lm++;
}
}
// perform two real transforms:
SH_to_spat(shtns, rlm, re);
SH_to_spat(shtns, ilm, im);
// combine into z
for (int k=0; k<nspat; k++)
z[k] = re[k] + I*im[k];
VFREE(re);
}
/*
void SH_to_spat_grad(shtns_cfg shtns, cplx *alm, double *gt, double *gp)
{
double *mx;
cplx *blm, *clm;
blm = (cplx*) VMALLOC( 3*NLM*sizeof(cplx) );
clm = blm + NLM;
mx = (double*)(clm + NLM);
st_dt_matrix(shtns, mx);
SH_mul_mx(shtns, mx, alm, blm);
int lm=0;
for (int im=0; im<=MMAX; im++) {
int m = im*MRES;
for (int l=m; l<=LMAX; l++) {
clm[lm] = alm[lm] * I*m;
lm++;
}
}
SH_to_spat(shtns, blm, gt);
SH_to_spat(shtns, clm, gp);
for (int ip=0; ip<NPHI; ip++) {
for (int it=0; it<NLAT; it++) {
gt[ip*NLAT+it] /= shtns->st[it];
gp[ip*NLAT+it] /= shtns->st[it];
}
}
VFREE(blm);
}
*/
|
3d25pt_var.c | /*
* Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y, storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 * NOTE: Y is normalized in place (its fields are modified).
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow seconds into Y's microsecond field so that x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec)
    {
        const int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec  += borrow;
    }
    /* Carry excess microseconds of Y into its seconds field. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        const int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec  -= carry;
    }
    /* The remaining difference; tv_usec is certainly positive. */
    result->tv_sec  = x->tv_sec  - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* 1 when the true difference is negative. */
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 8;
tile_size[3] = 1024;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] =
coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
parallel_utilities.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
// Denis Demidov
// Philipp Bucher
//
#if !defined(KRATOS_PARALLEL_UTILITIES_H_INCLUDED)
#define KRATOS_PARALLEL_UTILITIES_H_INCLUDED
// System includes
#include <iostream>
#include <array>
#include <vector>
#include <tuple>
#include <cmath>
#include <limits>
#include <future>
#include <thread>
// External includes
#ifdef KRATOS_SMP_OPENMP
#include <omp.h>
#endif
// Project includes
#include "includes/define.h"
#include "includes/global_variables.h"
#include "includes/lock_object.h"
#include "utilities/reduction_utilities.h"
namespace Kratos
{
///@addtogroup KratosCore
/// Shared memory parallelism related helper class
/** Provides access to functionalities for shared memory parallelism
* such as the number of threads in use.
*/
class KRATOS_API(KRATOS_CORE) ParallelUtilities
{
public:
///@name Life Cycle
///@{
///@}
///@name Operations
///@{
/** @brief Returns the current number of threads used for shared-memory parallelism.
* @return number of threads
*/
static int GetNumThreads();
/** @brief Sets the current number of threads.
* @param NumThreads - the number of threads to be used
*/
static void SetNumThreads(const int NumThreads);
/** @brief Returns the number of processors available to this device.
* This can include the multiple threads per processing unit
* @return number of processors
*/
static int GetNumProcs();
///@}
/** @brief Returns the global lock.
* Global lock that can be used for critical sections
* @return global lock
*/
static LockObject& GetGlobalLock();
///@}
private:
///@name Static Member Variables
///@{
// Lazily initialized singletons (raw pointers; lifetime managed by the implementation).
static LockObject* mspGlobalLock;
static int* mspNumThreads;
///@}
///@name Private Operations
///@{
/// Deleted constructor: this is a purely static utility class, never instantiated.
ParallelUtilities() = delete;
/** @brief Initializes the number of threads to be used.
* @return number of threads
*/
static int InitializeNumberOfThreads();
///@}
///@name Private Access
///@{
/// Accessor for the cached thread count (returned by reference so it can be set).
static int& GetNumberOfThreads();
///@}
}; // Class ParallelUtilities
//***********************************************************************************
//***********************************************************************************
//***********************************************************************************
/** @param TContainerType - the type of the container used in the loop (must provide random access iterators)
* @param TIteratorType - type of iterator (by default as provided by the TContainerType)
* @param TMaxThreads - maximum number of threads allowed in the partitioning.
* must be known at compile time to avoid heap allocations in the partitioning
*/
template<
class TContainerType,
class TIteratorType=decltype(std::declval<TContainerType>().begin()),
int TMaxThreads=Globals::MaxAllowedThreads
>
class BlockPartition
{
public:
/** @param it_begin - iterator pointing at the beginning of the container
* @param it_end - iterator pointing to the end of the container
* @param Nchunks - number of threads to be used in the loop (must be lower than TMaxThreads)
* NOTE(review): Nchunks <= TMaxThreads is not checked here; mBlockPartition[Nchunks]
* would index out of bounds if violated — confirm callers guarantee the bound.
*/
BlockPartition(TIteratorType it_begin,
TIteratorType it_end,
int Nchunks = ParallelUtilities::GetNumThreads())
{
KRATOS_ERROR_IF(Nchunks < 1) << "Number of chunks must be > 0 (and not " << Nchunks << ")" << std::endl;
const std::ptrdiff_t size_container = it_end-it_begin;
if (size_container == 0) {
mNchunks = Nchunks;
} else {
// in case the container is smaller than the number of chunks
mNchunks = std::min(static_cast<int>(size_container), Nchunks);
}
// partition [it_begin, it_end) into mNchunks contiguous blocks of equal size;
// the last block absorbs the remainder because its end is pinned to it_end.
const std::ptrdiff_t block_partition_size = size_container / mNchunks;
mBlockPartition[0] = it_begin;
mBlockPartition[mNchunks] = it_end;
for (int i=1; i<mNchunks; i++) {
mBlockPartition[i] = mBlockPartition[i-1] + block_partition_size;
}
}
/** @param rData - the container to be iterated upon
* @param Nchunks - number of threads to be used in the loop (must be lower than TMaxThreads)
*/
template <class TData>
BlockPartition(TData &&rData, int Nchunks = ParallelUtilities::GetNumThreads())
: BlockPartition(rData.begin(), rData.end(), Nchunks)
{}
virtual ~BlockPartition() = default;
/** @brief simple iteration loop. f called on every entry in rData
* @param f - must be a unary function accepting as input TContainerType::value_type&
*/
template <class TUnaryFunction>
inline void for_each(TUnaryFunction&& f)
{
#pragma omp parallel for
for (int i=0; i<mNchunks; ++i) {
for (auto it = mBlockPartition[i]; it != mBlockPartition[i+1]; ++it) {
f(*it); //note that we pass the value to the function, not the iterator
}
}
}
/** @brief loop allowing reductions. f called on every entry in rData
* the function f needs to return the values to be used by the reducer
* @param TReducer template parameter specifying the reduction operation to be done
* @param f - must be a unary function accepting as input TContainerType::value_type&
*/
template <class TReducer, class TUnaryFunction>
inline typename TReducer::return_type for_each(TUnaryFunction &&f)
{
TReducer global_reducer;
#pragma omp parallel for
for (int i=0; i<mNchunks; ++i) {
// each chunk reduces locally, then merges into the shared reducer
// (ThreadSafeReduce is expected to synchronize internally).
TReducer local_reducer;
for (auto it = mBlockPartition[i]; it != mBlockPartition[i+1]; ++it) {
local_reducer.LocalReduce(f(*it));
}
global_reducer.ThreadSafeReduce(local_reducer);
}
return global_reducer.GetValue();
}
/** @brief loop with thread local storage (TLS). f called on every entry in rData
* @param TThreadLocalStorage template parameter specifying the thread local storage
* @param f - must be a function accepting as input TContainerType::value_type& and the thread local storage
*/
template <class TThreadLocalStorage, class TFunction>
inline void for_each(const TThreadLocalStorage& rThreadLocalStoragePrototype, TFunction &&f)
{
static_assert(std::is_copy_constructible<TThreadLocalStorage>::value, "TThreadLocalStorage must be copy constructible!");
#pragma omp parallel
{
// copy the prototype to create the thread local storage (one copy per thread)
TThreadLocalStorage thread_local_storage(rThreadLocalStoragePrototype);
#pragma omp for
for(int i=0; i<mNchunks; ++i){
for (auto it = mBlockPartition[i]; it != mBlockPartition[i+1]; ++it){
f(*it, thread_local_storage); // note that we pass the value to the function, not the iterator
}
}
}
}
/** @brief loop with thread local storage (TLS) allowing reductions. f called on every entry in rData
* the function f needs to return the values to be used by the reducer
* @param TReducer template parameter specifying the reduction operation to be done
* @param TThreadLocalStorage template parameter specifying the thread local storage
* @param f - must be a function accepting as input TContainerType::value_type& and the thread local storage
*/
template <class TReducer, class TThreadLocalStorage, class TFunction>
inline typename TReducer::return_type for_each(const TThreadLocalStorage& rThreadLocalStoragePrototype, TFunction &&f)
{
static_assert(std::is_copy_constructible<TThreadLocalStorage>::value, "TThreadLocalStorage must be copy constructible!");
TReducer global_reducer;
#pragma omp parallel
{
// copy the prototype to create the thread local storage (one copy per thread)
TThreadLocalStorage thread_local_storage(rThreadLocalStoragePrototype);
#pragma omp for
for (int i=0; i<mNchunks; ++i) {
TReducer local_reducer;
for (auto it = mBlockPartition[i]; it != mBlockPartition[i+1]; ++it) {
local_reducer.LocalReduce(f(*it, thread_local_storage));
}
global_reducer.ThreadSafeReduce(local_reducer);
}
}
return global_reducer.GetValue();
}
private:
int mNchunks;	// actual number of chunks (<= TMaxThreads); mBlockPartition has mNchunks+1 valid entries
std::array<TIteratorType, TMaxThreads> mBlockPartition;	// chunk boundaries: [i] .. [i+1) is chunk i
};
/** @brief simplified version of the basic loop (without reduction) to enable template type deduction
* @param v - containers to be looped upon (must provide begin()/end())
* @param func - must be a unary function accepting as input TContainerType::value_type&
* @note the BlockPartition is a temporary: iterators into v are only used for the duration of the call.
*/
template <class TContainerType, class TFunctionType>
void block_for_each(TContainerType &&v, TFunctionType &&func)
{
BlockPartition<TContainerType>(v.begin(), v.end()).for_each(std::forward<TFunctionType>(func));
}
/** @brief simplified version of the basic loop with reduction to enable template type deduction
* @tparam TReducer the reduction to apply (must be given explicitly; it cannot be deduced)
* @param v - containers to be looped upon (must provide begin()/end())
* @param func - must be a unary function accepting as input TContainerType::value_type&
* @return the reduced value, as defined by TReducer::return_type
*/
template <class TReducer, class TContainerType, class TFunctionType>
typename TReducer::return_type block_for_each(TContainerType &&v, TFunctionType &&func)
{
return BlockPartition<TContainerType>(v.begin(), v.end()).template for_each<TReducer>(std::forward<TFunctionType>(func));
}
/** @brief simplified version of the basic loop with thread local storage (TLS) to enable template type deduction
* @param v - containers to be looped upon (must provide begin()/end())
* @param tls - thread local storage prototype, copied once per thread (must be copy constructible)
* @param func - must be a function accepting as input TContainerType::value_type& and the thread local storage
*/
template <class TContainerType, class TThreadLocalStorage, class TFunctionType>
void block_for_each(TContainerType &&v, const TThreadLocalStorage& tls, TFunctionType &&func)
{
BlockPartition<TContainerType>(v.begin(), v.end()).for_each(tls, std::forward<TFunctionType>(func));
}
/** @brief simplified version of the basic loop with reduction and thread local storage (TLS) to enable template type deduction
* @tparam TReducer the reduction to apply (must be given explicitly; it cannot be deduced)
* @param v - containers to be looped upon (must provide begin()/end())
* @param tls - thread local storage prototype, copied once per thread (must be copy constructible)
* @param func - must be a function accepting as input TContainerType::value_type& and the thread local storage
* @return the reduced value, as defined by TReducer::return_type
*/
template <class TReducer, class TContainerType, class TThreadLocalStorage, class TFunctionType>
typename TReducer::return_type block_for_each(TContainerType &&v, const TThreadLocalStorage& tls, TFunctionType &&func)
{
return BlockPartition<TContainerType>(v.begin(), v.end()).template for_each<TReducer>(tls, std::forward<TFunctionType>(func));
}
//***********************************************************************************
//***********************************************************************************
//***********************************************************************************
/** @brief This class is useful for index iteration over containers
* @param TIndexType type of index to be used in the loop
* @param TMaxThreads - maximum number of threads allowed in the partitioning.
* must be known at compile time to avoid heap allocations in the partitioning
*/
/** @brief This class is useful for index iteration over containers
 * @param TIndexType type of index to be used in the loop
 * @param TMaxThreads - maximum number of threads allowed in the partitioning.
 * must be known at compile time to avoid heap allocations in the partitioning
 */
template<class TIndexType=std::size_t, int TMaxThreads=Globals::MaxAllowedThreads>
class IndexPartition
{
public:

    /** @brief constructor using the size of the partition to be used
     * @param Size - the size of the partition
     * @param Nchunks - number of threads to be used in the loop (must be lower than TMaxThreads)
     */
    IndexPartition(TIndexType Size,
                   int Nchunks = ParallelUtilities::GetNumThreads())
    {
        KRATOS_ERROR_IF(Nchunks < 1) << "Number of chunks must be > 0 (and not " << Nchunks << ")" << std::endl;
        if (Size == 0) {
            mNchunks = Nchunks;
        } else {
            // in case the container is smaller than the number of chunks
            mNchunks = std::min(static_cast<int>(Size), Nchunks);
        }
        // BUGFIX: mBlockPartition stores mNchunks+1 boundary indices in an array of
        // TMaxThreads slots, so writing mBlockPartition[mNchunks] below is out of
        // bounds unless mNchunks < TMaxThreads. Enforce the documented precondition
        // instead of silently corrupting memory.
        KRATOS_ERROR_IF(mNchunks >= TMaxThreads) << "Number of chunks must be lower than TMaxThreads = "
            << TMaxThreads << " (and not " << mNchunks << ")" << std::endl;
        const int block_partition_size = Size / mNchunks;
        // boundaries: chunk i spans [mBlockPartition[i], mBlockPartition[i+1])
        mBlockPartition[0] = 0;
        mBlockPartition[mNchunks] = Size;
        for (int i=1; i<mNchunks; i++) {
            mBlockPartition[i] = mBlockPartition[i-1] + block_partition_size;
        }
    }

    virtual ~IndexPartition() = default;

    //NOT COMMENTING IN DOXYGEN - THIS SHOULD BE SORT OF HIDDEN UNTIL GIVEN PRIME TIME
    //pure c++11 version (can handle exceptions)
    template <class TUnaryFunction>
    inline void for_pure_c11(TUnaryFunction &&f)
    {
        std::vector< std::future<void> > runners(mNchunks);
        const auto& partition = mBlockPartition;
        // launch one asynchronous task per chunk
        for (int i=0; i<mNchunks; ++i) {
            runners[i] = std::async(std::launch::async, [&partition, i, &f]()
                {
                    for (auto k = partition[i]; k < partition[i+1]; ++k) {
                        f(k);
                    }
                });
        }

        //here we impose a syncronization and we check the exceptions
        for(int i=0; i<mNchunks; ++i) {
            try {
                runners[i].get();
            }
            catch(Exception& e) {
                KRATOS_ERROR << std::endl << "THREAD number: " << i << " caught exception " << e.what() << std::endl;
            } catch(std::exception& e) {
                KRATOS_ERROR << std::endl << "THREAD number: " << i << " caught exception " << e.what() << std::endl;
            } catch(...) {
                KRATOS_ERROR << std::endl << "unknown error" << std::endl;
            }
        }
    }

    /** simple version of for_each (no reduction) to be called for each index in the partition
     * @param f - must be a unary function accepting as input IndexType
     */
    template <class TUnaryFunction>
    inline void for_each(TUnaryFunction &&f)
    {
        #pragma omp parallel for
        for (int i=0; i<mNchunks; ++i) {
            for (auto k = mBlockPartition[i]; k < mBlockPartition[i+1]; ++k) {
                f(k); //note that we pass the value of the index, not an iterator
            }
        }
    }

    /** version with reduction to be called for each index in the partition
     * function f is expected to return the values to be reduced
     * @param TReducer - template parameter specifying the type of reducer to be applied
     * @param f - must be a unary function accepting as input IndexType
     * @return the reduced value as defined by TReducer
     */
    template <class TReducer, class TUnaryFunction>
    inline typename TReducer::return_type for_each(TUnaryFunction &&f)
    {
        TReducer global_reducer;
        #pragma omp parallel for
        for (int i=0; i<mNchunks; ++i) {
            // per-chunk reducer avoids contention; merged thread-safely at the end
            TReducer local_reducer;
            for (auto k = mBlockPartition[i]; k < mBlockPartition[i+1]; ++k) {
                local_reducer.LocalReduce(f(k));
            }
            global_reducer.ThreadSafeReduce(local_reducer);
        }
        return global_reducer.GetValue();
    }

    /** @brief loop with thread local storage (TLS). f called on every index in the partition
     * @param TThreadLocalStorage template parameter specifying the thread local storage
     * @param rThreadLocalStoragePrototype - prototype copied once per thread
     * @param f - must be a function accepting as input IndexType and the thread local storage
     */
    template <class TThreadLocalStorage, class TFunction>
    inline void for_each(const TThreadLocalStorage& rThreadLocalStoragePrototype, TFunction &&f)
    {
        static_assert(std::is_copy_constructible<TThreadLocalStorage>::value, "TThreadLocalStorage must be copy constructible!");

        #pragma omp parallel
        {
            // copy the prototype to create the thread local storage
            TThreadLocalStorage thread_local_storage(rThreadLocalStoragePrototype);

            #pragma omp for
            for (int i=0; i<mNchunks; ++i) {
                for (auto k = mBlockPartition[i]; k < mBlockPartition[i+1]; ++k) {
                    f(k, thread_local_storage); //note that we pass the value of the index, not an iterator
                }
            }
        }
    }

    /** version with reduction and thread local storage (TLS) to be called for each index in the partition
     * function f is expected to return the values to be reduced
     * @param TReducer - template parameter specifying the type of reducer to be applied
     * @param TThreadLocalStorage template parameter specifying the thread local storage
     * @param rThreadLocalStoragePrototype - prototype copied once per thread
     * @param f - must be a function accepting as input IndexType and the thread local storage
     * @return the reduced value as defined by TReducer
     */
    template <class TReducer, class TThreadLocalStorage, class TFunction>
    inline typename TReducer::return_type for_each(const TThreadLocalStorage& rThreadLocalStoragePrototype, TFunction &&f)
    {
        static_assert(std::is_copy_constructible<TThreadLocalStorage>::value, "TThreadLocalStorage must be copy constructible!");

        TReducer global_reducer;

        #pragma omp parallel
        {
            // copy the prototype to create the thread local storage
            TThreadLocalStorage thread_local_storage(rThreadLocalStoragePrototype);

            #pragma omp for
            for (int i=0; i<mNchunks; ++i) {
                TReducer local_reducer;
                for (auto k = mBlockPartition[i]; k < mBlockPartition[i+1]; ++k) {
                    local_reducer.LocalReduce(f(k, thread_local_storage));
                }
                global_reducer.ThreadSafeReduce(local_reducer);
            }
        }
        return global_reducer.GetValue();
    }

private:
    int mNchunks;                                     // number of chunks actually used (<= TMaxThreads-1)
    std::array<TIndexType, TMaxThreads> mBlockPartition; // chunk boundaries; entries 0..mNchunks are valid
};
} // namespace Kratos.
#endif // KRATOS_PARALLEL_UTILITIES_H_INCLUDED defined
|
decorate.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD EEEEE CCCC OOO RRRR AAA TTTTT EEEEE %
% D D E C O O R R A A T E %
% D D EEE C O O RRRR AAAAA T EEE %
% D D E C O O R R A A T E %
% DDDD EEEEE CCCC OOO R R A A T EEEEE %
% %
% %
% MagickCore Image Decoration Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
/*
Define declarations.
*/
#define AccentuateModulate ScaleCharToQuantum(80)
#define HighlightModulate ScaleCharToQuantum(125)
#define ShadowModulate ScaleCharToQuantum(135)
#define DepthModulate ScaleCharToQuantum(185)
#define TroughModulate ScaleCharToQuantum(110)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B o r d e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BorderImage() surrounds the image with a border of the color defined by
% the bordercolor member of the image structure. The width and height
% of the border are defined by the corresponding members of the border_info
% structure.
%
% The format of the BorderImage method is:
%
% Image *BorderImage(const Image *image,const RectangleInfo *border_info,
% const CompositeOperator compose,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o border_info: define the width and height of the border.
%
% o compose: the composite operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BorderImage(const Image *image,
  const RectangleInfo *border_info,const CompositeOperator compose,
  ExceptionInfo *exception)
{
  FrameInfo
    frame_info;

  Image
    *canvas_image,
    *framed_image;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(border_info != (RectangleInfo *) NULL);
  /*
    A border is simply a frame with no bevels: grow the geometry by twice the
    border width/height and offset the image by the border size.
  */
  frame_info.width=image->columns+(border_info->width << 1);
  frame_info.height=image->rows+(border_info->height << 1);
  frame_info.x=(ssize_t) border_info->width;
  frame_info.y=(ssize_t) border_info->height;
  frame_info.inner_bevel=0;
  frame_info.outer_bevel=0;
  /*
    Clone so the frame color can be set to the border color without touching
    the caller's image; restore the original matte color on the result.
  */
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  canvas_image->matte_color=image->border_color;
  framed_image=FrameImage(canvas_image,&frame_info,compose,exception);
  canvas_image=DestroyImage(canvas_image);
  if (framed_image != (Image *) NULL)
    framed_image->matte_color=image->matte_color;
  return(framed_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F r a m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FrameImage() adds a simulated three-dimensional border around the image.
% The color of the border is defined by the matte_color member of image.
% Members width and height of frame_info specify the border width of the
% vertical and horizontal sides of the frame. Members inner and outer
% indicate the width of the inner and outer shadows of the frame.
%
% The format of the FrameImage method is:
%
% Image *FrameImage(const Image *image,const FrameInfo *frame_info,
% const CompositeOperator compose,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o frame_info: Define the width and height of the frame and its bevels.
%
% o compose: the composite operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FrameImage(const Image *image,const FrameInfo *frame_info,
  const CompositeOperator compose,ExceptionInfo *exception)
{
#define FrameImageTag "Frame/Image"

  CacheView
    *image_view,
    *frame_view;

  Image
    *frame_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    accentuate,
    highlight,
    matte,
    shadow,
    trough;

  ssize_t
    x;

  size_t
    bevel_width,
    height,
    width;

  ssize_t
    y;

  /*
    Check frame geometry.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(frame_info != (FrameInfo *) NULL);
  if ((frame_info->outer_bevel < 0) || (frame_info->inner_bevel < 0))
    ThrowImageException(OptionError,"FrameIsLessThanImageSize");
  /*
    The interior of the frame (total size minus offset and bevels) must be at
    least as large as the source image.
  */
  bevel_width=(size_t) (frame_info->outer_bevel+frame_info->inner_bevel);
  x=(ssize_t) frame_info->width-frame_info->x-bevel_width;
  y=(ssize_t) frame_info->height-frame_info->y-bevel_width;
  if ((x < (ssize_t) image->columns) || (y < (ssize_t) image->rows))
    ThrowImageException(OptionError,"FrameIsLessThanImageSize");
  /*
    Initialize framed image attributes.
  */
  frame_image=CloneImage(image,frame_info->width,frame_info->height,MagickTrue,
    exception);
  if (frame_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(frame_image,DirectClass,exception) == MagickFalse)
    {
      frame_image=DestroyImage(frame_image);
      return((Image *) NULL);
    }
  if ((IsPixelInfoGray(&frame_image->border_color) == MagickFalse) &&
      (IsGrayColorspace(frame_image->colorspace) != MagickFalse))
    (void) SetImageColorspace(frame_image,sRGBColorspace,exception);
  if ((frame_image->matte_color.alpha_trait != UndefinedPixelTrait) &&
      (frame_image->alpha_trait == UndefinedPixelTrait))
    (void) SetImageAlpha(frame_image,OpaqueAlpha,exception);
  frame_image->page=image->page;
  if ((image->page.width != 0) && (image->page.height != 0))
    {
      frame_image->page.width+=frame_image->columns-image->columns;
      frame_image->page.height+=frame_image->rows-image->rows;
    }
  /*
    Initialize 3D effects color.  Accentuate and highlight lighten the matte
    color toward the quantum range; shadow and trough darken it, using the
    *Modulate percentages defined at the top of this file.
  */
  matte=image->matte_color;
  accentuate=matte;
  accentuate.red=(double) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.red+(QuantumRange*AccentuateModulate)));
  accentuate.green=(double) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.green+(QuantumRange*AccentuateModulate)));
  accentuate.blue=(double) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.blue+(QuantumRange*AccentuateModulate)));
  accentuate.black=(double) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.black+(QuantumRange*AccentuateModulate)));
  accentuate.alpha=matte.alpha;
  highlight=matte;
  highlight.red=(double) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.red+(QuantumRange*HighlightModulate)));
  highlight.green=(double) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.green+(QuantumRange*HighlightModulate)));
  highlight.blue=(double) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.blue+(QuantumRange*HighlightModulate)));
  highlight.black=(double) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.black+(QuantumRange*HighlightModulate)));
  highlight.alpha=matte.alpha;
  shadow=matte;
  shadow.red=QuantumScale*matte.red*ShadowModulate;
  shadow.green=QuantumScale*matte.green*ShadowModulate;
  shadow.blue=QuantumScale*matte.blue*ShadowModulate;
  shadow.black=QuantumScale*matte.black*ShadowModulate;
  shadow.alpha=matte.alpha;
  trough=matte;
  trough.red=QuantumScale*matte.red*TroughModulate;
  trough.green=QuantumScale*matte.green*TroughModulate;
  trough.blue=QuantumScale*matte.blue*TroughModulate;
  trough.black=QuantumScale*matte.black*TroughModulate;
  trough.alpha=matte.alpha;
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  frame_view=AcquireAuthenticCacheView(frame_image,exception);
  /*
    Height of the top strip: outer bevel + top margin + inner bevel.
  */
  height=(size_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
    frame_info->inner_bevel);
  if (height != 0)
    {
      ssize_t
        x;

      Quantum
        *magick_restrict q;

      /*
        Draw top of ornamental border.
      */
      q=QueueCacheViewAuthenticPixels(frame_view,0,0,frame_image->columns,
        height,exception);
      if (q != (Quantum *) NULL)
        {
          /*
            Draw top of ornamental border.  q walks the strip row by row;
            each SetPixelViaPixelInfo advances it by one full pixel.
          */
          for (y=0; y < (ssize_t) frame_info->outer_bevel; y++)
          {
            /* outer bevel: highlight on the left diagonal, accentuate on top */
            for (x=0; x < (ssize_t) (frame_image->columns-y); x++)
            {
              if (x < y)
                SetPixelViaPixelInfo(frame_image,&highlight,q);
              else
                SetPixelViaPixelInfo(frame_image,&accentuate,q);
              q+=GetPixelChannels(frame_image);
            }
            for ( ; x < (ssize_t) frame_image->columns; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          /* flat margin rows between outer bevel and inner bevel */
          for (y=0; y < (ssize_t) (frame_info->y-bevel_width); y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            width=frame_image->columns-2*frame_info->outer_bevel;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          /* inner bevel above the image: shadow/trough sloping inwards */
          for (y=0; y < (ssize_t) frame_info->inner_bevel; y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            width=image->columns+((size_t) frame_info->inner_bevel << 1)-
              y;
            for (x=0; x < (ssize_t) width; x++)
            {
              if (x < y)
                SetPixelViaPixelInfo(frame_image,&shadow,q);
              else
                SetPixelViaPixelInfo(frame_image,&trough,q);
              q+=GetPixelChannels(frame_image);
            }
            for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            width=frame_info->width-frame_info->x-image->columns-bevel_width;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          (void) SyncCacheViewAuthenticPixels(frame_view,exception);
        }
    }
  /*
    Draw sides of ornamental border.  One scanline per iteration; rows are
    independent so they are processed in parallel.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,frame_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    ssize_t
      x;

    Quantum
      *magick_restrict q;

    size_t
      width;

    /*
      Initialize scanline with matte color.
    */
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(frame_view,0,frame_info->y+y,
      frame_image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* left side: outer highlight, margin, inner shadow */
    for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
    {
      SetPixelViaPixelInfo(frame_image,&highlight,q);
      q+=GetPixelChannels(frame_image);
    }
    for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
    {
      SetPixelViaPixelInfo(frame_image,&matte,q);
      q+=GetPixelChannels(frame_image);
    }
    for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
    {
      SetPixelViaPixelInfo(frame_image,&shadow,q);
      q+=GetPixelChannels(frame_image);
    }
    /*
      Set frame interior pixels.  These are overwritten by the source image
      in the CompositeImage call at the end of this function.
    */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(frame_image,&frame_image->border_color,q);
      q+=GetPixelChannels(frame_image);
    }
    /* right side: inner highlight, margin, outer shadow */
    for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
    {
      SetPixelViaPixelInfo(frame_image,&highlight,q);
      q+=GetPixelChannels(frame_image);
    }
    width=frame_info->width-frame_info->x-image->columns-bevel_width;
    for (x=0; x < (ssize_t) width; x++)
    {
      SetPixelViaPixelInfo(frame_image,&matte,q);
      q+=GetPixelChannels(frame_image);
    }
    for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
    {
      SetPixelViaPixelInfo(frame_image,&shadow,q);
      q+=GetPixelChannels(frame_image);
    }
    if (SyncCacheViewAuthenticPixels(frame_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FrameImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Height of the bottom strip: inner bevel + bottom margin + outer bevel.
  */
  height=(size_t) (frame_info->inner_bevel+frame_info->height-
    frame_info->y-image->rows-bevel_width+frame_info->outer_bevel);
  if (height != 0)
    {
      ssize_t
        x;

      Quantum
        *magick_restrict q;

      /*
        Draw bottom of ornamental border.
      */
      q=QueueCacheViewAuthenticPixels(frame_view,0,(ssize_t) (frame_image->rows-
        height),frame_image->columns,height,exception);
      if (q != (Quantum *) NULL)
        {
          /*
            Draw bottom of ornamental border (mirror of the top: inner bevel,
            flat margin, then outer bevel).
          */
          for (y=frame_info->inner_bevel-1; y >= 0; y--)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < y; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
            for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
            {
              if (x >= (ssize_t) (image->columns+2*frame_info->inner_bevel-y))
                SetPixelViaPixelInfo(frame_image,&highlight,q);
              else
                SetPixelViaPixelInfo(frame_image,&accentuate,q);
              q+=GetPixelChannels(frame_image);
            }
            width=frame_info->width-frame_info->x-image->columns-bevel_width;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          height=frame_info->height-frame_info->y-image->rows-bevel_width;
          for (y=0; y < (ssize_t) height; y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            width=frame_image->columns-2*frame_info->outer_bevel;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          for (y=frame_info->outer_bevel-1; y >= 0; y--)
          {
            for (x=0; x < y; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            for ( ; x < (ssize_t) frame_image->columns; x++)
            {
              if (x >= (ssize_t) (frame_image->columns-y))
                SetPixelViaPixelInfo(frame_image,&shadow,q);
              else
                SetPixelViaPixelInfo(frame_image,&trough,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          (void) SyncCacheViewAuthenticPixels(frame_view,exception);
        }
    }
  frame_view=DestroyCacheView(frame_view);
  image_view=DestroyCacheView(image_view);
  /*
    Composite the source image into the frame interior.
  */
  x=(ssize_t) (frame_info->outer_bevel+(frame_info->x-bevel_width)+
    frame_info->inner_bevel);
  y=(ssize_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
    frame_info->inner_bevel);
  if (status != MagickFalse)
    status=CompositeImage(frame_image,image,compose,MagickTrue,x,y,
      exception);
  if (status == MagickFalse)
    frame_image=DestroyImage(frame_image);
  return(frame_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RaiseImage() creates a simulated three-dimensional button-like effect
% by lightening and darkening the edges of the image. Members width and
% height of raise_info define the width of the vertical and horizontal
% edge of the effect.
%
% The format of the RaiseImage method is:
%
% MagickBooleanType RaiseImage(const Image *image,
% const RectangleInfo *raise_info,const MagickBooleanType raise,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o raise_info: Define the width and height of the raise area.
%
% o raise: A value other than zero creates a 3-D raise effect,
% otherwise it has a lowered effect.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RaiseImage(Image *image,
  const RectangleInfo *raise_info,const MagickBooleanType raise,
  ExceptionInfo *exception)
{
#define AccentuateFactor ScaleCharToQuantum(135)
#define HighlightFactor ScaleCharToQuantum(190)
#define ShadowFactor ScaleCharToQuantum(190)
#define RaiseImageTag "Raise/Image"
#define TroughFactor ScaleCharToQuantum(135)

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    foreground,
    background;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(raise_info != (RectangleInfo *) NULL);
  if ((image->columns <= (raise_info->width << 1)) ||
      (image->rows <= (raise_info->height << 1)))
    ThrowBinaryException(OptionError,"ImageSizeMustExceedBevelWidth",
      image->filename);
  /*
    Each edge pixel is blended toward foreground (lightened) or background
    (darkened); swapping the two when raise is false turns the raised button
    into a lowered one.
  */
  foreground=QuantumRange;
  background=(Quantum) 0;
  if (raise == MagickFalse)
    {
      foreground=(Quantum) 0;
      background=QuantumRange;
    }
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Raise image.  The image is processed in three horizontal bands: the top
    bevel rows, the middle rows (left/right bevels only), and the bottom
    bevel rows.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,raise_info->height,1)
#endif
  for (y=0; y < (ssize_t) raise_info->height; y++)
  {
    ssize_t
      i,
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* left diagonal of the top band: highlight */
    for (x=0; x < y; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double)
          foreground*(QuantumRange-HighlightFactor)));
      }
      q+=GetPixelChannels(image);
    }
    /* top edge between the diagonals: accentuate */
    for ( ; x < (ssize_t) (image->columns-y); x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*AccentuateFactor+
          (double) foreground*(QuantumRange-AccentuateFactor)));
      }
      q+=GetPixelChannels(image);
    }
    /* right diagonal of the top band: shadow */
    for ( ; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double)
          background*(QuantumRange-ShadowFactor)));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,RaiseImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Middle band: only the left and right bevel columns are modified; the
    interior pixels are skipped.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows-2*raise_info->height,1)
#endif
  for (y=(ssize_t) raise_info->height; y < (ssize_t) (image->rows-raise_info->height); y++)
  {
    ssize_t
      i,
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* left bevel columns: highlight */
    for (x=0; x < (ssize_t) raise_info->width; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double)
          foreground*(QuantumRange-HighlightFactor)));
      }
      q+=GetPixelChannels(image);
    }
    /* interior: untouched, just advance the pixel pointer */
    for ( ; x < (ssize_t) (image->columns-raise_info->width); x++)
      q+=GetPixelChannels(image);
    /* right bevel columns: shadow */
    for ( ; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double)
          background*(QuantumRange-ShadowFactor)));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,RaiseImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Bottom band: mirror of the top (highlight diagonal, trough edge, shadow
    diagonal).
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows-raise_info->height,1)
#endif
  for (y=(ssize_t) (image->rows-raise_info->height); y < (ssize_t) image->rows; y++)
  {
    ssize_t
      i,
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->rows-y); x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double)
          foreground*(QuantumRange-HighlightFactor)));
      }
      q+=GetPixelChannels(image);
    }
    for ( ; x < (ssize_t) (image->columns-(image->rows-y)); x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*TroughFactor+
          (double) background*(QuantumRange-TroughFactor)));
      }
      q+=GetPixelChannels(image);
    }
    for ( ; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double)
          background*(QuantumRange-ShadowFactor)));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,RaiseImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
GB_unaryop__minv_uint8_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint8_int32
// op(A') function: GB_tran__minv_uint8_int32
// C type: uint8_t
// A type: int32_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 8)
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 8) ;
// casting
#define GB_CASTING(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT8 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary op to every entry: Cx [k] = minv ((uint8_t) Ax [k]).
// The GB_CAST_OP macro (defined above) performs the typecast and the op.
GrB_Info GB_unop__minv_uint8_int32
(
    uint8_t *Cx,                // Cx and Ax may be aliased
    int32_t *Ax,
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    // static schedule: each thread handles one contiguous range of entries
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // Cx [k] = op (cast (Ax [k]))
        GB_CAST_OP (k, k) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op ((uint8_t) A'): transpose A, typecast, and apply the minv operator.
// The actual loop lives in the shared template GB_unaryop_transpose.c, which
// is specialized here via the GB_* macros defined at the top of this file.
GrB_Info GB_tran__minv_uint8_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,    // Rowcounts [naslice]
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice                         // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // run only the numerical (second) phase of the transpose template
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__max_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__max_int8
// A.*B function (eWiseMult): GB_AemultB__max_int8
// A*D function (colscale): GB_AxD__max_int8
// D*A function (rowscale): GB_DxB__max_int8
// C+=B function (dense accum): GB_Cdense_accumB__max_int8
// C+=b function (dense accum): GB_Cdense_accumb__max_int8
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__max_int8
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__max_int8
// C=scalar+B GB_bind1st__max_int8
// C=scalar+B' GB_bind1st_tran__max_int8
// C=A+scalar GB_bind2nd__max_int8
// C=A'+scalar GB_bind2nd_tran__max_int8
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = GB_IMAX (aij, bij)
// Type and operator macros consumed by the templates #include'd below.
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_IMAX (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// "(none)" is a generated placeholder: no CBLAS gateway exists for MAX, so
// this macro must never actually be expanded in this file.
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_INT8 || GxB_NO_MAX_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B with C, A, and B all dense.  Note: unlike the kernels below this
// one returns void and has no GB_DISABLE guard — presumably the caller
// checks the disable condition first; TODO confirm.
void GB_Cdense_ewise3_accum__max_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// the loop lives in the shared template, driven by the macros above
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with all three matrices dense (no accumulation into C).
GrB_Info GB_Cdense_ewise3_noaccum__max_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the loop lives in the shared template, driven by the macros above
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C with the
// MAX operator.  B is traversed via the kfirst/klast/pstart slices so the
// work can be split across ntasks tasks.
GrB_Info GB_Cdense_accumB__max_int8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// the loop lives in the shared template
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar b into the dense matrix C, using the
// MAX operator: C(i,j) = GB_IMAX (C(i,j), b) for every entry.
// Returns GrB_NO_VALUE when this kernel is compiled out, GrB_SUCCESS
// otherwise.
GrB_Info GB_Cdense_accumb__max_int8
(
GrB_Matrix C,
const GB_void *p_bwork,     // the scalar b, passed as a type-punned pointer
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
// the loop lives in the shared template, which returns on completion
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// fix: removed an unreachable duplicate "return (GrB_SUCCESS) ;" that
// followed the block above (the path inside the braces already returns)
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D using the MAX
// operator.  Only the values array Cx is written here; the pattern of C is
// handled by the caller / template (see GB_AxB_colscale_meta.c).
GrB_Info GB_AxD__max_int8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D using the MAX
// operator.  Only the values array Cx is written here (see
// GB_AxB_rowscale_meta.c for the traversal).
GrB_Info GB_DxB__max_int8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// workspace cleanup invoked on exit from the eWiseAdd/eWiseMult kernels;
// the slice arrays themselves are managed inside the included templates
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B with the MAX operator, honoring the
// requested sparsity format C_sparsity and the (optionally complemented,
// optionally structural) mask M.
GrB_Info GB_AaddB__max_int8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slice workspace, released by GB_FREE_ALL after the template runs
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the MAX operator (the result
// pattern is the intersection of A and B; see GB_emult_template.c).
GrB_Info GB_AemultB__max_int8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slice workspace, released by GB_FREE_ALL after the template runs
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the MAX operator with the scalar x bound to the
// first argument, visiting every entry present in the bitmap Bb (a NULL
// bitmap means all anz entries are present).
GrB_Info GB_bind1st__max_int8
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped input/output arrays
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Bx = (int8_t *) Bx_input ;
const int8_t x = (*((int8_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (GBB (Bb, p))
{
// Cx [p] = max (x, Bx [p])
int8_t bij = Bx [p] ;
Cx [p] = GB_IMAX (x, bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the MAX operator with the scalar y bound to the
// second argument, visiting every entry present in the bitmap Ab (a NULL
// bitmap means all anz entries are present).
GrB_Info GB_bind2nd__max_int8
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped input/output arrays
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
const int8_t y = (*((int8_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (GBB (Ab, p))
{
// Cx [p] = max (Ax [p], y)
int8_t aij = Ax [p] ;
Cx [p] = GB_IMAX (aij, y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = GB_IMAX (x, aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar x
// bound to the first argument; the traversal is in GB_unop_transpose.c.
GrB_Info GB_bind1st_tran__max_int8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (both types are int8_t here,
// so the redefinition is textually identical)
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = GB_IMAX (aij, y) ; \
}
// C = op (A', y): transpose A and apply the operator with the scalar y
// bound to the second argument; the traversal is in GB_unop_transpose.c.
GrB_Info GB_bind2nd_tran__max_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pcgbtrf.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/pzgbtrf.c, normal z -> c, Fri Sep 28 17:38:10 2018
*
**/
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
#include <plasma_core_blas.h>
#define A(m, n) ((plasma_complex32_t*)plasma_tile_addr(A, m, n))
/******************************************************************************/
void plasma_pcgbtrf(plasma_desc_t A, int *ipiv,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Tile LU factorization with partial pivoting of the band matrix described
// by A (single-precision complex, per the z->c generation note above).
// Panel factorization, row swaps, triangular solves and gemm updates are
// submitted as OpenMP tasks with tile-level dependencies; pivot indices
// are returned in ipiv.  Errors are reported through sequence/request.
// Return if failed sequence.
if (sequence->status != PlasmaSuccess)
return;
// Read parameters from the context.
plasma_context_t *plasma = plasma_context_self();
int ib = plasma->ib;
int max_panel_threads = plasma->max_panel_threads;
for (int k = 0; k < imin(A.mt, A.nt); k++) {
// for band matrix, gm is a multiple of mb,
// and there is no a10 submatrix
int mvak = plasma_tile_mview(A, k);
int nvak = plasma_tile_nview(A, k);
int ldak = plasma_tile_mmain_band(A, k, k);
// panel
int *ipivk = NULL;
plasma_complex32_t *a00 = NULL;
// panel height: remaining rows, capped by the view plus kl subdiagonals
int mak = imin(A.m-k*A.mb, mvak+A.kl);
int size_a00 = (A.gm-k*A.mb) * plasma_tile_nmain(A, k);
int size_i = imin(mvak, nvak);
// threads cooperating on this panel, bounded by the remaining tiles
int num_panel_threads = imin(max_panel_threads,
imin(imin(A.mt, A.nt)-k, A.klt));
ipivk = &ipiv[k*A.mb];
a00 = A(k, k);
#pragma omp task depend(inout:a00[0:size_a00]) \
depend(out:ipivk[0:size_i]) \
priority(1)
{
// per-thread scratch for the cooperative pivot search
volatile int *max_idx = (int*)malloc(num_panel_threads*sizeof(int));
if (max_idx == NULL)
plasma_request_fail(sequence, request, PlasmaErrorOutOfMemory);
volatile plasma_complex32_t *max_val =
(plasma_complex32_t*)malloc(num_panel_threads*sizeof(
plasma_complex32_t));
if (max_val == NULL)
plasma_request_fail(sequence, request, PlasmaErrorOutOfMemory);
// NOTE(review): on allocation failure the request is marked failed but
// execution continues and the NULL buffers are passed to the panel
// tasks below — confirm the guard on sequence->status is sufficient.
volatile int info = 0;
plasma_barrier_t barrier;
plasma_barrier_init(&barrier);
if (sequence->status == PlasmaSuccess) {
for (int rank = 0; rank < num_panel_threads; rank++) {
#pragma omp task shared(barrier) priority(1)
{
// create a view for panel as a "general" submatrix
plasma_desc_t view = plasma_desc_view(
A, (A.kut-1)*A.mb, k*A.nb, mak, nvak);
view.type = PlasmaGeneral;
plasma_core_cgetrf(view, &ipiv[k*A.mb], ib,
rank, num_panel_threads,
max_idx, max_val, &info,
&barrier);
// nonzero info reports a failed (zero) pivot in the panel
if (info != 0)
plasma_request_fail(sequence, request, k*A.mb+info);
}
}
}
#pragma omp taskwait
free((void*)max_idx);
free((void*)max_val);
}
// update
// TODO: fills are not tracked, see the one in fork
for (int n = k+1; n < imin(A.nt, k+A.kut); n++) {
plasma_complex32_t *a01 = NULL;
plasma_complex32_t *a11 = NULL;
int nvan = plasma_tile_nview(A, n);
int size_a01 = ldak*nvan;
int size_a11 = (A.gm-(k+1)*A.mb)*nvan;
a01 = A(k, n);
a11 = A(k+1, n);
#pragma omp task depend(in:a00[0:size_a00]) \
depend(inout:ipivk[0:size_i]) \
depend(inout:a01[0:size_a01]) \
depend(inout:a11[0:size_a11]) \
priority(n == k+1)
{
if (sequence->status == PlasmaSuccess) {
// geswp
// apply the panel's row interchanges to trailing column n
int k1 = k*A.mb+1;
int k2 = imin(k*A.mb+A.mb, A.m);
plasma_desc_t view =
plasma_desc_view(A,
(A.kut-1 + k-n)*A.mb, n*A.nb,
mak, nvan);
view.type = PlasmaGeneral;
plasma_core_cgeswp(
PlasmaRowwise, view, 1, k2-k1+1, &ipiv[k*A.mb], 1);
// trsm
// solve with the unit-lower triangular factor of the panel
plasma_core_ctrsm(PlasmaLeft, PlasmaLower,
PlasmaNoTrans, PlasmaUnit,
mvak, nvak,
1.0, A(k, k), ldak,
A(k, n), plasma_tile_mmain_band(A, k, n));
// gemm
// update the tiles below the current block row within the band
for (int m = imax(k+1, n-A.kut); m < imin(k+A.klt, A.mt); m++) {
int mvam = plasma_tile_mview(A, m);
#pragma omp task priority(n == k+1)
{
plasma_core_cgemm(
PlasmaNoTrans, PlasmaNoTrans,
mvam, nvan, A.nb,
-1.0, A(m, k), plasma_tile_mmain_band(A, m, k),
A(k, n), plasma_tile_mmain_band(A, k, n),
1.0, A(m, n), plasma_tile_mmain_band(A, m, n));
}
}
#pragma omp taskwait
}
}
}
// shift the panel's local pivot rows to global row indices
#pragma omp task depend(in:ipivk[0:size_i])
if (sequence->status == PlasmaSuccess) {
if (k > 0) {
for (int i = 0; i < imin(mak, nvak); i++) {
ipiv[k*A.mb+i] += k*A.mb;
}
}
}
}
}
|
convolution_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias) {
    // Naive single-precision 3x3, stride-1 convolution.  Each output channel
    // accumulates over all input channels; output rows are produced in pairs
    // so every loaded input row contributes to two output rows.
    const int w = bottom_blob.w;
    const int inch = bottom_blob.c;
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;
    const float *kernel = _kernel;
    const float *bias = _bias;
#pragma omp parallel for
    for (int p = 0; p < outch; p++) {
        Mat out = top_blob.channel(p);
        // seed the whole channel with its bias (zero when no bias is given)
        out.fill(bias ? bias[p] : 0.f);
        for (int q = 0; q < inch; q++) {
            // the three 3-tap kernel rows for this (output, input) pair
            const float *kernel0 = kernel + p * inch * 9 + q * 9;
            const float *k0 = kernel0;
            const float *k1 = kernel0 + 3;
            const float *k2 = kernel0 + 6;
            // four consecutive input rows feed two output rows
            const float *img0 = bottom_blob.channel(q);
            const float *r0 = img0;
            const float *r1 = img0 + w;
            const float *r2 = img0 + w * 2;
            const float *r3 = img0 + w * 3;
            float *outptr = out;
            float *outptr2 = outptr + outw;
            int i = 0;
            // main part: two output rows per pass
            for (; i + 1 < outh; i += 2) {
                for (int j = 0; j < outw; j++) {
                    float sum = 0.f;
                    float sum2 = 0.f;
                    // upper output pixel reads rows 0-2, lower reads rows 1-3
                    for (int t = 0; t < 3; t++)
                        sum += r0[t] * k0[t];
                    for (int t = 0; t < 3; t++)
                        sum += r1[t] * k1[t];
                    for (int t = 0; t < 3; t++)
                        sum += r2[t] * k2[t];
                    for (int t = 0; t < 3; t++)
                        sum2 += r1[t] * k0[t];
                    for (int t = 0; t < 3; t++)
                        sum2 += r2[t] * k1[t];
                    for (int t = 0; t < 3; t++)
                        sum2 += r3[t] * k2[t];
                    *outptr += sum;
                    *outptr2 += sum2;
                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    outptr++;
                    outptr2++;
                }
                // hop over the 2-pixel right border plus one already-consumed
                // row, landing two input rows further down
                r0 += 2 + w;
                r1 += 2 + w;
                r2 += 2 + w;
                r3 += 2 + w;
                // output pointers advanced outw in the loop; skip one more row
                outptr += outw;
                outptr2 += outw;
            }
            // leftover single output row when outh is odd
            for (; i < outh; i++) {
                for (int j = 0; j < outw; j++) {
                    float sum = 0.f;
                    for (int t = 0; t < 3; t++)
                        sum += r0[t] * k0[t];
                    for (int t = 0; t < 3; t++)
                        sum += r1[t] * k1[t];
                    for (int t = 0; t < 3; t++)
                        sum += r2[t] * k2[t];
                    *outptr += sum;
                    r0++;
                    r1++;
                    r2++;
                    outptr++;
                }
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }
        }
    }
}
|
convolution_pack4to8_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Generic fp16 convolution, input packed 4 (4 channels per element) and
// output packed 8, with fp16 arithmetic (NEON fp16 FMA intrinsics).
static void convolution_pack4to8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data_fp16, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int maxk = kernel_w * kernel_h;
// kernel offsets
// precompute, for each of the maxk taps, its (dilation-aware) element
// offset within the input window; gap jumps to the next kernel row
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
const __fp16* bias_data_ptr = bias_data_fp16;
// num_output
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
__fp16* outptr = top_blob.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
// start the 8-wide accumulator from the bias, or from zero
float16x8_t _sum = vdupq_n_f16((__fp16)0.f);
if (bias_data_ptr)
{
_sum = vld1q_f16(bias_data_ptr + p * 8);
}
const __fp16* kptr = weight_data_fp16.channel(p);
// channels
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob.channel(q);
const __fp16* sptr = m.row<const __fp16>(i * stride_h) + j * stride_w * 4;
for (int k = 0; k < maxk; k++)
{
// 4 packed input channels for this tap, and the four
// matching 8-wide weight vectors (32 weights per tap)
float16x4_t _val = vld1_f16(sptr + space_ofs[k] * 4);
float16x8_t _w0 = vld1q_f16(kptr);
float16x8_t _w1 = vld1q_f16(kptr + 8);
float16x8_t _w2 = vld1q_f16(kptr + 16);
float16x8_t _w3 = vld1q_f16(kptr + 24);
// _sum += _wN * _val[N]  (per-lane fused multiply-add)
_sum = vfmaq_lane_f16(_sum, _w0, _val, 0);
_sum = vfmaq_lane_f16(_sum, _w1, _val, 1);
_sum = vfmaq_lane_f16(_sum, _w2, _val, 2);
_sum = vfmaq_lane_f16(_sum, _w3, _val, 3);
kptr += 32;
}
}
// apply the fused activation selected by activation_type
_sum = activation_ps(_sum, activation_type, activation_params);
vst1q_f16(outptr + j * 8, _sum);
}
outptr += outw * 8;
}
}
}
|
dicts.h |
/* Software SPAMS v2.1 - Copyright 2009-2011 Julien Mairal
*
* This file is part of SPAMS.
*
* SPAMS is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* SPAMS is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with SPAMS. If not, see <http://www.gnu.org/licenses/>.
*/
/*!
*
* \file
* toolbox dictLearn
*
* by Julien Mairal
* julien.mairal@inria.fr
*
* File dicts.h
* \brief Contains dictionary learning algorithms
* It requires the toolbox decomp */
#ifndef DICTS_H
#define DICTS_H
#include <decomp.h>
// Scratch buffer for the log-file names built in Trainer<T>::train.
// NOTE(review): a non-static definition in a header yields one definition
// per translation unit and risks multiple-definition link errors if this
// header is included from several TUs — confirm intended usage.
char buffer_string[50];
// constraint set for dictionary atoms: plain L2 ball, or sparse
// (elastic-net style) projections — see the sparseProject calls below
enum constraint_type_D { L2, L1L2, L1L2FL, L1L2MU};
// how the learning-rate parameters (t0, rho) are interpreted in train()
enum mode_compute { AUTO, PARAM1, PARAM2, PARAM3};
// Bag of options for the dictionary-learning solvers (Trainer<T>::train /
// trainOffline).  Defaults come from the constructor below; iter and
// lambda have NO default and must be set by the caller.
template <typename T> struct ParamDictLearn {
public:
ParamDictLearn() :
mode(PENALTY),
posAlpha(false),
modeD(L2),
posD(false),
modeParam(AUTO),
t0(1e-5),
rho(5),
gamma1(0),
mu(0),
lambda3(0),
lambda4(0),
lambda2(0),
gamma2(0),
approx(0.0),
p(1.0),
whiten(false),
expand(false),
isConstant(false),
updateConstant(true),
ThetaDiag(false),
ThetaDiagPlus(false),
ThetaId(false),
DequalsW(false),
weightClasses(false),
balanceClasses(false),
extend(false),
pattern(false),
stochastic(false),
scaleW(false),
batch(false),
verbose(true),
clean(true),
log(false),
updateD(true),
updateW(true),
updateTheta(true),
logName(NULL),
iter_updateD(1) { };
// NOTE(review): logName is a raw owning char* released with delete[];
// copying a ParamDictLearn (the implicit copy ctor is available) would
// double-free it — confirm instances are never copied.
~ParamDictLearn() { delete[](logName); };
// number of iterations; train() treats a negative value as a time budget
// of |iter| seconds (no default — must be set by the caller)
int iter;
// sparse-coding parameter; its meaning depends on "mode" (no default —
// must be set by the caller)
T lambda;
constraint_type mode;      // sparse-coding formulation (PENALTY, SPARSITY, ...)
bool posAlpha;             // nonnegativity constraint on the codes
constraint_type_D modeD;   // constraint set for the dictionary atoms
bool posD;                 // nonnegativity constraint on the atoms
mode_compute modeParam;    // learning-rate schedule selection (see train())
T t0;                      // learning-rate schedule offset
T rho;                     // learning-rate schedule scale/exponent
T gamma1;                  // first sparse-dictionary regularizer (sparseProject)
T mu;
T lambda3;
T lambda4;
T lambda2;                 // ridge added to the Gram diagonal in train()
T gamma2;                  // second sparse-dictionary regularizer (sparseProject)
T approx;
T p;
bool whiten;               // whiten each signal before coding (see train())
bool expand;
bool isConstant;
bool updateConstant;
bool ThetaDiag;
bool ThetaDiagPlus;
bool ThetaId;
bool DequalsW;
bool weightClasses;
bool balanceClasses;
bool extend;
bool pattern;              // patterned whitening variant (see train())
bool stochastic;           // stochastic-gradient dictionary update
bool scaleW;
bool batch;                // full-batch update instead of online
bool verbose;
bool clean;                // prune duplicate/degenerate atoms each iteration
bool log;                  // dump intermediate dictionaries to logName files
bool updateD;
bool updateW;
bool updateTheta;
char* logName;             // owning pointer, delete[]'d in the destructor
int iter_updateD;          // dictionary-update sweeps per iteration
};
// Online/batch dictionary-learning driver.  Maintains the dictionary _D
// and the running statistics _A and _B that the update steps in train()
// accumulate (codes' Gram matrix and data-code products, per their use in
// train() below).
template <typename T> class Trainer {
public:
/// Empty constructor
Trainer();
/// Constructor with data
Trainer(const int k, const int batchsize = 256,
const int NUM_THREADS=-1);
/// Constructor with initial dictionary
Trainer(const Matrix<T>& D, const int batchsize = 256,
const int NUM_THREADS=-1);
/// Constructor with existing structure
Trainer(const Matrix<T>& A, const Matrix<T>& B, const Matrix<T>& D,
const int itercount, const int batchsize,
const int NUM_THREADS);
/// train or retrain using the matrix X
void train(const Data<T>& X, const ParamDictLearn<T>& param);
void trainOffline(const Data<T>& X, const ParamDictLearn<T>& param);
/// train or retrain using the groups XT
void train(const Data<T>& X, const vector_groups& groups,
const int J, const constraint_type
mode, const bool whiten = false, const T* param_C = NULL,
const int p = 1, const bool pattern = false);
/// Accessors
void getA(Matrix<T>& A) const { A.copy(_A);};
void getB(Matrix<T>& B) const { B.copy(_B);};
void getD(Matrix<T>& D) const { D.copy(_D);};
int getIter() const { return _itercount; };
private:
/// Forbid lazy copies
explicit Trainer<T>(const Trainer<T>& trainer);
/// Forbid lazy copies
Trainer<T>& operator=(const Trainer<T>& trainer);
/// clean the dictionary: re-seed degenerate/duplicate atoms from X
void cleanDict(const Data<T>& X, Matrix<T>& G,
const bool posD = false,
const constraint_type_D modeD = L2, const T gamma1 = 0,
const T gamma2 = 0,
const T maxCorrel =
0.999999);
/// clean the dictionary: ridge-regularize its Gram matrix G
void cleanDict(Matrix<T>& G);
Matrix<T> _A;        // accumulated codes' Gram matrix (K x K)
Matrix<T> _B;        // accumulated data-code products (n x K)
Matrix<T> _D;        // current dictionary, one atom per column
int _k;              // number of atoms
bool _initialDict;   // true once _D holds a usable dictionary
int _itercount;      // iterations already performed (for retraining)
int _batchsize;
int _NUM_THREADS;
};
/// Empty constructor
/// Empty constructor: auto-detect the thread count and scale the default
/// mini-batch size (256) with it.
template <typename T> Trainer<T>::Trainer() : _k(0), _initialDict(false),
   _itercount(0), _batchsize(256) {
   int threads = 1;
#ifdef _OPENMP
   threads = MIN(MAX_THREADS,omp_get_num_procs());
#endif
   _NUM_THREADS = threads;
   _batchsize = floor(_batchsize*(threads+1)/2);
};
/// Constructor with data
/// Constructor given the number of atoms k; NUM_THREADS == -1 means
/// "auto-detect" (all cores under OpenMP, otherwise one thread).
template <typename T>
Trainer<T>::Trainer(const int k, const int batchsize, const int NUM_THREADS)
   : _k(k), _initialDict(false), _itercount(0), _batchsize(batchsize),
     _NUM_THREADS(NUM_THREADS) {
   if (_NUM_THREADS == -1) {
      int threads = 1;
#ifdef _OPENMP
      threads = MIN(MAX_THREADS,omp_get_num_procs());
#endif
      _NUM_THREADS = threads;
   }
};
/// Constructor with initial dictionary
/// Constructor with an initial dictionary D; the statistics _A and _B are
/// sized to match D and start at zero.  NUM_THREADS == -1 auto-detects.
template <typename T>
Trainer<T>::Trainer(const Matrix<T>& D, const int batchsize,
      const int NUM_THREADS)
   : _k(D.n()), _initialDict(true), _itercount(0), _batchsize(batchsize),
     _NUM_THREADS(NUM_THREADS) {
   _D.copy(D);
   _A.resize(D.n(),D.n());
   _B.resize(D.m(),D.n());
   if (_NUM_THREADS == -1) {
      int threads = 1;
#ifdef _OPENMP
      threads = MIN(MAX_THREADS,omp_get_num_procs());
#endif
      _NUM_THREADS = threads;
   }
}
/// Constructor with existing structure
/// Constructor restoring an existing training state: statistics A and B,
/// dictionary D, and the iteration count reached so far.
template <typename T>
Trainer<T>::Trainer(const Matrix<T>& A, const Matrix<T>& B,
      const Matrix<T>& D, const int itercount, const int batchsize,
      const int NUM_THREADS)
   : _k(D.n()), _initialDict(true), _itercount(itercount),
     _batchsize(batchsize), _NUM_THREADS(NUM_THREADS) {
   _D.copy(D);
   _A.copy(A);
   _B.copy(B);
   if (_NUM_THREADS == -1) {
      int threads = 1;
#ifdef _OPENMP
      threads = MIN(MAX_THREADS,omp_get_num_procs());
#endif
      _NUM_THREADS = threads;
   }
};
template <typename T>
void Trainer<T>::cleanDict(const Data<T>& X, Matrix<T>& G,
const bool posD,
const constraint_type_D modeD, const T gamma1,
const T gamma2,
const T maxCorrel) {
// Replace degenerate atoms of _D: any atom that duplicates another one
// (normalized inner product above maxCorrel) or has near-zero norm is
// re-seeded with a random column of X, re-projected onto the dictionary
// constraint set, and the Gram matrix G = D'D is patched in place.
int sparseD = modeD == L1L2 ? 2 : 6;   // projection mode for sparse atoms
const int k = _D.n();
const int n = _D.m();
const int M = X.n();                   // number of training signals in X
T* const pr_G=G.rawX();
Vector<T> aleat(n);
Vector<T> col(n);
for (int i = 0; i<k; ++i) {
//pr_G[i*k+i] += 1e-10;
for (int j = i; j<k; ++j) {
// j > i: atoms i and j too correlated; j == i: atom j has ~zero norm
if ((j > i && abs(pr_G[i*k+j])/sqrt(pr_G[i*k+i]*pr_G[j*k+j]) > maxCorrel) ||
(j == i && abs(pr_G[i*k+j]) < 1e-4)) {
/// remove element j and replace it by a random element of X
const int ind = random() % M;
Vector<T> d, g;
_D.refCol(j,d);
X.getData(col,ind);
d.copy(col);
if (modeD != L2) {
// project the new atom onto the sparse constraint set
aleat.copy(d);
aleat.sparseProject(d,T(1.0),sparseD,gamma1,gamma2,T(2.0),posD);
} else {
if (posD) d.thrsPos();
d.normalize();
}
// recompute column j of G, then mirror it into row j
G.refCol(j,g);
_D.multTrans(d,g);
for (int l = 0; l<_D.n(); ++l)
pr_G[l*k+j] = pr_G[j*k+l];
}
}
}
}
template <typename T>
void Trainer<T>::cleanDict(Matrix<T>& G) {
   // Ridge-regularize the Gram matrix G = D'D by adding a tiny constant to
   // its diagonal, keeping downstream solves numerically stable.
   // (fix: removed the unused local "n = _D.m()".)
   const int k = _D.n();
   T* const pr_G=G.rawX();
   for (int i = 0; i<k; ++i) {
      // column-major storage with leading dimension k: (i,i) entry
      pr_G[i*k+i] += 1e-10;
   }
}
template <typename T>
void Trainer<T>::train(const Data<T>& X, const ParamDictLearn<T>& param) {
T rho = param.rho;
T t0 = param.t0;
int sparseD = param.modeD == L1L2 ? 2 : param.modeD == L1L2MU ? 7 : 6;
int NUM_THREADS=init_omp(_NUM_THREADS);
if (param.verbose) {
cout << "num param iterD: " << param.iter_updateD << endl;
if (param.batch) {
cout << "Batch Mode" << endl;
} else if (param.stochastic) {
cout << "Stochastic Gradient. rho : " << rho << ", t0 : " << t0 << endl;
} else {
if (param.modeParam == AUTO) {
cout << "Online Dictionary Learning with no parameter " << endl;
} else if (param.modeParam == PARAM1) {
cout << "Online Dictionary Learning with parameters: " << t0 << " rho: " << rho << endl;
} else {
cout << "Online Dictionary Learning with exponential decay t0: " << t0 << " rho: " << rho << endl;
}
}
if (param.posD)
cout << "Positivity constraints on D activated" << endl;
if (param.posAlpha)
cout << "Positivity constraints on alpha activated" << endl;
if (param.modeD != L2) cout << "Sparse dictionaries, mode: " << param.modeD << ", gamma1: " << param.gamma1 << ", gamma2: " << param.gamma2 << endl;
cout << "mode Alpha " << param.mode << endl;
if (param.clean) cout << "Cleaning activated " << endl;
if (param.log && param.logName) {
cout << "log activated " << endl;
cerr << param.logName << endl;
}
if (param.mode == PENALTY && param.lambda==0 && param.lambda2 > 0 && !param.posAlpha)
cout << "L2 solver is used" << endl;
if (_itercount > 0)
cout << "Retraining from iteration " << _itercount << endl;
flush(cout);
}
const int M = X.n();
const int K = _k;
const int n = X.m();
const int L = param.mode == SPARSITY ? static_cast<int>(param.lambda) :
param.mode == PENALTY && param.lambda == 0 && param.lambda2 > 0 && !param.posAlpha ? K : MIN(n,K);
const int batchsize= param.batch ? M : MIN(_batchsize,M);
if (param.verbose) {
cout << "batch size: " << batchsize << endl;
cout << "L: " << L << endl;
cout << "lambda: " << param.lambda << endl;
cout << "mode: " << param.mode << endl;
flush(cout);
}
if (_D.m() != n || _D.n() != K)
_initialDict=false;
srandom(0);
Vector<T> col(n);
if (!_initialDict) {
_D.resize(n,K);
for (int i = 0; i<K; ++i) {
const int ind = random() % M;
Vector<T> d;
_D.refCol(i,d);
X.getData(col,ind);
d.copy(col);
}
_initialDict=true;
}
if (param.verbose) {
cout << "*****Online Dictionary Learning*****" << endl;
flush(cout);
}
Vector<T> tmp(n);
if (param.modeD != L2) {
for (int i = 0; i<K; ++i) {
Vector<T> d;
_D.refCol(i,d);
tmp.copy(d);
tmp.sparseProject(d,T(1.0),sparseD,param.gamma1,
param.gamma2,T(2.0),param.posD);
}
} else {
if (param.posD) _D.thrsPos();
_D.normalize();
}
int count=0;
int countPrev=0;
T scalt0 = abs<T>(t0);
if (_itercount == 0) {
_A.resize(K,K);
_A.setZeros();
_B.resize(n,K);
_B.setZeros();
if (!param.batch) {
_A.setDiag(scalt0);
_B.copy(_D);
_B.scal(scalt0);
}
}
//Matrix<T> G(K,K);
Matrix<T> Borig(n,K);
Matrix<T> Aorig(K,K);
Matrix<T> Bodd(n,K);
Matrix<T> Aodd(K,K);
Matrix<T> Beven(n,K);
Matrix<T> Aeven(K,K);
SpVector<T>* spcoeffT=new SpVector<T>[_NUM_THREADS];
Vector<T>* DtRT=new Vector<T>[_NUM_THREADS];
Vector<T>* XT=new Vector<T>[_NUM_THREADS];
Matrix<T>* BT=new Matrix<T>[_NUM_THREADS];
Matrix<T>* AT=new Matrix<T>[_NUM_THREADS];
Matrix<T>* GsT=new Matrix<T>[_NUM_THREADS];
Matrix<T>* GaT=new Matrix<T>[_NUM_THREADS];
Matrix<T>* invGsT=new Matrix<T>[_NUM_THREADS];
Matrix<T>* workT=new Matrix<T>[_NUM_THREADS];
Vector<T>* uT=new Vector<T>[_NUM_THREADS];
for (int i = 0; i<_NUM_THREADS; ++i) {
spcoeffT[i].resize(K);
DtRT[i].resize(K);
XT[i].resize(n);
BT[i].resize(n,K);
BT[i].setZeros();
AT[i].resize(K,K);
AT[i].setZeros();
GsT[i].resize(L,L);
GsT[i].setZeros();
invGsT[i].resize(L,L);
invGsT[i].setZeros();
GaT[i].resize(K,L);
GaT[i].setZeros();
workT[i].resize(K,3);
workT[i].setZeros();
uT[i].resize(L);
uT[i].setZeros();
}
Timer time, time2;
time.start();
srandom(0);
Vector<int> perm;
perm.randperm(M);
Aodd.setZeros();
Bodd.setZeros();
Aeven.setZeros();
Beven.setZeros();
Aorig.copy(_A);
Borig.copy(_B);
int JJ = param.iter < 0 ? 100000000 : param.iter;
bool even=true;
int last_written=-40;
int i;
for (i = 0; i<JJ; ++i) {
if (param.verbose) {
cout << "Iteration: " << i << endl;
flush(cout);
}
time.stop();
if (param.iter < 0 &&
time.getElapsed() > T(-param.iter)) break;
if (param.log) {
int seconds=static_cast<int>(floor(log(time.getElapsed())*5));
if (seconds > last_written) {
last_written++;
sprintf(buffer_string,"%s_%d.log",param.logName,
last_written+40);
writeLog(_D,T(time.getElapsed()),i,buffer_string);
fprintf(stderr,"\r%d",i);
}
}
time.start();
Matrix<T> G;
_D.XtX(G);
if (param.clean)
this->cleanDict(X,G,param.posD,
param.modeD,param.gamma1,param.gamma2);
G.addDiag(MAX(param.lambda2,1e-10));
int j;
for (j = 0; j<_NUM_THREADS; ++j) {
AT[j].setZeros();
BT[j].setZeros();
}
#pragma omp parallel for private(j)
for (j = 0; j<batchsize; ++j) {
#ifdef _OPENMP
int numT=omp_get_thread_num();
#else
int numT=0;
#endif
const int index=perm[(j+i*batchsize) % M];
Vector<T>& Xj = XT[numT];
SpVector<T>& spcoeffj = spcoeffT[numT];
Vector<T>& DtRj = DtRT[numT];
//X.refCol(index,Xj);
X.getData(Xj,index);
if (param.whiten) {
if (param.pattern) {
Vector<T> mean(4);
Xj.whiten(mean,param.pattern);
} else {
Xj.whiten(X.V());
}
}
_D.multTrans(Xj,DtRj);
Matrix<T>& Gs = GsT[numT];
Matrix<T>& Ga = GaT[numT];
Matrix<T>& invGs = invGsT[numT];
Matrix<T>& work= workT[numT];
Vector<T>& u = uT[numT];
Vector<int> ind;
Vector<T> coeffs_sparse;
spcoeffj.setL(L);
spcoeffj.refIndices(ind);
spcoeffj.refVal(coeffs_sparse);
T normX=Xj.nrm2sq();
coeffs_sparse.setZeros();
if (param.mode < SPARSITY) {
if (param.mode == PENALTY && param.lambda==0 && param.lambda2 > 0 && !param.posAlpha) {
Matrix<T>& GG = G;
u.set(0);
GG.conjugateGradient(DtRj,u,1e-4,2*K);
for (int k = 0; k<K; ++k) {
ind[k]=k;
coeffs_sparse[k]=u[k];
}
} else {
coreLARS2(DtRj,G,Gs,Ga,invGs,u,coeffs_sparse,ind,work,normX,param.mode,param.lambda,param.posAlpha);
}
} else {
if (param.mode == SPARSITY) {
coreORMPB(DtRj,G,ind,coeffs_sparse,normX,L,T(0.0),T(0.0));
} else if (param.mode==L2ERROR2) {
coreORMPB(DtRj,G,ind,coeffs_sparse,normX,L,param.lambda,T(0.0));
} else {
coreORMPB(DtRj,G,ind,coeffs_sparse,normX,L,T(0.0),param.lambda);
}
}
int count2=0;
for (int k = 0; k<L; ++k)
if (ind[k] == -1) {
break;
} else {
++count2;
}
sort(ind.rawX(),coeffs_sparse.rawX(),0,count2-1);
spcoeffj.setL(count2);
AT[numT].rank1Update(spcoeffj);
BT[numT].rank1Update(Xj,spcoeffj);
}
if (param.batch) {
_A.setZeros();
_B.setZeros();
for (j = 0; j<_NUM_THREADS; ++j) {
_A.add(AT[j]);
_B.add(BT[j]);
}
Vector<T> di, ai,bi;
Vector<T> newd(n);
for (j = 0; j<param.iter_updateD; ++j) {
for (int k = 0; k<K; ++k) {
if (_A[k*K+k] > 1e-6) {
_D.refCol(k,di);
_A.refCol(k,ai);
_B.refCol(k,bi);
_D.mult(ai,newd,T(-1.0));
newd.add(bi);
newd.scal(T(1.0)/_A[k*K+k]);
newd.add(di);
if (param.modeD != L2) {
newd.sparseProject(di,T(1.0),
sparseD,param.gamma1,
param.gamma2,T(2.0),param.posD);
} else {
if (param.posD) newd.thrsPos();
newd.normalize2();
di.copy(newd);
}
} else if (param.clean) {
_D.refCol(k,di);
di.setZeros();
}
}
}
} else if (param.stochastic) {
_A.setZeros();
_B.setZeros();
for (j = 0; j<_NUM_THREADS; ++j) {
_A.add(AT[j]);
_B.add(BT[j]);
}
_D.mult(_A,_B,false,false,T(-1.0),T(1.0));
T step_grad=rho/T(t0+batchsize*(i+1));
_D.add(_B,step_grad);
Vector<T> dj;
Vector<T> dnew(n);
if (param.modeD != L2) {
for (j = 0; j<K; ++j) {
_D.refCol(j,dj);
dnew.copy(dj);
dnew.sparseProject(dj,T(1.0),sparseD,param.gamma1,
param.gamma2,T(2.0),param.posD);
}
} else {
for (j = 0; j<K; ++j) {
_D.refCol(j,dj);
if (param.posD) dj.thrsPos();
dj.normalize2();
}
}
} else {
/// Dictionary Update
/// Check the epoch parity
int epoch = (((i+1) % M)*batchsize) / M;
if ((even && ((epoch % 2) == 1)) || (!even && ((epoch % 2) == 0))) {
Aodd.copy(Aeven);
Bodd.copy(Beven);
Aeven.setZeros();
Beven.setZeros();
count=countPrev;
countPrev=0;
even=!even;
}
int ii=_itercount+i;
int num_elem=MIN(2*M, ii < batchsize ? ii*batchsize :
batchsize*batchsize+ii-batchsize);
T scal2=T(T(1.0)/batchsize);
T scal;
int totaliter=_itercount+count;
if (param.modeParam == PARAM2) {
scal=param.rho;
} else if (param.modeParam == PARAM1) {
scal=MAX(0.95,pow(T(totaliter)/T(totaliter+1),-rho));
} else {
scal = T(_itercount+num_elem+1-
batchsize)/T(_itercount+num_elem+1);
}
Aeven.scal(scal);
Beven.scal(scal);
Aodd.scal(scal);
Bodd.scal(scal);
if ((_itercount > 0 && i*batchsize < M)
|| (_itercount == 0 && t0 != 0 &&
i*batchsize < 10000)) {
Aorig.scal(scal);
Borig.scal(scal);
_A.copy(Aorig);
_B.copy(Borig);
} else {
_A.setZeros();
_B.setZeros();
}
for (j = 0; j<_NUM_THREADS; ++j) {
Aeven.add(AT[j],scal2);
Beven.add(BT[j],scal2);
}
_A.add(Aodd);
_A.add(Aeven);
_B.add(Bodd);
_B.add(Beven);
++count;
++countPrev;
Vector<T> di, ai,bi;
Vector<T> newd(n);
for (j = 0; j<param.iter_updateD; ++j) {
for (int k = 0; k<K; ++k) {
if (_A[k*K+k] > 1e-6) {
_D.refCol(k,di);
_A.refCol(k,ai);
_B.refCol(k,bi);
_D.mult(ai,newd,T(-1.0));
newd.add(bi);
newd.scal(T(1.0)/_A[k*K+k]);
newd.add(di);
if (param.modeD != L2) {
newd.sparseProject(di,T(1.0),sparseD,
param.gamma1,param.gamma2,T(2.0),param.posD);
} else {
if (param.posD) newd.thrsPos();
newd.normalize2();
di.copy(newd);
}
} else if (param.clean &&
((_itercount+i)*batchsize) > 10000) {
_D.refCol(k,di);
di.setZeros();
}
}
}
}
}
_itercount += i;
if (param.verbose)
time.printElapsed();
delete[](spcoeffT);
delete[](DtRT);
delete[](AT);
delete[](BT);
delete[](GsT);
delete[](invGsT);
delete[](GaT);
delete[](uT);
delete[](XT);
delete[](workT);
};
/// Dump the current dictionary to a text log file (overwriting any previous content).
/// First line: elapsed time and iteration index; then one line per column of D
/// (D is stored column-major: element (j,i) is D[i*D.m()+j]).
/// @param D     dictionary matrix, D.m() rows x D.n() columns
/// @param time  elapsed time to record
/// @param iter  iteration index to record
/// @param name  path of the log file to write
template <typename T>
void writeLog(const Matrix<T>& D, const T time, int iter,
      char* name) {
   std::ofstream f;
   f.precision(12);
   f.flags(std::ios_base::scientific);
   f.open(name, ofstream::trunc);
   // '\n' instead of std::endl: endl flushes the stream on every line, which is
   // needless per-row overhead here; close() below flushes once at the end.
   f << time << " " << iter << "\n";
   for (int i = 0; i<D.n(); ++i) {
      for (int j = 0; j<D.m(); ++j) {
         f << D[i*D.m()+j] << " ";
      }
      f << "\n";
   }
   f << "\n";
   f.close();
};
/// Offline (batch) dictionary learning.
/// Repeats, for param.iter iterations (or, when param.iter < 0, until
/// -param.iter seconds have elapsed):
///   (1) sparse-code one mini-batch of columns of X on the current dictionary _D
///       using IST (coreIST in PENALTY mode, coreISTconstrained otherwise),
///       warm-started from the codes kept in `coeffs` from previous epochs;
///   (2) update the dictionary columns by block coordinate descent from the
///       running statistics A (gram matrix of the codes) and B (data/code
///       cross-products).
/// @param X      training data; columns are signals of size n = X.m()
/// @param param  learning parameters (mode, lambda, modeD, gamma1/2, rho, ...)
template <typename T>
void Trainer<T>::trainOffline(const Data<T>& X,
      const ParamDictLearn<T>& param) {
   // projection mode constant forwarded to sparseProject (2 for L1L2, 6 otherwise)
   int sparseD = param.modeD == L1L2 ? 2 : 6;
   int J = param.iter;
   int batch_size= _batchsize;
   int batchsize= _batchsize;
   int NUM_THREADS=init_omp(_NUM_THREADS);
   const int n = X.m();
   const int K = _k;   // number of dictionary atoms
   const int M = X.n();   // number of training signals
   cout << "*****Offline Dictionary Learning*****" << endl;
   fprintf(stderr,"num param iterD: %d\n",param.iter_updateD);
   cout << "batch size: " << _batchsize << endl;
   cout << "lambda: " << param.lambda << endl;
   cout << "X: " << n << " x " << M << endl;
   cout << "D: " << n << " x " << K << endl;
   flush(cout);
   // fixed seed: the run is reproducible (column picks and permutation below)
   srandom(0);
   Vector<T> col(n);
   // initialize the dictionary with randomly picked data columns
   // unless the caller already provided one
   if (!_initialDict) {
      _D.resize(n,K);
      for (int i = 0; i<K; ++i) {
         const int ind = random() % M;
         Vector<T> d;
         _D.refCol(i,d);
         X.getData(col,ind);
         d.copy(col);
      }
      _initialDict=true;
   }
   // project/normalize the initial dictionary columns onto the constraint set
   Vector<T> tmp(n);
   if (param.modeD != L2) {
      for (int i = 0; i<K; ++i) {
         Vector<T> d;
         _D.refCol(i,d);
         tmp.copy(d);
         tmp.sparseProject(d,T(1.0),sparseD,param.gamma1,
               param.gamma2,T(2.0),param.posD);
      }
   } else {
      if (param.posD) _D.thrsPos();
      _D.normalize();
   }
   Matrix<T> G(K,K);          // gram matrix D'D
   Matrix<T> coeffs(K,M);     // one code per training column, kept across epochs
   coeffs.setZeros();
   Matrix<T> B(n,K);          // accumulated data/code cross-products
   Matrix<T> A(K,K);          // accumulated code gram matrix
   // per-thread workspaces so the sparse-coding loop can run lock-free
   SpVector<T>* spcoeffT=new SpVector<T>[NUM_THREADS];
   Vector<T>* DtRT=new Vector<T>[NUM_THREADS];
   Vector<T>* coeffsoldT=new Vector<T>[NUM_THREADS];
   Matrix<T>* BT=new Matrix<T>[NUM_THREADS];
   Matrix<T>* AT=new Matrix<T>[NUM_THREADS];
   for (int i = 0; i<NUM_THREADS; ++i) {
      spcoeffT[i].resize(K);
      DtRT[i].resize(K);
      coeffsoldT[i].resize(K);
      BT[i].resize(n,K);
      BT[i].setZeros();
      AT[i].resize(K,K);
      AT[i].setZeros();
   }
   Timer time;
   time.start();
   srandom(0);   // NOTE(review): seed is reset a second time here — presumably intentional for reproducibility of randperm; verify
   Vector<int> perm;
   perm.randperm(M);   // random order in which training columns are visited
   // J < 0 means "time budget of -J seconds" -> loop with a large iteration cap
   int JJ = J < 0 ? 100000000 : J;
   // weights[ind] counts how many times column ind has been coded (with forgetting)
   Vector<T> weights(M);
   weights.setZeros();
   for (int i = 0; i<JJ; ++i) {
      if (J < 0 && time.getElapsed() > T(-J)) break;   // time budget exhausted
      _D.XtX(G);   // refresh the gram matrix for the new dictionary
      if (param.clean)
         this->cleanDict(X,G,param.posD,
               param.modeD,param.gamma1,param.gamma2);
      int j;
      // sparse-code the mini-batch in parallel; each thread touches only its
      // own AT/BT/workspace slots (indexed by numT)
#pragma omp parallel for private(j)
      for (j = 0; j<batch_size; ++j) {
#ifdef _OPENMP
         int numT=omp_get_thread_num();
#else
         int numT=0;
#endif
         const int ind=perm[(j+i*batch_size) % M];
         Vector<T> Xj, coeffj;
         SpVector<T>& spcoeffj = spcoeffT[numT];
         Vector<T>& DtRj = DtRT[numT];
         Vector<T>& oldcoeffj = coeffsoldT[numT];
         X.getData(Xj,ind);
         if (param.whiten) {
            if (param.pattern) {
               Vector<T> mean(4);
               Xj.whiten(mean,param.pattern);
            } else {
               Xj.whiten(X.V());
            }
         }
         // warm start from the code computed for this column in a previous epoch
         coeffs.refCol(ind,coeffj);
         oldcoeffj.copy(coeffj);
         // DtRj = D'Xj - G*coeffj : gradient direction at the warm start
         _D.multTrans(Xj,DtRj);
         coeffj.toSparse(spcoeffj);
         G.mult(spcoeffj,DtRj,T(-1.0),T(1.0));
         if (param.mode == PENALTY) {
            coreIST(G,DtRj,coeffj,param.lambda,200,T(1e-3));
         } else {
            T normX = Xj.nrm2sq();
            coreISTconstrained(G,DtRj,coeffj,normX,param.lambda,200,T(1e-3));
         }
         // replace this column's previous contribution to A and B by the new one
         oldcoeffj.toSparse(spcoeffj);
         AT[numT].rank1Update(spcoeffj,-weights[ind]);
         coeffj.toSparse(spcoeffj);
         AT[numT].rank1Update(spcoeffj);
         // NOTE(review): weights[ind]++ is unsynchronized; safe only if each
         // ind appears at most once per batch (holds when batch_size <= M) — confirm
         weights[ind]++;
         oldcoeffj.scal(weights[ind]);
         oldcoeffj.sub(coeffj);
         oldcoeffj.toSparse(spcoeffj);
         BT[numT].rank1Update(Xj,spcoeffj,T(-1.0));
      }
      // reduce the per-thread statistics and apply the forgetting factor `scal`
      A.setZeros();
      B.setZeros();
      T scal;
      int totaliter=i;
      int ii = i;
      int num_elem=MIN(2*M, ii < batchsize ? ii*batchsize :
            batchsize*batchsize+ii-batchsize);
      if (param.modeParam == PARAM2) {
         scal=param.rho;
      } else if (param.modeParam == PARAM1) {
         scal=MAX(0.95,pow(T(totaliter)/T(totaliter+1),-param.rho));
      } else {
         scal = T(num_elem+1-
               batchsize)/T(num_elem+1);
      }
      for (j = 0; j<NUM_THREADS; ++j) {
         A.add(AT[j]);
         B.add(BT[j]);
         AT[j].scal(scal);
         BT[j].scal(scal);
      }
      weights.scal(scal);
      // dictionary update: block coordinate descent on the columns of D,
      // di <- project( di + (bi - D*ai)/A[k][k] )
      Vector<T> di, ai,bi;
      Vector<T> newd(n);
      for (j = 0; j<param.iter_updateD; ++j) {
         for (int k = 0; k<K; ++k) {
            if (A[k*K+k] > 1e-6) {   // skip (or clean) atoms that are barely used
               _D.refCol(k,di);
               A.refCol(k,ai);
               B.refCol(k,bi);
               _D.mult(ai,newd,T(-1.0));
               newd.add(bi);
               newd.scal(T(1.0)/A[k*K+k]);
               newd.add(di);
               if (param.modeD != L2) {
                  newd.sparseProject(di,T(1.0),
                        sparseD,param.gamma1,
                        param.gamma2,T(2.0),param.posD);
               } else {
                  if (param.posD) newd.thrsPos();
                  newd.normalize2();
                  di.copy(newd);
               }
            } else if (param.clean) {
               _D.refCol(k,di);
               di.setZeros();
            }
         }
      }
   }
   // final gram refresh + optional cleaning of unused atoms
   _D.XtX(G);
   if (param.clean)
      this->cleanDict(X,G,param.posD,param.modeD,
            param.gamma1,param.gamma2);
   time.printElapsed();
   delete[](spcoeffT);
   delete[](DtRT);
   delete[](AT);
   delete[](BT);
   delete[](coeffsoldT);
}
#endif
|
11-omp-for.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
int main()
{
    /* Fill a lookup table with one period of the sine function.
     * Every iteration is independent, so the loop is split across
     * threads with a parallel-for worksharing construct. */
    double sinTable[256];
#pragma omp parallel for
    for(int i=0; i<256; ++i)
        sinTable[i] = sin(2 * M_PI * i / 256);
    /* the table is now initialized */
}
// RUN: clang -fopenmp -c -g -emit-llvm %s -o %t.1.bc
// RUN: opt -instnamer %t.1.bc -o %t.bc
// RUN: llvm-epp %t.bc -o %t.profile
// RUN: clang -fopenmp -v %t.epp.bc -o %t-exec -lepp-rt -lpthread -lm 2> %t.compile
// RUN: OMP_NUM_THREADS=4 %t-exec > %t.log
// RUN: llvm-epp -p=%t.profile %t.bc 2> %t.decode
// RUN: diff -aub %t.profile %s.txt
|
GB_unop__erf_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__erf_fp32_fp32)
// op(A') function: GB (_unop_tran__erf_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = erff (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = erff (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = erff (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ERF || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__erf_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: apply the operator to every entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = erff (Ax [p]) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                Cx [p] = erff (Ax [p]) ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__erf_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // one workspace per thread (or per bucket)
    const int64_t *restrict A_slice,    // how A's entries are split across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the generic transpose loop is textually included here; it applies the
    // per-entry GB_CAST_OP macro defined above (cij = erff (aij))
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ImageAlgorithms.h | /******************************************************************************
* SOFA, Simulation Open-Framework Architecture, development version *
* (c) 2006-2017 INRIA, USTL, UJF, CNRS, MGH *
* *
* This program is free software; you can redistribute it and/or modify it *
* under the terms of the GNU Lesser General Public License as published by *
* the Free Software Foundation; either version 2.1 of the License, or (at *
* your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, but WITHOUT *
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or *
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License *
* for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with this program. If not, see <http://www.gnu.org/licenses/>. *
*******************************************************************************
* Authors: The SOFA Team and external contributors (see Authors.txt) *
* *
* Contact information: contact@sofa-framework.org *
******************************************************************************/
#ifndef IMAGE_IMAGEALGORITHMS_H
#define IMAGE_IMAGEALGORITHMS_H
#include <sofa/defaulttype/Vec.h>
#include <sofa/helper/rmath.h>
#include <sofa/defaulttype/Mat.h>
#include <set>
#include <vector>
#if (defined(WIN32) || defined (_XBOX)) && (_MSC_VER < 1800) // for all version anterior to Visual Studio 2013
# include <float.h>
# define isnan(x) (_isnan(x))
#else
# include <cmath>
# define isnan(x) (std::isnan(x))
#endif
#include "ImageTypes.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/**
 * Move points to the centroid of their voronoi region (one Lloyd relaxation step).
 * A point is snapped to the closest voxel of its region that is not already
 * occupied by another point.
 * @param pos          point positions in image coordinates (updated in place)
 * @param voronoiIndex label of each point inside the voronoi image
 * @param voronoi      label image; labels may be cleared (set to 0) for occupied voxels
 * @return true if at least one point has moved
 */
template<typename real>
bool Lloyd (std::vector<sofa::defaulttype::Vec<3,real> >& pos,const std::vector<unsigned int>& voronoiIndex, cimg_library::CImg<unsigned int>& voronoi)
{
    typedef sofa::defaulttype::Vec<3,real> Coord;
    unsigned int nbp=pos.size();
    bool moved=false;
#ifdef _OPENMP
#pragma omp parallel for
#endif
    // MSVC's OpenMP requires a signed loop index, hence the WIN32 branch
#ifdef WIN32
    for(long int i=0; i<nbp; i++)
#else
    for (unsigned int i=0; i<nbp; i++)
#endif
    {
        // compute centroid of region voronoiIndex[i]
        Coord C,p;
        unsigned int count=0;
        bool valid=true;
        cimg_forXYZ(voronoi,x,y,z) if (voronoi(x,y,z)==voronoiIndex[i])
        {
            C+=Coord(x,y,z);
            count++;
        }
        if(!count) goto stop;   // empty region: nothing to move
        C/=(real)count;
        // check validity: the rounded centroid must lie inside the region
        // and must not coincide with another point
        for (unsigned int j=0; j<3; j++) p[j]=sofa::helper::round(C[j]);
        if (voronoi(p[0],p[1],p[2])!=voronoiIndex[i]) valid=false; // out of voronoi
        else { for (unsigned int j=0; j<nbp; j++) if(i!=j) if(sofa::helper::round(pos[j][0])==p[0]) if(sofa::helper::round(pos[j][1])==p[1]) if(sofa::helper::round(pos[j][2])==p[2]) valid=false; } // check occupancy
        while(!valid) // get closest unoccupied point in voronoi
        {
            real dmin=cimg_library::cimg::type<real>::max();
            cimg_forXYZ(voronoi,x,y,z) if (voronoi(x,y,z)==voronoiIndex[i])
            {
                real d2=(C-Coord(x,y,z)).norm2();
                if(dmin>d2) { dmin=d2; p=Coord(x,y,z); }
            }
            if(dmin==cimg_library::cimg::type<real>::max()) goto stop;// no point found
            bool val2=true; for (unsigned int j=0; j<nbp; j++) if(i!=j) if(sofa::helper::round(pos[j][0])==p[0]) if(sofa::helper::round(pos[j][1])==p[1]) if(sofa::helper::round(pos[j][2])==p[2]) val2=false; // check occupancy
            if(val2) valid=true;
            // NOTE(review): clearing voronoi here is a concurrent write when OpenMP
            // is enabled, as is the shared `moved` flag below — confirm thread-safety
            else voronoi(p[0],p[1],p[2])=0;
        }
        if(pos[i][0]!=p[0] || pos[i][1]!=p[1] || pos[i][2]!=p[2]) // set new position if different
        {
            pos[i] = p;
            moved=true;
        }
        stop: ;
    }
    return moved;
}
/**
 * solve the local eikonal system: || grad d || = 1/b given the 6 neighborhood values of d at [-dx,+dx,-dy,+dy,-dz,+dz] where [dx,dy,dz] is the voxel size
 * using upwind first order approximation (http://math.berkeley.edu/~sethian/2006/Publications/Book/2006/)
 * if(d<0) these values are not used (untreated voxels in the fast marching algorithm)
 * if values at [-2dx,+2dx,-2dy,+2dy,-2dz,+2dz] (@param d2) are provided (>0), use more accurate second order approximation (cf "A second-order fast marching eikonal solver", James Rickett and Sergey Fomel , 1999)
 * @return the updated distance, or -1 if no valid (already-treated) neighbor exists
 */
template<typename real>
real Eikonal(const sofa::defaulttype::Vec<6,real>& d,const sofa::defaulttype::Vec<6,real>& d2,const sofa::defaulttype::Vec<3,real>& voxelsize, const real b=(real)1.0)
{
    // get minimum distance in each direction and some precomputations
    // D[i] = -1 is the sentinel for "no usable neighbor along axis i";
    // S[i] is the corresponding upwind coefficient (2nd order uses 9/(4h^2))
    unsigned int nbValid=3;
    sofa::defaulttype::Vec<3,real> D(-1,-1,-1),D2,S;
    real B2=(real)1./(b*b),val;
    for (unsigned int i=0; i<3; i++)
    {
        if(d[2*i]>=0 && d2[2*i]>=0) { val=(4.0*d[2*i]-d2[2*i])/3.0; if(val<D[i] || D[i]==-1) { D[i]=val; S[i]=9.0/(4.0*voxelsize[i]*voxelsize[i]); } }
        else if(d[2*i+1]>=0 && d2[2*i+1]>=0) { val=(4.0*d[2*i+1]-d2[2*i+1])/3.0; if(val<D[i] || D[i]==-1) { D[i]=val; S[i]=9.0/(4.0*voxelsize[i]*voxelsize[i]); } }
        else if(d[2*i]>=0) { val=d[2*i]; if(val<D[i] || D[i]==-1) { D[i]=val; S[i]=1.0/(voxelsize[i]*voxelsize[i]); } }
        else if(d[2*i+1]>=0) { val=d[2*i+1]; if(val<D[i] || D[i]==-1) { D[i]=val; S[i]=1.0/(voxelsize[i]*voxelsize[i]); } }
        else nbValid--;
        D2[i]=D[i]*D[i];
    }
    // solve sum S_i*(U-D_i)^2 = 1/b^2
    // If the quadratic has no root >= all D_i, the largest D_i is discarded
    // (set to -1) and the system is re-solved with fewer directions.
    while(1)
    {
        if(nbValid==0) return -1; // no valid neighbor
        else if(nbValid==1) { for (unsigned int i=0; i<3; i++) if(D[i]>=0.) return (D[i]+voxelsize[i]/b); } // one valid neighbor -> simple 1D propagation
        else // two or three valid neighbors -> quadratic equation
        {
            real A=(real)0,B=(real)0,C=-B2;
            for (unsigned int i=0; i<3; i++) if(D[i]>=0.) { A+=S[i]; B+=D[i]*S[i]; C+=D2[i]*S[i]; } B*=(real)-2.;
            real Delta=B*B-4.0*A*C;
            if(Delta<0) { if(D[0]>D[1]) { if(D[0]>D[2]) D[0]=-1; else D[2]=-1; } else { if(D[1]>D[2]) D[1]=-1; else D[2]=-1; } nbValid--; }
            else
            {
                real U=0.5*(sqrt(Delta)-B)/A; // largest root since A>0
                // the upwind solution must dominate every contributing neighbor
                if(U>D[0]) if(U>D[1]) if(U>D[2]) return U;
                // one entry should be canceled
                if(D[0]>D[1]) { if(D[0]>D[2]) D[0]=-1; else D[2]=-1; }
                else { if(D[1]>D[2]) D[1]=-1; else D[2]=-1; } nbValid--;
            }
        }
    }
    return -1;
}
/**
 * Update geodesic distances in the image, given a bias distance function b(x).
 * This is equivalent to solving the eikonal equation || grad d(x) || = 1/b(x) with d(p)=0 at the seeds,
 * using the fast marching method presented by Sethian http://math.berkeley.edu/~sethian/2006/Publications/Book/2006/
 * distances should be initialized (<0 outside the object, >=0 inside, and = 0 for seeds)
 * @param trial      priority set of (distance, voxel) candidates, seeded by the caller
 * @param distances  updated in place with the geodesic distances
 * @param voronoi    updated in place with the label of the closest seed
 * returns @param voronoi and @param distances
 */
template<typename real,typename T>
void fastMarching (std::set<std::pair<real,sofa::defaulttype::Vec<3,int> > > &trial,cimg_library::CImg<real>& distances, cimg_library::CImg<unsigned int>& voronoi,
                   const sofa::defaulttype::Vec<3,real>& voxelsize, const cimg_library::CImg<T>* biasFactor=NULL)
{
    typedef sofa::defaulttype::Vec<3,int> iCoord;
    typedef sofa::defaulttype::Vec<6,real> Dist;
    typedef std::pair<real,iCoord > DistanceToPoint;
    const iCoord dim(distances.width(),distances.height(),distances.depth());
    // init
    sofa::defaulttype::Vec<6, iCoord > offset; // image coord offsets related to 6 neighbors
    for (unsigned int i=0; i<3; i++) { offset[2*i][i]=-1; offset[2*i+1][i]=1;}
    unsigned int nbOffset=offset.size();
    // alive = voxels whose distance is final; std::set ordering makes *begin()
    // the smallest tentative distance, i.e. the FMM front
    cimg_library::CImg<bool> alive(dim[0],dim[1],dim[2]); alive.fill(false);
    // FMM
    while( !trial.empty() )
    {
        DistanceToPoint top = *trial.begin();
        trial.erase(trial.begin());
        iCoord v = top.second;
        alive(v[0],v[1],v[2])=true;
        unsigned int vor = voronoi(v[0],v[1],v[2]);
        real b1; if(biasFactor) b1=(real)(*biasFactor)(v[0],v[1],v[2]); else b1=1.0;
        // update neighbors
        for (unsigned int i=0; i<nbOffset; i++)
        {
            // update distance on neighbors using their own neighbors
            iCoord v2 = v + offset[i];
            if(v2[0]>=0) if(v2[1]>=0) if(v2[2]>=0) if(v2[0]<dim[0]) if(v2[1]<dim[1]) if(v2[2]<dim[2])
                if(!alive(v2[0],v2[1],v2[2]))
                {
                    // gather the 1st-order (d) and 2nd-order (d2) alive neighbor
                    // values of v2 along each axis; -1 marks an unusable value
                    iCoord v3=v2;
                    Dist d,d2;
                    for (unsigned int j=0; j<3; j++)
                    {
                        v3[j]--; if(v3[j]>=0 && alive(v3[0],v3[1],v3[2])) d[2*j]= distances(v3[0],v3[1],v3[2]); else d[2*j]=-1;
                        v3[j]--; if(v3[j]>=0 && alive(v3[0],v3[1],v3[2])) d2[2*j]= distances(v3[0],v3[1],v3[2]); else d2[2*j]=-1;
                        v3[j]+=3; if(v3[j]<dim[j] && alive(v3[0],v3[1],v3[2])) d[2*j+1]= distances(v3[0],v3[1],v3[2]); else d[2*j+1]=-1;
                        v3[j]++; if(v3[j]<dim[j] && alive(v3[0],v3[1],v3[2])) d2[2*j+1]= distances(v3[0],v3[1],v3[2]); else d2[2*j+1]=-1;
                        v3[j]-=2;
                    }
                    real b2; if(biasFactor) b2=(real)(*biasFactor)(v2[0],v2[1],v2[2]); else b2=1.0;
                    real newDist = Eikonal<real>(d,d2,voxelsize,sofa::helper::rmin(b1,b2));
                    real oldDist = distances(v2[0],v2[1],v2[2]);
                    if(oldDist>newDist)
                    {
                        // re-key v2 in the priority set with its improved distance
                        typename std::set<DistanceToPoint>::iterator it=trial.find(DistanceToPoint(oldDist,v2)); if(it!=trial.end()) trial.erase(it);
                        voronoi(v2[0],v2[1],v2[2])=vor;
                        distances(v2[0],v2[1],v2[2])=newDist;
                        trial.insert( DistanceToPoint(newDist,v2) );
                    }
                }
        }
    }
}
/**
 * Update geodesic distances in the image given a bias distance function b(x).
 * This is equivalent to solving the eikonal equation || grad d(x) || = 1/b(x) with d(p)=0 at the seeds,
 * using Dijkstra's minimum path algorithm over the 26-neighborhood.
 * distances should be initialized (<0 outside the object, >=0 inside, and = 0 for seeds)
 * @param trial      priority set of (distance, voxel) candidates, seeded by the caller
 * returns @param voronoi and @param distances
 */
template<typename real,typename T>
void dijkstra (std::set<std::pair<real,sofa::defaulttype::Vec<3,int> > > &trial, cimg_library::CImg<real>& distances, cimg_library::CImg<unsigned int>& voronoi,
               const sofa::defaulttype::Vec<3,real>& voxelsize, const cimg_library::CImg<T>* biasFactor=NULL)
{
    typedef sofa::defaulttype::Vec<3,int> iCoord;
    typedef std::pair<real,iCoord > DistanceToPoint;
    const iCoord dim(distances.width(),distances.height(),distances.depth());
    //CImg<bool> alive(dim[0],dim[1],dim[2]); alive.fill(false);
    // init
    // 27 offsets = full 3x3x3 neighborhood (the center offset has edge length 0,
    // so it never improves a distance and is harmless to keep)
    sofa::defaulttype::Vec<27, iCoord > offset; // image coord offsets related to neighbors
    sofa::defaulttype::Vec<27, real > lD; // precomputed local distances (supposing that the transformation is linear)
    int count=0; for (int k=-1; k<=1; k++) for (int j=-1; j<=1; j++) for (int i=-1; i<=1; i++)
    {
        offset[count]= iCoord(i,j,k);
        lD[count]= (voxelsize.linearProduct(offset[count])).norm();
        count++;
    }
    unsigned int nbOffset=offset.size();
    // dijkstra: the std::set acts as the priority queue (smallest distance first)
    while( !trial.empty() )
    {
        DistanceToPoint top = *trial.begin();
        trial.erase(trial.begin());
        iCoord v = top.second;
        //alive(v[0],v[1],v[2])=true;
        unsigned int vor = voronoi(v[0],v[1],v[2]);
        real b1; if(biasFactor) b1=(real)(*biasFactor)(v[0],v[1],v[2]); else b1=1.0;
        for (unsigned int i=0; i<nbOffset; i++)
        {
            iCoord v2 = v + offset[i];
            if(v2[0]>=0) if(v2[1]>=0) if(v2[2]>=0) if(v2[0]<dim[0]) if(v2[1]<dim[1]) if(v2[2]<dim[2])
                //if(!alive(v2[0],v2[1],v2[2]))
            {
                // edge weight = euclidean step length divided by the local bias
                real b2; if(biasFactor) b2=(real)(*biasFactor)(v2[0],v2[1],v2[2]); else b2=1.0;
                real newDist = distances(v[0],v[1],v[2]) + lD[i]*1.0/sofa::helper::rmin(b1,b2);
                real oldDist = distances(v2[0],v2[1],v2[2]);
                if(oldDist>newDist)
                {
                    // re-key v2 in the priority set with its improved distance
                    typename std::set<DistanceToPoint>::iterator it=trial.find(DistanceToPoint(oldDist,v2)); if(it!=trial.end()) trial.erase(it);
                    voronoi(v2[0],v2[1],v2[2]) = vor;
                    distances(v2[0],v2[1],v2[2]) = newDist;
                    trial.insert( DistanceToPoint(newDist,v2) );
                }
            }
        }
    }
}
///@brief Compute the L2 norm of the 3-channel value stored at one pixel of a CImg.
/// pow(x,2) was replaced by explicit products: std::pow is a general
/// transcendental routine and is far slower than a multiply for small integer
/// exponents, and this helper runs once per pixel per raster-scan sweep.
template<typename real>
real norm(cimg_library::CImg<real>& distances, sofa::helper::fixed_array<int, 3>& coord)
{
    const real dx = distances(coord[0],coord[1],coord[2],0);
    const real dy = distances(coord[0],coord[1],coord[2],1);
    const real dz = distances(coord[0],coord[1],coord[2],2);
    return sqrt(dx*dx + dy*dy + dz*dz);
}
/// @brief Propagate the value stored at newCoord into oldCoord: each of the 3
/// distance channels receives the neighbor value plus the voxel offset along
/// that axis, scaled by the (minimum) bias when a bias image is provided.
/// The voronoi label is copied along with the distances.
template<typename real,typename T>
void replace(cimg_library::CImg<unsigned int>& voronoi, cimg_library::CImg<real>& distances, sofa::helper::fixed_array<int, 3>& oldCoord, sofa::helper::fixed_array<int, 3>& newCoord,
             sofa::helper::fixed_array<real, 3>& offset, const sofa::helper::fixed_array<real, 3>& voxelSize, const cimg_library::CImg<T>* bias)
{
    // propagation speed is limited by the smaller of the two bias values (1 when unbiased)
    real b=1.0;
    if(bias)
        b=std::min((*bias)(oldCoord[0], oldCoord[1], oldCoord[2]), (*bias)(newCoord[0], newCoord[1], newCoord[2]));
    for(unsigned int c=0; c<3; ++c)
        distances(oldCoord[0], oldCoord[1], oldCoord[2], c) = distances(newCoord[0], newCoord[1], newCoord[2], c) + offset[c]*voxelSize[c]/b;
    voronoi(oldCoord[0], oldCoord[1], oldCoord[2]) = voronoi(newCoord[0], newCoord[1], newCoord[2]);
}
/// @brief Relax the pixel coord[0] against its 9 candidate neighbors coord[1..9]:
/// whenever a neighbor currently holds a smaller distance norm, its value is
/// propagated into coord[0] (see replace) and the running minimum is updated.
template<typename real,typename T>
void update(cimg_library::CImg<real>& distances, cimg_library::CImg<unsigned int>& voronoi, sofa::helper::fixed_array< sofa::helper::fixed_array<int, 3>, 10 >& coord, sofa::helper::fixed_array< sofa::helper::fixed_array<real, 3>, 10 >& offset, const sofa::helper::fixed_array<real, 3>& voxelSize, const cimg_library::CImg<T>* bias)
{
    real best = norm(distances,coord[0]);
    for(int k=1; k<=9; ++k)
    {
        const real candidate = norm(distances,coord[k]);
        if(candidate < best)
        {
            replace(voronoi,distances,coord[0],coord[k],offset[k], voxelSize, bias);
            best = candidate;
        }
    }
}
/// @brief Compare two 3-channel distance images, pixel per pixel.
/// @return true if for each pixel the L2 error between the two 3-vectors is
/// bounded by @param tolerance, false otherwise.
template<typename real>
bool hasConverged(cimg_library::CImg<real>& previous, cimg_library::CImg<real>& current, SReal tolerance)
{
    bool result=true;
#ifdef _OPENMP
    // reduction(&&:result) makes the concurrent updates of `result` well-defined;
    // the previous unsynchronized writes from multiple threads were a data race.
#pragma omp parallel for reduction(&&:result)
#endif
    for(int i=0; i<previous.width(); ++i) for(int j=0; j<previous.height(); ++j) for(int k=0; k<previous.depth(); ++k)
    {
        // NaN in channel 0 marks voxels outside the object: they are skipped
        if( !isnan(previous(i,j,k,0)) && !isnan(current(i,j,k,0)) )
        {
            SReal error = sqrt( pow(previous(i,j,k,0)-current(i,j,k,0),2) +
                    pow(previous(i,j,k,1)-current(i,j,k,1),2) +
                    pow(previous(i,j,k,2)-current(i,j,k,2),2));
            if(error>tolerance)
                result = false;
        }
    }
    return result;
}
/// @brief Raster scan from left to right: each voxel is relaxed against its
/// 9 candidate neighbors in the x+1 plane.
template<typename real,typename T>
void left(cimg_library::CImg<unsigned int>& v, cimg_library::CImg<real>& d, const sofa::helper::fixed_array<real, 3>& vx, const cimg_library::CImg<T>* bias)
{
    typedef sofa::helper::fixed_array<int, 3> iVec3;
    typedef sofa::helper::fixed_array<real, 3> rVec3;
    for(int xi=d.width()-2; xi>=0; --xi)
    {
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for(int yi=d.height()-2; yi>=1; --yi)
        {
            for(int zi=d.depth()-2; zi>=1; --zi)
            {
                // c[0] is the current voxel; c[1..9] the candidates in the x+1 plane
                sofa::helper::fixed_array< iVec3, 10 > c;
                sofa::helper::fixed_array< rVec3, 10 > o;
                c[0] = iVec3(xi,yi,zi);
                o[0] = rVec3(0,0,0);
                int idx = 1;
                for(int a=-1; a<=1; ++a)
                    for(int b=-1; b<=1; ++b)
                    {
                        c[idx] = iVec3(xi+1,yi+a,zi+b);
                        o[idx] = rVec3(1,std::abs(a),std::abs(b));
                        ++idx;
                    }
                update(d,v,c,o,vx, bias);
            }
        }
    }
}
/// @brief Raster scan from right to left: each voxel is relaxed against its
/// 9 candidate neighbors in the x-1 plane.
template<typename real,typename T>
void right(cimg_library::CImg<unsigned int>& v, cimg_library::CImg<real>& d, const sofa::helper::fixed_array<real, 3>& vx, const cimg_library::CImg<T>* bias)
{
    typedef sofa::helper::fixed_array<int, 3> iVec3;
    typedef sofa::helper::fixed_array<real, 3> rVec3;
    for(int xi=1; xi<d.width(); ++xi)
    {
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for(int yi=1; yi<d.height()-1; ++yi)
        {
            for(int zi=1; zi<d.depth()-1; ++zi)
            {
                // c[0] is the current voxel; c[1..9] the candidates in the x-1 plane
                sofa::helper::fixed_array< iVec3, 10 > c;
                sofa::helper::fixed_array< rVec3, 10 > o;
                c[0] = iVec3(xi,yi,zi);
                o[0] = rVec3(0,0,0);
                int idx = 1;
                for(int a=-1; a<=1; ++a)
                    for(int b=-1; b<=1; ++b)
                    {
                        c[idx] = iVec3(xi-1,yi+a,zi+b);
                        o[idx] = rVec3(1,std::abs(a),std::abs(b));
                        ++idx;
                    }
                update(d,v,c,o,vx, bias);
            }
        }
    }
}
/// @brief Raster scan from down to up: each voxel is relaxed against its
/// 9 candidate neighbors in the y+1 plane.
template<typename real,typename T>
void down(cimg_library::CImg<unsigned int>& v, cimg_library::CImg<real>& d, const sofa::helper::fixed_array<real, 3>& vx, const cimg_library::CImg<T>* bias)
{
    typedef sofa::helper::fixed_array<int, 3> iVec3;
    typedef sofa::helper::fixed_array<real, 3> rVec3;
    for(int yi=d.height()-2; yi>=0; --yi)
    {
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for(int xi=d.width()-2; xi>=1; --xi)
        {
            for(int zi=d.depth()-2; zi>=1; --zi)
            {
                // c[0] is the current voxel; c[1..9] the candidates in the y+1 plane
                sofa::helper::fixed_array< iVec3, 10 > c;
                sofa::helper::fixed_array< rVec3, 10 > o;
                c[0] = iVec3(xi,yi,zi);
                o[0] = rVec3(0,0,0);
                int idx = 1;
                for(int a=-1; a<=1; ++a)
                    for(int b=-1; b<=1; ++b)
                    {
                        c[idx] = iVec3(xi+a,yi+1,zi+b);
                        o[idx] = rVec3(std::abs(a),1,std::abs(b));
                        ++idx;
                    }
                update(d,v,c,o,vx, bias);
            }
        }
    }
}
/// @brief Raster scan from up to down: each voxel is relaxed against its
/// 9 candidate neighbors in the y-1 plane.
template<typename real,typename T>
void up(cimg_library::CImg<unsigned int>& v, cimg_library::CImg<real>& d, const sofa::helper::fixed_array<real, 3>& vx, const cimg_library::CImg<T>* bias)
{
    typedef sofa::helper::fixed_array<int, 3> iVec3;
    typedef sofa::helper::fixed_array<real, 3> rVec3;
    for(int yi=1; yi<d.height(); ++yi)
    {
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for(int xi=1; xi<d.width()-1; ++xi)
        {
            for(int zi=1; zi<d.depth()-1; ++zi)
            {
                // c[0] is the current voxel; c[1..9] the candidates in the y-1 plane
                sofa::helper::fixed_array< iVec3, 10 > c;
                sofa::helper::fixed_array< rVec3, 10 > o;
                c[0] = iVec3(xi,yi,zi);
                o[0] = rVec3(0,0,0);
                int idx = 1;
                for(int a=-1; a<=1; ++a)
                    for(int b=-1; b<=1; ++b)
                    {
                        c[idx] = iVec3(xi+a,yi-1,zi+b);
                        o[idx] = rVec3(std::abs(a),1,std::abs(b));
                        ++idx;
                    }
                update(d,v,c,o,vx, bias);
            }
        }
    }
}
/// @brief Raster scan from backward to forward: each voxel is relaxed against
/// its 9 candidate neighbors in the z+1 plane.
template<typename real,typename T>
void backward(cimg_library::CImg<unsigned int>& v, cimg_library::CImg<real>& d, const sofa::helper::fixed_array<real, 3>& vx, const cimg_library::CImg<T>* bias)
{
    typedef sofa::helper::fixed_array<int, 3> iVec3;
    typedef sofa::helper::fixed_array<real, 3> rVec3;
    for(int zi=d.depth()-2; zi>=0; --zi)
    {
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for(int xi=d.width()-2; xi>=1; --xi)
        {
            for(int yi=d.height()-2; yi>=1; --yi)
            {
                // c[0] is the current voxel; c[1..9] the candidates in the z+1 plane
                sofa::helper::fixed_array< iVec3, 10 > c;
                sofa::helper::fixed_array< rVec3, 10 > o;
                c[0] = iVec3(xi,yi,zi);
                o[0] = rVec3(0,0,0);
                int idx = 1;
                for(int a=-1; a<=1; ++a)
                    for(int b=-1; b<=1; ++b)
                    {
                        c[idx] = iVec3(xi+a,yi+b,zi+1);
                        o[idx] = rVec3(std::abs(a),std::abs(b),1);
                        ++idx;
                    }
                update(d,v,c,o,vx,bias);
            }
        }
    }
}
/// @brief Raster scan from forward to backward: each voxel is relaxed against
/// its 9 candidate neighbors in the z-1 plane.
template<typename real,typename T>
void forward(cimg_library::CImg<unsigned int>& v, cimg_library::CImg<real>& d, const sofa::helper::fixed_array<real, 3>& vx, const cimg_library::CImg<T>* bias)
{
    typedef sofa::helper::fixed_array<int, 3> iVec3;
    typedef sofa::helper::fixed_array<real, 3> rVec3;
    for(int zi=1; zi<d.depth(); ++zi)
    {
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for(int xi=1; xi<d.width()-1; ++xi)
        {
            for(int yi=1; yi<d.height()-1; ++yi)
            {
                // c[0] is the current voxel; c[1..9] the candidates in the z-1 plane
                sofa::helper::fixed_array< iVec3, 10 > c;
                sofa::helper::fixed_array< rVec3, 10 > o;
                c[0] = iVec3(xi,yi,zi);
                o[0] = rVec3(0,0,0);
                int idx = 1;
                for(int a=-1; a<=1; ++a)
                    for(int b=-1; b<=1; ++b)
                    {
                        c[idx] = iVec3(xi+a,yi+b,zi-1);
                        o[idx] = rVec3(std::abs(a),std::abs(b),1);
                        ++idx;
                    }
                update(d,v,c,o,vx,bias);
            }
        }
    }
}
/// @brief Perform 6 raster scans of an image to fully cover it: one sweep per
/// axis direction (+x, -x, +y, -y, +z, -z), each propagating distances from the
/// plane behind the sweep direction.
template<typename real,typename T>
void rasterScan(cimg_library::CImg<unsigned int>& voronoi, cimg_library::CImg<real>& distances, const sofa::helper::fixed_array<real, 3>& voxelSize, const cimg_library::CImg<T>* biasFactor=NULL)
{
    // sweep order: x axis first, then y, then z
    right(voronoi, distances, voxelSize, biasFactor);
    left(voronoi, distances, voxelSize, biasFactor);
    down(voronoi, distances, voxelSize,biasFactor);
    up(voronoi, distances, voxelSize,biasFactor);
    forward(voronoi, distances, voxelSize,biasFactor);
    backward(voronoi, distances, voxelSize,biasFactor);
}
/// @brief Update geodesic distances in the image given a bias distance function b(x),
/// using the Parallel Marching Method (PMM) from Ofir Weber & al.
/// (https://ssl.lu.usi.ch/entityws/Allegati/pdf_pub5153.pdf).
/// The implementation works with openMP. Due to data dependency it may be quite slow
/// compared to a sequential algorithm because it requires many iterations to converge.
/// In specific cases (convex domain) it can be very efficient because only one
/// iteration is required. A GPU implementation is possible and is on the todo list.
/// @param maxIter upper bound on the number of raster-scan sweeps; should be carefully chosen to minimize computation time.
/// @param tolerance per-pixel convergence threshold; should be carefully chosen to minimize computation time.
/// @returns @param voronoi and @param distances
template<typename real,typename T>
void parallelMarching(cimg_library::CImg<real>& distances, cimg_library::CImg<unsigned int>& voronoi, const sofa::helper::fixed_array<real, 3>& voxelSize, const unsigned int maxIter=std::numeric_limits<unsigned int>::max(), const SReal tolerance=10, const cimg_library::CImg<T>* biasFactor=NULL)
{
    if(distances.width()<3 || distances.height()<3 || distances.depth()<3)
    {
        std::cerr << "ImageAlgorithms::parallelMarching : Boundary conditions are not treated so size (width,height,depth) should be >=3. (Work in Progress)" << std::endl;
        return;
    }
    // Build a 3-channel working image from distances: one travelled-distance
    // component per axis; NaN marks voxels outside the object.
    cimg_library::CImg<real> v_distances(distances.width(), distances.height(), distances.depth(), 3, std::numeric_limits<real>::max());
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for(int i=0; i<distances.width(); ++i) for(int j=0; j<distances.height(); ++j) for(int k=0; k<distances.depth(); ++k)
    {
        if( distances(i,j,k,0) < 0 )
            v_distances(i,j,k,0) = v_distances(i,j,k,1) = v_distances(i,j,k,2) = std::numeric_limits<real>::signaling_NaN();
        else
            v_distances(i,j,k,0) = v_distances(i,j,k,1) = v_distances(i,j,k,2) = distances(i,j,k,0);
    }
    // Perform raster scans until convergence or the iteration budget is spent.
    bool converged = false; unsigned int iter_count = 0; cimg_library::CImg<real> prev_distances;
    // BUGFIX: the condition previously used '||', which kept sweeping after
    // convergence and, with the default maxIter (UINT_MAX), never terminated.
    // Iterate while NOT converged AND below the iteration budget.
    while( !converged && (iter_count<maxIter) )
    {
        prev_distances = v_distances; iter_count++;
        rasterScan(voronoi, v_distances, voxelSize, biasFactor);
        converged = hasConverged(prev_distances, v_distances, tolerance);
    }
    // Fold the 3 per-axis channels back into the scalar distance image
    // (-1 restores the "outside the object" convention).
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for(int i=0; i<distances.width(); ++i) for(int j=0; j<distances.height(); ++j) for(int k=0; k<distances.depth(); ++k)
    {
        if( isnan(v_distances(i,j,k,0)) )
            distances(i,j,k,0) = -1.0;
        else
            distances(i,j,k,0) = std::sqrt( std::pow(v_distances(i,j,k,0),2) + std::pow(v_distances(i,j,k,1),2) + std::pow(v_distances(i,j,k,2),2) );
    }
}
/**
 * Initialize a null distance and a voronoi value (=point index) at a position given
 * in image coordinates, and append the voxel to the list of seed (=trial) points
 * consumed by the dijkstra / fast-marching algorithms.
 * Out-of-image positions and voxels with negative distance (outside the object)
 * are silently ignored.
 */
template<typename real>
void AddSeedPoint (std::set<std::pair<real,sofa::defaulttype::Vec<3,int> > >& trial, cimg_library::CImg<real>& distances, cimg_library::CImg<unsigned int>& voronoi,
const sofa::defaulttype::Vec<3,real>& pos, const unsigned int index)
{
typedef sofa::defaulttype::Vec<3,int> iCoord;
typedef std::pair<real,iCoord > DistanceToPoint;
// Round the real-valued position to the nearest voxel.
iCoord P;
P[0]=sofa::helper::round(pos[0]);
P[1]=sofa::helper::round(pos[1]);
P[2]=sofa::helper::round(pos[2]);
// Seed only voxels that lie inside the image and belong to the object (distance>=0).
if( distances.containsXYZC(P[0],P[1],P[2]) && distances(P[0],P[1],P[2])>=0 )
{
distances(P[0],P[1],P[2])=0;
voronoi(P[0],P[1],P[2])=index;
trial.insert( DistanceToPoint(0.,iCoord(P[0],P[1],P[2])) );
}
}
#undef isnan
#endif // IMAGEALGORITHMS_H
|
GB_unop__identity_uint16_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint16_int8
// op(A') function: GB_unop_tran__identity_uint16_int8
// C type: uint16_t
// A type: int8_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = (uint16_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (uint16_t) Ax [p] for all entries p, in parallel.  Ab selects which
// entries exist when A is held in bitmap form; Ab == NULL means all anz entries
// are present (sparse/hypersparse/full case).
GrB_Info GB_unop_apply__identity_uint16_int8
(
uint16_t *Cx, // Cx and Ax may be aliased
const int8_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab != NULL)
{
// bitmap case, no transpose; A->b already memcpy'd into C->b.
// Only entries flagged in the bitmap are cast and copied.
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (Ab [p])
{
int8_t aij = Ax [p] ;
Cx [p] = (uint16_t) aij ;
}
}
}
else
{
// all entries present: either a raw memcpy (identity op, no typecast)
// or an element-wise cast of every entry
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t aij = Ax [p] ;
Cx [p] = (uint16_t) aij ;
}
#endif
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The transpose loop itself lives in the shared template GB_unop_transpose.c,
// textually included below; it is specialized for this op/type pair by the
// GB_* macros defined at the top of this file.
GrB_Info GB_unop_tran__identity_uint16_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces, // scratch arrays used by GB_unop_transpose.c
const int64_t *GB_RESTRICT A_slice, // partition of A's work — see GB_unop_transpose.c
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// this op/type combination was compiled out via the GxB_NO_* control macros
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pdgssvx.c | /*! \file
Copyright (c) 2003, The Regents of the University of California, through
Lawrence Berkeley National Laboratory (subject to receipt of any required
approvals from U.S. Dept. of Energy)
All rights reserved.
The source code is distributed under BSD license, see the file License.txt
at the top-level directory.
*/
/*! @file
* \brief Solves a system of linear equations A*X=B
*
* <pre>
* -- Distributed SuperLU routine (version 6.0) --
* Lawrence Berkeley National Lab, Univ. of California Berkeley.
* November 1, 2007
* October 22, 2012
* October 1, 2014
* April 5, 2015
* December 31, 2015 version 4.3
* December 31, 2016 version 5.1.3
* April 10, 2018 version 5.3
* September 18, 2018 version 6.0
* </pre>
*/
#include <math.h>
#include "superlu_ddefs.h"
/*! \brief
*
* <pre>
* Purpose
* =======
*
* PDGSSVX solves a system of linear equations A*X=B,
* by using Gaussian elimination with "static pivoting" to
* compute the LU factorization of A.
*
* Static pivoting is a technique that combines the numerical stability
* of partial pivoting with the scalability of Cholesky (no pivoting),
* to run accurately and efficiently on large numbers of processors.
* See our paper at http://www.nersc.gov/~xiaoye/SuperLU/ for a detailed
* description of the parallel algorithms.
*
* The input matrices A and B are distributed by block rows.
* Here is a graphical illustration (0-based indexing):
*
* A B
* 0 --------------- ------
* | | | |
* | | P0 | |
* | | | |
* --------------- ------
* - fst_row->| | | |
* | | | | |
* m_loc | | P1 | |
* | | | | |
* - | | | |
* --------------- ------
* | . | |. |
* | . | |. |
* | . | |. |
* --------------- ------
*
* where, fst_row is the row number of the first row,
* m_loc is the number of rows local to this processor
* These are defined in the 'SuperMatrix' structure, see supermatrix.h.
*
*
* Here are the options for using this code:
*
* 1. Independent of all the other options specified below, the
* user must supply
*
* - B, the matrix of right-hand sides, distributed by block rows,
* and its dimensions ldb (local) and nrhs (global)
* - grid, a structure describing the 2D processor mesh
* - options->IterRefine, which determines whether or not to
* improve the accuracy of the computed solution using
* iterative refinement
*
* On output, B is overwritten with the solution X.
*
* 2. Depending on options->Fact, the user has four options
* for solving A*X=B. The standard option is for factoring
* A "from scratch". (The other options, described below,
* are used when A is sufficiently similar to a previously
* solved problem to save time by reusing part or all of
* the previous factorization.)
*
* - options->Fact = DOFACT: A is factored "from scratch"
*
* In this case the user must also supply
*
* o A, the input matrix
*
* as well as the following options to determine what matrix to
* factorize.
*
* o options->Equil, to specify how to scale the rows and columns
* of A to "equilibrate" it (to try to reduce its
* condition number and so improve the
* accuracy of the computed solution)
*
* o options->RowPerm, to specify how to permute the rows of A
* (typically to control numerical stability)
*
* o options->ColPerm, to specify how to permute the columns of A
* (typically to control fill-in and enhance
* parallelism during factorization)
*
* o options->ReplaceTinyPivot, to specify how to deal with tiny
* pivots encountered during factorization
* (to control numerical stability)
*
* The outputs returned include
*
* o ScalePermstruct, modified to describe how the input matrix A
* was equilibrated and permuted:
* . ScalePermstruct->DiagScale, indicates whether the rows and/or
* columns of A were scaled
* . ScalePermstruct->R, array of row scale factors
* . ScalePermstruct->C, array of column scale factors
* . ScalePermstruct->perm_r, row permutation vector
* . ScalePermstruct->perm_c, column permutation vector
*
* (part of ScalePermstruct may also need to be supplied on input,
* depending on options->RowPerm and options->ColPerm as described
* later).
*
* o A, the input matrix A overwritten by the scaled and permuted
* matrix diag(R)*A*diag(C)*Pc^T, where
 *                        Pc is the column permutation matrix determined by
* ScalePermstruct->perm_c
* diag(R) and diag(C) are diagonal scaling matrices determined
* by ScalePermstruct->DiagScale, ScalePermstruct->R and
* ScalePermstruct->C
*
* o LUstruct, which contains the L and U factorization of A1 where
*
* A1 = Pc*Pr*diag(R)*A*diag(C)*Pc^T = L*U
*
* (Note that A1 = Pc*Pr*Aout, where Aout is the matrix stored
* in A on output.)
*
* 3. The second value of options->Fact assumes that a matrix with the same
* sparsity pattern as A has already been factored:
*
* - options->Fact = SamePattern: A is factored, assuming that it has
* the same nonzero pattern as a previously factored matrix. In
* this case the algorithm saves time by reusing the previously
* computed column permutation vector stored in
* ScalePermstruct->perm_c and the "elimination tree" of A
* stored in LUstruct->etree
*
* In this case the user must still specify the following options
* as before:
*
* o options->Equil
* o options->RowPerm
* o options->ReplaceTinyPivot
*
* but not options->ColPerm, whose value is ignored. This is because the
* previous column permutation from ScalePermstruct->perm_c is used as
* input. The user must also supply
*
* o A, the input matrix
* o ScalePermstruct->perm_c, the column permutation
* o LUstruct->etree, the elimination tree
*
* The outputs returned include
*
* o A, the input matrix A overwritten by the scaled and permuted
* matrix as described above
* o ScalePermstruct, modified to describe how the input matrix A was
* equilibrated and row permuted
* o LUstruct, modified to contain the new L and U factors
*
* 4. The third value of options->Fact assumes that a matrix B with the same
* sparsity pattern as A has already been factored, and where the
* row permutation of B can be reused for A. This is useful when A and B
* have similar numerical values, so that the same row permutation
* will make both factorizations numerically stable. This lets us reuse
* all of the previously computed structure of L and U.
*
* - options->Fact = SamePattern_SameRowPerm: A is factored,
* assuming not only the same nonzero pattern as the previously
* factored matrix B, but reusing B's row permutation.
*
* In this case the user must still specify the following options
* as before:
*
* o options->Equil
* o options->ReplaceTinyPivot
*
* but not options->RowPerm or options->ColPerm, whose values are
* ignored. This is because the permutations from ScalePermstruct->perm_r
* and ScalePermstruct->perm_c are used as input.
*
* The user must also supply
*
* o A, the input matrix
* o ScalePermstruct->DiagScale, how the previous matrix was row
* and/or column scaled
* o ScalePermstruct->R, the row scalings of the previous matrix,
* if any
 *           o ScalePermstruct->C, the column scalings of the previous matrix,
* if any
* o ScalePermstruct->perm_r, the row permutation of the previous
* matrix
* o ScalePermstruct->perm_c, the column permutation of the previous
* matrix
* o all of LUstruct, the previously computed information about
* L and U (the actual numerical values of L and U
* stored in LUstruct->Llu are ignored)
*
* The outputs returned include
*
* o A, the input matrix A overwritten by the scaled and permuted
* matrix as described above
* o ScalePermstruct, modified to describe how the input matrix A was
* equilibrated (thus ScalePermstruct->DiagScale,
* R and C may be modified)
* o LUstruct, modified to contain the new L and U factors
*
* 5. The fourth and last value of options->Fact assumes that A is
* identical to a matrix that has already been factored on a previous
* call, and reuses its entire LU factorization
*
* - options->Fact = Factored: A is identical to a previously
* factorized matrix, so the entire previous factorization
* can be reused.
*
* In this case all the other options mentioned above are ignored
* (options->Equil, options->RowPerm, options->ColPerm,
* options->ReplaceTinyPivot)
*
* The user must also supply
*
* o A, the unfactored matrix, only in the case that iterative
* refinement is to be done (specifically A must be the output
* A from the previous call, so that it has been scaled and permuted)
* o all of ScalePermstruct
* o all of LUstruct, including the actual numerical values of
* L and U
*
* all of which are unmodified on output.
*
* Arguments
* =========
*
* options (input) superlu_dist_options_t* (global)
* The structure defines the input parameters to control
* how the LU decomposition will be performed.
* The following fields should be defined for this structure:
*
* o Fact (fact_t)
* Specifies whether or not the factored form of the matrix
* A is supplied on entry, and if not, how the matrix A should
* be factorized based on the previous history.
*
* = DOFACT: The matrix A will be factorized from scratch.
* Inputs: A
* options->Equil, RowPerm, ColPerm, ReplaceTinyPivot
* Outputs: modified A
* (possibly row and/or column scaled and/or
* permuted)
* all of ScalePermstruct
* all of LUstruct
*
* = SamePattern: the matrix A will be factorized assuming
* that a factorization of a matrix with the same sparsity
* pattern was performed prior to this one. Therefore, this
* factorization will reuse column permutation vector
* ScalePermstruct->perm_c and the elimination tree
* LUstruct->etree
* Inputs: A
* options->Equil, RowPerm, ReplaceTinyPivot
* ScalePermstruct->perm_c
* LUstruct->etree
* Outputs: modified A
* (possibly row and/or column scaled and/or
* permuted)
* rest of ScalePermstruct (DiagScale, R, C, perm_r)
* rest of LUstruct (GLU_persist, Llu)
*
* = SamePattern_SameRowPerm: the matrix A will be factorized
* assuming that a factorization of a matrix with the same
* sparsity pattern and similar numerical values was performed
* prior to this one. Therefore, this factorization will reuse
* both row and column scaling factors R and C, and the
* both row and column permutation vectors perm_r and perm_c,
* distributed data structure set up from the previous symbolic
* factorization.
* Inputs: A
* options->Equil, ReplaceTinyPivot
* all of ScalePermstruct
* all of LUstruct
* Outputs: modified A
* (possibly row and/or column scaled and/or
* permuted)
* modified LUstruct->Llu
* = FACTORED: the matrix A is already factored.
* Inputs: all of ScalePermstruct
* all of LUstruct
*
* o Equil (yes_no_t)
* Specifies whether to equilibrate the system.
* = NO: no equilibration.
* = YES: scaling factors are computed to equilibrate the system:
* diag(R)*A*diag(C)*inv(diag(C))*X = diag(R)*B.
* Whether or not the system will be equilibrated depends
* on the scaling of the matrix A, but if equilibration is
* used, A is overwritten by diag(R)*A*diag(C) and B by
* diag(R)*B.
*
* o RowPerm (rowperm_t)
* Specifies how to permute rows of the matrix A.
* = NATURAL: use the natural ordering.
* = LargeDiag_MC64: use the Duff/Koster algorithm to permute rows
* of the original matrix to make the diagonal large
* relative to the off-diagonal.
* = LargeDiag_APWM: use the parallel approximate-weight perfect
* matching to permute rows of the original matrix
* to make the diagonal large relative to the
* off-diagonal.
* = MY_PERMR: use the ordering given in ScalePermstruct->perm_r
* input by the user.
*
* o ColPerm (colperm_t)
* Specifies what type of column permutation to use to reduce fill.
* = NATURAL: natural ordering.
* = MMD_AT_PLUS_A: minimum degree ordering on structure of A'+A.
* = MMD_ATA: minimum degree ordering on structure of A'*A.
* = MY_PERMC: the ordering given in ScalePermstruct->perm_c.
*
* o ReplaceTinyPivot (yes_no_t)
* = NO: do not modify pivots
* = YES: replace tiny pivots by sqrt(epsilon)*norm(A) during
* LU factorization.
*
* o IterRefine (IterRefine_t)
* Specifies how to perform iterative refinement.
* = NO: no iterative refinement.
* = SLU_DOUBLE: accumulate residual in double precision.
* = SLU_EXTRA: accumulate residual in extra precision.
*
* NOTE: all options must be identical on all processes when
* calling this routine.
*
* A (input/output) SuperMatrix* (local)
* On entry, matrix A in A*X=B, of dimension (A->nrow, A->ncol).
* The number of linear equations is A->nrow. The type of A must be:
* Stype = SLU_NR_loc; Dtype = SLU_D; Mtype = SLU_GE.
* That is, A is stored in distributed compressed row format.
* See supermatrix.h for the definition of 'SuperMatrix'.
* This routine only handles square A, however, the LU factorization
* routine PDGSTRF can factorize rectangular matrices.
 *         On exit, A may be overwritten by diag(R)*A*diag(C)*Pc^T,
* depending on ScalePermstruct->DiagScale and options->ColPerm:
* if ScalePermstruct->DiagScale != NOEQUIL, A is overwritten by
* diag(R)*A*diag(C).
* if options->ColPerm != NATURAL, A is further overwritten by
* diag(R)*A*diag(C)*Pc^T.
 *         If all the above conditions are true, the LU decomposition is
* performed on the matrix Pc*Pr*diag(R)*A*diag(C)*Pc^T.
*
* ScalePermstruct (input/output) ScalePermstruct_t* (global)
* The data structure to store the scaling and permutation vectors
* describing the transformations performed to the matrix A.
* It contains the following fields:
*
* o DiagScale (DiagScale_t)
* Specifies the form of equilibration that was done.
* = NOEQUIL: no equilibration.
* = ROW: row equilibration, i.e., A was premultiplied by
* diag(R).
* = COL: Column equilibration, i.e., A was postmultiplied
* by diag(C).
* = BOTH: both row and column equilibration, i.e., A was
* replaced by diag(R)*A*diag(C).
* If options->Fact = FACTORED or SamePattern_SameRowPerm,
* DiagScale is an input argument; otherwise it is an output
* argument.
*
* o perm_r (int*)
* Row permutation vector, which defines the permutation matrix Pr;
* perm_r[i] = j means row i of A is in position j in Pr*A.
* If options->RowPerm = MY_PERMR, or
* options->Fact = SamePattern_SameRowPerm, perm_r is an
* input argument; otherwise it is an output argument.
*
* o perm_c (int*)
* Column permutation vector, which defines the
* permutation matrix Pc; perm_c[i] = j means column i of A is
* in position j in A*Pc.
* If options->ColPerm = MY_PERMC or options->Fact = SamePattern
* or options->Fact = SamePattern_SameRowPerm, perm_c is an
* input argument; otherwise, it is an output argument.
* On exit, perm_c may be overwritten by the product of the input
* perm_c and a permutation that postorders the elimination tree
* of Pc*A'*A*Pc'; perm_c is not changed if the elimination tree
* is already in postorder.
*
* o R (double*) dimension (A->nrow)
* The row scale factors for A.
* If DiagScale = ROW or BOTH, A is multiplied on the left by
* diag(R).
* If DiagScale = NOEQUIL or COL, R is not defined.
* If options->Fact = FACTORED or SamePattern_SameRowPerm, R is
* an input argument; otherwise, R is an output argument.
*
* o C (double*) dimension (A->ncol)
* The column scale factors for A.
* If DiagScale = COL or BOTH, A is multiplied on the right by
* diag(C).
* If DiagScale = NOEQUIL or ROW, C is not defined.
* If options->Fact = FACTORED or SamePattern_SameRowPerm, C is
* an input argument; otherwise, C is an output argument.
*
* B (input/output) double* (local)
* On entry, the right-hand side matrix of dimension (m_loc, nrhs),
* where, m_loc is the number of rows stored locally on my
* process and is defined in the data structure of matrix A.
* On exit, the solution matrix if info = 0;
*
* ldb (input) int (local)
* The leading dimension of matrix B.
*
* nrhs (input) int (global)
* The number of right-hand sides.
* If nrhs = 0, only LU decomposition is performed, the forward
* and back substitutions are skipped.
*
* grid (input) gridinfo_t* (global)
* The 2D process mesh. It contains the MPI communicator, the number
* of process rows (NPROW), the number of process columns (NPCOL),
* and my process rank. It is an input argument to all the
* parallel routines.
* Grid can be initialized by subroutine SUPERLU_GRIDINIT.
* See superlu_ddefs.h for the definition of 'gridinfo_t'.
*
* LUstruct (input/output) LUstruct_t*
* The data structures to store the distributed L and U factors.
* It contains the following fields:
*
* o etree (int*) dimension (A->ncol) (global)
* Elimination tree of Pc*(A'+A)*Pc' or Pc*A'*A*Pc'.
* It is computed in sp_colorder() during the first factorization,
* and is reused in the subsequent factorizations of the matrices
* with the same nonzero pattern.
* On exit of sp_colorder(), the columns of A are permuted so that
* the etree is in a certain postorder. This postorder is reflected
* in ScalePermstruct->perm_c.
* NOTE:
* Etree is a vector of parent pointers for a forest whose vertices
* are the integers 0 to A->ncol-1; etree[root]==A->ncol.
*
* o Glu_persist (Glu_persist_t*) (global)
* Global data structure (xsup, supno) replicated on all processes,
* describing the supernode partition in the factored matrices
* L and U:
* xsup[s] is the leading column of the s-th supernode,
* supno[i] is the supernode number to which column i belongs.
*
* o Llu (LocalLU_t*) (local)
* The distributed data structures to store L and U factors.
* See superlu_ddefs.h for the definition of 'LocalLU_t'.
*
* SOLVEstruct (input/output) SOLVEstruct_t*
* The data structure to hold the communication pattern used
* in the phases of triangular solution and iterative refinement.
* This pattern should be initialized only once for repeated solutions.
* If options->SolveInitialized = YES, it is an input argument.
* If options->SolveInitialized = NO and nrhs != 0, it is an output
* argument. See superlu_ddefs.h for the definition of 'SOLVEstruct_t'.
*
* berr (output) double*, dimension (nrhs) (global)
* The componentwise relative backward error of each solution
* vector X(j) (i.e., the smallest relative change in
* any element of A or B that makes X(j) an exact solution).
*
* stat (output) SuperLUStat_t*
* Record the statistics on runtime and floating-point operation count.
* See util.h for the definition of 'SuperLUStat_t'.
*
* info (output) int*
* = 0: successful exit
* > 0: if info = i, and i is
* <= A->ncol: U(i,i) is exactly zero. The factorization has
* been completed, but the factor U is exactly singular,
* so the solution could not be computed.
* > A->ncol: number of bytes allocated when memory allocation
* failure occurred, plus A->ncol.
*
* See superlu_ddefs.h for the definitions of various data types.
* </pre>
*/
void
pdgssvx(superlu_dist_options_t *options, SuperMatrix *A,
ScalePermstruct_t *ScalePermstruct,
double B[], int ldb, int nrhs, gridinfo_t *grid,
LUstruct_t *LUstruct, SOLVEstruct_t *SOLVEstruct, double *berr,
SuperLUStat_t *stat, int *info)
{
NRformat_loc *Astore;
SuperMatrix GA; /* Global A in NC format */
NCformat *GAstore;
double *a_GA;
SuperMatrix GAC; /* Global A in NCP format (add n end pointers) */
NCPformat *GACstore;
Glu_persist_t *Glu_persist = LUstruct->Glu_persist;
Glu_freeable_t *Glu_freeable;
/* The nonzero structures of L and U factors, which are
                           replicated on all processors.
(lsub, xlsub) contains the compressed subscript of
supernodes in L.
(usub, xusub) contains the compressed subscript of
nonzero segments in U.
If options->Fact != SamePattern_SameRowPerm, they are
computed by SYMBFACT routine, and then used by PDDISTRIBUTE
routine. They will be freed after PDDISTRIBUTE routine.
If options->Fact == SamePattern_SameRowPerm, these
structures are not used. */
fact_t Fact;
double *a;
int_t *colptr, *rowind;
int_t *perm_r; /* row permutations from partial pivoting */
int_t *perm_c; /* column permutation vector */
int_t *etree; /* elimination tree */
int_t *rowptr, *colind; /* Local A in NR*/
int_t colequ, Equil, factored, job, notran, rowequ, need_value;
int_t i, iinfo, j, irow, m, n, nnz, permc_spec;
int_t nnz_loc, m_loc, fst_row, icol;
int iam,iam_g;
int ldx; /* LDA for matrix X (local). */
char equed[1], norm[1];
double *C, *R, *C1, *R1, amax, anorm, colcnd, rowcnd;
double *X, *b_col, *b_work, *x_col;
double t;
float GA_mem_use = 0.0; /* memory usage by global A */
float dist_mem_use = 0.0; /* memory usage during distribution */
superlu_dist_mem_usage_t num_mem_usage, symb_mem_usage;
int64_t nnzLU;
int_t nnz_tot;
double *nzval_a;
double asum,asum_tot,lsum,lsum_tot;
int_t nsupers,nsupers_j;
int_t lk,k,knsupc,nsupr;
int_t *lsub,*xsup;
double *lusup;
#if ( PRNTlevel>= 2 )
double dmin, dsum, dprod;
#endif
LUstruct->dt = 'd';
/* Structures needed for parallel symbolic factorization */
int_t *sizes, *fstVtxSep, parSymbFact;
int noDomains, nprocs_num;
MPI_Comm symb_comm; /* communicator for symbolic factorization */
int col, key; /* parameters for creating a new communicator */
Pslu_freeable_t Pslu_freeable;
float flinfo;
/* Initialization. */
m = A->nrow;
n = A->ncol;
Astore = (NRformat_loc *) A->Store;
nnz_loc = Astore->nnz_loc;
m_loc = Astore->m_loc;
fst_row = Astore->fst_row;
a = (double *) Astore->nzval;
rowptr = Astore->rowptr;
colind = Astore->colind;
sizes = NULL;
fstVtxSep = NULL;
symb_comm = MPI_COMM_NULL;
num_mem_usage.for_lu = num_mem_usage.total = 0.0;
symb_mem_usage.for_lu = symb_mem_usage.total = 0.0;
/* Test the input parameters. */
*info = 0;
Fact = options->Fact;
if ( Fact < 0 || Fact > FACTORED )
*info = -1;
else if ( options->RowPerm < 0 || options->RowPerm > MY_PERMR )
*info = -1;
else if ( options->ColPerm < 0 || options->ColPerm > MY_PERMC )
*info = -1;
else if ( options->IterRefine < 0 || options->IterRefine > SLU_EXTRA )
*info = -1;
else if ( options->IterRefine == SLU_EXTRA ) {
*info = -1;
printf("ERROR: Extra precise iterative refinement yet to support.\n");
} else if ( A->nrow != A->ncol || A->nrow < 0 || A->Stype != SLU_NR_loc
|| A->Dtype != SLU_D || A->Mtype != SLU_GE )
*info = -2;
else if ( ldb < m_loc )
*info = -5;
else if ( nrhs < 0 )
*info = -6;
if ( sp_ienv_dist(2) > sp_ienv_dist(3) ) {
*info = 1;
printf("ERROR: Relaxation (NREL) cannot be larger than max. supernode size (NSUP).\n"
"\t-> Check parameter setting in sp_ienv_dist.c to correct error.\n");
}
if ( *info ) {
i = -(*info);
pxerr_dist("pdgssvx", grid, -*info);
return;
}
factored = (Fact == FACTORED);
Equil = (!factored && options->Equil == YES);
notran = (options->Trans == NOTRANS);
parSymbFact = options->ParSymbFact;
iam = grid->iam;
job = 5;
if ( factored || (Fact == SamePattern_SameRowPerm && Equil) ) {
rowequ = (ScalePermstruct->DiagScale == ROW) ||
(ScalePermstruct->DiagScale == BOTH);
colequ = (ScalePermstruct->DiagScale == COL) ||
(ScalePermstruct->DiagScale == BOTH);
} else rowequ = colequ = FALSE;
/* The following arrays are replicated on all processes. */
perm_r = ScalePermstruct->perm_r;
perm_c = ScalePermstruct->perm_c;
etree = LUstruct->etree;
R = ScalePermstruct->R;
C = ScalePermstruct->C;
/********/
#if ( DEBUGlevel>=1 )
CHECK_MALLOC(iam, "Enter pdgssvx()");
#endif
/* Not factored & ask for equilibration */
if ( Equil && Fact != SamePattern_SameRowPerm ) {
/* Allocate storage if not done so before. */
switch ( ScalePermstruct->DiagScale ) {
case NOEQUIL:
if ( !(R = (double *) doubleMalloc_dist(m)) )
ABORT("Malloc fails for R[].");
if ( !(C = (double *) doubleMalloc_dist(n)) )
ABORT("Malloc fails for C[].");
ScalePermstruct->R = R;
ScalePermstruct->C = C;
break;
case ROW:
if ( !(C = (double *) doubleMalloc_dist(n)) )
ABORT("Malloc fails for C[].");
ScalePermstruct->C = C;
break;
case COL:
if ( !(R = (double *) doubleMalloc_dist(m)) )
ABORT("Malloc fails for R[].");
ScalePermstruct->R = R;
break;
}
}
/* ------------------------------------------------------------
* Diagonal scaling to equilibrate the matrix. (simple scheme)
* for row i = 1:n, A(i,:) <- A(i,:) / max(abs(A(i,:));
* for column j = 1:n, A(:,j) <- A(:, j) / max(abs(A(:,j))
* ------------------------------------------------------------*/
if ( Equil ) {
#if ( DEBUGlevel>=1 )
CHECK_MALLOC(iam, "Enter equil");
#endif
t = SuperLU_timer_();
if ( Fact == SamePattern_SameRowPerm ) {
/* Reuse R and C. */
switch ( ScalePermstruct->DiagScale ) {
case NOEQUIL:
break;
case ROW:
irow = fst_row;
for (j = 0; j < m_loc; ++j) {
for (i = rowptr[j]; i < rowptr[j+1]; ++i) {
a[i] *= R[irow]; /* Scale rows. */
}
++irow;
}
break;
case COL:
for (j = 0; j < m_loc; ++j)
for (i = rowptr[j]; i < rowptr[j+1]; ++i){
icol = colind[i];
a[i] *= C[icol]; /* Scale columns. */
}
break;
case BOTH:
irow = fst_row;
for (j = 0; j < m_loc; ++j) {
for (i = rowptr[j]; i < rowptr[j+1]; ++i) {
icol = colind[i];
a[i] *= R[irow] * C[icol]; /* Scale rows and cols. */
}
++irow;
}
break;
}
} else { /* Compute R & C from scratch */
/* Compute the row and column scalings. */
pdgsequ(A, R, C, &rowcnd, &colcnd, &amax, &iinfo, grid);
if ( iinfo > 0 ) {
if ( iinfo <= m ) {
#if ( PRNTlevel>=1 )
fprintf(stderr, "The " IFMT "-th row of A is exactly zero\n", iinfo);
#endif
} else {
#if ( PRNTlevel>=1 )
fprintf(stderr, "The " IFMT "-th column of A is exactly zero\n", iinfo-n);
#endif
}
} else if ( iinfo < 0 ) return;
/* Now iinfo == 0 */
/* Equilibrate matrix A if it is badly-scaled.
A <-- diag(R)*A*diag(C) */
pdlaqgs(A, R, C, rowcnd, colcnd, amax, equed);
if ( strncmp(equed, "R", 1)==0 ) {
ScalePermstruct->DiagScale = ROW;
rowequ = ROW;
} else if ( strncmp(equed, "C", 1)==0 ) {
ScalePermstruct->DiagScale = COL;
colequ = COL;
} else if ( strncmp(equed, "B", 1)==0 ) {
ScalePermstruct->DiagScale = BOTH;
rowequ = ROW;
colequ = COL;
} else ScalePermstruct->DiagScale = NOEQUIL;
#if ( PRNTlevel>=1 )
if ( !iam ) {
printf(".. equilibrated? *equed = %c\n", *equed);
fflush(stdout);
}
#endif
} /* end if Fact ... */
stat->utime[EQUIL] = SuperLU_timer_() - t;
#if ( DEBUGlevel>=1 )
CHECK_MALLOC(iam, "Exit equil");
#endif
} /* end if Equil ... LAPACK style, not involving MC64 */
if ( !factored ) { /* Skip this if already factored. */
/*
* For serial symbolic factorization, gather A from the distributed
* compressed row format to global A in compressed column format.
* Numerical values are gathered only when a row permutation
* for large diagonal is sought after.
*/
if ( Fact != SamePattern_SameRowPerm &&
(parSymbFact == NO || options->RowPerm != NO) ) {
/* Performs serial symbolic factorzation and/or MC64 */
need_value = (options->RowPerm == LargeDiag_MC64);
pdCompRow_loc_to_CompCol_global(need_value, A, grid, &GA);
GAstore = (NCformat *) GA.Store;
colptr = GAstore->colptr;
rowind = GAstore->rowind;
nnz = GAstore->nnz;
GA_mem_use = (nnz + n + 1) * sizeof(int_t);
if ( need_value ) {
a_GA = (double *) GAstore->nzval;
GA_mem_use += nnz * sizeof(double);
} else assert(GAstore->nzval == NULL);
}
/* ------------------------------------------------------------
Find the row permutation Pr for A, and apply Pr*[GA].
GA is overwritten by Pr*[GA].
------------------------------------------------------------*/
if ( options->RowPerm != NO ) {
t = SuperLU_timer_();
if ( Fact != SamePattern_SameRowPerm ) {
if ( options->RowPerm == MY_PERMR ) { /* Use user's perm_r. */
/* Permute the global matrix GA for symbfact() */
for (i = 0; i < colptr[n]; ++i) {
irow = rowind[i];
rowind[i] = perm_r[irow];
}
} else if ( options->RowPerm == LargeDiag_MC64 ) {
/* Get a new perm_r[] from MC64 */
if ( job == 5 ) {
/* Allocate storage for scaling factors. */
if ( !(R1 = doubleMalloc_dist(m)) )
ABORT("SUPERLU_MALLOC fails for R1[]");
if ( !(C1 = doubleMalloc_dist(n)) )
ABORT("SUPERLU_MALLOC fails for C1[]");
}
if ( !iam ) { /* Process 0 finds a row permutation */
iinfo = dldperm_dist(job, m, nnz, colptr, rowind, a_GA,
perm_r, R1, C1);
MPI_Bcast( &iinfo, 1, mpi_int_t, 0, grid->comm );
if ( iinfo == 0 ) {
MPI_Bcast( perm_r, m, mpi_int_t, 0, grid->comm );
if ( job == 5 && Equil ) {
MPI_Bcast( R1, m, MPI_DOUBLE, 0, grid->comm );
MPI_Bcast( C1, n, MPI_DOUBLE, 0, grid->comm );
}
}
} else {
MPI_Bcast( &iinfo, 1, mpi_int_t, 0, grid->comm );
if ( iinfo == 0 ) {
MPI_Bcast( perm_r, m, mpi_int_t, 0, grid->comm );
if ( job == 5 && Equil ) {
MPI_Bcast( R1, m, MPI_DOUBLE, 0, grid->comm );
MPI_Bcast( C1, n, MPI_DOUBLE, 0, grid->comm );
}
}
}
if ( iinfo && job == 5) { /* Error return */
SUPERLU_FREE(R1);
SUPERLU_FREE(C1);
}
#if ( PRNTlevel>=2 )
dmin = dmach_dist("Overflow");
dsum = 0.0;
dprod = 1.0;
#endif
if ( iinfo == 0 ) {
if ( job == 5 ) {
if ( Equil ) {
for (i = 0; i < n; ++i) {
R1[i] = exp(R1[i]);
C1[i] = exp(C1[i]);
}
/* Scale the distributed matrix further.
A <-- diag(R1)*A*diag(C1) */
irow = fst_row;
for (j = 0; j < m_loc; ++j) {
for (i = rowptr[j]; i < rowptr[j+1]; ++i) {
icol = colind[i];
a[i] *= R1[irow] * C1[icol];
#if ( PRNTlevel>=2 )
if ( perm_r[irow] == icol ) { /* New diagonal */
if ( job == 2 || job == 3 )
dmin = SUPERLU_MIN(dmin, fabs(a[i]));
else if ( job == 4 )
dsum += fabs(a[i]);
else if ( job == 5 )
dprod *= fabs(a[i]);
}
#endif
}
++irow;
}
/* Multiply together the scaling factors --
R/C from simple scheme, R1/C1 from MC64. */
if ( rowequ ) for (i = 0; i < m; ++i) R[i] *= R1[i];
else for (i = 0; i < m; ++i) R[i] = R1[i];
if ( colequ ) for (i = 0; i < n; ++i) C[i] *= C1[i];
else for (i = 0; i < n; ++i) C[i] = C1[i];
ScalePermstruct->DiagScale = BOTH;
rowequ = colequ = 1;
} /* end Equil */
/* Now permute global GA to prepare for symbfact() */
for (j = 0; j < n; ++j) {
for (i = colptr[j]; i < colptr[j+1]; ++i) {
irow = rowind[i];
rowind[i] = perm_r[irow];
}
}
SUPERLU_FREE (R1);
SUPERLU_FREE (C1);
} else { /* job = 2,3,4 */
for (j = 0; j < n; ++j) {
for (i = colptr[j]; i < colptr[j+1]; ++i) {
irow = rowind[i];
rowind[i] = perm_r[irow];
} /* end for i ... */
} /* end for j ... */
} /* end else job ... */
} else { /* if iinfo != 0 */
for (i = 0; i < m; ++i) perm_r[i] = i;
}
#if ( PRNTlevel>=2 )
if ( job == 2 || job == 3 ) {
if ( !iam ) printf("\tsmallest diagonal %e\n", dmin);
} else if ( job == 4 ) {
if ( !iam ) printf("\tsum of diagonal %e\n", dsum);
} else if ( job == 5 ) {
if ( !iam ) printf("\t product of diagonal %e\n", dprod);
}
#endif
} else { /* use largeDiag_AWPM */
#ifdef HAVE_COMBBLAS
c2cpp_GetAWPM(A, grid, ScalePermstruct);
#else
if ( iam == 0 ) {
printf("CombBLAS is not available\n"); fflush(stdout);
}
#endif
} /* end if options->RowPerm ... */
t = SuperLU_timer_() - t;
stat->utime[ROWPERM] = t;
#if ( PRNTlevel>=1 )
if ( !iam ) {
printf(".. LDPERM job " IFMT "\t time: %.2f\n", job, t);
fflush(stdout);
}
#endif
} /* end if Fact ... */
} else { /* options->RowPerm == NOROWPERM / NATURAL */
for (i = 0; i < m; ++i) perm_r[i] = i;
}
#if ( DEBUGlevel>=2 )
if ( !iam ) PrintInt10("perm_r", m, perm_r);
#endif
} /* end if (!factored) */
if ( !factored || options->IterRefine ) {
/* Compute norm(A), which will be used to adjust small diagonal. */
if ( notran ) *(unsigned char *)norm = '1';
else *(unsigned char *)norm = 'I';
anorm = pdlangs(norm, A, grid);
#if ( PRNTlevel>=1 )
if ( !iam ) { printf(".. anorm %e\n", anorm); fflush(stdout); }
#endif
}
/* ------------------------------------------------------------
Perform the LU factorization: symbolic factorization,
redistribution, and numerical factorization.
------------------------------------------------------------*/
if ( !factored ) {
t = SuperLU_timer_();
/*
* Get column permutation vector perm_c[], according to permc_spec:
* permc_spec = NATURAL: natural ordering
* permc_spec = MMD_AT_PLUS_A: minimum degree on structure of A'+A
* permc_spec = MMD_ATA: minimum degree on structure of A'*A
* permc_spec = METIS_AT_PLUS_A: METIS on structure of A'+A
* permc_spec = PARMETIS: parallel METIS on structure of A'+A
* permc_spec = MY_PERMC: the ordering already supplied in perm_c[]
*/
permc_spec = options->ColPerm;
if ( parSymbFact == YES || permc_spec == PARMETIS ) {
nprocs_num = grid->nprow * grid->npcol;
noDomains = (int) ( pow(2, ((int) LOG2( nprocs_num ))));
/* create a new communicator for the first noDomains
processes in grid->comm */
key = iam;
if (iam < noDomains) col = 0;
else col = MPI_UNDEFINED;
MPI_Comm_split (grid->comm, col, key, &symb_comm );
if ( permc_spec == NATURAL || permc_spec == MY_PERMC ) {
if ( permc_spec == NATURAL ) {
for (j = 0; j < n; ++j) perm_c[j] = j;
}
if ( !(sizes = intMalloc_dist(2 * noDomains)) )
ABORT("SUPERLU_MALLOC fails for sizes.");
if ( !(fstVtxSep = intMalloc_dist(2 * noDomains)) )
ABORT("SUPERLU_MALLOC fails for fstVtxSep.");
for (i = 0; i < 2*noDomains - 2; ++i) {
sizes[i] = 0;
fstVtxSep[i] = 0;
}
sizes[2*noDomains - 2] = m;
fstVtxSep[2*noDomains - 2] = 0;
} else if ( permc_spec != PARMETIS ) { /* same as before */
printf("{" IFMT "," IFMT "}: pdgssvx: invalid ColPerm option when ParSymbfact is used\n",
MYROW(grid->iam, grid), MYCOL(grid->iam, grid));
}
}
if ( permc_spec != MY_PERMC && Fact == DOFACT ) {
/* Reuse perm_c if Fact == SamePattern, or SamePattern_SameRowPerm */
if ( permc_spec == PARMETIS ) {
// #pragma omp parallel
// {
// #pragma omp master
// {
/* Get column permutation vector in perm_c. *
* This routine takes as input the distributed input matrix A *
* and does not modify it. It also allocates memory for *
* sizes[] and fstVtxSep[] arrays, that contain information *
* on the separator tree computed by ParMETIS. */
flinfo = get_perm_c_parmetis(A, perm_r, perm_c, nprocs_num,
noDomains, &sizes, &fstVtxSep,
grid, &symb_comm);
// }
// }
if (flinfo > 0) {
#if ( PRNTlevel>=1 )
fprintf(stderr, "Insufficient memory for get_perm_c parmetis\n");
#endif
*info = flinfo;
return;
}
} else {
get_perm_c_dist(iam, permc_spec, &GA, perm_c);
}
}
stat->utime[COLPERM] = SuperLU_timer_() - t;
/* Symbolic factorization. */
if ( Fact != SamePattern_SameRowPerm ) {
if ( parSymbFact == NO ) { /* Perform serial symbolic factorization */
/* GA = Pr*A, perm_r[] is already applied. */
int_t *GACcolbeg, *GACcolend, *GACrowind;
/* Compute the elimination tree of Pc*(A^T+A)*Pc^T or Pc*A^T*A*Pc^T
(a.k.a. column etree), depending on the choice of ColPerm.
Adjust perm_c[] to be consistent with a postorder of etree.
Permute columns of A to form A*Pc'.
After this routine, GAC = GA*Pc^T. */
sp_colorder(options, &GA, perm_c, etree, &GAC);
/* Form Pc*A*Pc^T to preserve the diagonal of the matrix GAC. */
GACstore = (NCPformat *) GAC.Store;
GACcolbeg = GACstore->colbeg;
GACcolend = GACstore->colend;
GACrowind = GACstore->rowind;
for (j = 0; j < n; ++j) {
for (i = GACcolbeg[j]; i < GACcolend[j]; ++i) {
irow = GACrowind[i];
GACrowind[i] = perm_c[irow];
}
}
/* Perform a symbolic factorization on Pc*Pr*A*Pc^T and set up
the nonzero data structures for L & U. */
#if ( PRNTlevel>=1 )
if ( !iam ) {
printf(".. symbfact(): relax " IFMT ", maxsuper " IFMT ", fill " IFMT "\n",
sp_ienv_dist(2), sp_ienv_dist(3), sp_ienv_dist(6));
fflush(stdout);
}
#endif
t = SuperLU_timer_();
if ( !(Glu_freeable = (Glu_freeable_t *)
SUPERLU_MALLOC(sizeof(Glu_freeable_t))) )
ABORT("Malloc fails for Glu_freeable.");
/* Every process does this. */
iinfo = symbfact(options, iam, &GAC, perm_c, etree,
Glu_persist, Glu_freeable);
nnzLU = Glu_freeable->nnzLU;
stat->utime[SYMBFAC] = SuperLU_timer_() - t;
if ( iinfo <= 0 ) { /* Successful return */
QuerySpace_dist(n, -iinfo, Glu_freeable, &symb_mem_usage);
#if ( PRNTlevel>=1 )
if ( !iam ) {
printf("\tNo of supers " IFMT "\n", Glu_persist->supno[n-1]+1);
printf("\tSize of G(L) " IFMT "\n", Glu_freeable->xlsub[n]);
printf("\tSize of G(U) " IFMT "\n", Glu_freeable->xusub[n]);
printf("\tint %d, short %d, float %d, double %d\n",
(int) sizeof(int_t), (int) sizeof(short),
(int) sizeof(float), (int) sizeof(double));
printf("\tSYMBfact (MB):\tL\\U %.2f\ttotal %.2f\texpansions " IFMT "\n",
symb_mem_usage.for_lu*1e-6,
symb_mem_usage.total*1e-6,
symb_mem_usage.expansions);
fflush(stdout);
}
#endif
} else { /* symbfact out of memory */
#if ( PRNTlevel>=1 )
if ( !iam )
fprintf(stderr,"symbfact() error returns " IFMT "\n",iinfo);
#endif
*info = iinfo;
return;
}
} /* end serial symbolic factorization */
else { /* parallel symbolic factorization */
t = SuperLU_timer_();
flinfo = symbfact_dist(nprocs_num, noDomains, A, perm_c, perm_r,
sizes, fstVtxSep, &Pslu_freeable,
&(grid->comm), &symb_comm,
&symb_mem_usage);
nnzLU = Pslu_freeable.nnzLU;
stat->utime[SYMBFAC] = SuperLU_timer_() - t;
if (flinfo > 0) {
#if ( PRNTlevel>=1 )
fprintf(stderr, "Insufficient memory for parallel symbolic factorization.");
#endif
*info = flinfo;
return;
}
}
/* Destroy global GA */
if ( parSymbFact == NO || options->RowPerm != NO )
Destroy_CompCol_Matrix_dist(&GA);
if ( parSymbFact == NO )
Destroy_CompCol_Permuted_dist(&GAC);
} /* end if Fact != SamePattern_SameRowPerm ... */
if (sizes) SUPERLU_FREE (sizes);
if (fstVtxSep) SUPERLU_FREE (fstVtxSep);
if (symb_comm != MPI_COMM_NULL) MPI_Comm_free (&symb_comm);
/* Distribute entries of A into L & U data structures. */
//if (parSymbFact == NO || ???? Fact == SamePattern_SameRowPerm) {
if ( parSymbFact == NO ) {
/* CASE OF SERIAL SYMBOLIC */
/* Apply column permutation to the original distributed A */
for (j = 0; j < nnz_loc; ++j) colind[j] = perm_c[colind[j]];
/* Distribute Pc*Pr*diag(R)*A*diag(C)*Pc^T into L and U storage.
NOTE: the row permutation Pc*Pr is applied internally in the
distribution routine. */
t = SuperLU_timer_();
dist_mem_use = pddistribute(Fact, n, A, ScalePermstruct,
Glu_freeable, LUstruct, grid);
stat->utime[DIST] = SuperLU_timer_() - t;
/* Deallocate storage used in symbolic factorization. */
if ( Fact != SamePattern_SameRowPerm ) {
iinfo = symbfact_SubFree(Glu_freeable);
SUPERLU_FREE(Glu_freeable);
}
} else { /* CASE OF PARALLEL SYMBOLIC */
/* Distribute Pc*Pr*diag(R)*A*diag(C)*Pc' into L and U storage.
NOTE: the row permutation Pc*Pr is applied internally in the
distribution routine. */
/* Apply column permutation to the original distributed A */
for (j = 0; j < nnz_loc; ++j) colind[j] = perm_c[colind[j]];
t = SuperLU_timer_();
dist_mem_use = ddist_psymbtonum(Fact, n, A, ScalePermstruct,
&Pslu_freeable, LUstruct, grid);
if (dist_mem_use > 0)
ABORT ("Not enough memory available for dist_psymbtonum\n");
stat->utime[DIST] = SuperLU_timer_() - t;
}
/*if (!iam) printf ("\tDISTRIBUTE time %8.2f\n", stat->utime[DIST]);*/
/* Perform numerical factorization in parallel. */
t = SuperLU_timer_();
// #pragma omp parallel
// {
// #pragma omp master
// {
pdgstrf(options, m, n, anorm, LUstruct, grid, stat, info);
stat->utime[FACT] = SuperLU_timer_() - t;
// }
// }
#if ( PRNTlevel>=2 )
/* ------------------------------------------------------------
SUM OVER ALL ENTRIES OF A AND PRINT NNZ AND SIZE OF A.
------------------------------------------------------------*/
Astore = (NRformat_loc *) A->Store;
xsup = Glu_persist->xsup;
nzval_a = Astore->nzval;
asum=0;
for (i = 0; i < Astore->m_loc; ++i) {
for (j = Astore->rowptr[i]; j < Astore->rowptr[i+1]; ++j) {
asum += nzval_a[j];
}
}
nsupers = Glu_persist->supno[n-1] + 1;
nsupers_j = CEILING( nsupers, grid->npcol ); /* Number of local block columns */
lsum=0.0;
for (lk=0;lk<nsupers_j;++lk){
lsub = LUstruct->Llu->Lrowind_bc_ptr[lk];
lusup = LUstruct->Llu->Lnzval_bc_ptr[lk];
if(lsub){
k = MYCOL(grid->iam, grid)+lk*grid->npcol; /* not sure */
knsupc = SuperSize( k );
nsupr = lsub[1];
for (j=0; j<knsupc; ++j)
for (i = 0; i < nsupr; ++i)
lsum +=lusup[j*nsupr+i];
}
}
MPI_Allreduce( &asum, &asum_tot,1, MPI_DOUBLE, MPI_SUM, grid->comm );
MPI_Allreduce( &lsum, &lsum_tot,1, MPI_DOUBLE, MPI_SUM, grid->comm );
MPI_Allreduce( &Astore->rowptr[Astore->m_loc], &nnz_tot,1, mpi_int_t, MPI_SUM, grid->comm );
// MPI_Bcast( &nnzLU, 1, mpi_int_t, 0, grid->comm );
MPI_Comm_rank( MPI_COMM_WORLD, &iam_g );
if (!iam_g) {
print_options_dist(options);
fflush(stdout);
}
printf(".. Ainfo mygid %5d mysid %5d nnz_loc " IFMT " sum_loc %e lsum_loc %e nnz " IFMT " nnzLU %ld sum %e lsum %e N " IFMT "\n", iam_g,iam,Astore->rowptr[Astore->m_loc],asum, lsum, nnz_tot,nnzLU,asum_tot,lsum_tot,A->ncol);
fflush(stdout);
#endif
#if 0
// #ifdef GPU_PROF
// if(!iam )
// {
// char* ttemp;
// ttemp = getenv("IO_FILE");
// if(ttemp!=NULL)
// {
// printf("File being opend is %s\n",ttemp );
// FILE* fp;
// fp = fopen(ttemp,"w");
// if(!fp)
// {
// fprintf(stderr," Couldn't open output file %s\n",ttemp);
// }
// int nsup=Glu_persist->supno[n-1]+1;
// int ii;
// for (ii = 0; ii < nsup; ++ii)
// {
// fprintf(fp,"%d,%d,%d,%d,%d,%d\n",gs1.mnk_min_stats[ii],gs1.mnk_min_stats[ii+nsup],
// gs1.mnk_min_stats[ii+2*nsup],
// gs1.mnk_max_stats[ii],gs1.mnk_max_stats[ii+nsup],gs1.mnk_max_stats[ii+2*nsup]);
// }
// // lastly put the timeing stats that we need
// fprintf(fp,"Min %lf Max %lf totaltime %lf \n",gs1.osDgemmMin,gs1.osDgemmMax,stat->utime[FACT]);
// fclose(fp);
// }
// }
// #endif
#endif
if ( options->PrintStat ) {
int_t TinyPivots;
float for_lu, total, max, avg, temp;
dQuerySpace_dist(n, LUstruct, grid, stat, &num_mem_usage);
if (parSymbFact == TRUE) {
/* The memory used in the redistribution routine
includes the memory used for storing the symbolic
structure and the memory allocated for numerical
factorization */
temp = SUPERLU_MAX(symb_mem_usage.total, -dist_mem_use);
if ( options->RowPerm != NO )
temp = SUPERLU_MAX(temp, GA_mem_use);
} else {
temp = SUPERLU_MAX (
symb_mem_usage.total + GA_mem_use, /* symbfact step */
symb_mem_usage.for_lu + dist_mem_use +
num_mem_usage.for_lu /* distribution step */
);
}
temp = SUPERLU_MAX(temp, num_mem_usage.total);
MPI_Reduce( &temp, &max,
1, MPI_FLOAT, MPI_MAX, 0, grid->comm );
MPI_Reduce( &temp, &avg,
1, MPI_FLOAT, MPI_SUM, 0, grid->comm );
MPI_Allreduce( &stat->TinyPivots, &TinyPivots, 1, mpi_int_t,
MPI_SUM, grid->comm );
stat->TinyPivots = TinyPivots;
MPI_Reduce( &num_mem_usage.for_lu, &for_lu,
1, MPI_FLOAT, MPI_SUM, 0, grid->comm );
MPI_Reduce( &num_mem_usage.total, &total,
1, MPI_FLOAT, MPI_SUM, 0, grid->comm );
if (!iam) {
printf("\n** Memory Usage **********************************\n");
printf("** NUMfact space (MB): (sum-of-all-processes)\n"
" L\\U : %8.2f | Total : %8.2f\n",
for_lu * 1e-6, total * 1e-6);
printf("** Total highmark (MB):\n"
" Sum-of-all : %8.2f | Avg : %8.2f | Max : %8.2f\n",
avg * 1e-6,
avg / grid->nprow / grid->npcol * 1e-6,
max * 1e-6);
printf("**************************************************\n");
fflush(stdout);
}
} /* end printing stats */
} /* end if (!factored) */
if ( options->Fact == DOFACT || options->Fact == SamePattern ) {
/* Need to reset the solve's communication pattern,
because perm_r[] and/or perm_c[] is changed. */
if ( options->SolveInitialized == YES ) { /* Initialized before */
dSolveFinalize(options, SOLVEstruct); /* Clean up structure */
options->SolveInitialized = NO; /* Reset the solve state */
}
}
#if 0
/* Need to revisit: Why the following is not good enough for X-to-B
distribution -- inv_perm_c changed */
pxgstrs_finalize(SOLVEstruct->gstrs_comm);
pxgstrs_init(A->ncol, m_loc, nrhs, fst_row, perm_r, perm_c, grid,
LUstruct->Glu_persist, SOLVEstruct);
#endif
/* ------------------------------------------------------------
Compute the solution matrix X.
------------------------------------------------------------*/
if ( nrhs && *info == 0 ) {
if ( !(b_work = doubleMalloc_dist(n)) )
ABORT("Malloc fails for b_work[]");
/* ------------------------------------------------------------
Scale the right-hand side if equilibration was performed.
------------------------------------------------------------*/
if ( notran ) {
if ( rowequ ) {
b_col = B;
for (j = 0; j < nrhs; ++j) {
irow = fst_row;
for (i = 0; i < m_loc; ++i) {
b_col[i] *= R[irow];
++irow;
}
b_col += ldb;
}
}
} else if ( colequ ) {
b_col = B;
for (j = 0; j < nrhs; ++j) {
irow = fst_row;
for (i = 0; i < m_loc; ++i) {
b_col[i] *= C[irow];
++irow;
}
b_col += ldb;
}
}
/* Save a copy of the right-hand side. */
ldx = ldb;
if ( !(X = doubleMalloc_dist(((size_t)ldx) * nrhs)) )
ABORT("Malloc fails for X[]");
x_col = X; b_col = B;
for (j = 0; j < nrhs; ++j) {
#if 0 /* Sherry */
for (i = 0; i < m_loc; ++i) x_col[i] = b_col[i];
#endif
memcpy(x_col, b_col, m_loc * sizeof(double));
x_col += ldx; b_col += ldb;
}
/* ------------------------------------------------------------
Solve the linear system.
------------------------------------------------------------*/
if ( options->SolveInitialized == NO ) { /* First time */
dSolveInit(options, A, perm_r, perm_c, nrhs, LUstruct, grid,
SOLVEstruct);
/* Inside this routine, SolveInitialized is set to YES.
For repeated call to pdgssvx(), no need to re-initialilze
the Solve data & communication structures, unless a new
factorization with Fact == DOFACT or SamePattern is asked for. */
}
if ( options->DiagInv==YES &&
(options->SolveInitialized == NO || Fact == SamePattern ||
Fact == SamePattern_SameRowPerm) ) {
pdCompute_Diag_Inv(n, LUstruct, grid, stat, info);
}
// #pragma omp parallel
// {
// #pragma omp master
// {
pdgstrs(n, LUstruct, ScalePermstruct, grid, X, m_loc,
fst_row, ldb, nrhs, SOLVEstruct, stat, info);
// }
// }
/* ------------------------------------------------------------
Use iterative refinement to improve the computed solution and
compute error bounds and backward error estimates for it.
------------------------------------------------------------*/
if ( options->IterRefine ) {
/* Improve the solution by iterative refinement. */
int_t *it;
int_t *colind_gsmv = SOLVEstruct->A_colind_gsmv;
/* This was allocated and set to NULL in dSolveInit() */
SOLVEstruct_t *SOLVEstruct1; /* Used by refinement. */
t = SuperLU_timer_();
if ( options->RefineInitialized == NO || Fact == DOFACT ) {
/* All these cases need to re-initialize gsmv structure */
if ( options->RefineInitialized )
pdgsmv_finalize(SOLVEstruct->gsmv_comm);
pdgsmv_init(A, SOLVEstruct->row_to_proc, grid,
SOLVEstruct->gsmv_comm);
/* Save a copy of the transformed local col indices
in colind_gsmv[]. */
if ( colind_gsmv ) SUPERLU_FREE(colind_gsmv);
if ( !(it = intMalloc_dist(nnz_loc)) )
ABORT("Malloc fails for colind_gsmv[]");
colind_gsmv = SOLVEstruct->A_colind_gsmv = it;
for (i = 0; i < nnz_loc; ++i) colind_gsmv[i] = colind[i];
options->RefineInitialized = YES;
} else if ( Fact == SamePattern ||
Fact == SamePattern_SameRowPerm ) {
double atemp;
int_t k, jcol, p;
/* Swap to beginning the part of A corresponding to the
local part of X, as was done in pdgsmv_init() */
for (i = 0; i < m_loc; ++i) { /* Loop through each row */
k = rowptr[i];
for (j = rowptr[i]; j < rowptr[i+1]; ++j) {
jcol = colind[j];
p = SOLVEstruct->row_to_proc[jcol];
if ( p == iam ) { /* Local */
atemp = a[k]; a[k] = a[j]; a[j] = atemp;
++k;
}
}
}
/* Re-use the local col indices of A obtained from the
previous call to pdgsmv_init() */
for (i = 0; i < nnz_loc; ++i) colind[i] = colind_gsmv[i];
}
if ( nrhs == 1 ) { /* Use the existing solve structure */
SOLVEstruct1 = SOLVEstruct;
} else { /* For nrhs > 1, since refinement is performed for RHS
one at a time, the communication structure for pdgstrs
is different than the solve with nrhs RHS.
So we use SOLVEstruct1 for the refinement step.
*/
if ( !(SOLVEstruct1 = (SOLVEstruct_t *)
SUPERLU_MALLOC(sizeof(SOLVEstruct_t))) )
ABORT("Malloc fails for SOLVEstruct1");
/* Copy the same stuff */
SOLVEstruct1->row_to_proc = SOLVEstruct->row_to_proc;
SOLVEstruct1->inv_perm_c = SOLVEstruct->inv_perm_c;
SOLVEstruct1->num_diag_procs = SOLVEstruct->num_diag_procs;
SOLVEstruct1->diag_procs = SOLVEstruct->diag_procs;
SOLVEstruct1->diag_len = SOLVEstruct->diag_len;
SOLVEstruct1->gsmv_comm = SOLVEstruct->gsmv_comm;
SOLVEstruct1->A_colind_gsmv = SOLVEstruct->A_colind_gsmv;
/* Initialize the *gstrs_comm for 1 RHS. */
if ( !(SOLVEstruct1->gstrs_comm = (pxgstrs_comm_t *)
SUPERLU_MALLOC(sizeof(pxgstrs_comm_t))) )
ABORT("Malloc fails for gstrs_comm[]");
pxgstrs_init(n, m_loc, 1, fst_row, perm_r, perm_c, grid,
Glu_persist, SOLVEstruct1);
}
pdgsrfs(n, A, anorm, LUstruct, ScalePermstruct, grid,
B, ldb, X, ldx, nrhs, SOLVEstruct1, berr, stat, info);
/* Deallocate the storage associated with SOLVEstruct1 */
if ( nrhs > 1 ) {
pxgstrs_finalize(SOLVEstruct1->gstrs_comm);
SUPERLU_FREE(SOLVEstruct1);
}
stat->utime[REFINE] = SuperLU_timer_() - t;
} /* end if IterRefine */
/* Permute the solution matrix B <= Pc'*X. */
pdPermute_Dense_Matrix(fst_row, m_loc, SOLVEstruct->row_to_proc,
SOLVEstruct->inv_perm_c,
X, ldx, B, ldb, nrhs, grid);
#if ( DEBUGlevel>=2 )
printf("\n (%d) .. After pdPermute_Dense_Matrix(): b =\n", iam);
for (i = 0; i < m_loc; ++i)
printf("\t(%d)\t%4d\t%.10f\n", iam, i+fst_row, B[i]);
#endif
/* Transform the solution matrix X to a solution of the original
system before equilibration. */
if ( notran ) {
if ( colequ ) {
b_col = B;
for (j = 0; j < nrhs; ++j) {
irow = fst_row;
for (i = 0; i < m_loc; ++i) {
b_col[i] *= C[irow];
++irow;
}
b_col += ldb;
}
}
} else if ( rowequ ) {
b_col = B;
for (j = 0; j < nrhs; ++j) {
irow = fst_row;
for (i = 0; i < m_loc; ++i) {
b_col[i] *= R[irow];
++irow;
}
b_col += ldb;
}
}
SUPERLU_FREE(b_work);
SUPERLU_FREE(X);
} /* end if nrhs != 0 && *info == 0 */
#if ( PRNTlevel>=1 )
if ( !iam ) printf(".. DiagScale = %d\n", ScalePermstruct->DiagScale);
#endif
/* Deallocate R and/or C if it was not used. */
if ( Equil && Fact != SamePattern_SameRowPerm ) {
switch ( ScalePermstruct->DiagScale ) {
case NOEQUIL:
SUPERLU_FREE(R);
SUPERLU_FREE(C);
break;
case ROW:
SUPERLU_FREE(C);
break;
case COL:
SUPERLU_FREE(R);
break;
}
}
#if 0
if ( !factored && Fact != SamePattern_SameRowPerm && !parSymbFact)
Destroy_CompCol_Permuted_dist(&GAC);
#endif
#if ( DEBUGlevel>=1 )
CHECK_MALLOC(iam, "Exit pdgssvx()");
#endif
}
|
GB_binop__rdiv_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__rdiv_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__rdiv_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_int8)
// A*D function (colscale): GB (_AxD__rdiv_int8)
// D*A function (rowscale): GB (_DxB__rdiv_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_int8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_int8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_int8)
// C=scalar+B GB (_bind1st__rdiv_int8)
// C=scalar+B' GB (_bind1st_tran__rdiv_int8)
// C=A+scalar GB (_bind2nd__rdiv_int8)
// C=A'+scalar GB (_bind2nd_tran__rdiv_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = GB_IDIV_SIGNED (bij, aij, 8)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_SIGNED (y, x, 8) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_INT8 || GxB_NO_RDIV_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

// Accumulate the eWise sum A+B into C, where C, A, and B are all dense, using
// the RDIV int8 operator (z = GB_IDIV_SIGNED (y,x,8); see GB_BINOP above).
// All work is done by the included template, specialized via the GB_* macros
// defined earlier in this file.  No error conditions are reported (void
// return); nthreads is the number of threads the template may use.
void GB (_Cdense_ewise3_accum__rdiv_int8)
(
    GrB_Matrix C,               // input/output: dense, C += A+B
    const GrB_Matrix A,         // input: dense
    const GrB_Matrix B,         // input: dense
    const int nthreads          // # of threads to use
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Compute C = A+B (no accumulator), where C, A, and B are all dense, using
// the RDIV int8 operator (z = GB_IDIV_SIGNED (y,x,8); see GB_BINOP above).
// Returns GrB_NO_VALUE when this operator/type pairing is disabled at compile
// time (GB_DISABLE, defined above), in which case the caller uses the generic
// method instead; otherwise returns GrB_SUCCESS.
GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_int8)
(
    GrB_Matrix C,               // output: dense, C = A+B
    const GrB_Matrix A,         // input: dense
    const GrB_Matrix B,         // input: dense
    const int nthreads          // # of threads to use
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// Accumulate the (possibly sparse) matrix B into the dense matrix C, via the
// RDIV int8 operator (see GB_BINOP above).  B_ek_slicing / B_ntasks /
// B_nthreads describe how the entries of B have been partitioned into
// parallel tasks for the template.  Returns GrB_NO_VALUE if this
// operator/type pairing is compiled out (GB_DISABLE); else GrB_SUCCESS.
GrB_Info GB (_Cdense_accumB__rdiv_int8)
(
    GrB_Matrix C,               // input/output: dense, C += B
    const GrB_Matrix B,         // input matrix
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// Accumulate the scalar b into every entry of the dense matrix C, using the
// RDIV int8 operator (z = GB_IDIV_SIGNED (y,x,8); see GB_BINOP above).  The
// scalar is passed as an untyped pointer p_bwork holding an int8_t value.
// Returns GrB_NO_VALUE when this operator/type pairing is disabled at compile
// time (GB_DISABLE); otherwise GrB_SUCCESS.
//
// Fix: the original body contained a second, unreachable
// "return (GrB_SUCCESS) ;" after the inner block (which always returned
// first).  The dead statement and the now-unneeded brace scope are removed;
// behavior is unchanged.

GrB_Info GB (_Cdense_accumb__rdiv_int8)
(
    GrB_Matrix C,               // input/output: dense, C += b
    const GB_void *p_bwork,     // scalar b, typecast below to (int8_t *)
    const int nthreads          // # of threads to use
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // get the scalar b for C += b, of type int8_t
    int8_t bwork = (*((int8_t *) p_bwork)) ;
    #include "GB_dense_subassign_22_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

// Scale the columns of A by the diagonal matrix D, applying the RDIV int8
// operator entrywise (see GB_BINOP above), writing results into C->x.
// A_is_pattern / D_is_pattern indicate that only the structure (not the
// values) of the corresponding matrix is used.  A_ek_slicing / A_ntasks /
// A_nthreads give the parallel task partition of A for the template.
// Returns GrB_NO_VALUE if compiled out (GB_DISABLE); else GrB_SUCCESS.
GrB_Info GB (_AxD__rdiv_int8)
(
    GrB_Matrix C,               // output matrix
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // values of C, of type int8_t (see GB_CTYPE above)
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Scale the rows of B by the diagonal matrix D, applying the RDIV int8
// operator entrywise (see GB_BINOP above), writing results into C->x.
// D_is_pattern / B_is_pattern indicate that only the structure (not the
// values) of the corresponding matrix is used.  Returns GrB_NO_VALUE if this
// operator/type pairing is compiled out (GB_DISABLE); else GrB_SUCCESS.
GrB_Info GB (_DxB__rdiv_int8)
(
    GrB_Matrix C,               // output matrix
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads                // # of threads to use
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // values of C, of type int8_t (see GB_CTYPE above)
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

// eWiseAdd with the RDIV int8 operator: C has the union of the patterns of A
// and B, optionally under a mask M (structural if Mask_struct, complemented
// if Mask_comp).  C_sparsity selects the sparsity format of C; the C_to_M /
// C_to_A / C_to_B maps and the TaskList (C_ntasks tasks, C_nthreads threads)
// drive the parallel template.  Per-matrix ek_slicing workspaces are declared
// here and released by GB_FREE_WORK.  Returns GrB_NO_VALUE if compiled out
// (GB_DISABLE); else GrB_SUCCESS.
GrB_Info GB (_AaddB__rdiv_int8)
(
    GrB_Matrix C,               // output matrix
    const int C_sparsity,       // sparsity structure of C
    const GrB_Matrix M,         // optional mask (may be NULL)
    const bool Mask_struct,     // use the structure of M only
    const bool Mask_comp,       // use !M
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,        // presumably: C->h is aliased to M->h — TODO confirm
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // workspaces for slicing M, A, and B into parallel tasks
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

// eWiseMult (method 08) with the RDIV int8 operator: C has the intersection
// of the patterns of A and B, with C sparse or hypersparse, optionally under
// a mask M.  ewise_method selects the internal variant; the C_to_* maps and
// TaskList (C_ntasks tasks, C_nthreads threads) drive the parallel template.
// Returns GrB_NO_VALUE if compiled out (GB_DISABLE); else GrB_SUCCESS.
GrB_Info GB (_AemultB_08__rdiv_int8)
(
    GrB_Matrix C,               // output matrix, sparse or hypersparse
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,         // optional mask (may be NULL)
    const bool Mask_struct,     // use the structure of M only
    const bool Mask_comp,       // use !M
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// eWiseMult (method 02) with the RDIV int8 operator, for the case where A is
// sparse/hypersparse and B is bitmap/full.  flipxy requests the flipped
// operator fmult(y,x); since GB_BINOP_FLIP is 0 for RDIV (see above — the
// flip has already been handled by rewriting div as rdiv), only the
// unflipped template is compiled here.  Cp_kfirst and the A_ek_slicing task
// partition drive the parallel template.  Returns GrB_NO_VALUE if compiled
// out (GB_DISABLE); else GrB_SUCCESS.
GrB_Info GB (_AemultB_02__rdiv_int8)
(
    GrB_Matrix C,               // output matrix
    const GrB_Matrix M,         // optional mask (may be NULL)
    const bool Mask_struct,     // use the structure of M only
    const bool Mask_comp,       // use !M
    const GrB_Matrix A,         // sparse/hypersparse input
    const GrB_Matrix B,         // bitmap/full input
    const bool flipxy,          // if true, apply fmult(y,x) instead of fmult(x,y)
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
#else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
#endif
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Method 04 of eWiseMult, specialized for rdiv_int8.  The loop structure is
// supplied by the shared template GB_emult_04_template.c; the work is sliced
// over the mask M (M_ek_slicing / M_ntasks / M_nthreads).
GrB_Info GB (_AemultB_04__rdiv_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Bitmap variant of eWiseMult for rdiv_int8.  The body is supplied by the
// shared template GB_bitmap_emult_template.c; ewise_method selects which of
// the bitmap sub-methods the template uses.
GrB_Info GB (_AemultB_bitmap__rdiv_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// For each entry p present in B, compute Cx [p] = rdiv (x, Bx [p]), which for
// rdiv_int8 is the signed integer division Bx [p] / x.  Entries absent from
// the bitmap Bb are left untouched.
GrB_Info GB (_bind1st__rdiv_int8)
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the output, the input array, and the bound scalar
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Bx = (int8_t *) Bx_input ;
int8_t x = (*((int8_t *) x_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < bnz ; p++)
{
// only operate on entries present per the bitmap Bb (if any)
if (GBB (Bb, p))
{
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IDIV_SIGNED (bij, x, 8) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// For each entry p present in A, compute Cx [p] = rdiv (Ax [p], y), which for
// rdiv_int8 is the signed integer division y / Ax [p].  Entries absent from
// the bitmap Ab are left untouched.
GrB_Info GB (_bind2nd__rdiv_int8)
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the output, the input array, and the bound scalar
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// only operate on entries present per the bitmap Ab (if any)
if (GBB (Ab, p))
{
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IDIV_SIGNED (y, aij, 8) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the GB_unop_transpose.c template below; for
// rdiv_int8 it computes Cx [pC] = aij / x (signed int8 division).
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (aij, x, 8) ; \
}
GrB_Info GB (_bind1st_tran__rdiv_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef  GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// redefine GB_ATYPE for any code that follows; here both operands are
// int8_t, so the definition is unchanged in effect.
#undef  GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the GB_unop_transpose.c template below; for
// rdiv_int8 it computes Cx [pC] = y / aij (signed int8 division).
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (y, aij, 8) ; \
}
GrB_Info GB (_bind2nd_tran__rdiv_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
dmbihamt.kernel_runtime.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include "local_header.h"
#include "openmp_pscmc_inc.h"
#include "dmbihamt.kernel_inc.h"
/* No per-kernel initialization is needed; kept for API symmetry. */
int openmp_dm_calc_hamt_dual_init (openmp_pscmc_env * pe ,openmp_dm_calc_hamt_dual_struct * kerstr )
{
    return 0 ;
}
/* Report the size of the kernel argument struct to the caller. */
void openmp_dm_calc_hamt_dual_get_struct_len (size_t * len )
{
    *len = sizeof (openmp_dm_calc_hamt_dual_struct) ;
}
/* One compute unit per available OpenMP thread. */
int openmp_dm_calc_hamt_dual_get_num_compute_units (openmp_dm_calc_hamt_dual_struct * kerstr )
{
    return omp_get_max_threads ( ) ;
}
/* Maximum x extent this kernel supports per call. */
int openmp_dm_calc_hamt_dual_get_xlen ()
{
    return IDX_OPT_MAX ;
}
/*
 * Execute the dm_calc_hamt_dual kernel over the whole (x,y) index space.
 * y iterations are dealt out cyclically to the OpenMP threads
 * (yid = tid, tid+numt, ...); the x loop runs sequentially inside each
 * y iteration.  Always returns 0.
 *
 * Fix: removed the unused locals ysingle/ymin/ymax (dead code left over
 * from a block-distribution scheme that the cyclic loop below replaced).
 */
int openmp_dm_calc_hamt_dual_exec (openmp_dm_calc_hamt_dual_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
    int numt = omp_get_num_threads ( ) ;
    int tid = omp_get_thread_num ( ) ;
    int xid ;
    int yid ;
    /* NOTE(review): loop indices are int while the extents are long; extents
       beyond INT_MAX would overflow -- confirm callers keep them small. */
    /* NOTE(review): xid is not passed to the kernel, so every x iteration
       makes an identical call -- confirm scmc_internal_g_xlen is meant to
       be 1 for this kernel. */
    for (yid = tid ; yid < scmc_internal_g_ylen ; yid += numt)
    {
        for (xid = 0 ; xid < scmc_internal_g_xlen ; xid += 1)
        {
            openmp_dm_calc_hamt_dual_scmc_kernel (
                kerstr->dphi_dt_out , kerstr->phi_in , kerstr->A1 , kerstr->Y1 ,
                kerstr->y_cpu_core[0] , kerstr->numvec[0] ,
                kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] ,
                kerstr->ovlp[0] ,
                kerstr->xblock[0] , kerstr->yblock[0] , kerstr->zblock[0] ,
                kerstr->num_ele[0] , kerstr->num_ele_A1[0] ,
                kerstr->M[0] , kerstr->Q[0] , kerstr->DM_A[0] ,
                kerstr->DX[0] , kerstr->DY[0] , kerstr->DZ[0] , kerstr->DT[0] ,
                yid , scmc_internal_g_ylen ) ;
        }
    }
}
return 0 ;}
/*
 * Parameter setters for dm_calc_hamt_dual: each binds pm->d_data to one
 * field of the kernel argument struct.  All setters return 0.
 *
 * Fix: the generated code declared these functions int but fell off the
 * end without a return statement; if a caller reads the result that is
 * undefined behavior (C11 6.9.1p12).  Each setter now returns 0.
 */
int openmp_dm_calc_hamt_dual_scmc_set_parameter_dphi_dt_out (openmp_dm_calc_hamt_dual_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->dphi_dt_out = pm->d_data ;
    return 0 ;
}
int openmp_dm_calc_hamt_dual_scmc_set_parameter_phi_in (openmp_dm_calc_hamt_dual_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->phi_in = pm->d_data ;
    return 0 ;
}
int openmp_dm_calc_hamt_dual_scmc_set_parameter_A1 (openmp_dm_calc_hamt_dual_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->A1 = pm->d_data ;
    return 0 ;
}
int openmp_dm_calc_hamt_dual_scmc_set_parameter_Y1 (openmp_dm_calc_hamt_dual_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Y1 = pm->d_data ;
    return 0 ;
}
int openmp_dm_calc_hamt_dual_scmc_set_parameter_y_cpu_core (openmp_dm_calc_hamt_dual_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data ;
    return 0 ;
}
int openmp_dm_calc_hamt_dual_scmc_set_parameter_numvec (openmp_dm_calc_hamt_dual_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data ;
    return 0 ;
}
int openmp_dm_calc_hamt_dual_scmc_set_parameter_XLEN (openmp_dm_calc_hamt_dual_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data ;
    return 0 ;
}
int openmp_dm_calc_hamt_dual_scmc_set_parameter_YLEN (openmp_dm_calc_hamt_dual_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data ;
    return 0 ;
}
int openmp_dm_calc_hamt_dual_scmc_set_parameter_ZLEN (openmp_dm_calc_hamt_dual_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data ;
    return 0 ;
}
int openmp_dm_calc_hamt_dual_scmc_set_parameter_ovlp (openmp_dm_calc_hamt_dual_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data ;
    return 0 ;
}
int openmp_dm_calc_hamt_dual_scmc_set_parameter_xblock (openmp_dm_calc_hamt_dual_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data ;
    return 0 ;
}
int openmp_dm_calc_hamt_dual_scmc_set_parameter_yblock (openmp_dm_calc_hamt_dual_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data ;
    return 0 ;
}
int openmp_dm_calc_hamt_dual_scmc_set_parameter_zblock (openmp_dm_calc_hamt_dual_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data ;
    return 0 ;
}
int openmp_dm_calc_hamt_dual_scmc_set_parameter_num_ele (openmp_dm_calc_hamt_dual_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data ;
    return 0 ;
}
int openmp_dm_calc_hamt_dual_scmc_set_parameter_num_ele_A1 (openmp_dm_calc_hamt_dual_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele_A1 = pm->d_data ;
    return 0 ;
}
int openmp_dm_calc_hamt_dual_scmc_set_parameter_M (openmp_dm_calc_hamt_dual_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->M = pm->d_data ;
    return 0 ;
}
int openmp_dm_calc_hamt_dual_scmc_set_parameter_Q (openmp_dm_calc_hamt_dual_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Q = pm->d_data ;
    return 0 ;
}
int openmp_dm_calc_hamt_dual_scmc_set_parameter_DM_A (openmp_dm_calc_hamt_dual_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DM_A = pm->d_data ;
    return 0 ;
}
int openmp_dm_calc_hamt_dual_scmc_set_parameter_DX (openmp_dm_calc_hamt_dual_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DX = pm->d_data ;
    return 0 ;
}
int openmp_dm_calc_hamt_dual_scmc_set_parameter_DY (openmp_dm_calc_hamt_dual_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DY = pm->d_data ;
    return 0 ;
}
int openmp_dm_calc_hamt_dual_scmc_set_parameter_DZ (openmp_dm_calc_hamt_dual_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DZ = pm->d_data ;
    return 0 ;
}
int openmp_dm_calc_hamt_dual_scmc_set_parameter_DT (openmp_dm_calc_hamt_dual_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DT = pm->d_data ;
    return 0 ;
}
/* No per-kernel initialization is needed; kept for API symmetry. */
int openmp_dm_cayley_eqn_core_init (openmp_pscmc_env * pe ,openmp_dm_cayley_eqn_core_struct * kerstr )
{
    return 0 ;
}
/* Report the size of the kernel argument struct to the caller. */
void openmp_dm_cayley_eqn_core_get_struct_len (size_t * len )
{
    *len = sizeof (openmp_dm_cayley_eqn_core_struct) ;
}
/* One compute unit per available OpenMP thread. */
int openmp_dm_cayley_eqn_core_get_num_compute_units (openmp_dm_cayley_eqn_core_struct * kerstr )
{
    return omp_get_max_threads ( ) ;
}
/* Maximum x extent this kernel supports per call. */
int openmp_dm_cayley_eqn_core_get_xlen ()
{
    return IDX_OPT_MAX ;
}
/*
 * Execute the dm_cayley_eqn_core kernel over the whole (x,y) index space.
 * y iterations are dealt out cyclically to the OpenMP threads; the x loop
 * runs sequentially inside each y iteration.  Always returns 0.
 *
 * Fix: removed the unused locals ysingle/ymin/ymax (dead code left over
 * from a block-distribution scheme that the cyclic loop below replaced).
 */
int openmp_dm_cayley_eqn_core_exec (openmp_dm_cayley_eqn_core_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
    int numt = omp_get_num_threads ( ) ;
    int tid = omp_get_thread_num ( ) ;
    int xid ;
    int yid ;
    /* NOTE(review): int indices vs long extents -- confirm extents fit in
       int.  xid is not passed to the kernel; confirm g_xlen is meant to
       be 1 here. */
    for (yid = tid ; yid < scmc_internal_g_ylen ; yid += numt)
    {
        for (xid = 0 ; xid < scmc_internal_g_xlen ; xid += 1)
        {
            openmp_dm_cayley_eqn_core_scmc_kernel (
                kerstr->dphi_dt_out , kerstr->phi_in , kerstr->A1 , kerstr->Y1 ,
                kerstr->y_cpu_core[0] , kerstr->numvec[0] ,
                kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] ,
                kerstr->ovlp[0] ,
                kerstr->xblock[0] , kerstr->yblock[0] , kerstr->zblock[0] ,
                kerstr->num_ele[0] , kerstr->num_ele_A1[0] ,
                kerstr->M[0] , kerstr->Q[0] , kerstr->DM_A[0] ,
                kerstr->DX[0] , kerstr->DY[0] , kerstr->DZ[0] , kerstr->DT[0] ,
                yid , scmc_internal_g_ylen ) ;
        }
    }
}
return 0 ;}
/*
 * Parameter setters for dm_cayley_eqn_core: each binds pm->d_data to one
 * field of the kernel argument struct.  All setters return 0.
 *
 * Fix: the generated code declared these functions int but fell off the
 * end without a return statement (UB if the caller reads the result,
 * C11 6.9.1p12).  Each setter now returns 0.
 */
int openmp_dm_cayley_eqn_core_scmc_set_parameter_dphi_dt_out (openmp_dm_cayley_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->dphi_dt_out = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_core_scmc_set_parameter_phi_in (openmp_dm_cayley_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->phi_in = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_core_scmc_set_parameter_A1 (openmp_dm_cayley_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->A1 = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_core_scmc_set_parameter_Y1 (openmp_dm_cayley_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Y1 = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_core_scmc_set_parameter_y_cpu_core (openmp_dm_cayley_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_core_scmc_set_parameter_numvec (openmp_dm_cayley_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_core_scmc_set_parameter_XLEN (openmp_dm_cayley_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_core_scmc_set_parameter_YLEN (openmp_dm_cayley_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_core_scmc_set_parameter_ZLEN (openmp_dm_cayley_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_core_scmc_set_parameter_ovlp (openmp_dm_cayley_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_core_scmc_set_parameter_xblock (openmp_dm_cayley_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_core_scmc_set_parameter_yblock (openmp_dm_cayley_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_core_scmc_set_parameter_zblock (openmp_dm_cayley_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_core_scmc_set_parameter_num_ele (openmp_dm_cayley_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_core_scmc_set_parameter_num_ele_A1 (openmp_dm_cayley_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele_A1 = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_core_scmc_set_parameter_M (openmp_dm_cayley_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->M = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_core_scmc_set_parameter_Q (openmp_dm_cayley_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Q = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_core_scmc_set_parameter_DM_A (openmp_dm_cayley_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DM_A = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_core_scmc_set_parameter_DX (openmp_dm_cayley_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DX = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_core_scmc_set_parameter_DY (openmp_dm_cayley_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DY = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_core_scmc_set_parameter_DZ (openmp_dm_cayley_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DZ = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_core_scmc_set_parameter_DT (openmp_dm_cayley_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DT = pm->d_data ;
    return 0 ;
}
/* No per-kernel initialization is needed; kept for API symmetry. */
int openmp_dm_exact_eqn_m_init (openmp_pscmc_env * pe ,openmp_dm_exact_eqn_m_struct * kerstr )
{
    return 0 ;
}
/* Report the size of the kernel argument struct to the caller. */
void openmp_dm_exact_eqn_m_get_struct_len (size_t * len )
{
    *len = sizeof (openmp_dm_exact_eqn_m_struct) ;
}
/* One compute unit per available OpenMP thread. */
int openmp_dm_exact_eqn_m_get_num_compute_units (openmp_dm_exact_eqn_m_struct * kerstr )
{
    return omp_get_max_threads ( ) ;
}
/* Maximum x extent this kernel supports per call. */
int openmp_dm_exact_eqn_m_get_xlen ()
{
    return IDX_OPT_MAX ;
}
/*
 * Execute the dm_exact_eqn_m kernel over the whole (x,y) index space.
 * y iterations are dealt out cyclically to the OpenMP threads; the x loop
 * runs sequentially inside each y iteration.  Always returns 0.
 *
 * Fix: removed the unused locals ysingle/ymin/ymax (dead code left over
 * from a block-distribution scheme that the cyclic loop below replaced).
 */
int openmp_dm_exact_eqn_m_exec (openmp_dm_exact_eqn_m_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
    int numt = omp_get_num_threads ( ) ;
    int tid = omp_get_thread_num ( ) ;
    int xid ;
    int yid ;
    /* NOTE(review): int indices vs long extents -- confirm extents fit in
       int.  xid is not passed to the kernel; confirm g_xlen is meant to
       be 1 here. */
    for (yid = tid ; yid < scmc_internal_g_ylen ; yid += numt)
    {
        for (xid = 0 ; xid < scmc_internal_g_xlen ; xid += 1)
        {
            openmp_dm_exact_eqn_m_scmc_kernel (
                kerstr->dphi_dt_out , kerstr->phi_in , kerstr->A1 , kerstr->Y1 ,
                kerstr->y_cpu_core[0] , kerstr->numvec[0] ,
                kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] ,
                kerstr->ovlp[0] ,
                kerstr->xblock[0] , kerstr->yblock[0] , kerstr->zblock[0] ,
                kerstr->num_ele[0] , kerstr->num_ele_A1[0] ,
                kerstr->M[0] , kerstr->Q[0] , kerstr->DM_A[0] ,
                kerstr->DX[0] , kerstr->DY[0] , kerstr->DZ[0] , kerstr->DT[0] ,
                yid , scmc_internal_g_ylen ) ;
        }
    }
}
return 0 ;}
/*
 * Parameter setters for dm_exact_eqn_m: each binds pm->d_data to one
 * field of the kernel argument struct.  All setters return 0.
 *
 * Fix: the generated code declared these functions int but fell off the
 * end without a return statement (UB if the caller reads the result,
 * C11 6.9.1p12).  Each setter now returns 0.
 */
int openmp_dm_exact_eqn_m_scmc_set_parameter_dphi_dt_out (openmp_dm_exact_eqn_m_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->dphi_dt_out = pm->d_data ;
    return 0 ;
}
int openmp_dm_exact_eqn_m_scmc_set_parameter_phi_in (openmp_dm_exact_eqn_m_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->phi_in = pm->d_data ;
    return 0 ;
}
int openmp_dm_exact_eqn_m_scmc_set_parameter_A1 (openmp_dm_exact_eqn_m_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->A1 = pm->d_data ;
    return 0 ;
}
int openmp_dm_exact_eqn_m_scmc_set_parameter_Y1 (openmp_dm_exact_eqn_m_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Y1 = pm->d_data ;
    return 0 ;
}
int openmp_dm_exact_eqn_m_scmc_set_parameter_y_cpu_core (openmp_dm_exact_eqn_m_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data ;
    return 0 ;
}
int openmp_dm_exact_eqn_m_scmc_set_parameter_numvec (openmp_dm_exact_eqn_m_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data ;
    return 0 ;
}
int openmp_dm_exact_eqn_m_scmc_set_parameter_XLEN (openmp_dm_exact_eqn_m_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data ;
    return 0 ;
}
int openmp_dm_exact_eqn_m_scmc_set_parameter_YLEN (openmp_dm_exact_eqn_m_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data ;
    return 0 ;
}
int openmp_dm_exact_eqn_m_scmc_set_parameter_ZLEN (openmp_dm_exact_eqn_m_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data ;
    return 0 ;
}
int openmp_dm_exact_eqn_m_scmc_set_parameter_ovlp (openmp_dm_exact_eqn_m_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data ;
    return 0 ;
}
int openmp_dm_exact_eqn_m_scmc_set_parameter_xblock (openmp_dm_exact_eqn_m_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data ;
    return 0 ;
}
int openmp_dm_exact_eqn_m_scmc_set_parameter_yblock (openmp_dm_exact_eqn_m_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data ;
    return 0 ;
}
int openmp_dm_exact_eqn_m_scmc_set_parameter_zblock (openmp_dm_exact_eqn_m_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data ;
    return 0 ;
}
int openmp_dm_exact_eqn_m_scmc_set_parameter_num_ele (openmp_dm_exact_eqn_m_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data ;
    return 0 ;
}
int openmp_dm_exact_eqn_m_scmc_set_parameter_num_ele_A1 (openmp_dm_exact_eqn_m_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele_A1 = pm->d_data ;
    return 0 ;
}
int openmp_dm_exact_eqn_m_scmc_set_parameter_M (openmp_dm_exact_eqn_m_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->M = pm->d_data ;
    return 0 ;
}
int openmp_dm_exact_eqn_m_scmc_set_parameter_Q (openmp_dm_exact_eqn_m_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Q = pm->d_data ;
    return 0 ;
}
int openmp_dm_exact_eqn_m_scmc_set_parameter_DM_A (openmp_dm_exact_eqn_m_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DM_A = pm->d_data ;
    return 0 ;
}
int openmp_dm_exact_eqn_m_scmc_set_parameter_DX (openmp_dm_exact_eqn_m_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DX = pm->d_data ;
    return 0 ;
}
int openmp_dm_exact_eqn_m_scmc_set_parameter_DY (openmp_dm_exact_eqn_m_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DY = pm->d_data ;
    return 0 ;
}
int openmp_dm_exact_eqn_m_scmc_set_parameter_DZ (openmp_dm_exact_eqn_m_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DZ = pm->d_data ;
    return 0 ;
}
int openmp_dm_exact_eqn_m_scmc_set_parameter_DT (openmp_dm_exact_eqn_m_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DT = pm->d_data ;
    return 0 ;
}
/* No per-kernel initialization is needed; kept for API symmetry. */
int openmp_dm_calc_hamt_init (openmp_pscmc_env * pe ,openmp_dm_calc_hamt_struct * kerstr )
{
    return 0 ;
}
/* Report the size of the kernel argument struct to the caller. */
void openmp_dm_calc_hamt_get_struct_len (size_t * len )
{
    *len = sizeof (openmp_dm_calc_hamt_struct) ;
}
/* One compute unit per available OpenMP thread. */
int openmp_dm_calc_hamt_get_num_compute_units (openmp_dm_calc_hamt_struct * kerstr )
{
    return omp_get_max_threads ( ) ;
}
/* Maximum x extent this kernel supports per call. */
int openmp_dm_calc_hamt_get_xlen ()
{
    return IDX_OPT_MAX ;
}
/*
 * Execute the dm_calc_hamt kernel over the whole (x,y) index space.
 * y iterations are dealt out cyclically to the OpenMP threads; the x loop
 * runs sequentially inside each y iteration.  Always returns 0.
 *
 * Fix: removed the unused locals ysingle/ymin/ymax (dead code left over
 * from a block-distribution scheme that the cyclic loop below replaced).
 */
int openmp_dm_calc_hamt_exec (openmp_dm_calc_hamt_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
    int numt = omp_get_num_threads ( ) ;
    int tid = omp_get_thread_num ( ) ;
    int xid ;
    int yid ;
    /* NOTE(review): int indices vs long extents -- confirm extents fit in
       int.  xid is not passed to the kernel; confirm g_xlen is meant to
       be 1 here. */
    for (yid = tid ; yid < scmc_internal_g_ylen ; yid += numt)
    {
        for (xid = 0 ; xid < scmc_internal_g_xlen ; xid += 1)
        {
            openmp_dm_calc_hamt_scmc_kernel (
                kerstr->dphi_dt_out , kerstr->phi_in , kerstr->A1 , kerstr->Y1 ,
                kerstr->y_cpu_core[0] , kerstr->numvec[0] ,
                kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] ,
                kerstr->ovlp[0] ,
                kerstr->xblock[0] , kerstr->yblock[0] , kerstr->zblock[0] ,
                kerstr->num_ele[0] , kerstr->num_ele_A1[0] ,
                kerstr->M[0] , kerstr->Q[0] , kerstr->DM_A[0] ,
                kerstr->DX[0] , kerstr->DY[0] , kerstr->DZ[0] , kerstr->DT[0] ,
                yid , scmc_internal_g_ylen ) ;
        }
    }
}
return 0 ;}
int openmp_dm_calc_hamt_scmc_set_parameter_dphi_dt_out (openmp_dm_calc_hamt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->dphi_dt_out = pm->d_data);
}
int openmp_dm_calc_hamt_scmc_set_parameter_phi_in (openmp_dm_calc_hamt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->phi_in = pm->d_data);
}
int openmp_dm_calc_hamt_scmc_set_parameter_A1 (openmp_dm_calc_hamt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->A1 = pm->d_data);
}
int openmp_dm_calc_hamt_scmc_set_parameter_Y1 (openmp_dm_calc_hamt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->Y1 = pm->d_data);
}
int openmp_dm_calc_hamt_scmc_set_parameter_y_cpu_core (openmp_dm_calc_hamt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->y_cpu_core = pm->d_data);
}
int openmp_dm_calc_hamt_scmc_set_parameter_numvec (openmp_dm_calc_hamt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->numvec = pm->d_data);
}
int openmp_dm_calc_hamt_scmc_set_parameter_XLEN (openmp_dm_calc_hamt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->XLEN = pm->d_data);
}
int openmp_dm_calc_hamt_scmc_set_parameter_YLEN (openmp_dm_calc_hamt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->YLEN = pm->d_data);
}
int openmp_dm_calc_hamt_scmc_set_parameter_ZLEN (openmp_dm_calc_hamt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->ZLEN = pm->d_data);
}
int openmp_dm_calc_hamt_scmc_set_parameter_ovlp (openmp_dm_calc_hamt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->ovlp = pm->d_data);
}
int openmp_dm_calc_hamt_scmc_set_parameter_xblock (openmp_dm_calc_hamt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->xblock = pm->d_data);
}
int openmp_dm_calc_hamt_scmc_set_parameter_yblock (openmp_dm_calc_hamt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->yblock = pm->d_data);
}
int openmp_dm_calc_hamt_scmc_set_parameter_zblock (openmp_dm_calc_hamt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->zblock = pm->d_data);
}
int openmp_dm_calc_hamt_scmc_set_parameter_num_ele (openmp_dm_calc_hamt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->num_ele = pm->d_data);
}
int openmp_dm_calc_hamt_scmc_set_parameter_num_ele_A1 (openmp_dm_calc_hamt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->num_ele_A1 = pm->d_data);
}
int openmp_dm_calc_hamt_scmc_set_parameter_M (openmp_dm_calc_hamt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->M = pm->d_data);
}
int openmp_dm_calc_hamt_scmc_set_parameter_Q (openmp_dm_calc_hamt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->Q = pm->d_data);
}
int openmp_dm_calc_hamt_scmc_set_parameter_DM_A (openmp_dm_calc_hamt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DM_A = pm->d_data);
}
int openmp_dm_calc_hamt_scmc_set_parameter_DX (openmp_dm_calc_hamt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DX = pm->d_data);
}
int openmp_dm_calc_hamt_scmc_set_parameter_DY (openmp_dm_calc_hamt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DY = pm->d_data);
}
int openmp_dm_calc_hamt_scmc_set_parameter_DZ (openmp_dm_calc_hamt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DZ = pm->d_data);
}
int openmp_dm_calc_hamt_scmc_set_parameter_DT (openmp_dm_calc_hamt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DT = pm->d_data);
}
/* No per-kernel initialization is needed; kept for API symmetry. */
int openmp_dm_cayley_eqn_J_init (openmp_pscmc_env * pe ,openmp_dm_cayley_eqn_J_struct * kerstr )
{
    return 0 ;
}
/* Report the size of the kernel argument struct to the caller. */
void openmp_dm_cayley_eqn_J_get_struct_len (size_t * len )
{
    *len = sizeof (openmp_dm_cayley_eqn_J_struct) ;
}
/* One compute unit per available OpenMP thread. */
int openmp_dm_cayley_eqn_J_get_num_compute_units (openmp_dm_cayley_eqn_J_struct * kerstr )
{
    return omp_get_max_threads ( ) ;
}
/* Maximum x extent this kernel supports per call. */
int openmp_dm_cayley_eqn_J_get_xlen ()
{
    return IDX_OPT_MAX ;
}
/*
 * Execute the dm_cayley_eqn_J kernel over the whole (x,y) index space.
 * y iterations are dealt out cyclically to the OpenMP threads; the x loop
 * runs sequentially inside each y iteration.  Always returns 0.
 *
 * Fix: removed the unused locals ysingle/ymin/ymax (dead code left over
 * from a block-distribution scheme that the cyclic loop below replaced).
 */
int openmp_dm_cayley_eqn_J_exec (openmp_dm_cayley_eqn_J_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
    int numt = omp_get_num_threads ( ) ;
    int tid = omp_get_thread_num ( ) ;
    int xid ;
    int yid ;
    /* NOTE(review): int indices vs long extents -- confirm extents fit in
       int.  xid is not passed to the kernel; confirm g_xlen is meant to
       be 1 here. */
    for (yid = tid ; yid < scmc_internal_g_ylen ; yid += numt)
    {
        for (xid = 0 ; xid < scmc_internal_g_xlen ; xid += 1)
        {
            openmp_dm_cayley_eqn_J_scmc_kernel (
                kerstr->dphi_dt_out , kerstr->phi_in , kerstr->A1 , kerstr->Y1 ,
                kerstr->y_cpu_core[0] , kerstr->numvec[0] ,
                kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] ,
                kerstr->ovlp[0] ,
                kerstr->xblock[0] , kerstr->yblock[0] , kerstr->zblock[0] ,
                kerstr->num_ele[0] , kerstr->num_ele_A1[0] ,
                kerstr->M[0] , kerstr->Q[0] , kerstr->DM_A[0] ,
                kerstr->DX[0] , kerstr->DY[0] , kerstr->DZ[0] , kerstr->DT[0] ,
                yid , scmc_internal_g_ylen ) ;
        }
    }
}
return 0 ;}
/*
 * Parameter setters for dm_cayley_eqn_J: each binds pm->d_data to one
 * field of the kernel argument struct.  All setters return 0.
 *
 * Fix: the generated code declared these functions int but fell off the
 * end without a return statement (UB if the caller reads the result,
 * C11 6.9.1p12).  Each setter now returns 0.
 */
int openmp_dm_cayley_eqn_J_scmc_set_parameter_dphi_dt_out (openmp_dm_cayley_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->dphi_dt_out = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_J_scmc_set_parameter_phi_in (openmp_dm_cayley_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->phi_in = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_J_scmc_set_parameter_A1 (openmp_dm_cayley_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->A1 = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_J_scmc_set_parameter_Y1 (openmp_dm_cayley_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Y1 = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_J_scmc_set_parameter_y_cpu_core (openmp_dm_cayley_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_J_scmc_set_parameter_numvec (openmp_dm_cayley_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_J_scmc_set_parameter_XLEN (openmp_dm_cayley_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_J_scmc_set_parameter_YLEN (openmp_dm_cayley_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_J_scmc_set_parameter_ZLEN (openmp_dm_cayley_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_J_scmc_set_parameter_ovlp (openmp_dm_cayley_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_J_scmc_set_parameter_xblock (openmp_dm_cayley_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_J_scmc_set_parameter_yblock (openmp_dm_cayley_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_J_scmc_set_parameter_zblock (openmp_dm_cayley_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_J_scmc_set_parameter_num_ele (openmp_dm_cayley_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_J_scmc_set_parameter_num_ele_A1 (openmp_dm_cayley_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele_A1 = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_J_scmc_set_parameter_M (openmp_dm_cayley_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->M = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_J_scmc_set_parameter_Q (openmp_dm_cayley_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Q = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_J_scmc_set_parameter_DM_A (openmp_dm_cayley_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DM_A = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_J_scmc_set_parameter_DX (openmp_dm_cayley_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DX = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_J_scmc_set_parameter_DY (openmp_dm_cayley_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DY = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_J_scmc_set_parameter_DZ (openmp_dm_cayley_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DZ = pm->d_data ;
    return 0 ;
}
int openmp_dm_cayley_eqn_J_scmc_set_parameter_DT (openmp_dm_cayley_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->DT = pm->d_data ;
    return 0 ;
}
/* No per-kernel initialization is needed; kept for API symmetry. */
int openmp_dm_bihamt_eqn_dydt_init (openmp_pscmc_env * pe ,openmp_dm_bihamt_eqn_dydt_struct * kerstr )
{
    return 0 ;
}
/* Report the size of the kernel argument struct to the caller. */
void openmp_dm_bihamt_eqn_dydt_get_struct_len (size_t * len )
{
    *len = sizeof (openmp_dm_bihamt_eqn_dydt_struct) ;
}
/* One compute unit per available OpenMP thread. */
int openmp_dm_bihamt_eqn_dydt_get_num_compute_units (openmp_dm_bihamt_eqn_dydt_struct * kerstr )
{
    return omp_get_max_threads ( ) ;
}
/* Maximum x extent this kernel supports per call. */
int openmp_dm_bihamt_eqn_dydt_get_xlen ()
{
    return IDX_OPT_MAX ;
}
/*
 * Execute the dm_bihamt_eqn_dydt kernel over the whole (x,y) index space.
 * y iterations are dealt out cyclically to the OpenMP threads; the x loop
 * runs sequentially inside each y iteration.  Always returns 0.
 *
 * Fix: removed the unused locals ysingle/ymin/ymax (dead code left over
 * from a block-distribution scheme that the cyclic loop below replaced).
 */
int openmp_dm_bihamt_eqn_dydt_exec (openmp_dm_bihamt_eqn_dydt_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
    int numt = omp_get_num_threads ( ) ;
    int tid = omp_get_thread_num ( ) ;
    int xid ;
    int yid ;
    /* NOTE(review): int indices vs long extents -- confirm extents fit in
       int.  xid is not passed to the kernel; confirm g_xlen is meant to
       be 1 here. */
    for (yid = tid ; yid < scmc_internal_g_ylen ; yid += numt)
    {
        for (xid = 0 ; xid < scmc_internal_g_xlen ; xid += 1)
        {
            openmp_dm_bihamt_eqn_dydt_scmc_kernel (
                kerstr->dphi_dt_out , kerstr->phi_in , kerstr->A1 , kerstr->Y1 ,
                kerstr->y_cpu_core[0] , kerstr->numvec[0] ,
                kerstr->XLEN[0] , kerstr->YLEN[0] , kerstr->ZLEN[0] ,
                kerstr->ovlp[0] ,
                kerstr->xblock[0] , kerstr->yblock[0] , kerstr->zblock[0] ,
                kerstr->num_ele[0] , kerstr->num_ele_A1[0] ,
                kerstr->M[0] , kerstr->Q[0] , kerstr->DM_A[0] ,
                kerstr->DX[0] , kerstr->DY[0] , kerstr->DZ[0] , kerstr->DT[0] ,
                yid , scmc_internal_g_ylen ) ;
        }
    }
}
return 0 ;}
/*
 * Parameter setters for dm_bihamt_eqn_dydt: each binds pm->d_data to one
 * field of the kernel argument struct.  All setters return 0.
 *
 * Fix: the generated code declared these functions int but fell off the
 * end without a return statement (UB if the caller reads the result,
 * C11 6.9.1p12).  Each setter now returns 0.
 */
int openmp_dm_bihamt_eqn_dydt_scmc_set_parameter_dphi_dt_out (openmp_dm_bihamt_eqn_dydt_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->dphi_dt_out = pm->d_data ;
    return 0 ;
}
int openmp_dm_bihamt_eqn_dydt_scmc_set_parameter_phi_in (openmp_dm_bihamt_eqn_dydt_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->phi_in = pm->d_data ;
    return 0 ;
}
int openmp_dm_bihamt_eqn_dydt_scmc_set_parameter_A1 (openmp_dm_bihamt_eqn_dydt_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->A1 = pm->d_data ;
    return 0 ;
}
int openmp_dm_bihamt_eqn_dydt_scmc_set_parameter_Y1 (openmp_dm_bihamt_eqn_dydt_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->Y1 = pm->d_data ;
    return 0 ;
}
int openmp_dm_bihamt_eqn_dydt_scmc_set_parameter_y_cpu_core (openmp_dm_bihamt_eqn_dydt_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->y_cpu_core = pm->d_data ;
    return 0 ;
}
int openmp_dm_bihamt_eqn_dydt_scmc_set_parameter_numvec (openmp_dm_bihamt_eqn_dydt_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->numvec = pm->d_data ;
    return 0 ;
}
int openmp_dm_bihamt_eqn_dydt_scmc_set_parameter_XLEN (openmp_dm_bihamt_eqn_dydt_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->XLEN = pm->d_data ;
    return 0 ;
}
int openmp_dm_bihamt_eqn_dydt_scmc_set_parameter_YLEN (openmp_dm_bihamt_eqn_dydt_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->YLEN = pm->d_data ;
    return 0 ;
}
int openmp_dm_bihamt_eqn_dydt_scmc_set_parameter_ZLEN (openmp_dm_bihamt_eqn_dydt_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ZLEN = pm->d_data ;
    return 0 ;
}
int openmp_dm_bihamt_eqn_dydt_scmc_set_parameter_ovlp (openmp_dm_bihamt_eqn_dydt_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->ovlp = pm->d_data ;
    return 0 ;
}
int openmp_dm_bihamt_eqn_dydt_scmc_set_parameter_xblock (openmp_dm_bihamt_eqn_dydt_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->xblock = pm->d_data ;
    return 0 ;
}
int openmp_dm_bihamt_eqn_dydt_scmc_set_parameter_yblock (openmp_dm_bihamt_eqn_dydt_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->yblock = pm->d_data ;
    return 0 ;
}
int openmp_dm_bihamt_eqn_dydt_scmc_set_parameter_zblock (openmp_dm_bihamt_eqn_dydt_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->zblock = pm->d_data ;
    return 0 ;
}
int openmp_dm_bihamt_eqn_dydt_scmc_set_parameter_num_ele (openmp_dm_bihamt_eqn_dydt_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele = pm->d_data ;
    return 0 ;
}
int openmp_dm_bihamt_eqn_dydt_scmc_set_parameter_num_ele_A1 (openmp_dm_bihamt_eqn_dydt_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->num_ele_A1 = pm->d_data ;
    return 0 ;
}
int openmp_dm_bihamt_eqn_dydt_scmc_set_parameter_M (openmp_dm_bihamt_eqn_dydt_struct * kerstr ,openmp_pscmc_mem * pm ){
    kerstr->M = pm->d_data ;
    return 0 ;
}
int openmp_dm_bihamt_eqn_dydt_scmc_set_parameter_Q (openmp_dm_bihamt_eqn_dydt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->Q = pm->d_data);
}
int openmp_dm_bihamt_eqn_dydt_scmc_set_parameter_DM_A (openmp_dm_bihamt_eqn_dydt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DM_A = pm->d_data);
}
int openmp_dm_bihamt_eqn_dydt_scmc_set_parameter_DX (openmp_dm_bihamt_eqn_dydt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DX = pm->d_data);
}
int openmp_dm_bihamt_eqn_dydt_scmc_set_parameter_DY (openmp_dm_bihamt_eqn_dydt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DY = pm->d_data);
}
int openmp_dm_bihamt_eqn_dydt_scmc_set_parameter_DZ (openmp_dm_bihamt_eqn_dydt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DZ = pm->d_data);
}
int openmp_dm_bihamt_eqn_dydt_scmc_set_parameter_DT (openmp_dm_bihamt_eqn_dydt_struct * kerstr ,openmp_pscmc_mem * pm ){
( ( kerstr )->DT = pm->d_data);
}
/* One-time initialization hook for the dual_psi_eqn_J kernel.
   The OpenMP backend needs no setup, so this is a no-op that reports
   success (0); pe and kerstr are unused here. */
int openmp_dm_bihamt_dual_psi_eqn_J_init (openmp_pscmc_env * pe ,openmp_dm_bihamt_dual_psi_eqn_J_struct * kerstr ){
return 0 ;}
/* Report, via *len, the byte size of the kernel-argument struct so the
   caller can allocate it. */
void openmp_dm_bihamt_dual_psi_eqn_J_get_struct_len (size_t * len ){
*len = sizeof(openmp_dm_bihamt_dual_psi_eqn_J_struct);
}
/* Number of compute units available to this kernel: the maximum
   OpenMP thread count. The kernel struct (kerstr) is not consulted. */
int openmp_dm_bihamt_dual_psi_eqn_J_get_num_compute_units (openmp_dm_bihamt_dual_psi_eqn_J_struct * kerstr ){
return omp_get_max_threads ( ) ;}
/* Preferred x-dimension work size for this kernel.
   IDX_OPT_MAX is a project-wide constant defined elsewhere --
   presumably a tuning limit for the index range; confirm at its
   definition site. */
int openmp_dm_bihamt_dual_psi_eqn_J_get_xlen (){
return IDX_OPT_MAX ;}
/*
 * Run the dm_bihamt_dual_psi_eqn_J kernel over a g_xlen x g_ylen
 * iteration space with OpenMP. Rows (yid) are distributed cyclically
 * across the team: thread `tid` handles rows tid, tid+numt, ...; each
 * thread then sweeps xid over the full x range for its rows.
 *
 * Note: the kernel call itself receives only yid and the y extent --
 * the xid loop repeats the call g_xlen times per row. This mirrors the
 * generated code exactly; the kernel presumably advances internal
 * per-call state -- TODO confirm against the kernel definition.
 *
 * Cleanup vs. the generated original: the unused locals ysingle/ymin/
 * ymax (leftovers from a block-partitioning scheme that this cyclic
 * scheme replaced) have been removed.
 *
 * Always returns 0.
 */
int openmp_dm_bihamt_dual_psi_eqn_J_exec (openmp_dm_bihamt_dual_psi_eqn_J_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){
#pragma omp parallel
{
int numt = omp_get_num_threads ( ) ;
int tid = omp_get_thread_num ( ) ;
int xid ;
int yid ;
/* NOTE(review): yid/xid are int while the extents are long; this
   overflows if an extent exceeds INT_MAX. Kept as int to match the
   kernel's expected argument types -- TODO confirm and widen. */
for (yid = tid ; yid < scmc_internal_g_ylen ; yid += numt)
{
for (xid = 0 ; xid < scmc_internal_g_xlen ; xid += 1)
{
openmp_dm_bihamt_dual_psi_eqn_J_scmc_kernel ( ( kerstr )->dphi_dt_out , ( kerstr )->phi_in , ( kerstr )->A1 , ( kerstr )->Y1 , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->num_ele_A1)[0] , ( ( kerstr )->M)[0] , ( ( kerstr )->Q)[0] , ( ( kerstr )->DM_A)[0] , ( ( kerstr )->DX)[0] , ( ( kerstr )->DY)[0] , ( ( kerstr )->DZ)[0] , ( ( kerstr )->DT)[0] , yid , scmc_internal_g_ylen );
}
}
}
return 0 ;
}
/*
 * Parameter setters for the dm_bihamt_dual_psi_eqn_J kernel: each one
 * binds the device-data pointer (pm->d_data) of a pscmc memory buffer
 * to the matching field of the kernel-argument struct.
 *
 * Fix: these functions are declared `int` but previously fell off the
 * end without a return statement -- undefined behavior if a caller
 * inspects the status. Each now returns 0 for success.
 */
int openmp_dm_bihamt_dual_psi_eqn_J_scmc_set_parameter_dphi_dt_out (openmp_dm_bihamt_dual_psi_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->dphi_dt_out = pm->d_data;
return 0;
}
int openmp_dm_bihamt_dual_psi_eqn_J_scmc_set_parameter_phi_in (openmp_dm_bihamt_dual_psi_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->phi_in = pm->d_data;
return 0;
}
int openmp_dm_bihamt_dual_psi_eqn_J_scmc_set_parameter_A1 (openmp_dm_bihamt_dual_psi_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->A1 = pm->d_data;
return 0;
}
int openmp_dm_bihamt_dual_psi_eqn_J_scmc_set_parameter_Y1 (openmp_dm_bihamt_dual_psi_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->Y1 = pm->d_data;
return 0;
}
int openmp_dm_bihamt_dual_psi_eqn_J_scmc_set_parameter_y_cpu_core (openmp_dm_bihamt_dual_psi_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->y_cpu_core = pm->d_data;
return 0;
}
int openmp_dm_bihamt_dual_psi_eqn_J_scmc_set_parameter_numvec (openmp_dm_bihamt_dual_psi_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->numvec = pm->d_data;
return 0;
}
int openmp_dm_bihamt_dual_psi_eqn_J_scmc_set_parameter_XLEN (openmp_dm_bihamt_dual_psi_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->XLEN = pm->d_data;
return 0;
}
int openmp_dm_bihamt_dual_psi_eqn_J_scmc_set_parameter_YLEN (openmp_dm_bihamt_dual_psi_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->YLEN = pm->d_data;
return 0;
}
int openmp_dm_bihamt_dual_psi_eqn_J_scmc_set_parameter_ZLEN (openmp_dm_bihamt_dual_psi_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->ZLEN = pm->d_data;
return 0;
}
int openmp_dm_bihamt_dual_psi_eqn_J_scmc_set_parameter_ovlp (openmp_dm_bihamt_dual_psi_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->ovlp = pm->d_data;
return 0;
}
int openmp_dm_bihamt_dual_psi_eqn_J_scmc_set_parameter_xblock (openmp_dm_bihamt_dual_psi_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->xblock = pm->d_data;
return 0;
}
int openmp_dm_bihamt_dual_psi_eqn_J_scmc_set_parameter_yblock (openmp_dm_bihamt_dual_psi_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->yblock = pm->d_data;
return 0;
}
int openmp_dm_bihamt_dual_psi_eqn_J_scmc_set_parameter_zblock (openmp_dm_bihamt_dual_psi_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->zblock = pm->d_data;
return 0;
}
int openmp_dm_bihamt_dual_psi_eqn_J_scmc_set_parameter_num_ele (openmp_dm_bihamt_dual_psi_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->num_ele = pm->d_data;
return 0;
}
int openmp_dm_bihamt_dual_psi_eqn_J_scmc_set_parameter_num_ele_A1 (openmp_dm_bihamt_dual_psi_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->num_ele_A1 = pm->d_data;
return 0;
}
int openmp_dm_bihamt_dual_psi_eqn_J_scmc_set_parameter_M (openmp_dm_bihamt_dual_psi_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->M = pm->d_data;
return 0;
}
int openmp_dm_bihamt_dual_psi_eqn_J_scmc_set_parameter_Q (openmp_dm_bihamt_dual_psi_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->Q = pm->d_data;
return 0;
}
int openmp_dm_bihamt_dual_psi_eqn_J_scmc_set_parameter_DM_A (openmp_dm_bihamt_dual_psi_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->DM_A = pm->d_data;
return 0;
}
int openmp_dm_bihamt_dual_psi_eqn_J_scmc_set_parameter_DX (openmp_dm_bihamt_dual_psi_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->DX = pm->d_data;
return 0;
}
int openmp_dm_bihamt_dual_psi_eqn_J_scmc_set_parameter_DY (openmp_dm_bihamt_dual_psi_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->DY = pm->d_data;
return 0;
}
int openmp_dm_bihamt_dual_psi_eqn_J_scmc_set_parameter_DZ (openmp_dm_bihamt_dual_psi_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->DZ = pm->d_data;
return 0;
}
int openmp_dm_bihamt_dual_psi_eqn_J_scmc_set_parameter_DT (openmp_dm_bihamt_dual_psi_eqn_J_struct * kerstr ,openmp_pscmc_mem * pm ){
kerstr->DT = pm->d_data;
return 0;
}
|
Searching.202007270051.subsearch.profile.h | //
// Created by Zhen Peng on 7/27/2020.
//
#ifndef BATCH_SEARCHING_SEARCHING_H
#define BATCH_SEARCHING_SEARCHING_H
#include <vector>
#include <boost/dynamic_bitset.hpp>
//#include <boost/sort/sort.hpp>
#include <iostream>
#include <fstream>
#include <unordered_map>
#include <immintrin.h>
#include <cstring>
#include <unordered_set>
#include <set>
#include <cfloat>
#include <algorithm>
//#include <omp.h>
#include "../include/definitions.h"
//#include "../include/efanna2e/neighbor.h"
#include "../include/utils.h"
#include "../include/Candidate.h"
#include "../include/parallelization.h"
#include "../include/bitvector.h"
namespace PANNS {
class Searching {
//private:
public:
idi num_v_ = 0;
edgei num_e_ = 0;
idi num_queries_ = 0;
uint64_t dimension_ = 0;
idi width_ = 0; // NSG largest degree
idi ep_ = 0; // Start point
// std::vector<dataf> data_load_;
// std::vector<dataf> queries_load_;
// std::vector< std::vector<dataf> > data_load_;
// std::vector< std::vector<dataf> > queries_load_;
// std::vector<distf> norms_;
dataf *data_load_ = nullptr;
dataf *queries_load_ = nullptr;
// dataf *norms_;
// std::vector< std::vector<idi> > nsg_graph_;
// idi *nsg_graph_indices_;
// idi *nsg_graph_out_edges_;
// std::vector< std::vector<idi> > edge_list_;
char *opt_nsg_graph_ = nullptr;
uint64_t data_bytes_;
uint64_t neighbor_bytes_;
uint64_t vertex_bytes_;
// For multithreads
int num_threads_ = 1;
// int num_real_threads_ = 1;
// int num_threads_intra_query_ = 1;
// int num_threads_inter_query_ = 1;
dataf compute_norm(
const dataf *data) const;
// idi vertex_id);
// const std::vector<PANNS::dataf> &data);
// size_t loc_start,
// idi dimension)
dataf compute_distance_with_norm(
const dataf *v_data,
const dataf *q_data,
// idi vertex_id,
// idi query_id,
// const std::vector<dataf> &d_data,
// const std::vector<dataf> &q_data,
// PANNS::idi d_start,
// PANNS::idi q_start,
const dataf vertex_norm) const;
// idi dimension)
static idi insert_into_queue(
std::vector<Candidate> &c_queue,
idi c_queue_top,
Candidate cand);
static idi add_into_queue(
std::vector<PANNS::Candidate> &queue,
idi &queue_top,
const idi queue_size,
const PANNS::Candidate &cand);
static idi add_into_queue(
std::vector<PANNS::Candidate> &queue,
const idi queue_start,
idi &queue_size,
const idi queue_capacity,
const PANNS::Candidate &cand);
static void add_into_queue_at(
const Candidate &cand,
std::vector<Candidate> &queue,
const idi insert_index, // The insertion location, independent with queue_start
const idi queue_start,
idi &queue_top, // The number of elements in queue, independent with queue_start
const idi queue_size); // The maximum capacity of queue, independent with queue_start.
static void insert_one_element_at(
// const T &cand,
// T *queue_base,
const Candidate &cand,
std::vector<Candidate> &queue_base,
const idi insert_index,
const idi queue_start,
const idi queue_size);
// idi insert_into_queue_nsg(
// std::vector< Candidate > &c_queue,
// idi c_queue_top,
// Candidate cand);
static idi merge_two_queues_into_1st_queue_seq_fixed(
std::vector<Candidate> &queue1,
const idi queue1_start,
const idi queue1_size,
std::vector<Candidate> &queue2,
const idi queue2_start,
const idi queue2_size);
static void merge_two_queues_into_1st_queue_seq_incr(
std::vector<Candidate> &queue1,
const idi queue1_start,
idi &queue1_size, // The number of element in queue1, independent with queue1_start.
const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start.
std::vector<Candidate> &queue2,
const idi queue2_start,
const idi queue2_size);
idi merge_all_queues_para_list(
std::vector< std::vector<Candidate> > &local_queues_list,
std::vector<idi> &local_queues_ends,
std::vector<Candidate> &set_L,
const idi L);
// idi merge_all_queues_para_array(
//// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<Candidate> &local_queues_array,
// std::vector<idi> &local_queues_ends,
// const idi local_queue_length,
// std::vector<Candidate> &set_L,
// const idi L);
idi merge_all_queues_para_array(
std::vector<Candidate> &set_L,
// std::vector<Candidate> &local_queues_array,
std::vector<idi> &local_queues_ends,
const idi local_queue_length,
// std::vector<Candidate> &set_L,
const idi L);
idi merge_all_queues_queue_base(
// std::vector< std::vector<Candidate> > &local_queues_list,
std::vector<Candidate> &set_L,
// std::vector<Candidate> &local_queues_array,
std::vector<idi> &local_queues_ends,
const idi queue_base,
const int real_threads,
const idi local_queue_length,
// std::vector<Candidate> &set_L,
const idi L);
void merge_two_consecutive_queues_in_place(
std::vector<Candidate> &two_queues,
const idi base_1,
// const idi &end_1,
const idi base_2,
const idi &length_2);
void merge_in_set_L(
std::vector<Candidate> &set_L,
const idi set_L_length,
const idi num_queues,
const idi local_queue_length);
distf selecting_top_L_seq(
std::vector<Candidate> &set_L,
const idi global_L,
// const idi local_L,
const idi num_queues,
const std::vector<idi> &local_queues_starts,
std::vector<idi> &local_queues_sizes);
void selecting_unchecked_top_M_seq(
const idi query_id,
const idi iter,
std::vector<Candidate> &set_L,
const std::vector<idi> &pointers_starts,
const idi value_M,
const idi num_queues,
const std::vector<idi> &local_queues_starts,
const std::vector<idi> &local_queues_sizes,
std::vector<idi> &local_m_counts);
// idi merge_all_queues_all_together_in_sequential(
// std::vector<Candidate> &set_L,
// std::vector<idi> &local_queues_ends,
// const idi local_queue_length,
// const idi L);
// idi min_all_queues_at_heads(
// const std::vector<Candidate> &set_L,
// std::vector<idi> &queue_heads,
// const std::vector<idi> &local_queues_ends,
// const idi local_queue_length,
// const idi L);
public:
// For Profiling
// L3CacheMissRate cache_miss_kernel;
uint64_t count_distance_computation_ = 0;
uint64_t count_add_to_queue_ = 0;
// uint64_t count_single_query_computation_ = 0;
// distf dist_min_ = 0;
// distf dist_max_ = 0;
double time_merge_ = 0;
double time_select_ = 0;
// double time_select_L_ = 0.0;
// double time_select_M_ = 0.0;
double time_initialization_ = 0;
double time_sequential_phase_ = 0;
double time_parallel_phase_ = 0;
double time_ending_ = 0.0;
double time_assign_s_ = 0.0;
double time_expand_ = 0.0;
double time_pick_top_m_ = 0.0;
double time_distance_computation_ = 0.0;
double time_add_to_queue_ = 0.0;
// double time_insert_ = 0;
// double time_compare_minimum_ = 0;
// double time_memmove_ = 0;
// std::vector<double> time_memmove_list_;
// L3CacheMissRate profile_miss_rate;
// uint64_t number_local_elements_ = 0;
// std::vector<idi> L_ids_;
// std::vector<idi> M_ids_;
// Releases the three heap buffers this object owns. data_load_,
// queries_load_ and opt_nsg_graph_ are raw pointers allocated with
// malloc-family calls elsewhere in the class (they are freed with
// free(), not delete), so they must be released manually here.
// Pointers are nulled after free to make a double-destroy harmless.
~Searching()
{
free(data_load_);
data_load_ = nullptr;
// free(queries_load_);
// _mm_free(data_load_);
free(queries_load_);
queries_load_ = nullptr;
// free(norms_);
// free(nsg_graph_indices_);
// free(nsg_graph_out_edges_);
free(opt_nsg_graph_);
opt_nsg_graph_ = nullptr;
}
void load_data_load(char *filename);
void load_queries_load(char *filename);
void load_nsg_graph(char *filename);
// void build_opt_graph();
void prepare_init_ids(
std::vector<unsigned> &init_ids,
const unsigned L) const;
// void prepare_candidate_queue_list(
// const float *query_load,
// std::vector<std::vector<efanna2e::Neighbor> > &retset_list,
// std::vector<boost::dynamic_bitset<> > &is_visited_list,
// const std::vector<unsigned> &init_ids,
// const boost::dynamic_bitset<> &flags,
// unsigned batch_start,
// unsigned batch_size,
// unsigned L);
// void search_in_batch(
//// const float *query_load,
// size_t K,
// size_t L,
// unsigned batch_start,
// unsigned batch_size,
// std::vector< std::vector<Candidate> > &set_L_list,
// std::vector< boost::dynamic_bitset<> > &is_visited_list,
// const std::vector<idi> &init_ids,
// const boost::dynamic_bitset<> &is_visited,
// std::vector<std::vector<idi> > &set_K_list);
void search_in_sequential(
idi query_id,
idi K,
idi L,
std::vector<Candidate> &set_L,
// boost::dynamic_bitset<> &is_visited,
// boost::dynamic_bitset<> is_visited,
// std::vector<idi> &init_ids,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K);
// void search_in_sequential_BitVector(
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// idi get_out_degree(idi v_id) const
// {
// if (v_id < num_v_ - 1) {
// return nsg_graph_indices_[v_id + 1] - nsg_graph_indices_[v_id];
// } else {
// return num_e_ - nsg_graph_indices_[v_id];
// }
// }
void search_with_top_m(
idi M,
idi query_id,
idi K,
idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K);
// std::vector< std::vector<idi> > &top_m_list);
void search_with_top_m_scale_m(
const PANNS::idi value_M_max,
const PANNS::idi query_id,
const PANNS::idi K,
const PANNS::idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited);
// void search_with_top_m_myths_M(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void search_with_top_m_to_get_distance_range(
// const PANNS::idi M,
// const PANNS::idi query_id,
//// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids);
// void search_with_top_m_profile_bit_CAS(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void search_with_top_m_no_local_arrays(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// boost::dynamic_bitset<> &is_visited);
void search_with_top_m_in_batch(
PANNS::idi M,
PANNS::idi batch_start,
PANNS::idi batch_size,
PANNS::idi K,
PANNS::idi L,
std::vector< std::vector<Candidate> > &set_L_list,
const std::vector<idi> &init_ids,
std::vector< std::vector<idi> > &set_K_list);
// void para_search_with_top_m_critical_area(
// idi M,
// idi query_id,
// idi K,
// idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void para_search_with_top_m_critical_area_no_omp(
// idi M,
// idi query_id,
// idi K,
// idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void para_search_with_top_m_critical_area_yes_omp(
// idi M,
// idi query_id,
// idi K,
// idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void para_search_with_top_m_visited_array(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// std::vector<uint8_t> &is_visited);
// void para_search_with_top_m_merge_queues(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void para_search_with_top_m_queues_seq_merge(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void para_search_with_top_m_merge_queues_no_CAS(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length,
// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<idi> &local_queues_ends,
//// std::vector<uint8_t> &is_visited);
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_in_array(
// void para_search_with_top_m_merge_queues_new_threshold(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
//// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<Candidate> &local_queues_array,
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// BitVector &is_visited);
// void para_search_with_top_m_merge_queues_by_sort(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
//// std::vector<Candidate> &local_queues_array,
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<idi> &dest_offsets,
// const std::vector<idi> &offsets_load_set_L, // Offsets for store into set_L.
// BitVector &is_visited);
// void para_search_with_top_m_merge_queues_better_merge_v0(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited);
// boost::dynamic_bitset<> &is_visited);
//// BitVector &is_visited);
// void para_search_with_top_m_merge_queues_better_merge_v2(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited,
// std::vector<distf> &local_thresholds);
//// BitVector &is_visited)
// void para_search_with_top_m_merge_queues_better_merge_v1(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<Candidate> &top_m_candidates,
//// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited);
// boost::dynamic_bitset<> &is_visited);
//// BitVector &is_visited);
// void para_search_with_top_m_merge_queues_better_merge_v0_0(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
//// BitVector &is_visited)
// void para_search_with_top_m_merge_queues_less_merge(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited,
// std::vector<distf> &local_thresholds);
//// BitVector &is_visited)
// void para_search_with_top_m_merge_queues_no_merge(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited,
// std::vector<distf> &local_thresholds,
// const uint64_t computation_threshold);
// void para_search_with_top_m_merge_queues_scale_m_v0(
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited);
// boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_merge_queues_middle_m(
const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<Candidate> &top_m_candidates,
std::vector<idi> &top_m_candidates,
// std::vector<uint8_t> &is_visited)
boost::dynamic_bitset<> &is_visited);
// std::vector<distf> &local_thresholds);
// BitVector &is_visited)
// void para_search_with_top_m_merge_queues_scale_m_v2(
// const idi value_M_min,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_scale_m_v3(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_merge_queues_middle_m_no_merge(
const uint64_t computation_threshold,
const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
const idi init_size,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
std::vector<idi> &local_queues_ends, // Sizes of local queue
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_merge_queues_sequential_merge(
const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
std::vector<idi> &local_queues_ends, // Sizes of local queue
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_nested_para(
const idi batch_start,
const idi batch_size,
const idi value_M_middle,
const idi value_M_max,
const idi K,
const idi L,
std::vector< std::vector<Candidate> > &set_L_list,
const std::vector<idi> &init_ids,
std::vector< std::vector<idi> > &set_K_list,
const idi local_queue_length, // Maximum size of local queue
const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
std::vector< std::vector<idi> > &local_queues_ends_list, // Sizes of local queue
std::vector< std::vector<idi> > &top_m_candidates_list,
std::vector< boost::dynamic_bitset<> > &is_visited_list);
void subsearch_with_top_m(
const idi value_M_max,
const idi query_id,
const idi local_L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &local_top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &local_count_distance_computation);
void subsearch_top_m_for_one_iteration(
const idi iter,
idi &k_uc,
const idi value_M,
const idi query_id,
const dataf *query_data,
const idi L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &count_distance_computation);
void subsearch_top_m_for_one_iteration_lth(
const distf bound_lth,
const idi iter,
idi &k_uc,
const idi value_M,
const idi query_id,
const dataf *query_data,
const idi L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &count_distance_computation);
void subsearch_top_m_for_one_iteration_lth_mth(
const distf bound_lth,
// const idi top_m_position,
const idi iter,
idi &k_uc,
const idi local_m_count,
const idi query_id,
const dataf *query_data,
const idi L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &count_distance_computation,
double &time_pick_top_m,
uint64_t &count_add_to_queue,
double &time_distance_computation,
double &time_add_to_queue);
void para_search_with_top_m_subsearch_v0(
// const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
std::vector< std::vector<idi> > &top_m_candidates_list,
boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_subsearch_v1(
const idi local_M_max,
const idi query_id,
const idi K,
const idi global_L,
const idi local_L,
const idi total_L,
const idi init_queue_end,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const std::vector<idi> &local_queues_bases,
std::vector<idi> &local_queues_ends,
std::vector< std::vector<idi> > &top_m_candidates_list,
boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_subsearch_v2(
// const idi local_M_max,
// const idi query_id,
// const idi K,
// const idi global_L,
// const idi local_L,
// const idi total_L,
// const idi init_queue_size,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const std::vector<idi> &local_queues_starts,
// std::vector<idi> &local_queues_sizes,
// std::vector<idi> &local_m_counts,
// std::vector< std::vector<idi> > &top_m_candidates_list,
// boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_subsearch_v3(
const idi local_M_max,
const idi local_M_middle,
const idi query_id,
const idi K,
const idi global_L,
const idi local_L,
// const idi total_L,
// const idi init_queue_size,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const std::vector<idi> &local_queues_starts,
std::vector<idi> &local_queues_sizes,
std::vector<idi> &local_m_counts,
std::vector< std::vector<idi> > &top_m_candidates_list,
boost::dynamic_bitset<> &is_visited);
void subsearch_for_simple_search(
const idi query_id,
const idi local_L,
std::vector<Candidate> &set_L,
const idi base_set_L,
idi &set_L_end,
// std::vector<uint8_t> &is_visited,
boost::dynamic_bitset<> &is_visited,
uint64_t &local_count_distance_computation);
void para_simple_search_subsearch(
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
// std::vector<uint8_t> &is_visited);
boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_global_threshold(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<idi> &top_m_candidates,
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_distance_threshold_m(
//// const idi value_M_middle,
//// const idi value_M_max,
// const distf relative_dist_threshold,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
//// std::vector<distf> &local_thresholds)
//// BitVector &is_visited)
// void para_search_with_top_m_merge_queues_distance_threshold_m_middle_iteration(
//// const idi value_M_middle,
//// const idi value_M_max,
// const distf relative_dist_threshold,
// const idi middle_iteration,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_collectors(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_selecting(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_myths(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
//// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<Candidate> &local_queues_array,
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// BitVector &is_visited);
//// std::vector<uint8_t> &is_visited);
//// boost::dynamic_bitset<> &is_visited);
//// void para_prepare_init_ids(
//// std::vector<unsigned> &init_ids,
//// unsigned L) const;
// void para_search_with_top_m_in_batch_embarassing_para(
// const PANNS::idi M,
// const PANNS::idi batch_start,
// const PANNS::idi batch_size,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector< std::vector<Candidate> > &set_L_list,
// const std::vector<idi> &init_ids,
// std::vector< std::vector<idi> > &set_K_list,
// std::vector< boost::dynamic_bitset<> > &is_visited_list);
// void test_neighbors_distance_to_father(
// const idi num_selected) const;
// void test_neighbors_normalized_distance_to_father(
// const idi num_selected) const;
void load_true_NN(
const char *filename,
std::vector< std::vector<idi> > &true_nn_list);
void get_recall_for_all_queries(
const std::vector< std::vector<idi> > &true_nn_list,
const std::vector<std::vector<unsigned>> &set_K_list,
std::unordered_map<unsigned, double> &recalls) const;
}; // Class Searching
/**
* Input the data from the file.
* @param filename
*/
inline void Searching::load_data_load(char *filename)
{
    // Load the base (data) vectors; DiskIO fills data_load_, num_v_, and
    // dimension_ from the file.
    const auto previous_dimension = dimension_;
    DiskIO::load_data(
            filename,
            data_load_,
            num_v_,
            dimension_);
    // If a dimension was already recorded (e.g., queries were loaded first),
    // the newly loaded data must agree with it.
    if (previous_dimension && previous_dimension != dimension_) {
        std::cerr << "Error: data dimension " << dimension_
                  << " is not equal to query dimension " << previous_dimension << "." << std::endl;
        exit(EXIT_FAILURE);
    }
}
/**
* Input queries from the file.
* @param filename
*/
inline void Searching::load_queries_load(char *filename)
{
    // Load the query vectors; DiskIO fills queries_load_, num_queries_, and
    // dimension_ from the file.
    const auto previous_dimension = dimension_;
    DiskIO::load_data(
            filename,
            queries_load_,
            num_queries_,
            dimension_);
    // If a dimension was already recorded (e.g., data was loaded first), the
    // queries must agree with it.
    if (previous_dimension && previous_dimension != dimension_) {
        std::cerr << "Error: query dimension " << dimension_
                  << " is not equal to data dimension " << previous_dimension << "." << std::endl;
        exit(EXIT_FAILURE);
    }
}
/**
* Input the NSG graph from the file.
* Reference: https://github.com/ZJULearning/nsg/blob/master/src/index_nsg.cpp
* @param filename
*/
inline void Searching::load_nsg_graph(char *filename)
{
    // Read the NSG index file and build the cache-friendly packed graph
    // opt_nsg_graph_, where each vertex record is laid out contiguously as
    // [norm | vector data | out-degree | neighbor ids]. The raw vector array
    // data_load_ is folded into this layout and then freed.
    std::ifstream fin(filename);
    if (!fin.is_open()) {
        std::cerr << "Error: cannot read file " << filename << " ." << std::endl;
        exit(EXIT_FAILURE);
    }
    // File header: graph width (maximum out-degree) and the entry-point vertex id.
    fin.read(reinterpret_cast<char *>(&width_), sizeof(unsigned));
    fin.read(reinterpret_cast<char *>(&ep_), sizeof(unsigned));
    // Per-vertex byte budget: norm + vector, then degree + up to width_ neighbors.
    data_bytes_ = (1 + dimension_) * sizeof(dataf);
    neighbor_bytes_ = (1 + width_) * sizeof(idi);
    vertex_bytes_ = data_bytes_ + neighbor_bytes_;
    opt_nsg_graph_ = (char *) malloc(num_v_ * vertex_bytes_);
    if (!opt_nsg_graph_) {
        std::cerr << "Error: no enough memory for opt_nsg_graph_." << std::endl;
        exit(EXIT_FAILURE);
    }
    idi v_id = 0;
    num_e_ = 0;  // total edge count, accumulated from per-vertex degrees
    char *base_location = opt_nsg_graph_;
    // The file body is a sequence of (degree, neighbor ids) records, one per
    // vertex, in vertex-id order; EOF is detected by a failed degree read.
    while (true) {
        idi degree;
        fin.read(reinterpret_cast<char *>(&degree), sizeof(unsigned));
        if (fin.eof()) {
            break;
        }
        num_e_ += degree;
//        std::vector<idi> tmp_ngbrs(degree);
//        fin.read(reinterpret_cast<char *>(tmp_ngbrs.data()), degree * sizeof(unsigned));
        // Norm and data: precompute the squared norm once so distance
        // evaluation at query time can skip it.
        distf norm = compute_norm(data_load_ + v_id * dimension_);
//        distf norm = compute_norm(v_id);
        std::memcpy(base_location, &norm, sizeof(distf)); // Norm
        memcpy(base_location + sizeof(distf), data_load_ + v_id * dimension_, dimension_ * sizeof(dataf)); // Data
        base_location += data_bytes_;
        // Neighbors: degree followed by the adjacency list, read straight
        // into the packed record.
        memcpy(base_location, &degree, sizeof(idi)); // Number of neighbors
        fin.read(base_location + sizeof(idi), degree * sizeof(unsigned)); // Neighbors
//        memcpy(location + sizeof(idi), tmp_ngbrs.data(), degree * sizeof(unsigned));
        base_location += neighbor_bytes_;
        ++v_id;
    }
    // Sanity check: the graph file must describe exactly num_v_ vertices.
    if (v_id != num_v_) {
        std::cerr << "Error: NSG data has " << v_id
                  << " vertices, but origin data has " << num_v_ << " vertices." << std::endl;
        exit(EXIT_FAILURE);
    }
    // The raw vector array is now redundant; all data lives in opt_nsg_graph_.
    free(data_load_);
    data_load_ = nullptr;
//    ////////////////////////
//    idi v_id = 0;
//    num_e_ = 0;
//    while (true) {
//        idi degree;
//        fin.read(reinterpret_cast<char *>(&degree), sizeof(unsigned));
//        if (fin.eof()) {
//            break;
//        }
//        num_e_ += degree;
//
//        std::vector<idi> ngbrs(degree);
//        fin.read(reinterpret_cast<char *>(ngbrs.data()), degree * sizeof(unsigned));
////        nsg_graph_.push_back(ngbrs);
////        tmp_edge_list.push_back(ngbrs);
//        edge_list_.push_back(ngbrs);
//        ++v_id;
//    }
//    if (v_id != num_v_) {
//        std::cerr << "Error: NSG data has " << v_id
//                  << " vertices, but origin data has " << num_v_ << " vertices." << std::endl;
//        exit(EXIT_FAILURE);
//    }
}
/**
* Load those true top-K neighbors (ground truth) of queries
* @param filename
* @param[out] true_nn_list
*/
inline void Searching::load_true_NN(
const char *filename,
std::vector< std::vector<idi> > &true_nn_list)
// unsigned &t_K)
{
std::ifstream fin(filename);
if (!fin.is_open()) {
fprintf(stderr, "Error: cannot open file %s\n", filename);
exit(EXIT_FAILURE);
}
idi t_query_num;
idi t_K;
// unsigned t_K;
fin.read(reinterpret_cast<char *>(&t_query_num), sizeof(t_query_num));
fin.read(reinterpret_cast<char *>(&t_K), sizeof(t_K));
// if (t_query_num != query_num) {
// fprintf(stderr, "Error: query_num %u is not equal to the record %u in true-NN file %s\n",
// query_num, t_query_num, filename);
// exit(EXIT_FAILURE);
// }
if (t_query_num < num_queries_) {
fprintf(stderr, "Error: t_query_num %u is smaller than num_queries_ %u\n", t_query_num, num_queries_);
exit(EXIT_FAILURE);
}
if (t_K < 100) {
fprintf(stderr, "Error: t_K %u is smaller than 100.\n", t_K);
exit(EXIT_FAILURE);
}
// data = new unsigned[(size_t) t_query_num * (size_t) t_K];
true_nn_list.resize(t_query_num);
for (idi q_i = 0; q_i < t_query_num; ++q_i) {
true_nn_list[q_i].resize(t_K);
}
for (unsigned q_i = 0; q_i < t_query_num; ++q_i) {
// size_t offset = q_i * t_K;
for (unsigned n_i = 0; n_i < t_K; ++n_i) {
unsigned id;
float dist;
fin.read(reinterpret_cast<char *>(&id), sizeof(id));
fin.read(reinterpret_cast<char *>(&dist), sizeof(dist));
// data[offset + n_i] = id;
true_nn_list[q_i][n_i] = id;
}
}
fin.close();
}
inline void Searching::get_recall_for_all_queries(
        const std::vector< std::vector<idi> > &true_nn_list,
        const std::vector<std::vector<unsigned>> &set_K_list,
        std::unordered_map<unsigned, double> &recalls) const
{
    // Compute recall@{1,5,10,20,50,100} over all queries: for every true
    // top-100 neighbor, scan the returned top-100 list and credit each cutoff
    // the match falls under, then normalize by cutoff * num_queries_.
    if (true_nn_list[0].size() < 100) {
        fprintf(stderr, "Error: Number of true nearest neighbors of a query is smaller than 100.\n");
        exit(EXIT_FAILURE);
    }
    static const unsigned cutoffs[] = {1, 5, 10, 20, 50, 100};
    for (const unsigned at : cutoffs) {
        recalls[at] = 0.0;
    }
    for (unsigned q_i = 0; q_i < num_queries_; ++q_i) {
        for (unsigned top_i = 0; top_i < 100; ++top_i) {
            const unsigned true_id = true_nn_list[q_i][top_i];
            for (unsigned n_i = 0; n_i < 100; ++n_i) {
                if (set_K_list[q_i][n_i] != true_id) {
                    continue;
                }
                // A hit at position n_i counts toward every cutoff above it.
                for (const unsigned at : cutoffs) {
                    if (n_i < at) {
                        recalls[at] += 1;
                    }
                }
            }
        }
    }
    for (const unsigned at : cutoffs) {
        recalls[at] /= 1.0 * at * num_queries_;
    }
}
// Sequential best-first (beam) search for one query over the packed NSG graph.
// @param query_id  index of the query vector in queries_load_
// @param K         number of final result ids copied into set_K (K <= L)
// @param L         beam width: size of the sorted candidate pool set_L
// @param set_L     candidate pool, kept sorted ascending by distance
// @param init_ids  L seed vertex ids (from prepare_init_ids)
// @param set_K     output: ids of the best K vertices found
inline void Searching::search_in_sequential(
        const idi query_id,
        const idi K,
        const idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
//    {//test
//        printf("Iteration: Relative_Distance:\n");
////        printf("Iteration: Relative_Distance:\n");
////        printf("----query: %u----\n", query_id);
//    }
    // Mark all seeds visited up front so they are not re-inserted later.
    boost::dynamic_bitset<> is_visited(num_v_);
    for (idi v_i = 0; v_i < L; ++v_i) {
        is_visited[init_ids[v_i]] = true;
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Prefetch the seeds' packed vertex records before computing distances.
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        // Packed record starts with the precomputed squared norm, then the vector.
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);
    idi k = 0; // Index of every queue's first unchecked candidate.
    idi tmp_count = 0; // for debug
//    {// Print relative distance
////        distf top_dist = set_L[0].distance_;
//        for (idi i_l = 0; i_l < L; ++i_l) {
//            printf("%u %f\n",
//                   tmp_count, set_L[i_l].distance_);
////                   tmp_count, set_L[i_l].distance_ - top_dist);
//        }
//    }
    // Main loop: repeatedly expand the first unchecked candidate; restart the
    // scan from the earliest position whose entry changed (nk).
    while (k < L) {
        Candidate &top_cand = set_L[k];
        unsigned nk = L; // earliest queue position modified in this expansion
        if (!top_cand.is_checked_) {
            ++tmp_count;
            top_cand.is_checked_ = true;
            idi v_id = top_cand.id_; // Vertex ID.
            _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            // Adjacency list follows the data section: degree, then neighbor ids.
            idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            // Traverse v_id's all neighbors, pushing them into the queue
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                // Compute the distance
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                // Skip neighbors worse than the current worst kept candidate;
                // this also guards insert_into_queue against writing past L.
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
//                if (dist >= set_L[L-1].distance_) {
//                    continue;
//                }
                Candidate cand(nb_id, dist, false);
                // Insert into the queue
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
//            {// Print relative distance
////                distf top_dist = set_L[0].distance_;
//                for (idi i_l = 0; i_l < L; ++i_l) {
//                    printf("%u %f\n",
//                           tmp_count, set_L[i_l].distance_);
////                           tmp_count, set_L[i_l].distance_ - top_dist);
//                }
//            }
        }
        // Jump back to the earliest changed slot; otherwise advance.
        if (nk <= k) {
            k = nk;
        } else {
            ++k;
        }
    }
//    cache_miss_kernel.measure_stop();
    // Emit the ids of the best K candidates.
    for (size_t k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
//    {//test
//        if (0 == query_id) {
//            exit(1);
//        }
//    }
}
//inline void Searching::search_in_sequential_BitVector(
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//{
//// boost::dynamic_bitset<> is_visited(num_v_);
// BitVector is_visited(num_v_);
//
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
//// is_visited[init_ids[v_i]] = true;
// is_visited.atomic_set_bit(init_ids[v_i]);
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
// idi k = 0; // Index of every queue's first unchecked candidate.
// while (k < L) {
// Candidate &top_cand = set_L[k];
// unsigned nk = L;
// if (!top_cand.is_checked_) {
// top_cand.is_checked_ = true;
// idi v_id = top_cand.id_; // Vertex ID.
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// // Traverse v_id's all neighbors, pushing them into the queue
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = true;
//
// {// Self-defined BitVector
// if (is_visited.atomic_is_bit_set(nb_id)) {
// continue;
// }
// is_visited.atomic_set_bit(nb_id);
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// // Compute the distance
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// // Insert into the queue
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// if (nk <= k) {
// k = nk;
// } else {
// ++k;
// }
// }
//// cache_miss_kernel.measure_stop();
//#pragma omp parallel for
// for (size_t k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
/**
* Prepare init_ids and flags, as they are constant for all queries.
* @param[out] init_ids
* @param L
*/
inline void Searching::prepare_init_ids(
std::vector<unsigned int> &init_ids,
const unsigned L) const
{
// idi num_ngbrs = get_out_degree(ep_);
// edgei edge_start = nsg_graph_indices_[ep_];
// // Store ep_'s neighbors as candidates
// idi tmp_l = 0;
// for (; tmp_l < L && tmp_l < num_ngbrs; tmp_l++) {
// init_ids[tmp_l] = nsg_graph_out_edges_[edge_start + tmp_l];
// }
// std::unordered_set<idi> visited_ids;
boost::dynamic_bitset<> is_selected(num_v_);
idi *out_edges = (idi *) (opt_nsg_graph_ + ep_ * vertex_bytes_ + data_bytes_);
idi out_degree = *out_edges++;
idi init_ids_end = 0;
// for (; tmp_l < L && tmp_l < out_degree; tmp_l++) {
for (idi e_i = 0; e_i < out_degree && init_ids_end < L; ++e_i) {
// idi v_id = out_edges[tmp_l];
idi v_id = out_edges[e_i];
if(is_selected[v_id]) {
continue;
}
is_selected[v_id] = true;
// init_ids[tmp_l] = v_id;
init_ids[init_ids_end++] = v_id;
// init_ids[tmp_l] = out_edges[tmp_l];
// visited_ids.insert(init_ids[tmp_l]);
}
// for (idi i = 0; i < tmp_l; ++i) {
// is_visited[init_ids[i]] = true;
// }
// If ep_'s neighbors are not enough, add other random vertices
idi tmp_id = ep_ + 1; // use tmp_id to replace rand().
while (init_ids_end < L) {
tmp_id %= num_v_;
idi v_id = tmp_id++;
if (is_selected[v_id]) {
continue;
}
// if (visited_ids.find(id) != visited_ids.end()) {
// continue;
// }
is_selected[v_id] = true;
// visited_ids.insert(id);
init_ids[init_ids_end++] = v_id;
// tmp_l++;
}
}
// TODO: re-code in AVX-512
// Squared L2 norm <data, data> of one vector, computed with AVX.
// NOTE(review): D rounds dimension_ up to a multiple of 8, so the tail step
// may read up to 7 floats past dimension_ — assumes the buffer is padded /
// readable there (standard NSG data layout); TODO confirm.
inline dataf Searching::compute_norm(
        const dataf *data) const
//        idi vertex_id)
//        const std::vector<PANNS::dataf> &data)
//        size_t loc_start,
//        idi dimension)
{
//    const dataf *a = data.data() + loc_start;
//    const dataf *a = data_load_ + vertex_id * dimension_;
//    idi size = dimension_;
    dataf result = 0;
//#define AVX_L2NORM(addr, dest, tmp) \
//    tmp = _mm256_load_ps(addr); \
//    tmp = _mm256_mul_ps(tmp, tmp); \
//    dest = _mm256_add_ps(dest, tmp);
// Unaligned-load variant: accumulate tmp * tmp into dest (8 floats at a time).
#define AVX_L2NORM(addr, dest, tmp) \
    tmp = _mm256_loadu_ps(addr); \
    tmp = _mm256_mul_ps(tmp, tmp); \
    dest = _mm256_add_ps(dest, tmp);
    __m256 sum;
    __m256 l0, l1;
    unsigned D = (dimension_ + 7) & ~7U;  // dimension_ rounded up to a multiple of 8
    unsigned DR = D % 16;                 // leftover 8-float block (0 or 8)
    unsigned DD = D - DR;                 // bulk part, 16 floats per iteration
    const float *l = data;
    const float *e_l = l + DD;
    // Zero-initialize the 8-lane accumulator.
    float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0};
    sum = _mm256_load_ps(unpack);
//    sum = _mm256_loadu_ps(unpack);
    // Handle the 8-float tail first, then the 16-float bulk (two steps per iter).
    if (DR) { AVX_L2NORM(e_l, sum, l0); }
    for (unsigned i = 0; i < DD; i += 16, l += 16) {
        AVX_L2NORM(l, sum, l0);
        AVX_L2NORM(l + 8, sum, l1);
    }
    _mm256_store_ps(unpack, sum);
//    _mm256_storeu_ps(unpack, sum);
    // Horizontal reduction of the 8 partial sums.
    result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7];
    return result;
}
// Ranking distance between a vertex and the query: returns
// ||v||^2 - 2 <v, q>, i.e. the squared Euclidean distance minus the query's
// own norm ||q||^2. The omitted term is constant per query, so candidate
// ordering is unchanged while one norm computation is saved per comparison.
// vertex_norm is the precomputed ||v||^2 stored in the packed graph.
// NOTE(review): like compute_norm(), the AVX loops read up to the next
// multiple of 8 floats past dimension_ — assumes padded buffers; TODO confirm.
inline dataf Searching::compute_distance_with_norm(
        const dataf *v_data,
        const dataf *q_data,
//        idi vertex_id,
//        idi query_id,
//        const std::vector<PANNS::dataf> &d_data,
//        const std::vector<PANNS::dataf> &q_data,
//        PANNS::idi d_start,
//        PANNS::idi q_start,
        const dataf vertex_norm) const
//        idi dimension)
{
//    idi size = dimension_;
    float result = 0;
//#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \
//    tmp1 = _mm256_load_ps(addr1);\
//    tmp2 = _mm256_load_ps(addr2);\
//    tmp1 = _mm256_mul_ps(tmp1, tmp2); \
//    dest = _mm256_add_ps(dest, tmp1);
// Unaligned-load variant: accumulate the 8-wide product into dest.
#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \
    tmp1 = _mm256_loadu_ps(addr1);\
    tmp2 = _mm256_loadu_ps(addr2);\
    tmp1 = _mm256_mul_ps(tmp1, tmp2); \
    dest = _mm256_add_ps(dest, tmp1);
    __m256 sum;
    __m256 l0, l1;
    __m256 r0, r1;
    unsigned D = (dimension_ + 7) & ~7U;  // dimension_ rounded up to a multiple of 8
    unsigned DR = D % 16;                 // leftover 8-float block (0 or 8)
    unsigned DD = D - DR;                 // bulk part, 16 floats per iteration
    const float *l = v_data;
    const float *r = q_data;
//    const float *l = (float *) (opt_nsg_graph_ + vertex_id * vertex_bytes_ + sizeof(distf));
//    const float *r = queries_load_ + query_id * dimension_;
    const float *e_l = l + DD;
    const float *e_r = r + DD;
    // Zero-initialize the 8-lane accumulator.
    float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0};
    sum = _mm256_load_ps(unpack);
//    sum = _mm256_loadu_ps(unpack);
    // Handle the 8-float tail first, then the 16-float bulk (two steps per iter).
    if (DR) { AVX_DOT(e_l, e_r, sum, l0, r0); }
    for (unsigned i = 0; i < DD; i += 16, l += 16, r += 16) {
        AVX_DOT(l, r, sum, l0, r0);
        AVX_DOT(l + 8, r + 8, sum, l1, r1);
    }
    _mm256_store_ps(unpack, sum);
//    _mm256_storeu_ps(unpack, sum);
    // Horizontal reduction, then fold in the precomputed vertex norm.
    result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7];
    result = -2 * result + vertex_norm;
    return result;
}
//// DEPRECATED.
// The difference from insert_into_queue is that add_into_queue will increase the queue size by 1.
//inline idi Searching::add_into_queue(
// std::vector<PANNS::Candidate> &queue,
// idi &queue_top,
// const idi queue_size,
// const PANNS::Candidate &cand)
//{
// assert(queue_size > 1);
// if (0 == queue_top) {
// queue[queue_top++] = cand;
// return 0;
// } else if (1 == queue_top) {
// if (queue[0] < cand) {
// queue[queue_top++] = cand;
// return 1;
// } else {
// queue[++queue_top] = queue[0];
// queue[0] = cand;
// return 0;
// }
// }
//
// if (queue[queue_top - 1] < cand) {
// if (queue_top < queue_size) {
// queue[queue_top++] = cand;
// }
// return queue_top;
// }
//
// idi r = insert_into_queue(
// queue,
// queue_top - 1,
// cand);
//// {//test
//// printf("r: %u"
//// "queue_top: %u "
//// "queue_size: %u\n",
//// r,
//// queue_top,
//// queue_size);
//// }
// return r;
//
//// /////////////////////////////////////////////////////////////
//// // Find the insert location
//// auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_top, cand);
//// idi insert_loc = it_loc - queue.begin();
//// if (insert_loc == queue_size) {
//// return queue_size;
//// }
////
//// // Insert
////// if (queue_top == queue_size) {
////// // If full already
////// --queue_top;
////// }
//// memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1),
//// reinterpret_cast<char *>(queue.data() + insert_loc),
//// (queue_top - insert_loc) * sizeof(Candidate));
////// for (idi q_i = queue_top; q_i > insert_loc; --q_i) {
////// queue.at(q_i) = queue.at(q_i - 1);
////// }
//// queue[insert_loc] = cand;
//// ++queue_top;
//// return insert_loc;
//}
// The difference from insert_into_queue is that add_into_queue will increase the queue size by 1.
inline idi Searching::add_into_queue(
        std::vector<PANNS::Candidate> &queue,
        idi &queue_top,
        const idi queue_size,
        const PANNS::Candidate &cand)
{
    // Insert cand into the ascending queue [0, queue_top), growing queue_top
    // by one unless the queue is already at capacity queue_size (then the
    // worst element is dropped). Returns the insertion index, or queue_size
    // when cand is worse than everything in a full queue.
    if (queue_top == 0) {
        queue[0] = cand;
        queue_top = 1;
        return 0;
    }
    // Binary-search for the first element not less than cand.
    const auto pos_it = std::lower_bound(queue.begin(), queue.begin() + queue_top, cand);
    const idi pos = static_cast<idi>(pos_it - queue.begin());
    if (pos == queue_size) {
        // cand would land past the capacity: rejected.
        return queue_size;
    }
    if (queue_top == queue_size) {
        // Full: forget the current worst to make room.
        --queue_top;
    }
    // Shift the tail right by one slot and place cand.
    memmove(reinterpret_cast<char *>(queue.data() + pos + 1),
            reinterpret_cast<char *>(queue.data() + pos),
            (queue_top - pos) * sizeof(Candidate));
    queue[pos] = cand;
    ++queue_top;
    return pos;
}
// The difference from insert_into_queue is that add_into_queue will increase the queue size by 1.
// add_into_queue with a queue_start
// Insert cand into the sorted sub-queue queue[queue_start ..
// queue_start + queue_size), growing queue_size by one unless the queue is at
// queue_capacity (then the worst element is dropped). Returns the insertion
// position relative to queue_start, or queue_capacity when cand is rejected
// (detected duplicate, or worse than all elements of a full queue).
inline idi Searching::add_into_queue(
        std::vector<PANNS::Candidate> &queue,
        const idi queue_start,
        idi &queue_size, // Current number of elements, counted from queue_start
        const idi queue_capacity, // The maximum capacity of queue, independent with queue_start.
        const PANNS::Candidate &cand)
{
    if (0 == queue_size) {
        queue[queue_start + queue_size++] = cand;
        return 0;
    }
    idi queue_end = queue_start + queue_size;
    // Find the insert location
    const auto it_loc = std::lower_bound(queue.begin() + queue_start, queue.begin() + queue_end, cand);
//    auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_size, cand);
    idi insert_loc = it_loc - queue.begin();
    if (insert_loc != queue_end) {
        if (cand.id_ == it_loc->id_) {
            // Duplicate
            // NOTE(review): only the single element at the insertion point is
            // compared; an equal-distance duplicate adjacent to it would not
            // be detected — confirm that is acceptable for this workload.
            return queue_capacity;
        }
        if (queue_size >= queue_capacity) { // Queue is full
            // Drop the last (worst) element to make room for cand.
            --queue_size;
            --queue_end;
        }
    } else { // insert_loc == queue_end, insert at the end?
        if (queue_size < queue_capacity) { // Queue is not full
            // Insert at the end
            queue[insert_loc] = cand;
            ++queue_size;
            return queue_size - 1;
        } else { // Queue is full
            return queue_capacity;
        }
    }
    // Add into queue: shift the tail right one slot and place cand.
    memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1),
            reinterpret_cast<char *>(queue.data() + insert_loc),
            (queue_end - insert_loc) * sizeof(Candidate));
    queue[insert_loc] = cand;
    ++queue_size;
    return insert_loc - queue_start;
}
inline void Searching::add_into_queue_at(
const Candidate &cand,
std::vector<Candidate> &queue,
const idi insert_index, // The insertion location, independent with queue_start
const idi queue_start,
idi &queue_size, // The number of elements in queue, independent with queue_start
const idi queue_length) // The maximum capacity of queue, independent with queue_start.
{
const idi dest_index = queue_start + insert_index;
if (queue_size == queue_length) {
--queue_size;
}
memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1),
reinterpret_cast<char *>(queue.data() + dest_index),
(queue_size - insert_index) * sizeof(Candidate));
queue[dest_index] = cand;
++queue_size;
}
inline void Searching::insert_one_element_at(
// const T &cand,
// T *queue_base,
const Candidate &cand,
std::vector<Candidate> &queue,
const idi insert_index,
const idi queue_start,
const idi queue_size)
{
const idi dest_index = queue_start + insert_index;
memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1),
reinterpret_cast<char *>(queue.data() + dest_index),
(queue_size - insert_index - 1) * sizeof(Candidate));
queue[dest_index] = cand;
// memmove(reinterpret_cast<char *>(queue_base + dest_index + 1),
// reinterpret_cast<char *>(queue_base + dest_index),
// (queue_size - insert_index - 1) * sizeof(T));
// for (idi q_i = queue_size - 1; q_i > insert_index; --q_i) {
// queue_base.at(q_i + queue_start) = queue_base.at(q_i - 1 + queue_start);
// }
// queue_base[dest_index] = cand;
}
/**
* PANNS version of InsertIntoPool(): binary-search to find the insert place and then move.
* @param[out] c_queue
* @param c_queue_top
* @param cand
* @return
*/
inline idi Searching::insert_into_queue(
        std::vector<PANNS::Candidate> &c_queue,
        PANNS::idi c_queue_top,
        PANNS::Candidate cand)
{
    // Insert cand into the ascending queue c_queue[0 .. c_queue_top), keeping
    // the length fixed (the last element is pushed out). Ties on distance are
    // broken by smaller id first. Returns the insertion index, or c_queue_top
    // when cand was not inserted.
    // NOTE(review): if cand is strictly worse than the last element, control
    // falls through to the final store and writes c_queue[c_queue_top] (one
    // past the nominal top). Callers guard with
    // `dist > set_L[L-1].distance_ -> continue`, so this path is not taken
    // from the search loops — confirm before reusing elsewhere.
    if (c_queue[0].distance_ > cand.distance_) {
        // If the first
        memmove(reinterpret_cast<char *>(c_queue.data() + 1),
                reinterpret_cast<char *>(c_queue.data()),
                c_queue_top * sizeof(Candidate));
        c_queue[0] = cand;
        return 0;
    } else if (c_queue[c_queue_top - 1].distance_ == cand.distance_) {
        // If the last
        if (c_queue[c_queue_top - 1].id_ > cand.id_) {
            // Use ID as the second metrics for ordering
            c_queue[c_queue_top - 1] = cand;
            return c_queue_top - 1;
        } else {
            return c_queue_top;
        }
    }
    // Binary search for the first position whose distance exceeds cand's.
    idi left = 0;
    idi right = c_queue_top;
    while (left < right) {
        idi mid = (right - left) / 2 + left;
        if (c_queue[mid].distance_ > cand.distance_) {
            right = mid;
        } else {
            left = mid + 1;
        }
    }
    // If the distance is the same, back up over equal-distance entries with
    // larger ids so the id tie-break order is preserved.
    if (0 != left && c_queue[left - 1].distance_ != cand.distance_) {
        ;
    } else {
        while (0 != left
               && c_queue[left - 1].distance_ == cand.distance_
               && c_queue[left - 1].id_ > cand.id_) {
            // Use ID as the second metrics for ordering
            --left;
        }
    }
    // Insert to left
    memmove(reinterpret_cast<char *>(c_queue.data() + left + 1),
            reinterpret_cast<char *>(c_queue.data() + left),
            (c_queue_top - left) * sizeof(Candidate));
    c_queue[left] = cand;
    return left;
}
//inline void Searching::cand_pushes_ngbrs_into_queue(
// idi cand_id,
// const dataf *query_data,
// idi L,
// idi &new_k,
// boost::dynamic_bitset<> &is_visited,
// std::vector<Candidate> &set_L)
//{
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist >= set_L[L-1].distance_) {
// continue;
// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
//}
//inline void Searching::search_in_sequential(
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K) const
//{
// boost::dynamic_bitset<> is_visited(num_v_);
//
// for (idi v_i = 0; v_i < L; ++v_i) {
// is_visited[init_ids[v_i]] = true;
// }
// const dataf *query_data = queries_load_ + query_id * dimension_;
//
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
// idi k = 0; // Index of every queue's first unchecked candidate.
// while (k < L) {
// Candidate &top_cand = set_L[k];
// unsigned nk = L;
// if (!top_cand.is_checked_) {
// top_cand.is_checked_ = true;
// idi v_id = top_cand.id_; // Vertex ID.
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// // Traverse v_id's all neighbors, pushing them into the queue
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// // Compute the distance
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
// Candidate cand(nb_id, dist, false);
// // Insert into the queue
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// if (nk <= k) {
// k = nk;
// } else {
// ++k;
// }
// }
//
// for (size_t k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
// Deprecated: cannot use std::set, because its element is constant.
//inline void Searching::search_in_sequential(
// const idi query_id,
// const idi K,
// const idi L,
//// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K) const
//{
// std::set<Candidate> set_L;
// boost::dynamic_bitset<> is_visited(num_v_);
//
// for (idi v_i = 0; v_i < L; ++v_i) {
// is_visited[init_ids[v_i]] = true;
// }
// const dataf *query_data = queries_load_ + query_id * dimension_;
//
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
//// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// set_L.emplace(v_id, dist, false);
// }
//// std::sort(set_L.begin(), set_L.begin() + L);
// idi k = 0; // Index of every queue's first unchecked candidate.
// while (k < L) {
//// Candidate &top_cand = set_L[k];
// std::set<Candidate>::iterator top_cand = std::next(set_L.begin(), k);
// unsigned nk = L;
// if (!top_cand->is_checked_) {
// top_cand->is_checked_ = true;
// idi v_id = top_cand.id_; // Vertex ID.
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// // Traverse v_id's all neighbors, pushing them into the queue
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// // Compute the distance
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
// Candidate cand(nb_id, dist, false);
// // Insert into the queue
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// if (nk <= k) {
// k = nk;
// } else {
// ++k;
// }
// }
//
// for (size_t k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
/* Function:
 * Merge the sorted range queue2[queue2_start, queue2_start + queue2_size)
 * into the sorted, FIXED-capacity window
 * queue1[queue1_start, queue1_start + queue1_size).
 * queue1_size never changes; elements pushed past the end of the window are
 * dropped. Returns the lowest offset (relative to queue1_start) at which an
 * element was inserted, i.e. the highest position that may have changed
 * (queue1_size if queue1 is untouched).
 */
inline idi Searching::merge_two_queues_into_1st_queue_seq_fixed(
        std::vector<Candidate> &queue1,
        const idi queue1_start,
        const idi queue1_size,
        std::vector<Candidate> &queue2,
        const idi queue2_start,
        const idi queue2_size)
//        const idi limit_size)
{
    assert(queue1_size && queue2_size);
    // Record the lowest insert location.
    auto it_loc = std::lower_bound(
            queue1.begin() + queue1_start,
            queue1.begin() + queue1_start + queue1_size,
            queue2[queue2_start]);
    idi insert_index = it_loc - (queue1.begin() + queue1_start);
    if (insert_index == queue1_size) {
        // Every element of queue2 sorts after queue1's window: nothing to do.
        return insert_index;
    } else if (insert_index == queue1_size - 1) {
        // Only the last slot can change: overwrite it with queue2's head.
        queue1[queue1_start + insert_index] = queue2[queue2_start];
        return insert_index;
    }
    // Insert the 1st of queue2
    if (queue2[queue2_start].id_ != it_loc->id_) {
        // Not Duplicate
        insert_one_element_at(
                queue2[queue2_start],
                queue1,
                insert_index,
                queue1_start,
                queue1_size);
    }
    if (queue2_size == 1) {
        return insert_index;
    }
    // Insert the rest of queue2 by walking both queues in lock step.
    idi q_i_1 = insert_index + 1 + queue1_start;
    idi q_i_2 = queue2_start + 1;
    const idi q_i_1_bound = queue1_start + queue1_size;
    const idi q_i_2_bound = queue2_start + queue2_size;
//    const idi insert_i_bound = queue1_start + limit_size;
    for (idi insert_i = insert_index + 1; insert_i < queue1_size; ++insert_i) {
        if (q_i_1 >= q_i_1_bound || q_i_2 >= q_i_2_bound) {
            // queue1 or queue2 finished traverse. Rest of the window stays.
            break;
        } else if (queue1[q_i_1] < queue2[q_i_2]) {
            ++q_i_1;
        } else if (queue2[q_i_2] < queue1[q_i_1]) {
            // Insert queue2[q_i_2] into queue1
            insert_one_element_at(
                    queue2[q_i_2++],
                    queue1,
                    insert_i,
                    queue1_start,
                    queue1_size);
            ++q_i_1;
        } else {
            // Duplicate (compares equal): keep queue1's copy, skip queue2's.
            ++q_i_2;
            ++q_i_1;
        }
    }
    return insert_index;
}
/* Function:
 * Merge the sorted range queue2[queue2_start, queue2_start + queue2_size)
 * into the sorted queue queue1[queue1_start, queue1_start + queue1_size),
 * GROWING queue1 up to its capacity queue1_length.
 * queue1_size is updated in place to the new element count; elements that
 * would exceed queue1_length are dropped.
 */
inline void Searching::merge_two_queues_into_1st_queue_seq_incr(
        std::vector<Candidate> &queue1,
        const idi queue1_start,
        idi &queue1_size, // The number of element in queue1, independent with queue1_start.
        const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start.
        std::vector<Candidate> &queue2,
        const idi queue2_start,
        const idi queue2_size)
//        const idi limit_size)
{
    assert(queue1_size && queue2_size);
    // Record the lowest insert location.
    auto it_loc = std::lower_bound(
            queue1.begin() + queue1_start,
            queue1.begin() + queue1_start + queue1_size,
            queue2[queue2_start]);
    idi insert_index = it_loc - (queue1.begin() + queue1_start);
    if (insert_index == queue1_size) {
        // All of queue2 sorts after queue1's tail: bulk-append as many
        // elements as capacity allows.
        // NOTE(review): memmove assumes Candidate is trivially copyable.
        idi copy_count = (queue1_size + queue2_size > queue1_length) ?
                queue1_length - queue1_size :
                queue2_size;
        memmove(queue1.data() + queue1_start + queue1_size,
                queue2.data() + queue2_start,
                copy_count * sizeof(Candidate));
        queue1_size += copy_count;
        return;
    }
    if (queue2[queue2_start].id_ != it_loc->id_) {
        // Not Duplicate
        add_into_queue_at(
                queue2[queue2_start],
                queue1,
                insert_index,
                queue1_start,
                queue1_size,
                queue1_length);
    }
    if (queue2_size == 1) {
        return;
    }
    // Insert the rest of queue2 by walking both queues in lock step.
    idi q_i_1 = insert_index + 1 + queue1_start;
    idi q_i_2 = queue2_start + 1;
    idi q_i_1_bound = queue1_start + queue1_size; // When queue1_size is updated, so should be q_i_1_bound.
    const idi q_i_2_bound = queue2_start + queue2_size;
//    idi insert_i;
    for (idi insert_i = insert_index + 1; insert_i < queue1_length; ++insert_i) {
        if (q_i_1 >= q_i_1_bound) {
            // queue1 exhausted: append the remainder of queue2 up to capacity.
            queue1_size += std::min(queue1_length - insert_i, q_i_2_bound - q_i_2);
            for ( ; insert_i < queue1_size; ++insert_i) {
                queue1[queue1_start + insert_i] = queue2[q_i_2++];
            }
            break;
        } else if (q_i_2 >= q_i_2_bound) {
            // queue2 exhausted: queue1's tail is already in place.
            break;
        } else if (queue1[q_i_1] < queue2[q_i_2]) {
            ++q_i_1;
        } else if (queue2[q_i_2] < queue1[q_i_1]) {
            add_into_queue_at(
                    queue2[q_i_2++],
                    queue1,
                    insert_i,
                    queue1_start,
                    queue1_size,
                    queue1_length);
            ++q_i_1;
            // add_into_queue_at may have grown queue1_size; refresh the bound.
            q_i_1_bound = queue1_start + queue1_size;
        } else {
            // Duplicate (compares equal): keep queue1's copy, skip queue2's.
            ++q_i_2;
            ++q_i_1;
        }
    }
}
/* Function:
 * Merge all per-thread local queues (one std::vector each) pairwise in a
 * hypercube pattern over the largest power-of-two prefix of queues, then
 * fold any remaining queues sequentially (prefix-sum-like), and finally
 * merge the accumulated result (left in the last queue) into the global
 * queue set_L of length L.
 * Returns the lowest index of set_L that was modified (L if unchanged).
 * All entries of local_queues_ends are reset to 0 before returning.
 *
 * Fix: the per-pair merge logic was duplicated verbatim between the
 * hypercube phase and the remainder phase; it is factored into one local
 * lambda, with identical behavior.
 */
inline idi Searching::merge_all_queues_para_list(
        std::vector< std::vector<Candidate> > &local_queues_list,
        std::vector<idi> &local_queues_ends,
        std::vector<Candidate> &set_L,
        const idi L)
{
    // Merge queue bi into queue ai, keeping at most the best L elements.
    auto merge_b_into_a = [&](idi ai, idi bi) {
        if (0 == local_queues_ends[bi]) {
            return; // bi is empty: nothing to merge.
        }
        if (local_queues_ends[ai] == 0) {
            // ai is empty: steal bi's storage instead of copying.
            local_queues_list[ai].swap(local_queues_list[bi]);
            std::swap(local_queues_ends[ai], local_queues_ends[bi]);
            return;
        }
        idi tmp_length = local_queues_ends[ai] + local_queues_ends[bi];
        std::vector<Candidate> tmp_queue(tmp_length);
        std::merge(
                local_queues_list[ai].begin(),
                local_queues_list[ai].begin() + local_queues_ends[ai],
                local_queues_list[bi].begin(),
                local_queues_list[bi].begin() + local_queues_ends[bi],
                tmp_queue.begin());
        if (tmp_length > L) {
            // Keep only the top-L elements.
            tmp_queue.resize(L);
            tmp_length = L;
        } else if (tmp_length < L) {
            // Pad the storage up to L so later merges have room; the
            // logical size stays tmp_length.
            tmp_queue.resize(L);
        }
        local_queues_list[ai].swap(tmp_queue);
        local_queues_ends[ai] = tmp_length;
    };

    int size = 1 << (static_cast<idi>(log2(num_threads_)));
    idi log2size = static_cast<idi>(log2(size));
    // Hypercube (tournament) phase: disjoint pairs merged in parallel.
    for (idi d = 0; d < log2size; ++d) {
        uint32_t by = 1 << (d + 1);
#pragma omp parallel for
        for (int i = 0; i < size; i += by) {
            merge_b_into_a(
                    i + (1 << (d + 1)) - 1, // ai = i + 2^(d+1) - 1
                    i + (1 << d) - 1);      // bi = i + 2^d - 1
        }
    }
    // Remain, prefix-sum-like merge for queues beyond the power-of-two prefix.
    if (size != num_threads_) {
        for (int i = size; i < num_threads_; ++i) {
            merge_b_into_a(i, i - 1);
        }
    }
    // Merge the final accumulated queue into set_L.
    idi r = L;
    if (local_queues_ends[num_threads_ - 1]) {
        r = merge_two_queues_into_1st_queue_seq_fixed(
                set_L,
                0,
                L,
                local_queues_list[num_threads_ - 1],
                0,
                local_queues_ends[num_threads_ - 1]);
    }
    // Reset local_queues_ends
    std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    return r;
}
/* Function:
 * Use large local_queues_array as a concatenation of all queues:
 * queue q occupies set_L[q * local_queue_length,
 *                       q * local_queue_length + local_queues_ends[q]).
 * Queue (num_threads_ - 1) is the global queue of fixed capacity L.
 * Merges pairwise in a hypercube pattern over the power-of-two prefix,
 * then folds the remainder sequentially.
 * Returns nk, the lowest modified index of the global queue (L if
 * unchanged). All local queue ends except the global one are reset.
 *
 * Fix: the per-pair merge logic was duplicated verbatim between the
 * hypercube phase and the remainder phase; it is factored into one local
 * lambda, with identical behavior.
 */
inline idi Searching::merge_all_queues_para_array(
        std::vector<Candidate> &set_L,
        std::vector<idi> &local_queues_ends,
        const idi local_queue_length,
        const idi L)
{
    const int num_queues = num_threads_;
    idi nk = L;
    // Merge queue bi into queue ai inside the flat array set_L.
    auto merge_b_into_a = [&](idi ai, idi bi) {
        idi a_start = ai * local_queue_length;
        idi b_start = bi * local_queue_length;
        if (0 == local_queues_ends[bi]) {
            return; // bi is empty: nothing to merge.
        }
        if (local_queues_ends[ai] == 0) {
            std::copy(set_L.begin() + b_start,
                    set_L.begin() + b_start + local_queues_ends[bi],
                    set_L.begin() + a_start); // Copy bi to ai
            local_queues_ends[ai] = local_queues_ends[bi];
            local_queues_ends[bi] = 0;
            return;
        }
        if (ai != static_cast<idi>(num_queues - 1)) {
            // Ordinary local queue: may grow up to local_queue_length.
            merge_two_queues_into_1st_queue_seq_incr(
                    set_L,
                    a_start,
                    local_queues_ends[ai],
                    local_queue_length,
                    set_L,
                    b_start,
                    local_queues_ends[bi]);
        } else {
            // Global queue: fixed capacity L; track the lowest changed
            // position. At most one pair per level has ai == num_queues - 1,
            // so this unsynchronized update of nk is touched by one
            // iteration per (sequential) level.
            idi r = merge_two_queues_into_1st_queue_seq_fixed(
                    set_L,
                    a_start,
                    L,
                    set_L,
                    b_start,
                    local_queues_ends[bi]);
            if (r < nk) {
                nk = r;
            }
        }
    };

    int size = 1 << (static_cast<idi>(log2(num_queues)));
    idi log2size = static_cast<idi>(log2(size));
    // Hypercube (tournament) phase: disjoint pairs merged in parallel.
    for (idi d = 0; d < log2size; ++d) {
        uint32_t by = 1 << (d + 1);
#pragma omp parallel for
        for (int i = 0; i < size; i += by) {
            merge_b_into_a(
                    i + (1 << (d + 1)) - 1, // ai = i + 2^(d+1) - 1
                    i + (1 << d) - 1);      // bi = i + 2^d - 1
        }
    }
    // Remain, prefix-sum-like merge for queues beyond the power-of-two prefix.
    if (size != num_queues) {
        for (int i = size; i < num_queues; ++i) {
            merge_b_into_a(i, i - 1);
        }
    }
    // Reset local_queues_ends
    // Not do this for Collector Idea or Selecting Idea
    std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0);
//    std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    return nk;
//    return r;
}
/* Function:
 * When merge all queues (in an array, and [num_threads_ - 1] is the global queue),
 * the starting local queue is at index [queue_base]: only queues
 * [queue_base, num_threads_) participate, merged by real_threads threads.
 * Layout matches merge_all_queues_para_array: queue q occupies
 * set_L[q * local_queue_length, q * local_queue_length + local_queues_ends[q]).
 * Returns nk, the lowest modified index of the global queue (L if unchanged).
 */
inline idi Searching::merge_all_queues_queue_base(
//        std::vector< std::vector<Candidate> > &local_queues_list,
        std::vector<Candidate> &set_L,
//        std::vector<Candidate> &local_queues_array,
        std::vector<idi> &local_queues_ends,
        const idi queue_base,
        const int real_threads,
        const idi local_queue_length,
//        std::vector<Candidate> &set_L,
        const idi L)
{
    idi nk = L;
    // Hypercube phase over the power-of-two prefix of the participating queues.
    int size = 1 << (static_cast<idi>(log2(real_threads)));
//    int size = 1 << (static_cast<idi>(log2(num_threads_)));
    idi log2size = static_cast<idi>(log2(size));
    for (idi d = 0; d < log2size; ++d) {
        idi by = 1 << (d + 1);
        idi i_bound = size + queue_base;
#pragma omp parallel for num_threads(real_threads)
        for (idi i = queue_base; i < i_bound; i += by) {
//        for (int i = 0; i < size; i += by) {
//            idi ai = i + (1 << (d + 1)) - 1 + queue_base; // i + 2^(d+1) - 1
            idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1
            idi a_start = ai * local_queue_length;
//            idi bi = i + (1 << d) - 1 + queue_base; // i + 2^d - 1
            idi bi = i + (1 << d) - 1; // i + 2^d - 1
            idi b_start = bi * local_queue_length;
            if (0 == local_queues_ends[bi]) {
                // bi is empty: nothing to merge.
                continue;
            }
            if (local_queues_ends[ai] == 0) {
//                local_queues_list[ai].swap(local_queues_list[bi]);
                std::copy(set_L.begin() + b_start,
                        set_L.begin() + b_start + local_queues_ends[bi],
                        set_L.begin() + a_start); // Copy bi to ai
                local_queues_ends[ai] = local_queues_ends[bi];
                local_queues_ends[bi] = 0;
                continue;
            }
            if (ai != static_cast<idi>(num_threads_ - 1)) {
                // Ordinary local queue: may grow up to local_queue_length.
                merge_two_queues_into_1st_queue_seq_incr(
                        set_L,
                        a_start,
                        local_queues_ends[ai],
                        local_queue_length,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
            } else {
                // Global queue: fixed capacity L; record the lowest changed
                // position. At most one pair per level takes this branch.
                idi r = merge_two_queues_into_1st_queue_seq_fixed(
                        set_L,
                        a_start,
                        L,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
                if (r < nk) {
                    nk = r;
                }
            }
        }
    }
    // Remain, prefix-sum-like merge for queues beyond the power-of-two prefix
    // (runs up to and including the global queue at num_threads_ - 1).
    if (size != real_threads) {
//    if (size != num_threads_) {
        for (int i = size + queue_base; i < num_threads_; ++i) {
//        for (int i = size; i < num_threads_; ++i) {
            idi ai = i;
            idi a_start = ai * local_queue_length;
            idi bi = i - 1;
            idi b_start = bi * local_queue_length;
            if (0 == local_queues_ends[bi]) {
                continue;
            }
            if (local_queues_ends[ai] == 0) {
                std::copy(set_L.begin() + b_start,
                        set_L.begin() + b_start + local_queues_ends[bi],
                        set_L.begin() + a_start); // Copy bi to ai
                local_queues_ends[ai] = local_queues_ends[bi];
                local_queues_ends[bi] = 0;
                continue;
            }
            if (ai != static_cast<idi>(num_threads_ - 1)) {
                merge_two_queues_into_1st_queue_seq_incr(
                        set_L,
                        a_start,
                        local_queues_ends[ai],
                        local_queue_length,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
            } else {
                idi r = merge_two_queues_into_1st_queue_seq_fixed(
                        set_L,
                        a_start,
                        L,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
                if (r < nk) {
                    nk = r;
                }
            }
        }
    }
    // Reset local_queues_ends (all but the global queue's).
    std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0);
//    std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    return nk;
//    return r;
}
inline void Searching::merge_two_consecutive_queues_in_place(
std::vector<Candidate> &two_queues,
const idi base_1,
// const idi &end_1,
const idi base_2,
const idi &length_2)
{
// idi tid = omp_get_thread_num();
idi index_1 = base_1;
idi index_2 = base_2;
const idi bound_2 = base_2 + length_2;
while (index_1 < index_2
&& index_2 < bound_2) {
Candidate e_1 = two_queues[index_1];
Candidate e_2 = two_queues[index_2];
if (e_1 < e_2) {
++index_1;
} else if (e_2 < e_1) {
// time_memmove_list_[tid] -= WallTimer::get_time_mark();
std::memmove(two_queues.data() + index_1 + 1,
two_queues.data() + index_1,
(index_2 - index_1) * sizeof(Candidate));
// time_memmove_list_[tid] += WallTimer::get_time_mark();
two_queues[index_1] = e_2;
++index_1;
++index_2;
} else { // Duplicate, but have no idea what to do right now
// time_memmove_list_[tid] -= WallTimer::get_time_mark();
std::memmove(two_queues.data() + index_1 + 1,
two_queues.data() + index_1,
(index_2 - index_1) * sizeof(Candidate));
// time_memmove_list_[tid] += WallTimer::get_time_mark();
index_1 += 2;
++index_2;
}
}
}
///* Function:
// * Merge all queues to the global queue, in a two-queue-merge way
// */
//inline idi Searching::merge_all_queues_all_together_in_sequential(
// std::vector<Candidate> &set_L,
// std::vector<idi> &local_queues_ends,
// const idi local_queue_length,
// const idi L)
//{
// const idi num_queues = num_threads_;
// const idi global_queue_base = (num_queues - 1) * local_queue_length;
// std::vector<idi> queue_heads(num_queues, 0);
// idi queue_id_min;
//
//// bool is_finished = false;
// bool is_1st_selected = true;
// idi nk = L; // The highest location of insertion.
// {
// for (idi q_i = 0; q_i < num_queues; ++q_i) {
// if (0 == local_queues_ends[q_i]) {
// continue;
// }
// _mm_prefetch(set_L.data() + q_i * local_queue_length, _MM_HINT_T0);
// }
// }
// while (queue_heads[num_queues - 1] < L) {
//// time_compare_minimum_ -= WallTimer::get_time_mark();
// queue_id_min = min_all_queues_at_heads(
// set_L,
// queue_heads,
// local_queues_ends,
// local_queue_length,
// L);
//// time_compare_minimum_ += WallTimer::get_time_mark();
// if (queue_id_min != num_queues - 1) { // Not in the global queue
//// time_insert_ -= WallTimer::get_time_mark();
// insert_one_element_at(
// set_L[queue_heads[queue_id_min] + queue_id_min * local_queue_length],
// set_L,
// queue_heads[num_queues - 1],
// global_queue_base,
// L);
//// time_insert_ += WallTimer::get_time_mark();
// if (is_1st_selected) { // Get the highest inserting location
// is_1st_selected = false;
// nk = queue_heads[num_queues - 1];
// }
// ++queue_heads[queue_id_min];
// }
// ++queue_heads[num_queues - 1];
// }
//
// // Reset local_queues_ends
// std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0);
// return nk;
//}
///* Function:
// * Find the minimum among queues at their head locations
// */
//inline idi Searching::min_all_queues_at_heads(
// const std::vector<Candidate> &set_L,
// std::vector<idi> &queue_heads,
// const std::vector<idi> &local_queues_ends,
// const idi local_queue_length,
// const idi L)
//{
// const idi num_queues = num_threads_;
// idi min_queue_id = num_queues - 1;
// Candidate min_candidate = set_L[queue_heads[min_queue_id] + min_queue_id * local_queue_length];
//
// for (idi q_i = 0; q_i < num_queues - 1; ++q_i) {
// if (queue_heads[q_i] >= local_queues_ends[q_i]) { // q_i finished
// continue;
// }
// const Candidate &ele = set_L[queue_heads[q_i] + q_i * local_queue_length];
// if (ele < min_candidate) {
// min_candidate = ele;
// min_queue_id = q_i;
// } else if (ele.id_ == min_candidate.id_) { // Redundant element
// ++queue_heads[q_i];
// }
// }
//
// return min_queue_id;
//}
/* Function:
 * Merge all queues stored consecutively in set_L into one sorted prefix,
 * entirely in place. Queue q starts at q * local_queue_length; the array's
 * total used length is set_L_length.
 * Works in a hypercube pattern over the power-of-two prefix of queues
 * (pairs merged in parallel), then folds any leftover queues into queue 0
 * sequentially.
 */
inline void Searching::merge_in_set_L(
        std::vector<Candidate> &set_L,
        const idi set_L_length,
        const idi num_queues,
        const idi local_queue_length)
{
    idi size = 1 << (static_cast<idi>(log2(num_queues)));
    idi log2size = static_cast<idi>(log2(size));
    for (idi d = 0; d < log2size; ++d) {
        // At level d, each merged run covers 2^d queues.
        const idi merge_length = (local_queue_length << d);
        idi by = 1 << (d + 1);
        // Parallel for
#pragma omp parallel for
        for (idi i = 0; i < size; i += by) {
//            idi a = i + (1 << d) - 1;
//            idi b = i + (1 << (d + 1)) - 1;
            idi a = i;
            idi b = i + (1 << d);
            idi base_a = a * local_queue_length;
            idi base_b = b * local_queue_length;
            if (base_a >= set_L_length || base_b >= set_L_length) {
                // Pair falls entirely past the used region: skip.
                continue;
            }
            idi length_b;
            if (a + by < size) {
                length_b = merge_length;
            } else { // The last one
                if (size == num_queues) {
                    // The final run may be shorter than a full merge_length.
                    length_b = set_L_length - base_b;
                } else {
                    length_b = merge_length;
                }
            }
//            printf("a: %u b: %u "
//                   "base_a: %u base_b: %u length_b: %u\n",
//                   a, b,
//                   base_a, base_b, length_b);
            merge_two_consecutive_queues_in_place(
                    set_L,
                    base_a,
                    base_b,
                    length_b);
        }
    }
    // Fold queues beyond the power-of-two prefix into queue 0, one by one.
    if (size != num_queues) {
        for (idi i = size; i < num_queues; ++i) {
            idi a = 0;
            idi b = i;
            idi base_a = a;
            idi base_b = b * local_queue_length;
            if (base_b >= set_L_length) {
                continue;
            }
            idi length_b;
            if (b != num_queues - 1) {
                length_b = local_queue_length;
            } else {
                // The last queue may be partially filled.
                length_b = set_L_length - base_b;
            }
//            printf("a: %u b: %u "
//                   "base_a: %u base_b: %u length_b: %u\n",
//                   a, b,
//                   base_a, base_b, length_b);
            merge_two_consecutive_queues_in_place(
                    set_L,
                    base_a,
                    base_b,
                    length_b);
        }
    }
}
/*
 * 7/5/2020-20:27
 * Every queue keeps only elements which can be ordered in the top-L globally.
 * local_queues_sizes records the end location for all queues (truncated in
 * place to each queue's contribution to the global top-L).
 * Returns the distance of the global_L-th selected element (the bound).
 *
 * Fix: bound_lth was returned uninitialized when global_L == 0 or when the
 * queues together hold fewer than global_L elements and the very first pass
 * finds nothing; it now defaults to FLT_MAX. min_q_i/min_id are also
 * initialized to avoid an indeterminate read on a first-iteration tie.
 */
inline distf Searching::selecting_top_L_seq(
        std::vector<Candidate> &set_L,
        const idi global_L,
        const idi num_queues,
        const std::vector<idi> &local_queues_starts,
        std::vector<idi> &local_queues_sizes)
{
    std::vector<idi> pointers(num_queues, 0);
    distf bound_lth = FLT_MAX; // Distance of the last selected element.
    idi rank = 0;              // How many elements selected so far.
    bool is_finished = false;
    distf min_dist = FLT_MAX;
    idi min_q_i = 0;
    idi min_id = 0;
    while (rank < global_L) {
        is_finished = true;
        min_dist = FLT_MAX;
        // Scan the head of every unfinished queue for the global minimum;
        // ties on distance are broken by the smaller vertex id.
        for (idi q_i = 0; q_i < num_queues; ++q_i) {
            if (pointers[q_i] >= local_queues_sizes[q_i]) {
                // q_i is finished
                continue;
            }
            is_finished = false;
            idi sub = pointers[q_i] + local_queues_starts[q_i];
            distf tmp_dist = set_L[sub].distance_;
            idi tmp_id = set_L[sub].id_;
            if (tmp_dist < min_dist) {
                min_dist = tmp_dist;
                min_id = tmp_id;
                min_q_i = q_i;
            } else if (tmp_dist == min_dist && tmp_id < min_id) {
                min_id = tmp_id;
                min_q_i = q_i;
            }
        }
        if (is_finished) {
            {//test
                printf("Error: selecting_top_L_seq: only found %u elements but global_L is %u.\n",
                        rank,
                        global_L);
            }
            break;
        }
        bound_lth = min_dist;
        ++pointers[min_q_i];
        ++rank;
    }
    // Truncate every queue to the elements it contributed to the top-L.
    std::copy(pointers.begin(), pointers.end(), local_queues_sizes.begin());
    return bound_lth;
}
/*
 * 7/24/2020-10:08
 * Record for every queue the position that contains the top-M unchecked vertices.
 * So the total expanded vertices should still be M, which means the computation should
 * be the same with merging idea.
 * local_m_counts[q] receives how many of the global top-M unchecked vertices
 * fall in queue q. pointers_starts gives each queue's starting scan position;
 * checked elements are skipped. query_id and iter are only used by the
 * commented-out debug block.
 */
inline void Searching::selecting_unchecked_top_M_seq(
        const idi query_id,
        const idi iter,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &pointers_starts,
        const idi value_M,
        const idi num_queues,
        const std::vector<idi> &local_queues_starts,
        const std::vector<idi> &local_queues_sizes,
        std::vector<idi> &local_m_counts)
{
    std::vector<idi> pointers(pointers_starts);
//    std::vector<idi> pointers(num_queues, 0);
    std::fill(local_m_counts.begin(), local_m_counts.end(), 0);
    idi rank = 0;
    bool is_finished = true;
    distf min_dist = FLT_MAX;
    // NOTE(review): min_q_i/min_id are read before first assignment if the
    // very first compared pair ties at FLT_MAX — confirm this cannot occur.
    idi min_q_i;
    idi min_id;
    while (rank < value_M) {
        min_dist = FLT_MAX;
        // Find the globally smallest unchecked head among all queues;
        // ties on distance are broken by the smaller vertex id.
        for (idi q_i = 0; q_i < num_queues; ++q_i) {
            idi &pointer = pointers[q_i];
            idi sub = pointer + local_queues_starts[q_i];
//            {//test
//                if (133 == query_id &&
//                    3 == iter &&
//                    321341 == set_L[sub].id_) {
//                    printf("(%u %f)\n",
//                           set_L[sub].id_, set_L[sub].distance_);
//                }
//            }
            // Skip already-checked elements (advances the queue's pointer).
            while (pointer < local_queues_sizes[q_i]
                   && set_L[sub].is_checked_) {
                ++pointer;
                ++sub;
            }
            if (pointer >= local_queues_sizes[q_i]) {
                // q_i is finished
                continue;
            }
            is_finished = false;
            distf tmp_dist = set_L[sub].distance_;
            idi tmp_id = set_L[sub].id_;
            if (tmp_dist < min_dist) {
                min_dist = tmp_dist;
                min_id = tmp_id;
                min_q_i = q_i;
            } else if (tmp_dist == min_dist && tmp_id < min_id) {
                min_id = tmp_id;
                min_q_i = q_i;
            }
        }
        if (!is_finished) {
            is_finished = true;
            // Claim the winner for its queue and continue selecting.
            ++pointers[min_q_i];
            ++rank;
            ++local_m_counts[min_q_i];
        } else {
            // All queues exhausted before reaching value_M.
            break;
        }
    }
//    std::copy(pointers.begin(), pointers.end(), local_top_m_positions.begin());
}
// Best-first graph search that expands up to M unchecked candidates per
// iteration (instead of one, as in classic NSG search). set_L is the sorted
// candidate queue of length L, seeded from init_ids; the top-K result ids
// are written to set_K. count_distance_computation_ is incremented per
// distance evaluation.
inline void Searching::search_with_top_m(
        const PANNS::idi M,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
    boost::dynamic_bitset<> is_visited(num_v_);
    {
        // Mark initial candidates as visited so they are not re-inserted.
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = true;
        }
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        // Vertex record layout: norm first, then vector data.
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);
    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;
        unsigned nk = L; // Lowest queue position changed this iteration.
        // Select M candidates
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }
        // Push M candidates' neighbors into the queue.
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            // Adjacency list lives at offset data_bytes_: degree, then edges.
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    // Farther than the current worst in the queue: prune.
                    continue;
                }
                Candidate cand(nb_id, dist, false);
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates
        // Resume from the highest changed position, or just past the last
        // expanded candidate if nothing improved above it.
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
    }
    // Emit the K best ids.
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
}
// Variant of search_with_top_m where the expansion width M starts at 1 and
// doubles each iteration, capped at value_M_max. The scratch buffers
// top_m_candidates and is_visited are caller-provided (is_visited must be
// all-clear on entry; it is reset before returning).
inline void Searching::search_with_top_m_scale_m(
        const PANNS::idi value_M_max,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
        std::vector<idi> &top_m_candidates,
        boost::dynamic_bitset<> &is_visited)
{
//    boost::dynamic_bitset<> is_visited(num_v_);
    {
        // Mark initial candidates as visited so they are not re-inserted.
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = true;
        }
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        // Vertex record layout: norm first, then vector data.
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);
//    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    idi M = 1; // Expansion width; doubles each iteration up to value_M_max.
    while (k < L) {
        ++tmp_count;
        unsigned nk = L; // Lowest queue position changed this iteration.
        // Select M candidates
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }
        // Push M candidates' neighbors into the queue.
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            // Adjacency list lives at offset data_bytes_: degree, then edges.
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    // Farther than the current worst in the queue: prune.
                    continue;
                }
                Candidate cand(nb_id, dist, false);
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates
        // Resume from the highest changed position, or just past the last
        // expanded candidate if nothing improved above it.
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
        // Scale up the expansion width for the next iteration.
        if (M < value_M_max) {
            M <<= 1;
        }
    }
    // Emit the K best ids.
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
    {// Reset
        is_visited.reset();
    }
}
////void Searching::search_with_top_m(
//inline void Searching::search_with_top_m_to_get_distance_range(
// const PANNS::idi M,
// const PANNS::idi query_id,
//// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids)
//// std::vector<idi> &set_K)
//{
// dist_max_ = -FLT_MAX;
// dist_min_ = FLT_MAX;
// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = true;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
//// {// For distance range
//// if (dist > dist_max_) {
//// dist_max_ = dist;
//// }
//// if (dist < dist_min_) {
//// dist_min_ = dist;
//// }
//// }
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
//// {// For distance range
//// if (dist > dist_max_) {
//// dist_max_ = dist;
//// }
//// if (dist < dist_min_) {
//// dist_min_ = dist;
//// }
//// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {// For histogram
// for (idi i_l = 0; i_l < L; ++i_l) {
// distf dist = set_L[i_l].distance_;
// {// For distance range
// if (dist > dist_max_) {
// dist_max_ = dist;
// }
// if (dist < dist_min_) {
// dist_min_ = dist;
// }
// }
// }
// }
// }
//
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// set_K[k_i] = set_L[k_i].id_;
//// }
//}
//
////void Searching::search_with_top_m(
//inline void Searching::search_with_top_m_myths_M(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//{
//// {//test
//// printf("query_id: %u\n", query_id);
//// }
// const idi loc_range = L / 3;
//
//
// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = true;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
//// {// For histogram
//// const distf dist_range = dist_max_ - dist_min_;
//// printf("iter:%u\n", 0);
//// for (idi i_l = 0; i_l < L; ++i_l) {
//// printf("%f\n", (set_L[i_l].distance_ - dist_min_) / dist_range * 100.0);
//// }
//// }
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// std::vector<idi> range_count(3, 0);
// idi zero_inserted_count = 0;
//// {//test
//// printf("tmp_count: %u\n", tmp_count);
//// }
// ++tmp_count;
//
// unsigned nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//// {//test
//// printf("top_m_candidates_ends: %u\n", top_m_candidates_end);
//// }
// {
// if (0 == top_m_candidates_end) {
// break;
// }
// }
//
//
// uint64_t count_neighbors = 0;
// uint64_t count_inserted = 0;
// std::vector<idi> locs_to_count(M);
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
//
// count_neighbors += out_degree;
// idi num_inserted = 0;
//
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// ++num_inserted;
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
//// {
//// printf("c_i: %u "
//// "count: %u "
//// "loc_inserted: %u\n",
//// c_i,
//// num_inserted,
//// r);
//// }
// if (r < nk) {
// nk = r;
// }
// {
// ++range_count[r / loc_range];
// }
// }
// {
// if (0 == num_inserted) {
// ++zero_inserted_count;
// }
// locs_to_count[c_i] = num_inserted;
// count_inserted += num_inserted;
// }
//// {
//// printf("c_i: %u "
//// "num_inserted: %u\n",
//// c_i,
//// num_inserted);
//// }
// }
//// {
//// for (idi c_i = top_m_candidates_end; c_i < M; ++c_i) {
//// locs_to_count[c_i] = 0;
//// }
//// printf("iter:%u\n", tmp_count);
//// for (idi c_i = 0; c_i < M; ++c_i) {
//// printf("%u %u\n", c_i, locs_to_count[c_i]);
//// }
//// }
//// {//test
//// idi sum = 0;
//// for (const idi ct : range_count) sum += ct;
//// printf("tmp_count: %u "
//// "k: %u "
//// "actual_M: %u %.1f%% "
//// "zero_ins: %u %.1f%% "
//// "1/3: %u %.1f%% "
//// "2/3: %u %.1f%% "
//// "3/3: %u %.1f%%\n",
//// tmp_count,
//// k,
//// top_m_candidates_end, 100.0 * top_m_candidates_end / M,
//// zero_inserted_count, 100.0 * zero_inserted_count / top_m_candidates_end,
//// range_count[0], 100.0 * range_count[0] / sum,
//// range_count[1], 100.0 * range_count[1] / sum,
//// range_count[2], 100.0 * range_count[2] / sum);
//// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {
//            printf("query:%u iter: %u "
// "#neighbors: %lu "
// "#inserted: %lu "
// "ratio: %.2f%%\n",
// query_id, tmp_count,
// count_neighbors,
// count_inserted,
// 100.0 * count_inserted / count_neighbors);
// }
//// {// For histogram
////// const auto it_min = std::min_element(set_L.begin(), set_L.end());
////// const auto it_max = std::max_element(set_L.begin(), set_L.end());
////// const distf dist_min = it_min->distance_;
////// const distf dist_max = it_max->distance_;
////// const distf dist_min = it_min->distance_ - 1.0;
////// const distf dist_max = it_max->distance_ + 1.0;
//// const distf dist_range = dist_max_ - dist_min_;
////// const distf dist_range = dist_max - dist_min;
////// {
////// printf("it_min->distance_: %f dist_min: %f\n",
////// it_min->distance_, dist_min);
////// }
////// const distf dist_range = it_max->distance_ - it_min->distance_;
//// printf("iter:%u\n", tmp_count);
//// for (idi i_l = 0; i_l < L; ++i_l) {
////// printf("%f\n", set_L[i_l].distance_);
////// printf("%f\n", (set_L[i_l].distance_ - dist_min) / dist_range * 100.0);
//// printf("%f\n", (set_L[i_l].distance_ - dist_min_) / dist_range * 100.0);
////// printf("%.2f\n", (set_L[i_l].distance_ - it_min->distance_) / dist_range * 100.0);
//// }
//// }
// }
//
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
// if (query_id == 3) {
// exit(1);
// }
//}
//
//// Sequential Top-M algorithm for profiling purpose: byte array, CAS, and OpenMP
////void Searching::search_with_top_m(
//inline void Searching::search_with_top_m_profile_bit_CAS(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//{
//// std::vector<uint8_t> is_visited(num_v_, 0); // Byte array
//// boost::dynamic_bitset<> is_visited(num_v_); // Bit array
// BitVector is_visited(num_v_);
//
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
//// is_visited[init_ids[c_i]] = true;
// is_visited.atomic_set_bit(init_ids[c_i]);
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = true;
//
//// if (!AtomicOps::CAS(is_visited.data() + nb_id,
//// static_cast<uint8_t>(0),
//// static_cast<uint8_t>(1))) {
//// continue;
//// }
// {// Self-defined BitVector
// if (is_visited.atomic_is_bit_set(nb_id)) {
// continue;
// }
// is_visited.atomic_set_bit(nb_id);
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
////
//// {//test
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// printf("%u: %u: %u %f\n",
//// query_id,
//// k_i, set_L[k_i].id_, set_L[k_i].distance_);
//// }
//// exit(1);
//// }
//}
///// Backup
//inline void Searching::search_with_top_m(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//// std::vector< std::vector<idi> > &top_m_list)
//{
// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = true;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
////
//// {//test
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// printf("%u: %u: %u %f\n",
//// query_id,
//// k_i, set_L[k_i].id_, set_L[k_i].distance_);
//// }
//// exit(1);
//// }
//}
//
////// DEPRECATED: the is_visited array cannot be shared among threads.
//inline void Searching::search_with_top_m_no_local_arrays(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// boost::dynamic_bitset<> &is_visited)
//// std::vector< std::vector<idi> > &top_m_list)
//{
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = true;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
////
//// {//test
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// printf("%u: %u: %u %f\n",
//// query_id,
//// k_i, set_L[k_i].id_, set_L[k_i].distance_);
//// }
//// exit(1);
//// }
//}
// Batched Top-M greedy best-first search: runs the Top-M search for
// batch_size queries (global query IDs [batch_start, batch_start + batch_size))
// simultaneously. Each iteration, every unfinished query marks up to M
// unchecked candidates in its queue; candidates selected by multiple queries
// are placed once into a shared "joint queue" so each such vertex's adjacency
// list is read a single time, and its neighbors are then pushed into the
// queue of every query that selected it.
//
// Parameters:
//   M           - max number of candidates each query expands per iteration.
//   batch_start - global index of the first query in this batch.
//   batch_size  - number of queries processed together.
//   K           - number of result IDs written per query (K <= L assumed).
//   L           - capacity of each query's candidate queue set_L_list[q].
//   set_L_list  - per-query candidate queues; entry q must hold >= L slots.
//   init_ids    - L initial vertex IDs, shared by all queries in the batch.
//   set_K_list  - output: set_K_list[batch_start + q] receives up to K IDs.
//
// NOTE(review): unlike the single-query variants elsewhere in this file, this
// batch version issues no _mm_prefetch hints — presumably intentional, but
// worth confirming for performance parity.
inline void Searching::search_with_top_m_in_batch(
        const PANNS::idi M,
        const PANNS::idi batch_start,
        const PANNS::idi batch_size,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector< std::vector<Candidate> > &set_L_list,
        const std::vector<idi> &init_ids,
        std::vector< std::vector<idi> > &set_K_list)
{
    // One visited bitmap per query (num_v_ bits each); visited sets cannot be
    // shared because each query explores the graph independently.
    std::vector< boost::dynamic_bitset<> > is_visited_list(batch_size, boost::dynamic_bitset<> (num_v_));
    // Prepare the init_ids
    {
//#pragma omp parallel for
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            auto &is_visited = is_visited_list[q_i];
            for (idi c_i = 0; c_i < L; ++c_i) {
                is_visited[init_ids[c_i]] = true;
            }
        }
    }
    // Initialize set_L_list
    {
//#pragma omp parallel for
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            const dataf *query_data = queries_load_ + (q_i + batch_start) * dimension_;
            for (idi i = 0; i < L; i++) {
                idi v_id = init_ids[i];
                // Vertex record layout (as used here): the first dataf is the
                // vertex's precomputed norm, followed by its data vector.
                auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
                dataf norm = *v_data++;
//                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(v_data, query_data, norm);
                set_L_list[q_i][i] = Candidate(v_id, dist, false); // False means not checked.
            }
            // Keep each queue sorted ascending by distance (best first).
            std::sort(set_L_list[q_i].begin(), set_L_list[q_i].begin() + L);
        }
    }
    {
        std::vector<idi> joint_queue(M * batch_size); // Joint queue for all shared top-M candidates
        idi joint_queue_end = 0;
        // Membership bitmap that deduplicates the joint queue across queries.
        boost::dynamic_bitset<> is_in_joint_queue(num_v_);
//        std::vector< std::vector<idi> > cands_query_ids(num_v_, std::vector<idi>(batch_size)); // If candidate cand_id is selected by query q_i, q_i should be in cands_query_ids[cand_id].
//        std::vector<idi> cands_query_ids_ends(num_v_, 0);
        // Maps a selected candidate ID to the local IDs of the queries that
        // selected it this iteration; batch_size * M is a bucket-count hint.
        std::unordered_map< idi, std::vector<idi> > cands_query_ids(batch_size * M);
        std::vector<idi> ks(batch_size, 0); // Indices of every queue's first unchecked candidate.
        std::vector<idi> nks(batch_size, L); // Indices of highest candidate inserted
        std::vector<idi> last_ks(batch_size, L); // Indices of lowest candidate unchecked
        // Compact worklist of queries whose search has not yet converged.
        std::vector<idi> queries_not_finished(batch_size);
        idi queries_not_finished_end = batch_size;
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            queries_not_finished[q_i] = q_i;
        }
        bool is_finished = false;
        idi counter_for_debug = 0; // iteration counter; only incremented, never read
        while (!is_finished) {
            ++counter_for_debug;
            // Build the new joint queue
            // Traverse every query's queue
            for(idi q_i = 0; q_i < queries_not_finished_end; ++q_i) {
                idi q_local_id = queries_not_finished[q_i];
//                last_ks[q_local_id] = L;
                auto &set_L = set_L_list[q_local_id];
                idi top_m_count = 0;
                // Mark up to M unchecked candidates starting from the first
                // unchecked position ks[q_local_id].
                for (idi c_i = ks[q_local_id]; c_i < L && top_m_count < M; ++c_i) {
                    if (set_L[c_i].is_checked_) {
                        continue;
                    }
                    set_L[c_i].is_checked_ = true;
                    last_ks[q_local_id] = c_i; // remember deepest selected slot
                    ++top_m_count;
                    idi cand_id = set_L[c_i].id_;
                    // Record which query selected cand_id
                    auto tmp_c = cands_query_ids.find(cand_id);
                    if (tmp_c != cands_query_ids.end()) {
                        tmp_c->second.push_back(q_local_id);
                    } else {
                        cands_query_ids.emplace(cand_id, std::vector<idi>());
                        cands_query_ids[cand_id].reserve(batch_size);
                        cands_query_ids[cand_id].push_back(q_local_id);
                    }
//                    cands_query_ids[cand_id][cands_query_ids_ends[cand_id]++] = q_local_id;
                    // Add candidate cand_id into the joint queue
                    if (is_in_joint_queue[cand_id]) {
                        continue; // already queued by another query this round
                    }
                    is_in_joint_queue[cand_id] = true;
                    joint_queue[joint_queue_end++] = cand_id;
                }
            }
            queries_not_finished_end = 0; // Clear queries_not_finished
            // Traverse every shared candidate
            for (idi c_i = 0; c_i < joint_queue_end; ++c_i) {
                idi cand_id = joint_queue[c_i];
                is_in_joint_queue[cand_id] = false; // Reset is_in_joint_queue
                // Adjacency record layout (as used here): at offset data_bytes_
                // within the vertex record, an idi out-degree precedes the
                // out-edge ID list.
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
                const auto &query_local_ids = cands_query_ids[cand_id];
                // Push neighbors to every queue of the queries that selected cand_id.
                // Traverse cand_id's neighbors
//                idi &q_i_bound = cands_query_ids_ends[cand_id];
//                for (idi q_i = 0; q_i < q_i_bound; ++q_i) {
//                    idi q_local_id = query_local_ids[q_i];
                for (idi q_local_id : query_local_ids) {
                    dataf *query_data = queries_load_ + (q_local_id + batch_start) * dimension_;
                    auto &is_visited = is_visited_list[q_local_id];
                    auto &set_L = set_L_list[q_local_id];
//                    // Traverse cand_id's neighbors
                    for (idi e_i = 0; e_i < out_degree; ++e_i) {
                        idi nb_id = out_edges[e_i];
                        if (is_visited[nb_id]) {
                            continue;
                        }
                        is_visited[nb_id] = true;
                        auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                        dataf norm = *nb_data++;
//                        ++count_distance_computation_;
                        distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                        // Prune neighbors that cannot enter this query's queue
                        // (farther than the current worst kept candidate).
                        if (dist > set_L[L-1].distance_) {
                            continue;
                        }
//                        if (dist >= set_L[L-1].distance_) {
//                            continue;
//                        }
                        Candidate new_cand(nb_id, dist, false);
                        // insert_into_queue returns the insertion position;
                        // track the highest (front-most) insertion per query.
                        idi insert_loc = insert_into_queue(set_L, L, new_cand);
                        if (insert_loc < nks[q_local_id]) {
                            nks[q_local_id] = insert_loc;
                        }
                    }
                }
                // Drop the per-candidate query list; it is rebuilt each round.
                cands_query_ids.erase(cand_id);
//                q_i_bound = 0; // Clear cands_query_ids[cand_id]
            }
            joint_queue_end = 0; // Clear joint_queue
            // Advance each query's cursor: resume from the front-most
            // insertion if one landed above the deepest selected slot,
            // otherwise continue just past the deepest selected slot.
            for (idi q_local_id = 0; q_local_id < batch_size; ++q_local_id) {
                if (nks[q_local_id] <= last_ks[q_local_id]) {
                    ks[q_local_id] = nks[q_local_id];
                } else {
                    ks[q_local_id] = last_ks[q_local_id] + 1;
                }
                nks[q_local_id] = L;
                last_ks[q_local_id] = L;
                if (ks[q_local_id] < L) {
                    queries_not_finished[queries_not_finished_end++] = q_local_id;
                }
            }
            if (!queries_not_finished_end) {
                is_finished = true; // every query's cursor has reached L
            }
        }
    }
    // Emit the top-K IDs for each query into its global output slot.
    {
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            for (idi c_i = 0; c_i < K && c_i < L; ++c_i) {
                set_K_list[q_i + batch_start][c_i] = set_L_list[q_i][c_i].id_;
            }
        }
    }
////
//    {//test
//        for (idi q_i = 0; q_i < batch_size; ++q_i) {
//            printf("query: %u\n", q_i + batch_start);
//            for (idi c_i = 0; c_i < K; ++c_i) {
//                printf("%u: %u %f\n", c_i, set_L_list[q_i][c_i].id_, set_L_list[q_i][c_i].distance_);
//            }
//        }
//    }
}
//inline void Searching::para_search_with_top_m_critical_area(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//// std::vector< std::vector<idi> > &top_m_list)
//{
// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
////#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//// int nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT.
////#pragma omp parallel for
////#pragma omp parallel for reduction(min : nk)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r;
////#pragma omp critical
// {
// r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
////#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
//
//inline void Searching::para_search_with_top_m_critical_area_no_omp(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//// std::vector< std::vector<idi> > &top_m_list)
//{
// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
////#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//// int nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT.
////#pragma omp parallel for
////#pragma omp parallel for reduction(min : nk)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r;
////#pragma omp critical
// {
// r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
////#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
//
//inline void Searching::para_search_with_top_m_critical_area_yes_omp(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//// std::vector< std::vector<idi> > &top_m_list)
//{
// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//// int nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// // OpenMP reduction(min : nk) has a problem if nk is unsigned: nk might end up being MAX_UINT.
////#pragma omp parallel for
////#pragma omp parallel for reduction(min : nk)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r;
////#pragma omp critical
// {
// r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
////#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
//
//inline void Searching::para_search_with_top_m_visited_array(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// std::vector<uint8_t> &is_visited)
//// std::vector< std::vector<idi> > &top_m_list)
//{
//// uint64_t count_visited = 0;
//
//// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
////#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
//// ++count_visited;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//// int nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// // OpenMP reduction(min : nk) has a problem if nk is unsigned: nk might end up being MAX_UINT.
////#pragma omp parallel for
////#pragma omp parallel for reduction(min : nk)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//// ++count_visited;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r;
////#pragma omp critical
// {
// r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
////#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//
//// {
//// printf("query_id: %u "
//// "count_visited: %lu %f%%\n",
//// query_id,
//// count_visited,
//// 100.0 * count_visited / num_v_);
//// }
//}
//
//inline void Searching::para_search_with_top_m_merge_queues(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//{
//// {//test
//// printf("query_id: %u\n", query_id);
//// }
//// const idi local_queue_length = ((M - 1) / num_threads_ + 1) * width_;
// const idi local_queue_length = L;
// std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length));
// std::vector<idi> local_queues_ends(num_threads_, 0);
// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand);
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// idi nk = L;
//// // Merge. Parallel merging in every two queues.
//// {
//// for (int tid = 0; tid < num_threads_; ++tid) {
//// if (0 == local_queues_ends[tid]) continue;
//// idi r = merge_two_queues_into_1st_queue_para(
//// set_L,
//// 0,
//// L,
//// local_queues_list[tid],
//// 0,
//// local_queues_ends[tid]);
////// idi r = merge_two_queues_into_1st_queue_seq(
////// set_L,
////// 0,
////// L,
////// local_queues_list[tid],
////// 0,
////// local_queues_ends[tid]);
//// local_queues_ends[tid] = 0; // Reset the local queue
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// }
//// {// text
//// if (query_id == 4 &&
//// tmp_count == 5) {
//// // Print local queues
//// for (int t_i = 0; t_i < num_threads_; ++t_i) {
////// idi start_i = t_i * local_queue_length;
//// for (idi q_i = 0; q_i < local_queues_ends[t_i]; ++q_i) {
//// printf("t[%u][%u]: "
//// "id: %u "
//// "dist: %f\n",
//// t_i, q_i,
//// local_queues_list[t_i][q_i].id_,
//// local_queues_list[t_i][q_i].distance_);
//// }
//// }
//// printf("----------\n");
//// for (idi i = 0; i < L; ++i) {
//// printf("set_L[%u]: "
//// "id: %u "
//// "dist: %f\n",
//// i,
//// set_L[i].id_,
//// set_L[i].distance_);
//// }
//// printf("----------\n");
//// }
//// }
// // Merge. Merge all queues in parallel.
// {
// if (num_threads_ > 1) {
// idi r = merge_all_queues_para_list(
// local_queues_list,
// local_queues_ends,
// set_L,
// L);
// if (r < nk) {
// nk = r;
// }
// } else {
// if (local_queues_ends[0]) {
// idi r = merge_two_queues_into_1st_queue_seq_fixed(
// set_L,
// 0,
// L,
// local_queues_list[0],
// 0,
// local_queues_ends[0]);
// local_queues_ends[0] = 0;
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
//// {//test
//// if (query_id == 4) {
//// for (idi i = 0; i < L; ++i) {
//// printf("tmp_count: %u "
//// "set_L[%u]: "
//// "id: %u "
//// "dist: %f\n",
//// tmp_count,
//// i,
//// set_L[i].id_,
//// set_L[i].distance_);
//// }
//// }
////
//// }
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//// {
//// exit(1);
//// }
//// {//test
////
////// if (query_id == 4) {
//// for (idi i = 0; i < L; ++i) {
//// printf("set_L[%u]: "
//// "id: %u "
//// "dist: %f\n",
//// i,
//// set_L[i].id_,
//// set_L[i].distance_);
//// }
////// exit(1);
////// }
//// }
//}
//
////// Using local queue and then sequential merge.
//inline void Searching::para_search_with_top_m_queues_seq_merge(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//// std::vector< std::vector<idi> > &top_m_list)
//{
//// const idi local_queue_length = ((L - 1) / num_threads_ + 1) * width_;
// const idi local_queue_length = L;
// std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length));
// std::vector<idi> local_queues_ends(num_threads_, 0);
// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//// for (idi v_i = 0; v_i < L; ++v_i) {
//// idi v_id = init_ids[v_i];
//// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
//// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//// {
//// printf("tmp_count: %u "
//// "k: %u\n",
//// tmp_count,
//// k);
//// }
//
//// unsigned nk = L;
//// int nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
//// idi r;
////#pragma omp critical
//// {
//// r = insert_into_queue(set_L, L, cand);
//// if (r < nk) {
//// nk = r;
//// }
//// }
// // Add to the local queue.
// add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand);
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// idi nk = L;
// // Merge
// {
// for (int tid = 0; tid < num_threads_; ++tid) {
// if (0 == local_queues_ends[tid]) continue;
// idi r = merge_two_queues_into_1st_queue_seq_fixed(
// set_L,
// 0,
// L,
// local_queues_list[tid],
// 0,
// local_queues_ends[tid]);
//// L + 1);
// local_queues_ends[tid] = 0; // Reset the local queue
// if (r < nk) {
// nk = r;
// }
// }
// }
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
////
//// {//test
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// printf("%u: %u: %u %f\n",
//// query_id,
//// k_i, set_L[k_i].id_, set_L[k_i].distance_);
//// }
//// exit(1);
//// }
//}
//
//inline void Searching::para_search_with_top_m_merge_queues_no_CAS(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length,
// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<idi> &local_queues_ends,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited)
//{
////// const idi local_queue_length = ((M - 1) / num_threads_ + 1) * width_;
//// const idi local_queue_length = L;
//// std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length));
//// std::vector<idi> local_queues_ends(num_threads_, 0);
////// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
//
//// if (!AtomicOps::CAS(is_visited.data() + nb_id,
//// static_cast<uint8_t>(0),
//// static_cast<uint8_t>(1))) {
//// continue;
//// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand);
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// idi nk = L;
//// // Merge. Parallel merging in every two queues.
//// {
//// for (int tid = 0; tid < num_threads_; ++tid) {
//// if (0 == local_queues_ends[tid]) continue;
//// idi r = merge_two_queues_into_1st_queue_para(
//// set_L,
//// 0,
//// L,
//// local_queues_list[tid],
//// 0,
//// local_queues_ends[tid]);
////// idi r = merge_two_queues_into_1st_queue_seq(
////// set_L,
////// 0,
////// L,
////// local_queues_list[tid],
////// 0,
////// local_queues_ends[tid]);
//// local_queues_ends[tid] = 0; // Reset the local queue
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// }
//// // Merge. Merge all queues in parallel.
//// {
//// if (num_threads_ > 1) {
//// idi r = merge_all_queues_para(
//// local_queues_list,
//// local_queues_ends,
//// set_L,
//// L);
//// if (r < nk) {
//// nk = r;
//// }
//// } else {
//// if (local_queues_ends[0]) {
//// idi r = merge_two_queues_into_1st_queue_seq(
//// set_L,
//// 0,
//// L,
//// local_queues_list[0],
//// 0,
//// local_queues_ends[0]);
//// local_queues_ends[0] = 0;
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// }
//// }
// // Merge
// {
// for (int tid = 0; tid < num_threads_; ++tid) {
// if (0 == local_queues_ends[tid]) continue;
// idi r = merge_two_queues_into_1st_queue_seq_fixed(
// set_L,
// 0,
// L,
// local_queues_list[tid],
// 0,
// local_queues_ends[tid]);
//// L + 1);
// local_queues_ends[tid] = 0; // Reset the local queue
// if (r < nk) {
// nk = r;
// }
// }
// }
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
// is_visited.reset();
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//}
//inline void Searching::para_search_with_top_m_merge_queues_in_array(
//inline void Searching::para_search_with_top_m_merge_queues_new_threshold(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
//// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<Candidate> &local_queues_array,
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// BitVector &is_visited)
//// std::vector<uint8_t> &is_visited)
//// boost::dynamic_bitset<> &is_visited)
//{
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
//// is_visited[init_ids[c_i]] = 1;
// is_visited.atomic_set_bit(init_ids[c_i]);
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// idi min_index = L - 1;
// distf min_1st = set_L[min_index].distance_;
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// const idi local_queue_start = tid * local_queue_length;
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// { // Sequential edition
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//// }
//// { // __ATOMIC_SEQ_CST edition
//// if (!AtomicOps::CAS(is_visited.data() + nb_id,
//// static_cast<uint8_t>(0),
//// static_cast<uint8_t>(1))) {
//// continue;
//// }
//// }
//// {// Acquire and Release edition
//// if (__atomic_load_n(is_visited.data() + nb_id, __ATOMIC_ACQUIRE)) {
//// continue;
//// }
//// __atomic_store_n(is_visited.data() + nb_id, 1, __ATOMIC_RELEASE);
//// }
// {// Self-defined BitVector
// if (is_visited.atomic_is_bit_set(nb_id)) {
// continue;
// }
// is_visited.atomic_set_bit(nb_id);
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
//
// if (dist > min_1st) {
// continue;
// } else if (min_index > 0) {
// // Inserted, so min_1st needs update
// if (dist > set_L[min_index - 1].distance_) {
// min_1st = dist;
// if (min_index < L - 1) {
// ++min_index;
// }
// } else {
// min_1st = set_L[--min_index].distance_;
// }
//// min_1st = set_L[--min_index].distance_;
// }
//
//// if (dist > set_L[L-1].distance_) {
//// continue;
//// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// add_into_queue(local_queues_array, local_queue_start, local_queues_ends[tid], local_queue_length, cand);
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// idi nk = L;
//// // Merge. Parallel merging in every two queues.
//// {
//// for (int tid = 0; tid < num_threads_; ++tid) {
//// if (0 == local_queues_ends[tid]) continue;
//// idi r = merge_two_queues_into_1st_queue_para(
//// set_L,
//// 0,
//// L,
//// local_queues_list[tid],
//// 0,
//// local_queues_ends[tid]);
////// idi r = merge_two_queues_into_1st_queue_seq(
////// set_L,
////// 0,
////// L,
////// local_queues_list[tid],
////// 0,
////// local_queues_ends[tid]);
//// local_queues_ends[tid] = 0; // Reset the local queue
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// }
// // Merge. Merge all queues in parallel.
// {
// if (num_threads_ > 1) {
// idi r = merge_all_queues_para_array(
//// local_queues_list,
// local_queues_array,
// local_queues_ends,
// local_queue_length,
// set_L,
// L);
// if (r < nk) {
// nk = r;
// }
// } else {
// if (local_queues_ends[0]) {
// idi r = merge_two_queues_into_1st_queue_seq_fixed(
// set_L,
// 0,
// L,
//// local_queues_list[0],
// local_queues_array,
// 0,
// local_queues_ends[0]);
// local_queues_ends[0] = 0;
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
//// // Merge Sequentially
//// {
//// for (int tid = 0; tid < num_threads_; ++tid) {
//// if (0 == local_queues_ends[tid]) continue;
//// idi r = merge_two_queues_into_1st_queue_seq_fixed(
//// set_L,
//// 0,
//// L,
////// local_queues_list[tid],
////// 0,
//// local_queues_array,
//// tid * local_queue_length,
//// local_queues_ends[tid]);
////// L + 1);
//// local_queues_ends[tid] = 0; // Reset the local queue
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// }
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
//// is_visited.reset();
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//}
/*
 * 5/7/2020-15:14
 * Use a single thread to scale M up until it reaches value_M_middle,
 * then switch to multiple threads.
 */
inline void Searching::para_search_with_top_m_merge_queues_middle_m(
const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
std::vector<idi> &local_queues_ends, // Sizes of local queue
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited)
{
time_initialization_ -= WallTimer::get_time_mark();
// const idi base_set_L = (num_threads_ - 1) * local_queue_length;
//#pragma omp parallel for
for (idi c_i = 0; c_i < L; ++c_i) {
is_visited[init_ids[c_i]] = 1;
}
const dataf *query_data = queries_load_ + query_id * dimension_;
#pragma omp parallel for
for (idi v_i = 0; v_i < L; ++v_i) {
idi v_id = init_ids[v_i];
_mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
}
uint64_t tmp_count_computation = 0;
// Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
#pragma omp parallel for reduction(+ : tmp_count_computation)
for (unsigned i = 0; i < L; i++) {
unsigned v_id = init_ids[i];
auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
dataf norm = *v_data++;
++tmp_count_computation;
distf dist = compute_distance_with_norm(v_data, query_data, norm);
set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
}
count_distance_computation_ += tmp_count_computation;
tmp_count_computation = 0;
// std::sort(set_L.begin(), set_L.begin() + L);
std::sort(
set_L.begin() + base_set_L,
set_L.begin() + base_set_L + L);
local_queues_ends[num_threads_ - 1] = L;
time_initialization_ += WallTimer::get_time_mark();
idi top_m_candidates_end = 0;
idi k = 0; // Index of first unchecked candidate.
idi tmp_count = 0; // for debug
idi M = 1;
time_sequential_phase_ -= WallTimer::get_time_mark();
{ // Single thread
while (k < L && M < value_M_middle) {
++tmp_count;
// {//test
// printf("tmp_count: %d\n", tmp_count);
// }
// Select M candidates
idi last_k = L;
            // Cannot use OpenMP here because this for-loop needs an early break based on the 2nd condition.
for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
idi index_set_L = c_i + base_set_L;
if (set_L[index_set_L].is_checked_) {
continue;
}
last_k = c_i; // Record the location of the last candidate selected.
set_L[index_set_L].is_checked_ = true;
top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
}
idi nk = L;
// Push M candidates' neighbors into the queue.
for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
idi cand_id = top_m_candidates[c_i];
_mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
idi out_degree = *out_edges++;
for (idi n_i = 0; n_i < out_degree; ++n_i) {
_mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
}
for (idi e_i = 0; e_i < out_degree; ++e_i) {
idi nb_id = out_edges[e_i];
{ // Sequential edition
if (is_visited[nb_id]) {
continue;
}
is_visited[nb_id] = 1;
}
auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
dataf norm = *nb_data++;
++tmp_count_computation;
distf dist = compute_distance_with_norm(nb_data, query_data, norm);
if (dist > set_L[L - 1 + base_set_L].distance_) {
continue;
}
Candidate cand(nb_id, dist, false);
// Thread 0 maintains the "global" queue
idi r = add_into_queue(
set_L,
base_set_L,
local_queues_ends[num_threads_ - 1],
L,
cand);
if (r < nk) {
nk = r;
}
}
}
top_m_candidates_end = 0; // Clear top_m_candidates
count_distance_computation_ += tmp_count_computation;
tmp_count_computation = 0;
if (nk <= last_k) {
k = nk;
} else {
k = last_k + 1;
}
{// Scale M
if (M < value_M_max) {
M <<= 1;
} else {
M = value_M_max;
}
}
}
}
time_sequential_phase_ += WallTimer::get_time_mark();
time_parallel_phase_ -= WallTimer::get_time_mark();
uint64_t tmp_count_add_to_queue = 0;
double tmp_time_pick_top_m = 0;
double tmp_time_distance_computation = 0;
double tmp_time_add_to_queue = 0.0;
{ // Multiple Threads
while (k < L) {
time_expand_ -= WallTimer::get_time_mark();
++tmp_count;
// {//test
// printf("tmp_count: %d\n", tmp_count);
// }
// Select M candidates
idi last_k = L;
time_pick_top_m_ -= WallTimer::get_time_mark();
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
idi index_set_L = c_i + base_set_L;
if (set_L[index_set_L].is_checked_) {
continue;
}
last_k = c_i; // Record the location of the last candidate selected.
set_L[index_set_L].is_checked_ = true;
top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
}
time_pick_top_m_ += WallTimer::get_time_mark();
idi nk = L;
// Push M candidates' neighbors into the queue.
#pragma omp parallel for reduction(+ : tmp_count_computation) \
reduction(+ : tmp_count_add_to_queue) \
reduction(+ : tmp_time_pick_top_m) \
reduction(+ : tmp_time_distance_computation) \
reduction(+ : tmp_time_add_to_queue)
// for (int tid = 0; tid < num_threads_; ++tid) {
for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
tmp_time_pick_top_m -= WallTimer::get_time_mark();
int tid = omp_get_thread_num();
idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
tmp_time_pick_top_m += WallTimer::get_time_mark();
for (idi e_i = 0; e_i < out_degree; ++e_i) {
tmp_time_distance_computation -= WallTimer::get_time_mark();
idi nb_id = out_edges[e_i];
{ // Sequential edition
if (is_visited[nb_id]) {
tmp_time_distance_computation += WallTimer::get_time_mark();
continue;
}
is_visited[nb_id] = 1;
}
auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
dataf norm = *nb_data++;
++tmp_count_computation;
distf dist = compute_distance_with_norm(nb_data, query_data, norm);
tmp_time_distance_computation += WallTimer::get_time_mark();
if (dist > set_L[L - 1 + base_set_L].distance_) {
continue;
}
++tmp_count_add_to_queue;
Candidate cand(nb_id, dist, false);
// Add to the local queue.
// tmp_time_pick_top_m -= WallTimer::get_time_mark();
tmp_time_add_to_queue -= WallTimer::get_time_mark();
if (0 != tid) {
// Non-Master threads using local queues
add_into_queue(
set_L,
(tid - 1) * local_queue_length,
local_queues_ends[tid - 1],
local_queue_length,
cand);
} else {
// Thread 0 maintains the "global" queue
idi r = add_into_queue(
set_L,
base_set_L,
local_queues_ends[num_threads_ - 1],
L,
cand);
if (r < nk) {
nk = r;
}
}
tmp_time_add_to_queue += WallTimer::get_time_mark();
// tmp_time_pick_top_m += WallTimer::get_time_mark();
}
}
time_add_to_queue_ += tmp_time_add_to_queue;
tmp_time_add_to_queue = 0;
// }
time_distance_computation_ += tmp_time_distance_computation;
tmp_time_distance_computation = 0;
time_pick_top_m_ += tmp_time_pick_top_m;
tmp_time_pick_top_m = 0;
top_m_candidates_end = 0; // Clear top_m_candidates
count_add_to_queue_ += tmp_count_add_to_queue;
tmp_count_add_to_queue = 0;
count_distance_computation_ += tmp_count_computation;
tmp_count_computation = 0;
time_expand_ += WallTimer::get_time_mark();
// // Merge. Merge all queues in parallel.
{
time_merge_ -= WallTimer::get_time_mark();
if (num_threads_ > 1) {
idi r = merge_all_queues_para_array(
set_L,
local_queues_ends,
local_queue_length,
L);
if (r < nk) {
nk = r;
}
}
time_merge_ += WallTimer::get_time_mark();
}
if (nk <= last_k) {
k = nk;
} else {
k = last_k + 1;
}
{// Scale M
if (M < value_M_max) {
M <<= 1;
} else {
M = value_M_max;
}
}
}
}
time_parallel_phase_ += WallTimer::get_time_mark();
time_ending_ -= WallTimer::get_time_mark();
#pragma omp parallel for
for (idi k_i = 0; k_i < K; ++k_i) {
set_K[k_i] = set_L[k_i + base_set_L].id_;
// set_K[k_i] = set_L[k_i].id_;
}
{// Reset
// std::fill(is_visited.begin(), is_visited.end(), 0);
is_visited.reset();
// is_visited.clear_all();
std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
}
time_ending_ += WallTimer::get_time_mark();
// {//test
// if (3 == query_id) {
// exit(1);
// }
// }
}
/**
 * @brief Top-M best-first graph search (NSG-style) with a hard budget on the
 *        number of distance computations, and per-thread local queues that are
 *        merged into the global queue each parallel iteration.
 *
 * The search runs in two phases:
 *   1. A single-threaded phase while the batch width M < value_M_middle.
 *   2. A multi-threaded phase (OpenMP) for the remainder, where thread 0 owns
 *      the "global" queue (located at offset base_set_L inside set_L) and every
 *      other thread tid pushes into its own local queue at
 *      (tid - 1) * local_queue_length; local queues are merged via
 *      merge_all_queues_para_array() after each expansion round.
 * Both phases stop early once count_single_query_computation exceeds
 * computation_threshold. M doubles each iteration, capped at value_M_max.
 *
 * @param computation_threshold  Max total distance computations for this query.
 * @param value_M_middle         M value at which to switch to the parallel phase.
 * @param value_M_max            Upper bound for the doubling M schedule.
 * @param query_id               Index of the query vector in queries_load_.
 * @param K                      Number of nearest-neighbor ids written to set_K.
 * @param L                      Capacity of the global queue (search width).
 * @param init_size              Number of seed vertices taken from init_ids.
 * @param set_L                  Backing storage for all queues (locals + global).
 * @param init_ids               Seed vertex ids; first init_size entries are used.
 * @param set_K                  Output: ids of the best K candidates found.
 * @param local_queue_length     Maximum size of each non-master local queue.
 * @param base_set_L             Offset of the global queue in set_L;
 *                               base_set_L = (num_threads_ - 1) * local_queue_length.
 * @param local_queues_ends      Current sizes of all queues; last entry is the
 *                               global queue's size. Reset to 0 on exit.
 * @param top_m_candidates       Scratch buffer holding the M selected candidates.
 * @param is_visited             Per-vertex visited bitset. Reset on exit.
 */
inline void Searching::para_search_with_top_m_merge_queues_middle_m_no_merge(
        const uint64_t computation_threshold,
        const idi value_M_middle,
        const idi value_M_max,
        const idi query_id,
        const idi K,
        const idi L,
        const idi init_size,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
        const idi local_queue_length, // Maximum size of local queue
        const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
        std::vector<idi> &local_queues_ends, // Sizes of local queue
        std::vector<idi> &top_m_candidates,
        boost::dynamic_bitset<> &is_visited)
{
    // Per-phase distance-computation counters; count_single_query_computation
    // is the running total compared against computation_threshold.
    uint64_t count_single_query_computation = 0;
    uint64_t count_init_computation = 0;
    uint64_t count_seq_computation = 0;
    uint64_t count_par_computation = 0;
//    {//test
//        printf("query_id: %u\n", query_id);
//    }
//    time_initialization_ -= WallTimer::get_time_mark();
//    const idi base_set_L = (num_threads_ - 1) * local_queue_length;
    {
        // Mark the seed vertices as visited up front.
        // NOTE(review): concurrent writes to distinct bits of a
        // boost::dynamic_bitset share words; this parallel loop appears to rely
        // on the seeds being marked benignly — confirm this is intentional.
#pragma omp parallel for
        for (idi c_i = 0; c_i < init_size; ++c_i) {
//        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
//            is_visited.atomic_set_bit(init_ids[c_i]);
        }
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Warm the cache with the seed vertices' data blocks.
#pragma omp parallel for
    for (idi v_i = 0; v_i < init_size; ++v_i) {
//    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    uint64_t tmp_count_computation = 0;
    // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
#pragma omp parallel for reduction(+ : tmp_count_computation)
    for (unsigned i = 0; i < init_size; i++) {
//    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        // Each vertex record starts with its precomputed norm, followed by the
        // vector data (see compute_distance_with_norm usage).
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
//        ++count_distance_computation_;
        ++tmp_count_computation;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
//        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    count_distance_computation_ += tmp_count_computation;
    count_init_computation += tmp_count_computation;
    count_single_query_computation += tmp_count_computation;
    tmp_count_computation = 0;
//    std::sort(set_L.begin(), set_L.begin() + L);
    // Sort the global queue (ascending distance) so the best seeds come first.
    std::sort(
            set_L.begin() + base_set_L,
            set_L.begin() + base_set_L + init_size);
//            set_L.begin() + base_set_L + L);
    local_queues_ends[num_threads_ - 1] = init_size;
//    local_queues_ends[num_threads_ - 1] = L;
//    time_initialization_ += WallTimer::get_time_mark();

//    time_sequential_phase_ -= WallTimer::get_time_mark();
//    std::vector<idi> top_m_candidates(M);
    // Alias for the global queue's current size (last slot of local_queues_ends).
    idi &global_queue_size = local_queues_ends[num_threads_ - 1];
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    idi M = 1; // Batch width: number of candidates expanded per iteration.

    { // Single thread
        // Sequential phase: runs while M is still small and the budget holds.
        while (k < L && M < value_M_middle && count_single_query_computation <= computation_threshold) {
            ++tmp_count;
//            {//test
//                printf("tmp_count: %d\n", tmp_count);
//            }
//            int real_threads = std::min(static_cast<int>(M), num_threads_);
//            idi queue_base = num_threads_ - real_threads;
            // Select M candidates
            idi last_k = L;
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
            for (idi c_i = k; c_i < global_queue_size && top_m_candidates_end < M; ++c_i) {
//            for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
                idi index_set_L = c_i + base_set_L;
                if (set_L[index_set_L].is_checked_) {
                    continue;
                }
                last_k = c_i; // Record the location of the last candidate selected.
                set_L[index_set_L].is_checked_ = true;
                top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
            }

            // nk tracks the smallest queue position updated by an insertion;
            // it decides where the next iteration resumes scanning.
            idi nk = L;
            // Push M candidates' neighbors into the queue.
            for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
                idi cand_id = top_m_candidates[c_i];
                _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
                // Adjacency list layout: [out_degree][neighbor ids...] right
                // after the vertex's data bytes.
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
                for (idi n_i = 0; n_i < out_degree; ++n_i) {
                    _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
                }
                for (idi e_i = 0; e_i < out_degree; ++e_i) {
                    idi nb_id = out_edges[e_i];
                    { // Sequential edition
                        if (is_visited[nb_id]) {
                            continue;
                        }
                        is_visited[nb_id] = 1;
                    }

                    auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                    dataf norm = *nb_data++;
//                    ++count_distance_computation_;
                    ++tmp_count_computation;
                    distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                    // Prune: skip neighbors worse than the current queue tail.
                    if (dist > set_L[global_queue_size - 1 + base_set_L].distance_) {
//                    if (dist > set_L[L - 1 + base_set_L].distance_) {
                        continue;
                    }

                    Candidate cand(nb_id, dist, false);
                    // Thread 0 maintains the "global" queue
                    idi r = add_into_queue(
                            set_L,
                            base_set_L,
                            global_queue_size,
//                            local_queues_ends[num_threads_ - 1],
                            L,
                            cand);
                    if (r < nk) {
                        nk = r;
                    }
                }
            }
            top_m_candidates_end = 0; // Clear top_m_candidates
            count_distance_computation_ += tmp_count_computation;
            count_seq_computation += tmp_count_computation;
            count_single_query_computation += tmp_count_computation;
            tmp_count_computation = 0;
//        {// Local queues' ends
//            printf("query%u:iter: %u", query_id, tmp_count);
//            for (int i_t = 0; i_t < num_threads_; ++i_t) {
//                printf(" [%u]: %u", i_t, local_queues_ends[i_t]);
//            }
//            printf("\n");
//        }

            // Resume from the earliest inserted position, or just past the
            // last expanded candidate if nothing better was inserted.
            if (nk <= last_k) {
                k = nk;
            } else {
                k = last_k + 1;
            }

            {// Scale M
                if (M < value_M_max) {
                    M <<= 1;
                } else {
                    M = value_M_max;
                }
            }
        }
    }
//    time_sequential_phase_ += WallTimer::get_time_mark();

//    time_parallel_phase_ -= WallTimer::get_time_mark();
    { // Multiple Threads
        // Parallel phase: same expansion loop, but neighbor processing is
        // distributed across threads; non-master threads buffer insertions in
        // their local queues which get merged after each round.
        while (k < L and count_single_query_computation <= computation_threshold) {
//        while (k < L) {
            ++tmp_count;
//            {//test
//                printf("tmp_count: %d "
//                       "k: %u "
//                       "global_queue_size: %u\n",
//                       tmp_count,
//                       k,
//                       global_queue_size);
//            }
//            int real_threads = std::min(static_cast<int>(M), num_threads_);
//            idi queue_base = num_threads_ - real_threads;
            // Select M candidates
            idi last_k = L;
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
            for (idi c_i = k; c_i < global_queue_size && top_m_candidates_end < M; ++c_i) {
//            for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
                idi index_set_L = c_i + base_set_L;
                if (set_L[index_set_L].is_checked_) {
                    continue;
                }
                last_k = c_i; // Record the location of the last candidate selected.
                set_L[index_set_L].is_checked_ = true;
                top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
            }

            idi nk = L;
            // Push M candidates' neighbors into the queue.
//#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads)
#pragma omp parallel for reduction(+ : tmp_count_computation)
            for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
                int tid = omp_get_thread_num();
                idi cand_id = top_m_candidates[c_i];
                _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
                for (idi n_i = 0; n_i < out_degree; ++n_i) {
                    _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
                }
                for (idi e_i = 0; e_i < out_degree; ++e_i) {
                    idi nb_id = out_edges[e_i];
                    { // Sequential edition
                        // NOTE(review): unsynchronized test-and-set across
                        // threads; a vertex can occasionally be processed
                        // twice — presumably an accepted benign race here.
                        if (is_visited[nb_id]) {
                            continue;
                        }
                        is_visited[nb_id] = 1;
                    }

                    auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                    dataf norm = *nb_data++;
//                    ++count_distance_computation_;
                    ++tmp_count_computation;
                    distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                    // NOTE(review): global_queue_size is concurrently updated
                    // by thread 0 below while read here — confirm tolerated.
                    if (dist > set_L[global_queue_size - 1 + base_set_L].distance_) {
//                    if (dist > set_L[L - 1 + base_set_L].distance_) {
                        continue;
                    }

                    Candidate cand(nb_id, dist, false);
                    // Add to the local queue.
                    if (0 != tid) {
                        // Non-Master threads using local queues
                        add_into_queue(
                                set_L,
                                (tid - 1) * local_queue_length,
                                local_queues_ends[tid - 1],
                                local_queue_length,
                                cand);
                    } else {
                        // Thread 0 maintains the "global" queue
                        idi r = add_into_queue(
                                set_L,
                                base_set_L,
                                global_queue_size,
//                                local_queues_ends[num_threads_ - 1],
                                L,
                                cand);
                        if (r < nk) {
                            nk = r;
                        }
                    }
                }
            }
            top_m_candidates_end = 0; // Clear top_m_candidates
            count_distance_computation_ += tmp_count_computation;
            count_par_computation += tmp_count_computation;
            count_single_query_computation += tmp_count_computation;
            tmp_count_computation = 0;
//        {// Local queues' ends
//            printf("query%u:iter: %u", query_id, tmp_count);
//            for (int i_t = 0; i_t < num_threads_; ++i_t) {
//                printf(" [%u]: %u", i_t, local_queues_ends[i_t]);
//            }
//            printf("\n");
//        }

//        // Merge. Merge all queues in parallel.
            {
                // Fold every thread's local queue into the global queue; the
                // returned position updates nk like a direct insertion would.
                if (num_threads_ > 1) {
//                    idi r = merge_all_queues_queue_base(
//                            set_L,
//                            local_queues_ends,
//                            queue_base,
//                            real_threads,
//                            local_queue_length,
//                            L);
                    idi r = merge_all_queues_para_array(
                            set_L,
                            local_queues_ends,
                            local_queue_length,
                            L);
                    if (r < nk) {
                        nk = r;
                    }
                }
            }
            if (nk <= last_k) {
                k = nk;
            } else {
                k = last_k + 1;
            }
            {// Scale M
                if (M < value_M_max) {
                    M <<= 1;
                } else {
                    M = value_M_max;
                }
            }

//            {// Print relative distance
////                distf top_dist = set_L[base_set_L].distance_;
//                for (idi i_l = 0; i_l < L; ++i_l) {
//                    printf("%u %f\n",
//                           tmp_count, set_L[i_l + base_set_L].distance_);
////                           tmp_count, set_L[i_l + base_set_L].distance_ - top_dist);
//                }
//            }
        }
    }
//    time_parallel_phase_ += WallTimer::get_time_mark();

    // Copy the ids of the best K candidates from the global queue into set_K.
#pragma omp parallel for
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i + base_set_L].id_;
//        set_K[k_i] = set_L[k_i].id_;
    }

    {// Reset
        // Clear per-query state so the buffers can be reused by the next query.
//        std::fill(is_visited.begin(), is_visited.end(), 0);
        is_visited.reset();
//        is_visited.clear_all();
        std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    }

//    {//test
//        if (3 == query_id) {
//            exit(1);
//        }
//    }
//    {//test
//        printf("count_single: %lu "
//               "ct_init: %lu "
//               "ct_seq: %lu "
//               "ct_par: %lu\n",
//               count_single_query_computation,
//               count_init_computation,
//               count_seq_computation,
//               count_par_computation);
//    }
}
///*
// * 6/15/2020-14:40
// * Queues merging together to the global queue
// */
//inline void Searching::para_search_with_top_m_merge_queues_sequential_merge(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<idi> &top_m_candidates,
// boost::dynamic_bitset<> &is_visited)
//{
//// const idi base_set_L = (num_threads_ - 1) * local_queue_length;
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
//// is_visited.atomic_set_bit(init_ids[c_i]);
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// uint64_t tmp_count_computation = 0;
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
//// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//// std::sort(set_L.begin(), set_L.begin() + L);
// std::sort(
// set_L.begin() + base_set_L,
// set_L.begin() + base_set_L + L);
// local_queues_ends[num_threads_ - 1] = L;
//
//// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// idi M = 1;
//
// { // Single thread
// while (k < L && M < value_M_middle) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
//
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
//
// { // Multiple Threads
// while (k < L) {
// ++tmp_count;
//// {//test
//// if (num_threads_ == 2) {
//// printf("tmp_count: %d "
//// "k: %u\n",
//// tmp_count,
//// k);
//// }
//// }
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads)
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// if (0 != tid) {
// // Non-Master threads using local queues
// add_into_queue(
// set_L,
// (tid - 1) * local_queue_length,
// local_queues_ends[tid - 1],
// local_queue_length,
// cand);
// } else {
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
//// // Merge. Merge all queues in parallel.
// {
//// {//test
//// for (idi q_i = 0; q_i < num_threads_; ++q_i) {
//// if (0 == local_queues_ends[q_i]) {
//// continue;
//// }
//// for (idi e_i = 0; e_i < local_queues_ends[q_i]; ++e_i) {
//// printf("tmp_count: %u "
//// "q_i: %u "
//// "[%u]: (%u, %f)\n",
//// tmp_count,
//// q_i,
//// e_i, set_L[q_i * local_queue_length + e_i].id_, set_L[q_i * local_queue_length + e_i].distance_);
//// }
//// }
//// }
//// time_merge_ -= WallTimer::get_time_mark();
// if (num_threads_ > 1) {
// idi r = merge_all_queues_all_together_in_sequential(
// set_L,
// local_queues_ends,
// local_queue_length,
// L);
//// idi r = merge_all_queues_para_array(
//// set_L,
//// local_queues_ends,
//// local_queue_length,
//// L);
// if (r < nk) {
// nk = r;
// }
//// {//test
//// printf("tmp_count: %u "
//// "r: %u "
//// "last_k: %u\n",
//// tmp_count,
//// r,
//// last_k);
//// for (idi l_i = 0; l_i < L; ++l_i) {
//// printf("tmp_count: %u "
//// "[%u]: (%u, %f)\n",
//// tmp_count,
//// l_i, set_L[l_i + base_set_L].id_, set_L[l_i + base_set_L].distance_);
//// }
//// }
// }
//
//// time_merge_ += WallTimer::get_time_mark();
// }
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i + base_set_L].id_;
//// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//
//// {//test
//// if (0 == query_id) {
//// exit(1);
//// }
//// }
//}
///*
// * 6/19/2020:
// * Intra-query + Inter-query
// */
//inline void Searching::para_search_with_top_m_nested_para(
// const idi batch_start,
// const idi batch_size,
// const idi value_M_middle,
// const idi value_M_max,
// const idi K,
// const idi L,
// std::vector< std::vector<Candidate> > &set_L_list,
// const std::vector<idi> &init_ids,
// std::vector< std::vector<idi> > &set_K_list,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_intra_query_ - 1) * local_queue_length;
// std::vector< std::vector<idi> > &local_queues_ends_list, // Sizes of local queue
// std::vector< std::vector<idi> > &top_m_candidates_list,
// std::vector< boost::dynamic_bitset<> > &is_visited_list)
//{
// {// Initialize is_visited flag array
//#pragma omp parallel for num_threads(num_threads_inter_query_)
// for (idi q_i = 0; q_i < batch_size; ++q_i) {
// auto &is_visited = is_visited_list[q_i];
//#pragma omp parallel for num_threads(num_threads_intra_query_)
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
// }
//
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
//
// uint64_t tmp_count_total_computation = 0;
//#pragma omp parallel for num_threads(num_threads_inter_query_) reduction(+ : tmp_count_total_computation)
// for (idi q_i = 0; q_i < batch_size; ++q_i) {
// idi query_id = batch_start + q_i;
// auto &set_L = set_L_list[q_i];
// auto &local_queues_ends = local_queues_ends_list[q_i];
// auto &is_visited = is_visited_list[q_i];
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
////#pragma omp parallel for
//// for (idi v_i = 0; v_i < L; ++v_i) {
//// idi v_id = init_ids[v_i];
//// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
//// }
// uint64_t tmp_count_computation = 0;
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
//#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(num_threads_intra_query_)
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
//// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
//// count_distance_computation_ += tmp_count_computation;
// tmp_count_total_computation += tmp_count_computation;
// tmp_count_computation = 0;
//// std::sort(set_L.begin(), set_L.begin() + L);
// std::sort(
// set_L.begin() + base_set_L,
// set_L.begin() + base_set_L + L);
// local_queues_ends[num_threads_intra_query_ - 1] = L;
//
//// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// idi M = 1;
//
// auto &top_m_candidates = top_m_candidates_list[q_i];
// { // Single thread
// while (k < L && M < value_M_middle) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
//// {//test
//// if (391655 == nb_id) {
//// printf("tmp_count: %u "
//// "nb_id: %u "
//// "distf: %f\n",
//// tmp_count,
//// nb_id,
//// dist);
//// }
//// }
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_intra_query_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//// count_distance_computation_ += tmp_count_computation;
// tmp_count_total_computation += tmp_count_computation;
// tmp_count_computation = 0;
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
//
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
//
// { // Multiple Threads
// while (k < L) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(num_threads_intra_query_)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
//// {//test
//// if (391655 == nb_id) {
//// printf("tmp_count: %u "
//// "nb_id: %u "
//// "distf: %f\n",
//// tmp_count,
//// nb_id,
//// dist);
//// }
//// }
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// if (0 != tid) {
// // Non-Master threads using local queues
// add_into_queue(
// set_L,
// (tid - 1) * local_queue_length,
// local_queues_ends[tid - 1],
// local_queue_length,
// cand);
// } else {
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_intra_query_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//// count_distance_computation_ += tmp_count_computation;
// tmp_count_total_computation += tmp_count_computation;
// tmp_count_computation = 0;
//
//// // Merge. Merge all queues in parallel.
// {
//// time_merge_ -= WallTimer::get_time_mark();
// if (num_threads_intra_query_ > 1) {
// idi r = merge_all_queues_para_array(
// set_L,
// local_queues_ends,
// local_queue_length,
// L);
// if (r < nk) {
// nk = r;
// }
// }
//// time_merge_ += WallTimer::get_time_mark();
// }
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
// count_distance_computation_ += tmp_count_total_computation;
// tmp_count_total_computation = 0;
//
// auto &set_K = set_K_list[query_id];
//
//#pragma omp parallel for num_threads(num_threads_intra_query_)
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i + base_set_L].id_;
//// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
// }
//
//// {//test
//// if (3 == query_id) {
//// exit(1);
//// }
//// }
//// {
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// printf("%u: (%u %f)\n",
//// k_i, set_L_list[0][k_i].id_, set_L_list[0][k_i].distance_);
//// }
//// if (0 == batch_start) {
//// exit(1);
//// }
//// }
//}
/*
* 6/22/2020-21:30
* Do searching on the local_set_L
* local_set_L is already sorted
* is_visited is already set up.
*/
inline void Searching::subsearch_with_top_m(
const idi value_M_max,
const idi query_id,
const idi local_L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &local_top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &local_count_distance_computation)
{
const dataf *query_data = queries_load_ + query_id * dimension_;
// idi local_top_m_candidates_end = 0;
idi k = 0; // Index of first unchecked candidate.
idi iter = 0;
idi M = 1; // value of M
while (k < local_L) {
++iter;
subsearch_top_m_for_one_iteration(
iter,
k,
M,
query_id,
query_data,
local_L,
set_L,
set_L_start,
set_L_size,
local_top_m_candidates,
is_visited,
local_count_distance_computation);
{// Scale M
if (M < value_M_max) {
M <<= 1;
} else {
M = value_M_max;
}
}
}
// {//test
// printf("set_L_start: %u "
// "local_count_distance_computation: %lu\n",
// set_L_start,
// local_count_distance_computation);
// }
}
//// Backup
//inline void Searching::subsearch_with_top_m(
// const idi value_M_max,
// const idi query_id,
// const idi local_L,
// std::vector<Candidate> &set_L,
// const idi base_set_L,
// idi &set_L_end,
// std::vector<idi> &local_top_m_candidates,
// boost::dynamic_bitset<> &is_visited,
// uint64_t &local_count_distance_computation)
//{
// const dataf *query_data = queries_load_ + query_id * dimension_;
// idi local_top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi iter = 0;
// idi M = 1; // value of M
//
// while (k < local_L) {
// ++iter;
// // Select M candidates
// idi last_k = local_L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < set_L_end && local_top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// local_top_m_candidates[local_top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = local_L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < local_top_m_candidates_end; ++c_i) {
// idi cand_id = local_top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++local_count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[set_L_end - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// set_L_end,
// local_L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// local_top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
//
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
//}
/*
* 7/6/2020-23:17
* Subsearch only 1 iteration using top-m
*/
// Run ONE top-M iteration of best-first search on the local queue
// set_L[set_L_start .. set_L_start + set_L_size):
//   Phase 1: starting at position k_uc, mark up to value_M unchecked
//            candidates as checked and collect their ids.
//   Phase 2: expand each collected candidate's out-neighbors, compute
//            distances to the query, and insert promising ones back into the
//            queue (capacity L) via add_into_queue, which updates set_L_size.
//   Phase 3: advance k_uc: back to the lowest insertion position if new work
//            appeared at or above the last picked candidate, otherwise just
//            past the last picked candidate.
// `iter` and `query_id` are only referenced by the commented-out tracing code.
// `is_visited` holds one bit per vertex and is set for every neighbor touched;
// `count_distance_computation` is incremented once per distance computed.
inline void Searching::subsearch_top_m_for_one_iteration(
const idi iter,
idi &k_uc,
const idi value_M,
const idi query_id,
const dataf *query_data,
const idi L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &count_distance_computation)
{
// Select M candidates
idi top_m_candidates_end = 0;
idi last_k = L;
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
for (idi c_i = k_uc; c_i < set_L_size && top_m_candidates_end < value_M; ++c_i) {
idi index_set_L = c_i + set_L_start;
if (set_L[index_set_L].is_checked_) {
continue;
}
last_k = c_i; // Record the location of the last candidate selected.
set_L[index_set_L].is_checked_ = true;
top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// {//test
// M_ids_.push_back(set_L[index_set_L].id_);
// }
}
// nk tracks the lowest queue position any new neighbor is inserted at.
idi nk = L;
// Push M candidates' neighbors into the queue.
for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
idi cand_id = top_m_candidates[c_i];
// NOTE(review): the pointer arithmetic below implies each vertex record in
// opt_nsg_graph_ stores data_bytes_ bytes of vector data (beginning with the
// norm), followed by an out-degree and the adjacency list — confirm against
// the graph loader.
_mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
idi out_degree = *out_edges++;
// Prefetch every neighbor's record before computing any distances.
for (idi n_i = 0; n_i < out_degree; ++n_i) {
_mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
}
for (idi e_i = 0; e_i < out_degree; ++e_i) {
idi nb_id = out_edges[e_i];
{ // Sequential edition
// Skip already-visited vertices; mark new ones immediately.
if (is_visited[nb_id]) {
continue;
}
is_visited[nb_id] = 1;
}
auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
dataf norm = *nb_data++;
++count_distance_computation;
distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// Prune neighbors worse than the current tail of the (sorted) queue.
if (dist > set_L[set_L_size - 1 + set_L_start].distance_) {
continue;
}
Candidate cand(nb_id, dist, false);
// add_into_queue keeps the queue sorted, grows set_L_size up to L, and
// returns the insertion position relative to set_L_start.
idi r = add_into_queue(
set_L,
set_L_start,
set_L_size,
L,
cand);
if (r < nk) {
nk = r;
}
}
}
// top_m_candidates_end = 0; // Clear top_m_candidates
// Advance the first-unchecked index for the next iteration.
if (nk <= last_k) {
k_uc = nk;
} else {
k_uc = last_k + 1;
}
// {//test
// for (idi l_i = 0; l_i < set_L_size; ++l_i) {
// L_ids_.push_back(set_L[set_L_start + l_i].id_);
// }
// std::sort(L_ids_.begin(), L_ids_.end());
// std::sort(M_ids_.begin(), M_ids_.end());
// for (idi m_i = 0; m_i < M_ids_.size(); ++m_i) {
// printf("query_id: %u "
// "iter: %u "
// "M[%u]: "
// "%u\n",
// query_id,
// iter,
// m_i,
// M_ids_[m_i]);
// }
// M_ids_.clear();
// for (idi l_i = 0; l_i < L_ids_.size(); ++l_i) {
// printf("query_id: %u "
// "iter: %u "
// "L[%u]: "
// "%u\n",
// query_id,
// iter,
// l_i,
// L_ids_[l_i]);
// }
// L_ids_.clear();
// }
}
/*
* One more parameter for distance bound
*/
inline void Searching::subsearch_top_m_for_one_iteration_lth(
const distf bound_lth,
const idi iter,
idi &k_uc,
const idi value_M,
const idi query_id,
const dataf *query_data,
const idi L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &count_distance_computation)
{
// Select M candidates
idi top_m_candidates_end = 0;
idi last_k = L;
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
for (idi c_i = k_uc; c_i < set_L_size && top_m_candidates_end < value_M; ++c_i) {
idi index_set_L = c_i + set_L_start;
if (set_L[index_set_L].is_checked_) {
continue;
}
last_k = c_i; // Record the location of the last candidate selected.
set_L[index_set_L].is_checked_ = true;
top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
}
idi nk = L;
// Push M candidates' neighbors into the queue.
for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
idi cand_id = top_m_candidates[c_i];
_mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
idi out_degree = *out_edges++;
for (idi n_i = 0; n_i < out_degree; ++n_i) {
_mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
}
for (idi e_i = 0; e_i < out_degree; ++e_i) {
idi nb_id = out_edges[e_i];
{ // Sequential edition
if (is_visited[nb_id]) {
continue;
}
is_visited[nb_id] = 1;
}
auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
dataf norm = *nb_data++;
++count_distance_computation;
distf dist = compute_distance_with_norm(nb_data, query_data, norm);
if (dist > bound_lth) {
continue;
}
Candidate cand(nb_id, dist, false);
idi r = add_into_queue(
set_L,
set_L_start,
set_L_size,
L,
cand);
if (r < nk) {
nk = r;
}
}
}
if (nk <= last_k) {
k_uc = nk;
} else {
k_uc = last_k + 1;
}
}
/*
* 7/24/2020-10:53
* Subsearch for one iteration, with the global L-th value as the bound,
* and the top_m_position indicates the bound for local top-M vertices.
*/
// Instrumented variant of the one-iteration top-M subsearch: same select /
// expand / advance structure as subsearch_top_m_for_one_iteration(), with
// wall-clock accounting (time_pick_top_m, time_distance_computation,
// time_add_to_queue) and an add-to-queue counter. The WallTimer
// subtract/add brackets around each phase accumulate elapsed time, so their
// exact placement is load-bearing — do not reorder.
//
// NOTE(review): despite the name, `bound_lth` is currently UNUSED — pruning
// below compares against this queue's own tail distance, and the bound_lth
// comparison is commented out. Confirm which bound is intended.
inline void Searching::subsearch_top_m_for_one_iteration_lth_mth(
const distf bound_lth,
// const idi top_m_position,
const idi iter,
idi &k_uc,
const idi local_m_count,
const idi query_id,
const dataf *query_data,
const idi L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &count_distance_computation,
double &time_pick_top_m,
uint64_t &count_add_to_queue,
double &time_distance_computation,
double &time_add_to_queue)
{
// {//test
// printf("query_id: %u "
// "iter: %u "
// "tid: %u \n",
// query_id,
// iter,
// omp_get_thread_num());
// }
// Select M candidates
idi top_m_candidates_end = 0;
idi last_k = L;
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k_uc; c_i < top_m_position; ++c_i) {
time_pick_top_m -= WallTimer::get_time_mark();
for (idi c_i = k_uc; c_i < set_L_size && top_m_candidates_end < local_m_count; ++c_i) {
idi index_set_L = c_i + set_L_start;
if (set_L[index_set_L].is_checked_) {
continue;
}
last_k = c_i; // Record the location of the last candidate selected.
set_L[index_set_L].is_checked_ = true;
top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// {//test
// M_ids_.push_back(set_L[index_set_L].id_);
// }
}
time_pick_top_m += WallTimer::get_time_mark();
// nk tracks the lowest queue position any new neighbor is inserted at.
idi nk = L;
// Push M candidates' neighbors into the queue.
for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
time_pick_top_m -= WallTimer::get_time_mark();
idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
time_pick_top_m += WallTimer::get_time_mark();
for (idi e_i = 0; e_i < out_degree; ++e_i) {
time_distance_computation -= WallTimer::get_time_mark();
idi nb_id = out_edges[e_i];
{ // Sequential edition
// Skip already-visited vertices; close the timing bracket before the
// early continue so elapsed time is still recorded.
if (is_visited[nb_id]) {
time_distance_computation += WallTimer::get_time_mark();
continue;
}
is_visited[nb_id] = 1;
}
auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
dataf norm = *nb_data++;
++count_distance_computation;
distf dist = compute_distance_with_norm(nb_data, query_data, norm);
time_distance_computation += WallTimer::get_time_mark();
// Prune against the current tail of this (sorted) queue.
if (dist > set_L[set_L_start + set_L_size - 1].distance_) {
// if (dist > bound_lth) {
continue;
}
++count_add_to_queue;
Candidate cand(nb_id, dist, false);
// time_pick_top_m -= WallTimer::get_time_mark();
time_add_to_queue -= WallTimer::get_time_mark();
// add_into_queue keeps the queue sorted, grows set_L_size up to L, and
// returns the insertion position relative to set_L_start.
idi r = add_into_queue(
set_L,
set_L_start,
set_L_size,
L,
cand);
if (r < nk) {
nk = r;
}
time_add_to_queue += WallTimer::get_time_mark();
// time_pick_top_m += WallTimer::get_time_mark();
}
}
// Advance the first-unchecked index for the next iteration.
if (nk <= last_k) {
k_uc = nk;
} else {
k_uc = last_k + 1;
}
}
/*
* 6/23/2020-13:37
* Is it good to have every thread run its own subsearch by itself?
*/
// Full query search, v0: partition set_L (length L) into num_threads_
// contiguous local queues; every thread sorts and searches its own queue
// independently via subsearch_with_top_m(), then merge_in_set_L() combines the
// results. The top-K ids (with adjacent duplicates skipped) are written into
// set_K, and is_visited is cleared for the next query.
inline void Searching::para_search_with_top_m_subsearch_v0(
// const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
std::vector< std::vector<idi> > &top_m_candidates_list,
boost::dynamic_bitset<> &is_visited)
{
uint64_t tmp_count_computation = 0;
{// Initialization
// is_visited flag array
//#pragma omp parallel for
// Cannot use OMP for bit array is_visited!
// (boost::dynamic_bitset writes are not safe concurrently.)
for (idi c_i = 0; c_i < L; ++c_i) {
is_visited[init_ids[c_i]] = 1;
}
const dataf *query_data = queries_load_ + query_id * dimension_;
#pragma omp parallel for
for (idi v_i = 0; v_i < L; ++v_i) {
idi v_id = init_ids[v_i];
_mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
}
// Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
#pragma omp parallel for reduction(+ : tmp_count_computation)
for (unsigned i = 0; i < L; i++) {
unsigned v_id = init_ids[i];
auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
dataf norm = *v_data++;
++tmp_count_computation;
distf dist = compute_distance_with_norm(v_data, query_data, norm);
set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// {//test
// printf("q_i: %u "
// "init_ids[%u]: "
// "count: %u "
// "id: %u "
// "dist: %f \n",
// query_id,
// i,
// tmp_count_computation,
// v_id,
// dist);
// }
}
// {//test
// printf("Initialization tmp_count_computation: %lu\n",
// tmp_count_computation);
// }
count_distance_computation_ += tmp_count_computation;
tmp_count_computation = 0;
// std::sort(
// set_L.begin(),
// set_L.begin() + L);
}
idi queue_end = L;
// Searching
if (num_threads_ == 1) { // Single threads
// One sorted global queue; plain sequential top-M search.
std::sort(
set_L.begin(),
set_L.end());
subsearch_with_top_m(
value_M_max,
query_id,
L,
set_L,
0,
queue_end,
top_m_candidates_list[0],
is_visited,
tmp_count_computation);
count_distance_computation_ += tmp_count_computation;
tmp_count_computation = 0;
// {
// idi half_length = queue_end / 2;
// std::sort(
// set_L.begin(),
// set_L.begin() + half_length);
//
// subsearch_with_top_m(
// value_M_max,
// query_id,
// half_length,
// set_L,
// 0,
// half_length,
// top_m_candidates_list[0],
// is_visited,
// tmp_count_computation);
//
// std::sort(
// set_L.begin() + half_length,
// set_L.end());
// subsearch_with_top_m(
// value_M_max,
// query_id,
// half_length,
// set_L,
// half_length,
// half_length,
// top_m_candidates_list[0],
// is_visited,
// tmp_count_computation);
// count_distance_computation_ += tmp_count_computation;
//
// std::vector <Candidate> tmp_set_L(L);
// std::merge(set_L.begin(), set_L.begin() + half_length,
// set_L.begin() + half_length, set_L.end(),
// tmp_set_L.begin());
// std::copy(tmp_set_L.begin(), tmp_set_L.end(), set_L.begin());
// }
// {//test
// printf("q_i: %u "
// "count_distance_computation_: %lu\n",
// query_id,
// count_distance_computation_);
// }
} else { // Multiple threads
// Split set_L into num_queues contiguous local queues of (up to)
// local_queue_length entries; the last queue may be shorter.
const idi num_queues = num_threads_;
const idi local_queue_length = (L - 1) / num_queues + 1;
// Parallel for
#pragma omp parallel for reduction(+ : tmp_count_computation)
for (idi q_i = 0; q_i < num_queues; ++q_i) {
idi local_queue_base = q_i * local_queue_length;
if (local_queue_base >= L) {
continue;
}
idi local_queue_end = local_queue_length;
if (local_queue_base + local_queue_end > L) {
local_queue_end = L - local_queue_base;
}
std::sort(
set_L.begin() + local_queue_base,
set_L.begin() + local_queue_base + local_queue_end);
// Each thread runs an independent full subsearch on its own queue.
// The per-thread M budget is value_M_max / num_queues.
subsearch_with_top_m(
// 1,
value_M_max / num_queues, // value_M_max
// local_queue_end, // value_M_max
query_id,
local_queue_end, // local_L
set_L,
local_queue_base, // base_set_L
local_queue_end, // set_L_end
top_m_candidates_list[q_i],
is_visited,
tmp_count_computation);
// {//test
// printf("q_i: %u "
// "tmp_count_computation: %lu\n",
// q_i,
// tmp_count_computation);
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
// }
}
count_distance_computation_ += tmp_count_computation;
tmp_count_computation = 0;
// {//test
// printf("query_id: %u "
// "count_distance_computation_: %lu\n",
// query_id,
// count_distance_computation_);
// }
// Merge
// time_merge_ -= WallTimer::get_time_mark();
merge_in_set_L(
set_L,
L,
num_queues,
local_queue_length);
// time_merge_ += WallTimer::get_time_mark();
// {//test
// printf("q_i: %u "
// "count_distance_computation_: %lu\n",
// query_id,
// count_distance_computation_);
// }
}
{// Return the results to set_K
// How to deal with duplicate?
// Emit ids in order, skipping entries equal to the previously emitted id.
// NOTE(review): if duplicates leave fewer than K distinct ids within the
// first L entries, the tail of set_K keeps its previous contents — confirm
// callers pre-fill or tolerate this.
idi last_id = set_L[0].id_;
set_K[0] = last_id;
idi k_i = 1;
idi l_i = 1;
while (k_i < K && l_i < L) {
if (last_id == set_L[l_i].id_) {
++l_i;
continue;
}
last_id = set_L[l_i++].id_;
set_K[k_i++] = last_id;
}
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
//// set_K[k_i] = set_L[k_i].id_;
// }
}
// {
// for (idi k_i = 0; k_i < L; ++k_i) {
// printf("q_i: %u "
// "k_i: %u "
// "id: %u "
// "dist: %f\n",
// query_id,
// k_i,
// set_L[k_i].id_,
// set_L[k_i].distance_);
// }
// }
{// Reset
// std::fill(is_visited.begin(), is_visited.end(), 0);
is_visited.reset();
// is_visited.clear_all();
}
// {//test
// if (0 == query_id) {
// exit(1);
// }
// }
}
/*
* 7/5/2020-18:38
* L-th Selection, and every thread does its own searching
*/
// Full query search, v1: every thread searches its own pre-assigned local
// queue (bases in local_queues_bases, live sizes in local_queues_ends),
// pruning against a shared L-th-distance bound that selecting_top_L_seq()
// refreshes after every round. Finally the sorted local queues are K-way
// merged (with duplicate skipping) into set_K, and is_visited is cleared.
//
// Parameters:
//   local_M_max        — per-thread top-M cap (M doubles per round up to this).
//   query_id, K        — query index and number of results to emit.
//   global_L           — L used by selecting_top_L_seq for the global bound.
//   local_L            — capacity of each local queue.
//   total_L            — number of initial ids (init_ids) to seed.
//   init_queue_end     — initial fill size of every local queue.
//   set_L              — candidate pool holding all local queues.
//   init_ids           — seed vertex ids, total_L of them.
//   set_K              — output: up to K result ids.
//   local_queues_bases — start offset of each thread's queue inside set_L.
//   local_queues_ends  — in/out: live size of each queue.
//   top_m_candidates_list — per-thread scratch buffers.
//   is_visited         — global visited bits; reset before returning.
//
// Fixes vs. the previous revision (result-merge phase):
//   1) last_id is now updated after every emitted id, so ALL adjacent
//      duplicates are skipped, not just duplicates of set_K[0].
//   2) The duplicate-skip loop checks bounds BEFORE reading set_L[sub]
//      (the old order read one element past the queue's end).
//   3) When every queue is exhausted mid-round, the loop breaks instead of
//      emitting a stale min_sub entry.
//   4) min_q_i/min_sub are initialized (no indeterminate reads).
//   5) A queue emptied purely by duplicate-skipping no longer forces an
//      extra no-op round (is_finished is cleared only after the bounds
//      re-check).
inline void Searching::para_search_with_top_m_subsearch_v1(
        const idi local_M_max,
        const idi query_id,
        const idi K,
        const idi global_L,
        const idi local_L,
        const idi total_L,
        const idi init_queue_end,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
        const std::vector<idi> &local_queues_bases,
        std::vector<idi> &local_queues_ends,
        std::vector< std::vector<idi> > &top_m_candidates_list,
        boost::dynamic_bitset<> &is_visited)
{
    uint64_t tmp_count_computation = 0;
    {// Initialization
        // Mark every seed visited.
        // Cannot use OMP: bit-array writes are not safe concurrently.
        for (idi c_i = 0; c_i < total_L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
        }
        const dataf *query_data = queries_load_ + query_id * dimension_;
#pragma omp parallel for
        for (idi v_i = 0; v_i < total_L; ++v_i) {
            idi v_id = init_ids[v_i];
            _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
        }
        // Each thread fills its own local queue with its slice of init_ids.
#pragma omp parallel for reduction(+ : tmp_count_computation)
        for (int q_i = 0; q_i < num_threads_; ++q_i) {
            idi local_queue_base = local_queues_bases[q_i];
            idi init_ids_base = q_i * init_queue_end;
            idi init_ids_bound = init_ids_base + init_queue_end;
            for (idi id_i = init_ids_base; id_i < init_ids_bound; ++id_i) {
                idi v_id = init_ids[id_i];
                auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
                dataf norm = *v_data++;
                ++tmp_count_computation;
                distf dist = compute_distance_with_norm(v_data, query_data, norm);
                set_L[local_queue_base++] = Candidate(v_id, dist, false); // False means not checked.
            }
            local_queues_ends[q_i] = init_queue_end;
        }
        count_distance_computation_ += tmp_count_computation;
        tmp_count_computation = 0;
    }
    // Searching
    if (num_threads_ == 1) { // Single thread: one sorted queue, plain subsearch.
        std::sort(
                set_L.begin(),
                set_L.end());
        subsearch_with_top_m(
                local_M_max,
                query_id,
                local_L,
                set_L,
                0,
                local_queues_ends[0],
                top_m_candidates_list[0],
                is_visited,
                tmp_count_computation);
        count_distance_computation_ += tmp_count_computation;
        tmp_count_computation = 0;
    } else { // Multiple threads
        const dataf *query_data = queries_load_ + query_id * dimension_;
        const idi num_queues = num_threads_;
#pragma omp parallel for
        for (idi q_i = 0; q_i < num_queues; ++q_i) {
            idi local_queue_base = local_queues_bases[q_i];
            std::sort(
                    set_L.begin() + local_queue_base,
                    set_L.begin() + local_queue_base + init_queue_end);
        }
        // Initial global bound: the smallest tail distance over all queues.
        distf bound_lth = FLT_MAX;
        for (idi q_i = 0; q_i < num_queues; ++q_i) {
            bound_lth = std::min(bound_lth, set_L[local_queues_bases[q_i] + init_queue_end - 1].distance_);
        }
        std::vector<idi> ks(num_queues, 0); // Per-queue first-unchecked index.
        idi iter = 0;
        idi local_M = 1;
        uint8_t not_finished = 1;
        while (not_finished) {
            not_finished = 0;
            ++iter;
            // One round: every queue with unchecked candidates runs one
            // top-M iteration, pruning by the shared bound.
#pragma omp parallel for reduction(+ : tmp_count_computation)
            for (idi q_i = 0; q_i < num_queues; ++q_i) {
                idi &k = ks[q_i];
                idi &local_queue_end = local_queues_ends[q_i];
                auto &local_top_m_candidates = top_m_candidates_list[q_i];
                if (k >= local_queue_end) {
                    continue; // No unchecked candidates left in this queue.
                }
                not_finished = 1;
                const idi local_queue_base = local_queues_bases[q_i];
                subsearch_top_m_for_one_iteration_lth(
                        bound_lth,
                        iter,
                        k,
                        local_M,
                        query_id,
                        query_data,
                        local_L,
                        set_L,
                        local_queue_base,
                        local_queue_end,
                        local_top_m_candidates,
                        is_visited,
                        tmp_count_computation);
            }
            count_distance_computation_ += tmp_count_computation;
            tmp_count_computation = 0;
            {// Selecting and update local_queues_ends
                bound_lth = selecting_top_L_seq(
                        set_L,
                        global_L,
                        num_queues,
                        local_queues_bases,
                        local_queues_ends);
            }
            {// Scale M: double per round, capped at local_M_max.
                if (local_M < local_M_max) {
                    local_M <<= 1;
                }
            }
        }
    }
    {// Return the results to set_K: K-way merge of the sorted local queues.
        // Only adjacent duplicates (in emission order) are removed; a full
        // dedup would require a seen-set.
        std::vector<idi> pointer(num_threads_, 0);
        distf min_dist = FLT_MAX;
        idi min_q_i = 0; // Initialized so the (unexpected) all-empty case
        idi min_sub = 0; // cannot read indeterminate values.
        idi last_id;
        // Pick the overall closest candidate as set_K[0].
        for (int q_i = 0; q_i < num_threads_; ++q_i) {
            if (pointer[q_i] >= local_queues_ends[q_i]) {
                continue;
            }
            idi sub = pointer[q_i] + local_queues_bases[q_i];
            distf tmp_dist = set_L[sub].distance_;
            if (tmp_dist < min_dist) {
                min_dist = tmp_dist;
                min_q_i = q_i;
                min_sub = sub;
            }
        }
        set_K[0] = set_L[min_sub].id_;
        ++pointer[min_q_i];
        last_id = set_K[0];
        bool is_finished = false;
        idi k_i = 1;
        while (k_i < K && !is_finished) {
            is_finished = true;
            min_dist = FLT_MAX;
            for (int q_i = 0; q_i < num_threads_; ++q_i) {
                const idi local_queue_end = local_queues_ends[q_i];
                if (pointer[q_i] >= local_queue_end) {
                    continue;
                }
                idi sub = pointer[q_i] + local_queues_bases[q_i];
                // Skip duplicates of the last emitted id. Bounds check FIRST,
                // so set_L[sub] is never read past the queue's end.
                while (pointer[q_i] < local_queue_end
                        && set_L[sub].id_ == last_id) {
                    ++pointer[q_i];
                    sub = pointer[q_i] + local_queues_bases[q_i];
                }
                if (pointer[q_i] >= local_queue_end) {
                    continue;
                }
                is_finished = false; // This queue still has a fresh candidate.
                distf tmp_dist = set_L[sub].distance_;
                if (tmp_dist < min_dist) {
                    min_dist = tmp_dist;
                    min_q_i = q_i;
                    min_sub = sub;
                }
            }
            if (is_finished) {
                break; // All queues exhausted: do not emit a stale min_sub.
            }
            set_K[k_i] = set_L[min_sub].id_;
            last_id = set_K[k_i]; // Track the most recent id for dedup.
            ++pointer[min_q_i];
            ++k_i;
        }
    }
    {// Reset
        is_visited.reset();
    }
}
//// Backup
//inline void Searching::para_search_with_top_m_subsearch_v1(
// const idi local_M_max,
// const idi query_id,
// const idi K,
// const idi total_L,
// const idi local_L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const std::vector<idi> &local_queues_bases,
// std::vector<idi> &local_queues_lengths,
// std::vector< std::vector<idi> > &top_m_candidates_list,
// boost::dynamic_bitset<> &is_visited)
//{
// uint64_t tmp_count_computation = 0;
// {// Initialization
// // is_visited flag array
////#pragma omp parallel for
//// Cannot use OMP for bit array is_visited!
// for (idi c_i = 0; c_i < total_L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < total_L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
//
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (unsigned i = 0; i < total_L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//// std::sort(
//// set_L.begin(),
//// set_L.begin() + L);
// }
//// idi queue_end = L;
//
// // Searching
// if (num_threads_ == 1) { // Single threads
//// local_queues_lengths[0] = local_L;
// std::sort(
// set_L.begin(),
// set_L.end());
// subsearch_with_top_m(
// local_M_max,
// query_id,
// local_L,
// set_L,
// 0,
// local_queues_lengths[0],
// top_m_candidates_list[0],
// is_visited,
// tmp_count_computation);
// count_distance_computation_ += tmp_count_computation;
// } else { // Multiple threads
//// std::fill(local_queues_lengths.begin(), local_queues_lengths.end(), local_L);
// const dataf *query_data = queries_load_ + query_id * dimension_;
// const idi num_queues = num_threads_;
//#pragma omp parallel for
// for (idi q_i = 0; q_i < num_queues; ++q_i) {
// idi local_queue_base = local_queues_bases[q_i];
// std::sort(
// set_L.begin() + local_queue_base,
// set_L.begin() + local_queue_base + local_L);
// }
//// const idi local_queue_length = (L - 1) / num_queues + 1;
// std::vector<idi> ks(num_queues, 0);
// idi iter = 0;
// idi local_M = 1;
// uint8_t not_finished = 1;
// while (not_finished) {
// not_finished = 0;
// ++iter;
//
////#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (idi q_i = 0; q_i < num_queues; ++q_i) {
// idi &k = ks[q_i];
// idi &local_queue_end = local_queues_lengths[q_i];
// auto &local_top_m_candidates = top_m_candidates_list[q_i];
// if (k >= local_queue_end) {
// continue;
// }
// not_finished = 1;
//// ++not_finished;
// const idi local_queue_base = local_queues_bases[q_i];
//
// // Select top-M unchecked vertices.
// idi last_k = local_L;
// idi local_top_m_candidates_end = 0;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < local_queue_end && local_top_m_candidates_end < local_M; ++c_i) {
// idi index_set_L = c_i + local_queue_base;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// local_top_m_candidates[local_top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = local_L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < local_top_m_candidates_end; ++c_i) {
// idi cand_id = local_top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[local_queue_end - 1 + local_queue_end].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// local_queue_base,
// local_queue_end,
// local_L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// local_top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
// {// Setecting and update local_queues_lengths
// time_merge_ -= WallTimer::get_time_mark();
// selecting_top_L_seq(
// set_L,
// total_L,
//// local_L,
// num_queues,
// local_queues_bases,
// local_queues_lengths);
//// merge_in_set_L(
//// set_L,
//// L,
//// num_queues,
//// local_queue_length);
// time_merge_ += WallTimer::get_time_mark();
// }
// {// Scale M
// if (local_M < local_M_max) {
// local_M <<= 1;
// }
//// else {
//// local_M = value_M_max;
//// }
// }
// }
// }
//
// {// Return the results to set_K
// std::vector<idi> pointer(num_threads_, 0);
// // get the first
// distf min_dist = FLT_MAX;
// idi min_q_i;
// idi min_sub;
// idi last_id;
// for (int q_i = 0; q_i < num_threads_; ++q_i) {
// {//test
// if (local_queues_lengths[q_i] != local_L) {
// printf("What? local_queues_lengths[%u]: %u != local_L: %u\n",
// q_i, local_queues_lengths[q_i], local_L);
// }
// }
// if (pointer[q_i] >= local_queues_lengths[q_i]) {
// continue;
// }
// idi sub = pointer[q_i] + local_queues_bases[q_i];
// distf tmp_dist = set_L[sub].distance_;
// if (tmp_dist < min_dist) {
// min_dist = tmp_dist;
// min_q_i = q_i;
// min_sub = sub;
// }
// }
// set_K[0] = set_L[min_sub].id_;
// ++pointer[min_q_i];
// last_id = set_K[0];
//
// bool is_finished = false;
// idi k_i = 1;
// while (k_i < K && !is_finished) {
// is_finished = true;
// min_dist = FLT_MAX;
//// distf min_dist = FLT_MAX;
//// idi min_sub;
// for (int q_i = 0; q_i < num_threads_; ++q_i) {
// if (pointer[q_i] >= local_queues_lengths[q_i]) {
// continue;
// }
// is_finished = false;
// idi sub = pointer[q_i] + local_queues_bases[q_i];
// if (set_L[sub].id_ == last_id) {
// // Duplicate
// ++pointer[q_i];
// continue;
// }
// distf tmp_dist = set_L[sub].distance_;
// if (tmp_dist < min_dist) {
// min_dist = tmp_dist;
// min_q_i = q_i;
// min_sub = sub;
// }
// }
// set_K[k_i] = set_L[min_sub].id_;
// ++pointer[min_q_i];
// ++k_i;
// }
// }
//// {// Return the results to set_K
//// // How to deal with duplicate?
//// idi last_id = set_L[0].id_;
//// set_K[0] = last_id;
//// idi k_i = 1;
//// idi l_i = 1;
//// while (k_i < K && l_i < L) {
//// if (last_id == set_L[l_i].id_) {
//// ++l_i;
//// continue;
//// }
//// last_id = set_L[l_i++].id_;
//// set_K[k_i++] = last_id;
//// }
//// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
// std::fill(local_queues_lengths.begin(), local_queues_lengths.end(), local_L);
// }
//
//// {//test
//// if (1000 == query_id) {
//// exit(1);
//// }
//// }
//}
/*
* 7/24/2020-8:57
* L-th Selection.
* And also M-th Selection. Then the computation should be the same as merging.
*/
//inline void Searching::para_search_with_top_m_subsearch_v2(
// const idi local_M_max,
// const idi query_id,
// const idi K,
// const idi global_L,
// const idi local_L,
// const idi total_L,
// const idi init_queue_size,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const std::vector<idi> &local_queues_starts,
// std::vector<idi> &local_queues_sizes,
// std::vector<idi> &local_m_counts,
//// std::vector<idi> &local_top_m_positions,
// std::vector< std::vector<idi> > &top_m_candidates_list,
// boost::dynamic_bitset<> &is_visited)
//{
// uint64_t tmp_count_computation = 0;
// {// Initialization
// // is_visited flag array
////#pragma omp parallel for
//// Cannot use OMP for bit array is_visited!
// for (idi c_i = 0; c_i < total_L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < total_L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
//
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (int q_i = 0; q_i < num_threads_; ++q_i) {
// idi local_queue_base = local_queues_starts[q_i];
// idi init_ids_base = q_i * init_queue_size;
// idi init_ids_bound = init_ids_base + init_queue_size;
// for (idi id_i = init_ids_base; id_i < init_ids_bound; ++id_i) {
// idi v_id = init_ids[id_i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[local_queue_base++] = Candidate(v_id, dist, false); // False means not checked.
//// {//test
//// if (11 == query_id
//// && 400620 == v_id) {
//// printf("query_id: %u "
//// "(%u %f)\n",
//// query_id,
//// v_id, dist);
//// }
//// }
// }
// local_queues_sizes[q_i] = init_queue_size;
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
// }
//
// // Searching
// if (num_threads_ == 1) { // Single threads
//// local_queues_lengths[0] = local_L;
// std::sort(
// set_L.begin(),
// set_L.end());
// subsearch_with_top_m(
// local_M_max,
// query_id,
// local_L,
// set_L,
// 0,
// local_queues_sizes[0],
// top_m_candidates_list[0],
// is_visited,
// tmp_count_computation);
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
// } else { // Multiple threads
// const dataf *query_data = queries_load_ + query_id * dimension_;
// const idi num_queues = num_threads_;
//#pragma omp parallel for
// for (idi q_i = 0; q_i < num_queues; ++q_i) {
// idi local_queue_base = local_queues_starts[q_i];
// std::sort(
// set_L.begin() + local_queue_base,
// set_L.begin() + local_queue_base + init_queue_size);
// }
// idi local_M = 1;
// idi iter = 0;
// std::vector<idi> ks(num_queues, 0);
// distf bound_lth = FLT_MAX;
// for (idi q_i = 0; q_i < num_queues; ++q_i) {
// bound_lth = std::min(bound_lth, set_L[local_queues_starts[q_i] + init_queue_size - 1].distance_);
// }
// selecting_unchecked_top_M_seq(
// query_id,
// iter,
// set_L,
// ks,
// local_M,
// num_queues,
// local_queues_starts,
// local_queues_sizes,
// local_m_counts);
//// {//test
//// printf("query_id: %u "
//// "iter: %u",
//// query_id,
//// iter);
//// printf(" local_queues_sizes:");
//// for (idi i = 0; i < num_queues; ++i) {
//// printf(" %u", local_queues_sizes[i]);
//// }
//// printf(" local_m_counts:");
//// for (idi i = 0; i < num_queues; ++i) {
//// printf(" %u", local_m_counts[i]);
//// }
//// printf(" ks:");
//// for (idi i = 0; i < num_queues; ++i) {
//// printf(" %u", ks[i]);
//// }
//// printf("\n");
//// }
//
//
// double tmp_time_pick_top_m = 0;
// uint8_t not_finished = 1;
// while (true) {
// not_finished = 0;
// ++iter;
// // TODO: openmp
////#pragma omp parallel for reduction(+ : tmp_count_computation) reduction(+ : tmp_time_pick_top_m)
// for (idi q_i = 0; q_i < num_queues; ++q_i) {
// idi &k = ks[q_i];
// idi &local_queue_size = local_queues_sizes[q_i];
// auto &local_top_m_candidates = top_m_candidates_list[q_i];
//// if (k >= local_queue_size) {
//// continue;
//// }
//// not_finished = 1;
// idi local_m_count = local_m_counts[q_i];
//// // Cannot do this. local_m_count being 0 does not mean this worker is finished.
// if (local_M < num_queues && !local_m_count) {
// local_m_count = 1;
// }
// if (!local_m_count) {
//// k = local_L;
// continue;
// }
// not_finished = 1;
//// if (local_M < static_cast<idi>(num_threads_) &&
//// 0 == local_m_count) {
//// local_m_count = 1;
//// }
// const idi local_queue_start = local_queues_starts[q_i];
//
// subsearch_top_m_for_one_iteration_lth_mth(
// bound_lth,
//// local_top_m_pos,
// iter,
// k,
// local_m_count,
// query_id,
// query_data,
// local_L,
// set_L,
// local_queue_start,
// local_queue_size,
// local_top_m_candidates,
// is_visited,
// tmp_count_computation,
// tmp_time_pick_top_m);
// }
// time_pick_top_m_ += tmp_time_pick_top_m;
// tmp_time_pick_top_m = 0;
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
// if (!not_finished) {
// break;
// }
// {// Setecting and update local_queues_lengths
//// {//test
//// printf("query_id: %u "
//// "iter: %u "
//// "before:local_queues_sizes: (%u, %u)\n",
//// query_id,
//// iter,
//// local_queues_sizes[0], local_queues_sizes[1]);
//// }
//// time_select_L_ -= WallTimer::get_time_mark();
// bound_lth = selecting_top_L_seq(
// set_L,
// global_L,
//// local_L,
// num_queues,
// local_queues_starts,
// local_queues_sizes);
//// time_select_L_ += WallTimer::get_time_mark();
//// {//test
//// for (idi t_i = 0; t_i < num_queues; ++t_i) {
//// idi sub_start = local_queues_starts[t_i];
//// idi sub_bound = sub_start + local_queues_sizes[t_i];
//// for (idi e_i = sub_start; e_i < sub_bound; ++e_i) {
//// L_ids_.push_back(set_L[e_i].id_);
//// }
//// }
//// std::sort(L_ids_.begin(), L_ids_.end());
//// std::sort(M_ids_.begin(), M_ids_.end());
//// for (idi m_i = 0; m_i < M_ids_.size(); ++m_i) {
//// printf("query_id: %u "
//// "iter: %u "
//// "M[%u]: "
//// "%u\n",
//// query_id,
//// iter,
//// m_i,
//// M_ids_[m_i]);
//// }
//// M_ids_.clear();
//// for (idi l_i = 0; l_i < L_ids_.size(); ++l_i) {
//// printf("query_id: %u "
//// "iter: %u "
//// "L[%u]: "
//// "%u\n",
//// query_id,
//// iter,
//// l_i,
//// L_ids_[l_i]);
//// }
//// L_ids_.clear();
//// }
//// {//test
//// printf("query_id: %u "
//// "iter: %u "
//// "after:local_queues_sizes: (%u, %u)\n",
//// query_id,
//// iter,
//// local_queues_sizes[0], local_queues_sizes[1]);
//// }
// }
// {// Scale M
// if (local_M < local_M_max) {
// local_M <<= 1;
// }
//// else {
//// local_M = value_M_max;
//// }
// }
// {
//// {//test
//// printf("query_id: %u "
//// "iter: %u "
//// "selecting_M\n",
//// query_id,
//// iter);
//// }
//// {//test
//// if (0 == query_id
//// && 10 == iter) {
//// printf("test.\n");
//// }
//// }
//// time_select_M_ -= WallTimer::get_time_mark();
// selecting_unchecked_top_M_seq(
// query_id,
// iter,
// set_L,
// ks,
// local_M,
// num_queues,
// local_queues_starts,
// local_queues_sizes,
// local_m_counts);
//// time_select_M_ += WallTimer::get_time_mark();
//// {//test
//// printf("query_id: %u "
//// "iter: %u "
//// "local_m_counts: (%u, %u)\n",
//// query_id,
//// iter,
//// local_m_counts[0], local_m_counts[1]);
//// }
// }
//// {//test
//// printf("query_id: %u "
//// "iter: %u",
//// query_id,
//// iter);
//// printf(" local_queues_sizes:");
//// for (idi i = 0; i < num_queues; ++i) {
//// printf(" %u", local_queues_sizes[i]);
//// }
//// printf(" local_m_counts:");
//// for (idi i = 0; i < num_queues; ++i) {
//// printf(" %u", local_m_counts[i]);
//// }
//// printf(" ks:");
//// for (idi i = 0; i < num_queues; ++i) {
//// printf(" %u", ks[i]);
//// }
//// printf("\n");
//// }
// }
// }
//
//// time_merge_ -= WallTimer::get_time_mark();
// {// Return the results to set_K
// std::vector<idi> pointer(num_threads_, 0);
// // get the first
// distf min_dist = FLT_MAX;
// idi min_q_i;
// idi min_id;
// idi min_sub;
// idi last_id;
// for (int q_i = 0; q_i < num_threads_; ++q_i) {
// if (pointer[q_i] >= local_queues_sizes[q_i]) {
// continue;
// }
// idi sub = pointer[q_i] + local_queues_starts[q_i];
// distf tmp_dist = set_L[sub].distance_;
// idi tmp_id = set_L[sub].id_;
// if (tmp_dist < min_dist) {
// min_dist = tmp_dist;
// min_id = tmp_id;
// min_q_i = q_i;
// min_sub = sub;
// } else if (tmp_dist == min_dist && tmp_id < min_id) {
// min_id = tmp_id;
// min_q_i = q_i;
// min_sub = sub;
// }
// }
// set_K[0] = set_L[min_sub].id_;
//// {//test
//// printf("query_id: %u "
//// "[%u]: "
//// "(%u, %f)\n",
//// query_id,
//// 0,
//// set_L[min_sub].id_, set_L[min_sub].distance_);
//// }
// ++pointer[min_q_i];
// last_id = set_K[0];
//
// bool is_finished = false;
// idi k_i = 1;
// while (k_i < K && !is_finished) {
// is_finished = true;
// min_dist = FLT_MAX;
// for (int q_i = 0; q_i < num_threads_; ++q_i) {
// const idi local_queue_size = local_queues_sizes[q_i];
// idi sub = pointer[q_i] + local_queues_starts[q_i];
//
// while (pointer[q_i] < local_queue_size
// && set_L[sub].id_ == last_id) {
// ++pointer[q_i];
// ++sub;
// }
// if (pointer[q_i] >= local_queue_size) {
// continue;
// }
// is_finished = false;
// distf tmp_dist = set_L[sub].distance_;
// idi tmp_id = set_L[sub].id_;
// if (tmp_dist < min_dist) {
// min_dist = tmp_dist;
// min_id = tmp_id;
// min_q_i = q_i;
// min_sub = sub;
// } else if (tmp_dist == min_dist && tmp_id < min_id) {
// min_id = tmp_id;
// min_q_i = q_i;
// min_sub = sub;
// }
// }
// set_K[k_i] = set_L[min_sub].id_;
//// {//test
//// printf("query_id: %u "
//// "[%u]: "
//// "(%u, %f)\n",
//// query_id,
//// k_i,
//// set_L[min_sub].id_, set_L[min_sub].distance_);
//// }
// ++pointer[min_q_i];
// ++k_i;
// }
// }
//// time_merge_ += WallTimer::get_time_mark();
//// {// Return the results to set_K
//// // How to deal with duplicate?
//// idi last_id = set_L[0].id_;
//// set_K[0] = last_id;
//// idi k_i = 1;
//// idi l_i = 1;
//// while (k_i < K && l_i < L) {
//// if (last_id == set_L[l_i].id_) {
//// ++l_i;
//// continue;
//// }
//// last_id = set_L[l_i++].id_;
//// set_K[k_i++] = last_id;
//// }
//// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
//// std::fill(local_queues_sizes.begin(), local_queues_sizes.end(), init_queue_size);
// }
//
//// {//test
//// if (3 == query_id) {
//// exit(1);
//// }
//// }
//}
/*
* 7/26/2020-15:41
* L-th and M-th Selection.
* Seq-Par Phases: when M is 1 and 2, do sequential searching;
* When M is equal and larger than 4, do parallel searching.
* It's for load-balance issue.
*/
inline void Searching::para_search_with_top_m_subsearch_v3(
        const idi local_M_max, // upper bound on the top-M expansion width M
        const idi local_M_middle, // M threshold that switches sequential -> parallel phase
        const idi query_id, // index of the query vector in queries_load_
        const idi K, // number of nearest-neighbor ids to write into set_K
        const idi global_L, // size of the global (merged) candidate queue (queue 0)
        const idi local_L, // capacity of each worker's local queue
//        const idi total_L,
//        const idi init_queue_size,
        std::vector<Candidate> &set_L, // backing storage for all per-thread queues
        const std::vector<idi> &init_ids, // seed vertex ids (at least global_L of them)
        std::vector<idi> &set_K, // output: ids of the K best candidates found
        const std::vector<idi> &local_queues_starts, // base offset of each queue inside set_L
        std::vector<idi> &local_queues_sizes, // current length of each queue (updated here)
        std::vector<idi> &local_m_counts, // per-queue count of selected unchecked top-M candidates
        std::vector< std::vector<idi> > &top_m_candidates_list, // per-thread scratch buffers
        boost::dynamic_bitset<> &is_visited) // visited-vertex bitmap; reset before returning
{
    // Overall flow:
    //   1. Initialization: seed queue 0 with global_L candidates and sort it.
    //   2. Sequential phase: expand top-M on queue 0 alone while M < local_M_middle.
    //   3. Parallel phase: scatter queue 0 across all num_threads_ queues, then
    //      iterate (parallel per-queue expansion; re-select global top-L and top-M).
    //   4. Merge: K-way merge of the per-thread queues into set_K, skipping duplicates.
    //   5. Reset is_visited and the non-zero queue sizes for the next query.
    time_initialization_ -= WallTimer::get_time_mark();
    uint64_t tmp_count_computation = 0;
    {// Initialization
        // is_visited flag array
//#pragma omp parallel for
// Cannot use OMP for bit array is_visited!
        for (idi c_i = 0; c_i < global_L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
        }
        const dataf *query_data = queries_load_ + query_id * dimension_;
#pragma omp parallel for
        for (idi v_i = 0; v_i < global_L; ++v_i) {
            idi v_id = init_ids[v_i];
            _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
        }
        // Get the distances of all candidates, store in the set set_L.
#pragma omp parallel for reduction(+ : tmp_count_computation)
        for (idi id_i = 0; id_i < global_L; ++id_i) {
            idi v_id = init_ids[id_i];
            // Vertex record layout: leading norm (dataf), then the data vector.
            auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
            dataf norm = *v_data++;
            ++tmp_count_computation;
            distf dist = compute_distance_with_norm(v_data, query_data, norm);
            set_L[id_i] = Candidate(v_id, dist, false); // False means not checked.
        }
        // All seeds live in queue 0 for the sequential phase.
        local_queues_sizes[0] = global_L;
        count_distance_computation_ += tmp_count_computation;
        tmp_count_computation = 0;
        std::sort(set_L.begin(), set_L.begin() + global_L);
    }
    time_initialization_ += WallTimer::get_time_mark();
    // Searching
    if (num_threads_ == 1) { // Single threads
//        std::sort(
//                set_L.begin(),
//                set_L.end());
        // Single-threaded: run the whole search on queue 0.
        subsearch_with_top_m(
                local_M_max,
                query_id,
                local_L,
                set_L,
                0,
                local_queues_sizes[0],
                top_m_candidates_list[0],
                is_visited,
                tmp_count_computation);
        count_distance_computation_ += tmp_count_computation;
        tmp_count_computation = 0;
    } else { // Multiple threads
        const dataf *query_data = queries_load_ + query_id * dimension_;
        const idi num_queues = num_threads_; // one queue per worker thread
        idi local_M = 1; // current expansion width; doubled every iteration up to local_M_max
        idi iter = 0;
        std::vector<idi> ks(num_queues, 0); // per-queue index of the first unchecked candidate
        time_sequential_phase_ -= WallTimer::get_time_mark();
        {// Sequential Search for M = 1, 2.
            idi &k = ks[0];
            while (k < global_L && local_M < local_M_middle) {
                ++iter;
                subsearch_top_m_for_one_iteration(
                        iter,
                        k,
                        local_M,
                        query_id,
                        query_data,
                        global_L,
                        set_L,
                        0,
                        local_queues_sizes[0],
                        top_m_candidates_list[0],
                        is_visited,
                        tmp_count_computation);
                count_distance_computation_ += tmp_count_computation;
                tmp_count_computation = 0;
                {// Double M
                    if (local_M < local_M_max) {
                        local_M <<= 1;
                    }
                }
            }
        }
        time_sequential_phase_ += WallTimer::get_time_mark();
        time_parallel_phase_ -= WallTimer::get_time_mark();
        // Global pruning bound: distance of the current global_L-th best candidate.
        distf bound_lth = set_L[global_L - 1].distance_;
        {// Parallel Search for M >= 4, or local_M_middle
            time_assign_s_ -=WallTimer::get_time_mark();
            {// Assign elements from Queue[0] to others
                // Round-robin scatter: element e_i goes to queue (e_i % num_queues).
                // Elements that map back to queue 0 are compacted in place via dst_i.
                idi dst_i = 1;
                for (idi e_i = 1; e_i < global_L; ++e_i) {
                    idi dest_sub = e_i % num_queues;
                    if (0 == dest_sub) {
                        set_L[dst_i++] = set_L[e_i];
                    } else {
                        set_L[local_queues_starts[dest_sub] + local_queues_sizes[dest_sub]++] = set_L[e_i];
                    }
                }
                local_queues_sizes[0] = dst_i;
            }
            std::fill(ks.begin(), ks.end(), 0); // restart unchecked cursors after the scatter
            // Pick the unchecked top-M candidates across all queues (fills local_m_counts).
            selecting_unchecked_top_M_seq(
                    query_id,
                    iter,
                    set_L,
                    ks,
                    local_M,
                    num_queues,
                    local_queues_starts,
                    local_queues_sizes,
                    local_m_counts);
            time_assign_s_ +=WallTimer::get_time_mark();
            double tmp_time_pick_top_m = 0;
            uint64_t tmp_count_add_to_queue = 0;
            uint8_t not_finished = 1;
            double tmp_time_distance_computation = 0;
            double tmp_time_add_to_queue = 0;
            while (true) {
                time_expand_ -= WallTimer::get_time_mark();
                not_finished = 0;
                ++iter;
#pragma omp parallel for reduction(+ : tmp_count_computation) \
                    reduction(+ : tmp_time_pick_top_m) \
                    reduction(+ : tmp_count_add_to_queue) \
                    reduction(+ : tmp_time_distance_computation) \
                    reduction(+ : tmp_time_add_to_queue)
                for (idi q_i = 0; q_i < num_queues; ++q_i) {
                    tmp_time_pick_top_m -= WallTimer::get_time_mark();
                    // Queue 0 is the larger "global" queue; others use local_L.
                    idi L_value = q_i == 0 ? global_L : local_L;
                    idi &k = ks[q_i];
                    idi &local_queue_size = local_queues_sizes[q_i];
                    auto &local_top_m_candidates = top_m_candidates_list[q_i];
                    idi local_m_count = local_m_counts[q_i];
//                    if (local_M < num_queues && !local_m_count) {
//                        local_m_count = 1;
//                    }
                    tmp_time_pick_top_m += WallTimer::get_time_mark();
                    if (!local_m_count) {
                        continue; // this queue got no top-M candidates this round
                    }
                    // NOTE(review): not_finished is written by several threads without
                    // atomic protection; every writer stores 1, so the outcome is
                    // consistent in practice, but it is formally a data race — confirm.
                    not_finished = 1;
                    const idi local_queue_start = local_queues_starts[q_i];
                    subsearch_top_m_for_one_iteration_lth_mth(
                            bound_lth,
                            iter,
                            k,
                            local_m_count,
                            query_id,
                            query_data,
                            L_value,
                            set_L,
                            local_queue_start,
                            local_queue_size,
                            local_top_m_candidates,
                            is_visited,
                            tmp_count_computation,
                            tmp_time_pick_top_m,
                            tmp_count_add_to_queue,
                            tmp_time_distance_computation,
                            tmp_time_add_to_queue);
                }
                // Fold the per-iteration counters/timers into the instance-level stats.
                time_add_to_queue_ += tmp_time_add_to_queue;
                tmp_time_add_to_queue = 0;
                time_distance_computation_ += tmp_time_distance_computation;
                tmp_time_distance_computation = 0;
                count_add_to_queue_ += tmp_count_add_to_queue;
                tmp_count_add_to_queue = 0;
                time_pick_top_m_ += tmp_time_pick_top_m;
                tmp_time_pick_top_m = 0;
                count_distance_computation_ += tmp_count_computation;
                tmp_count_computation = 0;
                time_expand_ += WallTimer::get_time_mark();
                if (!not_finished) {
                    break; // no queue had work this iteration — search converged
                }
                {// Scale M
                    if (local_M < local_M_max) {
                        local_M <<= 1;
                    }
//                    else {
//                        local_M = value_M_max;
//                    }
                }
                time_select_ -= WallTimer::get_time_mark();
                // The two selections are independent, so they run concurrently.
#pragma omp parallel sections
                {
#pragma omp section
                    {// Selecting and update local_queues_lengths
//                        time_select_L_ -= WallTimer::get_time_mark();
                        // Keep only the global top-L across queues; returns the new
                        // L-th best distance used as the pruning bound next round.
                        bound_lth = selecting_top_L_seq(
                                set_L,
                                global_L,
//                                local_L,
                                num_queues,
                                local_queues_starts,
                                local_queues_sizes);
//                        time_select_L_ += WallTimer::get_time_mark();
                    }
#pragma omp section
                    {
//                        time_select_M_ -= WallTimer::get_time_mark();
                        // Re-pick the unchecked top-M candidates for the next round.
                        selecting_unchecked_top_M_seq(
                                query_id,
                                iter,
                                set_L,
                                ks,
                                local_M,
                                num_queues,
                                local_queues_starts,
                                local_queues_sizes,
                                local_m_counts);
//                        time_select_M_ += WallTimer::get_time_mark();
                    }
                }
                time_select_ += WallTimer::get_time_mark();
//                {//test
//                    printf("query_id: %u "
//                           "iter: %u",
//                           query_id,
//                           iter);
//                    printf(" local_queues_sizes:");
//                    for (idi i = 0; i < num_queues; ++i) {
//                        printf(" %u", local_queues_sizes[i]);
//                    }
//                    printf(" local_m_counts:");
//                    for (idi i = 0; i < num_queues; ++i) {
//                        printf(" %u", local_m_counts[i]);
//                    }
//                    printf(" ks:");
//                    for (idi i = 0; i < num_queues; ++i) {
//                        printf(" %u", ks[i]);
//                    }
//                    printf("\n");
//                }
            }
        }
        time_parallel_phase_ += WallTimer::get_time_mark();
    }
//    time_merge_ -= WallTimer::get_time_mark();
    time_ending_ -= WallTimer::get_time_mark();
    {// Return the results to set_K
        // K-way merge of the sorted per-thread queues, deduplicating by id.
        // Ties on distance are broken by the smaller id.
        std::vector<idi> pointer(num_threads_, 0); // read cursor per queue
        // get the first
        distf min_dist = FLT_MAX;
        idi min_q_i;
        idi min_id;
        idi min_sub;
        idi last_id;
        for (int q_i = 0; q_i < num_threads_; ++q_i) {
            if (pointer[q_i] >= local_queues_sizes[q_i]) {
                continue;
            }
            idi sub = pointer[q_i] + local_queues_starts[q_i];
            distf tmp_dist = set_L[sub].distance_;
            idi tmp_id = set_L[sub].id_;
            if (tmp_dist < min_dist) {
                min_dist = tmp_dist;
                min_id = tmp_id;
                min_q_i = q_i;
                min_sub = sub;
            } else if (tmp_dist == min_dist && tmp_id < min_id) {
                min_id = tmp_id;
                min_q_i = q_i;
                min_sub = sub;
            }
        }
        // NOTE(review): min_sub/min_q_i are uninitialized if every queue were
        // empty; queue 0 holds at least one element after initialization, so
        // this does not trigger in practice — confirm.
        set_K[0] = set_L[min_sub].id_;
//        {//test
//            printf("query_id: %u "
//                   "[%u]: "
//                   "(%u, %f)\n",
//                   query_id,
//                   0,
//                   set_L[min_sub].id_, set_L[min_sub].distance_);
//        }
        ++pointer[min_q_i];
        last_id = set_K[0];
        bool is_finished = false;
        idi k_i = 1;
        while (k_i < K && !is_finished) {
            is_finished = true;
            min_dist = FLT_MAX;
            for (int q_i = 0; q_i < num_threads_; ++q_i) {
                const idi local_queue_size = local_queues_sizes[q_i];
                idi sub = pointer[q_i] + local_queues_starts[q_i];
                // Skip duplicates of the id emitted in the previous step.
                while (pointer[q_i] < local_queue_size
                       && set_L[sub].id_ == last_id) {
                    ++pointer[q_i];
                    ++sub;
                }
                if (pointer[q_i] >= local_queue_size) {
                    continue;
                }
                is_finished = false;
                distf tmp_dist = set_L[sub].distance_;
                idi tmp_id = set_L[sub].id_;
                if (tmp_dist < min_dist) {
                    min_dist = tmp_dist;
                    min_id = tmp_id;
                    min_q_i = q_i;
                    min_sub = sub;
                } else if (tmp_dist == min_dist && tmp_id < min_id) {
                    min_id = tmp_id;
                    min_q_i = q_i;
                    min_sub = sub;
                }
            }
            // NOTE(review): if every queue is exhausted in this pass,
            // is_finished stays true but set_K[k_i] is still written from the
            // stale min_sub (repeating the previous id). Only reachable when
            // the queues hold fewer than K distinct ids — confirm intended.
            set_K[k_i] = set_L[min_sub].id_;
//            {//test
//                printf("query_id: %u "
//                       "[%u]: "
//                       "(%u, %f)\n",
//                       query_id,
//                       k_i,
//                       set_L[min_sub].id_, set_L[min_sub].distance_);
//            }
            ++pointer[min_q_i];
            ++k_i;
        }
    }
//    time_merge_ += WallTimer::get_time_mark();
    {// Reset
//        std::fill(is_visited.begin(), is_visited.end(), 0);
        is_visited.reset();
        // Queue 0 keeps its size; the worker queues are emptied for the next query.
        std::fill(local_queues_sizes.begin() + 1, local_queues_sizes.end(), 0);
    }
    time_ending_ += WallTimer::get_time_mark();
//    {//test
//        if (3 == query_id) {
//            exit(1);
//        }
//    }
}
//inline void Searching::para_search_with_top_m_subsearch_v2(
// const idi local_M_max,
// const idi query_id,
// const idi K,
// const idi global_L,
// const idi local_L,
// const idi total_L,
// const idi init_queue_end,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const std::vector<idi> &local_queues_bases,
// std::vector<idi> &local_queues_ends,
// std::vector< std::vector<idi> > &top_m_candidates_list,
// std::vector<idi> &top_m_ends,
// boost::dynamic_bitset<> &is_visited)
//{
// uint64_t tmp_count_computation = 0;
// {// Initialization
// // is_visited flag array
////#pragma omp parallel for
//// Cannot use OMP for bit array is_visited!
// for (idi c_i = 0; c_i < total_L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < total_L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
//
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (int q_i = 0; q_i < num_threads_; ++q_i) {
// idi local_queue_base = local_queues_bases[q_i];
// idi init_ids_base = q_i * init_queue_end;
// idi init_ids_bound = init_ids_base + init_queue_end;
// for (idi id_i = init_ids_base; id_i < init_ids_bound; ++id_i) {
// idi v_id = init_ids[id_i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[local_queue_base++] = Candidate(v_id, dist, false); // False means not checked.
// }
// local_queues_ends[q_i] = init_queue_end;
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
////#pragma omp parallel for reduction(+ : tmp_count_computation)
//// for (unsigned i = 0; i < total_L; i++) {
//// unsigned v_id = init_ids[i];
//// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
//// dataf norm = *v_data++;
//// ++tmp_count_computation;
//// distf dist = compute_distance_with_norm(v_data, query_data, norm);
//// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
//// }
//// count_distance_computation_ += tmp_count_computation;
//// tmp_count_computation = 0;
// }
//
// // Searching
// if (num_threads_ == 1) { // Single threads
//// local_queues_lengths[0] = local_L;
// std::sort(
// set_L.begin(),
// set_L.end());
// subsearch_with_top_m(
// local_M_max,
// query_id,
// local_L,
// set_L,
// 0,
// local_queues_ends[0],
// top_m_candidates_list[0],
// is_visited,
// tmp_count_computation);
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
// } else { // Multiple threads
//// std::fill(local_queues_lengths.begin(), local_queues_lengths.end(), local_L);
// const dataf *query_data = queries_load_ + query_id * dimension_;
// const idi num_queues = num_threads_;
//#pragma omp parallel for
// for (idi q_i = 0; q_i < num_queues; ++q_i) {
// idi local_queue_base = local_queues_bases[q_i];
// std::sort(
// set_L.begin() + local_queue_base,
// set_L.begin() + local_queue_base + init_queue_end);
// }
// distf bound_lth = FLT_MAX;
// for (idi q_i = 0; q_i < num_queues; ++q_i) {
// bound_lth = std::min(bound_lth, set_L[local_queues_bases[q_i] + init_queue_end - 1].distance_);
// }
//// const idi local_queue_length = (L - 1) / num_queues + 1;
// std::vector<idi> ks(num_queues, 0);
// idi iter = 0;
// idi local_M = 1;
// uint8_t not_finished = 1;
// while (not_finished) {
// not_finished = 0;
// ++iter;
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (idi q_i = 0; q_i < num_queues; ++q_i) {
// idi &k = ks[q_i];
// idi &local_queue_end = local_queues_ends[q_i];
// auto &local_top_m_candidates = top_m_candidates_list[q_i];
// if (k >= local_queue_end) {
// continue;
// }
// not_finished = 1;
// const idi local_queue_base = local_queues_bases[q_i];
//
// subsearch_top_m_for_one_iteration(
// bound_lth,
// iter,
// k,
// local_M,
// query_id,
// query_data,
// local_L,
// set_L,
// local_queue_base,
// local_queue_end,
// local_top_m_candidates,
// is_visited,
// tmp_count_computation);
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
// {// Setecting and update local_queues_lengths
// time_merge_ -= WallTimer::get_time_mark();
// bound_lth = selecting_top_L_seq(
// set_L,
// global_L,
//// local_L,
// num_queues,
// local_queues_bases,
// local_queues_ends);
// time_merge_ += WallTimer::get_time_mark();
//// {// local_queues_ends
//// printf("query_id: %u "
//// "iter: %u",
////// "local_queues_ends:",
//// query_id,
//// iter);
//// for (idi q_i = 0; q_i < num_queues; ++q_i) {
//// printf(" [%u]: %u",
//// q_i,
//// local_queues_ends[q_i]);
//// }
//// printf("\n");
//// }
// }
// {// Scale M
// if (local_M < local_M_max) {
// local_M <<= 1;
// }
//// else {
//// local_M = value_M_max;
//// }
// }
// }
// }
//
// time_merge_ -= WallTimer::get_time_mark();
// {// Return the results to set_K
// std::vector<idi> pointer(num_threads_, 0);
// // get the first
// distf min_dist = FLT_MAX;
// idi min_q_i;
// idi min_sub;
// idi last_id;
// for (int q_i = 0; q_i < num_threads_; ++q_i) {
// if (pointer[q_i] >= local_queues_ends[q_i]) {
// continue;
// }
// idi sub = pointer[q_i] + local_queues_bases[q_i];
// distf tmp_dist = set_L[sub].distance_;
// if (tmp_dist < min_dist) {
// min_dist = tmp_dist;
// min_q_i = q_i;
// min_sub = sub;
// }
// }
// set_K[0] = set_L[min_sub].id_;
// ++pointer[min_q_i];
// last_id = set_K[0];
//
// bool is_finished = false;
// idi k_i = 1;
// while (k_i < K && !is_finished) {
// is_finished = true;
// min_dist = FLT_MAX;
// for (int q_i = 0; q_i < num_threads_; ++q_i) {
// const idi local_queue_end = local_queues_ends[q_i];
// if (pointer[q_i] >= local_queue_end) {
// continue;
// }
// is_finished = false;
// idi sub = pointer[q_i] + local_queues_bases[q_i];
// while (set_L[sub].id_ == last_id
// && pointer[q_i] < local_queue_end) {
// ++pointer[q_i];
// sub = pointer[q_i] + local_queues_bases[q_i];
// }
// if (pointer[q_i] >= local_queue_end) {
// continue;
// }
//// if (set_L[sub].id_ == last_id) {
//// // Duplicate
//// ++pointer[q_i];
//// continue;
//// }
// distf tmp_dist = set_L[sub].distance_;
// if (tmp_dist < min_dist) {
// min_dist = tmp_dist;
// min_q_i = q_i;
// min_sub = sub;
// }
// }
// set_K[k_i] = set_L[min_sub].id_;
// ++pointer[min_q_i];
// ++k_i;
// }
// }
// time_merge_ += WallTimer::get_time_mark();
//// {// Return the results to set_K
//// // How to deal with duplicate?
//// idi last_id = set_L[0].id_;
//// set_K[0] = last_id;
//// idi k_i = 1;
//// idi l_i = 1;
//// while (k_i < K && l_i < L) {
//// if (last_id == set_L[l_i].id_) {
//// ++l_i;
//// continue;
//// }
//// last_id = set_L[l_i++].id_;
//// set_K[k_i++] = last_id;
//// }
//// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
//// std::fill(local_queues_ends.begin(), local_queues_ends.end(), init_queue_end);
// }
//
//// {//test
//// if (3 == query_id) {
//// exit(1);
//// }
//// }
//}
/*
* 6/27/2020-12:33
* Do searching on the local_set_L
* local_set_L is already sorted
* is_visited is already set up.
*/
inline void Searching::subsearch_for_simple_search(
const idi query_id,
const idi local_L,
std::vector<Candidate> &set_L,
const idi base_set_L,
idi &set_L_end,
// std::vector<uint8_t> &is_visited,
boost::dynamic_bitset<> &is_visited,
uint64_t &local_count_distance_computation)
{
const dataf *query_data = queries_load_ + query_id * dimension_;
// idi local_top_m_candidates_end = 0;
idi k = 0; // Index of first unchecked candidate.
idi iter = 0;
// idi M = 1; // value of M
while (k < local_L) {
++iter;
// {//test
// printf("query_id: %u "
// "iter: %u\n",
// query_id,
// iter);
// }
// Select the top-1 unchecked candidate
idi top_1;
idi last_k = local_L;
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
for (idi c_i = k; c_i < set_L_end; ++c_i) {
idi index_set_L = c_i + base_set_L;
if (set_L[index_set_L].is_checked_) {
continue;
}
top_1 = set_L[index_set_L].id_;
last_k = c_i; // Record the location of the last candidate selected.
set_L[index_set_L].is_checked_ = true;
// local_top_m_candidates[local_top_m_candidates_end++] = set_L[index_set_L].id_;
break;
}
if (last_k == local_L) {
break;
}
idi nk = local_L;
// Push top-1' neighbors into the queue.
idi cand_id = top_1;
_mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
idi out_degree = *out_edges++;
for (idi n_i = 0; n_i < out_degree; ++n_i) {
_mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
}
for (idi e_i = 0; e_i < out_degree; ++e_i) {
idi nb_id = out_edges[e_i];
{ // Sequential edition
if (is_visited[nb_id]) {
continue;
}
is_visited[nb_id] = 1;
}
// {// Critical edition
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
// }
auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
dataf norm = *nb_data++;
++local_count_distance_computation;
distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// {
// if (0 == query_id
// && (785802 == nb_id
// || 180955 == nb_id
// || 240996 == nb_id
// || 813701 == nb_id
// || 708177 == nb_id
// || 87578 == nb_id
// || 561813 == nb_id
// || 701258 == nb_id
// || 872728 == nb_id)) {
//// && 180955 == nb_id) {
// printf("parent: %u "
// "nb_id: %u "
// "dist: %f "
// "base_set_L: %u "
// "set_L_end: %u\n",
// cand_id,
// nb_id,
// dist,
// base_set_L,
// set_L_end);
// }
// }
if (dist > set_L[set_L_end - 1 + base_set_L].distance_) {
continue;
}
Candidate cand(nb_id, dist, false);
// Thread 0 maintains the "global" queue
idi r = add_into_queue(
set_L,
base_set_L,
set_L_end,
local_L,
cand);
if (r < nk) {
nk = r;
}
}
if (nk <= last_k) {
k = nk;
} else {
k = last_k + 1;
}
}
}
/*
 * 6/27/2020-12:26
 * Is it good for every thread to run its own subsearch independently?
 */
/*
 * Answers one query (query_id) of the "simple search" by running
 * sub-searches over slices of the candidate queue set_L.
 *
 * Visible pipeline:
 *   1. Mark the L initial ids as visited and compute their distances to
 *      the query vector, filling set_L[0..L-1] (unsorted).
 *   2. Single-thread case: sort the whole queue once, then run one
 *      subsearch_for_simple_search over it.
 *      Multi-thread case: split set_L into num_threads_ contiguous local
 *      queues of ~L/num_threads_ entries, sort and sub-search each slice
 *      in parallel, then combine the slices with merge_in_set_L.
 *   3. Copy the ids of the best K distinct candidates into set_K
 *      (set_L may contain duplicate ids after the merge).
 *   4. Reset the shared is_visited bitset so the next query starts clean.
 *
 * @param query_id   index of the query inside queries_load_
 * @param K          number of result ids to write into set_K
 * @param L          queue length == number of initial candidates
 * @param set_L      candidate queue storage (assumed size >= L); fully
 *                   overwritten by this call
 * @param init_ids   the L starting vertex ids
 * @param set_K      output buffer for the top-K ids
 * @param is_visited shared visited-flag bitset; assumed all-zero on entry
 *                   — TODO confirm — and reset() before returning
 */
inline void Searching::para_simple_search_subsearch(
        const idi query_id,
        const idi K,
        const idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
//        std::vector<uint8_t> &is_visited)
        boost::dynamic_bitset<> &is_visited)
{
    // Per-call tally of distance computations; flushed into the member
    // counter count_distance_computation_ (also used as an OpenMP
    // reduction variable below).
    uint64_t tmp_count_computation = 0;
    {// Initialization
        // is_visited flag array
//#pragma omp parallel for
// Cannot use OMP for bit array is_visited!
        // boost::dynamic_bitset writes are not thread-safe bit-by-bit,
        // hence the sequential marking loop.
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
        }

        const dataf *query_data = queries_load_ + query_id * dimension_;
        // Warm the cache: prefetch every initial vertex's record
        // (norm + vector data) from the packed graph layout.
#pragma omp parallel for
        for (idi v_i = 0; v_i < L; ++v_i) {
            idi v_id = init_ids[v_i];
            _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
        }
        // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
#pragma omp parallel for reduction(+ : tmp_count_computation)
        for (unsigned i = 0; i < L; i++) {
            unsigned v_id = init_ids[i];
            // First dataf at the vertex record is its precomputed norm;
            // the vector components follow it.
            auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
            dataf norm = *v_data++;
            ++tmp_count_computation;
            distf dist = compute_distance_with_norm(v_data, query_data, norm);
            set_L[i] = Candidate(v_id, dist, false); // False means not checked.
        }
        count_distance_computation_ += tmp_count_computation;
        tmp_count_computation = 0;
//        std::sort(
//                set_L.begin(),
//                set_L.begin() + L);
    }
    idi queue_end = L;

    // Searching
    if (num_threads_ == 1) { // Single threads
        // One global queue: sort everything, then one sequential
        // sub-search over [0, queue_end).
        std::sort(
                set_L.begin(),
                set_L.end());
        subsearch_for_simple_search(
                query_id,
                L,
                set_L,
                0,
                queue_end,
                is_visited,
                tmp_count_computation);
        count_distance_computation_ += tmp_count_computation;
//        {
////        {//test
////            for (idi i = 0; i < queue_end; ++i) {
////                printf("start: "
////                       "query_id: %u "
////                       "set_L[%u]: "
////                       "(%u %f)\n",
////                       query_id,
////                       i,
////                       set_L[i].id_, set_L[i].distance_);
////            }
////        }
//
//        idi half_length = queue_end / 2;
//        std::sort(
//                set_L.begin(),
//                set_L.begin() + half_length);
////        {//test
////            for (idi i = 0; i < half_length; ++i) {
////                printf("sorted: "
////                       "query_id: %u "
////                       "set_L[%u]: "
////                       "(%u %f)\n",
////                       query_id,
////                       i,
////                       set_L[i].id_, set_L[i].distance_);
////            }
////        }
//
//        subsearch_for_simple_search(
//                query_id,
//                half_length, // local_L
//                set_L,
//                0, // base_set_L
//                half_length, // set_L_end
//                is_visited,
//                tmp_count_computation);
//
////        {//test
////            for (idi i = 0; i < half_length; ++i) {
////                printf("subsearched: "
////                       "query_id: %u "
////                       "set_L[%u]: "
////                       "(%u %f)\n",
////                       query_id,
////                       i,
////                       set_L[i].id_, set_L[i].distance_);
////            }
////        }
//
//        std::sort(
//                set_L.begin() + half_length,
//                set_L.end());
//
////        {//test
////            for (idi i = half_length; i < queue_end; ++i) {
////                printf("sorted: "
////                       "query_id: %u "
////                       "set_L[%u]: "
////                       "(%u %f)\n",
////                       query_id,
////                       i,
////                       set_L[i].id_, set_L[i].distance_);
////            }
////        }
//
//        subsearch_for_simple_search(
//                query_id,
//                half_length, // local_L
//                set_L,
//                half_length, // base_set_L
//                half_length, // set_L_end
//                is_visited,
//                tmp_count_computation);
////        {//test
////            for (idi i = half_length; i < queue_end; ++i) {
////                printf("subsearched: "
////                       "query_id: %u "
////                       "set_L[%u]: "
////                       "(%u %f)\n",
////                       query_id,
////                       i,
////                       set_L[i].id_, set_L[i].distance_);
////            }
////        }
////        {//test
////            for (idi i = 0; i < queue_end; ++i) {
////                printf("explored: "
////                       "query_id: %u "
////                       "set_L[%u]: "
////                       "(%u %f)\n",
////                       query_id,
////                       i,
////                       set_L[i].id_, set_L[i].distance_);
////            }
////        }
//        count_distance_computation_ += tmp_count_computation;
//
//        std::vector <Candidate> tmp_set_L(L);
//        std::merge(set_L.begin(), set_L.begin() + half_length,
//                   set_L.begin() + half_length, set_L.end(),
//                   tmp_set_L.begin());
//        std::copy(tmp_set_L.begin(), tmp_set_L.end(), set_L.begin());
////        {//test
////            for (idi i = 0; i < queue_end; ++i) {
////                printf("merged: "
////                       "query_id: %u "
////                       "set_L[%u]: "
////                       "(%u %f)\n",
////                       query_id,
////                       i,
////                       set_L[i].id_, set_L[i].distance_);
////            }
////        }
//        }
    } else { // Multiple threads
        // Partition set_L into num_queues contiguous slices; the last
        // slice may be shorter (handled by the clamp below).
        const idi num_queues = num_threads_;
        const idi local_queue_length = (L - 1) / num_queues + 1; // ceil(L / num_queues)
        // Parallel for
#pragma omp parallel for reduction(+ : tmp_count_computation)
        for (idi q_i = 0; q_i < num_queues; ++q_i) {
            idi local_queue_base = q_i * local_queue_length;
            if (local_queue_base >= L) {
                // More queues than elements: this slice is empty.
                continue;
            }
            idi local_queue_end = local_queue_length;
            if (local_queue_base + local_queue_end > L) {
                // Clamp the final slice so it does not run past L.
                local_queue_end = L - local_queue_base;
            }
            // Each thread sorts and sub-searches its own slice only;
            // slices are disjoint, so set_L accesses do not race.
            // NOTE(review): is_visited is written concurrently by all
            // threads here via subsearch_for_simple_search — presumably
            // tolerated as a benign race; verify against that callee.
            std::sort(
                    set_L.begin() + local_queue_base,
                    set_L.begin() + local_queue_base + local_queue_end);
            subsearch_for_simple_search(
                    query_id,
                    local_queue_end, // local_L
                    set_L,
                    local_queue_base, // base_set_L
                    local_queue_end, // set_L_end
                    is_visited,
                    tmp_count_computation);
        }
        count_distance_computation_ += tmp_count_computation;

        // Merge
//        time_merge_ -= WallTimer::get_time_mark();
        // Combine the per-thread slices back into one ordered queue.
        merge_in_set_L(
                set_L,
                L,
                num_queues,
                local_queue_length);
//        time_merge_ += WallTimer::get_time_mark();
    }

    {// Return the results to set_K
        // How to deal with duplicate?
        // Consecutive-duplicate skip: assumes equal ids end up adjacent
        // in the merged queue — TODO confirm merge_in_set_L guarantees it.
        // NOTE(review): if fewer than K distinct ids exist in set_L[0..L),
        // the tail of set_K keeps whatever it held before this call.
        idi last_id = set_L[0].id_;
        set_K[0] = last_id;
        idi k_i = 1;
        idi l_i = 1;
        while (k_i < K && l_i < L) {
            if (last_id == set_L[l_i].id_) {
                ++l_i;
                continue;
            }
            last_id = set_L[l_i++].id_;
            set_K[k_i++] = last_id;
        }
//#pragma omp parallel for
//        for (idi k_i = 0; k_i < K; ++k_i) {
//            set_K[k_i] = set_L[k_i].id_;
////            set_K[k_i] = set_L[k_i].id_;
//        }
    }

    {// Reset
//        std::fill(is_visited.begin(), is_visited.end(), 0);
        // Clear every visited flag so the bitset can be reused by the
        // next query without reallocation.
        is_visited.reset();
//        is_visited.clear_all();
    }

//    {//test
//        if (0 == query_id) {
//            exit(1);
//        }
//    }
}
///*
// * 6/22/2020-09:38
// * A synchronized last element as the sentinel
// */
//inline void Searching::para_search_with_top_m_merge_queues_global_threshold(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<idi> &top_m_candidates,
// boost::dynamic_bitset<> &is_visited)
//{
//// const idi base_set_L = (num_threads_ - 1) * local_queue_length;
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// uint64_t tmp_count_computation = 0;
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//// std::sort(set_L.begin(), set_L.begin() + L);
// std::sort(
// set_L.begin() + base_set_L,
// set_L.begin() + base_set_L + L);
// local_queues_ends[num_threads_ - 1] = L;
//
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// idi M = 1;
//
// { // Single thread
// while (k < L && M < value_M_middle) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
//
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
//
// }
// }
//
// { // Multiple Threads
// while (k < L) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads)
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// if (0 != tid) {
// // Non-Master threads using local queues
// add_into_queue(
// set_L,
// (tid - 1) * local_queue_length,
// local_queues_ends[tid - 1],
// local_queue_length,
// cand);
// } else {
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
// {// Local queues' ends
//// printf("query%u:iter: %u", query_id, tmp_count);
// idi total_elements = 0;
// for (int i_t = 0; i_t < num_threads_ - 1; ++i_t) {
// total_elements += local_queues_ends[i_t];
// }
// number_local_elements_ += total_elements;
//// printf(" total_elements: %u+%u\n", total_elements - local_queues_ends[num_threads_ - 1], local_queues_ends[num_threads_ - 1]);
//// for (int i_t = 0; i_t < num_threads_; ++i_t) {
//// printf(" [%u]: %u", i_t, local_queues_ends[i_t]);
//// }
//// printf("\n");
// }
//
//// // Merge. Merge all queues in parallel.
// {
// time_merge_ -= WallTimer::get_time_mark();
// if (num_threads_ > 1) {
// idi r = merge_all_queues_para_array(
// set_L,
// local_queues_ends,
// local_queue_length,
// L);
// if (r < nk) {
// nk = r;
// }
// }
// time_merge_ += WallTimer::get_time_mark();
// }
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
//
// }
// }
//
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i + base_set_L].id_;
//// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//
//// {//test
//// if (0 == query_id) {
//// exit(1);
//// }
//// }
//}
///*
// * 6/7/2020-16:55
// * Use 1 threads to scale M until the value_M_middle.
// * Then use multiple threads.
// * Except for Thread 0, other threads are collectors. They collect, but do not merge.
// * Only merge once after Thread 0 stops.
// */
//inline void Searching::para_search_with_top_m_merge_queues_collectors(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited)
//// std::vector<distf> &local_thresholds)
//// BitVector &is_visited)
//{
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
//// is_visited.atomic_set_bit(init_ids[c_i]);
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// uint64_t tmp_count_computation = 0;
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
//// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//// std::sort(set_L.begin(), set_L.begin() + L);
// std::sort(
// set_L.begin() + base_set_L,
// set_L.begin() + base_set_L + L);
//// boost::sort::block_indirect_sort(
//// set_L.begin() + base_set_L,
//// set_L.begin() + base_set_L + L,
//// num_threads_);
// local_queues_ends[num_threads_ - 1] = L;
//
//// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// idi M = 1;
//
// // Single thread
// {
// while (k < L && M < value_M_middle) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
//// int real_threads = std::min(static_cast<int>(M), num_threads_);
//// idi queue_base = num_threads_ - real_threads;
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
//
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
//
// // Multiple Threads
// {
//// while (k < L/num_threads_/2) {
// while (k < L) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//// int real_threads = std::min(static_cast<int>(M), num_threads_);
//// idi queue_base = num_threads_ - real_threads;
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi chunk_size;
// if (num_threads_ <= top_m_candidates_end) {
// chunk_size = (top_m_candidates_end - 1) / num_threads_ + 1;
// } else {
// chunk_size = 1;
// }
// idi nk = L;
// // Push M candidates' neighbors into the queue.
////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads)
////#pragma omp parallel for reduction(+ : tmp_count_computation)
//#pragma omp parallel for reduction(+ : tmp_count_computation) schedule(static, chunk_size)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
//// {
//// if (c_i < chunk_size && tid != 0) {
//// printf("query_id: %u "
//// "tmp_count: %u "
//// "chunk_size: %u "
//// "c_i: %u "
//// "tid: %u\n",
//// query_id,
//// tmp_count,
//// chunk_size,
//// c_i,
//// tid);
//// }
//// }
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// if (0 != tid) {
// // Non-Master threads using local queues
// add_into_queue(
// set_L,
// (tid - 1) * local_queue_length,
// local_queues_ends[tid - 1],
// local_queue_length,
// cand);
// } else {
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
////// // Merge. Merge all queues in parallel.
//// {
//// time_merge_ -= WallTimer::get_time_mark();
//// if (num_threads_ > 1) {
////// idi r = merge_all_queues_queue_base(
////// set_L,
////// local_queues_ends,
////// queue_base,
////// real_threads,
////// local_queue_length,
////// L);
//// idi r = merge_all_queues_para_array(
//// set_L,
//// local_queues_ends,
//// local_queue_length,
//// L);
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// time_merge_ += WallTimer::get_time_mark();
//// }
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
//
//// // Merge only once after Master Thread stops.
//// {
//// time_merge_ -= WallTimer::get_time_mark();
//// if (num_threads_ > 1) {
////// idi r = merge_all_queues_queue_base(
////// set_L,
////// local_queues_ends,
////// queue_base,
////// real_threads,
////// local_queue_length,
////// L);
//// merge_all_queues_para_array(
//// set_L,
//// local_queues_ends,
//// local_queue_length,
//// L);
//// }
//// time_merge_ += WallTimer::get_time_mark();
//// }
// }
//
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i + base_set_L].id_;
//// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//
//// {//test
//// printf("tmp_count: %u\n", tmp_count);
//// if (3 == query_id) {
//// exit(1);
//// }
//// }
//}
///*
// * 6/8/2020-16:39
// * Selecting rather than merging
// */
//inline void Searching::para_search_with_top_m_merge_queues_selecting(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited)
//{
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
//// is_visited.atomic_set_bit(init_ids[c_i]);
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// uint64_t tmp_count_computation = 0;
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
//// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//// std::sort(set_L.begin(), set_L.begin() + L);
// std::sort(
// set_L.begin() + base_set_L,
// set_L.begin() + base_set_L + L);
//// boost::sort::block_indirect_sort(
//// set_L.begin() + base_set_L,
//// set_L.begin() + base_set_L + L,
//// num_threads_);
// local_queues_ends[num_threads_ - 1] = L;
//
//// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// idi M = 1;
//
// // Single thread
// {
// while (k < L && M < value_M_middle) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
//// int real_threads = std::min(static_cast<int>(M), num_threads_);
//// idi queue_base = num_threads_ - real_threads;
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
//
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
//
// // Multiple Threads
// {
//// while (k < L/num_threads_/2) {
//// while (k < L) {
// while (true) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//// // Select M candidates
//// idi last_k = L;
////// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
//// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
//// idi index_set_L = c_i + base_set_L;
//// if (set_L[index_set_L].is_checked_) {
//// continue;
//// }
//// last_k = c_i; // Record the location of the last candidate selected.
//// set_L[index_set_L].is_checked_ = true;
//// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
//// }
//
// // Select M candidates
// {
// idi traverse_count = 0;
// idi bound_sub = L; // This is not always true!
// for (idi sub = 0; sub < bound_sub && top_m_candidates_end < M && traverse_count < L; ++sub) {
// for (int tid = 0; tid < num_threads_ && top_m_candidates_end < M && traverse_count < L; ++tid) {
// if (sub >= local_queues_ends[tid]) {
// continue;
// }
// idi index_set_L = tid * local_queue_length + sub;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
// }
//
// if (0 == top_m_candidates_end) {
// break;
// }
// }
//
//// idi nk = L;
// // Push M candidates' neighbors into the queue.
////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads)
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// if (0 != tid) {
// // Non-Master threads using local queues
// add_into_queue(
// set_L,
// (tid - 1) * local_queue_length,
// local_queues_ends[tid - 1],
// local_queue_length,
// cand);
// } else {
// // Thread 0 maintains the "global" queue
//// idi r =
// add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
//// if (r < nk) {
//// nk = r;
//// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
//// // Merge. Merge all queues in parallel.
// {
// time_merge_ -= WallTimer::get_time_mark();
// if (num_threads_ > 1) {
//// idi r = merge_all_queues_queue_base(
//// set_L,
//// local_queues_ends,
//// queue_base,
//// real_threads,
//// local_queue_length,
//// L);
//// idi r =
// merge_all_queues_para_array(
// set_L,
// local_queues_ends,
// local_queue_length,
// L);
//// if (r < nk) {
//// nk = r;
//// }
// }
// time_merge_ += WallTimer::get_time_mark();
// }
//// if (nk <= last_k) {
//// k = nk;
//// } else {
//// k = last_k + 1;
//// }
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
//
//
////#pragma omp parallel for
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// set_K[k_i] = set_L[k_i + base_set_L].id_;
////// set_K[k_i] = set_L[k_i].id_;
//// }
//
// {
// idi k_i = 0;
// idi bound_sub = K / num_threads_;
// for (idi sub = 0; sub < bound_sub; ++sub) {
// for (int tid = 0; tid < num_threads_; ++tid) {
// idi index_set_L = tid * local_queue_length + sub;
// set_K[k_i++] = set_L[index_set_L].id_;
// }
// }
// idi remain = K - k_i;
// if (remain) {
// for (int tid = 0; tid < remain; ++tid) {
// idi index_set_L = tid * local_queue_length + bound_sub;
// set_K[k_i++] = set_L[index_set_L].id_;
// }
// }
// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//
//// {//test
//// printf("tmp_count: %u\n", tmp_count);
//// if (3 == query_id) {
//// exit(1);
//// }
//// }
//}
} // namespace PANNS
#endif //BATCH_SEARCHING_SEARCHING_H
|
residual_based_bdf_custom_scheme.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_RESIDUAL_BASED_BDF_CUSTOM_SCHEME )
#define KRATOS_RESIDUAL_BASED_BDF_CUSTOM_SCHEME
/* System includes */
/* External includes */
/* Project includes */
#include "solving_strategies/schemes/residual_based_bdf_scheme.h"
#include "includes/variables.h"
#include "includes/kratos_parameters.h"
#include "includes/checks.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedBDFCustomScheme
* @ingroup KratosCore
* @brief BDF integration scheme (for dynamic problems)
* @details The second order Backward Differentiation Formula (BDF) method is a two step second order accurate method.
* This scheme is a generalization of the only displacement scheme, where any list of variables and its derivatives can be considered instead
* Look at the base class for more details
* @see ResidualBasedBDFScheme
* @author Vicente Mataix Ferrandiz
*/
template<class TSparseSpace, class TDenseSpace>
class ResidualBasedBDFCustomScheme
    : public ResidualBasedBDFScheme<TSparseSpace, TDenseSpace>
{
public:
    ///@name Type Definitions
    ///@{

    KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedBDFCustomScheme );

    typedef Scheme<TSparseSpace,TDenseSpace>                                  BaseType;

    typedef ResidualBasedImplicitTimeScheme<TSparseSpace,TDenseSpace> ImplicitBaseType;

    typedef ResidualBasedBDFScheme<TSparseSpace,TDenseSpace>               BDFBaseType;

    typedef typename ImplicitBaseType::TDataType                             TDataType;

    typedef typename ImplicitBaseType::DofsArrayType                     DofsArrayType;

    typedef typename Element::DofsVectorType                            DofsVectorType;

    typedef typename ImplicitBaseType::TSystemMatrixType             TSystemMatrixType;

    typedef typename ImplicitBaseType::TSystemVectorType             TSystemVectorType;

    typedef typename ImplicitBaseType::LocalSystemVectorType     LocalSystemVectorType;

    typedef typename ImplicitBaseType::LocalSystemMatrixType     LocalSystemMatrixType;

    typedef ModelPart::NodesContainerType                               NodesArrayType;

    typedef ModelPart::ElementsContainerType                         ElementsArrayType;

    typedef ModelPart::ConditionsContainerType                     ConditionsArrayType;

    typedef typename BaseType::Pointer                                 BaseTypePointer;

    typedef VectorComponentAdaptor< array_1d< double, 3 > >              ComponentType;

    ///@}
    ///@name Life Cycle
    ///@{

    /**
     * @brief Constructor. The BDF method
     * @param ThisParameters The parameters containing the list of variables to consider
     * @todo The ideal would be to use directly the dof or the variable itself to identify the type of variable and is derivatives
     */
    explicit ResidualBasedBDFCustomScheme(Parameters ThisParameters)
        // NOTE: The order must be forwarded through the base-class initializer.
        // The previous implementation built a temporary "BDFBaseType(...)" in the
        // constructor body, which left the base class default-initialized and
        // silently ignored "integration_order". The fallback value 2 matches the
        // default in GetDefaultParameters()
        : BDFBaseType(ThisParameters.Has("integration_order") ? static_cast<std::size_t>(ThisParameters["integration_order"].GetInt()) : 2)
    {
        // Getting default parameters
        Parameters default_parameters = GetDefaultParameters();
        ThisParameters.ValidateAndAssignDefaults(default_parameters);

        // Creating variables list
        CreateVariablesList(ThisParameters);
    }

    /**
     * @brief Constructor. The BDF method
     * @param Order The integration order
     * @param ThisParameters The parameters containing the list of variables to consider
     * @todo The ideal would be to use directly the dof or the variable itself to identify the type of variable and is derivatives
     */
    explicit ResidualBasedBDFCustomScheme(
        const std::size_t Order = 2,
        Parameters ThisParameters =  Parameters(R"({})")
        )
        :BDFBaseType(Order)
    {
        // Getting default parameters
        Parameters default_parameters = GetDefaultParameters();
        ThisParameters.ValidateAndAssignDefaults(default_parameters);

        // Creating variables list
        CreateVariablesList(ThisParameters);
    }

    /** Copy Constructor.
     */
    explicit ResidualBasedBDFCustomScheme(ResidualBasedBDFCustomScheme& rOther)
        :BDFBaseType(rOther)
        ,mDoubleVariable(rOther.mDoubleVariable)
        ,mFirstDoubleDerivatives(rOther.mFirstDoubleDerivatives)
        ,mSecondDoubleDerivatives(rOther.mSecondDoubleDerivatives)
    {
    }

    /**
     * Clone
     */
    BaseTypePointer Clone() override
    {
        return BaseTypePointer( new ResidualBasedBDFCustomScheme(*this) );
    }

    /** Destructor.
     */
    ~ResidualBasedBDFCustomScheme
    () override {}

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /**
     * @brief This is the place to initialize the Scheme.
     * @details This is intended to be called just once when the strategy is initialized
     * @param rModelPart The model part of the problem to solve
     */
    void Initialize(ModelPart& rModelPart) override
    {
        KRATOS_TRY

        BDFBaseType::Initialize(rModelPart);

        // The current process info
        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        // Getting dimension
        KRATOS_WARNING_IF("ResidualBasedBDFCustomScheme", !r_current_process_info.Has(DOMAIN_SIZE)) << "DOMAIN_SIZE not defined. Please define DOMAIN_SIZE. 3D case will be assumed" << std::endl;
        const std::size_t domain_size = r_current_process_info.Has(DOMAIN_SIZE) ? r_current_process_info.GetValue(DOMAIN_SIZE) : 3;

        // The variables lists were built assuming mDomainSize components per
        // vector variable; if the actual DOMAIN_SIZE differs we must add or
        // remove the Z components accordingly
        if (domain_size != mDomainSize) {
            const std::size_t total_number_of_variables = mDoubleVariable.size();

            // We remove the third component
            if (domain_size == 2) {
                const std::size_t number_variables_added = total_number_of_variables/3;
                for (std::size_t i = 0; i < number_variables_added; ++i) {
                    // Each erase shrinks the containers, so the next Z component
                    // sits at index 2 + 2*i.
                    // NOTE: Each container must be erased with its own iterator.
                    // The previous implementation erased the derivative vectors
                    // with mDoubleVariable.begin(), which is undefined behavior
                    mDoubleVariable.erase(mDoubleVariable.begin() + (2 + 2 * i));
                    mFirstDoubleDerivatives.erase(mFirstDoubleDerivatives.begin() + (2 + 2 * i));
                    mSecondDoubleDerivatives.erase(mSecondDoubleDerivatives.begin() + (2 + 2 * i));
                }
            } else if (domain_size == 3) { // We need to add the third component
                const std::size_t number_variables_added = total_number_of_variables/2;
                for (std::size_t i = 0; i < number_variables_added; ++i) {
                    // NOTE(review): the Z components are appended at the end of
                    // the lists, not interleaved with their X/Y counterparts;
                    // the per-index pairing across the three containers stays
                    // consistent, which is what the update loops rely on
                    const std::string variable_name = ((*(mDoubleVariable.begin() + 2 * i))->GetSourceVariable()).Name();
                    const auto& r_var_z = KratosComponents<Variable<double>>::Get(variable_name + "_Z");
                    mDoubleVariable.push_back(&r_var_z);
                    mFirstDoubleDerivatives.push_back(&(r_var_z.GetTimeDerivative()));
                    mSecondDoubleDerivatives.push_back(&((r_var_z.GetTimeDerivative()).GetTimeDerivative()));
                }
            } else {
                KRATOS_ERROR << "DOMAIN_SIZE can only be 2 or 3. It is: " << domain_size << std::endl;
            }

            mDomainSize = domain_size;
        }

        KRATOS_CATCH("")
    }

    /**
     * @brief It initializes time step solution. Only for reasons if the time step solution is restarted
     * @param rModelPart The model part of the problem to solve
     * @param rA LHS matrix
     * @param rDx Incremental update of primary variables
     * @param rb RHS Vector
     */
    void InitializeSolutionStep(
        ModelPart& rModelPart,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        ) override
    {
        KRATOS_TRY;

        BDFBaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb);

        // Updating time derivatives (nodally for efficiency)
        const int num_nodes = static_cast<int>( rModelPart.Nodes().size() );
        const auto it_node_begin = rModelPart.Nodes().begin();

        // Auxiliar fixed value
        bool fixed = false;

        #pragma omp parallel for private(fixed)
        for(int i = 0;  i < num_nodes; ++i) {
            auto it_node = it_node_begin + i;

            std::size_t counter = 0;
            for (auto p_var : mDoubleVariable) {
                fixed = false;

                // Derivatives
                const auto& dvar = *mFirstDoubleDerivatives[counter];
                const auto& d2var = *mSecondDoubleDerivatives[counter];

                // If a derivative DoF is fixed, the primary variable must be
                // fixed too, so the BDF relation can be inverted consistently
                if (it_node->HasDofFor(d2var)) {
                    if (it_node->IsFixed(d2var)) {
                        it_node->Fix(*p_var);
                        fixed = true;
                    }
                }

                if (it_node->HasDofFor(dvar)) {
                    if (it_node->IsFixed(dvar) && !fixed) {
                        it_node->Fix(*p_var);
                    }
                }

                counter++;
            }
        }

        KRATOS_CATCH("ResidualBasedBDFCustomScheme.InitializeSolutionStep");
    }

    /**
     * @brief Performing the prediction of the solution
     * @details It predicts the solution for the current step x = xold + vold * Dt
     * @param rModelPart The model of the problem to solve
     * @param rDofSet set of all primary variables
     * @param A LHS matrix
     * @param Dx Incremental update of primary variables
     * @param b RHS Vector
     */
    void Predict(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b
        ) override
    {
        KRATOS_TRY;

        // Getting process info
        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        // Getting delta time
        const double delta_time = r_current_process_info[DELTA_TIME];

        // Updating time derivatives (nodally for efficiency)
        const int num_nodes = static_cast<int>( rModelPart.Nodes().size() );

        // Getting first node iterator
        const auto it_node_begin = rModelPart.Nodes().begin();

        #pragma omp parallel for
        for(int i = 0;  i< num_nodes; ++i) {
            auto it_node = it_node_begin + i;

            std::size_t counter = 0;
            for (auto p_var : mDoubleVariable) {
                // Derivatives
                const auto& dvar = *mFirstDoubleDerivatives[counter];
                const auto& d2var = *mSecondDoubleDerivatives[counter];

                ComputePredictComponent(it_node, *p_var, dvar, d2var, delta_time);
                counter++;
            }

            // Updating time derivatives
            UpdateFirstDerivative(it_node);
            UpdateSecondDerivative(it_node);
        }

        KRATOS_CATCH( "" );
    }

    /**
     * @brief This function is designed to be called once to perform all the checks needed
     * on the input provided.
     * @details Checks can be "expensive" as the function is designed
     * to catch user's errors.
     * @param rModelPart The model of the problem to solve
     * @return Zero means all ok
     */
    int Check(const ModelPart& rModelPart) const override
    {
        KRATOS_TRY;

        const int err = BDFBaseType::Check(rModelPart);
        if(err!=0) return err;

        // Check for variables keys
        // Verify that the variables are correctly initialized
        for ( auto p_var : mDoubleVariable)
            KRATOS_CHECK_VARIABLE_KEY((*p_var))
        for ( auto p_var : mFirstDoubleDerivatives)
            KRATOS_CHECK_VARIABLE_KEY((*p_var))
        for ( auto p_var : mSecondDoubleDerivatives)
            KRATOS_CHECK_VARIABLE_KEY((*p_var))

        // Check that variables are correctly allocated
        for(auto& r_node : rModelPart.Nodes()) {
            for ( auto p_var : mDoubleVariable)
                KRATOS_CHECK_VARIABLE_IN_NODAL_DATA((*p_var), r_node)
            for ( auto p_var : mFirstDoubleDerivatives)
                KRATOS_CHECK_VARIABLE_IN_NODAL_DATA((*p_var), r_node)
            for ( auto p_var : mSecondDoubleDerivatives)
                KRATOS_CHECK_VARIABLE_IN_NODAL_DATA((*p_var), r_node)

            for ( auto p_var : mDoubleVariable)
                KRATOS_CHECK_DOF_IN_NODE((*p_var), r_node)
        }

        KRATOS_CATCH( "" );

        return 0;
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override
    {
        return "ResidualBasedBDFCustomScheme";
    }

    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    ///@}
    ///@name Friends
    ///@{

protected:

    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    std::vector<const Variable<double>*> mDoubleVariable;          /// The double variables
    std::vector<const Variable<double>*> mFirstDoubleDerivatives;  /// The first derivative double variable to compute
    std::vector<const Variable<double>*> mSecondDoubleDerivatives; /// The second derivative double variable to compute

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    /**
     * @brief Updating first time derivative (velocity)
     * @details Applies the BDF formula: v = sum_k mBDF[k] * u^{n-k}
     * @param itNode the node interator
     */
    inline void UpdateFirstDerivative(NodesArrayType::iterator itNode) override
    {
        // DOUBLES
        std::size_t counter = 0;
        for (auto p_var : mDoubleVariable) {
            double& dotun0 = itNode->FastGetSolutionStepValue(*mFirstDoubleDerivatives[counter]);
            dotun0 = BDFBaseType::mBDF[0] * itNode->FastGetSolutionStepValue(*p_var);
            for (std::size_t i_order = 1; i_order < BDFBaseType::mOrder + 1; ++i_order)
                dotun0 += BDFBaseType::mBDF[i_order] * itNode->FastGetSolutionStepValue(*p_var, i_order);
            counter++;
        }
    }

    /**
     * @brief Updating second time derivative (acceleration)
     * @details Applies the BDF formula to the first derivatives: a = sum_k mBDF[k] * v^{n-k}
     * @param itNode the node interator
     */
    inline void UpdateSecondDerivative(NodesArrayType::iterator itNode) override
    {
        // DOUBLES
        std::size_t counter = 0;
        for (auto p_var : mFirstDoubleDerivatives) {
            double& dot2un0 = itNode->FastGetSolutionStepValue(*mSecondDoubleDerivatives[counter]);
            dot2un0 = BDFBaseType::mBDF[0] * itNode->FastGetSolutionStepValue(*p_var);
            for (std::size_t i_order = 1; i_order < BDFBaseType::mOrder + 1; ++i_order)
                dot2un0 += BDFBaseType::mBDF[i_order] * itNode->FastGetSolutionStepValue(*p_var, i_order);
            counter++;
        }
    }

    ///@}
    ///@name Protected  Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{
    ///@{

private:

    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    std::size_t mDomainSize = 3; /// This auxiliar variable is used to store the domain size of the problem

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    /**
     * @brief This method reduces the code duplication for each components when computing the prediction
     * @param itNode The node iterator of the node currently being computed
     * @param rVariable The variable currently being integrated
     * @param rDerivedVariable The first time derivative of the current variable
     * @param rDerived2Variable The second time derivative of the current variable
     * @param DeltaTime The increment of time for the time integration
     */
    template<class TClassVar>
    void ComputePredictComponent(
        NodesArrayType::iterator itNode,
        const TClassVar& rVariable,
        const TClassVar& rDerivedVariable,
        const TClassVar& rDerived2Variable,
        const double DeltaTime
        )
    {
        // Values
        const double dot2un1 = itNode->FastGetSolutionStepValue(rDerived2Variable, 1);
        const double dotun1 = itNode->FastGetSolutionStepValue(rDerivedVariable, 1);
        const double un1 = itNode->FastGetSolutionStepValue(rVariable, 1);
        const double dot2un0 = itNode->FastGetSolutionStepValue(rDerived2Variable);
        double& dotun0 = itNode->FastGetSolutionStepValue(rDerivedVariable);
        double& un0 = itNode->FastGetSolutionStepValue(rVariable);

        if (itNode->HasDofFor(rDerived2Variable) && itNode->IsFixed(rDerived2Variable)) {
            // Acceleration fixed: invert the BDF relation to recover velocity,
            // then invert it again to recover the primary variable
            dotun0 = dot2un0;
            for (std::size_t i_order = 1; i_order < BDFBaseType::mOrder + 1; ++i_order)
                dotun0 -= BDFBaseType::mBDF[i_order] * itNode->FastGetSolutionStepValue(rDerivedVariable, i_order);
            dotun0 /= BDFBaseType::mBDF[0];

            un0 = dotun0;
            for (std::size_t i_order = 1; i_order < BDFBaseType::mOrder + 1; ++i_order)
                un0 -= BDFBaseType::mBDF[i_order] * itNode->FastGetSolutionStepValue(rVariable, i_order);
            un0 /= BDFBaseType::mBDF[0];
        } else if (itNode->HasDofFor(rDerivedVariable) && itNode->IsFixed(rDerivedVariable)) {
            // Velocity fixed: invert the BDF relation once for the primary variable
            un0 = dotun0;
            for (std::size_t i_order = 1; i_order < BDFBaseType::mOrder + 1; ++i_order)
                un0 -= BDFBaseType::mBDF[i_order] * itNode->FastGetSolutionStepValue(rVariable, i_order);
            un0 /= BDFBaseType::mBDF[0];
        } else if (!itNode->IsFixed(rVariable)) {
            // Free DoF: second order Taylor extrapolation from the previous step
            un0 = un1 + DeltaTime * dotun1 + 0.5 * std::pow(DeltaTime, 2) * dot2un1;
        }
    }

    /**
     * @brief This method creates the list of variables
     * @details Vector variables are stored component-wise (X, Y and, in 3D, Z)
     * @param ThisParameters The configuration parameters
     */
    void CreateVariablesList(Parameters ThisParameters)
    {
        const std::size_t n_variables = ThisParameters["solution_variables"].size();

        // The current dimension
        mDomainSize = ThisParameters["domain_size"].GetInt();

        const auto variable_names = ThisParameters["solution_variables"].GetStringArray();

        for (std::size_t p_var = 0; p_var < n_variables; ++p_var){
            const std::string& variable_name = variable_names[p_var];

            if(KratosComponents<Variable<double>>::Has(variable_name)){
                const auto& r_var = KratosComponents<Variable<double>>::Get(variable_name);
                mDoubleVariable.push_back(&r_var);
                mFirstDoubleDerivatives.push_back(&(r_var.GetTimeDerivative()));
                mSecondDoubleDerivatives.push_back(&((r_var.GetTimeDerivative()).GetTimeDerivative()));
            } else if (KratosComponents< Variable< array_1d< double, 3> > >::Has(variable_name)) {
                // Components
                const auto& r_var_x = KratosComponents<Variable<double>>::Get(variable_name+"_X");
                const auto& r_var_y = KratosComponents<Variable<double>>::Get(variable_name+"_Y");
                mDoubleVariable.push_back(&r_var_x);
                mDoubleVariable.push_back(&r_var_y);
                mFirstDoubleDerivatives.push_back(&(r_var_x.GetTimeDerivative()));
                mFirstDoubleDerivatives.push_back(&(r_var_y.GetTimeDerivative()));
                mSecondDoubleDerivatives.push_back(&((r_var_x.GetTimeDerivative()).GetTimeDerivative()));
                mSecondDoubleDerivatives.push_back(&((r_var_y.GetTimeDerivative()).GetTimeDerivative()));
                if (mDomainSize == 3) {
                    const auto& r_var_z = KratosComponents<Variable<double>>::Get(variable_name+"_Z");
                    mDoubleVariable.push_back(&r_var_z);
                    mFirstDoubleDerivatives.push_back(&(r_var_z.GetTimeDerivative()));
                    mSecondDoubleDerivatives.push_back(&((r_var_z.GetTimeDerivative()).GetTimeDerivative()));
                }
            } else {
                KRATOS_ERROR << "Only double and vector variables are allowed in the variables list." ;
            }
        }
    }

    /**
     * @brief This method returns the default parameters in order to avoid code duplication
     * @return Returns the default parameters
     */
    Parameters GetDefaultParameters()
    {
        Parameters default_parameters = Parameters(R"(
        {
            "name"                : "ResidualBasedBDFCustomScheme",
            "domain_size"         : 3,
            "integration_order"   : 2,
            "solution_variables"  : ["DISPLACEMENT"]
        })" );

        return default_parameters;
    }

    ///@}
    ///@name Private  Access
    ///@{

    ///@}
    ///@}
    ///@name Serialization
    ///@{

    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    ///@}
}; /* Class ResidualBasedBDFCustomScheme */
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_BDF_CUSTOM_SCHEME defined */
|
enhance.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE N N H H AAA N N CCCC EEEEE %
% E NN N H H A A NN N C E %
% EEE N N N HHHHH AAAAA N N N C EEE %
% E N NN H H A A N NN C E %
% EEEEE N N H H A A N N CCCC EEEEE %
% %
% %
% MagickCore Image Enhancement Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoGammaImage() extract the 'mean' from the image and adjust the image
% to try make set its gamma appropriately.
%
% The format of the AutoGammaImage method is:
%
% MagickBooleanType AutoGammaImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AutoGammaImage(Image *image,
  ExceptionInfo *exception)
{
  double
    channel_gamma,
    channel_mean,
    mean_log,
    sans;

  MagickStatusType
    status;

  ssize_t
    n;

  /*
    Choose a gamma that maps the image mean onto mid-intensity (0.5).
  */
  mean_log=log(0.5);
  if (image->channel_mask == DefaultChannels)
    {
      /*
        Apply one gamma correction uniformly across all given channels.
      */
      (void) GetImageMean(image,&channel_mean,&sans,exception);
      channel_gamma=log(channel_mean*QuantumScale)/mean_log;
      return(LevelImage(image,0.0,(double) QuantumRange,channel_gamma,
        exception));
    }
  /*
    Otherwise compute and apply a separate gamma for each updatable channel,
    temporarily narrowing the channel mask to one channel at a time.
  */
  status=MagickTrue;
  for (n=0; n < (ssize_t) GetPixelChannels(image); n++)
  {
    ChannelType
      mask;

    PixelChannel channel = GetPixelChannelChannel(image,n);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits & UpdatePixelTrait) == 0)
      continue;
    mask=SetImageChannelMask(image,(ChannelType) (1UL << n));
    status=GetImageMean(image,&channel_mean,&sans,exception);
    channel_gamma=log(channel_mean*QuantumScale)/mean_log;
    status&=LevelImage(image,0.0,(double) QuantumRange,channel_gamma,
      exception);
    (void) SetImageChannelMask(image,mask);  /* restore original mask */
    if (status == MagickFalse)
      break;
  }
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoLevelImage() adjusts the levels of a particular image channel by
% scaling the minimum and maximum values to the full quantum range.
%
% The format of the LevelImage method is:
%
% MagickBooleanType AutoLevelImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AutoLevelImage(Image *image,
  ExceptionInfo *exception)
{
  /*
    Auto-level is exactly a min/max stretch with no clipping: 0% black
    point, 0% white point, gamma 1.0.
  */
  return(MinMaxStretchImage(image,0.0,0.0,1.0,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B r i g h t n e s s C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BrightnessContrastImage() changes the brightness and/or contrast of an
% image. It converts the brightness and contrast parameters into slope and
% intercept and calls a polynomical function to apply to the image.
%
% The format of the BrightnessContrastImage method is:
%
% MagickBooleanType BrightnessContrastImage(Image *image,
% const double brightness,const double contrast,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o brightness: the brightness percent (-100 .. 100).
%
% o contrast: the contrast percent (-100 .. 100).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
  const double brightness,const double contrast,ExceptionInfo *exception)
{
#define BrightnessContastImageTag  "BrightnessContast/Image"

  double
    coefficients[2],
    intercept,
    slope;

  MagickBooleanType
    status;

  /*
    Convert the brightness/contrast percentages into the linear transfer
    function y = slope*x + intercept and evaluate it as a degree-1
    polynomial over the image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Contrast sweeps the slope through tan(pi/4)=1 at 0%: negative contrast
    flattens the line, positive steepens it; never let it go negative.
  */
  slope=tan((double) (MagickPI*(contrast/100.0+1.0)/4.0));
  if (slope < 0.0)
    slope=0.0;
  /*
    Brightness shifts the intercept; the second term recenters the line so
    a slope change pivots about mid-intensity.
  */
  intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
  coefficients[0]=slope;
  coefficients[1]=intercept;
  status=FunctionImage(image,PolynomialFunction,2,coefficients,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C L A H E I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CLAHEImage() is a variant of adaptive histogram equalization in which the
% contrast amplification is limited, so as to reduce this problem of noise
% amplification.
%
% Adapted from implementation by Karel Zuiderveld, karel@cv.ruu.nl in
% "Graphics Gems IV", Academic Press, 1994.
%
% The format of the CLAHEImage method is:
%
% MagickBooleanType CLAHEImage(Image *image,const size_t width,
% const size_t height,const size_t number_bins,const double clip_limit,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the width of the tile divisions to use in horizontal direction.
%
% o height: the height of the tile divisions to use in vertical direction.
%
% o number_bins: number of bins for histogram ("dynamic range").
%
% o clip_limit: contrast limit for localised changes in contrast. A limit
% less than 1 results in standard non-contrast limited AHE.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Intensity range of the channel being equalized; CLAHE works on 16-bit
  gray values, so the bounds fit in unsigned short.
*/
typedef struct _RangeInfo
{
  unsigned short
    min,
    max;
} RangeInfo;
/*
  ClipCLAHEHistogram() caps every histogram bin at clip_limit and
  redistributes the clipped-off counts across the remaining bins, which is
  what bounds the contrast amplification in CLAHE.
*/
static void ClipCLAHEHistogram(const double clip_limit,const size_t number_bins,
  size_t *histogram)
{
#define NumberCLAHEGrays  (65536)

  size_t
    residual,
    share,
    total_excess;

  ssize_t
    bin,
    threshold;

  /*
    Count how much histogram mass sits above the clip ceiling.
  */
  total_excess=0;
  for (bin=0; bin < (ssize_t) number_bins; bin++)
  {
    threshold=(ssize_t) histogram[bin]-(ssize_t) clip_limit;
    if (threshold > 0)
      total_excess+=threshold;
  }
  /*
    Clip each bin, handing every bin an equal share of the excess; bins
    already within `share` of the limit are merely topped up to it.
  */
  share=total_excess/number_bins;
  threshold=(ssize_t) (clip_limit-share);
  for (bin=0; bin < (ssize_t) number_bins; bin++)
  {
    if ((double) histogram[bin] > clip_limit)
      {
        histogram[bin]=(size_t) clip_limit;
        continue;
      }
    if ((ssize_t) histogram[bin] > threshold)
      {
        total_excess-=histogram[bin]-threshold;
        histogram[bin]=(size_t) clip_limit;
        continue;
      }
    total_excess-=share;
    histogram[bin]+=share;
  }
  /*
    Sweep any remaining excess over the still-unclipped bins, one count at
    a time, until it is exhausted or a full pass makes no progress.
  */
  do
  {
    size_t
      *bucket,
      *sentinel;

    residual=total_excess;
    bucket=histogram;
    sentinel=histogram+number_bins;
    while ((total_excess != 0) && (bucket < sentinel))
    {
      size_t
        stride;

      /*
        Spread the increments evenly by striding through the bins.
      */
      stride=number_bins/total_excess;
      if (stride < 1)
        stride=1;
      for (bucket=histogram; (bucket < sentinel) && (total_excess != 0); bucket+=stride)
        if ((double) *bucket < clip_limit)
          {
            (*bucket)++;
            total_excess--;
          }
      bucket++;
    }
  } while ((total_excess != 0) && (total_excess < residual));
}
/*
  GenerateCLAHEHistogram() zeroes the histogram, then buckets every pixel
  of one tile through the gray-level lookup table.  The tile is a window
  inside a row-major buffer clahe_info->width pixels wide, so after each
  tile row the cursor advances by a full buffer row.
*/
static void GenerateCLAHEHistogram(const RectangleInfo *clahe_info,
  const RectangleInfo *tile_info,const size_t number_bins,
  const unsigned short *lut,const unsigned short *pixels,size_t *histogram)
{
  const unsigned short
    *pixel;

  ssize_t
    n;

  for (n=0; n < (ssize_t) number_bins; n++)
    histogram[n]=0L;
  pixel=pixels;
  for (n=0; n < (ssize_t) tile_info->height; n++)
  {
    const unsigned short
      *row_end;

    row_end=pixel+tile_info->width;
    while (pixel < row_end)
      histogram[lut[*pixel++]]++;
    /*
      Skip the part of the buffer row outside this tile.
    */
    pixel=row_end+clahe_info->width-tile_info->width;
  }
}
/*
  InterpolateCLAHE() blends the gray-level mappings of the four
  neighboring tiles (Q11, Q12, Q21, Q22) across one tile-sized region so
  adjacent tiles transition smoothly instead of showing seams.  `pixels`
  walks a row-major buffer clahe_info->width pixels wide; after each
  region row the cursor skips the remainder of the buffer row.
*/
static void InterpolateCLAHE(const RectangleInfo *clahe_info,const size_t *Q12,
  const size_t *Q22,const size_t *Q11,const size_t *Q21,
  const RectangleInfo *tile,const unsigned short *lut,unsigned short *pixels)
{
  ssize_t
    y;

  unsigned short
    intensity;

  /*
    Bilinear interpolate four tiles to eliminate boundary artifacts.
    x and y count DOWN from width/height, so x weights Q12/Q11 and
    (width-x) weights Q22/Q21; PerceptibleReciprocal() supplies a safe
    1/(width*height) that guards against division by zero.
  */
  for (y=(ssize_t) tile->height; y > 0; y--)
  {
    ssize_t
      x;

    for (x=(ssize_t) tile->width; x > 0; x--)
    {
      /* map the pixel through the LUT, then blend the four tile mappings */
      intensity=lut[*pixels];
      *pixels++=(unsigned short) (PerceptibleReciprocal((double) tile->width*
        tile->height)*(y*((double) x*Q12[intensity]+(tile->width-x)*
        Q22[intensity])+(tile->height-y)*((double) x*Q11[intensity]+
        (tile->width-x)*Q21[intensity])));
    }
    /* advance to the start of the region in the next buffer row */
    pixels+=(clahe_info->width-tile->width);
  }
}
/*
  GenerateCLAHELut() builds a lookup table mapping every intensity in
  [range_info->min,range_info->max] onto a histogram bin index in
  [0,number_bins-1].
*/
static void GenerateCLAHELut(const RangeInfo *range_info,
  const size_t number_bins,unsigned short *lut)
{
  ssize_t
    gray;

  unsigned short
    bin_width;

  /*
    The +1 keeps the top intensity from spilling past the last bin.
  */
  bin_width=(unsigned short) ((range_info->max-range_info->min)/number_bins+1);
  for (gray=(ssize_t) range_info->min; gray <= (ssize_t) range_info->max; gray++)
    lut[gray]=(unsigned short) ((gray-range_info->min)/bin_width);
}
/*
  MapCLAHEHistogram() converts the clipped histogram, in place, into a
  cumulative gray-level mapping rescaled onto
  [range_info->min,range_info->max].
*/
static void MapCLAHEHistogram(const RangeInfo *range_info,
  const size_t number_bins,const size_t number_pixels,size_t *histogram)
{
  double
    cumulative,
    scale;

  ssize_t
    n;

  scale=(double) (range_info->max-range_info->min)/number_pixels;
  cumulative=0.0;
  for (n=0; n < (ssize_t) number_bins; n++)
  {
    cumulative+=histogram[n];
    histogram[n]=(size_t) (range_info->min+scale*cumulative);
    /* clamp the top end of the mapping */
    if (histogram[n] > range_info->max)
      histogram[n]=range_info->max;
  }
}
/*
  CLAHE() runs contrast limited adaptive histogram equalization over a
  16-bit gray buffer `pixels` of clahe_info->width x height, divided into
  clahe_info->x by clahe_info->y tiles of tile_info->width x height.
  Pass 1 builds, clips, and maps a per-tile histogram; pass 2 rewrites the
  pixels by bilinearly interpolating the four surrounding tile mappings.
  Returns MagickFalse only on allocation failure.
*/
static MagickBooleanType CLAHE(const RectangleInfo *clahe_info,
  const RectangleInfo *tile_info,const RangeInfo *range_info,
  const size_t number_bins,const double clip_limit,unsigned short *pixels)
{
  MemoryInfo
    *tile_cache;

  unsigned short
    *p;

  size_t
    limit,
    *tiles;

  ssize_t
    y;

  unsigned short
    *lut;

  /*
    Contrast limited adaptive histogram equalization.
    A clip limit of exactly 1.0 is treated as a no-op.
  */
  if (clip_limit == 1.0)
    return(MagickTrue);
  /*
    The tile cache holds one histogram (number_bins entries) per tile.
  */
  tile_cache=AcquireVirtualMemory((size_t) clahe_info->x*number_bins,
    clahe_info->y*sizeof(*tiles));
  if (tile_cache == (MemoryInfo *) NULL)
    return(MagickFalse);
  lut=(unsigned short *) AcquireQuantumMemory(NumberCLAHEGrays,sizeof(*lut));
  if (lut == (unsigned short *) NULL)
    {
      tile_cache=RelinquishVirtualMemory(tile_cache);
      return(MagickFalse);
    }
  tiles=(size_t *) GetVirtualMemoryBlob(tile_cache);
  /*
    Scale the relative clip limit to an absolute per-bin count; at least 1.
  */
  limit=(size_t) (clip_limit*(tile_info->width*tile_info->height)/number_bins);
  if (limit < 1UL)
    limit=1UL;
  /*
    Generate greylevel mappings for each tile.
  */
  GenerateCLAHELut(range_info,number_bins,lut);
  p=pixels;
  for (y=0; y < (ssize_t) clahe_info->y; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) clahe_info->x; x++)
    {
      size_t
        *histogram;

      histogram=tiles+(number_bins*(y*clahe_info->x+x));
      GenerateCLAHEHistogram(clahe_info,tile_info,number_bins,lut,p,histogram);
      ClipCLAHEHistogram((double) limit,number_bins,histogram);
      MapCLAHEHistogram(range_info,number_bins,tile_info->width*
        tile_info->height,histogram);
      p+=tile_info->width;
    }
    /* advance past the remaining tile rows to the next tile-row origin */
    p+=clahe_info->width*(tile_info->height-1);
  }
  /*
    Interpolate greylevel mappings to get CLAHE image.
    The interpolation grid has (x+1)*(y+1) regions: interior regions span
    four tile centers; border regions are half (or quarter) tiles that
    clamp to the nearest tile mapping.
  */
  p=pixels;
  for (y=0; y <= (ssize_t) clahe_info->y; y++)
  {
    OffsetInfo
      offset;

    RectangleInfo
      tile;

    ssize_t
      x;

    tile.height=tile_info->height;
    tile.y=y-1;
    offset.y=tile.y+1;
    if (y == 0)
      {
        /*
          Top row.
        */
        tile.height=tile_info->height >> 1;
        tile.y=0;
        offset.y=0;
      }
    else
      if (y == (ssize_t) clahe_info->y)
        {
          /*
            Bottom row.
          */
          tile.height=(tile_info->height+1) >> 1;
          tile.y=clahe_info->y-1;
          offset.y=tile.y;
        }
    for (x=0; x <= (ssize_t) clahe_info->x; x++)
    {
      tile.width=tile_info->width;
      tile.x=x-1;
      offset.x=tile.x+1;
      if (x == 0)
        {
          /*
            Left column.
          */
          tile.width=tile_info->width >> 1;
          tile.x=0;
          offset.x=0;
        }
      else
        if (x == (ssize_t) clahe_info->x)
          {
            /*
              Right column.
            */
            tile.width=(tile_info->width+1) >> 1;
            tile.x=clahe_info->x-1;
            offset.x=tile.x;
          }
      InterpolateCLAHE(clahe_info,
        tiles+(number_bins*(tile.y*clahe_info->x+tile.x)),  /* Q12 */
        tiles+(number_bins*(tile.y*clahe_info->x+offset.x)),  /* Q22 */
        tiles+(number_bins*(offset.y*clahe_info->x+tile.x)),  /* Q11 */
        tiles+(number_bins*(offset.y*clahe_info->x+offset.x)),  /* Q21 */
        &tile,lut,p);
      p+=tile.width;
    }
    p+=clahe_info->width*(tile.height-1);
  }
  lut=(unsigned short *) RelinquishMagickMemory(lut);
  tile_cache=RelinquishVirtualMemory(tile_cache);
  return(MagickTrue);
}
/*
  CLAHEImage() improves local contrast with contrast-limited adaptive
  histogram equalization: the image is transformed to Lab, the lightness
  channel is copied into a padded plane, equalized tile-by-tile by the
  CLAHE() helper, written back, and the original colorspace is restored.

    o image: the image (modified in place).
    o width,height: tile geometry; 0 selects image dimension / 8.
    o number_bins: histogram bins per tile; 0 selects 128, capped at 256.
    o clip_limit: contrast limit for localized changes.
    o exception: return any errors or warnings in this structure.
*/
MagickExport MagickBooleanType CLAHEImage(Image *image,const size_t width,
  const size_t height,const size_t number_bins,const double clip_limit,
  ExceptionInfo *exception)
{
#define CLAHEImageTag "CLAHE/Image"

  CacheView
    *image_view;

  ColorspaceType
    colorspace;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MemoryInfo
    *pixel_cache;

  RangeInfo
    range_info;

  RectangleInfo
    clahe_info,
    tile_info;

  size_t
    n;

  ssize_t
    y;

  unsigned short
    *pixels;

  /*
    Configure CLAHE parameters.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  range_info.min=0;
  range_info.max=NumberCLAHEGrays-1;
  /* A zero tile dimension defaults to 1/8th of the image dimension. */
  tile_info.width=width;
  if (tile_info.width == 0)
    tile_info.width=image->columns >> 3;
  tile_info.height=height;
  if (tile_info.height == 0)
    tile_info.height=image->rows >> 3;
  /*
    tile_info.x/y hold the padding required so the padded plane is an exact
    multiple of the tile size; clahe_info.x/y count tiles per direction.
  */
  tile_info.x=0;
  if ((image->columns % tile_info.width) != 0)
    tile_info.x=(ssize_t) tile_info.width-(image->columns % tile_info.width);
  tile_info.y=0;
  if ((image->rows % tile_info.height) != 0)
    tile_info.y=(ssize_t) tile_info.height-(image->rows % tile_info.height);
  clahe_info.width=image->columns+tile_info.x;
  clahe_info.height=image->rows+tile_info.y;
  clahe_info.x=(ssize_t) clahe_info.width/tile_info.width;
  clahe_info.y=(ssize_t) clahe_info.height/tile_info.height;
  pixel_cache=AcquireVirtualMemory(clahe_info.width,clahe_info.height*
    sizeof(*pixels));
  if (pixel_cache == (MemoryInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  pixels=(unsigned short *) GetVirtualMemoryBlob(pixel_cache);
  colorspace=image->colorspace;  /* remembered so it can be restored below */
  if (TransformImageColorspace(image,LabColorspace,exception) == MagickFalse)
    {
      pixel_cache=RelinquishVirtualMemory(pixel_cache);
      return(MagickFalse);
    }
  /*
    Initialize CLAHE pixels.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  progress=0;
  status=MagickTrue;
  n=0;
  for (y=0; y < (ssize_t) clahe_info.height; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Read rows offset by half the padding so the pad is split evenly on
      both sides; the virtual cache view supplies out-of-bounds pixels.
    */
    p=GetCacheViewVirtualPixels(image_view,-(tile_info.x >> 1),y-
      (tile_info.y >> 1),clahe_info.width,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) clahe_info.width; x++)
    {
      /* Channel 0 is L (lightness) after the Lab transform above. */
      pixels[n++]=ScaleQuantumToShort(p[0]);
      p+=GetPixelChannels(image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        progress++;
        /*
          NOTE(review): progress increments once per row over two passes,
          so the span 2*GetPixelChannels(image) looks wrong; 2*image->rows
          seems intended -- confirm before changing.
        */
        proceed=SetImageProgress(image,CLAHEImageTag,progress,2*
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /* Equalize the padded lightness plane in place. */
  status=CLAHE(&clahe_info,&tile_info,&range_info,number_bins == 0 ?
    (size_t) 128 : MagickMin(number_bins,256),clip_limit,pixels);
  if (status == MagickFalse)
    (void) ThrowMagickException(exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
  /*
    Push CLAHE pixels to CLAHE image.
  */
  image_view=AcquireAuthenticCacheView(image,exception);
  /* Skip the top padding rows; per row, skip the left padding columns. */
  n=clahe_info.width*(tile_info.y >> 1);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    n+=tile_info.x >> 1;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      q[0]=ScaleShortToQuantum(pixels[n++]);
      q+=GetPixelChannels(image);
    }
    /* Advance past the right padding of this padded row. */
    n+=(clahe_info.width-image->columns-(tile_info.x >> 1));
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        progress++;
        proceed=SetImageProgress(image,CLAHEImageTag,progress,2*
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  pixel_cache=RelinquishVirtualMemory(pixel_cache);
  /* Restore the caller's original colorspace. */
  if (TransformImageColorspace(image,colorspace,exception) == MagickFalse)
    status=MagickFalse;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClutImage() replaces each color value in the given image, by using it as an
% index to lookup a replacement color value in a Color Look UP Table in the
% form of an image. The values are extracted along a diagonal of the CLUT
% image so either a horizontal or vertical gradient image can be used.
%
% Typically this is used to either re-color a gray-scale image according to a
% color gradient in the CLUT image, or to perform a freeform histogram
% (level) adjustment according to the (typically gray-scale) gradient in the
% CLUT image.
%
% When the 'channel' mask includes the matte/alpha transparency channel but
% one image has no such channel it is assumed that that image is a simple
% gray-scale image that will effect the alpha channel values, either for
% gray-scale coloring (with transparent or semi-transparent colors), or
% a histogram adjustment of existing alpha channel values. If both images
% have matte channels, direct and normal indexing is applied, which is rarely
% used.
%
% The format of the ClutImage method is:
%
% MagickBooleanType ClutImage(Image *image,Image *clut_image,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o clut_image: the color lookup table image for replacement color values.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ClutImage() replaces each pixel's channel value with a replacement value
  looked up along the diagonal of the color lookup table (CLUT) image.  A
  map of MaxMap+1 interpolated CLUT samples is built first, then applied to
  every pixel (only channels whose traits include UpdatePixelTrait).

    o image: the image, replaced in place by indexed CLUT values.
    o clut_image: the color lookup table image.
    o method: the pixel interpolation method used to sample the CLUT.
    o exception: return any errors or warnings in this structure.
*/
MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ClutImageTag "Clut/Image"

  CacheView
    *clut_view,
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    *clut_map;

  ssize_t
    i;

  ssize_t adjust,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(clut_image != (Image *) NULL);
  assert(clut_image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* A color CLUT applied to a gray image forces the image to sRGB. */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsGrayColorspace(clut_image->colorspace) == MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  clut_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*clut_map));
  if (clut_map == (PixelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Clut image.
  */
  status=MagickTrue;
  progress=0;
  /*
    For non-integer interpolation the sample positions stop one pixel short
    of the CLUT edge (adjust=1) so interpolation never reads past it.
  */
  adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1);
  clut_view=AcquireVirtualCacheView(clut_image,exception);
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    GetPixelInfo(clut_image,clut_map+i);
    /* Sample along the diagonal: works for horizontal, vertical, or
       diagonal gradient CLUT images alike. */
    status=InterpolatePixelInfo(clut_image,clut_view,method,
      (double) i*(clut_image->columns-adjust)/MaxMap,(double) i*
      (clut_image->rows-adjust)/MaxMap,clut_map+i,exception);
    if (status == MagickFalse)
      break;
  }
  clut_view=DestroyCacheView(clut_view);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelTrait
        traits;

      /* Each updatable channel indexes the map by its own clamped value. */
      GetPixelInfoPixel(image,q,&pixel);
      traits=GetPixelChannelTraits(image,RedPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.red=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.red))].red;
      traits=GetPixelChannelTraits(image,GreenPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.green=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.green))].green;
      traits=GetPixelChannelTraits(image,BluePixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.blue=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.blue))].blue;
      traits=GetPixelChannelTraits(image,BlackPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.black=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.black))].black;
      traits=GetPixelChannelTraits(image,AlphaPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.alpha=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.alpha))].alpha;
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ClutImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  clut_map=(PixelInfo *) RelinquishMagickMemory(clut_map);
  /* If the CLUT carries alpha and the image updates alpha, enable it. */
  if ((clut_image->alpha_trait != UndefinedPixelTrait) &&
      ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0))
    (void) SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r D e c i s i o n L i s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorDecisionListImage() accepts a lightweight Color Correction Collection
% (CCC) file which solely contains one or more color corrections and applies
% the correction to the image. Here is a sample CCC file:
%
% <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
% <ColorCorrection id="cc03345">
% <SOPNode>
% <Slope> 0.9 1.2 0.5 </Slope>
% <Offset> 0.4 -0.5 0.6 </Offset>
% <Power> 1.0 0.8 1.5 </Power>
% </SOPNode>
% <SATNode>
% <Saturation> 0.85 </Saturation>
% </SATNode>
% </ColorCorrection>
% </ColorCorrectionCollection>
%
% which includes the slope, offset, and power for each of the RGB channels
% as well as the saturation.
%
% The format of the ColorDecisionListImage method is:
%
% MagickBooleanType ColorDecisionListImage(Image *image,
% const char *color_correction_collection,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_correction_collection: the color correction collection in XML.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ColorDecisionListImage() applies an ASC CDL color correction, parsed from
  a Color Correction Collection (CCC) XML document, to the image.  Each RGB
  channel is mapped through out=(in*slope+offset)^power via a precomputed
  lookup table, then saturation is applied around a Rec.709-style luma:
  result=luma+saturation*(corrected-luma).

    o image: the image (modified in place).
    o color_correction_collection: the color correction collection in XML;
      NULL or unparsable input returns MagickFalse.
    o exception: return any errors or warnings in this structure.
*/
MagickExport MagickBooleanType ColorDecisionListImage(Image *image,
  const char *color_correction_collection,ExceptionInfo *exception)
{
#define ColorDecisionListCorrectImageTag "ColorDecisionList/Image"

  /* Per-channel ASC CDL SOP parameters: out=(in*slope+offset)^power. */
  typedef struct _Correction
  {
    double
      slope,
      offset,
      power;
  } Correction;

  typedef struct _ColorCorrection
  {
    Correction
      red,
      green,
      blue;

    double
      saturation;
  } ColorCorrection;

  CacheView
    *image_view;

  char
    token[MagickPathExtent];

  ColorCorrection
    color_correction;

  const char
    *content,
    *p;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    *cdl_map;

  ssize_t
    i;

  ssize_t
    y;

  XMLTreeInfo
    *cc,
    *ccc,
    *sat,
    *sop;

  /*
    Allocate and initialize cdl maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (color_correction_collection == (const char *) NULL)
    return(MagickFalse);
  ccc=NewXMLTree((const char *) color_correction_collection,exception);
  if (ccc == (XMLTreeInfo *) NULL)
    return(MagickFalse);
  cc=GetXMLTreeChild(ccc,"ColorCorrection");
  if (cc == (XMLTreeInfo *) NULL)
    {
      ccc=DestroyXMLTree(ccc);
      return(MagickFalse);
    }
  /*
    Default to the identity SOP (slope=1, offset=0, power=1); saturation
    defaults to 0 and scales the corrected value's distance from luma.
  */
  color_correction.red.slope=1.0;
  color_correction.red.offset=0.0;
  color_correction.red.power=1.0;
  color_correction.green.slope=1.0;
  color_correction.green.offset=0.0;
  color_correction.green.power=1.0;
  color_correction.blue.slope=1.0;
  color_correction.blue.offset=0.0;
  color_correction.blue.power=1.0;
  color_correction.saturation=0.0;
  sop=GetXMLTreeChild(cc,"SOPNode");
  if (sop != (XMLTreeInfo *) NULL)
    {
      XMLTreeInfo
        *offset,
        *power,
        *slope;

      /* Each SOP element holds three whitespace/comma separated numbers,
         in R, G, B order. */
      slope=GetXMLTreeChild(sop,"Slope");
      if (slope != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(slope);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.slope=StringToDouble(token,(char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.slope=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.slope=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
      offset=GetXMLTreeChild(sop,"Offset");
      if (offset != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(offset);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
      power=GetXMLTreeChild(sop,"Power");
      if (power != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(power);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.power=StringToDouble(token,(char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.power=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.power=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
    }
  sat=GetXMLTreeChild(cc,"SATNode");
  if (sat != (XMLTreeInfo *) NULL)
    {
      XMLTreeInfo
        *saturation;

      saturation=GetXMLTreeChild(sat,"Saturation");
      if (saturation != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(saturation);
          p=(const char *) content;
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          color_correction.saturation=StringToDouble(token,(char **) NULL);
        }
    }
  ccc=DestroyXMLTree(ccc);
  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  Color Correction Collection:");
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.slope: %g",color_correction.red.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.offset: %g",color_correction.red.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.power: %g",color_correction.red.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.slope: %g",color_correction.green.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.offset: %g",color_correction.green.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.power: %g",color_correction.green.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.slope: %g",color_correction.blue.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.offset: %g",color_correction.blue.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.power: %g",color_correction.blue.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.saturation: %g",color_correction.saturation);
    }
  /* Precompute the SOP transfer function for every quantized input level. */
  cdl_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map));
  if (cdl_map == (PixelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    cdl_map[i].red=(double) ScaleMapToQuantum((double)
      (MaxMap*(pow(color_correction.red.slope*i/MaxMap+
      color_correction.red.offset,color_correction.red.power))));
    cdl_map[i].green=(double) ScaleMapToQuantum((double)
      (MaxMap*(pow(color_correction.green.slope*i/MaxMap+
      color_correction.green.offset,color_correction.green.power))));
    cdl_map[i].blue=(double) ScaleMapToQuantum((double)
      (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+
      color_correction.blue.offset,color_correction.blue.power))));
  }
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Apply transfer function to colormap.  Parenthesized as
        luma+saturation*(cdl-luma) to match the DirectClass pixel loop
        below; the previous unparenthesized form algebraically reduced to
        saturation*cdl, dropping the luma-preserving term.
      */
      double
        luma;

      luma=0.21267f*image->colormap[i].red+0.71526*image->colormap[i].green+
        0.07217f*image->colormap[i].blue;
      image->colormap[i].red=luma+color_correction.saturation*(cdl_map[
        ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red))].red-luma);
      image->colormap[i].green=luma+color_correction.saturation*(cdl_map[
        ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green))].green-
        luma);
      image->colormap[i].blue=luma+color_correction.saturation*(cdl_map[
        ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue))].blue-
        luma);
    }
  /*
    Apply transfer function to image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      luma;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      luma=0.21267f*GetPixelRed(image,q)+0.71526*GetPixelGreen(image,q)+
        0.07217f*GetPixelBlue(image,q);
      SetPixelRed(image,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelRed(image,q))].red-luma)),q);
      SetPixelGreen(image,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelGreen(image,q))].green-luma)),q);
      SetPixelBlue(image,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelBlue(image,q))].blue-luma)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag,
          progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  cdl_map=(PixelInfo *) RelinquishMagickMemory(cdl_map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastImage() enhances the intensity differences between the lighter and
% darker elements of the image.  Set sharpen to MagickTrue to increase the
% image contrast otherwise the contrast is reduced.
%
% The format of the ContrastImage method is:
%
% MagickBooleanType ContrastImage(Image *image,
% const MagickBooleanType sharpen,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Contrast() adjusts a single RGB triple in place: it converts the color to
  HSB, nudges brightness toward the nearer extreme with a sinusoidal curve
  (sign > 0 increases contrast, sign < 0 reduces it), clamps brightness to
  [0,1], and converts back to RGB.
*/
static void Contrast(const int sign,double *red,double *green,double *blue)
{
  double
    b,
    h,
    s;

  assert(red != (double *) NULL);
  assert(green != (double *) NULL);
  assert(blue != (double *) NULL);
  h=0.0;
  s=0.0;
  b=0.0;
  ConvertRGBToHSB(*red,*green,*blue,&h,&s,&b);
  /* Sinusoidal remap: dark colors become darker, light colors lighter. */
  b+=0.5*sign*(0.5*(sin((double) (MagickPI*(b-0.5)))+1.0)-b);
  if (b < 0.0)
    b=0.0;
  if (b > 1.0)
    b=1.0;
  ConvertHSBToRGB(h,s,b,red,green,blue);
}
/*
  ContrastImage() enhances (sharpen=MagickTrue) or reduces the intensity
  differences between lighter and darker image elements by passing every
  color through the Contrast() sinusoidal brightness remap.

    o image: the image (modified in place).
    o sharpen: MagickTrue increases contrast, otherwise it is reduced.
    o exception: return any errors or warnings in this structure.
*/
MagickExport MagickBooleanType ContrastImage(Image *image,
  const MagickBooleanType sharpen,ExceptionInfo *exception)
{
#define ContrastImageTag "Contrast/Image"

  CacheView
    *image_view;

  int
    sign;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path when it is available and succeeds. */
  if (AccelerateContrastImage(image,sharpen,exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  sign=sharpen != MagickFalse ? 1 : -1;
  if (image->storage_class == PseudoClass)
    {
      /*
        Contrast enhance colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        double
          blue,
          green,
          red;

        red=(double) image->colormap[i].red;
        green=(double) image->colormap[i].green;
        blue=(double) image->colormap[i].blue;
        Contrast(sign,&red,&green,&blue);
        image->colormap[i].red=(MagickRealType) red;
        image->colormap[i].green=(MagickRealType) green;
        image->colormap[i].blue=(MagickRealType) blue;
      }
    }
  /*
    Contrast enhance image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      blue,
      green,
      red;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      red=(double) GetPixelRed(image,q);
      green=(double) GetPixelGreen(image,q);
      blue=(double) GetPixelBlue(image,q);
      Contrast(sign,&red,&green,&blue);
      SetPixelRed(image,ClampToQuantum(red),q);
      SetPixelGreen(image,ClampToQuantum(green),q);
      SetPixelBlue(image,ClampToQuantum(blue),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ContrastImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastStretchImage() is a simple image enhancement technique that attempts
% to improve the contrast in an image by 'stretching' the range of intensity
% values it contains to span a desired range of values. It differs from the
% more sophisticated histogram equalization in that it can only apply a
% linear scaling function to the image pixel values. As a result the
% 'enhancement' is less harsh.
%
% The format of the ContrastStretchImage method is:
%
%      MagickBooleanType ContrastStretchImage(Image *image,
%        const double black_point,const double white_point,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o black_point: the black point; black and white points have the range
%      of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.).
%
%    o white_point: the white point.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ContrastStretchImage() linearly stretches each channel's intensity range:
  a per-channel histogram locates the black/white levels from the requested
  pixel-count thresholds, a stretch map rescales everything between them to
  the full quantum range, and the map is applied to colormap and pixels.

    o image: the image (modified in place).
    o black_point: number of histogram pixels to saturate to black.
    o white_point: number of histogram pixels to saturate to white,
      counted from the top of the range.
    o exception: return any errors or warnings in this structure.
*/
MagickExport MagickBooleanType ContrastStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define MaxRange(color) ((double) ScaleQuantumToMap((Quantum) (color)))
#define ContrastStretchImageTag "ContrastStretch/Image"

  CacheView
    *image_view;

  double
    *black,
    *histogram,
    *stretch_map,
    *white;

  ImageType
    type;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate histogram and stretch map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  type=IdentifyImageType(image,exception);
  if (IsGrayImageType(type) != MagickFalse)
    (void) SetImageColorspace(image,GRAYColorspace,exception);
  black=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*black));
  white=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*white));
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*histogram));
  stretch_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*stretch_map));
  if ((black == (double *) NULL) || (white == (double *) NULL) ||
      (histogram == (double *) NULL) || (stretch_map == (double *) NULL))
    {
      /* Release whichever allocations succeeded before throwing. */
      if (stretch_map != (double *) NULL)
        stretch_map=(double *) RelinquishMagickMemory(stretch_map);
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (white != (double *) NULL)
        white=(double *) RelinquishMagickMemory(white);
      if (black != (double *) NULL)
        black=(double *) RelinquishMagickMemory(black);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  status=MagickTrue;
  (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      /*
        With the default channel mask all channels are binned by overall
        pixel intensity; otherwise each channel is binned by its own value.
      */
      pixel=GetPixelIntensity(image,p);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        if (image->channel_mask != DefaultChannels)
          pixel=(double) p[i];
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum(pixel))+i]++;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Find the histogram boundaries by locating the black/white levels.
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      intensity;

    ssize_t
      j;

    black[i]=0.0;
    white[i]=MaxRange(QuantumRange);
    /* Walk up from the bottom until black_point pixels are accumulated. */
    intensity=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      if (intensity > black_point)
        break;
    }
    black[i]=(double) j;
    /* Walk down from the top until white_point pixels are accumulated. */
    intensity=0.0;
    for (j=(ssize_t) MaxMap; j != 0; j--)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      if (intensity > ((double) image->columns*image->rows-white_point))
        break;
    }
    white[i]=(double) j;
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /*
    Stretch the histogram to create the stretched image mapping.
  */
  (void) memset(stretch_map,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*stretch_map));
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    ssize_t
      j;

    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      double
        gamma;

      /* Below black -> 0; above white -> QuantumRange; between -> linear. */
      gamma=PerceptibleReciprocal(white[i]-black[i]);
      if (j < (ssize_t) black[i])
        stretch_map[GetPixelChannels(image)*j+i]=0.0;
      else
        if (j > (ssize_t) white[i])
          stretch_map[GetPixelChannels(image)*j+i]=(double) QuantumRange;
        else
          if (black[i] != white[i])
            stretch_map[GetPixelChannels(image)*j+i]=(double) ScaleMapToQuantum(
              (double) (MaxMap*gamma*(j-black[i])));
    }
  }
  if (image->storage_class == PseudoClass)
    {
      ssize_t
        j;

      /*
        Stretch-contrast colormap.
      */
      for (j=0; j < (ssize_t) image->colors; j++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,RedPixelChannel);
            image->colormap[j].red=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+i];
          }
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,GreenPixelChannel);
            image->colormap[j].green=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+i];
          }
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,BluePixelChannel);
            image->colormap[j].blue=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+i];
          }
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,AlphaPixelChannel);
            image->colormap[j].alpha=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+i];
          }
      }
    }
  /*
    Stretch-contrast image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* Degenerate range: leave the channel untouched. */
        if (black[j] == white[j])
          continue;
        q[j]=ClampToQuantum(stretch_map[GetPixelChannels(image)*
          ScaleQuantumToMap(q[j])+j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ContrastStretchImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  stretch_map=(double *) RelinquishMagickMemory(stretch_map);
  white=(double *) RelinquishMagickMemory(white);
  black=(double *) RelinquishMagickMemory(black);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E n h a n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EnhanceImage() applies a digital filter that improves the quality of a
% noisy image.
%
% The format of the EnhanceImage method is:
%
% Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
{
#define EnhanceImageTag  "Enhance/Image"
/*
  Fold one neighborhood pixel (at r) into the weighted aggregate, but only if
  its color distance from the window's center pixel is below the 0.069
  threshold; r is then advanced to the next pixel of the row.  The per-channel
  coefficients (4.0+mean, 7.0-mean, 5.0-mean, ...) weight each channel's
  contribution to the squared distance.
*/
#define EnhancePixel(weight) \
  mean=QuantumScale*((double) GetPixelRed(image,r)+pixel.red)/2.0; \
  distance=QuantumScale*((double) GetPixelRed(image,r)-pixel.red); \
  distance_squared=(4.0+mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelGreen(image,r)+pixel.green)/2.0; \
  distance=QuantumScale*((double) GetPixelGreen(image,r)-pixel.green); \
  distance_squared+=(7.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlue(image,r)+pixel.blue)/2.0; \
  distance=QuantumScale*((double) GetPixelBlue(image,r)-pixel.blue); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlack(image,r)+pixel.black)/2.0; \
  distance=QuantumScale*((double) GetPixelBlack(image,r)-pixel.black); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelAlpha(image,r)+pixel.alpha)/2.0; \
  distance=QuantumScale*((double) GetPixelAlpha(image,r)-pixel.alpha); \
  distance_squared+=(5.0-mean)*distance*distance; \
  if (distance_squared < 0.069) \
    { \
      aggregate.red+=(weight)*GetPixelRed(image,r); \
      aggregate.green+=(weight)*GetPixelGreen(image,r); \
      aggregate.blue+=(weight)*GetPixelBlue(image,r); \
      aggregate.black+=(weight)*GetPixelBlack(image,r); \
      aggregate.alpha+=(weight)*GetPixelAlpha(image,r); \
      total_weight+=(weight); \
    } \
  r+=GetPixelChannels(image);

  CacheView
    *enhance_view,
    *image_view;

  Image
    *enhance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize enhanced image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  enhance_image=CloneImage(image,0,0,MagickTrue,
    exception);
  if (enhance_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(enhance_image,DirectClass,exception) == MagickFalse)
    {
      enhance_image=DestroyImage(enhance_image);
      return((Image *) NULL);
    }
  /*
    Enhance image: each output pixel is a selective weighted average of its
    5x5 neighborhood, suppressing noise while preserving edges.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  enhance_view=AcquireAuthenticCacheView(enhance_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,enhance_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    ssize_t
      center;

    if (status == MagickFalse)
      continue;
    /*
      Read a 5-row window centered on row y; the -2/+4 margins let virtual
      pixels supply the values beyond the image edges.
    */
    p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception);
    q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Channel offset of the window's center pixel within the 5-row buffer.
    */
    center=(ssize_t) GetPixelChannels(image)*(2*(image->columns+4)+2);
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        distance,
        distance_squared,
        mean,
        total_weight;

      PixelInfo
        aggregate;

      const Quantum
        *magick_restrict r;

      GetPixelInfo(image,&aggregate);
      total_weight=0.0;
      GetPixelInfoPixel(image,p+center,&pixel);
      r=p;
      /*
        Sweep the 5x5 neighborhood with a center-weighted kernel; each
        EnhancePixel() advances r by one pixel.
      */
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
      EnhancePixel(8.0); EnhancePixel(5.0);
      r=p+GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
      EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+2*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0);
      EnhancePixel(40.0); EnhancePixel(10.0);
      r=p+3*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
      EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+4*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
      EnhancePixel(8.0); EnhancePixel(5.0);
      if (total_weight > MagickEpsilon)
        {
          /*
            Replace the center pixel with the (rounded) weighted average;
            adding total_weight/2.0 rounds rather than truncates.
          */
          pixel.red=((aggregate.red+total_weight/2.0)/total_weight);
          pixel.green=((aggregate.green+total_weight/2.0)/total_weight);
          pixel.blue=((aggregate.blue+total_weight/2.0)/total_weight);
          pixel.black=((aggregate.black+total_weight/2.0)/total_weight);
          pixel.alpha=((aggregate.alpha+total_weight/2.0)/total_weight);
        }
      SetPixelViaPixelInfo(enhance_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(enhance_image);
    }
    if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,EnhanceImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  enhance_view=DestroyCacheView(enhance_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    enhance_image=DestroyImage(enhance_image);
  return(enhance_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E q u a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EqualizeImage() applies a histogram equalization to the image.
%
% The format of the EqualizeImage method is:
%
% MagickBooleanType EqualizeImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType EqualizeImage(Image *image,
  ExceptionInfo *exception)
{
#define EqualizeImageTag  "Equalize/Image"

  CacheView
    *image_view;

  double
    black[2*CompositePixelChannel+1],
    *equalize_map,
    *histogram,
    *map,
    white[2*CompositePixelChannel+1];

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize histogram arrays.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  if (AccelerateEqualizeImage(image,exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  equalize_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*equalize_map));
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*histogram));
  map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*sizeof(*map));
  if ((equalize_map == (double *) NULL) || (histogram == (double *) NULL) ||
      (map == (double *) NULL))
    {
      /*
        Release whichever buffers did get allocated before throwing.
      */
      if (map != (double *) NULL)
        map=(double *) RelinquishMagickMemory(map);
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (equalize_map != (double *) NULL)
        equalize_map=(double *) RelinquishMagickMemory(equalize_map);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram: one bin per (map level, channel) pair.
  */
  status=MagickTrue;
  (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          intensity;

        intensity=(double) p[i];
        /*
          When channels are synced, bin the pixel's overall intensity instead
          of each channel's own value so all channels equalize in lock-step.
        */
        if ((image->channel_mask & SyncChannels) != 0)
          intensity=GetPixelIntensity(image,p);
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum(intensity))+i]++;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Integrate the histogram to get the equalization map (per-channel CDF).
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      intensity;

    ssize_t
      j;

    intensity=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      map[GetPixelChannels(image)*j+i]=intensity;
    }
  }
  (void) memset(equalize_map,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*equalize_map));
  /*
    Zero the whole black/white arrays; sizeof(*black) zeroed only the first
    element, leaving the rest uninitialized.
  */
  (void) memset(black,0,sizeof(black));
  (void) memset(white,0,sizeof(white));
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    ssize_t
      j;

    /*
      black/white are the CDF extremes for channel i; a flat channel
      (black == white) is left unmapped.
    */
    black[i]=map[i];
    white[i]=map[GetPixelChannels(image)*MaxMap+i];
    if (black[i] != white[i])
      for (j=0; j <= (ssize_t) MaxMap; j++)
        equalize_map[GetPixelChannels(image)*j+i]=(double)
          ScaleMapToQuantum((double) ((MaxMap*(map[
          GetPixelChannels(image)*j+i]-black[i]))/(white[i]-black[i])));
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  map=(double *) RelinquishMagickMemory(map);
  if (image->storage_class == PseudoClass)
    {
      ssize_t
        j;

      /*
        Equalize colormap.
      */
      for (j=0; j < (ssize_t) image->colors; j++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              RedPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].red=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+
                channel];
          }
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              GreenPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].green=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+
                channel];
          }
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              BluePixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].blue=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+
                channel];
          }
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              AlphaPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].alpha=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+
                channel];
          }
      }
    }
  /*
    Equalize image: push every updatable, non-flat channel through the map.
  */
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (((traits & UpdatePixelTrait) == 0) || (black[j] == white[j]))
          continue;
        q[j]=ClampToQuantum(equalize_map[GetPixelChannels(image)*
          ScaleQuantumToMap(q[j])+j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,EqualizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  equalize_map=(double *) RelinquishMagickMemory(equalize_map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GammaImage() gamma-corrects a particular image channel. The same
% image viewed on different devices will have perceptual differences in the
% way the image's intensities are represented on the screen. Specify
% individual gamma levels for the red, green, and blue channels, or adjust
% all three with the gamma parameter. Values typically range from 0.8 to 2.3.
%
% You can also reduce the influence of a particular channel with a gamma
% value of 0.
%
% The format of the GammaImage method is:
%
% MagickBooleanType GammaImage(Image *image,const double gamma,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o gamma: the image gamma.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double gamma_pow(const double value,const double gamma)
{
return(value < 0.0 ? value : pow(value,gamma));
}
MagickExport MagickBooleanType GammaImage(Image *image,const double gamma,
  ExceptionInfo *exception)
{
#define GammaImageTag  "Gamma/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    *gamma_map;

  ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize gamma maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (gamma == 1.0)
    return(MagickTrue);  /* identity correction: nothing to do */
  gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
  if (gamma_map == (Quantum *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
  /*
    Precompute the transfer curve: gamma_map[i]=(i/MaxMap)^(1/gamma); a gamma
    of 0.0 leaves the map all-zero, reducing the channel's influence.
  */
  if (gamma != 0.0)
    for (i=0; i <= (ssize_t) MaxMap; i++)
      gamma_map[i]=ScaleMapToQuantum((double) (MaxMap*pow((double) i/
        MaxMap,PerceptibleReciprocal(gamma))));
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Gamma-correct colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].red))];
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].green))];
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].blue))];
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].alpha))];
    }
  /*
    Gamma-correct image: look each updatable channel up in the precomputed
    curve, one row per thread.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=gamma_map[ScaleQuantumToMap(ClampToQuantum((MagickRealType)
          q[j]))];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,GammaImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
  /*
    Track the cumulative gamma applied to the image.
  */
  if (image->gamma != 0.0)
    image->gamma*=gamma;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GrayscaleImage() converts the image to grayscale.
%
% The format of the GrayscaleImage method is:
%
% MagickBooleanType GrayscaleImage(Image *image,
% const PixelIntensityMethod method ,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the pixel intensity method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GrayscaleImage(Image *image,
  const PixelIntensityMethod method,ExceptionInfo *exception)
{
#define GrayscaleImageTag  "Grayscale/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /*
        Expand the colormap into direct pixels before converting.
      */
      if (SyncImage(image,exception) == MagickFalse)
        return(MagickFalse);
      if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
        return(MagickFalse);
    }
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Fast path: delegate to the OpenCL kernel when available.
  */
  if (AccelerateGrayscaleImage(image,method,exception) != MagickFalse)
    {
      image->intensity=method;
      image->type=GrayscaleType;
      if ((method == Rec601LuminancePixelIntensityMethod) ||
          (method == Rec709LuminancePixelIntensityMethod))
        return(SetImageColorspace(image,LinearGRAYColorspace,exception));
      return(SetImageColorspace(image,GRAYColorspace,exception));
    }
#endif
  /*
    Grayscale image: collapse R,G,B into a single intensity per pixel.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        blue,
        green,
        red,
        intensity;

      red=(MagickRealType) GetPixelRed(image,q);
      green=(MagickRealType) GetPixelGreen(image,q);
      blue=(MagickRealType) GetPixelBlue(image,q);
      intensity=0.0;
      switch (method)
      {
        case AveragePixelIntensityMethod:
        {
          intensity=(red+green+blue)/3.0;
          break;
        }
        case BrightnessPixelIntensityMethod:
        {
          /* HSB brightness: the maximum component. */
          intensity=MagickMax(MagickMax(red,green),blue);
          break;
        }
        case LightnessPixelIntensityMethod:
        {
          /* HSL lightness: midpoint of min and max components. */
          intensity=(MagickMin(MagickMin(red,green),blue)+
            MagickMax(MagickMax(red,green),blue))/2.0;
          break;
        }
        case MSPixelIntensityMethod:
        {
          /* Mean of squared components. */
          intensity=(MagickRealType) (((double) red*red+green*green+
            blue*blue)/3.0);
          break;
        }
        case Rec601LumaPixelIntensityMethod:
        {
          /* Luma is defined over gamma-encoded (sRGB) components. */
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec601LuminancePixelIntensityMethod:
        {
          /* Luminance is defined over linear components. */
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec709LumaPixelIntensityMethod:
        default:
        {
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case Rec709LuminancePixelIntensityMethod:
        {
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case RMSPixelIntensityMethod:
        {
          /* Root-mean-square of the components. */
          intensity=(MagickRealType) (sqrt((double) red*red+green*green+
            blue*blue)/sqrt(3.0));
          break;
        }
      }
      SetPixelGray(image,ClampToQuantum(intensity),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,GrayscaleImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  image->intensity=method;
  image->type=GrayscaleType;
  /*
    Luminance methods produce linear gray; luma methods produce encoded gray.
  */
  if ((method == Rec601LuminancePixelIntensityMethod) ||
      (method == Rec709LuminancePixelIntensityMethod))
    return(SetImageColorspace(image,LinearGRAYColorspace,exception));
  return(SetImageColorspace(image,GRAYColorspace,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H a l d C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% HaldClutImage() applies a Hald color lookup table to the image. A Hald
% color lookup table is a 3-dimensional color cube mapped to 2 dimensions.
% Create it with the HALD coder. You can apply any color transformation to
% the Hald image and then use this method to apply the transform to the
% image.
%
% The format of the HaldClutImage method is:
%
% MagickBooleanType HaldClutImage(Image *image,Image *hald_image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o hald_image: the color lookup table image for replacement color values.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType HaldClutImage(Image *image,
  const Image *hald_image,ExceptionInfo *exception)
{
#define HaldClutImageTag  "Clut/Image"

  /*
    Fractional position of a pixel's color within the Hald color cube.
  */
  typedef struct _HaldInfo
  {
    double
      x,
      y,
      z;
  } HaldInfo;

  CacheView
    *hald_view,
    *image_view;

  double
    width;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    zero;

  size_t
    cube_size,
    length,
    level;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(hald_image != (Image *) NULL);
  assert(hald_image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Hald clut image.
  */
  status=MagickTrue;
  progress=0;
  length=(size_t) MagickMin((MagickRealType) hald_image->columns,
    (MagickRealType) hald_image->rows);
  /*
    Recover the Hald order from the CLUT dimensions: find the smallest level
    with level^3 >= min(columns,rows), then square it to get the number of
    samples per color axis.
  */
  for (level=2; (level*level*level) < length; level++) ;
  level*=level;
  cube_size=level*level;
  width=(double) hald_image->columns;
  GetPixelInfo(hald_image,&zero);
  hald_view=AcquireVirtualCacheView(hald_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        area,
        offset;

      HaldInfo
        point;

      PixelInfo
        pixel,
        pixel1,
        pixel2,
        pixel3,
        pixel4;

      /*
        Map the pixel's R,G,B to cube coordinates; offset addresses the cube
        cell's low corner within the (row-major) Hald image, and the
        fractional parts drive the interpolation below.
      */
      point.x=QuantumScale*(level-1.0)*GetPixelRed(image,q);
      point.y=QuantumScale*(level-1.0)*GetPixelGreen(image,q);
      point.z=QuantumScale*(level-1.0)*GetPixelBlue(image,q);
      offset=point.x+level*floor(point.y)+cube_size*floor(point.z);
      point.x-=floor(point.x);
      point.y-=floor(point.y);
      point.z-=floor(point.z);
      /*
        Sample the cell's near z-plane: blend the two green-axis neighbors.
      */
      pixel1=zero;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset,width),floor(offset/width),&pixel1,exception);
      if (status == MagickFalse)
        break;
      pixel2=zero;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
      if (status == MagickFalse)
        break;
      pixel3=zero;
      area=point.y;
      if (hald_image->interpolate == NearestInterpolatePixel)
        area=(point.y < 0.5) ? 0.0 : 1.0;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        area,&pixel3);
      /*
        Sample the cell's far z-plane the same way, then blend the two planes
        along the blue axis.
      */
      offset+=cube_size;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset,width),floor(offset/width),&pixel1,exception);
      if (status == MagickFalse)
        break;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
      if (status == MagickFalse)
        break;
      pixel4=zero;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        area,&pixel4);
      pixel=zero;
      area=point.z;
      if (hald_image->interpolate == NearestInterpolatePixel)
        area=(point.z < 0.5)? 0.0 : 1.0;
      CompositePixelInfoAreaBlend(&pixel3,pixel3.alpha,&pixel4,pixel4.alpha,
        area,&pixel);
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,ClampToQuantum(pixel.red),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,ClampToQuantum(pixel.green),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,ClampToQuantum(pixel.blue),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,ClampToQuantum(pixel.black),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,HaldClutImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  hald_view=DestroyCacheView(hald_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImage() adjusts the levels of a particular image channel by
% scaling the colors falling between specified white and black points to
% the full available quantum range.
%
% The parameters provided represent the black, and white points. The black
% point specifies the darkest color in the image. Colors darker than the
% black point are set to zero. White point specifies the lightest color in
% the image. Colors brighter than the white point are set to the maximum
% quantum value.
%
% If a '!' flag is given, map black and white colors to the given levels
% rather than mapping those levels to black and white. See
% LevelizeImage() below.
%
% Gamma specifies a gamma correction to apply to the image.
%
% The format of the LevelImage method is:
%
% MagickBooleanType LevelImage(Image *image,const double black_point,
% const double white_point,const double gamma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: The level to map zero (black) to.
%
% o white_point: The level to map QuantumRange (white) to.
%
% o gamma: adjust gamma by this factor before mapping values.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Map one channel value through the level transform: normalize pixel into
  [0,1] relative to the black/white points, apply the inverse-gamma power,
  then rescale to the quantum range.  PerceptibleReciprocal() guards both the
  degenerate black_point == white_point case and gamma == 0.
*/
static inline double LevelPixel(const double black_point,
  const double white_point,const double gamma,const double pixel)
{
  double
    normalized;

  normalized=PerceptibleReciprocal(white_point-black_point)*
    ((double) pixel-black_point);
  return(QuantumRange*gamma_pow(normalized,PerceptibleReciprocal(gamma)));
}
MagickExport MagickBooleanType LevelImage(Image *image,const double black_point,
  const double white_point,const double gamma,ExceptionInfo *exception)
{
#define LevelImageTag  "Level/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].red));
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].green));
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].blue));
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].alpha));
    }
  /*
    Level image: stretch each updatable channel so black_point maps to 0 and
    white_point maps to QuantumRange, with gamma applied in between.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (double) q[j]));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,LevelImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  (void) ClampImage(image,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelizeImage() applies the reversed LevelImage() operation to just
% the specific channels specified. It compresses the full range of color
% values, so that they lie between the given black and white points. Gamma is
% applied before the values are mapped.
%
% LevelizeImage() can be called with by using a +level command line
% API option, or using a '!' on a -level or LevelImage() geometry string.
%
% It can be used to de-contrast a greyscale image to the exact levels
% specified. Or by using specific levels for each channel of an image you
% can convert a gray-scale image to any linear color gradient, according to
% those levels.
%
% The format of the LevelizeImage method is:
%
% MagickBooleanType LevelizeImage(Image *image,const double black_point,
% const double white_point,const double gamma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: The level to map zero (black) to.
%
% o white_point: The level to map QuantumRange (white) to.
%
% o gamma: adjust gamma by this factor before mapping values.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelizeImage(Image *image,
  const double black_point,const double white_point,const double gamma,
  ExceptionInfo *exception)
{
#define LevelizeImageTag "Levelize/Image"
/*
  Inverse level transfer: gamma is applied first (via gamma_pow, which —
  unlike pow — presumably tolerates negative HDRI inputs; confirm against its
  definition earlier in this file), then the full quantum range is compressed
  into [black_point,white_point].  The result is clamped to the quantum range.
  Note the macro closes over this function's black_point/white_point/gamma
  parameters, so it is only meaningful inside this function.
*/
#define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \
  (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point)

  CacheView
    *image_view;          /* authentic (writable) view of image pixels */

  MagickBooleanType
    status;               /* MagickFalse once any row fails */

  MagickOffsetType
    progress;             /* rows completed, for the progress monitor */

  ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap: only channels whose traits request updates are
        transformed.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) LevelizeValue(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) LevelizeValue(
          image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) LevelizeValue(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) LevelizeValue(
          image->colormap[i].alpha);
    }
  /*
    Level image: rows are processed independently, so the loop parallelizes
    with one authentic pixel region per row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    /* a prior row failed: skip remaining work but keep loop shape for OMP */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        j;

      /* apply the transfer to every channel flagged for update */
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=LevelizeValue(q[j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress is shared across threads; increment must be atomic */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,LevelizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImageColors() maps the given color to "black" and "white" values,
% linearly spreading out the colors, and level values on a channel by channel
% bases, as per LevelImage(). The given colors allows you to specify
% different level ranges for each of the color channels separately.
%
% If the boolean 'invert' is set true the image values will be modified in the
% reverse direction. That is any existing "black" and "white" colors in the
% image will become the color values given, with all other values compressed
% appropriately. This effectively maps a greyscale gradient into the given
% color gradient.
%
% The format of the LevelImageColors method is:
%
% MagickBooleanType LevelImageColors(Image *image,
% const PixelInfo *black_color,const PixelInfo *white_color,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_color: The color to map black to/from
%
% o white_color: The color to map white to/from
%
% o invert: if true map the colors (levelize), rather than from (level)
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelImageColors(Image *image,
  const PixelInfo *black_color,const PixelInfo *white_color,
  const MagickBooleanType invert,ExceptionInfo *exception)
{
  ChannelType
    channel_mask;

  MagickBooleanType
    (*level_method)(Image *,const double,const double,const double,
      ExceptionInfo *);

  MagickStatusType
    status;

  /*
    Map the given colors to "black" and "white" on a channel-by-channel
    basis.  When invert is MagickFalse each channel is leveled with
    LevelImage(); otherwise it is levelized with LevelizeImage().  The two
    methods share a signature, so a single sequence of per-channel calls
    through a function pointer replaces the former duplicated branches;
    every channel is processed under a temporary channel mask that is
    restored afterwards.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsGrayColorspace(black_color->colorspace) == MagickFalse) ||
       (IsGrayColorspace(white_color->colorspace) == MagickFalse)))
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  level_method=(invert == MagickFalse) ? LevelImage : LevelizeImage;
  status=MagickTrue;
  if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
    {
      channel_mask=SetImageChannelMask(image,RedChannel);
      status&=level_method(image,black_color->red,white_color->red,1.0,
        exception);
      (void) SetImageChannelMask(image,channel_mask);
    }
  if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
    {
      channel_mask=SetImageChannelMask(image,GreenChannel);
      status&=level_method(image,black_color->green,white_color->green,1.0,
        exception);
      (void) SetImageChannelMask(image,channel_mask);
    }
  if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
    {
      channel_mask=SetImageChannelMask(image,BlueChannel);
      status&=level_method(image,black_color->blue,white_color->blue,1.0,
        exception);
      (void) SetImageChannelMask(image,channel_mask);
    }
  if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
      (image->colorspace == CMYKColorspace))
    {
      channel_mask=SetImageChannelMask(image,BlackChannel);
      status&=level_method(image,black_color->black,white_color->black,1.0,
        exception);
      (void) SetImageChannelMask(image,channel_mask);
    }
  if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
      (image->alpha_trait != UndefinedPixelTrait))
    {
      channel_mask=SetImageChannelMask(image,AlphaChannel);
      status&=level_method(image,black_color->alpha,white_color->alpha,1.0,
        exception);
      (void) SetImageChannelMask(image,channel_mask);
    }
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i n e a r S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LinearStretchImage() discards any pixels below the black point and above
% the white point and levels the remaining pixels.
%
% The format of the LinearStretchImage method is:
%
% MagickBooleanType LinearStretchImage(Image *image,
% const double black_point,const double white_point,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LinearStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define LinearStretchImageTag "LinearStretch/Image"

  CacheView
    *image_view;

  double
    *histogram,      /* MaxMap+1 bins of pixel-intensity counts */
    intensity;

  MagickBooleanType
    status;

  ssize_t
    black,           /* histogram bin selected as the black point */
    white,           /* histogram bin selected as the white point */
    y;

  /*
    Allocate histogram and linear map.  black_point/white_point are pixel
    COUNTS (not quantum values): the number of darkest/brightest pixels to
    clip before leveling.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Form histogram: one count per pixel, binned by its clamped intensity.
  */
  (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    /* NOTE(review): a failed row get breaks out and stretches using the
       partial histogram accumulated so far — confirm this best-effort
       behavior is intended */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      intensity=GetPixelIntensity(image,p);
      histogram[ScaleQuantumToMap(ClampToQuantum(intensity))]++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Find the histogram boundaries by locating the black and white point
    levels: walk inward from each end until the cumulative count reaches the
    requested clip count.  NOTE(review): the black scan never examines bin
    MaxMap and the white scan never examines bin 0 — confirm this asymmetry
    is intentional (it matches long-standing behavior).
  */
  intensity=0.0;
  for (black=0; black < (ssize_t) MaxMap; black++)
  {
    intensity+=histogram[black];
    if (intensity >= black_point)
      break;
  }
  intensity=0.0;
  for (white=(ssize_t) MaxMap; white != 0; white--)
  {
    intensity+=histogram[white];
    if (intensity >= white_point)
      break;
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /* level the remaining pixels between the located boundaries (gamma 1.0) */
  status=LevelImage(image,(double) ScaleMapToQuantum((MagickRealType) black),
    (double) ScaleMapToQuantum((MagickRealType) white),1.0,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d u l a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModulateImage() lets you control the brightness, saturation, and hue
% of an image. Modulate represents the brightness, saturation, and hue
% as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the
% modulation is lightness, saturation, and hue. For HWB, use blackness,
% whiteness, and hue. And for HCL, use chroma, luma, and hue.
%
% The format of the ModulateImage method is:
%
% MagickBooleanType ModulateImage(Image *image,const char *modulate,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulate: Define the percent change in brightness, saturation, and hue.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void ModulateHCL(const double percent_hue,
  const double percent_chroma,const double percent_luma,double *red,
  double *green,double *blue)
{
  double
    chroma,
    hue,
    luma;

  /*
    Modulate in HCL space: rotate hue and scale chroma and luma by the given
    percentages (100 leaves a component unchanged), then convert back.
  */
  ConvertRGBToHCL(*red,*green,*blue,&hue,&chroma,&luma);
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  chroma=0.01*percent_chroma*chroma;
  luma=0.01*percent_luma*luma;
  ConvertHCLToRGB(hue,chroma,luma,red,green,blue);
}
static inline void ModulateHCLp(const double percent_hue,
  const double percent_chroma,const double percent_luma,double *red,
  double *green,double *blue)
{
  double
    chroma,
    hue,
    luma;

  /*
    Modulate in HCLp space: rotate hue and scale chroma and luma by the
    given percentages (100 leaves a component unchanged), then convert back.
  */
  ConvertRGBToHCLp(*red,*green,*blue,&hue,&chroma,&luma);
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  chroma=0.01*percent_chroma*chroma;
  luma=0.01*percent_luma*luma;
  ConvertHCLpToRGB(hue,chroma,luma,red,green,blue);
}
static inline void ModulateHSB(const double percent_hue,
  const double percent_saturation,const double percent_brightness,double *red,
  double *green,double *blue)
{
  double
    brightness,
    hue,
    saturation;

  /*
    Modulate in HSB space: rotate hue and scale saturation and brightness by
    the given percentages (100 leaves a component unchanged).
  */
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  saturation=0.01*percent_saturation*saturation;
  brightness=0.01*percent_brightness*brightness;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}
static inline void ModulateHSI(const double percent_hue,
  const double percent_saturation,const double percent_intensity,double *red,
  double *green,double *blue)
{
  double
    hue,
    intensity,
    saturation;

  /*
    Modulate in HSI space: rotate hue and scale saturation and intensity by
    the given percentages (100 leaves a component unchanged).
  */
  ConvertRGBToHSI(*red,*green,*blue,&hue,&saturation,&intensity);
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  saturation=0.01*percent_saturation*saturation;
  intensity=0.01*percent_intensity*intensity;
  ConvertHSIToRGB(hue,saturation,intensity,red,green,blue);
}
static inline void ModulateHSL(const double percent_hue,
  const double percent_saturation,const double percent_lightness,double *red,
  double *green,double *blue)
{
  double
    hue,
    lightness,
    saturation;

  /*
    Modulate in HSL space: rotate hue and scale saturation and lightness by
    the given percentages (100 leaves a component unchanged).
  */
  ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness);
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  saturation=0.01*percent_saturation*saturation;
  lightness=0.01*percent_lightness*lightness;
  ConvertHSLToRGB(hue,saturation,lightness,red,green,blue);
}
static inline void ModulateHSV(const double percent_hue,
  const double percent_saturation,const double percent_value,double *red,
  double *green,double *blue)
{
  double
    hue,
    saturation,
    value;

  /*
    Modulate in HSV space: rotate hue and scale saturation and value by the
    given percentages (100 leaves a component unchanged).
  */
  ConvertRGBToHSV(*red,*green,*blue,&hue,&saturation,&value);
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  saturation=0.01*percent_saturation*saturation;
  value=0.01*percent_value*value;
  ConvertHSVToRGB(hue,saturation,value,red,green,blue);
}
static inline void ModulateHWB(const double percent_hue,
  const double percent_whiteness,const double percent_blackness,double *red,
  double *green,double *blue)
{
  double
    blackness,
    hue,
    whiteness;

  /*
    Modulate in HWB space: rotate hue and scale whiteness and blackness by
    the given percentages (100 leaves a component unchanged).
  */
  ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness);
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  whiteness=0.01*percent_whiteness*whiteness;
  blackness=0.01*percent_blackness*blackness;
  ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue);
}
static inline void ModulateLCHab(const double percent_luma,
  const double percent_chroma,const double percent_hue,
  const IlluminantType illuminant,double *red,double *green,double *blue)
{
  double
    chroma,
    hue,
    luma;

  /*
    Modulate in LCH(ab) space under the given illuminant: scale luma and
    chroma and rotate hue by the given percentages (100 leaves a component
    unchanged).
  */
  ConvertRGBToLCHab(*red,*green,*blue,illuminant,&luma,&chroma,&hue);
  luma=0.01*percent_luma*luma;
  chroma=0.01*percent_chroma*chroma;
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertLCHabToRGB(luma,chroma,hue,illuminant,red,green,blue);
}
static inline void ModulateLCHuv(const double percent_luma,
  const double percent_chroma,const double percent_hue,
  const IlluminantType illuminant,double *red,double *green,double *blue)
{
  double
    chroma,
    hue,
    luma;

  /*
    Modulate in LCH(uv) space under the given illuminant: scale luma and
    chroma and rotate hue by the given percentages (100 leaves a component
    unchanged).
  */
  ConvertRGBToLCHuv(*red,*green,*blue,illuminant,&luma,&chroma,&hue);
  luma=0.01*percent_luma*luma;
  chroma=0.01*percent_chroma*chroma;
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertLCHuvToRGB(luma,chroma,hue,illuminant,red,green,blue);
}
MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate,
  ExceptionInfo *exception)
{
#define ModulateImageTag "Modulate/Image"

  CacheView
    *image_view;

  ColorspaceType
    colorspace = UndefinedColorspace;

  const char
    *artifact;

  double
    percent_brightness = 100.0,
    percent_hue = 100.0,
    percent_saturation = 100.0;

  GeometryInfo
    geometry_info;

  IlluminantType
    illuminant = D65Illuminant;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickStatusType
    flags;

  ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize modulate table.  The modulate string is parsed as
    "brightness[,saturation[,hue]]" percentages; any omitted term defaults
    to 100 (no change).  The working color model is chosen with the
    "modulate:colorspace" artifact (default HSL) and the LCH illuminant with
    "color:illuminant" (default D65).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (modulate == (char *) NULL)
    return(MagickFalse);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  flags=ParseGeometry(modulate,&geometry_info);
  if ((flags & RhoValue) != 0)
    percent_brightness=geometry_info.rho;
  if ((flags & SigmaValue) != 0)
    percent_saturation=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    percent_hue=geometry_info.xi;
  artifact=GetImageArtifact(image,"modulate:colorspace");
  if (artifact != (const char *) NULL)
    colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions,
      MagickFalse,artifact);
  artifact=GetImageArtifact(image,"color:illuminant");
  if (artifact != (const char *) NULL)
    {
      illuminant=(IlluminantType) ParseCommandOption(MagickIlluminantOptions,
        MagickFalse,artifact);
      if ((ssize_t) illuminant < 0)
        {
          /* unknown illuminant: fall back to defaults for both settings */
          illuminant=UndefinedIlluminant;
          colorspace=UndefinedColorspace;
        }
    }
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      double
        blue,
        green,
        red;

      /*
        Modulate image colormap.  This switch must stay in lock-step with
        the DirectClass pixel switch below so PseudoClass and DirectClass
        images produce identical results.
      */
      red=(double) image->colormap[i].red;
      green=(double) image->colormap[i].green;
      blue=(double) image->colormap[i].blue;
      switch (colorspace)
      {
        case HCLColorspace:
        {
          ModulateHCL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HCLpColorspace:
        {
          ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSIColorspace:
        {
          ModulateHSI(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSVColorspace:
        {
          ModulateHSV(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case LCHabColorspace:
        {
          ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
            illuminant,&red,&green,&blue);
          break;
        }
        /*
          LCHColorspace is an alias for LCHuv, matching the DirectClass
          path below (it was previously grouped with LCHab here, making
          colormap and pixel results disagree).
        */
        case LCHColorspace:
        case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
            illuminant,&red,&green,&blue);
          break;
        }
      }
      image->colormap[i].red=red;
      image->colormap[i].green=green;
      image->colormap[i].blue=blue;
    }
  /*
    Modulate image.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  if (AccelerateModulateImage(image,percent_brightness,percent_hue,
        percent_saturation,colorspace,exception) != MagickFalse)
    return(MagickTrue);
#endif
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        blue,
        green,
        red;

      red=(double) GetPixelRed(image,q);
      green=(double) GetPixelGreen(image,q);
      blue=(double) GetPixelBlue(image,q);
      switch (colorspace)
      {
        case HCLColorspace:
        {
          ModulateHCL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HCLpColorspace:
        {
          ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        /* HSI was missing here, silently falling through to the HSL
           default and diverging from the colormap path above */
        case HSIColorspace:
        {
          ModulateHSI(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSVColorspace:
        {
          ModulateHSV(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case LCHabColorspace:
        {
          ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
            illuminant,&red,&green,&blue);
          break;
        }
        case LCHColorspace:
        case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
            illuminant,&red,&green,&blue);
          break;
        }
      }
      SetPixelRed(image,ClampToQuantum(red),q);
      SetPixelGreen(image,ClampToQuantum(green),q);
      SetPixelBlue(image,ClampToQuantum(blue),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ModulateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e g a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NegateImage() negates the colors in the reference image. The grayscale
% option means that only grayscale values within the image are negated.
%
% The format of the NegateImage method is:
%
% MagickBooleanType NegateImage(Image *image,
% const MagickBooleanType grayscale,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o grayscale: If MagickTrue, only negate grayscale pixels within the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType NegateImage(Image *image,
  const MagickBooleanType grayscale,ExceptionInfo *exception)
{
#define NegateImageTag "Negate/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    y;

  /*
    Negate the colors of the image: each updatable channel value v becomes
    QuantumRange-v.  When grayscale is MagickTrue only pixels (and colormap
    entries) whose red/green/blue components are equal are negated.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Negate colormap (skip non-gray entries in grayscale mode).
      */
      if (grayscale != MagickFalse)
        if ((image->colormap[i].red != image->colormap[i].green) ||
            (image->colormap[i].green != image->colormap[i].blue))
          continue;
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=QuantumRange-image->colormap[i].red;
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=QuantumRange-image->colormap[i].green;
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
    }
  /*
    Negate image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
  if (grayscale != MagickFalse)
    {
      /*
        Grayscale-only negation: serial row loop, skipping non-gray pixels.
      */
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        Quantum
          *magick_restrict q;

        ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          ssize_t
            j;

          if (IsPixelGray(image,q) == MagickFalse)
            {
              q+=GetPixelChannels(image);
              continue;
            }
          for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,j);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[j]=QuantumRange-q[j];
          }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            progress++;
            proceed=SetImageProgress(image,NegateImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      /*
        Was return(MagickTrue): pixel-get/sync failures were computed into
        status but then discarded, silently reporting success to the
        caller.  Propagate the real status instead.
      */
      return(status);
    }
  /*
    Negate image (all pixels): rows are independent, so parallelize.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=QuantumRange-q[j];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,NegateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N o r m a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The NormalizeImage() method enhances the contrast of a color image by
% mapping the darkest 0.15 percent of all pixels to black and the brightest
% 0.05 percent to white.
%
% The format of the NormalizeImage method is:
%
% MagickBooleanType NormalizeImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType NormalizeImage(Image *image,
  ExceptionInfo *exception)
{
  double
    pixels;

  /*
    Enhance contrast by clipping fixed percentiles of the intensity
    histogram: the darkest 0.15% of pixels saturate to black and the
    brightest 0.05% saturate to white; ContrastStretchImage() levels the
    remainder (its thresholds are pixel counts, hence the scaling by the
    total pixel count).
  */
  pixels=(double) image->columns*image->rows;
  return(ContrastStretchImage(image,0.0015*pixels,0.9995*pixels,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i g m o i d a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SigmoidalContrastImage() adjusts the contrast of an image with a non-linear
% sigmoidal contrast algorithm. Increase the contrast of the image using a
% sigmoidal transfer function without saturating highlights or shadows.
% Contrast indicates how much to increase the contrast (0 is none; 3 is
% typical; 20 is pushing it); mid-point indicates where midtones fall in the
% resultant image (0 is white; 50% is middle-gray; 100% is black). Set
% sharpen to MagickTrue to increase the image contrast otherwise the contrast
% is reduced.
%
% The format of the SigmoidalContrastImage method is:
%
% MagickBooleanType SigmoidalContrastImage(Image *image,
% const MagickBooleanType sharpen,const char *levels,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
% o contrast: strength of the contrast, the larger the number the more
% 'threshold-like' it becomes.
%
% o midpoint: midpoint of the function as a color value 0 to QuantumRange.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
ImageMagick 6 has a version of this function which uses LUTs.
*/
/*
Sigmoidal function Sigmoidal with inflexion point moved to b and "slope
constant" set to a.
The first version, based on the hyperbolic tangent tanh, when combined with
the scaling step, is an exact arithmetic clone of the sigmoid function
based on the logistic curve. The equivalence is based on the identity
1/(1+exp(-t)) = (1+tanh(t/2))/2
(http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the
scaled sigmoidal derivation is invariant under affine transformations of
the ordinate.
The tanh version is almost certainly more accurate and cheaper. The 0.5
factor in the argument is to clone the legacy ImageMagick behavior. The
reason for making the define depend on atanh even though it only uses tanh
has to do with the construction of the inverse of the scaled sigmoidal.
*/
#if defined(MAGICKCORE_HAVE_ATANH)
#define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) )
#else
#define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) )
#endif
/*
Scaled sigmoidal function:
( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) /
( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) )
See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and
http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit
of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by
zero. This is fixed below by exiting immediately when contrast is small,
leaving the image (or colormap) unmodified. This appears to be safe because
the series expansion of the logistic sigmoidal function around x=b is
1/2-a*(b-x)/4+...
so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh).
*/
#define ScaledSigmoidal(a,b,x) ( \
(Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \
(Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) )
/*
Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b
may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic
sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even
when creating a LUT from in gamut values, hence the branching. In
addition, HDRI may have out of gamut values.
InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal:
It is only a right inverse. This is unavoidable.
*/
static inline double InverseScaledSigmoidal(const double a,const double b,
  const double x)
{
  /*
    Right inverse of ScaledSigmoidal: undo the [s(0),s(1)] normalization,
    then invert the underlying sigmoid.  The argument is clamped to the
    open domain of the inverse (tanh-based build: (-1,1); logistic build:
    (0,1)) since out-of-gamut or boundary inputs can land outside it.
  */
  const double sig0=Sigmoidal(a,b,0.0);
  const double sig1=Sigmoidal(a,b,1.0);
  const double argument=(sig1-sig0)*x+sig0;

#if defined(MAGICKCORE_HAVE_ATANH)
  const double clamped=argument < -1+MagickEpsilon ? -1+MagickEpsilon :
    (argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument);
  return(b+(2.0/a)*atanh(clamped));
#else
  const double clamped=argument < MagickEpsilon ? MagickEpsilon :
    (argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument);
  return(b-log(1.0/clamped-1.0)/a);
#endif
}
MagickExport MagickBooleanType SigmoidalContrastImage(Image *image,
  const MagickBooleanType sharpen,const double contrast,const double midpoint,
  ExceptionInfo *exception)
{
#define SigmoidalContrastImageTag "SigmoidalContrast/Image"
/*
  Forward (sharpen) and inverse transfer functions, expressed on quantum
  values.  Both macros close over this function's contrast and midpoint
  parameters; ScaledSigmoidal/InverseScaledSigmoidal work on [0,1].
*/
#define ScaledSig(x) ( ClampToQuantum(QuantumRange* \
  ScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )
#define InverseScaledSig(x) ( ClampToQuantum(QuantumRange* \
  InverseScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Convenience macros.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Side effect: may clamp values unless contrast<MagickEpsilon, in which
    case nothing is done (the a->0 limit of the scaled sigmoidal is the
    identity, and a=0 would divide by zero — see the notes above).
  */
  if (contrast < MagickEpsilon)
    return(MagickTrue);
  /*
    Sigmoidal-contrast enhance colormap: forward transfer when sharpen is
    set, inverse transfer otherwise, per updatable channel.
  */
  if (image->storage_class == PseudoClass)
    {
      ssize_t
        i;

      if( sharpen != MagickFalse )
        for (i=0; i < (ssize_t) image->colors; i++)
        {
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].red=(MagickRealType) ScaledSig(
              image->colormap[i].red);
          if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].green=(MagickRealType) ScaledSig(
              image->colormap[i].green);
          if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].blue=(MagickRealType) ScaledSig(
              image->colormap[i].blue);
          if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].alpha=(MagickRealType) ScaledSig(
              image->colormap[i].alpha);
        }
      else
        for (i=0; i < (ssize_t) image->colors; i++)
        {
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].red=(MagickRealType) InverseScaledSig(
              image->colormap[i].red);
          if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].green=(MagickRealType) InverseScaledSig(
              image->colormap[i].green);
          if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].blue=(MagickRealType) InverseScaledSig(
              image->colormap[i].blue);
          if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].alpha=(MagickRealType) InverseScaledSig(
              image->colormap[i].alpha);
        }
    }
  /*
    Sigmoidal-contrast enhance image: one parallel pass, one authentic
    pixel region per row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if( sharpen != MagickFalse )
          q[i]=ScaledSig(q[i]);
        else
          q[i]=InverseScaledSig(q[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* shared progress counter: atomic under OpenMP */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W h i t e B a l a n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WhiteBalanceImage() applies white balancing to an image according to a
% grayworld assumption in the LAB colorspace.
%
% The format of the WhiteBalanceImage method is:
%
% MagickBooleanType WhiteBalanceImage(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WhiteBalanceImage(Image *image,
ExceptionInfo *exception)
{
#define WhiteBalanceImageTag "WhiteBalance/Image"
CacheView
*image_view;
const char
*artifact;
double
a_mean,
b_mean;
MagickOffsetType
progress;
MagickStatusType
status;
ssize_t
y;
/*
White balance image.
Gray-world white balance: convert to Lab, find the mean of the a and b
chroma channels, then shift each pixel's chroma toward neutral, scaled by
its luminance, and convert back to sRGB.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (IsEventLogging() != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
status=TransformImageColorspace(image,LabColorspace,exception);
a_mean=0.0;
b_mean=0.0;
image_view=AcquireAuthenticCacheView(image,exception);
/*
Pass 1 (serial): accumulate the mean of the a and b channels, expressed
as signed offsets from neutral (QuantumScale*value - 0.5).
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
a_mean+=QuantumScale*GetPixela(image,p)-0.5;
b_mean+=QuantumScale*GetPixelb(image,p)-0.5;
p+=GetPixelChannels(image);
}
}
a_mean/=((double) image->columns*image->rows);
b_mean/=((double) image->columns*image->rows);
progress=0;
/*
Pass 2 (parallel): subtract the luminance-weighted chroma bias from each
pixel. The 1.1 factor slightly over-corrects; presumably an empirical
tuning constant -- TODO confirm against upstream ImageMagick.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
a,
b;
/*
Scale the chroma distance shifted according to amount of luminance.
*/
a=(double) GetPixela(image,q)-1.1*GetPixelL(image,q)*a_mean;
b=(double) GetPixelb(image,q)-1.1*GetPixelL(image,q)*b_mean;
SetPixela(image,ClampToQuantum(a),q);
SetPixelb(image,ClampToQuantum(b),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,WhiteBalanceImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
artifact=GetImageArtifact(image,"white-balance:vibrance");
if (artifact != (const char *) NULL)
{
ChannelType
channel_mask;
double
black_point = 0.0;
GeometryInfo
geometry_info;
MagickStatusType
flags;
/*
Level the a & b channels.
Optional "vibrance" boost: stretch the chroma channels by black_point
(a percent suffix scales it relative to QuantumRange), restricted to
the a and b channels via a temporary channel mask.
*/
flags=ParseGeometry(artifact,&geometry_info);
if ((flags & RhoValue) != 0)
black_point=geometry_info.rho;
if ((flags & PercentValue) != 0)
black_point*=(double) (QuantumRange/100.0);
channel_mask=SetImageChannelMask(image,(ChannelType) (aChannel |
bChannel));
status&=LevelImage(image,black_point,(double) QuantumRange-black_point,
1.0,exception);
(void) SetImageChannelMask(image,channel_mask);
}
status&=TransformImageColorspace(image,sRGBColorspace,exception);
return(status != 0 ? MagickTrue : MagickFalse);
}
|
GB_unop__expm1_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__expm1_fp32_fp32
// op(A') function: GB_unop_tran__expm1_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = expm1f (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = expm1f (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = expm1f (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EXPM1 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__expm1_fp32_fp32
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Apply z = expm1f(x) entrywise across all anz entries of Ax, writing
// the results into Cx.  Aliasing Cx with Ax is safe: each entry is read
// exactly once before its slot is overwritten.
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
const float x = Ax [k] ;
Cx [k] = expm1f (x) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A while applying expm1f to each entry.
// The actual loop nest lives in the shared template GB_unop_transpose.c,
// which is specialized here through the GB_* macros defined above.
GrB_Info GB_unop_tran__expm1_fp32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Compute *result = *x - *y with tv_usec normalized into [0, 1000000).
 * NOTE: *y is used as scratch space and is modified by the borrow/carry
 * adjustments below (the classic glibc example behaves the same way).
 * Returns 1 when the difference is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y when x has fewer microseconds. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry surplus microseconds (more than one second) into y's seconds. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization tv_usec of the result is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/*
 * Benchmark driver: runs the tiled 3D 7-point variable-coefficient stencil
 * TESTS times on an (Nx x Ny x Nz) grid for Nt time steps and reports the
 * best wall-clock time.
 *
 * Usage: prog Nx Ny Nz [Nt]
 *
 * Fixes vs. the original:
 *  - Nx/Ny/Nz/Nt were read uninitialized when too few command-line
 *    arguments were given; they now have sane defaults.
 *  - The leaf arrays are allocated with calloc so the boundary planes
 *    (index 0) and the second time-plane of A -- both read by the stencil
 *    but never written by the rand() initialization loops below -- are
 *    well-defined zeros.  The rand() sequence for the interior is
 *    unchanged.
 */
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
/* Problem size defaults (interior + 2 halo points); overridden by argv. */
int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays (two time-planes of the grid), zero-initialized
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) calloc(Nx, sizeof(double));
}
}
}
// seven coefficient grids (center + six neighbors), zero-initialized
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) calloc(Nx, sizeof(double));
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 16;
tile_size[3] = 256;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize the interior with a reproducible pseudo-random pattern;
// the halo (index 0) stays zero from calloc
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code (auto-generated time/space tiling; do not edit) */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,8);t1++) {
lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16));
ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-1,2)),ceild(16*t2-Nz-12,16));t3<=min(min(min(floord(Nt+Ny-4,16),floord(8*t1+Ny+13,16)),floord(16*t2+Ny+12,16)),floord(16*t1-16*t2+Nz+Ny+11,16));t3++) {
for (t4=max(max(max(0,ceild(t1-31,32)),ceild(16*t2-Nz-252,256)),ceild(16*t3-Ny-252,256));t4<=min(min(min(min(floord(Nt+Nx-4,256),floord(8*t1+Nx+13,256)),floord(16*t2+Nx+12,256)),floord(16*t3+Nx+12,256)),floord(16*t1-16*t2+Nz+Nx+11,256));t4++) {
for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),16*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),16*t3+14),256*t4+254),16*t1-16*t2+Nz+13);t5++) {
for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) {
lbv=max(256*t4,t5+1);
ubv=min(256*t4+255,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
mm.c | #include<stdio.h>
#include<stdlib.h>
#include<malloc.h>
#include<omp.h>
#define row 2500
#define col 2500
/*
 * OpenMP matrix-multiplication demo: fills two row x col integer matrices
 * with pseudo-random values, multiplies them with a guided-schedule
 * parallel loop on 3 threads, prints a sample of the result and the
 * elapsed wall time, then releases all memory.
 *
 * Fix vs. the original: the free loop iterated over `col` although the
 * row-pointer arrays hold `row` entries; that only worked because
 * row == col here.  The bound now tracks the allocation.
 */
int main() {
    int i, j, k;
    double wtime;
    int **a = NULL, **b = NULL, **c = NULL;
    printf("1.Allocate memory------\n");
    /* Allocate the array of row pointers first. */
    a = (int **)malloc(sizeof(int *)*row);
    b = (int **)malloc(sizeof(int *)*row);
    c = (int **)malloc(sizeof(int *)*row);
    /* Then allocate the storage for each row. */
    for(i=0; i<row; i++) {
        *(a+i)=(int *)malloc(sizeof(int)*col);
        *(b+i)=(int *)malloc(sizeof(int)*col);
        *(c+i)=(int *)malloc(sizeof(int)*col);
    }
    printf("2.Initialize array------\n");
    for(i=0; i<row; i++) {
        for(j=0; j<col; j++) {
            a[i][j]=rand()%600 + 1;
            b[i][j]=rand()%2000 + 1;
            c[i][j]=0;
        }
    }
    printf("3.Start matrix multiplication------\n");
    wtime = omp_get_wtime ();
    /* c = a * b; the inner bound uses `row` for the shared dimension,
       which is correct only because row == col in this configuration. */
#pragma omp parallel for schedule(guided,100) private(i,j,k) num_threads(3)
    for(i=0; i<row; i++) {
        for(j=0; j<col; j++) {
            for(k=0; k<row; k++) {
                c[i][j]+=a[i][k]*b[k][j];
            }
        }
    }
    wtime = omp_get_wtime () - wtime;
    printf("4.Output result------\n");
    /* Print every 200th element as a spot check. */
    for(i=0; i<row; i++) {
        for(j=0; j<col; j++) {
            if(i%200==0 && j%200==0)printf("%d\n",c[i][j]);
        }
    }
    printf ( "\n" );
    printf ( " Time = %g seconds.\n", wtime );
    /*
     * Release memory: each of the `row` rows, then the pointer arrays.
     */
    for(i=0; i<row; i++) {
        free(*(a+i));
        free(*(b+i));
        free(*(c+i));
    }
    free(a);
    free(b);
    free(c);
    return 0;
}
|
lf_flux.c | #include "nconv2d.h"
/* Lax-Friedrichs numerical flux: the average of the interior (M) and
 * exterior (P) nodal fluxes projected onto the outward normal (nx, ny),
 * plus a dissipation term scaled by a local wave-speed estimate. */
void lf_flux(double f_M, double f_P,
             double nx, double ny,
             double *num_flux)
{
    double flux_x_M, flux_y_M;
    double flux_x_P, flux_y_P;
    nodal_flux(f_M, &flux_x_M, &flux_y_M);
    nodal_flux(f_P, &flux_x_P, &flux_y_P);
    /* Wave-speed estimate: the larger magnitude of the two states. */
    double speed = max(fabs(f_M), fabs(f_P));
    double avg = 0.5 * (nx * (flux_x_M + flux_x_P) +
                        ny * (flux_y_M + flux_y_P));
    double jump = 0.5 * speed * (f_M - f_P);
    *num_flux = avg + jump;
}
/* @brief calculate the surface flux deviation for strong form.
*
* Usages:
* [dflux] = lf_flux(h, h_ext, nx, ny, eidM, eidP, eidtype);
*/
/*
 * MEX entry point: computes the strong-form surface-flux deviation
 * dflux = -F*(num. flux) + n . F(local) for every face node of every
 * element, applying boundary conditions through bound_cond().
 *
 * Inputs (7): h, h_ext, nx, ny, eidM, eidP (Nfp x K doubles), eidtype
 * (int8 boundary-type codes).  Output (1): dflux, Nfp x K.
 */
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, const mxArray *prhs[])
{
/* check input & output */
if (nrhs != 7)
mexErrMsgTxt("Wrong number of input arguments.");
if (nlhs != 1)
mexErrMsgTxt("Wrong number of output arguments.");
/* get inputs */
double *h = mxGetPr(prhs[0]);
double *h_ext = mxGetPr(prhs[1]);
double *nx = mxGetPr(prhs[2]);
double *ny = mxGetPr(prhs[3]);
double *eidM = mxGetPr(prhs[4]);
double *eidP = mxGetPr(prhs[5]);
signed char *eidtype = (signed char *)mxGetData(prhs[6]); // int8
/* get dimensions */
size_t Nfp = mxGetM(prhs[4]);
size_t K = mxGetN(prhs[4]);
size_t Np = mxGetM(prhs[0]);
/* allocate output array */
plhs[0] = mxCreateDoubleMatrix((mwSize)Nfp, (mwSize)K, mxREAL);
double *dflux = mxGetPr(plhs[0]);
#ifdef _OPENMP /* set number of threads */
#pragma omp parallel for num_threads(DG_THREADS)
#endif
/* One iteration per element; `ind` walks the column-major face-node
 * entries of element i and is advanced at the bottom of the inner loop. */
for (int i = 0; i < K; i++)
{
int ind = i * Nfp;
for (int j = 0; j < Nfp; j++)
{
int iM = (int)eidM[ind] - 1; // change index to C type
int iP = (int)eidP[ind] - 1;
double f_M = h[iM]; // local and adjacent node values
double varP = h[iP];
// outward normal vector of local element
double nx_ = nx[ind];
double ny_ = ny[ind];
double f_ext; // external values on local nodes
f_ext = h_ext[iM];
bc_type type = (bc_type)eidtype[ind];
// get adjacent values, considering various boundary conditions
double f_P;
int info = bound_cond(f_M, varP, f_ext, nx_, ny_, type, &f_P);
// if(info) mexErrMsgTxt("Unknown boundary conditions.");
double numflux, E, G;
lf_flux(f_M, f_P, nx_, ny_, &numflux);
nodal_flux(f_M, &E, &G);
// flux deviation: local normal flux minus the numerical flux
dflux[ind] = -numflux + nx_ * E + ny_ * G;
ind++;
}
}
return;
}
vtp_fmt_plug.c | /*
* Cracker for MD5 based authentication in VTP.
*
* This software is Copyright (c) 2014 Alexey Lapitsky <lex at
* realisticgroup.com> and Dhiru Kholia <dhiru at openwall.com>, and it is
* hereby released to the general public under the following terms:
*
 * Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_vtp;
#elif FMT_REGISTERS_H
john_register_one(&fmt_vtp);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
// Tuned on core i7 4-core HT
// 64 - 19k
// 128 - 27k
// 256 - 30.5k ** chosen **
// 512 - 30.5k
// 1k - 28.5k
// 2k - 28.5k (times wobble)
#ifndef OMP_SCALE
#ifdef __MIC__
#define OMP_SCALE 4096
#else
#define OMP_SCALE 256
#endif // __MIC__
#endif // OMP_SCALE
#endif // _OPENMP
#include "arch.h"
#include "md5.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"
#define FORMAT_LABEL "vtp"
#define FORMAT_NAME "\"MD5 based authentication\" VTP"
#define FORMAT_TAG "$vtp$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 55 // keep under 1 MD5 block AND this is now tied into logic in vtp_secret_derive()
#define BINARY_SIZE 16
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define HEXCHARS "0123456789abcdef"
/* Self-test vectors: "$vtp$version$vlans_len$vlans_hex$salt_len$salt_hex$md5"
 * paired with the known plaintext password. */
static struct fmt_tests tests[] = {
{"$vtp$2$196$14000107000105dc000186a164656661756c740014000105000505dc000186a56368656e6100000010000103000605dc000186a6666666001800020c03ea05dc00018a8a666464692d64656661756c743000030d03eb117800018a8b74726372662d64656661756c7400000001010ccc040103ed0701000208010007090100072000040f03ec05dc00018a8c666464696e65742d64656661756c7400030100012400050d03ed117800018a8d74726272662d64656661756c740000000201000f03010002$80$0201010c646f6d61696e313233343536000000000000000000000000000000000000000000000015000000003134313030393134333631376010913064949d6f47a53b2ad68ef06b0000000106010002$6010913064949d6f47a53b2ad68ef06b", "123"},
{"$vtp$1$184$14000107000105dc000186a164656661756c740014000105000505dc000186a568656c6c6f0000002000020c03ea05dc00018a8a666464692d64656661756c7401010000040100002800031203eb05dc00018a8b746f6b656e2d72696e672d64656661756c74000001010000040100002400040f03ec05dc00018a8c666464696e65742d64656661756c740002010000030100012400050d03ed05dc00018a8d74726e65742d64656661756c740000000201000003010002$77$0101010c646f6d61696e313233343536000000000000000000000000000000000000000000000010000000003134313030393134313432372212dd93025abc600281d74ddda8a21c0101000200$2212dd93025abc600281d74ddda8a21c", "123"},
{NULL}
};
/* Per-candidate buffers, allocated in init() and released in done(). */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* Per-candidate derived "secret": MD5 of ~1 MB of password keystream
 * (see vtp_secret_derive); recomputed only when `dirty` is set. */
static unsigned char (*secret)[16];
/* saved_len: plaintext lengths; dirty: set by vtp_set_key(), cleared by
 * crypt_all(), so secrets are derived once per password, not per salt. */
static int *saved_len, dirty;
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
/* VTP summary advertisement packet, partially based on original Yersinia code */
typedef struct {
unsigned char version;
unsigned char code;
unsigned char followers;
unsigned char domain_name_length;
unsigned char domain_name[32]; // zero padded
uint32_t revision; // 4 bytes
uint32_t updater; // 4 bytes
unsigned char update_timestamp[12]; // zero'ed during MAC calculations
unsigned char md5_checksum[16];
} vtp_summary_packet;
/* Parsed form of one "$vtp$..." ciphertext (the format's salt). */
static struct custom_salt {
int length;
vtp_summary_packet vsp;
int vlans_data_length;
unsigned char vlans_data[8192];
int salt_length;
unsigned char salt[2048];
int trailer_length;
int version; // VTP protocol version (1 or 2)
unsigned char trailer_data[64]; // salt bytes beyond the 72-byte packet
} *cur_salt;
/*
 * One-time format setup: scale the keys-per-crypt parameters for OpenMP
 * and allocate the per-candidate buffers.
 *
 * Fix: the original called omp_get_num_threads(), which always returns 1
 * outside a parallel region, so min/max_keys_per_crypt were never scaled
 * and the OMP_SCALE tuning had no effect.  omp_get_max_threads() reports
 * the thread count a parallel region will actually use.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
saved_len = mem_calloc(sizeof(*saved_len), self->params.max_keys_per_crypt);
crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt);
secret = mem_calloc(sizeof(*secret), self->params.max_keys_per_crypt);
}
/* Release the per-candidate buffers allocated in init(). */
static void done(void)
{
MEM_FREE(secret);
MEM_FREE(crypt_out);
MEM_FREE(saved_len);
MEM_FREE(saved_key);
}
/*
 * Validate one ciphertext of the form
 *   $vtp$version$vlans_len$vlans_hex$salt_len$salt_hex$md5_hex
 * Returns 1 when every field parses and is self-consistent, 0 otherwise.
 * Works on a strdup'd copy because strtokm() modifies its input.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p, *ptrkeep;
int res;
p = ciphertext;
if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
return 0;
ptrkeep = strdup(ciphertext);
p = &ptrkeep[TAG_LENGTH];
if ((p = strtokm(p, "$")) == NULL) /* version */
goto err;
if(!isdec(p))
goto err;
res = atoi(p);
if (res != 1 && res != 2) // VTP version 3 support is pending
goto err; // FIXME: fprintf(stderr, ... for version 3?
if ((p = strtokm(NULL, "$")) == NULL) /* vlans len */
goto err;
if(!isdec(p))
goto err;
res = atoi(p);
if ((p = strtokm(NULL, "$")) == NULL) /* vlans data */
goto err;
/* hex string must encode exactly the declared number of bytes */
if (strlen(p) / 2 != res)
goto err;
if (!ishexlc(p))
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* salt len */
goto err;
if(!isdec(p))
goto err;
res = atoi(p);
if ((p = strtokm(NULL, "$")) == NULL) /* salt */
goto err;
if (strlen(p) / 2 != res)
goto err;
if (!ishexlc(p))
goto err;
/* salt byte 3 is the domain-name length; reject if it would overflow
the fixed 32-byte domain_name field (see get_salt) */
if (((atoi16[ARCH_INDEX(p[6])]<<4)|atoi16[ARCH_INDEX(p[7])]) >
sizeof(cur_salt->vsp.domain_name))
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* hash */
goto err;
if (strlen(p) != BINARY_SIZE * 2)
goto err;
if (!ishexlc(p))
goto err;
MEM_FREE(ptrkeep);
return 1;
err:
MEM_FREE(ptrkeep);
return 0;
}
/*
 * Parse a (pre-validated) ciphertext into the static custom_salt: decode
 * the hex fields into raw bytes and pre-build the VTP summary packet with
 * the mutable fields (followers, timestamp, checksum) zeroed, exactly as
 * the MD5 MAC calculation requires.
 */
static void *get_salt(char *ciphertext)
{
static struct custom_salt cs;
int i;
char *p, *q;
memset(&cs, 0, SALT_SIZE);
if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
ciphertext += TAG_LENGTH;
p = ciphertext;
cs.version = atoi(p);
q = p + 2;
cs.vlans_data_length = atoi(q);
q = strchr(q, '$') + 1; // at vlans_data
for (i = 0; i < cs.vlans_data_length; i++)
cs.vlans_data[i] = (atoi16[ARCH_INDEX(q[2 * i])] << 4) |
atoi16[ARCH_INDEX(q[2 * i + 1])];
q = strchr(q, '$') + 1; // at salt_length
cs.salt_length = atoi(q);
q = strchr(q, '$') + 1; // at salt
for (i = 0; i < cs.salt_length; i++)
cs.salt[i] = (atoi16[ARCH_INDEX(q[2 * i])] << 4) |
atoi16[ARCH_INDEX(q[2 * i + 1])];
/* the fixed summary packet is 72 bytes; anything beyond is kept as
trailer bytes, hashed for VTP version >= 2 (see crypt_all) */
if (cs.salt_length > 72) { /* we have trailing bytes */
cs.trailer_length = cs.salt_length - 72;
memcpy(cs.trailer_data, cs.salt + 72, cs.trailer_length);
}
cs.vsp.version = cs.salt[0]; // based on Wireshark
cs.vsp.code = cs.salt[1];
// Zero out various fields for MAC calculation
cs.vsp.followers = 0;
memset(cs.vsp.update_timestamp, 0, 12);
memset(cs.vsp.md5_checksum, 0, 16);
// fill rest of the data
cs.vsp.domain_name_length = cs.salt[3];
/* clamp to the field size; valid() already rejects oversized values */
if (cs.vsp.domain_name_length > sizeof(cs.vsp.domain_name))
cs.vsp.domain_name_length = sizeof(cs.vsp.domain_name);
memcpy(cs.vsp.domain_name, cs.salt + 4, cs.vsp.domain_name_length);
memcpy((unsigned char*)&cs.vsp.revision, cs.salt + 36, 4);
memcpy((unsigned char*)&cs.vsp.updater, cs.salt + 36 + 4, 4);
return (void*)&cs;
}
/* Decode the trailing hex field (after the last '$') into the raw
 * BINARY_SIZE-byte MD5 digest.  Returns a pointer to static storage. */
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE];
ARCH_WORD dummy; /* force alignment */
} buf;
unsigned char *out = buf.c;
char *hex;
int i;
hex = strrchr(ciphertext, '$') + 1;
for (i = 0; i < BINARY_SIZE; i++, hex += 2) {
out[i] = (atoi16[ARCH_INDEX(hex[0])] << 4) |
atoi16[ARCH_INDEX(hex[1])];
}
return out;
}
/* Hash-table accessors: successively wider slices of the first 32 bits
 * of the computed digest, as required by the cracker core. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/*
 * Derive the 16-byte VTP "secret" from a password: MD5 over roughly 1 MB
 * (1563 x 64 bytes) of the password repeated cyclically.  An empty
 * password yields an all-zero secret.
 */
static void vtp_secret_derive(char *password, int length, unsigned char *output)
{
#if 0
/* old code kept as a easier to read view of what is being done */
MD5_CTX ctx;
unsigned char *cp, buf[64];
unsigned int password_idx = 0;
int i, j;
if (length == 0) {
memset(output, 0, 16);
return;
}
MD5_Init(&ctx);
for(i = 0; i < 1563; i++) { /* roughly 1 MB */
cp = buf;
for (j = 0; j < 64; j++) /* treat password as a cyclic generator */
*cp++ = password[password_idx++ % length];
MD5_Update(&ctx, buf, 64);
}
MD5_Final(output, &ctx);
#else
// Speed went from 8k to 28k. I think it should be VERY easy to add SIMD code here.
// That would gain us another 4x or so speed. TODO for someone to play with ;)
// Optimization: precompute the cyclic keystream once.  The 64-byte
// blocks of the repeated password themselves repeat with a short
// period, so fill buffers until a block ends exactly at a password
// boundary, then just cycle through the precomputed blocks.
MD5_CTX ctx;
unsigned char *cp, buf[55][64];
int bufs_used = 0, local_cnt = 0;
int i, j;
if (length == 0) {
memset(output, 0, 16);
return;
}
cp = buf[bufs_used];
/* treat password as a cyclic generator */
for (;;) {
/* note this WILL exit. Modular math assures will do so in 'length' buffers or */
/* less. with PLAINTEXTLEN set to 55 bytes, we only need 55 buffers to assure a cycle */
if (local_cnt + length <= 64) {
memcpy(&cp[local_cnt], password, length);
local_cnt += length;
if (local_cnt == 64) {
/* we ended a word at end of buffer, so we have the cycle */
bufs_used++;
break;
}
} else {
/* password straddles the buffer boundary: split the copy */
int spill = local_cnt+length-64;
memcpy(&cp[local_cnt], password, length-spill);
cp = buf[++bufs_used];
memcpy(cp, &password[length-spill], spill);
local_cnt = spill;
}
}
MD5_Init(&ctx);
for(i = 0, j=0; i < 1563; ++i) { /* roughly 1 MB */
MD5_Update(&ctx, buf[j++], 64);
if (j == bufs_used)
j = 0;
}
MD5_Final(output, &ctx);
#endif
}
/* Install the salt chosen by the cracker core for subsequent crypt_all(). */
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
/*
 * Compute MD5(secret || summary packet [|| trailer] || vlans || secret)
 * for every queued candidate.  The expensive per-password secret is only
 * re-derived when `dirty` indicates new keys were set since the last call.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
MD5_CTX ctx;
// space for (secret + SUMMARY ADVERTISEMENT + VLANS DATA + secret)
// derive and append "secret", but do it only the FIRST time for a password (not for extra salts).
if (dirty)
vtp_secret_derive(saved_key[index], saved_len[index], secret[index]);
MD5_Init(&ctx);
MD5_Update(&ctx, secret[index], 16);
// append vtp_summary_packet
MD5_Update(&ctx, &cur_salt->vsp, sizeof(vtp_summary_packet));
// add trailing bytes (for VTP version >= 2)
if (cur_salt->version != 1)
MD5_Update(&ctx, cur_salt->trailer_data, cur_salt->trailer_length);
// append vlans_data
MD5_Update(&ctx, cur_salt->vlans_data, cur_salt->vlans_data_length);
// append "secret" again
MD5_Update(&ctx, secret[index], 16);
MD5_Final((unsigned char*)crypt_out[index], &ctx);
}
dirty = 0;
return count;
}
/* Quick scan: does the first 32-bit word of any computed digest match the
 * target?  Without _OPENMP the loop is compiled out, so only index 0 is
 * checked -- consistent with MAX_KEYS_PER_CRYPT being 1 and init() only
 * scaling the buffers under _OPENMP. */
static int cmp_all(void *binary, int count)
{
int index = 0;
#ifdef _OPENMP
for (; index < count; index++)
#endif
if (((ARCH_WORD_32*)binary)[0] == crypt_out[index][0])
return 1;
return 0;
}
/* Full 16-byte digest comparison for one candidate. */
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* Nothing beyond the stored binary to verify; always a match. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* Store one candidate password and flag that per-password secrets must be
 * re-derived on the next crypt_all(). */
static void vtp_set_key(char *key, int index)
{
saved_len[index] = strlen(key);
strnzcpy(saved_key[index], key, sizeof(saved_key[0]));
dirty = 1;
}
/* Return the stored candidate password. */
static char *get_key(int index)
{
return saved_key[index];
}
/* Format descriptor registered with the John the Ripper core: static
 * parameters first, then the method table wiring up the functions above. */
struct fmt_main fmt_vtp = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
vtp_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif
|
GB_unop__identity_int64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int64_fp64)
// op(A') function: GB (_unop_tran__identity_int64_fp64)
// C type: int64_t
// A type: double
// cast: int64_t cij = GB_cast_to_int64_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = GB_cast_to_int64_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = GB_cast_to_int64_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the IDENTITY unary operator entry-wise: typecast each double in Ax
// to int64_t and store it in Cx.  Handles both the full/sparse case
// (Ab == NULL, all anz entries present) and the bitmap case (Ab flags which
// entries exist).  Returns GrB_NO_VALUE if this kernel is compiled out.
GrB_Info GB (_unop_apply__identity_int64_fp64)
(
int64_t *Cx, // output array; Cx and Ax may be aliased
const double *Ax, // input array
const int8_t *restrict Ab, // A->b if A is bitmap; NULL for full/sparse
int64_t anz, // number of entries to process
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time;
// the caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full or sparse case: every entry 0..anz-1 is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
int64_t z = GB_cast_to_int64_t ((double) (aij)) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ; // entry p is not present in the bitmap
double aij = Ax [p] ;
int64_t z = GB_cast_to_int64_t ((double) (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (cast (A')): transpose A while typecasting double -> int64_t.
// The loop body is the shared template GB_unop_transpose.c, specialized by
// the GB_* macros defined above (GB_GETA, GB_CAST_OP, ...).
GrB_Info GB (_unop_tran__identity_int64_fp64)
(
GrB_Matrix C, // output matrix
const GrB_Matrix A, // input matrix, read as A'
int64_t *restrict *Workspaces, // per-task workspaces -- semantics defined by the template; see GB_unop_transpose.c
const int64_t *restrict A_slice, // partition of A across tasks (template-defined)
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// kernel compiled out; caller uses the generic transpose
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
encoding.h |
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
#include "tracing.h"
#include "colorManagement.h"
// Camera description for the renderer: position, orientation expressed as an
// axis-angle rotation, field of view, and the color returned for rays that
// hit nothing.
struct Camera{
glm::vec3 position, rotationAxis; // eye position and rotation axis
float rotation, renderFov; // rotation angle (degrees; converted with glm::radians at use) and field of view (radians, used directly in tan(fov/2))
glm::vec3 background; // color for rays that miss every object
};
// Bundle of render settings handed to the encoders (PNGEncode et al.).
struct Options{
std::string renderName, encodeType; // output file name and encoder selection
u16 renderWidth, renderHeight; // output resolution in pixels
u8 renderChannels, renderSamples; // bytes per pixel and AA samples per pixel
Camera camMan; // camera; not set by the constructor, assign separately
bool palette = false; // when true, write a palettized image instead of a PNG
Palette pal; // palette used when `palette` is true
Options(std::string renderN, std::string encodeT,u16 renderW, u16 renderH, u8 renderC, u8 renderS): renderName(renderN), encodeType(encodeT),renderWidth(renderW),
renderHeight(renderH), renderChannels(renderC), renderSamples(renderS){}
};
// Map one floating-point color component (nominally in [0,1]) to a byte,
// clamping out-of-range values into [0,255].  Uses fmin/fmax so the clamp
// keeps the same NaN handling as the original expression.
u8 convertVec(float d){
    const float scaled = d * 255.0f;
    return fmax(0.0f, fmin(255.0f, scaled));
}
// Build the camera-space direction through pixel (x, y) of a w x h image for
// a camera looking down -z.  `fov` is the vertical field of view in radians;
// the horizontal component is stretched by the aspect ratio w/h.  The float
// operations are evaluated in the same order as the original expression.
glm::vec3 calculateWin(float fov, float x, float y, u16 w, u16 h){
    const float halfTan = tan(fov/2.0f);
    const float ndcX = 2*(x + 0.5f)/(float)w - 1; // [-1,1] across the image
    const float ndcY = 2*(y + 0.5f)/(float)h - 1;
    return glm::vec3(ndcX*halfTan*w/(float)h, -ndcY*halfTan, -1);
}
// Render the scene (`objects`, `lights`) with the settings in `opts` and
// write the result with stb_image_write, or palettized via
// writeRenderPalettized when opts.palette is set.
//
// Fixes over the previous version:
//  * finalResult is explicitly zero-initialized: glm's default constructor
//    leaves components indeterminate unless GLM_FORCE_CTOR_INIT is defined,
//    so the first `+=` could accumulate garbage.
//  * render time is now measured once with wall-clock timestamps around the
//    parallel loop.  The old per-pixel accumulation read/wrote the shared
//    timeNow/timeThen/elapsedTime variables from every OpenMP thread (a data
//    race) and summed per-thread deltas, which is not wall time anyway.
void PNGEncode(std::vector<Object*> objects, std::vector<Light*> lights, Options opts){
    u8* render = new u8[opts.renderWidth * opts.renderHeight * opts.renderChannels];
    glm::mat3 rotMat = glm::rotate(glm::radians(opts.camMan.rotation), opts.camMan.rotationAxis);
    const auto timeThen = std::chrono::system_clock::now();
    #pragma omp parallel for
    for(int x = 0; x < opts.renderWidth; x++){
        for(int y = 0; y < opts.renderHeight; y++){
            glm::vec3 finalResult(0.0f); // sample accumulator; must start at zero
            for(int sample = 0; sample < opts.renderSamples; sample++){
                // 2x2 jitter: samples 0,1 offset -0.25 in x, 2,3 offset +0.25;
                // y is offset the opposite way (pattern assumes 4 samples)
                float sampleX = (x + 0.5f + ((sample < 2) ? -0.25f : 0.25f));
                float sampleY = (y + 0.5f + ((sample >= 2) ? -0.25f : 0.25f));
                glm::vec3 dir = rotMat * glm::normalize(calculateWin(opts.camMan.renderFov, sampleX, sampleY, opts.renderWidth, opts.renderHeight));
                Ray currentRay(opts.camMan.position, dir);
                finalResult += cast_ray(currentRay, objects, lights, opts.camMan.background);
            }
            finalResult /= opts.renderSamples;
            // NOTE(review): only RGB is written -- assumes renderChannels >= 3.
            render[opts.renderChannels *(x + y * opts.renderWidth)] = convertVec(finalResult.x);
            render[opts.renderChannels *(x + y * opts.renderWidth)+ 1] = convertVec(finalResult.y);
            render[opts.renderChannels *(x + y * opts.renderWidth) + 2] = convertVec(finalResult.z);
        }
    }
    const std::chrono::duration<float> elapsed = std::chrono::system_clock::now() - timeThen;
    const float elapsedTime = elapsed.count();
    if(!opts.palette){
        stbi_write_png(opts.renderName.c_str(), opts.renderWidth, opts.renderHeight, opts.renderChannels, render, 0);
        std::cout << "Time rendered: " << elapsedTime << std::endl;
    }
    else{
        writeRenderPalettized(opts.pal, render, opts.renderWidth, opts.renderHeight, opts.renderChannels);
        std::cout << "Oh. It was palettized too. Enjoy!" << std::endl;
        std::cout << "Time rendered: " << elapsedTime << std::endl;
    }
    delete[] render;
}
GB_dense_subassign_22_template.c | //------------------------------------------------------------------------------
// GB_dense_subassign_22_template: C += b where C is dense and b is a scalar
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
{
//--------------------------------------------------------------------------
// get C
//--------------------------------------------------------------------------
// C is dense, so its values Cx can be updated in place with one flat,
// parallel loop over all cnz entries.
GB_CTYPE *restrict Cx = (GB_CTYPE *) C->x ;
const int64_t cnz = GB_NNZ (C) ;
//--------------------------------------------------------------------------
// C += b where C is dense and b is a scalar
//--------------------------------------------------------------------------
int64_t pC ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pC = 0 ; pC < cnz ; pC++)
{
// Cx [pC] = Cx [pC] "+" bwork, where "+" is the accum operator
GB_BINOP (GB_CX (pC), GB_CX (pC), bwork, 0, 0) ;
}
}
|
yescrypt-simd.h | /*-
* Copyright 2009 Colin Percival
* Copyright 2012-2014 Alexander Peslyak
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* This file was originally written by Colin Percival as part of the Tarsnap
* online backup system.
*/
/*
* On 64-bit, enabling SSE4.1 helps our pwxform code indirectly, via avoiding
* gcc bug 54349 (fixed for gcc 4.9+). On 32-bit, it's of direct help. AVX
* and XOP are of further help either way.
*/
#ifndef __SSE4_1__
// #warning "Consider enabling SSE4.1, AVX, or XOP in the C compiler for significantly better performance"
#endif
#include <emmintrin.h>
#ifdef __XOP__
#include <x86intrin.h>
#endif
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "sha256.h"
#include "sysendian.h"
#include "yescrypt.h"
#include "yescrypt-platform.h"
#if __STDC_VERSION__ >= 199901L
/* have restrict */
#elif defined(__GNUC__)
#define restrict __restrict
#else
#define restrict
#endif
#define PREFETCH(x, hint) _mm_prefetch((const char *)(x), (hint));
#define PREFETCH_OUT(x, hint) /* disabled */
#ifdef __XOP__
#define ARX(out, in1, in2, s) \
out = _mm_xor_si128(out, _mm_roti_epi32(_mm_add_epi32(in1, in2), s));
#else
#define ARX(out, in1, in2, s) \
{ \
__m128i T = _mm_add_epi32(in1, in2); \
out = _mm_xor_si128(out, _mm_slli_epi32(T, s)); \
out = _mm_xor_si128(out, _mm_srli_epi32(T, 32-s)); \
}
#endif
#define SALSA20_2ROUNDS \
/* Operate on "columns" */ \
ARX(X1, X0, X3, 7) \
ARX(X2, X1, X0, 9) \
ARX(X3, X2, X1, 13) \
ARX(X0, X3, X2, 18) \
\
/* Rearrange data */ \
X1 = _mm_shuffle_epi32(X1, 0x93); \
X2 = _mm_shuffle_epi32(X2, 0x4E); \
X3 = _mm_shuffle_epi32(X3, 0x39); \
\
/* Operate on "rows" */ \
ARX(X3, X0, X1, 7) \
ARX(X2, X3, X0, 9) \
ARX(X1, X2, X3, 13) \
ARX(X0, X1, X2, 18) \
\
/* Rearrange data */ \
X1 = _mm_shuffle_epi32(X1, 0x39); \
X2 = _mm_shuffle_epi32(X2, 0x4E); \
X3 = _mm_shuffle_epi32(X3, 0x93);
/**
* Apply the salsa20/8 core to the block provided in (X0 ... X3).
*/
#define SALSA20_8_BASE(maybe_decl, out) \
{ \
maybe_decl Y0 = X0; \
maybe_decl Y1 = X1; \
maybe_decl Y2 = X2; \
maybe_decl Y3 = X3; \
SALSA20_2ROUNDS \
SALSA20_2ROUNDS \
SALSA20_2ROUNDS \
SALSA20_2ROUNDS \
(out)[0] = X0 = _mm_add_epi32(X0, Y0); \
(out)[1] = X1 = _mm_add_epi32(X1, Y1); \
(out)[2] = X2 = _mm_add_epi32(X2, Y2); \
(out)[3] = X3 = _mm_add_epi32(X3, Y3); \
}
#define SALSA20_8(out) \
SALSA20_8_BASE(__m128i, out)
/**
* Apply the salsa20/8 core to the block provided in (X0 ... X3) ^ (Z0 ... Z3).
*/
#define SALSA20_8_XOR_ANY(maybe_decl, Z0, Z1, Z2, Z3, out) \
X0 = _mm_xor_si128(X0, Z0); \
X1 = _mm_xor_si128(X1, Z1); \
X2 = _mm_xor_si128(X2, Z2); \
X3 = _mm_xor_si128(X3, Z3); \
SALSA20_8_BASE(maybe_decl, out)
#define SALSA20_8_XOR_MEM(in, out) \
SALSA20_8_XOR_ANY(__m128i, (in)[0], (in)[1], (in)[2], (in)[3], out)
#define SALSA20_8_XOR_REG(out) \
SALSA20_8_XOR_ANY(/* empty */, Y0, Y1, Y2, Y3, out)
typedef union {
uint32_t w[16];
__m128i q[4];
} salsa20_blk_t;
/**
* blockmix_salsa8(Bin, Bout, r):
* Compute Bout = BlockMix_{salsa20/8, r}(Bin). The input Bin must be 128r
* bytes in length; the output Bout must also be the same size.
*/
// blockmix_salsa8(Bin, Bout, r): scrypt BlockMix with the salsa20/8 core.
// Bin and Bout each hold 2r 64-byte blocks.  The SALSA20_8_XOR_MEM macro
// works on the X0..X3 registers declared below, XORs in an input block, runs
// salsa20/8, and stores the result; even-indexed outputs go to Bout[0..r]
// and odd-indexed ones to Bout[r+1..2r+1] (step 6 of scrypt's BlockMix).
static inline void
blockmix_salsa8(const salsa20_blk_t *restrict Bin,
salsa20_blk_t *restrict Bout, size_t r)
{
__m128i X0, X1, X2, X3;
size_t i;
r--; // use r-1 so the last block can be handled outside the loop
// prefetch every input block before the compute loop
PREFETCH(&Bin[r * 2 + 1], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin[i * 2], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
PREFETCH(&Bin[i * 2 + 1], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
}
PREFETCH(&Bin[r * 2], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)
/* 1: X <-- B_{2r - 1} */
X0 = Bin[r * 2 + 1].q[0];
X1 = Bin[r * 2 + 1].q[1];
X2 = Bin[r * 2 + 1].q[2];
X3 = Bin[r * 2 + 1].q[3];
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
SALSA20_8_XOR_MEM(Bin[0].q, Bout[0].q)
/* 2: for i = 0 to 2r - 1 do */
// two input blocks per iteration: the odd block goes to the upper output
// half, the following even block to the lower half
for (i = 0; i < r;) {
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
SALSA20_8_XOR_MEM(Bin[i * 2 + 1].q, Bout[r + 1 + i].q)
i++;
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
SALSA20_8_XOR_MEM(Bin[i * 2].q, Bout[i].q)
}
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
SALSA20_8_XOR_MEM(Bin[r * 2 + 1].q, Bout[r * 2 + 1].q)
}
/*
* (V)PSRLDQ and (V)PSHUFD have higher throughput than (V)PSRLQ on some CPUs
* starting with Sandy Bridge. Additionally, PSHUFD uses separate source and
* destination registers, whereas the shifts would require an extra move
* instruction for our code when building without AVX. Unfortunately, PSHUFD
* is much slower on Conroe (4 cycles latency vs. 1 cycle latency for PSRLQ)
* and somewhat slower on some non-Intel CPUs (luckily not including AMD
* Bulldozer and Piledriver). Since for many other CPUs using (V)PSHUFD is a
* win in terms of throughput or/and not needing a move instruction, we
* currently use it despite of the higher latency on some older CPUs. As an
* alternative, the #if below may be patched to only enable use of (V)PSHUFD
* when building with SSE4.1 or newer, which is not available on older CPUs
* where this instruction has higher latency.
*/
#if 1
#define HI32(X) \
_mm_shuffle_epi32((X), _MM_SHUFFLE(2,3,0,1))
#elif 0
#define HI32(X) \
_mm_srli_si128((X), 4)
#else
#define HI32(X) \
_mm_srli_epi64((X), 32)
#endif
#if defined(__x86_64__) && (defined(__ICC) || defined(__llvm__))
/* Intel's name, also supported by recent gcc */
#define EXTRACT64(X) _mm_cvtsi128_si64(X)
#elif defined(__x86_64__) && !defined(_MSC_VER) && !defined(__OPEN64__)
/* gcc got the 'x' name earlier than non-'x', MSVC and Open64 had bugs */
#define EXTRACT64(X) _mm_cvtsi128_si64x(X)
#elif defined(__x86_64__) && defined(__SSE4_1__)
/* No known bugs for this intrinsic */
#include <smmintrin.h>
#define EXTRACT64(X) _mm_extract_epi64((X), 0)
#elif defined(__SSE4_1__)
/* 32-bit */
#include <smmintrin.h>
#if 0
/* This is currently unused by the code below, which instead uses these two
* intrinsics explicitly when (!defined(__x86_64__) && defined(__SSE4_1__)) */
#define EXTRACT64(X) \
((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
((uint64_t)(uint32_t)_mm_extract_epi32((X), 1) << 32))
#endif
#else
/* 32-bit or compilers with known past bugs in _mm_cvtsi128_si64*() */
#define EXTRACT64(X) \
((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
((uint64_t)(uint32_t)_mm_cvtsi128_si32(HI32(X)) << 32))
#endif
/* This is tunable */
#define S_BITS 8
/* Not tunable in this implementation, hard-coded in a few places */
#define S_SIMD 2
#define S_P 4
/* Number of S-boxes. Not tunable by design, hard-coded in a few places. */
#define S_N 2
/* Derived values. Not tunable except via S_BITS above. */
#define S_SIZE1 (1 << S_BITS)
#define S_MASK ((S_SIZE1 - 1) * S_SIMD * 8)
#define S_MASK2 (((uint64_t)S_MASK << 32) | S_MASK)
#define S_SIZE_ALL (S_N * S_SIZE1 * S_SIMD * 8)
#if !defined(__x86_64__) && defined(__SSE4_1__)
/* 32-bit with SSE4.1 */
#define PWXFORM_X_T __m128i
#define PWXFORM_SIMD(X, x, s0, s1) \
x = _mm_and_si128(X, _mm_set1_epi64x(S_MASK2)); \
s0 = *(const __m128i *)(S0 + (uint32_t)_mm_cvtsi128_si32(x)); \
s1 = *(const __m128i *)(S1 + (uint32_t)_mm_extract_epi32(x, 1)); \
X = _mm_mul_epu32(HI32(X), X); \
X = _mm_add_epi64(X, s0); \
X = _mm_xor_si128(X, s1);
#else
/* 64-bit, or 32-bit without SSE4.1 */
#define PWXFORM_X_T uint64_t
#define PWXFORM_SIMD(X, x, s0, s1) \
x = EXTRACT64(X) & S_MASK2; \
s0 = *(const __m128i *)(S0 + (uint32_t)x); \
s1 = *(const __m128i *)(S1 + (x >> 32)); \
X = _mm_mul_epu32(HI32(X), X); \
X = _mm_add_epi64(X, s0); \
X = _mm_xor_si128(X, s1);
#endif
#define PWXFORM_ROUND \
PWXFORM_SIMD(X0, x0, s00, s01) \
PWXFORM_SIMD(X1, x1, s10, s11) \
PWXFORM_SIMD(X2, x2, s20, s21) \
PWXFORM_SIMD(X3, x3, s30, s31)
#define PWXFORM \
{ \
PWXFORM_X_T x0, x1, x2, x3; \
__m128i s00, s01, s10, s11, s20, s21, s30, s31; \
PWXFORM_ROUND PWXFORM_ROUND \
PWXFORM_ROUND PWXFORM_ROUND \
PWXFORM_ROUND PWXFORM_ROUND \
}
#define XOR4(in) \
X0 = _mm_xor_si128(X0, (in)[0]); \
X1 = _mm_xor_si128(X1, (in)[1]); \
X2 = _mm_xor_si128(X2, (in)[2]); \
X3 = _mm_xor_si128(X3, (in)[3]);
#define OUT(out) \
(out)[0] = X0; \
(out)[1] = X1; \
(out)[2] = X2; \
(out)[3] = X3;
/**
* blockmix_pwxform(Bin, Bout, r, S):
* Compute Bout = BlockMix_pwxform{salsa20/8, r, S}(Bin). The input Bin must
* be 128r bytes in length; the output Bout must also be the same size.
*/
// blockmix(Bin, Bout, r, S): yescrypt's BlockMix_pwxform.  When S (the
// pwxform S-boxes) is NULL, fall back to classic scrypt blockmix_salsa8.
// Otherwise each 64-byte sub-block is mixed with 6 PWXFORM rounds, and only
// the final sub-block additionally goes through salsa20/8.
static void
blockmix(const salsa20_blk_t *restrict Bin, salsa20_blk_t *restrict Bout,
size_t r, const __m128i *restrict S)
{
const uint8_t * S0, * S1;
__m128i X0, X1, X2, X3;
size_t i;
if (!S) {
blockmix_salsa8(Bin, Bout, r);
return;
}
// the two S-boxes are the two halves of the S buffer
S0 = (const uint8_t *)S;
S1 = (const uint8_t *)S + S_SIZE_ALL / 2;
/* Convert 128-byte blocks to 64-byte blocks */
r *= 2;
r--;
PREFETCH(&Bin[r], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin[i], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
}
PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
/* X <-- B_{r1 - 1} */
X0 = Bin[r].q[0];
X1 = Bin[r].q[1];
X2 = Bin[r].q[2];
X3 = Bin[r].q[3];
/* for i = 0 to r1 - 1 do */
for (i = 0; i < r; i++) {
/* X <-- H'(X \xor B_i) */
XOR4(Bin[i].q)
PWXFORM
/* B'_i <-- X */
OUT(Bout[i].q)
}
/* Last iteration of the loop above */
XOR4(Bin[i].q)
PWXFORM
/* B'_i <-- H(B'_i) */
SALSA20_8(Bout[i].q)
}
#define XOR4_2(in1, in2) \
X0 = _mm_xor_si128((in1)[0], (in2)[0]); \
X1 = _mm_xor_si128((in1)[1], (in2)[1]); \
X2 = _mm_xor_si128((in1)[2], (in2)[2]); \
X3 = _mm_xor_si128((in1)[3], (in2)[3]);
// blockmix_salsa8_xor(Bin1, Bin2, Bout, r, Bin2_in_ROM):
// Like blockmix_salsa8, but each input block is Bin1[i] ^ Bin2[i].  Returns
// the low 32 bits of the final X state (used by callers as Integerify).
// When Bin2_in_ROM is set, Bin2 points into the large read-only ROM and is
// prefetched with _MM_HINT_NTA so it does not displace cached working-set
// data.
//
// Fix: the last Bin2 prefetch in the ROM branch used _MM_HINT_T0, unlike
// every other Bin2 prefetch in that branch (and unlike the upstream yescrypt
// code); it now uses _MM_HINT_NTA.  Hint-only change, no behavioral effect.
static inline uint32_t
blockmix_salsa8_xor(const salsa20_blk_t *restrict Bin1,
const salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
size_t r, int Bin2_in_ROM)
{
__m128i X0, X1, X2, X3;
size_t i;
r--;
if (Bin2_in_ROM) {
PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_NTA)
PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin2[i * 2], _MM_HINT_NTA)
PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_NTA)
PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
}
PREFETCH(&Bin2[r * 2], _MM_HINT_NTA)
} else {
PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_T0)
PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin2[i * 2], _MM_HINT_T0)
PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_T0)
PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
}
PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
}
PREFETCH(&Bin1[r * 2], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)
/* 1: X <-- B_{2r - 1} */
XOR4_2(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[0].q)
SALSA20_8_XOR_MEM(Bin2[0].q, Bout[0].q)
/* 2: for i = 0 to 2r - 1 do */
for (i = 0; i < r;) {
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[i * 2 + 1].q)
SALSA20_8_XOR_MEM(Bin2[i * 2 + 1].q, Bout[r + 1 + i].q)
i++;
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[i * 2].q)
SALSA20_8_XOR_MEM(Bin2[i * 2].q, Bout[i].q)
}
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[r * 2 + 1].q)
SALSA20_8_XOR_MEM(Bin2[r * 2 + 1].q, Bout[r * 2 + 1].q)
return _mm_cvtsi128_si32(X0);
}
// blockmix_xor(Bin1, Bin2, Bout, r, Bin2_in_ROM, S): pwxform BlockMix over
// the XOR of two inputs, Bin1[i] ^ Bin2[i].  Falls back to the salsa8-only
// variant when S is NULL.  Returns the low 32 bits of the final X state.
static uint32_t
blockmix_xor(const salsa20_blk_t *restrict Bin1,
const salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
size_t r, int Bin2_in_ROM, const __m128i *restrict S)
{
const uint8_t * S0, * S1;
__m128i X0, X1, X2, X3;
size_t i;
if (!S)
return blockmix_salsa8_xor(Bin1, Bin2, Bout, r, Bin2_in_ROM);
S0 = (const uint8_t *)S;
S1 = (const uint8_t *)S + S_SIZE_ALL / 2;
/* Convert 128-byte blocks to 64-byte blocks */
r *= 2;
r--;
// ROM-resident Bin2 is prefetched non-temporally to avoid cache pollution
if (Bin2_in_ROM) {
PREFETCH(&Bin2[r], _MM_HINT_NTA)
PREFETCH(&Bin1[r], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin2[i], _MM_HINT_NTA)
PREFETCH(&Bin1[i], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
}
} else {
PREFETCH(&Bin2[r], _MM_HINT_T0)
PREFETCH(&Bin1[r], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin2[i], _MM_HINT_T0)
PREFETCH(&Bin1[i], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
}
}
PREFETCH_OUT(&Bout[r], _MM_HINT_T0); // trailing ';' is harmless: PREFETCH_OUT expands to nothing
/* X <-- B_{r1 - 1} */
XOR4_2(Bin1[r].q, Bin2[r].q)
/* for i = 0 to r1 - 1 do */
for (i = 0; i < r; i++) {
/* X <-- H'(X \xor B_i) */
XOR4(Bin1[i].q)
XOR4(Bin2[i].q)
PWXFORM
/* B'_i <-- X */
OUT(Bout[i].q)
}
/* Last iteration of the loop above */
XOR4(Bin1[i].q)
XOR4(Bin2[i].q)
PWXFORM
/* B'_i <-- H(B'_i) */
SALSA20_8(Bout[i].q)
return _mm_cvtsi128_si32(X0);
}
#undef XOR4
#define XOR4(in, out) \
(out)[0] = Y0 = _mm_xor_si128((in)[0], (out)[0]); \
(out)[1] = Y1 = _mm_xor_si128((in)[1], (out)[1]); \
(out)[2] = Y2 = _mm_xor_si128((in)[2], (out)[2]); \
(out)[3] = Y3 = _mm_xor_si128((in)[3], (out)[3]);
// blockmix_salsa8_xor_save(Bin1, Bin2, Bout, r):
// Like blockmix_salsa8_xor, but the redefined XOR4(in, out) macro also
// writes Bin1[i] ^ Bin2[i] back into Bin2 (the "save" used by yescrypt's
// YESCRYPT_RW mode), keeping the XOR result in Y0..Y3 for the salsa core.
// Returns the low 32 bits of the final X state.
static inline uint32_t
blockmix_salsa8_xor_save(const salsa20_blk_t *restrict Bin1,
salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
size_t r)
{
__m128i X0, X1, X2, X3, Y0, Y1, Y2, Y3;
size_t i;
r--;
PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_T0)
PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin2[i * 2], _MM_HINT_T0)
PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_T0)
PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
}
PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
PREFETCH(&Bin1[r * 2], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)
/* 1: X <-- B_{2r - 1} */
XOR4_2(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[0].q, Bin2[0].q)
SALSA20_8_XOR_REG(Bout[0].q)
/* 2: for i = 0 to 2r - 1 do */
for (i = 0; i < r;) {
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[i * 2 + 1].q, Bin2[i * 2 + 1].q)
SALSA20_8_XOR_REG(Bout[r + 1 + i].q)
i++;
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[i * 2].q, Bin2[i * 2].q)
SALSA20_8_XOR_REG(Bout[i].q)
}
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)
SALSA20_8_XOR_REG(Bout[r * 2 + 1].q)
return _mm_cvtsi128_si32(X0);
}
#define XOR4_Y \
X0 = _mm_xor_si128(X0, Y0); \
X1 = _mm_xor_si128(X1, Y1); \
X2 = _mm_xor_si128(X2, Y2); \
X3 = _mm_xor_si128(X3, Y3);
// blockmix_xor_save(Bin1, Bin2, Bout, r, S): pwxform BlockMix over
// Bin1 ^ Bin2 that also stores the XOR back into Bin2 (via the redefined
// XOR4(in, out) macro).  Falls back to the salsa8-only "save" variant when
// S is NULL.  Returns the low 32 bits of the final X state.
static uint32_t
blockmix_xor_save(const salsa20_blk_t *restrict Bin1,
salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
size_t r, const __m128i *restrict S)
{
const uint8_t * S0, * S1;
__m128i X0, X1, X2, X3, Y0, Y1, Y2, Y3;
size_t i;
if (!S)
return blockmix_salsa8_xor_save(Bin1, Bin2, Bout, r);
S0 = (const uint8_t *)S;
S1 = (const uint8_t *)S + S_SIZE_ALL / 2;
/* Convert 128-byte blocks to 64-byte blocks */
r *= 2;
r--;
PREFETCH(&Bin2[r], _MM_HINT_T0)
PREFETCH(&Bin1[r], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin2[i], _MM_HINT_T0)
PREFETCH(&Bin1[i], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
}
PREFETCH_OUT(&Bout[r], _MM_HINT_T0); // trailing ';' harmless: macro expands to nothing
/* X <-- B_{r1 - 1} */
XOR4_2(Bin1[r].q, Bin2[r].q)
/* for i = 0 to r1 - 1 do */
for (i = 0; i < r; i++) {
XOR4(Bin1[i].q, Bin2[i].q)
/* X <-- H'(X \xor B_i) */
XOR4_Y
PWXFORM
/* B'_i <-- X */
OUT(Bout[i].q)
}
/* Last iteration of the loop above */
XOR4(Bin1[i].q, Bin2[i].q)
XOR4_Y
PWXFORM
/* B'_i <-- H(B'_i) */
SALSA20_8(Bout[i].q)
return _mm_cvtsi128_si32(X0);
}
/* Clean up the file-local helper macros so they do not leak out of this
 * header.  Bug fix: the macros actually defined above are PWXFORM_X_T and
 * PWXFORM_SIMD; the old list #undef'd the nonexistent PWXFORM_SIMD_1 and
 * PWXFORM_SIMD_2, leaving the real ones defined for the rest of the
 * translation unit. */
#undef ARX
#undef SALSA20_2ROUNDS
#undef SALSA20_8
#undef SALSA20_8_XOR_ANY
#undef SALSA20_8_XOR_MEM
#undef SALSA20_8_XOR_REG
#undef PWXFORM_X_T
#undef PWXFORM_SIMD
#undef PWXFORM_ROUND
#undef PWXFORM
#undef OUT
#undef XOR4
#undef XOR4_2
#undef XOR4_Y
/**
* integerify(B, r):
* Return the result of parsing B_{2r-1} as a little-endian integer.
*/
// integerify(B, r): parse B_{2r-1} as a little-endian integer.  Only the
// low 32 bits are needed, and the first word of the block already holds
// them in host-usable form.
static inline uint32_t
integerify(const salsa20_blk_t * B, size_t r)
{
	const salsa20_blk_t * last = &B[2 * r - 1];
	return last->w[0];
}
/**
* smix1(B, r, N, flags, V, NROM, shared, XY, S):
* Compute first loop of B = SMix_r(B, N). The input B must be 128r bytes in
* length; the temporary storage V must be 128rN bytes in length; the temporary
* storage XY must be 128r bytes in length. The value N must be even and no
* smaller than 2. The array V must be aligned to a multiple of 64 bytes, and
* arrays B and XY to a multiple of at least 16 bytes (aligning them to 64
* bytes as well saves cache lines, but might result in cache bank conflicts).
*/
// smix1: first SMix loop -- fill V[0..N-1] with successive BlockMix outputs,
// with three code paths: ROM-interleaved (NROM with the ROM mask's bit 0
// set), YESCRYPT_RW (wrapped back-references into already-written V), and
// classic scrypt (straight sequential fill).  The final state is left in XY
// and written back to B.
static void
smix1(uint8_t * B, size_t r, uint32_t N, yescrypt_flags_t flags,
salsa20_blk_t * V, uint32_t NROM, const yescrypt_shared_t * shared,
salsa20_blk_t * XY, void * S)
{
const salsa20_blk_t * VROM = shared->shared1.aligned;
uint32_t VROM_mask = shared->mask1;
size_t s = 2 * r;
salsa20_blk_t * X = V, * Y;
uint32_t i, j;
size_t k;
/* 1: X <-- B */
/* 3: V_i <-- X */
// words are stored in B in a shuffled order (index i*5 mod 16) -- the
// layout the SIMD salsa core expects; decoded little-endian
for (k = 0; k < 2 * r; k++) {
for (i = 0; i < 16; i++) {
X[k].w[i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
}
}
if (NROM && (VROM_mask & 1)) {
uint32_t n;
salsa20_blk_t * V_n;
const salsa20_blk_t * V_j;
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
Y = &V[s];
blockmix(X, Y, r, S);
X = &V[2 * s];
// NOTE(review): this condition is always true here, since the enclosing
// branch already required (VROM_mask & 1) != 0 -- confirm vs upstream
if ((1 & VROM_mask) == 1) {
/* j <-- Integerify(X) mod NROM */
j = integerify(Y, r) & (NROM - 1);
V_j = &VROM[j * s];
/* X <-- H(X \xor VROM_j) */
j = blockmix_xor(Y, V_j, X, r, 1, S);
} else {
/* X <-- H(X) */
blockmix(Y, X, r, S);
j = integerify(X, r);
}
for (n = 2; n < N; n <<= 1) {
uint32_t m = (n < N / 2) ? n : (N - 1 - n);
V_n = &V[n * s];
/* 2: for i = 0 to N - 1 do */
for (i = 1; i < m; i += 2) {
/* j <-- Wrap(Integerify(X), i) */
j &= n - 1;
j += i - 1;
V_j = &V[j * s];
/* X <-- X \xor V_j */
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
Y = &V_n[i * s];
j = blockmix_xor(X, V_j, Y, r, 0, S);
// interleave a ROM reference whenever the block index matches the mask
if (((n + i) & VROM_mask) == 1) {
/* j <-- Integerify(X) mod NROM */
j &= NROM - 1;
V_j = &VROM[j * s];
} else {
/* j <-- Wrap(Integerify(X), i) */
j &= n - 1;
j += i;
V_j = &V[j * s];
}
/* X <-- H(X \xor VROM_j) */
X = &V_n[(i + 1) * s];
j = blockmix_xor(Y, V_j, X, r, 1, S);
}
}
n >>= 1;
/* j <-- Wrap(Integerify(X), i) */
j &= n - 1;
j += N - 2 - n;
V_j = &V[j * s];
/* X <-- X \xor V_j */
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
Y = &V[(N - 1) * s];
j = blockmix_xor(X, V_j, Y, r, 0, S);
if (((N - 1) & VROM_mask) == 1) {
/* j <-- Integerify(X) mod NROM */
j &= NROM - 1;
V_j = &VROM[j * s];
} else {
/* j <-- Wrap(Integerify(X), i) */
j &= n - 1;
j += N - 1 - n;
V_j = &V[j * s];
}
/* X <-- X \xor V_j */
/* 4: X <-- H(X) */
X = XY;
blockmix_xor(Y, V_j, X, r, 1, S);
} else if (flags & YESCRYPT_RW) {
uint32_t n;
salsa20_blk_t * V_n, * V_j;
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
Y = &V[s];
blockmix(X, Y, r, S);
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
X = &V[2 * s];
blockmix(Y, X, r, S);
j = integerify(X, r);
for (n = 2; n < N; n <<= 1) {
uint32_t m = (n < N / 2) ? n : (N - 1 - n);
V_n = &V[n * s];
/* 2: for i = 0 to N - 1 do */
for (i = 1; i < m; i += 2) {
Y = &V_n[i * s];
/* j <-- Wrap(Integerify(X), i) */
j &= n - 1;
j += i - 1;
V_j = &V[j * s];
/* X <-- X \xor V_j */
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
j = blockmix_xor(X, V_j, Y, r, 0, S);
/* j <-- Wrap(Integerify(X), i) */
j &= n - 1;
j += i;
V_j = &V[j * s];
/* X <-- X \xor V_j */
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
X = &V_n[(i + 1) * s];
j = blockmix_xor(Y, V_j, X, r, 0, S);
}
}
n >>= 1;
/* j <-- Wrap(Integerify(X), i) */
j &= n - 1;
j += N - 2 - n;
V_j = &V[j * s];
/* X <-- X \xor V_j */
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
Y = &V[(N - 1) * s];
j = blockmix_xor(X, V_j, Y, r, 0, S);
/* j <-- Wrap(Integerify(X), i) */
j &= n - 1;
j += N - 1 - n;
V_j = &V[j * s];
/* X <-- X \xor V_j */
/* 4: X <-- H(X) */
X = XY;
blockmix_xor(Y, V_j, X, r, 0, S);
} else {
/* 2: for i = 0 to N - 1 do */
// classic scrypt: V is filled strictly sequentially, two blocks per
// iteration so X and Y can ping-pong without copies
for (i = 1; i < N - 1; i += 2) {
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
Y = &V[i * s];
blockmix(X, Y, r, S);
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
X = &V[(i + 1) * s];
blockmix(Y, X, r, S);
}
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
Y = &V[i * s];
blockmix(X, Y, r, S);
/* 4: X <-- H(X) */
X = XY;
blockmix(Y, X, r, S);
}
/* B' <-- X */
// re-encode with the same shuffled word order used on input
for (k = 0; k < 2 * r; k++) {
for (i = 0; i < 16; i++) {
le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X[k].w[i]);
}
}
}
/**
* smix2(B, r, N, Nloop, flags, V, NROM, shared, XY, S):
* Compute second loop of B = SMix_r(B, N). The input B must be 128r bytes in
* length; the temporary storage V must be 128rN bytes in length; the temporary
* storage XY must be 256r bytes in length. The value N must be a power of 2
* greater than 1. The value Nloop must be even. The array V must be aligned
* to a multiple of 64 bytes, and arrays B and XY to a multiple of at least 16
* bytes (aligning them to 64 bytes as well saves cache lines, but might result
* in cache bank conflicts).
*/
// smix2: second SMix loop -- Nloop data-dependent lookups into V (and
// optionally the ROM), two per iteration so X and Y ping-pong and the final
// state always ends in X.  The _save variants additionally write the XOR
// back into V (YESCRYPT_RW).  The result is re-encoded into B.
static void
smix2(uint8_t * B, size_t r, uint32_t N, uint64_t Nloop,
yescrypt_flags_t flags, salsa20_blk_t * V, uint32_t NROM,
const yescrypt_shared_t * shared, salsa20_blk_t * XY, void * S)
{
const salsa20_blk_t * VROM = shared->shared1.aligned;
uint32_t VROM_mask = shared->mask1;
size_t s = 2 * r;
salsa20_blk_t * X = XY, * Y = &XY[s];
uint64_t i;
uint32_t j;
size_t k;
if (Nloop == 0)
return;
/* X <-- B' */
/* 3: V_i <-- X */
// decode B using the shuffled (i*5 mod 16) word order expected by the
// SIMD salsa core
for (k = 0; k < 2 * r; k++) {
for (i = 0; i < 16; i++) {
X[k].w[i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
}
}
i = Nloop / 2;
/* 7: j <-- Integerify(X) mod N */
j = integerify(X, r) & (N - 1);
/*
* Normally, NROM implies YESCRYPT_RW, but we check for these separately
* because YESCRYPT_PARALLEL_SMIX resets YESCRYPT_RW for the smix2() calls
* operating on the entire V.
*/
if (NROM && (flags & YESCRYPT_RW)) {
/* 6: for i = 0 to N - 1 do */
for (i = 0; i < Nloop; i += 2) {
salsa20_blk_t * V_j = &V[j * s];
/* 8: X <-- H(X \xor V_j) */
/* V_j <-- Xprev \xor V_j */
/* j <-- Integerify(X) mod NROM */
j = blockmix_xor_save(X, V_j, Y, r, S);
// every VROM_mask-th iteration references the ROM instead of V
if (((i + 1) & VROM_mask) == 1) {
const salsa20_blk_t * VROM_j;
j &= NROM - 1;
VROM_j = &VROM[j * s];
/* X <-- H(X \xor VROM_j) */
/* 7: j <-- Integerify(X) mod N */
j = blockmix_xor(Y, VROM_j, X, r, 1, S);
} else {
j &= N - 1;
V_j = &V[j * s];
/* 8: X <-- H(X \xor V_j) */
/* V_j <-- Xprev \xor V_j */
/* j <-- Integerify(X) mod NROM */
j = blockmix_xor_save(Y, V_j, X, r, S);
}
j &= N - 1;
V_j = &V[j * s];
}
} else if (NROM) {
/* 6: for i = 0 to N - 1 do */
for (i = 0; i < Nloop; i += 2) {
const salsa20_blk_t * V_j = &V[j * s];
/* 8: X <-- H(X \xor V_j) */
/* V_j <-- Xprev \xor V_j */
/* j <-- Integerify(X) mod NROM */
j = blockmix_xor(X, V_j, Y, r, 0, S);
if (((i + 1) & VROM_mask) == 1) {
j &= NROM - 1;
V_j = &VROM[j * s];
} else {
j &= N - 1;
V_j = &V[j * s];
}
/* X <-- H(X \xor VROM_j) */
/* 7: j <-- Integerify(X) mod N */
j = blockmix_xor(Y, V_j, X, r, 1, S);
j &= N - 1;
V_j = &V[j * s];
}
} else if (flags & YESCRYPT_RW) {
/* 6: for i = 0 to N - 1 do */
do {
salsa20_blk_t * V_j = &V[j * s];
/* 8: X <-- H(X \xor V_j) */
/* V_j <-- Xprev \xor V_j */
/* 7: j <-- Integerify(X) mod N */
j = blockmix_xor_save(X, V_j, Y, r, S);
j &= N - 1;
V_j = &V[j * s];
/* 8: X <-- H(X \xor V_j) */
/* V_j <-- Xprev \xor V_j */
/* 7: j <-- Integerify(X) mod N */
j = blockmix_xor_save(Y, V_j, X, r, S);
j &= N - 1;
} while (--i);
} else {
/* 6: for i = 0 to N - 1 do */
do {
const salsa20_blk_t * V_j = &V[j * s];
/* 8: X <-- H(X \xor V_j) */
/* 7: j <-- Integerify(X) mod N */
j = blockmix_xor(X, V_j, Y, r, 0, S);
j &= N - 1;
V_j = &V[j * s];
/* 8: X <-- H(X \xor V_j) */
/* 7: j <-- Integerify(X) mod N */
j = blockmix_xor(Y, V_j, X, r, 0, S);
j &= N - 1;
} while (--i);
}
/* 10: B' <-- X */
for (k = 0; k < 2 * r; k++) {
for (i = 0; i < 16; i++) {
le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X[k].w[i]);
}
}
}
/**
* p2floor(x):
* Largest power of 2 not greater than argument.
*/
/*
 * p2floor(x):
 * Largest power of 2 not greater than x (returns 0 for x == 0).
 * Repeatedly clearing the lowest set bit leaves only the highest one.
 */
static uint64_t
p2floor(uint64_t x)
{
	while (x & (x - 1))
		x &= x - 1;
	return x;
}
/**
* smix(B, r, N, p, t, flags, V, NROM, shared, XY, S):
* Compute B = SMix_r(B, N). The input B must be 128rp bytes in length; the
* temporary storage V must be 128rN bytes in length; the temporary storage XY
* must be 256r or 256rp bytes in length (the larger size is required with
* OpenMP-enabled builds). The value N must be a power of 2 greater than 1.
* The array V must be aligned to a multiple of 64 bytes, and arrays B and
* XY to a multiple of at least 16 bytes (aligning them to 64 bytes as well
* saves cache lines and helps avoid false sharing in OpenMP-enabled builds
* when p > 1, but it might also result in cache bank conflicts).
*/
// smix: top-level SMix driver.  Splits V into p chunks, runs smix1+smix2 on
// each (optionally in parallel with OpenMP), then -- if more smix2
// iterations remain (Nloop_all > Nloop_rw) -- runs a read-only smix2 pass
// over the entire V for each lane.  t trades extra computation time for the
// same peak memory.
static void
smix(uint8_t * B, size_t r, uint32_t N, uint32_t p, uint32_t t,
yescrypt_flags_t flags,
salsa20_blk_t * V, uint32_t NROM, const yescrypt_shared_t * shared,
salsa20_blk_t * XY, void * S)
{
size_t s = 2 * r;
uint32_t Nchunk = N / p;
uint64_t Nloop_all, Nloop_rw;
uint32_t i;
// total number of smix2 iterations, scaled by the time parameter t
Nloop_all = Nchunk;
if (flags & YESCRYPT_RW) {
if (t <= 1) {
if (t)
Nloop_all *= 2; /* 2/3 */
Nloop_all = (Nloop_all + 2) / 3; /* 1/3, round up */
} else {
Nloop_all *= t - 1;
}
} else if (t) {
if (t == 1)
Nloop_all += (Nloop_all + 1) / 2; /* 1.5, round up */
Nloop_all *= t;
}
// portion of the iterations done in read-write mode on each lane's own chunk
Nloop_rw = 0;
if (flags & __YESCRYPT_INIT_SHARED)
Nloop_rw = Nloop_all;
else if (flags & YESCRYPT_RW)
Nloop_rw = Nloop_all / p;
// smix1/smix2 require even block counts
Nchunk &= ~(uint32_t)1; /* round down to even */
Nloop_all++; Nloop_all &= ~(uint64_t)1; /* round up to even */
Nloop_rw &= ~(uint64_t)1; /* round down to even */
#ifdef _OPENMP
#pragma omp parallel if (p > 1) default(none) private(i) shared(B, r, N, p, flags, V, NROM, shared, XY, S, s, Nchunk, Nloop_all, Nloop_rw)
{
#pragma omp for
#endif
for (i = 0; i < p; i++) {
uint32_t Vchunk = i * Nchunk;
uint8_t * Bp = &B[128 * r * i];
salsa20_blk_t * Vp = &V[Vchunk * s];
#ifdef _OPENMP
salsa20_blk_t * XYp = &XY[i * (2 * s)];
#else
salsa20_blk_t * XYp = XY;
#endif
// the last lane absorbs the rounding remainder of N / p
uint32_t Np = (i < p - 1) ? Nchunk : (N - Vchunk);
void * Sp = S ? ((uint8_t *)S + i * S_SIZE_ALL) : S;
if (Sp)
// initialize this lane's pwxform S-boxes from Bp (pwxform
// itself disabled for this bootstrap pass)
smix1(Bp, 1, S_SIZE_ALL / 128,
flags & ~YESCRYPT_PWXFORM,
Sp, NROM, shared, XYp, NULL);
if (!(flags & __YESCRYPT_INIT_SHARED_2))
smix1(Bp, r, Np, flags, Vp, NROM, shared, XYp, Sp);
smix2(Bp, r, p2floor(Np), Nloop_rw, flags, Vp,
NROM, shared, XYp, Sp);
}
// remaining iterations: read-only passes over the entire V
if (Nloop_all > Nloop_rw) {
#ifdef _OPENMP
#pragma omp for
#endif
for (i = 0; i < p; i++) {
uint8_t * Bp = &B[128 * r * i];
#ifdef _OPENMP
salsa20_blk_t * XYp = &XY[i * (2 * s)];
#else
salsa20_blk_t * XYp = XY;
#endif
void * Sp = S ? ((uint8_t *)S + i * S_SIZE_ALL) : S;
smix2(Bp, r, N, Nloop_all - Nloop_rw,
flags & ~YESCRYPT_RW, V, NROM, shared, XYp, Sp);
}
}
#ifdef _OPENMP
}
#endif
}
/**
* yescrypt_kdf(shared, local, passwd, passwdlen, salt, saltlen,
* N, r, p, t, flags, buf, buflen):
* Compute scrypt(passwd[0 .. passwdlen - 1], salt[0 .. saltlen - 1], N, r,
* p, buflen), or a revision of scrypt as requested by flags and shared, and
* write the result into buf. The parameters r, p, and buflen must satisfy
* r * p < 2^30 and buflen <= (2^32 - 1) * 32. The parameter N must be a power
* of 2 greater than 1. (This optimized implementation currently additionally
* limits N to the range from 8 to 2^31, but other implementation might not.)
*
* t controls computation time while not affecting peak memory usage. shared
* and flags may request special modes as described in yescrypt.h. local is
* the thread-local data structure, allowing to preserve and reuse a memory
* allocation across calls, thereby reducing its overhead.
*
* Return 0 on success; or -1 on error.
*/
/*
 * Validate parameters, (re)allocate working memory, then run the
 * scrypt/yescrypt core: PBKDF2 -> smix -> PBKDF2, with optional SHA-256
 * pre-hashing of the password and SCRAM-style post-hashing of the output.
 * Returns 0 on success, -1 with errno set (EINVAL/EFBIG/ENOMEM) on error.
 */
int
yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local,
    const uint8_t * passwd, size_t passwdlen,
    const uint8_t * salt, size_t saltlen,
    uint64_t N, uint32_t r, uint32_t p, uint32_t t, yescrypt_flags_t flags,
    uint8_t * buf, size_t buflen)
{
	yescrypt_region_t tmp;	/* scratch region, used only in INIT_SHARED mode */
	uint64_t NROM;
	size_t B_size, V_size, XY_size, need;
	uint8_t * B, * S;
	salsa20_blk_t * V, * XY;
	uint8_t sha256[32];

	/*
	 * YESCRYPT_PARALLEL_SMIX is a no-op at p = 1 for its intended purpose,
	 * so don't let it have side-effects.  Without this adjustment, it'd
	 * enable the SHA-256 password pre-hashing and output post-hashing,
	 * because any deviation from classic scrypt implies those.
	 */
	if (p == 1)
		flags &= ~YESCRYPT_PARALLEL_SMIX;

	/* Sanity-check parameters */
	if (flags & ~YESCRYPT_KNOWN_FLAGS) {
		errno = EINVAL;
		return -1;
	}
#if SIZE_MAX > UINT32_MAX
	/* output limit: (2^32 - 1) * 32 bytes (PBKDF2-SHA256 block count) */
	if (buflen > (((uint64_t)(1) << 32) - 1) * 32) {
		errno = EFBIG;
		return -1;
	}
#endif
	if ((uint64_t)(r) * (uint64_t)(p) >= (1 << 30)) {
		errno = EFBIG;
		return -1;
	}
	if (N > UINT32_MAX) {
		errno = EFBIG;
		return -1;
	}
	/* N must be a power of 2, at least 8 in this implementation */
	if (((N & (N - 1)) != 0) || (N <= 7) || (r < 1) || (p < 1)) {
		errno = EINVAL;
		return -1;
	}
	/* parallel smix needs a chunk of at least 8 V elements per lane */
	if ((flags & YESCRYPT_PARALLEL_SMIX) && (N / p <= 7)) {
		errno = EINVAL;
		return -1;
	}
	/* guard the size arithmetic below against size_t overflow */
	if ((r > SIZE_MAX / 256 / p) ||
	    (N > SIZE_MAX / 128 / r)) {
		errno = ENOMEM;
		return -1;
	}
#ifdef _OPENMP
	if (!(flags & YESCRYPT_PARALLEL_SMIX) &&
	    (N > SIZE_MAX / 128 / (r * p))) {
		errno = ENOMEM;
		return -1;
	}
#endif
	if ((flags & YESCRYPT_PWXFORM) &&
#ifndef _OPENMP
	    (flags & YESCRYPT_PARALLEL_SMIX) &&
#endif
	    p > SIZE_MAX / S_SIZE_ALL) {
		errno = ENOMEM;
		return -1;
	}

	/* Derive the ROM size in 128r-byte blocks, if a ROM is attached */
	NROM = 0;
	if (shared->shared1.aligned) {
		NROM = shared->shared1.aligned_size / ((size_t)128 * r);
		if (NROM > UINT32_MAX) {
			errno = EFBIG;
			return -1;
		}
		/* using a ROM requires YESCRYPT_RW and a power-of-2 size >= 8 */
		if (((NROM & (NROM - 1)) != 0) || (NROM <= 7) ||
		    !(flags & YESCRYPT_RW)) {
			errno = EINVAL;
			return -1;
		}
	}

	/* Allocate memory */
	V = NULL;
	V_size = (size_t)128 * r * N;
#ifdef _OPENMP
	if (!(flags & YESCRYPT_PARALLEL_SMIX))
		V_size *= p;	/* one V per lane when smix runs sequentially */
#endif
	need = V_size;
	if (flags & __YESCRYPT_INIT_SHARED) {
		/* INIT_SHARED: V lives in the caller-provided local region */
		if (local->aligned_size < need) {
			if (local->base || local->aligned ||
			    local->base_size || local->aligned_size) {
				errno = EINVAL;
				return -1;
			}
			if (!alloc_region(local, need))
				return -1;
		}
		V = (salsa20_blk_t *)local->aligned;
		need = 0;
	}
	B_size = (size_t)128 * r * p;
	need += B_size;
	if (need < B_size) {	/* overflow check */
		errno = ENOMEM;
		return -1;
	}
	XY_size = (size_t)256 * r;
#ifdef _OPENMP
	XY_size *= p;	/* one XY scratch area per thread */
#endif
	need += XY_size;
	if (need < XY_size) {	/* overflow check */
		errno = ENOMEM;
		return -1;
	}
	if (flags & YESCRYPT_PWXFORM) {
		size_t S_size = S_SIZE_ALL;
#ifdef _OPENMP
		S_size *= p;
#else
		if (flags & YESCRYPT_PARALLEL_SMIX)
			S_size *= p;
#endif
		need += S_size;
		if (need < S_size) {	/* overflow check */
			errno = ENOMEM;
			return -1;
		}
	}
	if (flags & __YESCRYPT_INIT_SHARED) {
		/* B and XY go into a temporary region; V stays in local */
		if (!alloc_region(&tmp, need))
			return -1;
		B = (uint8_t *)tmp.aligned;
		XY = (salsa20_blk_t *)((uint8_t *)B + B_size);
	} else {
		init_region(&tmp);
		/* grow (never shrink) the reusable thread-local region */
		if (local->aligned_size < need) {
			if (free_region(local))
				return -1;
			if (!alloc_region(local, need))
				return -1;
		}
		/* carve B, V, XY out of the single allocation, in that order */
		B = (uint8_t *)local->aligned;
		V = (salsa20_blk_t *)((uint8_t *)B + B_size);
		XY = (salsa20_blk_t *)((uint8_t *)V + V_size);
	}
	S = NULL;
	if (flags & YESCRYPT_PWXFORM)
		S = (uint8_t *)XY + XY_size;

	/* any deviation from classic scrypt pre-hashes the password */
	if (t || flags) {
		SHA256_CTX ctx;
		SHA256_Init(&ctx);
		SHA256_Update(&ctx, passwd, passwdlen);
		SHA256_Final(sha256, &ctx);
		passwd = sha256;
		passwdlen = sizeof(sha256);
	}

	/* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
	PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, 1, B, B_size);

	if (t || flags)
		memcpy(sha256, B, sizeof(sha256));

	if (p == 1 || (flags & YESCRYPT_PARALLEL_SMIX)) {
		/* one smix call handles all p lanes internally */
		smix(B, r, N, p, t, flags, V, NROM, shared, XY, S);
	} else {
		uint32_t i;

		/* 2: for i = 0 to p - 1 do */
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(B, r, N, p, t, flags, V, NROM, shared, XY, S)
#endif
		for (i = 0; i < p; i++) {
			/* 3: B_i <-- MF(B_i, N) */
#ifdef _OPENMP
			/* each iteration gets disjoint V/XY/S slices */
			smix(&B[(size_t)128 * r * i], r, N, 1, t, flags,
			    &V[(size_t)2 * r * i * N],
			    NROM, shared,
			    &XY[(size_t)4 * r * i],
			    S ? &S[S_SIZE_ALL * i] : S);
#else
			smix(&B[(size_t)128 * r * i], r, N, 1, t, flags, V,
			    NROM, shared, XY, S);
#endif
		}
	}

	/* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
	PBKDF2_SHA256(passwd, passwdlen, B, B_size, 1, buf, buflen);

	/*
	 * Except when computing classic scrypt, allow all computation so far
	 * to be performed on the client.  The final steps below match those of
	 * SCRAM (RFC 5802), so that an extension of SCRAM (with the steps so
	 * far in place of SCRAM's use of PBKDF2 and with SHA-256 in place of
	 * SCRAM's use of SHA-1) would be usable with yescrypt hashes.
	 */
	if ((t || flags) && buflen == sizeof(sha256)) {
		/* Compute ClientKey */
		{
			HMAC_SHA256_CTX ctx;
			HMAC_SHA256_Init(&ctx, buf, buflen);
			HMAC_SHA256_Update(&ctx, "Client Key", 10);
			HMAC_SHA256_Final(sha256, &ctx);
		}
		/* Compute StoredKey */
		{
			SHA256_CTX ctx;
			SHA256_Init(&ctx);
			SHA256_Update(&ctx, sha256, sizeof(sha256));
			SHA256_Final(buf, &ctx);
		}
	}

	if (free_region(&tmp))
		return -1;

	/* Success! */
	return 0;
}
|
openmp_yield.c | #include <omp.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#define NUM_TASKS 1000
#define VERBOSE
static volatile int flag_one_cntr = 0;
static volatile int flag_two_cntr = 0;
int main(int argc, char **argv)
{
  // Probe how the OpenMP runtime schedules tasks across a taskyield:
  // every thread except thread 0 is parked in a busy-wait "trap" task, so
  // only thread 0 makes progress, and the counter values it observes after
  // its taskyield reveal the runtime's queueing policy (see printfs below).
  // NOTE(review): ++ on a volatile int is not atomic, but only thread 0
  // writes the counters here; trapped threads merely read flag_two_cntr.
#pragma omp parallel
#pragma omp master
  // one extra task per extra thread so each non-master thread can take a trap
  for (int i = 0; i < NUM_TASKS+omp_get_num_threads()-1; ++i) {
#pragma omp task untied firstprivate(i)
    {
      if (omp_get_thread_num() > 0) {
        // trap all but thread 0
        printf("Trapping thread %d\n", omp_get_thread_num());
        // spin until thread 0 has completed all NUM_TASKS tasks
        while(flag_two_cntr != NUM_TASKS) { }
        printf("Un-Trapping thread %d\n", omp_get_thread_num());
      } else {
        // flag_one_cntr counts task entries, flag_two_cntr task completions
        int task_id = ++flag_one_cntr;
#pragma omp taskyield
        // when we come back we first check the counter
        if (task_id == 1) {
          // nothing ran in between: taskyield was a no-op
          if (task_id == flag_one_cntr) {
            printf("NOOP\n");
          }
          // some other tasks were running in between
          else if (flag_two_cntr == (NUM_TASKS - 1)) {
            printf("STACK (unlimited)\n");
          } else if (flag_two_cntr == flag_one_cntr-1) {
            printf("STACK(depth=%d)\n", flag_one_cntr);
          } else if (flag_one_cntr == (NUM_TASKS) /*&& flag_two_cntr == 0*/) {
            printf("CYCLIC\n");
          } else if (flag_one_cntr > 0 /*&& flag_two_cntr == 0*/) {
            printf("N-CYCLIC (N=%d)\n", flag_one_cntr);
          } else {
            printf("UNKNOWN: flag_one_cntr: %d, flag_two_cntr: %d\n", flag_one_cntr, flag_two_cntr);
          }
        }
        // quirk for Cray compiler
        (void)flag_two_cntr;
#pragma omp taskyield
        ++flag_two_cntr;
      } // thread-trap
    } // pragma omp task
  } // for()
  return 0;
}
|
kmp_num_teams.c | // RUN: %libomp-compile-and-run
// UNSUPPORTED: gcc
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#define NT 8
#ifdef __cplusplus
extern "C" {
#endif
typedef int kmp_int32;
typedef struct ident {
kmp_int32 reserved_1;
kmp_int32 flags;
kmp_int32 reserved_2;
kmp_int32 reserved_3;
char const *psource;
} ident_t;
extern int __kmpc_global_thread_num(ident_t *);
extern void __kmpc_push_num_teams_51(ident_t *, kmp_int32, kmp_int32, kmp_int32,
kmp_int32);
#ifdef __cplusplus
}
#endif
// Push (num_teams_lb, num_teams_ub, thread_limit) to the runtime via the
// __kmpc_push_num_teams_51 entry point, run a teams+parallel region, and
// verify the granted team/thread counts respect those bounds. A value of 0
// means "unspecified" and falls back to what the runtime actually provided.
// Exits with status 1 on any violation.
void check_num_teams(int num_teams_lb, int num_teams_ub, int thread_limit) {
  int nteams, nthreads;  // written once by team 0 / thread 0 inside the region
  int a = 0;             // incremented once per thread per team
  int gtid = __kmpc_global_thread_num(NULL);
  __kmpc_push_num_teams_51(NULL, gtid, num_teams_lb, num_teams_ub,
                           thread_limit);
#pragma omp teams default(shared)
  {
    int priv_nteams;
    int team_num = omp_get_team_num();
    if (team_num == 0)
      nteams = omp_get_num_teams();
    priv_nteams = omp_get_num_teams();
#pragma omp parallel
    {
      int priv_nthreads;
      int thread_num = omp_get_thread_num();
      int teams_ub, teams_lb, thr_limit;
      if (team_num == 0 && thread_num == 0)
        nthreads = omp_get_num_threads();
      priv_nthreads = omp_get_num_threads();
      // substitute the runtime-provided values for unspecified (0) bounds
      teams_ub = (num_teams_ub ? num_teams_ub : priv_nteams);
      teams_lb = (num_teams_lb ? num_teams_lb : teams_ub);
      thr_limit = (thread_limit ? thread_limit : priv_nthreads);
      if (priv_nteams < teams_lb || priv_nteams > teams_ub) {
        fprintf(stderr, "error: invalid number of teams=%d\n", priv_nteams);
        exit(1);
      }
      if (priv_nthreads > thr_limit) {
        fprintf(stderr, "error: invalid number of threads=%d\n", priv_nthreads);
        exit(1);
      }
#pragma omp atomic
      a++;
    }
  }
  // every thread of every team incremented `a` exactly once
  // (assumes all teams received the same thread count — TODO confirm)
  if (a != nteams * nthreads) {
    fprintf(stderr, "error: a (%d) != nteams * nthreads (%d)\n", a,
            nteams * nthreads);
    exit(1);
  } else {
    printf("#teams %d, #threads %d: Hello!\n", nteams, nthreads);
  }
}
int main(int argc, char *argv[]) {
  // Each row is {num_teams lower bound, num_teams upper bound, thread_limit};
  // a 0 entry means "leave that value unspecified".
  static const int params[][3] = {
      {1, 8, 2}, {2, 2, 2}, {2, 2, 0}, {8, 16, 2}, {9, 16, 0},
      {9, 16, 2}, {2, 3, 0}, {0, 0, 2}, {0, 4, 0}, {0, 2, 2}};
  const int ncases = (int)(sizeof(params) / sizeof(params[0]));
  omp_set_num_threads(NT);
  for (int c = 0; c < ncases; ++c)
    check_num_teams(params[c][0], params[c][1], params[c][2]);
  printf("Test Passed\n");
  return 0;
}
|
fixup.c | /*---------------------------------------------------------------------------------
FIXUP.C
-Repair integration failures
-Apply geometrical floors to density and internal energy
-Apply ceiling to Lorentz factor
-Apply ceiling to total fluid entropy (only if ELECTRONS enabled)
-Apply ceiling to sigma and plasma beta inverse
-Apply temperature ceiling
-Replace inversion failure grid zones with values interpolated from
neighbouring zones
---------------------------------------------------------------------------------*/
#include "decs.h"
// Floor Codes: bit masks
#define HIT_FLOOR_GEOM_RHO 1
#define HIT_FLOOR_GEOM_U 2
#define HIT_FLOOR_B_RHO 4
#define HIT_FLOOR_B_U 8
#define HIT_FLOOR_TEMP 16
#define HIT_FLOOR_GAMMA 32
#define HIT_FLOOR_KTOT 64
// Point in m, around which to steepen floor prescription, eventually toward r^-3
#define FLOOR_R_CHAR 10
static struct FluidState *Stmp;
void fixup_ceiling(struct GridGeom *G, struct FluidState *S, int i, int j);
void fixup_floor(struct GridGeom *G, struct FluidState *S, int i, int j);
// Apply floors to density, internal energy
// Apply all ceilings and floors to the primitive variables across the grid,
// then (when DEBUG is on) report how often each limiter was hit.
void fixup(struct GridGeom *G, struct FluidState *S)
{
  timer_start(TIMER_FIXUP);

  // One-time allocation of the scratch fluid state used by fixup_floor().
  // NOTE(review): calloc's result is not checked; a failed allocation would
  // fault on first use — confirm this is acceptable here.
  static int firstc = 1;
  if (firstc)
  {
    Stmp = calloc(1,sizeof(struct FluidState));
    firstc = 0;
  }

  // Clear the per-zone floor-hit bitmask (HIT_FLOOR_* bits)
#pragma omp parallel for simd
  ZLOOPALL fflag[j][i] = 0;

#pragma omp parallel for collapse(2)
  ZLOOP fixup_ceiling(G, S, i, j);

  // Bulk call before bsq calculation below
  get_state_vec(G, S, CENT, 0, N2-1, 0, N1-1);

#pragma omp parallel for collapse(2)
  ZLOOP fixup_floor(G, S, i, j);

  // Some debug info about floors
#if DEBUG
  int n_geom_rho = 0, n_geom_u = 0, n_b_rho = 0, n_b_u = 0, n_temp = 0, n_gamma = 0, n_ktot = 0;

  // Tally each bit of the floor-hit mask with its own reduction
#pragma omp parallel for collapse(2) reduction(+:n_geom_rho) reduction(+:n_geom_u) \
    reduction(+:n_b_rho) reduction(+:n_b_u) reduction(+:n_temp) reduction(+:n_gamma) reduction(+:n_ktot)
  ZLOOP {
    int flag = fflag[j][i];
    if (flag & HIT_FLOOR_GEOM_RHO) n_geom_rho++;
    if (flag & HIT_FLOOR_GEOM_U) n_geom_u++;
    if (flag & HIT_FLOOR_B_RHO) n_b_rho++;
    if (flag & HIT_FLOOR_B_U) n_b_u++;
    if (flag & HIT_FLOOR_TEMP) n_temp++;
    if (flag & HIT_FLOOR_GAMMA) n_gamma++;
    if (flag & HIT_FLOOR_KTOT) n_ktot++;
  }

  LOG("FLOORS:");
  if (n_geom_rho > 0) LOGN("Hit %d GEOM_RHO", n_geom_rho);
  if (n_geom_u > 0) LOGN("Hit %d GEOM_U", n_geom_u);
  if (n_b_rho > 0) LOGN("Hit %d B_RHO", n_b_rho);
  if (n_b_u > 0) LOGN("Hit %d B_U", n_b_u);
  if (n_temp > 0) LOGN("Hit %d TEMPERATURE", n_temp);
  if (n_gamma > 0) LOGN("Hit %d GAMMA", n_gamma);
  if (n_ktot > 0) LOGN("Hit %d KTOT", n_ktot);
#endif

  LOG("End fixup");
  timer_stop(TIMER_FIXUP);
}
// Apply per-zone ceilings: cap the Lorentz factor relative to the normal
// observer, and (with ELECTRONS) cap the total entropy KTOT by lowering u.
inline void fixup_ceiling(struct GridGeom *G, struct FluidState *S, int i, int j)
{
  // First apply ceilings:
  // 1. Limit gamma with respect to normal observer
  double gamma = mhd_gamma_calc(G, S, i, j, CENT);

  if (gamma > GAMMAMAX)
  {
    fflag[j][i] |= HIT_FLOOR_GAMMA;

    // Rescale the velocity primitives so the new Lorentz factor is GAMMAMAX
    double f = sqrt((GAMMAMAX*GAMMAMAX - 1.)/(gamma*gamma - 1.));
    S->P[U1][j][i] *= f;
    S->P[U2][j][i] *= f;
    S->P[U3][j][i] *= f;
  }

  // 2. Limit KTOT
#if ELECTRONS
  // Keep to KTOTMAX by controlling u, to avoid anomalous cooling from funnel wall
  // Note: This operates on last iteration's KTOT, meaning the effective value can escape the ceiling
  if (S->P[KTOT][j][i] > KTOTMAX)
  {
    fflag[j][i] |= HIT_FLOOR_KTOT;
    S->P[UU][j][i] = KTOTMAX*pow(S->P[RHO][j][i],gam)/(gam-1.);
    S->P[KTOT][j][i] = KTOTMAX;
  }
#endif
}
// Apply per-zone floors to density and internal energy: geometric (radius-
// dependent), magnetic (sigma and beta^-1 ceilings), and a temperature
// ceiling. Floors are applied by injecting a zero-velocity fluid parcel in
// the conserved variables and re-inverting, so momentum is conserved.
inline void fixup_floor(struct GridGeom *G, struct FluidState *S, int i, int j)
{
  // Then apply floors:
  // 1. Geometric hard floors, not based on fluid relationships
  // NOTE(review): rhoflr_geom/uflr_geom are only assigned for METRIC == MKS
  // or MINKOWSKI; other METRIC values would leave them uninitialized —
  // confirm METRIC is restricted to these at compile time.
  double rhoflr_geom, uflr_geom;
  if(METRIC == MKS)
  {
    double r, th, X[NDIM];
    coord(i, j, CENT, X);
    bl_coord(X, &r, &th);

    // New, steeper floor in rho
    // Previously raw r^-2, r^-1.5
    double rhoscal = pow(r, -2.) * 1 / (1 + r/FLOOR_R_CHAR);
    rhoflr_geom = RHOMIN*rhoscal;
    uflr_geom = UUMIN*pow(rhoscal, gam);

    // Impose overall minimum
    // TODO These would only be hit at by r^-3 floors for r_out = 100,000M. Worth keeping?
    rhoflr_geom = MY_MAX(rhoflr_geom, RHOMINLIMIT);
    uflr_geom = MY_MAX(uflr_geom, UUMINLIMIT);
  }
  else if (METRIC == MINKOWSKI)
  {
    rhoflr_geom = RHOMIN*1.e-2;
    uflr_geom = UUMIN*1.e-2;
  }

  // Record Geometric floor hits
  if (rhoflr_geom > S->P[RHO][j][i]) fflag[j][i] |= HIT_FLOOR_GEOM_RHO;
  if (uflr_geom > S->P[UU][j][i]) fflag[j][i] |= HIT_FLOOR_GEOM_U;

  // 2. Magnetic floors: impose maximum magnetization sigma = bsq/rho, inverse beta prop. to bsq/U
  double bsq = bsq_calc(S, i, j);
  double rhoflr_b = bsq/BSQORHOMAX;
  double uflr_b = bsq/BSQOUMAX;

  // Record Magnetic floor hits
  if (rhoflr_b > S->P[RHO][j][i]) fflag[j][i] |= HIT_FLOOR_B_RHO;
  if (uflr_b > S->P[UU][j][i]) fflag[j][i] |= HIT_FLOOR_B_U;

  // Evaluate highest U floor
  double uflr_max = MY_MAX(uflr_geom, uflr_b);

  // 3. Temperature ceiling: impose maximum temperature
  // Take floors on U into account
  double rhoflr_temp = MY_MAX(S->P[UU][j][i] / UORHOMAX, uflr_max / UORHOMAX);

  // Record hitting temperature ceiling
  if (rhoflr_temp > S->P[RHO][j][i]) fflag[j][i] |= HIT_FLOOR_TEMP; // Misnomer for consistency

  // Evaluate highest RHO floor
  double rhoflr_max = MY_MAX(MY_MAX(rhoflr_geom, rhoflr_b), rhoflr_temp);

  if (rhoflr_max > S->P[RHO][j][i] || uflr_max > S->P[UU][j][i])
  { // Apply floors

    // Initialize a dummy fluid parcel
    PLOOP
    {
      Stmp->P[ip][j][i] = 0;
      Stmp->U[ip][j][i] = 0;
    }

    // Add mass and internal energy, but not velocity
    Stmp->P[RHO][j][i] = MY_MAX(0., rhoflr_max - S->P[RHO][j][i]);
    Stmp->P[UU][j][i] = MY_MAX(0., uflr_max - S->P[UU][j][i]);

    // Get conserved variables for the parcel
    get_state(G, Stmp, i, j, CENT);
    prim_to_flux(G, Stmp, i, j, 0, CENT, Stmp->U);

    // And for the current state
    prim_to_flux(G, S, i, j, 0, CENT, S->U);

    // Add new conserved variables to current values
    PLOOP
    {
      S->U[ip][j][i] += Stmp->U[ip][j][i];
      S->P[ip][j][i] += Stmp->P[ip][j][i];
    }

    // Recover primitive variables
    pflag[j][i] = U_to_P(G, S, i, j, CENT);
  }

#if ELECTRONS
  // Reset entropy after floors
  S->P[KTOT][j][i] = (gam - 1.)*S->P[UU][j][i]/pow(S->P[RHO][j][i],gam);
#endif
}
// Replace bad points with values interpolated from neighbors
#define FLOOP for(int ip=0;ip<B1;ip++)
// Replace zones where the conserved-to-primitive inversion failed with a
// distance-weighted average of their good neighbors, then re-apply ceilings
// and floors to the repaired values.
void fixup_utoprim(struct GridGeom *G, struct FluidState *S)
{
  timer_start(TIMER_FIXUP);

  // Flip the logic of the pflag[] so that it now indicates which cells are good
#pragma omp parallel for simd collapse(2)
  ZLOOPALL
    pflag[j][i] = !pflag[j][i];

#if DEBUG
  int nbad_utop = 0;
#pragma omp parallel for simd collapse(2) reduction (+:nbad_utop)
  ZLOOP
  {
    // Count the 0 = bad cells
    nbad_utop += !pflag[j][i];
  }
  LOGN("Fixing %d bad cells", nbad_utop);
#endif

  // Make sure we are not using ill defined physical corner regions
  // TODO find a way to do this once, or put it in bounds at least?
  for (int j = 0; j < NG; j++)
  {
    for (int i = 0; i < NG; i++)
    {
      pflag[j][i] = 0;
      pflag[j][i+N1+NG] = 0;
      pflag[j+N2+NG][i] = 0;
      pflag[j+N2+NG][i+N1+NG] = 0;
    }
  }

#if DEBUG
  // Keep track of how many points we fix
  int nfixed_utop = 0;
#endif

  // NOTE(review): this loop is serial — presumably because repaired cells
  // feed into the averages of later cells; confirm before parallelizing.
  ZLOOP
  {
    if (pflag[j][i] == 0)
    {
      double wsum = 0.;
      double sum[B1];
      FLOOP sum[ip] = 0.;

      // 3x3 neighborhood, weight 1/(Manhattan distance + 1), good cells only
      for (int l = -1; l < 2; l++)
      {
        for (int m = -1; m < 2; m++)
        {
          double w = 1./(abs(l) + abs(m) + 1)*pflag[j+m][i+l];
          wsum += w;
          FLOOP sum[ip] += w*S->P[ip][j+m][i+l];
        }
      }

      // No usable neighbors: leave the zone as-is (still flagged bad)
      if(wsum < 1.e-10)
      {
        fprintf(stderr, "fixup_utoprim: No usable neighbors at %d %d\n", i, j);
        continue;
      }

      FLOOP S->P[ip][j][i] = sum[ip]/wsum;

#if DEBUG
      nfixed_utop++;
#endif

      // Make sure fixed values still abide by floors
      fixup_ceiling(G, S, i, j);
      get_state(G, S, i, j, CENT);
      fixup_floor(G, S, i, j);
    }
  }

#if DEBUG
  int nleft_utop = nbad_utop - nfixed_utop;
  if(nleft_utop > 0) fprintf(stderr,"Cells STILL BAD after fixup_utoprim: %d\n", nleft_utop);
#endif

  // Reset the pflag
#pragma omp parallel for simd
  ZLOOPALL
    pflag[j][i] = 0;

  timer_stop(TIMER_FIXUP);
}
#undef FLOOP
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' values X and Y, storing X - Y in RESULT.
 * Y is normalized in place so that the per-field subtraction cannot
 * underflow. Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y's microsecond field when x has fewer
   * microseconds, so x->tv_usec - y->tv_usec becomes non-negative. */
  if (x->tv_usec < y->tv_usec)
  {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }

  /* Conversely, fold an excess of more than one second's worth of
   * microseconds in the difference back into y's seconds field. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }

  /* With y normalized, the field-wise difference is the answer and
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  // Grid sizes from the command line; +8 adds the 4-point halo on each side
  // required by the order-4 (25-point) stencil.
  // NOTE(review): Nx/Ny/Nz (and Nt) stay uninitialized when fewer arguments
  // are supplied — confirm callers always pass all four values.
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  // A holds two time levels (ping-pong buffers) of the Nz x Ny x Nx grid
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // 13 coefficient arrays: coef[0] is the center point, coef[1..12] are the
  // axis-symmetric pairs at radii 1..4 along each axis (see stencil below)
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 16;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables with a fixed seed for reproducible runs
  //
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  // Run the time-tiled stencil TESTS times; the fastest run is reported
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2

/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.
   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.
   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
   include it implicitly at the start of every compilation.  It must
   not itself include <features.h> or any other header that includes
   <features.h> because the implicit include comes before any feature
   test macros that may be defined in a source file before it first
   explicitly includes a system header.  GCC knows the name of this
   header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
   and complex.  If the GCC (4.9 and later) predefined macros
   specifying compiler intent are available, use them to determine
   whether the overall intent is to support these features; otherwise,
   presume an older compiler has intent to support these features and
   define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
   Unicode 6.0. */
/* We do not support C11 <threads.h>.  */

    // Auto-generated (PLUTO/CLooG) time-tiled loop nest; t1..t8 are the
    // tile and point coordinates of the skewed iteration space.
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
  for (t1=-1;t1<=floord(Nt-1,3);t1++) {
    lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6));
    ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24));
    // tiles along t2 are independent and distributed across threads
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(max(0,ceild(3*t1-3*t2,2)),ceild(3*t1-2,4)),ceild(24*t2-Nz-3,16));t3<=min(min(min(floord(4*Nt+Ny-9,16),floord(12*t1+Ny+15,16)),floord(24*t2+Ny+11,16)),floord(24*t1-24*t2+Nz+Ny+13,16));t3++) {
        for (t4=max(max(max(max(0,ceild(3*t1-3*t2-254,256)),ceild(3*t1-510,512)),ceild(24*t2-Nz-2035,2048)),ceild(16*t3-Ny-2035,2048));t4<=min(min(min(min(floord(4*Nt+Nx-9,2048),floord(12*t1+Nx+15,2048)),floord(24*t2+Nx+11,2048)),floord(16*t3+Nx+3,2048)),floord(24*t1-24*t2+Nz+Nx+13,2048));t4++) {
          for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(2048*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),4*t3+2),512*t4+510);t5++) {
            for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) {
              for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) {
                lbv=max(2048*t4,4*t5+4);
                ubv=min(2048*t4+2047,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                // innermost loop is the vectorizable x-sweep of the
                // 25-point axis-symmetric stencil (4 radii per axis)
                for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) 
+ (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  // NOTE(review): the top-level pointers A, coef and the tile_size buffer
  // are never freed — harmless at process exit, but confirm intent.
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);

  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }

  return 0;
}
|
GB_unaryop__identity_fp32_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_fp32_uint16
// op(A') function: GB_tran__identity_fp32_uint16
// C type: float
// A type: uint16_t
// cast: float cij = (float) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (float) Ax [p] for all p < anz: apply the identity operator with
// a uint16_t -> float typecast (auto-generated kernel; see GB_CAST_OP above).
GrB_Info GB_unop__identity_fp32_uint16
(
    float *restrict Cx,         // output array, anz entries
    const uint16_t *restrict Ax,// input array, anz entries
    int64_t anz,                // number of entries to convert
    int nthreads                // OpenMP thread count
)
{
#if GB_DISABLE
    // this kernel was compiled out via GxB_NO_* controls
    return (GrB_NO_VALUE) ;
#else
    // iterations are independent, so a static parallel-for is safe
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;     // Cx [p] = (float) Ax [p]
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (float) A': transpose the uint16_t matrix A while typecasting into the
// float matrix C. The loop body lives in the GB_unaryop_transpose.c template,
// instantiated with the GB_* macros defined above in this generated file.
GrB_Info GB_tran__identity_fp32_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,   // per-slice row counts from phase 1
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice                     // number of slices of A
)
{
#if GB_DISABLE
    // this kernel was compiled out via GxB_NO_* controls
    return (GrB_NO_VALUE) ;
#else
    // select the second (value-filling) phase of the transpose template
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
|
pr63326.c | /* PR c/63326 */
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */
/* f1: each stand-alone OpenMP directive below appears as the lone body of an
   `if` statement, which is invalid; the dg-error annotations check that the
   compiler diagnoses every occurrence. */
void
f1 (int x)
{
  int i;
  if (x)
#pragma omp barrier /* { dg-error "may only be used in compound statements" } */
    ;
  if (x)
#pragma omp flush /* { dg-error "may only be used in compound statements" } */
    ;
  if (x)
#pragma omp taskwait /* { dg-error "may only be used in compound statements" } */
    ;
  if (x)
#pragma omp taskyield /* { dg-error "may only be used in compound statements" } */
    ;
#pragma omp parallel
  {
    if (x)
#pragma omp cancel parallel /* { dg-error "may only be used in compound statements" } */
      ;
  }
#pragma omp parallel
  {
    if (x)
#pragma omp cancellation point parallel /* { dg-error "may only be used in compound statements" } */
      ;
  }
#pragma omp for ordered(1)
  for (i = 0; i < 16; i++)
    {
      if (x)
#pragma omp ordered depend(source) /* { dg-error "may only be used in compound statements" } */
	;
      if (x)
#pragma omp ordered depend(sink: i-1) /* { dg-error "may only be used in compound statements" } */
	;
    }
  if (x)
#pragma omp target enter data map(to:i) /* { dg-error "may only be used in compound statements" } */
    ;
  if (x)
#pragma omp target update to(i) /* { dg-error "may only be used in compound statements" } */
    ;
  if (x)
#pragma omp target exit data map(from:i) /* { dg-error "may only be used in compound statements" } */
    ;
}
/* f2: same checks as f1, but with each stand-alone directive as the lone
   body of a `while` loop; all must be rejected (dg-error). */
void
f2 (int x)
{
  int i;
  while (x)
#pragma omp barrier /* { dg-error "may only be used in compound statements" } */
    ;
  while (x)
#pragma omp flush /* { dg-error "may only be used in compound statements" } */
    ;
  while (x)
#pragma omp taskwait /* { dg-error "may only be used in compound statements" } */
    ;
  while (x)
#pragma omp taskyield /* { dg-error "may only be used in compound statements" } */
    ;
#pragma omp parallel
  {
    while (x)
#pragma omp cancel parallel /* { dg-error "may only be used in compound statements" } */
      ;
  }
#pragma omp parallel
  {
    while (x)
#pragma omp cancellation point parallel /* { dg-error "may only be used in compound statements" } */
      ;
  }
#pragma omp for ordered(1)
  for (i = 0; i < 16; i++)
    {
      while (x)
#pragma omp ordered depend(source) /* { dg-error "may only be used in compound statements" } */
	;
      while (x)
#pragma omp ordered depend(sink: i-1) /* { dg-error "may only be used in compound statements" } */
	;
    }
  while (x)
#pragma omp target enter data map(to:i) /* { dg-error "may only be used in compound statements" } */
    ;
  while (x)
#pragma omp target update to(i) /* { dg-error "may only be used in compound statements" } */
    ;
  while (x)
#pragma omp target exit data map(from:i) /* { dg-error "may only be used in compound statements" } */
    ;
}
void
f3 (int x)
{
int i;
for (x = 0; x < 10; x++)
#pragma omp barrier /* { dg-error "may only be used in compound statements" } */
;
for (x = 0; x < 10; x++)
#pragma omp flush /* { dg-error "may only be used in compound statements" } */
;
for (x = 0; x < 10; x++)
#pragma omp taskwait /* { dg-error "may only be used in compound statements" } */
;
for (x = 0; x < 10; x++)
#pragma omp taskyield /* { dg-error "may only be used in compound statements" } */
;
#pragma omp parallel
{
for (x = 0; x < 10; x++)
#pragma omp cancel parallel /* { dg-error "may only be used in compound statements" } */
;
}
#pragma omp parallel
{
for (x = 0; x < 10; x++)
#pragma omp cancellation point parallel /* { dg-error "may only be used in compound statements" } */
;
}
#pragma omp for ordered(1)
for (i = 0; i < 16; i++)
{
for (x = 0; x < 10; x++)
#pragma omp ordered depend(source) /* { dg-error "may only be used in compound statements" } */
;
for (x = 0; x < 10; x++)
#pragma omp ordered depend(sink: i-1) /* { dg-error "may only be used in compound statements" } */
;
}
for (x = 0; x < 10; x++)
#pragma omp target enter data map(to:i) /* { dg-error "may only be used in compound statements" } */
;
for (x = 0; x < 10; x++)
#pragma omp target update to(i) /* { dg-error "may only be used in compound statements" } */
;
for (x = 0; x < 10; x++)
#pragma omp target exit data map(from:i) /* { dg-error "may only be used in compound statements" } */
;
}
/* f4: invalid placement as the body of a "do"/"while" loop.  The extra
   dg-error "before" lines are C++-only cascade errors after the rejected
   pragma; each brace group isolates one case.  */
void
f4 (int x)
{
  int i;
  {
    do
      #pragma omp barrier /* { dg-error "may only be used in compound statements" } */
    while (0); /* { dg-error "before" "" { target c++ } } */
  } /* { dg-error "before" "" { target c++ } } */
  {
    do
      #pragma omp flush /* { dg-error "may only be used in compound statements" } */
    while (0); /* { dg-error "before" "" { target c++ } } */
  } /* { dg-error "before" "" { target c++ } } */
  {
    do
      #pragma omp taskwait /* { dg-error "may only be used in compound statements" } */
    while (0); /* { dg-error "before" "" { target c++ } } */
  } /* { dg-error "before" "" { target c++ } } */
  {
    do
      #pragma omp taskyield /* { dg-error "may only be used in compound statements" } */
    while (0); /* { dg-error "before" "" { target c++ } } */
  } /* { dg-error "before" "" { target c++ } } */
  #pragma omp parallel
  {
    do
      #pragma omp cancel parallel /* { dg-error "may only be used in compound statements" } */
    while (0); /* { dg-error "before" "" { target c++ } } */
  } /* { dg-error "before" "" { target c++ } } */
  #pragma omp parallel
  {
    do
      #pragma omp cancellation point parallel /* { dg-error "may only be used in compound statements" } */
    while (0); /* { dg-error "before" "" { target c++ } } */
  } /* { dg-error "before" "" { target c++ } } */
  #pragma omp for ordered(1)
  for (i = 0; i < 16; i++)
    {
      {
        do
          #pragma omp ordered depend(source) /* { dg-error "may only be used in compound statements" } */
        while (0); /* { dg-error "before" "" { target c++ } } */
      } /* { dg-error "before" "" { target c++ } } */
      {
        do
          #pragma omp ordered depend(sink: i-1) /* { dg-error "may only be used in compound statements" } */
        while (0); /* { dg-error "before" "" { target c++ } } */
      } /* { dg-error "before" "" { target c++ } } */
    }
  {
    do
      #pragma omp target enter data map(to:i) /* { dg-error "may only be used in compound statements" } */
    while (0); /* { dg-error "before" "" { target c++ } } */
  } /* { dg-error "before" "" { target c++ } } */
  {
    do
      #pragma omp target update to(i) /* { dg-error "may only be used in compound statements" } */
    while (0); /* { dg-error "before" "" { target c++ } } */
  } /* { dg-error "before" "" { target c++ } } */
  {
    do
      #pragma omp target exit data map(from:i) /* { dg-error "may only be used in compound statements" } */
    while (0); /* { dg-error "before" "" { target c++ } } */
  } /* { dg-error "before" "" { target c++ } } */
}
void
f5 (int x)
{
int i;
switch (x)
#pragma omp barrier /* { dg-error "may only be used in compound statements" } */
;
switch (x)
#pragma omp flush /* { dg-error "may only be used in compound statements" } */
;
switch (x)
#pragma omp taskwait /* { dg-error "may only be used in compound statements" } */
;
switch (x)
#pragma omp taskyield /* { dg-error "may only be used in compound statements" } */
;
#pragma omp parallel
{
switch (x)
#pragma omp cancel parallel /* { dg-error "may only be used in compound statements" } */
;
}
#pragma omp parallel
{
switch (x)
#pragma omp cancellation point parallel /* { dg-error "may only be used in compound statements" } */
;
}
#pragma omp for ordered(1)
for (i = 0; i < 16; i++)
{
switch (x)
#pragma omp ordered depend(source) /* { dg-error "may only be used in compound statements" } */
;
switch (x)
#pragma omp ordered depend(sink: i-1) /* { dg-error "may only be used in compound statements" } */
;
}
switch (x)
#pragma omp target enter data map(to:i) /* { dg-error "may only be used in compound statements" } */
;
switch (x)
#pragma omp target update to(i) /* { dg-error "may only be used in compound statements" } */
;
switch (x)
#pragma omp target exit data map(from:i) /* { dg-error "may only be used in compound statements" } */
;
}
/* f6: invalid placement immediately after a "case" label — a standalone
   OpenMP directive is not a statement a label may precede.  */
void
f6 (int x)
{
  int i;
  switch (x)
    {
    case 1:
      #pragma omp barrier /* { dg-error "may only be used in compound statements" } */
      ;
    }
  switch (x)
    {
    case 1:
      #pragma omp flush /* { dg-error "may only be used in compound statements" } */
      ;
    }
  switch (x)
    {
    case 1:
      #pragma omp taskwait /* { dg-error "may only be used in compound statements" } */
      ;
    }
  switch (x)
    {
    case 1:
      #pragma omp taskyield /* { dg-error "may only be used in compound statements" } */
      ;
    }
  #pragma omp parallel
  {
    switch (x)
      {
      case 1:
        #pragma omp cancel parallel /* { dg-error "may only be used in compound statements" } */
        ;
      }
  }
  #pragma omp parallel
  {
    switch (x)
      {
      case 1:
        #pragma omp cancellation point parallel /* { dg-error "may only be used in compound statements" } */
        ;
      }
  }
  #pragma omp for ordered(1)
  for (i = 0; i < 16; i++)
    {
      switch (x)
        {
        case 1:
          #pragma omp ordered depend(source) /* { dg-error "may only be used in compound statements" } */
          ;
        }
      switch (x)
        {
        case 1:
          #pragma omp ordered depend(sink: i-1) /* { dg-error "may only be used in compound statements" } */
          ;
        }
    }
  switch (x)
    {
    case 1:
      #pragma omp target enter data map(to:i) /* { dg-error "may only be used in compound statements" } */
      ;
    }
  switch (x)
    {
    case 1:
      #pragma omp target update to(i) /* { dg-error "may only be used in compound statements" } */
      ;
    }
  switch (x)
    {
    case 1:
      #pragma omp target exit data map(from:i) /* { dg-error "may only be used in compound statements" } */
      ;
    }
}
void
f7 (int x)
{
int i;
switch (x)
{
default:
#pragma omp barrier /* { dg-error "may only be used in compound statements" } */
;
}
switch (x)
{
default:
#pragma omp flush /* { dg-error "may only be used in compound statements" } */
;
}
switch (x)
{
default:
#pragma omp taskwait /* { dg-error "may only be used in compound statements" } */
;
}
switch (x)
{
default:
#pragma omp taskyield /* { dg-error "may only be used in compound statements" } */
;
}
#pragma omp parallel
{
switch (x)
{
default:
#pragma omp cancel parallel /* { dg-error "may only be used in compound statements" } */
;
}
}
#pragma omp parallel
{
switch (x)
{
default:
#pragma omp cancellation point parallel /* { dg-error "may only be used in compound statements" } */
;
}
}
#pragma omp for ordered(1)
for (i = 0; i < 16; i++)
{
switch (x)
{
default:
#pragma omp ordered depend(source) /* { dg-error "may only be used in compound statements" } */
;
}
switch (x)
{
default:
#pragma omp ordered depend(sink: i-1) /* { dg-error "may only be used in compound statements" } */
;
}
}
switch (x)
{
default:
#pragma omp target enter data map(to:i) /* { dg-error "may only be used in compound statements" } */
;
}
switch (x)
{
default:
#pragma omp target update to(i) /* { dg-error "may only be used in compound statements" } */
;
}
switch (x)
{
default:
#pragma omp target exit data map(from:i) /* { dg-error "may only be used in compound statements" } */
;
}
}
/* f8: the pragma directly follows an ordinary statement label, which is
   likewise not a compound-statement context.  */
void
f8 (int x)
{
  int i;
 lab1:
  #pragma omp barrier /* { dg-error "may only be used in compound statements" } */
  ;
 lab2:
  #pragma omp flush /* { dg-error "may only be used in compound statements" } */
  ;
 lab3:
  #pragma omp taskwait /* { dg-error "may only be used in compound statements" } */
  ;
 lab4:
  #pragma omp taskyield /* { dg-error "may only be used in compound statements" } */
  ;
  #pragma omp parallel
  {
   lab5:
    #pragma omp cancel parallel /* { dg-error "may only be used in compound statements" } */
    ;
  }
  #pragma omp parallel
  {
   lab6:
    #pragma omp cancellation point parallel /* { dg-error "may only be used in compound statements" } */
    ;
  }
  #pragma omp for ordered(1)
  for (i = 0; i < 16; i++)
    {
     lab7:
      #pragma omp ordered depend(source) /* { dg-error "may only be used in compound statements" } */
      ;
     lab8:
      #pragma omp ordered depend(sink: i-1) /* { dg-error "may only be used in compound statements" } */
      ;
    }
 lab9:
  #pragma omp target enter data map(to:i) /* { dg-error "may only be used in compound statements" } */
  ;
 lab10:
  #pragma omp target update to(i) /* { dg-error "may only be used in compound statements" } */
  ;
 lab11:
  #pragma omp target exit data map(from:i) /* { dg-error "may only be used in compound statements" } */
  ;
}
|
prepress.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP RRRR EEEEE PPPP RRRR EEEEE SSSSS SSSSS %
% P P R R E P P R R E SS SS %
% PPPP RRRR EEE PPPP RRRR EEE SSS SSS %
% P R R E P R R E SS SS %
% P R R EEEEE P R R EEEEE SSSSS SSSSS %
% %
% %
% MagickCore Prepress Methods %
% %
% Software Design %
% Cristy %
% October 2001 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/hashmap.h"
#include "MagickCore/image.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/prepress.h"
#include "MagickCore/resource_.h"
#include "MagickCore/registry.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e T o t a l I n k D e n s i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageTotalInkDensity() returns the total ink density for a CMYK image.
% Total Ink Density (TID) is determined by adding the CMYK values in the
% darkest shadow area in an image.
%
% The format of the GetImageTotalInkDensity method is:
%
% double GetImageTotalInkDensity(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport double GetImageTotalInkDensity(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;
  double
    total_ink_density;
  MagickBooleanType
    status;
  ssize_t
    y;
  /* Validate the image handle; the method is only meaningful for
     color-separated (CMYK) images, so reject any other colorspace.  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (image->colorspace != CMYKColorspace)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ColorSeparatedImageRequired","`%s'",image->filename);
      return(0.0);
    }
  status=MagickTrue;
  total_ink_density=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* Rows are processed in parallel; "status" records pixel-cache failures.  */
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      density;
    register const Quantum
      *p;
    register ssize_t
      x;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        /* Pixel cache failure: flag it and skip this row.  */
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* NOTE(review): the four channel accessors here presumably map to the
         C, M, Y, and K channels of a CMYK image -- confirm against
         pixel-accessor.h.  */
      density=(double) GetPixelRed(image,p)+GetPixelGreen(image,p)+
        GetPixelBlue(image,p)+GetPixelBlack(image,p);
      /* Double-checked update of the running maximum: the unsynchronized
         outer test filters out most pixels cheaply; the test is repeated
         inside the named critical section where the update is safe.  */
      if (density > total_ink_density)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_GetImageTotalInkDensity)
#endif
        {
          if (density > total_ink_density)
            total_ink_density=density;
        }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /* Any row failure invalidates the result.  */
  if (status == MagickFalse)
    total_ink_density=0.0;
  return(total_ink_density);
}
|
reduction.h | #ifndef __DACE_REDUCTION_H
#define __DACE_REDUCTION_H
#include <cstdint>
#include "types.h"
#include "math.h" // for ::min, ::max
#ifdef __CUDACC__
#include "../../../external/cub/cub/device/device_reduce.cuh"
#include "../../../external/cub/cub/block/block_reduce.cuh"
#endif
// Specializations for reductions implemented in frameworks like OpenMP, MPI
namespace dace {
// Internal type. See below for wcr_fixed external type, which selects
// the implementation according to T's properties.
// Primary template: declaration only.  Each ReductionType below provides a
// full specialization with the actual reduce/reduce_atomic definitions.
template <ReductionType REDTYPE, typename T>
struct _wcr_fixed
{
    // Sequential (single-writer) update of *ptr with `value`.
    static DACE_HDFI void reduce(T *ptr, const T& value);
    // Concurrent-safe update of *ptr with `value`.
    static DACE_HDFI void reduce_atomic(T *ptr, const T& value);
    // The underlying binary reduction operator.
    DACE_HDFI T operator()(const T &a, const T &b) const;
};
// Custom reduction with a lambda function
// Fallback reduction for an arbitrary (lambda) operator: no hardware atomic
// matches a user-supplied WCR, so atomicity is emulated.
template <typename T>
struct wcr_custom {
    template <typename WCR>
    static DACE_HDFI void reduce_atomic(WCR wcr, T *ptr, const T& value) {
        // The slowest kind of atomic operations (locked/compare-and-swap),
        // this should only happen in case of unrecognized lambdas
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300
        // Adapted from CUDA's pre-v8.0 double atomicAdd implementation:
        // retry the CAS until no other thread changed *ptr between the read
        // and the swap.
        T old = *ptr, assumed;
        do {
            assumed = old;
            old = atomicCAS(ptr, assumed, wcr(assumed, value));
        } while (assumed != old);
#else
        // Host path: serialize the read-modify-write under a critical section.
        #pragma omp critical
        *ptr = wcr(*ptr, value);
#endif
    }

    // Non-conflicting version --> no critical section
    template <typename WCR>
    static DACE_HDFI void reduce(WCR wcr, T *ptr, const T& value) {
        *ptr = wcr(*ptr, value);
    }
};
// Sum reduction: native atomicAdd on CUDA, "omp atomic" on the host.
template <typename T>
struct _wcr_fixed<ReductionType::Sum, T> {
    static DACE_HDFI void reduce(T *ptr, const T& value) { *ptr += value; }

    static DACE_HDFI void reduce_atomic(T *ptr, const T& value) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300
        atomicAdd(ptr, value);
#else
        #pragma omp atomic
        *ptr += value;
#endif
    }

    DACE_HDFI T operator()(const T &a, const T &b) const { return a + b; }
};
// Implementation of double atomicAdd for CUDA architectures prior to 6.0
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600
// double specialization for CUDA architectures < 6.0, which lack a native
// double-precision atomicAdd: emulate it with a 64-bit CAS on the raw bits.
template <>
struct _wcr_fixed<ReductionType::Sum, double> {
    static DACE_HDFI void reduce(double *ptr, const double& value) { *ptr += value; }

    static DACE_HDFI void reduce_atomic(double *ptr, const double& value) {
        // Reinterpret the double as unsigned long long for atomicCAS, and
        // loop until the swap succeeds without interference.
        unsigned long long int* address_as_ull = (unsigned long long int*)ptr;
        unsigned long long int old = *address_as_ull, assumed;
        do {
            assumed = old;
            old = atomicCAS(address_as_ull, assumed,
                            __double_as_longlong(value + __longlong_as_double(assumed)));
        } while (assumed != old);
    }

    DACE_HDFI double operator()(const double &a, const double &b) const { return a + b; }
};
#endif
// Product reduction.  There is no native atomic multiply, so the CUDA path
// goes through the generic compare-and-swap loop; the host path uses
// "omp atomic" (which supports *=).
template <typename T>
struct _wcr_fixed<ReductionType::Product, T> {
    static DACE_HDFI void reduce(T *ptr, const T& value) { *ptr *= value; }

    static DACE_HDFI void reduce_atomic(T *ptr, const T& value) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300
        // Bug fix: this previously called wcr_custom<T>::reduce, the
        // explicitly NON-atomic variant, allowing lost updates on the GPU.
        // Use the CAS-loop atomic fallback instead.
        wcr_custom<T>::reduce_atomic(
            _wcr_fixed<ReductionType::Product, T>(), ptr, value);
#else
        #pragma omp atomic
        *ptr *= value;
#endif
    }

    DACE_HDFI T operator()(const T &a, const T &b) const { return a * b; }
};
// Min reduction: native atomicMin on CUDA; on the host there is no
// "omp atomic" form for min, so fall back to the locked generic update.
template <typename T>
struct _wcr_fixed<ReductionType::Min, T> {
    static DACE_HDFI void reduce(T *ptr, const T& value) { *ptr = ::min(*ptr, value); }

    static DACE_HDFI void reduce_atomic(T *ptr, const T& value) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300
        atomicMin(ptr, value);
#else
        // Bug fix: this previously called wcr_custom<T>::reduce, the
        // explicitly NON-atomic variant, racing with concurrent writers.
        // Use the critical-section atomic fallback instead.
        wcr_custom<T>::reduce_atomic(
            _wcr_fixed<ReductionType::Min, T>(), ptr, value);
#endif
    }

    DACE_HDFI T operator()(const T &a, const T &b) const { return ::min(a, b); }
};
// Max reduction: native atomicMax on CUDA; on the host there is no
// "omp atomic" form for max, so fall back to the locked generic update.
template <typename T>
struct _wcr_fixed<ReductionType::Max, T> {
    static DACE_HDFI void reduce(T *ptr, const T& value) { *ptr = ::max(*ptr, value); }

    static DACE_HDFI void reduce_atomic(T *ptr, const T& value) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300
        atomicMax(ptr, value);
#else
        // Bug fix: this previously called wcr_custom<T>::reduce, the
        // explicitly NON-atomic variant, racing with concurrent writers.
        // Use the critical-section atomic fallback instead.
        wcr_custom<T>::reduce_atomic(
            _wcr_fixed<ReductionType::Max, T>(), ptr, value);
#endif
    }

    DACE_HDFI T operator()(const T &a, const T &b) const { return ::max(a, b); }
};
// Logical AND: the atomic path normalizes `value` to 0/1 so the bitwise
// atomic AND matches logical semantics for already-normalized targets.
template <typename T>
struct _wcr_fixed<ReductionType::Logical_And, T> {
    static DACE_HDFI void reduce(T *ptr, const T& value) { *ptr = (*ptr && value); }

    static DACE_HDFI void reduce_atomic(T *ptr, const T& value) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300
        atomicAnd(ptr, value ? T(1) : T(0));
#else
        T val = (value ? T(1) : T(0));
        #pragma omp atomic
        *ptr &= val;
#endif
    }

    DACE_HDFI T operator()(const T &a, const T &b) const { return a && b; }
};
// Bitwise AND: native atomicAnd on CUDA, "omp atomic" &= on the host.
template <typename T>
struct _wcr_fixed<ReductionType::Bitwise_And, T> {
    static DACE_HDFI void reduce(T *ptr, const T& value) { *ptr &= value; }

    static DACE_HDFI void reduce_atomic(T *ptr, const T& value) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300
        atomicAnd(ptr, value);
#else
        #pragma omp atomic
        *ptr &= value;
#endif
    }

    DACE_HDFI T operator()(const T &a, const T &b) const { return a & b; }
};
// Logical OR: `value` is normalized to 0/1 before the bitwise atomic OR.
template <typename T>
struct _wcr_fixed<ReductionType::Logical_Or, T> {
    static DACE_HDFI void reduce(T *ptr, const T& value) { *ptr = (*ptr || value); }

    static DACE_HDFI void reduce_atomic(T *ptr, const T& value) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300
        atomicOr(ptr, value ? T(1) : T(0));
#else
        T val = (value ? T(1) : T(0));
        #pragma omp atomic
        *ptr |= val;
#endif
    }

    DACE_HDFI T operator()(const T &a, const T &b) const { return a || b; }
};
// Bitwise OR: native atomicOr on CUDA, "omp atomic" |= on the host.
template <typename T>
struct _wcr_fixed<ReductionType::Bitwise_Or, T> {
    static DACE_HDFI void reduce(T *ptr, const T& value) { *ptr |= value; }

    static DACE_HDFI void reduce_atomic(T *ptr, const T& value) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300
        atomicOr(ptr, value);
#else
        #pragma omp atomic
        *ptr |= value;
#endif
    }

    DACE_HDFI T operator()(const T &a, const T &b) const { return a | b; }
};
// Logical XOR: expressed as inequality (*ptr != value); the atomic path
// normalizes `value` to 0/1 and applies a bitwise XOR.
template <typename T>
struct _wcr_fixed<ReductionType::Logical_Xor, T> {
    static DACE_HDFI void reduce(T *ptr, const T& value) { *ptr = (*ptr != value); }

    static DACE_HDFI void reduce_atomic(T *ptr, const T& value) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300
        atomicXor(ptr, value ? T(1) : T(0));
#else
        T val = (value ? T(1) : T(0));
        #pragma omp atomic
        *ptr ^= val;
#endif
    }

    DACE_HDFI T operator()(const T &a, const T &b) const { return a != b; }
};
// Bitwise XOR: native atomicXor on CUDA, "omp atomic" ^= on the host.
template <typename T>
struct _wcr_fixed<ReductionType::Bitwise_Xor, T> {
    static DACE_HDFI void reduce(T *ptr, const T& value) { *ptr ^= value; }

    static DACE_HDFI void reduce_atomic(T *ptr, const T& value) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300
        atomicXor(ptr, value);
#else
        #pragma omp atomic
        *ptr ^= value;
#endif
    }

    DACE_HDFI T operator()(const T &a, const T &b) const { return a ^ b; }
};
//////////////////////////////////////////////////////////////////////////
// Specialization that regresses to critical section / locked update for
// unsupported types
// SFINAE helper: enabled only for scalar (arithmetic/pointer/enum) types.
template<typename T>
using EnableIfScalar = typename std::enable_if<std::is_scalar<T>::value>::type;

// Any vector type that is not of length 1, or struct/complex types
// do not support atomics. In these cases, we regress to locked updates.
template <ReductionType REDTYPE, typename T, typename SFINAE = void>
struct wcr_fixed
{
    // Sequential update: delegate directly to the fixed-operator kernel.
    static DACE_HDFI void reduce(T *ptr, const T& value)
    {
        _wcr_fixed<REDTYPE, T>::reduce(ptr, value);
    }

    // Atomic update: route through the generic locked/CAS fallback, using
    // the fixed operator as the custom WCR functor.
    static DACE_HDFI void reduce_atomic(T *ptr, const T& value)
    {
        wcr_custom<T>::template reduce_atomic(
            _wcr_fixed<REDTYPE, T>(), ptr, value);
    }
};
// When atomics are supported, use _wcr_fixed normally
// Scalar types support real atomics: forward both entry points straight to
// the per-operator implementation (no locking needed).
template <ReductionType REDTYPE, typename T>
struct wcr_fixed<REDTYPE, T, EnableIfScalar<T> >
{
    static DACE_HDFI void reduce(T *ptr, const T& value)
    {
        _wcr_fixed<REDTYPE, T>::reduce(ptr, value);
    }

    static DACE_HDFI void reduce_atomic(T *ptr, const T& value)
    {
        _wcr_fixed<REDTYPE, T>::reduce_atomic(ptr, value);
    }

    // Expose the binary operator itself (e.g. for block-wide reducers).
    DACE_HDFI T operator()(const T &a, const T &b) const
    {
        return _wcr_fixed<REDTYPE, T>()(a, b);
    }
};
} // namespace dace
#endif // __DACE_REDUCTION_H
|
MixedSolverSchurMP.h | /**
* This file is part of the Eigen Recursive Matrix Extension (ERME).
*
* Copyright (c) 2019 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#pragma once
#include "../Core.h"
#include "MixedSolver.h"
namespace Eigen::Recursive
{
/**
* Multi threaded implementation.
*/
// Multi-threaded Schur-complement solver for the symmetric 2x2 block system
//   [U  W ] [da]   [ea]
//   [W^T V] [db] = [eb]
// where U and V are block-diagonal and W is block-sparse.
// NOTE(review): analyzePattern/solve use orphaned "omp single"/"omp for"
// worksharing constructs, so they are presumably meant to be called from
// inside an enclosing "#pragma omp parallel" region -- confirm at call sites.
template <typename UBlock, typename VBlock, typename WBlock, typename XType>
class MixedSymmetricRecursiveSolver<
    SymmetricMixedMatrix2<Eigen::DiagonalMatrix<UBlock, -1>, Eigen::DiagonalMatrix<VBlock, -1>,
                          Eigen::SparseMatrix<WBlock, Eigen::RowMajor>>,
    XType>
{
   public:
    using AType = SymmetricMixedMatrix2<Eigen::DiagonalMatrix<UBlock, -1>, Eigen::DiagonalMatrix<VBlock, -1>,
                                        Eigen::SparseMatrix<WBlock, Eigen::RowMajor>>;
    using AUType = typename AType::UType;
    using AVType = typename AType::VType;
    using AWType = typename AType::WType;
    using AWTType = typename TransposeType<AWType>::Type;
    using XUType = typename XType::UType;
    using XVType = typename XType::VType;
    using S1Type = Eigen::SparseMatrix<UBlock, Eigen::RowMajor>;
    using S2Type = Eigen::SparseMatrix<VBlock, Eigen::RowMajor>;

    // One-time sizing of all scratch buffers and structural analysis of the
    // W transpose.  Executed by a single thread ("omp single" carries an
    // implicit barrier for the rest of the team).
    void analyzePattern(const AType& A, const LinearSolverOptions& solverOptions)
    {
#pragma omp single
        {
            n = A.u.rows();
            m = A.v.rows();
            Vinv.resize(m);
            Y.resize(n, m);
            Sdiag.resize(n);
            ej.resize(n);
            q.resize(m);
            S1.resize(n, n);
            P.resize(n);
            tmp.resize(n);
            if (solverOptions.solverType == LinearSolverOptions::SolverType::Direct)
            {
                // NOTE(review): the direct solver is unsupported here;
                // std::terminate() aborts, so the two assignments below it
                // are dead code.
                std::terminate();
                hasWT = true;
                explizitSchur = true;
            }
            else
            {
                // TODO: add heurisitc here
                hasWT = true;
                explizitSchur = true;
            }
            if (hasWT)
            {
                // Precompute only the sparsity structure of W^T; values are
                // refreshed each solve().
                transposeStructureOnly_omp(A.w, WT, transposeTargets);
            }
            patternAnalyzed = true;
        }
    }

    // Solves A x = b via the Schur complement on the U block, using an
    // implicit (matrix-free) preconditioned conjugate gradient.
    void solve(AType& A, XType& x, XType& b, const LinearSolverOptions& solverOptions = LinearSolverOptions())
    {
        // Some references for easier access
        const AUType& U = A.u;
        const AVType& V = A.v;
        const AWType& W = A.w;
        XUType& da = x.u;
        XVType& db = x.v;
        const XUType& ea = b.u;
        const XVType& eb = b.v;

        if (!patternAnalyzed) analyzePattern(A, solverOptions);

        // Refresh the numeric values of W^T (structure was prepared above).
        transposeValueOnly_omp(A.w, WT, transposeTargets);

        // U schur (S1): Vinv = V^-1 (blockwise), Y = W * Vinv, and the
        // diagonal of the Schur complement Sdiag = U - diag(Y * W^T).
        #pragma omp for
        for (int i = 0; i < m; ++i) Vinv.diagonal()(i) = V.diagonal()(i).get().inverse();
        multSparseDiag_omp(W, Vinv, Y);
        diagInnerProductTransposed_omp(Y, W, Sdiag);
        #pragma omp for
        for (int i = 0; i < n; ++i) Sdiag.diagonal()(i).get() = U.diagonal()(i).get() - Sdiag.diagonal()(i).get();

        // Reduced right-hand side: ej = ea - Y * eb; da starts at zero.
        sparse_mv_omp(Y, eb, ej);
        #pragma omp for
        for (int i = 0; i < n; ++i)
        {
            ej(i).get() = ea(i).get() - ej(i).get();
            da(i).get().setZero();
        }
        {
            // A special implicit schur solver.
            // We cannot use the recursive inner solver here.
            // (Maybe a todo for the future)
            // da.setZero();
        }

        Eigen::Index iters = solverOptions.maxIterativeIterations;
        double tol = solverOptions.iterativeTolerance;
        // Diagonal (Jacobi-style) preconditioner built from Sdiag.
        P.compute(Sdiag);
        // CG on the implicit Schur operator S = U - Y * W^T, applied
        // matrix-free inside the lambda.
        recursive_conjugate_gradient_OMP(
            [&](const XUType& v, XUType& result) {
                // x = U * p - Y * WT * p
                sparse_mv_omp(WT, v, q);
                sparse_mv_omp(Y, q, tmp);
        #pragma omp for
                for (int i = 0; i < v.rows(); ++i)
                {
                    result(i).get() = (U.diagonal()(i).get() * v(i).get()) - tmp(i).get();
                }
            },
            ej, da, P, iters, tol);

        // Back-substitution: db = Vinv * (eb - W^T * da).
        sparse_mv_omp(WT, da, q);
        {
        #pragma omp for
            for (int i = 0; i < m; ++i)
            {
                q(i).get() = eb(i).get() - q(i).get();
            }
        }
        multDiagVector_omp(Vinv, q, db);
    }

   private:
    // Block dimensions of U (n) and V (m); set in analyzePattern.
    int n, m;
    // ==== Solver tmps ====
    XVType q;
    AVType Vinv;      // blockwise inverse of V
    AWType Y;         // W * Vinv
    S1Type S1;        // NOTE(review): allocated but unused in this path
    Eigen::DiagonalMatrix<UBlock, -1> Sdiag;  // diagonal of the Schur complement
    XUType ej;        // reduced RHS
    XUType tmp;
    std::vector<int> transposeTargets;  // mapping used by the omp transpose
    AWTType WT;       // cached W^T
    RecursiveDiagonalPreconditioner<UBlock> P;
    bool patternAnalyzed = false;
    bool hasWT = true;
    bool explizitSchur = true;
};
} // namespace Eigen::Recursive
|
requantize_relu_pack8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Requantize an int32 blob to int8 with a fused ReLU, for pack-8 NEON layout:
// each channel holds 8 interleaved lanes, processed as two float32x4 halves.
// The two scale factors (and optional bias) are folded ahead of time so the
// inner loop is a single multiply (or fused multiply-add) per half:
//   int8(relu(v * scale_in) * scale_out)        == int8_relu(v * (scale_in * scale_out))
//   int8(relu(v * scale_in + bias) * scale_out) == int8_relu(v * (scale_in * scale_out) + bias * scale_out)
static void requantize_relu_pack8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& scale_in_data, const Mat& scale_out_data, const Mat& bias_data, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;
    int size = w * h;
    // A size of 1 means one broadcast scalar; otherwise 8 values per channel.
    int scale_in_data_size = scale_in_data.w;
    int scale_out_data_size = scale_out_data.w;
    int bias_data_size = bias_data.w;

    // int8(relu(v * scale_in) * scale_out)
    // int8_relu(v * (scale_in * scale_out))

    // int8(relu(v * scale_in + bias) * scale_out)
    // int8_relu(v * (scale_in * scale_out) + (bias * scale_out))

    if (bias_data_size == 0)
    {
        // ---- No-bias path: pure multiply + ReLU + narrowing store. ----
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            const int* intptr = bottom_blob.channel(q);
            signed char* ptr = top_blob.channel(q);

            // Per-channel scales: broadcast if scalar, else load lanes q*8..q*8+7.
            float32x4_t _scale_in0 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8);
            float32x4_t _scale_in1 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8 + 4);
            float32x4_t _scale_out0 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8);
            float32x4_t _scale_out1 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8 + 4);

            // Fold the two scales into one multiplier per half.
            float32x4_t _scale0 = vmulq_f32(_scale_in0, _scale_out0);
            float32x4_t _scale1 = vmulq_f32(_scale_in1, _scale_out1);

            int i = 0;
#if __aarch64__
            // 4-pixel (32-lane) unroll, enabled only on aarch64 -- presumably
            // because of the larger NEON register file; verify with profiling.
            for (; i + 3 < size; i += 4)
            {
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                float32x4_t _v2 = vcvtq_f32_s32(vld1q_s32(intptr + 8));
                float32x4_t _v3 = vcvtq_f32_s32(vld1q_s32(intptr + 12));
                float32x4_t _v4 = vcvtq_f32_s32(vld1q_s32(intptr + 16));
                float32x4_t _v5 = vcvtq_f32_s32(vld1q_s32(intptr + 20));
                float32x4_t _v6 = vcvtq_f32_s32(vld1q_s32(intptr + 24));
                float32x4_t _v7 = vcvtq_f32_s32(vld1q_s32(intptr + 28));
                _v0 = vmulq_f32(_v0, _scale0);
                _v1 = vmulq_f32(_v1, _scale1);
                _v2 = vmulq_f32(_v2, _scale0);
                _v3 = vmulq_f32(_v3, _scale1);
                _v4 = vmulq_f32(_v4, _scale0);
                _v5 = vmulq_f32(_v5, _scale1);
                _v6 = vmulq_f32(_v6, _scale0);
                _v7 = vmulq_f32(_v7, _scale1);
                // float2int8relu clamps negatives to zero and narrows to int8.
                vst1_s8(ptr, float2int8relu(_v0, _v1));
                vst1_s8(ptr + 8, float2int8relu(_v2, _v3));
                vst1_s8(ptr + 16, float2int8relu(_v4, _v5));
                vst1_s8(ptr + 24, float2int8relu(_v6, _v7));
                intptr += 32;
                ptr += 32;
            }
#endif // __aarch64__
            // 2-pixel tail.
            for (; i + 1 < size; i += 2)
            {
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                float32x4_t _v2 = vcvtq_f32_s32(vld1q_s32(intptr + 8));
                float32x4_t _v3 = vcvtq_f32_s32(vld1q_s32(intptr + 12));
                _v0 = vmulq_f32(_v0, _scale0);
                _v1 = vmulq_f32(_v1, _scale1);
                _v2 = vmulq_f32(_v2, _scale0);
                _v3 = vmulq_f32(_v3, _scale1);
                vst1_s8(ptr, float2int8relu(_v0, _v1));
                vst1_s8(ptr + 8, float2int8relu(_v2, _v3));
                intptr += 16;
                ptr += 16;
            }
            // 1-pixel tail.
            for (; i < size; i++)
            {
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                _v0 = vmulq_f32(_v0, _scale0);
                _v1 = vmulq_f32(_v1, _scale1);
                vst1_s8(ptr, float2int8relu(_v0, _v1));
                intptr += 8;
                ptr += 8;
            }
        }
    }
    else
    {
        // ---- Bias path: fused multiply-add with pre-scaled bias. ----
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            const int* intptr = bottom_blob.channel(q);
            signed char* ptr = top_blob.channel(q);

            float32x4_t _scale_in0 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8);
            float32x4_t _scale_in1 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8 + 4);
            float32x4_t _scale_out0 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8);
            float32x4_t _scale_out1 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8 + 4);
            float32x4_t _bias0 = bias_data_size == 1 ? vdupq_n_f32(bias_data[0]) : vld1q_f32((const float*)bias_data + q * 8);
            float32x4_t _bias1 = bias_data_size == 1 ? vdupq_n_f32(bias_data[0]) : vld1q_f32((const float*)bias_data + q * 8 + 4);

            // Fold: v * (scale_in * scale_out) + (bias * scale_out).
            float32x4_t _scale0 = vmulq_f32(_scale_in0, _scale_out0);
            float32x4_t _scale1 = vmulq_f32(_scale_in1, _scale_out1);
            _bias0 = vmulq_f32(_bias0, _scale_out0);
            _bias1 = vmulq_f32(_bias1, _scale_out1);

            int i = 0;
#if __aarch64__
            // 4-pixel unroll (aarch64 only); vfmaq is the fused multiply-add.
            for (; i + 3 < size; i += 4)
            {
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                float32x4_t _v2 = vcvtq_f32_s32(vld1q_s32(intptr + 8));
                float32x4_t _v3 = vcvtq_f32_s32(vld1q_s32(intptr + 12));
                float32x4_t _v4 = vcvtq_f32_s32(vld1q_s32(intptr + 16));
                float32x4_t _v5 = vcvtq_f32_s32(vld1q_s32(intptr + 20));
                float32x4_t _v6 = vcvtq_f32_s32(vld1q_s32(intptr + 24));
                float32x4_t _v7 = vcvtq_f32_s32(vld1q_s32(intptr + 28));
                _v0 = vfmaq_f32(_bias0, _v0, _scale0);
                _v1 = vfmaq_f32(_bias1, _v1, _scale1);
                _v2 = vfmaq_f32(_bias0, _v2, _scale0);
                _v3 = vfmaq_f32(_bias1, _v3, _scale1);
                _v4 = vfmaq_f32(_bias0, _v4, _scale0);
                _v5 = vfmaq_f32(_bias1, _v5, _scale1);
                _v6 = vfmaq_f32(_bias0, _v6, _scale0);
                _v7 = vfmaq_f32(_bias1, _v7, _scale1);
                vst1_s8(ptr, float2int8relu(_v0, _v1));
                vst1_s8(ptr + 8, float2int8relu(_v2, _v3));
                vst1_s8(ptr + 16, float2int8relu(_v4, _v5));
                vst1_s8(ptr + 24, float2int8relu(_v6, _v7));
                intptr += 32;
                ptr += 32;
            }
#endif // __aarch64__
            for (; i + 1 < size; i += 2)
            {
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                float32x4_t _v2 = vcvtq_f32_s32(vld1q_s32(intptr + 8));
                float32x4_t _v3 = vcvtq_f32_s32(vld1q_s32(intptr + 12));
#if __aarch64__
                _v0 = vfmaq_f32(_bias0, _v0, _scale0);
                _v1 = vfmaq_f32(_bias1, _v1, _scale1);
                _v2 = vfmaq_f32(_bias0, _v2, _scale0);
                _v3 = vfmaq_f32(_bias1, _v3, _scale1);
#else  // __aarch64__
                // armv7 lacks vfmaq_f32; vmlaq_f32 is the non-fused equivalent.
                _v0 = vmlaq_f32(_bias0, _v0, _scale0);
                _v1 = vmlaq_f32(_bias1, _v1, _scale1);
                _v2 = vmlaq_f32(_bias0, _v2, _scale0);
                _v3 = vmlaq_f32(_bias1, _v3, _scale1);
#endif // __aarch64__
                vst1_s8(ptr, float2int8relu(_v0, _v1));
                vst1_s8(ptr + 8, float2int8relu(_v2, _v3));
                intptr += 16;
                ptr += 16;
            }
            for (; i < size; i++)
            {
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
#if __aarch64__
                _v0 = vfmaq_f32(_bias0, _v0, _scale0);
                _v1 = vfmaq_f32(_bias1, _v1, _scale1);
#else  // __aarch64__
                _v0 = vmlaq_f32(_bias0, _v0, _scale0);
                _v1 = vmlaq_f32(_bias1, _v1, _scale1);
#endif // __aarch64__
                vst1_s8(ptr, float2int8relu(_v0, _v1));
                intptr += 8;
                ptr += 8;
            }
        }
    }
}
|
par_lr_interp.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.24 $
***********************************************************************EHEADER*/
#include "_hypre_parcsr_ls.h"
#include "aux_interp.h"
#define MAX_C_CONNECTIONS 100
#define HAVE_COMMON_C 1
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildStdInterp
* Comment: The interpolatory weighting can be changed with the sep_weight
* variable. This can enable not separating negative and positive
* off diagonals in the weight formula.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildStdInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_Int *num_cpts_global,
HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
double trunc_factor, HYPRE_Int max_elmts,
HYPRE_Int sep_weight, HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
/* Builds the "standard" distance-two interpolation operator P for BoomerAMG.
 * Two passes over the fine grid: pass 1 sizes P (and builds the
 * fine-to-coarse map), pass 2 fills in the interpolation weights.
 * Inputs: A (system matrix), S (strength matrix), CF_marker (>=0 C-point,
 * -1 F-point, -3 special F-point excluded from interpolation),
 * num_cpts_global (coarse-point partitioning), sep_weight (1 = treat
 * positive and negative off-diagonals separately in the weight formula).
 * Output: *P_ptr. Returns hypre_error_flag. */
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
double *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
double *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
/* [col_1, col_n) is this process's global column (= row) range */
HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int col_n = col_1 + local_numrows;
HYPRE_Int total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
double *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
double *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
HYPRE_Int *col_map_offd_P = NULL;
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
/* P_marker/P_marker_offd record, per column, the last P row slot that
 * used it; comparing against the row's start avoids duplicate entries */
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
double *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_Int *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_Int *fine_to_coarse_offd = NULL;
HYPRE_Int *found = NULL;
HYPRE_Int num_cols_P_offd;
HYPRE_Int newoff, loc_col;
HYPRE_Int A_ext_rows, full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_Int *Sop_j;
HYPRE_Int Soprows;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter, coarse_counter_offd;
/* ihat maps a column index to its slot in ahat/ipnt; ipnt is the inverse
 * (slot -> column), used to reset ihat after each row */
HYPRE_Int *ihat = NULL;
HYPRE_Int *ihat_offd = NULL;
HYPRE_Int *ipnt = NULL;
HYPRE_Int *ipnt_offd = NULL;
/* Sentinel written into P_marker for strong F neighbors of the current
 * row; decremented each row so stale values can never collide */
HYPRE_Int strong_f_marker = -2;
/* Interpolation weight variables */
/* ahat accumulates the modified row of A (with F-point contributions
 * distributed) used by the standard interpolation formula */
double *ahat = NULL;
double *ahat_offd = NULL;
double sum_pos, sum_pos_C, sum_neg, sum_neg_C, sum, sum_C;
double diagonal, distribute;
double alfa = 1.;
double beta = 1.;
/* Loop variables */
HYPRE_Int index;
HYPRE_Int start_indexing = 0;
HYPRE_Int i, i1, j, j1, jj, kk, k1;
HYPRE_Int cnt_c, cnt_f, cnt_c_offd, cnt_f_offd, indx;
/* Definitions */
double zero = 0.0;
double one = 1.0;
double wall_time;
double wall_1 = 0;
double wall_2 = 0;
double wall_3 = 0;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
if (debug_flag== 4) wall_time = time_getWallclockSeconds();
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
/* Assumed partition: only the last rank knows the global C-point count,
 * so it broadcasts it to everyone */
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
 * neighbors */
newoff = 0;
full_off_procNodes = 0;
if (num_procs > 1)
{
/*----------------------------------------------------------------------
 * Get the off processors rows for A and S, associated with columns in
 * A_offd and S_offd.
 *---------------------------------------------------------------------*/
A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1);
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
A_ext_rows = hypre_CSRMatrixNumRows(A_ext);
Sop = hypre_ParCSRMatrixExtractBExt(S,A,0);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixJ(Sop);
Soprows = hypre_CSRMatrixNumRows(Sop);
/* Find nodes that are neighbors of neighbors, not found in offd */
/* NOTE: new_offd_nodes also rewrites Sop_j/A_ext_j so that off-process
 * columns are stored as negative codes (-loc_col - 1) -- the decoding
 * below relies on that convention; confirm against aux_interp.c */
newoff = new_offd_nodes(&found, A_ext_rows, A_ext_i, A_ext_j,
Soprows, col_map_offd, col_1, col_n,
Sop_i, Sop_j, CF_marker, comm_pkg);
if(newoff >= 0)
full_off_procNodes = newoff + num_cols_A_offd;
else
return hypre_error_flag;
/* Possibly add new points and new processors to the comm_pkg, all
 * processors need new_comm_pkg */
/* AHB - create a new comm package just for extended info -
 this will work better with the assumed partition*/
hypre_ParCSRFindExtendCommPkg(A, newoff, found,
&extend_comm_pkg);
if (full_off_procNodes)
CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
if (num_functions > 1 && full_off_procNodes > 0)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
alt_insert_new_nodes(comm_pkg, extend_comm_pkg, CF_marker,
full_off_procNodes, CF_marker_offd);
if(num_functions > 1)
alt_insert_new_nodes(comm_pkg, extend_comm_pkg, dof_func,
full_off_procNodes, dof_func_offd);
}
/*-----------------------------------------------------------------------
 * First Pass: Determine size of P and fill in fine_to_coarse mapping.
 *-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
 * Initialize counters and allocate mapping vector.
 *-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
if (n_fine)
{
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
}
/* Sets all of the above to -1 (markers) / -1 (maps) */
initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
coarse_counter_offd = 0;
/*-----------------------------------------------------------------------
 * Loop over fine grid.
 *-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
P_diag_i[i] = jj_counter;
if (num_procs > 1)
P_offd_i[i] = jj_counter_offd;
if (CF_marker[i] >= 0)
{
/* C-point: interpolates only from itself (one diag entry) */
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
/*--------------------------------------------------------------------
 * If i is an F-point, interpolation is from the C-points that
 * strongly influence i, or C-points that strongly influence F-points
 * that strongly influence i.
 *--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{ /* i1 is a C point */
/* P_marker[i1] < P_diag_i[i] means i1 not yet counted for row i */
if (P_marker[i1] < P_diag_i[i])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{ /* i1 is a F point, loop through it's strong neighbors */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if(P_marker[k1] < P_diag_i[i])
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
/* col_offd_S_to_A translates S's offd column numbering to A's */
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] >= 0)
{
if(P_marker_offd[k1] < P_offd_i[i])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if (CF_marker_offd[i1] >= 0)
{
if(P_marker_offd[i1] < P_offd_i[i])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{ /* F point; look at neighbors of i1. Sop contains global col
 * numbers and entries that could be in S_diag or S_offd or
 * neither. */
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
k1 = Sop_j[kk];
if(k1 >= col_1 && k1 < col_n)
{ /* In S_diag */
loc_col = k1-col_1;
if(CF_marker[loc_col] >= 0)
{
if(P_marker[loc_col] < P_diag_i[i])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
}
else
{
/* negative Sop_j entry encodes a local offd index: -k1 - 1 */
loc_col = -k1 - 1;
if(CF_marker_offd[loc_col] >= 0)
{
if(P_marker_offd[loc_col] < P_offd_i[i])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d determine structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
 * Allocate arrays.
 *-----------------------------------------------------------------------*/
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size);
P_diag_data = hypre_CTAlloc(double, P_diag_size);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size);
P_offd_data = hypre_CTAlloc(double, P_offd_size);
}
P_diag_i[n_fine] = jj_counter;
P_offd_i[n_fine] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/* Fine to coarse mapping */
if(num_procs > 1)
{
/* Temporarily shift to global coarse numbering so neighbors receive
 * global indices; shift back for local use afterwards */
for (i = 0; i < n_fine; i++)
fine_to_coarse[i] += my_first_cpt;
alt_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes,
fine_to_coarse_offd);
for (i = 0; i < n_fine; i++)
fine_to_coarse[i] -= my_first_cpt;
}
/* Initialize ahat, which is a modification to a, used in the standard
 * interpolation routine. */
if (n_fine)
{
ahat = hypre_CTAlloc(double, n_fine);
ihat = hypre_CTAlloc(HYPRE_Int, n_fine);
ipnt = hypre_CTAlloc(HYPRE_Int, n_fine);
}
if (full_off_procNodes)
{
ahat_offd = hypre_CTAlloc(double, full_off_procNodes);
ihat_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
ipnt_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
}
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
ahat[i] = 0;
ihat[i] = -1;
}
for (i = 0; i < full_off_procNodes; i++)
{
P_marker_offd[i] = -1;
ahat_offd[i] = 0;
ihat_offd[i] = -1;
}
/*-----------------------------------------------------------------------
 * Loop over fine grid points.
 * Second pass: the same structure walk as pass 1, now recording column
 * indices and computing weights.
 *-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
jj_begin_row = jj_counter;
if(num_procs > 1)
jj_begin_row_offd = jj_counter_offd;
/*--------------------------------------------------------------------
 * If i is a c-point, interpolation is the identity.
 *--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
 * If i is an F-point, build interpolation.
 *--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/* fresh sentinel per row: marks strong F neighbors whose rows get
 * distributed into ahat below */
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
 * If neighbor i1 is a C-point, set column number in P_diag_j
 * and initialize interpolation weight to zero.
 *--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = i1;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if(P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = k1;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if(CF_marker_offd[k1] >= 0)
{
if(P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if ( CF_marker_offd[i1] >= 0)
{
if(P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd]=i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
k1 = Sop_j[kk];
if(k1 >= col_1 && k1 < col_n)
{
loc_col = k1-col_1;
if(CF_marker[loc_col] >= 0)
{
if(P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
/* stored as local index here; mapped to coarse index
 * after the weights are set (see fine_to_coarse below) */
P_diag_j[jj_counter] = loc_col;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
else
{
loc_col = -k1 - 1;
if(CF_marker_offd[loc_col] >= 0)
{
if(P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_1 += wall_time;
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/* Accumulate the modified row ahat: slots [0, cnt_c) hold C-point
 * (interpolatory) entries, slots [cnt_c, cnt_f) hold F/other entries.
 * The diagonal a_ii goes into the first F slot. */
cnt_c = 0;
cnt_f = jj_end_row-jj_begin_row;
cnt_c_offd = 0;
cnt_f_offd = jj_end_row_offd-jj_begin_row_offd;
ihat[i] = cnt_f;
ipnt[cnt_f] = i;
/* assumes A rows store the diagonal entry first -- hypre convention */
ahat[cnt_f++] = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* i1 is direct neighbor */
i1 = A_diag_j[jj];
if (P_marker[i1] != strong_f_marker)
{
indx = ihat[i1];
if (indx > -1)
ahat[indx] += A_diag_data[jj];
else if (P_marker[i1] >= jj_begin_row)
{
/* interpolatory C point: open a C slot */
ihat[i1] = cnt_c;
ipnt[cnt_c] = i1;
ahat[cnt_c++] += A_diag_data[jj];
}
else if (CF_marker[i1] != -3)
{
/* weak/non-interpolatory connection: open an F slot */
ihat[i1] = cnt_f;
ipnt[cnt_f] = i1;
ahat[cnt_f++] += A_diag_data[jj];
}
}
else
{
/* strong F neighbor: distribute its row, scaled by a_{i,i1}/a_{i1,i1} */
if(num_functions == 1 || dof_func[i] == dof_func[i1])
{
distribute = A_diag_data[jj]/A_diag_data[A_diag_i[i1]];
for (kk = A_diag_i[i1]+1; kk < A_diag_i[i1+1]; kk++)
{
k1 = A_diag_j[kk];
indx = ihat[k1];
if (indx > -1)
ahat[indx] -= A_diag_data[kk]*distribute;
else if (P_marker[k1] >= jj_begin_row)
{
ihat[k1] = cnt_c;
ipnt[cnt_c] = k1;
ahat[cnt_c++] -= A_diag_data[kk]*distribute;
}
else
{
ihat[k1] = cnt_f;
ipnt[cnt_f] = k1;
ahat[cnt_f++] -= A_diag_data[kk]*distribute;
}
}
if(num_procs > 1)
{
for (kk = A_offd_i[i1]; kk < A_offd_i[i1+1]; kk++)
{
k1 = A_offd_j[kk];
indx = ihat_offd[k1];
if(num_functions == 1 || dof_func[i1] == dof_func_offd[k1])
{
if (indx > -1)
ahat_offd[indx] -= A_offd_data[kk]*distribute;
else if (P_marker_offd[k1] >= jj_begin_row_offd)
{
ihat_offd[k1] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = k1;
ahat_offd[cnt_c_offd++] -= A_offd_data[kk]*distribute;
}
else
{
ihat_offd[k1] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = k1;
ahat_offd[cnt_f_offd++] -= A_offd_data[kk]*distribute;
}
}
}
}
}
}
}
if(num_procs > 1)
{
/* Same accumulation for the off-diagonal part of row i; strong F
 * off-process neighbors distribute via their A_ext rows */
for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if(P_marker_offd[i1] != strong_f_marker)
{
indx = ihat_offd[i1];
if (indx > -1)
ahat_offd[indx] += A_offd_data[jj];
else if (P_marker_offd[i1] >= jj_begin_row_offd)
{
ihat_offd[i1] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = i1;
ahat_offd[cnt_c_offd++] += A_offd_data[jj];
}
else if (CF_marker_offd[i1] != -3)
{
ihat_offd[i1] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = i1;
ahat_offd[cnt_f_offd++] += A_offd_data[jj];
}
}
else
{
if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
{
/* assumes A_ext rows also store the diagonal first -- TODO confirm */
distribute = A_offd_data[jj]/A_ext_data[A_ext_i[i1]];
for (kk = A_ext_i[i1]+1; kk < A_ext_i[i1+1]; kk++)
{
k1 = A_ext_j[kk];
if(k1 >= col_1 && k1 < col_n)
{ /*diag*/
loc_col = k1 - col_1;
indx = ihat[loc_col];
if (indx > -1)
ahat[indx] -= A_ext_data[kk]*distribute;
else if (P_marker[loc_col] >= jj_begin_row)
{
ihat[loc_col] = cnt_c;
ipnt[cnt_c] = loc_col;
ahat[cnt_c++] -= A_ext_data[kk]*distribute;
}
else
{
ihat[loc_col] = cnt_f;
ipnt[cnt_f] = loc_col;
ahat[cnt_f++] -= A_ext_data[kk]*distribute;
}
}
else
{
loc_col = -k1 - 1;
if(num_functions == 1 ||
dof_func_offd[loc_col] == dof_func_offd[i1])
{
indx = ihat_offd[loc_col];
if (indx > -1)
ahat_offd[indx] -= A_ext_data[kk]*distribute;
else if(P_marker_offd[loc_col] >= jj_begin_row_offd)
{
ihat_offd[loc_col] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = loc_col;
ahat_offd[cnt_c_offd++] -= A_ext_data[kk]*distribute;
}
else
{
ihat_offd[loc_col] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = loc_col;
ahat_offd[cnt_f_offd++] -= A_ext_data[kk]*distribute;
}
}
}
}
}
}
}
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_2 += wall_time;
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/* After accumulation cnt_c equals the number of C slots, which is the
 * slot where the diagonal a_ii was first stored (cnt_f started at
 * jj_end_row-jj_begin_row = final cnt_c), so ahat[cnt_c] is the
 * (modified) diagonal. Zero it so the sums below skip it. */
diagonal = ahat[cnt_c];
ahat[cnt_c] = 0;
sum_pos = 0;
sum_pos_C = 0;
sum_neg = 0;
sum_neg_C = 0;
sum = 0;
sum_C = 0;
if(sep_weight == 1)
{
/* Separate-weight formula: scale positive and negative parts of the
 * row independently (classic standard interpolation) */
for (jj=0; jj < cnt_c; jj++)
{
if (ahat[jj] > 0)
{
sum_pos_C += ahat[jj];
}
else
{
sum_neg_C += ahat[jj];
}
}
if(num_procs > 1)
{
for (jj=0; jj < cnt_c_offd; jj++)
{
if (ahat_offd[jj] > 0)
{
sum_pos_C += ahat_offd[jj];
}
else
{
sum_neg_C += ahat_offd[jj];
}
}
}
sum_pos = sum_pos_C;
sum_neg = sum_neg_C;
/* F slots (skipping the zeroed diagonal at cnt_c) contribute to the
 * full sums only; reset them for the next row as we go */
for (jj=cnt_c+1; jj < cnt_f; jj++)
{
if (ahat[jj] > 0)
{
sum_pos += ahat[jj];
}
else
{
sum_neg += ahat[jj];
}
ahat[jj] = 0;
}
if(num_procs > 1)
{
for (jj=cnt_c_offd; jj < cnt_f_offd; jj++)
{
if (ahat_offd[jj] > 0)
{
sum_pos += ahat_offd[jj];
}
else
{
sum_neg += ahat_offd[jj];
}
ahat_offd[jj] = 0;
}
}
/* Guard against division by zero: the product is used as a plain
 * truthiness test (nonzero double). alfa/beta keep their previous
 * value (initially 1) when the corresponding sum vanishes. */
if (sum_neg_C*diagonal) alfa = sum_neg/sum_neg_C/diagonal;
if (sum_pos_C*diagonal) beta = sum_pos/sum_pos_C/diagonal;
/*-----------------------------------------------------------------
 * Set interpolation weight by dividing by the diagonal.
 *-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
j1 = ihat[P_diag_j[jj]];
if (ahat[j1] > 0)
P_diag_data[jj] = -beta*ahat[j1];
else
P_diag_data[jj] = -alfa*ahat[j1];
/* now translate the stored local column to its coarse index */
P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]];
ahat[j1] = 0;
}
/* reset ihat for the columns touched this row (via ipnt inverse map) */
for (jj=0; jj < cnt_f; jj++)
ihat[ipnt[jj]] = -1;
if(num_procs > 1)
{
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
j1 = ihat_offd[P_offd_j[jj]];
if (ahat_offd[j1] > 0)
P_offd_data[jj] = -beta*ahat_offd[j1];
else
P_offd_data[jj] = -alfa*ahat_offd[j1];
ahat_offd[j1] = 0;
}
for (jj=0; jj < cnt_f_offd; jj++)
ihat_offd[ipnt_offd[jj]] = -1;
}
}
else
{
/* Combined formula: one scaling factor from the total row sum */
for (jj=0; jj < cnt_c; jj++)
{
sum_C += ahat[jj];
}
if(num_procs > 1)
{
for (jj=0; jj < cnt_c_offd; jj++)
{
sum_C += ahat_offd[jj];
}
}
sum = sum_C;
for (jj=cnt_c+1; jj < cnt_f; jj++)
{
sum += ahat[jj];
ahat[jj] = 0;
}
if(num_procs > 1)
{
for (jj=cnt_c_offd; jj < cnt_f_offd; jj++)
{
sum += ahat_offd[jj];
ahat_offd[jj] = 0;
}
}
if (sum_C*diagonal) alfa = sum/sum_C/diagonal;
/*-----------------------------------------------------------------
 * Set interpolation weight by dividing by the diagonal.
 *-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
j1 = ihat[P_diag_j[jj]];
P_diag_data[jj] = -alfa*ahat[j1];
P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]];
ahat[j1] = 0;
}
for (jj=0; jj < cnt_f; jj++)
ihat[ipnt[jj]] = -1;
if(num_procs > 1)
{
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
j1 = ihat_offd[P_offd_j[jj]];
P_offd_data[jj] = -alfa*ahat_offd[j1];
ahat_offd[j1] = 0;
}
for (jj=0; jj < cnt_f_offd; jj++)
ihat_offd[ipnt_offd[jj]] = -1;
}
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_3 += wall_time;
fflush(NULL);
}
}
}
if (debug_flag==4)
{
hypre_printf("Proc = %d fill part 1 %f part 2 %f part 3 %f\n",
my_id, wall_1, wall_2, wall_3);
fflush(NULL);
}
/* Wrap the filled CSR arrays into the ParCSR matrix P */
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
/* row starts are A's col starts, owned by A */
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
/* truncation may reallocate -- refresh local pointers */
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* This builds col_map, col_map should be monotone increasing and contain
 * global numbers. */
num_cols_P_offd = 0;
if(P_offd_size)
{
/* reuse P_marker (resized to the offd width) as a used-column flag */
hypre_TFree(P_marker);
if (full_off_procNodes)
P_marker = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
for (i=0; i < full_off_procNodes; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
if(tmp_CF_marker_offd[index] >= 0)
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
}
if (num_cols_P_offd)
col_map_offd_P = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd);
/* first pass: compress P_offd_j to indices into the used-column list */
index = 0;
for(i = 0; i < num_cols_P_offd; i++)
{
while( P_marker[index] == 0) index++;
col_map_offd_P[i] = index++;
}
for(i = 0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(col_map_offd_P,
P_offd_j[i],
num_cols_P_offd);
/* second pass: replace the list with global coarse column numbers */
index = 0;
for(i = 0; i < num_cols_P_offd; i++)
{
while (P_marker[index] == 0) index++;
col_map_offd_P[i] = fine_to_coarse_offd[index];
index++;
}
/* Sort the col_map_offd_P and P_offd_j correctly */
for(i = 0; i < num_cols_P_offd; i++)
P_marker[i] = col_map_offd_P[i];
/* Check if sort actually changed anything */
if(hypre_ssort(col_map_offd_P,num_cols_P_offd))
{
/* remap P_offd_j to the new sorted positions (inner loop exits via
 * j = num_cols_P_offd once the match is found) */
for(i = 0; i < P_offd_size; i++)
for(j = 0; j < num_cols_P_offd; j++)
if(P_marker[P_offd_j[i]] == col_map_offd_P[j])
{
P_offd_j[i] = j;
j = num_cols_P_offd;
}
}
hypre_TFree(P_marker);
}
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_MatvecCommPkgCreate(P);
/* restore CF_marker: -3 (special F) points revert to ordinary F (-1) */
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse);
hypre_TFree(P_marker);
hypre_TFree(ahat);
hypre_TFree(ihat);
hypre_TFree(ipnt);
if (full_off_procNodes)
{
hypre_TFree(ahat_offd);
hypre_TFree(ihat_offd);
hypre_TFree(ipnt_offd);
}
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd);
hypre_TFree(P_marker_offd);
hypre_TFree(CF_marker_offd);
hypre_TFree(tmp_CF_marker_offd);
if(num_functions > 1)
hypre_TFree(dof_func_offd);
hypre_TFree(found);
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildExtPIInterp
* Comment:
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildExtPIInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_Int *num_cpts_global,
HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
double trunc_factor, HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
double *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
double *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int col_n = col_1 + local_numrows;
HYPRE_Int total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
double *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
double *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
HYPRE_Int *col_map_offd_P = NULL;
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
double *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_Int *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_Int *fine_to_coarse_offd = NULL;
HYPRE_Int *found = NULL;
HYPRE_Int num_cols_P_offd;
HYPRE_Int newoff, loc_col;
HYPRE_Int A_ext_rows, full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_Int *Sop_j;
HYPRE_Int Soprows, sgn = 1;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
/* Interpolation weight variables */
double sum, diagonal, distribute;
HYPRE_Int strong_f_marker;
/* Loop variables */
HYPRE_Int index;
HYPRE_Int start_indexing = 0;
HYPRE_Int i, i1, i2, j, jj, kk, k1, jj1;
/* Threading variables */
HYPRE_Int my_thread_num, num_threads, start, stop;
HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1);
HYPRE_Int * diag_offset;
HYPRE_Int * fine_to_coarse_offset;
HYPRE_Int * offd_offset;
/* Definitions */
double zero = 0.0;
double one = 1.0;
double wall_time;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
* neighbors */
newoff = 0;
full_off_procNodes = 0;
if (num_procs > 1)
{
/*----------------------------------------------------------------------
* Get the off processors rows for A and S, associated with columns in
* A_offd and S_offd.
*---------------------------------------------------------------------*/
A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1);
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
A_ext_rows = hypre_CSRMatrixNumRows(A_ext);
Sop = hypre_ParCSRMatrixExtractBExt(S,A,0);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixJ(Sop);
Soprows = hypre_CSRMatrixNumRows(Sop);
/* Find nodes that are neighbors of neighbors, not found in offd */
newoff = new_offd_nodes(&found, A_ext_rows, A_ext_i, A_ext_j,
Soprows, col_map_offd, col_1, col_n,
Sop_i, Sop_j, CF_marker, comm_pkg);
if(newoff >= 0)
full_off_procNodes = newoff + num_cols_A_offd;
else
return hypre_error_flag;
/* Possibly add new points and new processors to the comm_pkg, all
* processors need new_comm_pkg */
/* AHB - create a new comm package just for extended info -
this will work better with the assumed partition*/
hypre_ParCSRFindExtendCommPkg(A, newoff, found,
&extend_comm_pkg);
if (full_off_procNodes)
CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
if (num_functions > 1 && full_off_procNodes > 0)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
alt_insert_new_nodes(comm_pkg, extend_comm_pkg, CF_marker,
full_off_procNodes, CF_marker_offd);
if(num_functions > 1)
alt_insert_new_nodes(comm_pkg, extend_comm_pkg, dof_func,
full_off_procNodes, dof_func_offd);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
if (n_fine)
{
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
}
if (full_off_procNodes)
{
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
}
/* This function is smart enough to check P_marker and P_marker_offd only,
* and set them if they are not NULL. The other vectors are set regardless.*/
initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
/*-----------------------------------------------------------------------
* Initialize threading variables
*-----------------------------------------------------------------------*/
max_num_threads[0] = hypre_NumThreads();
diag_offset = hypre_CTAlloc(HYPRE_Int, max_num_threads[0]);
fine_to_coarse_offset = hypre_CTAlloc(HYPRE_Int, max_num_threads[0]);
offd_offset = hypre_CTAlloc(HYPRE_Int, max_num_threads[0]);
for(i=0; i < max_num_threads[0]; i++)
{
diag_offset[i] = 0;
fine_to_coarse_offset[i] = 0;
offd_offset[i] = 0;
}
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,my_thread_num,num_threads,start,stop,coarse_counter,jj_counter,jj_counter_offd, P_marker, P_marker_offd,jj,kk,i1,k1,loc_col,jj_begin_row,jj_begin_row_offd,jj_end_row,jj_end_row_offd,diagonal,sum,sgn,jj1,i2,distribute,strong_f_marker)
#endif
{
/* Parallelize by computing only over each thread's range of rows.
*
* The first large for loop computes ~locally~ for each thread P_diag_i,
* P_offd_i and fine_to_coarse. Then, the arrays are stitched together
* For example, the first phase would compute
* P_diag_i = [0, 2, 4, 7, 2, 5, 6]
* for two threads. P_diag_i[stop] points to the end of that
* thread's data, but P_diag_i[start] points to the end of the
* previous thread's row range. This is then stitched together at the
* end to yield,
* P_diag_i = [0, 2, 4, 7, 9, 14, 15].
*
* The second large for loop computes interpolation weights and is
* relatively straight-forward to thread.
*/
/* initialize thread-wise variables */
strong_f_marker = -2;
coarse_counter = 0;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (n_fine)
{
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
for (i = 0; i < n_fine; i++)
{ P_marker[i] = -1; }
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
for (i = 0; i < full_off_procNodes; i++)
{ P_marker_offd[i] = -1;}
}
/* this thread's row range */
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
start = (n_fine/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{ stop = n_fine; }
else
{ stop = (n_fine/num_threads)*(my_thread_num+1); }
/* loop over rows */
for (i = start; i < stop; i++)
{
P_diag_i[i] = jj_counter;
if (num_procs > 1)
P_offd_i[i] = jj_counter_offd;
if (CF_marker[i] >= 0)
{
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i, or C-points that strongly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{ /* i1 is a C point */
if (P_marker[i1] < P_diag_i[i])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{ /* i1 is an F point; loop through its strong neighbors */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if(P_marker[k1] < P_diag_i[i])
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] >= 0)
{
if(P_marker_offd[k1] < P_offd_i[i])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if (CF_marker_offd[i1] >= 0)
{
if(P_marker_offd[i1] < P_offd_i[i])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{ /* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
k1 = Sop_j[kk];
if(k1 >= col_1 && k1 < col_n)
{ /* In S_diag */
loc_col = k1-col_1;
if(CF_marker[loc_col] >= 0)
{
if(P_marker[loc_col] < P_diag_i[i])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
}
else
{
loc_col = -k1 - 1;
if(CF_marker_offd[loc_col] >= 0)
{
if(P_marker_offd[loc_col] < P_offd_i[i])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* End loop over fine grid.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
P_diag_i[stop] = jj_counter;
P_offd_i[stop] = jj_counter_offd;
fine_to_coarse_offset[my_thread_num] = coarse_counter;
diag_offset[my_thread_num] = jj_counter;
offd_offset[my_thread_num] = jj_counter_offd;
/* Stitch P_diag_i, P_offd_i and fine_to_coarse together */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if(my_thread_num == 0)
{
/* Calculate the offset for P_diag_i and P_offd_i for each thread */
for (i = 1; i < num_threads; i++)
{
diag_offset[i] = diag_offset[i-1] + diag_offset[i];
fine_to_coarse_offset[i] = fine_to_coarse_offset[i-1] + fine_to_coarse_offset[i];
offd_offset[i] = offd_offset[i-1] + offd_offset[i];
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if(my_thread_num > 0)
{
/* update row pointer array with offset,
* making sure to update the row stop index */
for (i = start+1; i <= stop; i++)
{
P_diag_i[i] += diag_offset[my_thread_num-1];
P_offd_i[i] += offd_offset[my_thread_num-1];
}
/* update fine_to_coarse by offsetting with the offset
* from the preceding thread */
for (i = start; i < stop; i++)
{
if(fine_to_coarse[i] >= 0)
{ fine_to_coarse[i] += fine_to_coarse_offset[my_thread_num-1]; }
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if(my_thread_num == 0)
{
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d determine structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
if (debug_flag== 4) wall_time = time_getWallclockSeconds();
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size);
P_diag_data = hypre_CTAlloc(double, P_diag_size);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size);
P_offd_data = hypre_CTAlloc(double, P_offd_size);
}
}
/* Fine to coarse mapping */
if(num_procs > 1 && my_thread_num == 0)
{
for (i = 0; i < n_fine; i++)
fine_to_coarse[i] += my_first_cpt;
alt_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes,
fine_to_coarse_offd);
for (i = 0; i < n_fine; i++)
fine_to_coarse[i] -= my_first_cpt;
}
for (i = 0; i < n_fine; i++)
P_marker[i] = -1;
for (i = 0; i < full_off_procNodes; i++)
P_marker_offd[i] = -1;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (i = start; i < stop; i++)
{
jj_begin_row = P_diag_i[i];
jj_begin_row_offd = P_offd_i[i];
jj_counter = jj_begin_row;
jj_counter_offd = jj_begin_row_offd;
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if(P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if(CF_marker_offd[k1] >= 0)
{
if(P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if ( CF_marker_offd[i1] >= 0)
{
if(P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
k1 = Sop_j[kk];
/* Find local col number */
if(k1 >= col_1 && k1 < col_n)
{
loc_col = k1-col_1;
if(CF_marker[loc_col] >= 0)
{
if(P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
else
{
loc_col = -k1 - 1;
if(CF_marker_offd[loc_col] >= 0)
{
if(P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* i1 is a c-point and strongly influences i, accumulate
* a_(i,i1) into interpolation weight */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if(P_marker[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly influence i. */
for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if((P_marker[i2] >= jj_begin_row || i2 == i) && (sgn*A_diag_data[jj1]) < 0)
sum += A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
sum += A_offd_data[jj1];
}
}
if(sum != 0)
{
distribute = A_diag_data[jj]/sum;
/* Loop over row of A for point i1 and do the distribution */
for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute*A_diag_data[jj1];
if(i2 == i && (sgn*A_diag_data[jj1]) < 0)
diagonal += distribute*A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute*A_offd_data[jj1];
}
}
}
else
{
diagonal += A_diag_data[jj];
}
}
/* neighbor i1 weakly influences i, accumulate a_(i,i1) into
* diagonal */
else if (CF_marker[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
if(num_procs > 1)
{
for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if(P_marker_offd[i1] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
else if(P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
if(A_ext_data[A_ext_i[i1]] < 0) sgn = -1;
for(jj1 = A_ext_i[i1]+1; jj1 < A_ext_i[i1+1]; jj1++)
{
k1 = A_ext_j[jj1];
if(k1 >= col_1 && k1 < col_n)
{ /* diag */
loc_col = k1 - col_1;
if((P_marker[loc_col] >= jj_begin_row || loc_col == i)
&& (sgn*A_ext_data[jj1]) < 0)
sum += A_ext_data[jj1];
}
else
{
loc_col = -k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd &&
(sgn*A_ext_data[jj1]) < 0)
sum += A_ext_data[jj1];
}
}
if(sum != 0)
{
distribute = A_offd_data[jj] / sum;
for(jj1 = A_ext_i[i1]+1; jj1 < A_ext_i[i1+1]; jj1++)
{
k1 = A_ext_j[jj1];
if(k1 >= col_1 && k1 < col_n)
{ /* diag */
loc_col = k1 - col_1;
if(P_marker[loc_col] >= jj_begin_row &&
(sgn*A_ext_data[jj1]) < 0)
P_diag_data[P_marker[loc_col]] += distribute*
A_ext_data[jj1];
if(loc_col == i && (sgn*A_ext_data[jj1]) < 0)
diagonal += distribute*A_ext_data[jj1];
}
else
{
loc_col = -k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd &&
(sgn*A_ext_data[jj1]) < 0)
P_offd_data[P_marker_offd[loc_col]] += distribute*
A_ext_data[jj1];
}
}
}
else
{
diagonal += A_offd_data[jj];
}
}
else if (CF_marker_offd[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
if (diagonal)
{
for(jj = jj_begin_row; jj < jj_end_row; jj++)
P_diag_data[jj] /= -diagonal;
for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
P_offd_data[jj] /= -diagonal;
}
}
strong_f_marker--;
}
/*-----------------------------------------------------------------------
* End large for loop over nfine
*-----------------------------------------------------------------------*/
if (n_fine)
{ hypre_TFree(P_marker); }
if (full_off_procNodes)
{ hypre_TFree(P_marker_offd); }
}
/*-----------------------------------------------------------------------
* End PAR_REGION
*-----------------------------------------------------------------------*/
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d fill structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
num_cols_P_offd = 0;
if(P_offd_size)
{
hypre_TFree(P_marker);
if (full_off_procNodes)
P_marker = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < full_off_procNodes; i++)
P_marker[i] = 0;
/* These two loops set P_marker[i] to 1 if it appears in P_offd_j and if
* tmp_CF_marker_offd has i marked. num_cols_P_offd is then set to the
* total number of times P_marker is set */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,index) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if(tmp_CF_marker_offd[index] >= 0)
{ P_marker[index] = 1; }
}
num_cols_P_offd = 0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:num_cols_P_offd) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < full_off_procNodes; i++)
{
if(P_marker[i])
{ num_cols_P_offd++; }
}
if (num_cols_P_offd)
col_map_offd_P = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd);
/* col_map_offd_P[i] = index of i-th nonzero in P_marker
* JBS: Not worth parallelizing this for loop with OMP
*/
index = 0;
for(i = 0; i < num_cols_P_offd; i++)
{
while( P_marker[index] == 0) index++;
col_map_offd_P[i] = index++;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for(i = 0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(col_map_offd_P,
P_offd_j[i],
num_cols_P_offd);
/* col_map_offd_P[i] = fine_to_coarse[ index of i-th nonzero in P_marker ]
* JBS: Not worth parallelizing this for loop with OMP
*/
index = 0;
for(i = 0; i < num_cols_P_offd; i++)
{
while (P_marker[index] == 0) index++;
col_map_offd_P[i] = fine_to_coarse_offd[index];
index++;
}
/* Sort the col_map_offd_P and P_offd_j correctly */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for(i = 0; i < num_cols_P_offd; i++)
P_marker[i] = col_map_offd_P[i];
/* Check if sort actually changed anything */
if(hypre_ssort(col_map_offd_P,num_cols_P_offd))
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j) HYPRE_SMP_SCHEDULE
#endif
for(i = 0; i < P_offd_size; i++)
for(j = 0; j < num_cols_P_offd; j++)
if(P_marker[P_offd_j[i]] == col_map_offd_P[j])
{
P_offd_j[i] = j;
j = num_cols_P_offd;
}
}
hypre_TFree(P_marker);
}
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_MatvecCommPkgCreate(P);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(max_num_threads);
hypre_TFree(fine_to_coarse);
hypre_TFree(diag_offset);
hypre_TFree(offd_offset);
hypre_TFree(fine_to_coarse_offset);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd);
hypre_TFree(CF_marker_offd);
hypre_TFree(tmp_CF_marker_offd);
if(num_functions > 1)
hypre_TFree(dof_func_offd);
hypre_TFree(found);
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildExtPICCInterp
* Comment: Only use FF when there is no common c point.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildExtPICCInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_Int *num_cpts_global,
HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
double trunc_factor, HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
double *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
double *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int col_n = col_1 + local_numrows;
HYPRE_Int total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
double *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
double *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
HYPRE_Int *col_map_offd_P = NULL;
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/*HYPRE_Int **ext_p, **ext_p_offd;*/
HYPRE_Int ccounter_offd;
/*HYPRE_Int *clist_offd;*/
HYPRE_Int common_c;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
double *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_Int *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_Int *fine_to_coarse_offd = NULL;
HYPRE_Int *found = NULL;
HYPRE_Int num_cols_P_offd;
HYPRE_Int newoff, loc_col;
HYPRE_Int A_ext_rows, full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_Int *Sop_j;
HYPRE_Int Soprows, sgn = 1;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter, coarse_counter_offd;
/* Interpolation weight variables */
double sum, diagonal, distribute;
HYPRE_Int strong_f_marker = -2;
/* Loop variables */
HYPRE_Int index;
HYPRE_Int start_indexing = 0;
HYPRE_Int i, i1, i2, j, jj, kk, k1, jj1;
HYPRE_Int ccounter;
/*HYPRE_Int *clist, ccounter;*/
/* Definitions */
double zero = 0.0;
double one = 1.0;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
* neighbors */
newoff = 0;
full_off_procNodes = 0;
if (num_procs > 1)
{
/*----------------------------------------------------------------------
* Get the off processors rows for A and S, associated with columns in
* A_offd and S_offd.
*---------------------------------------------------------------------*/
A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1);
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
A_ext_rows = hypre_CSRMatrixNumRows(A_ext);
Sop = hypre_ParCSRMatrixExtractBExt(S,A,0);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixJ(Sop);
Soprows = hypre_CSRMatrixNumRows(Sop);
/* Find nodes that are neighbors of neighbors, not found in offd */
newoff = new_offd_nodes(&found, A_ext_rows, A_ext_i, A_ext_j,
Soprows, col_map_offd, col_1, col_n,
Sop_i, Sop_j, CF_marker, comm_pkg);
if(newoff >= 0)
full_off_procNodes = newoff + num_cols_A_offd;
else
return hypre_error_flag;
/* Possibly add new points and new processors to the comm_pkg, all
* processors need new_comm_pkg */
/* AHB - create a new comm package just for extended info -
this will work better with the assumed partition*/
hypre_ParCSRFindExtendCommPkg(A, newoff, found,
&extend_comm_pkg);
if (full_off_procNodes)
CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
if (num_functions > 1 && full_off_procNodes > 0)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
alt_insert_new_nodes(comm_pkg, extend_comm_pkg, CF_marker,
full_off_procNodes, CF_marker_offd);
if(num_functions > 1)
alt_insert_new_nodes(comm_pkg, extend_comm_pkg, dof_func,
full_off_procNodes, dof_func_offd);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
if (n_fine)
{
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
}
/*clist = hypre_CTAlloc(HYPRE_Int, MAX_C_CONNECTIONS);
for(i = 0; i < MAX_C_CONNECTIONS; i++)
clist[i] = 0;
if(num_procs > 1)
{
clist_offd = hypre_CTAlloc(HYPRE_Int, MAX_C_CONNECTIONS);
for(i = 0; i < MAX_C_CONNECTIONS; i++)
clist_offd[i] = 0;
}*/
initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
coarse_counter_offd = 0;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
P_diag_i[i] = jj_counter;
if (num_procs > 1)
P_offd_i[i] = jj_counter_offd;
if (CF_marker[i] >= 0)
{
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i, or C-points that strongly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
/* Initialize ccounter for each f point */
ccounter = 0;
ccounter_offd = 0;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* search through diag to find all c neighbors */
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{ /* i1 is a C point */
CF_marker[i1] = 2;
if (P_marker[i1] < P_diag_i[i])
{
/*clist[ccounter++] = i1;*/
P_marker[i1] = jj_counter;
jj_counter++;
}
}
}
/*qsort0(clist,0,ccounter-1);*/
if(num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{ /* search through offd to find all c neighbors */
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if(CF_marker_offd[i1] > 0)
{ /* i1 is a C point direct neighbor */
CF_marker_offd[i1] = 2;
if(P_marker_offd[i1] < P_offd_i[i])
{
/*clist_offd[ccounter_offd++] = i1;*/
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
/*qsort0(clist_offd,0,ccounter_offd-1);*/
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search diag to find f neighbors and determine if common c point */
i1 = S_diag_j[jj];
if (CF_marker[i1] == -1)
{ /* i1 is an F point; loop through its strong neighbors */
common_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] == 2)
{
/*if(hypre_BinarySearch(clist,k1,ccounter) >= 0)
{*/
common_c = 1;
break;
/*kk = S_diag_i[i1+1];
}*/
}
}
if(num_procs > 1 && common_c == 0)
{ /* no common c point yet, check offd */
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] == 2)
{ /* k1 is a c point check if it is common */
/*if(hypre_BinarySearch(clist_offd,k1,ccounter_offd) >= 0)
{*/
common_c = 1;
break;
/*kk = S_offd_i[i1+1];
}*/
}
}
}
if(!common_c)
{ /* No common c point, extend the interp set */
for(kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if(CF_marker[k1] > 0)
{
if(P_marker[k1] < P_diag_i[i])
{
P_marker[k1] = jj_counter;
jj_counter++;
/*break;*/
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] > 0)
{
if(P_marker_offd[k1] < P_offd_i[i])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
/*break;*/
}
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if (CF_marker_offd[i1] == -1)
{ /* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
common_c = 0;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{ /* Check if common c */
k1 = Sop_j[kk];
if(k1 >= col_1 && k1 < col_n)
{ /* In S_diag */
loc_col = k1-col_1;
if(CF_marker[loc_col] == 2)
{
/*if(hypre_BinarySearch(clist,loc_col,ccounter) >= 0)
{*/
common_c = 1;
break;
/*kk = Sop_i[i1+1];
}*/
}
}
else
{
loc_col = -k1 - 1;
if(CF_marker_offd[loc_col] == 2)
{
/*if(hypre_BinarySearch(clist_offd,loc_col,ccounter_offd) >=
0)
{*/
common_c = 1;
break;
/*kk = Sop_i[i1+1];
}*/
}
}
}
if(!common_c)
{
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{ /* Check if common c */
k1 = Sop_j[kk];
if(k1 >= col_1 && k1 < col_n)
{ /* In S_diag */
loc_col = k1-col_1;
if(CF_marker[loc_col] > 0)
{
if(P_marker[loc_col] < P_diag_i[i])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
/*break;*/
}
}
}
else
{
loc_col = -k1 - 1;
if(CF_marker_offd[loc_col] > 0)
{
if(P_marker_offd[loc_col] < P_offd_i[i])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
/*break;*/
}
}
}
}
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* search through diag to find all c neighbors */
i1 = S_diag_j[jj];
if (CF_marker[i1] == 2)
CF_marker[i1] = 1;
}
if(num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{ /* search through offd to find all c neighbors */
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if(CF_marker_offd[i1] == 2)
{ /* i1 is a C point direct neighbor */
CF_marker_offd[i1] = 1;
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size);
P_diag_data = hypre_CTAlloc(double, P_diag_size);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size);
P_offd_data = hypre_CTAlloc(double, P_offd_size);
}
P_diag_i[n_fine] = jj_counter;
P_offd_i[n_fine] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
ccounter = start_indexing;
ccounter_offd = start_indexing;
/* Fine to coarse mapping */
if(num_procs > 1)
{
for (i = 0; i < n_fine; i++)
fine_to_coarse[i] += my_first_cpt;
alt_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes,
fine_to_coarse_offd);
for (i = 0; i < n_fine; i++)
fine_to_coarse[i] -= my_first_cpt;
}
for (i = 0; i < n_fine; i++)
P_marker[i] = -1;
for (i = 0; i < full_off_procNodes; i++)
P_marker_offd[i] = -1;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
jj_begin_row = jj_counter;
if(num_procs > 1)
jj_begin_row_offd = jj_counter_offd;
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
ccounter = 0;
ccounter_offd = 0;
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search C points only */
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] > 0)
{
CF_marker[i1] = 2;
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
/*clist[ccounter++] = i1;*/
}
}
}
/*qsort0(clist,0,ccounter-1);*/
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] > 0)
{
CF_marker_offd[i1] = 2;
if(P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
/*clist_offd[ccounter_offd++] = i1;*/
}
}
}
/*qsort0(clist_offd,0,ccounter_offd-1);*/
}
for(jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search through F points */
i1 = S_diag_j[jj];
if(CF_marker[i1] == -1)
{
P_marker[i1] = strong_f_marker;
common_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] == 2)
{
/*if(hypre_BinarySearch(clist,k1,ccounter) >= 0)
{*/
common_c = 1;
break;
/*kk = S_diag_i[i1+1];
}*/
}
}
if(num_procs > 1 && common_c == 0)
{ /* no common c point yet, check offd */
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] == 2)
{ /* k1 is a c point check if it is common */
/*if(hypre_BinarySearch(clist_offd,k1,ccounter_offd) >= 0)
{*/
common_c = 1;
break;
/*kk = S_offd_i[i1+1];
}*/
}
}
}
if(!common_c)
{ /* No common c point, extend the interp set */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if(P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
/*break;*/
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if(CF_marker_offd[k1] >= 0)
{
if(P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
/*break;*/
}
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if(CF_marker_offd[i1] == -1)
{ /* F points that are off proc */
P_marker_offd[i1] = strong_f_marker;
common_c = 0;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{ /* Check if common c */
k1 = Sop_j[kk];
if(k1 >= col_1 && k1 < col_n)
{ /* In S_diag */
loc_col = k1-col_1;
if(CF_marker[loc_col] == 2)
{
/*if(hypre_BinarySearch(clist,loc_col,ccounter) >= 0)
{*/
common_c = 1;
break;
/*kk = Sop_i[i1+1];
}*/
}
}
else
{
loc_col = -k1 - 1;
if(CF_marker_offd[loc_col] == 2)
{
/*if(hypre_BinarySearch(clist_offd,loc_col,ccounter_offd) >=
0)
{*/
common_c = 1;
break;
/*kk = Sop_i[i1+1];
}*/
}
}
}
if(!common_c)
{
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
k1 = Sop_j[kk];
/* Find local col number */
if(k1 >= col_1 && k1 < col_n)
{
loc_col = k1-col_1;
if(CF_marker[loc_col] > 0)
{
if(P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
/*break;*/
}
}
}
else
{
loc_col = -k1 - 1;
if(CF_marker_offd[loc_col] > 0)
{
if(P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
/*break;*/
}
}
}
}
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search C points only */
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] == 2)
{
CF_marker[i1] = 1;
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] == 2)
{
CF_marker_offd[i1] = 1;
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* i1 is a c-point and strongly influences i, accumulate
* a_(i,i1) into interpolation weight */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if(P_marker[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if((P_marker[i2] >= jj_begin_row || i2 == i) && (sgn*A_diag_data[jj1]) < 0)
sum += A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
sum += A_offd_data[jj1];
}
}
if(sum != 0)
{
distribute = A_diag_data[jj]/sum;
/* Loop over row of A for point i1 and do the distribution */
for(jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute*A_diag_data[jj1];
if(i2 == i && (sgn*A_diag_data[jj1]) < 0)
diagonal += distribute*A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute*A_offd_data[jj1];
}
}
}
else
diagonal += A_diag_data[jj];
}
/* neighbor i1 weakly influences i, accumulate a_(i,i1) into
* diagonal */
else if (CF_marker[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
if(num_procs > 1)
{
for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if(P_marker_offd[i1] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
else if(P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
if(A_ext_data[A_ext_i[i1]] < 0) sgn = -1;
for(jj1 = A_ext_i[i1]+1; jj1 < A_ext_i[i1+1]; jj1++)
{
k1 = A_ext_j[jj1];
if(k1 >= col_1 && k1 < col_n)
{ /* diag */
loc_col = k1 - col_1;
if((P_marker[loc_col] >= jj_begin_row || loc_col == i)
&& (sgn*A_ext_data[jj1]) < 0)
sum += A_ext_data[jj1];
}
else
{
loc_col = -k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd &&
(sgn*A_ext_data[jj1]) < 0)
sum += A_ext_data[jj1];
}
}
if(sum != 0)
{
distribute = A_offd_data[jj] / sum;
for(jj1 = A_ext_i[i1]+1; jj1 < A_ext_i[i1+1]; jj1++)
{
k1 = A_ext_j[jj1];
if(k1 >= col_1 && k1 < col_n)
{ /* diag */
loc_col = k1 - col_1;
if(P_marker[loc_col] >= jj_begin_row &&
(sgn*A_ext_data[jj1]) < 0)
P_diag_data[P_marker[loc_col]] += distribute*
A_ext_data[jj1];
if(loc_col == i && (sgn*A_ext_data[jj1]) < 0)
diagonal += distribute*A_ext_data[jj1];
}
else
{
loc_col = -k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd &&
(sgn*A_ext_data[jj1]) < 0)
P_offd_data[P_marker_offd[loc_col]] += distribute*
A_ext_data[jj1];
}
}
}
else
diagonal += A_offd_data[jj];
}
else if (CF_marker_offd[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
if (diagonal)
{
for(jj = jj_begin_row; jj < jj_end_row; jj++)
P_diag_data[jj] /= -diagonal;
for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
P_offd_data[jj] /= -diagonal;
}
}
strong_f_marker--;
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
num_cols_P_offd = 0;
if(P_offd_size)
{
hypre_TFree(P_marker);
if (full_off_procNodes)
P_marker = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
for (i=0; i < full_off_procNodes; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
if(tmp_CF_marker_offd[index] >= 0)
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
}
if (num_cols_P_offd)
col_map_offd_P = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd);
index = 0;
for(i = 0; i < num_cols_P_offd; i++)
{
while( P_marker[index] == 0) index++;
col_map_offd_P[i] = index++;
}
for(i = 0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(col_map_offd_P,
P_offd_j[i],
num_cols_P_offd);
index = 0;
for(i = 0; i < num_cols_P_offd; i++)
{
while (P_marker[index] == 0) index++;
col_map_offd_P[i] = fine_to_coarse_offd[index];
index++;
}
/* Sort the col_map_offd_P and P_offd_j correctly */
for(i = 0; i < num_cols_P_offd; i++)
P_marker[i] = col_map_offd_P[i];
/* Check if sort actually changed anything */
if(hypre_ssort(col_map_offd_P,num_cols_P_offd))
{
for(i = 0; i < P_offd_size; i++)
for(j = 0; j < num_cols_P_offd; j++)
if(P_marker[P_offd_j[i]] == col_map_offd_P[j])
{
P_offd_j[i] = j;
j = num_cols_P_offd;
}
}
}
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_MatvecCommPkgCreate(P);
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse);
hypre_TFree(P_marker);
/*hypre_TFree(clist);*/
if (num_procs > 1)
{
/*hypre_TFree(clist_offd);*/
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd);
hypre_TFree(P_marker_offd);
hypre_TFree(CF_marker_offd);
hypre_TFree(tmp_CF_marker_offd);
if(num_functions > 1)
hypre_TFree(dof_func_offd);
hypre_TFree(found);
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildFFInterp
* Comment: Only use FF when there is no common c point.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildFFInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
                             hypre_ParCSRMatrix *S, HYPRE_Int *num_cpts_global,
                             HYPRE_Int num_functions, HYPRE_Int *dof_func,
                             HYPRE_Int debug_flag,
                             double trunc_factor, HYPRE_Int max_elmts,
                             HYPRE_Int *col_offd_S_to_A,
                             hypre_ParCSRMatrix **P_ptr)
{
   /*------------------------------------------------------------------------
    * Builds the F-F (distance-two) interpolation operator P for BoomerAMG.
    * F-points interpolate from their strong C-neighbors; a strong F-neighbor
    * i1 is only "extended through" (its C-neighbors added to the interp set)
    * when i and i1 share NO common strong C-point.
    *
    * Two passes:
    *   pass 1 - count nonzeros per row of P (diag and offd parts) and build
    *            the fine->coarse map;
    *   pass 2 - fill column indices and classical-interpolation weights,
    *            distributing strong-F connections with a sign filter.
    *
    * Interface is identical to the other Build*Interp routines in this file.
    * Returns hypre_error_flag; *P_ptr receives the new ParCSR matrix.
    *-----------------------------------------------------------------------*/

   /* Communication Variables */
   MPI_Comm                comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int               my_id, num_procs;

   /* Variables to store input variables */
   hypre_CSRMatrix        *A_diag = hypre_ParCSRMatrixDiag(A);
   double                 *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int              *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int              *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix        *A_offd = hypre_ParCSRMatrixOffd(A);
   double                 *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int              *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int              *A_offd_j = hypre_CSRMatrixJ(A_offd);

   HYPRE_Int               num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int              *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_Int               n_fine = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int               col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int               local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int               col_n = col_1 + local_numrows;
   HYPRE_Int               total_global_cpts, my_first_cpt;

   /* Variables to store strong connection matrix info */
   hypre_CSRMatrix        *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int              *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int              *S_diag_j = hypre_CSRMatrixJ(S_diag);

   hypre_CSRMatrix        *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int              *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int              *S_offd_j = hypre_CSRMatrixJ(S_offd);

   /* Interpolation matrix P */
   hypre_ParCSRMatrix     *P;
   hypre_CSRMatrix        *P_diag;
   hypre_CSRMatrix        *P_offd;

   double                 *P_diag_data = NULL;
   HYPRE_Int              *P_diag_i, *P_diag_j = NULL;
   double                 *P_offd_data = NULL;
   HYPRE_Int              *P_offd_i, *P_offd_j = NULL;

   HYPRE_Int              *col_map_offd_P = NULL;
   HYPRE_Int               P_diag_size;
   HYPRE_Int               P_offd_size;
   HYPRE_Int              *P_marker = NULL;        /* diag interp-set membership per row */
   HYPRE_Int              *P_marker_offd = NULL;   /* offd interp-set membership per row */
   HYPRE_Int              *CF_marker_offd = NULL;
   HYPRE_Int              *tmp_CF_marker_offd = NULL;
   HYPRE_Int              *dof_func_offd = NULL;
   HYPRE_Int               ccounter_offd;
   HYPRE_Int               common_c;               /* 1 if i and strong F-nbr share a C-point */

   /* Full row information for columns of A that are off diag */
   hypre_CSRMatrix        *A_ext;
   double                 *A_ext_data;
   HYPRE_Int              *A_ext_i;
   HYPRE_Int              *A_ext_j;

   HYPRE_Int              *fine_to_coarse = NULL;
   HYPRE_Int              *fine_to_coarse_offd = NULL;
   HYPRE_Int              *found = NULL;

   HYPRE_Int               num_cols_P_offd;
   HYPRE_Int               newoff, loc_col;
   HYPRE_Int               A_ext_rows, full_off_procNodes;

   hypre_CSRMatrix        *Sop;
   HYPRE_Int              *Sop_i;
   HYPRE_Int              *Sop_j;
   HYPRE_Int               Soprows;

   /* Variables to keep count of interpolatory points */
   HYPRE_Int               jj_counter, jj_counter_offd;
   HYPRE_Int               jj_begin_row, jj_end_row;
   HYPRE_Int               jj_begin_row_offd = 0;
   HYPRE_Int               jj_end_row_offd = 0;
   HYPRE_Int               coarse_counter, coarse_counter_offd;

   /* Interpolation weight variables */
   double                  sum, diagonal, distribute;
   HYPRE_Int               strong_f_marker = -2;   /* unique tag per F-row, decremented each row */
   HYPRE_Int               sgn = 1;

   /* Loop variables */
   HYPRE_Int               index;
   HYPRE_Int               start_indexing = 0;
   HYPRE_Int               i, i1, i2, j, jj, kk, k1, jj1;
   HYPRE_Int               ccounter;               /* retained for symmetry with sibling
                                                      routines; assigned but not read here */

   /* Definitions */
   double                  zero = 0.0;
   double                  one = 1.0;

   hypre_ParCSRCommPkg    *extend_comm_pkg = NULL;

   /* BEGIN */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

#ifdef HYPRE_NO_GLOBAL_PARTITION
   my_first_cpt = num_cpts_global[0];
   if (my_id == (num_procs - 1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs - 1, comm);
#else
   my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
#endif

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   /* Set up off processor information (specifically for neighbors of
    * neighbors) */
   newoff = 0;
   full_off_procNodes = 0;
   if (num_procs > 1)
   {
      /*----------------------------------------------------------------------
       * Get the off processors rows for A and S, associated with columns in
       * A_offd and S_offd.
       *---------------------------------------------------------------------*/
      A_ext      = hypre_ParCSRMatrixExtractBExt(A, A, 1);
      A_ext_i    = hypre_CSRMatrixI(A_ext);
      A_ext_j    = hypre_CSRMatrixJ(A_ext);
      A_ext_data = hypre_CSRMatrixData(A_ext);
      A_ext_rows = hypre_CSRMatrixNumRows(A_ext);

      Sop     = hypre_ParCSRMatrixExtractBExt(S, A, 0);
      Sop_i   = hypre_CSRMatrixI(Sop);
      Sop_j   = hypre_CSRMatrixJ(Sop);
      Soprows = hypre_CSRMatrixNumRows(Sop);

      /* Find nodes that are neighbors of neighbors, not found in offd */
      newoff = new_offd_nodes(&found, A_ext_rows, A_ext_i, A_ext_j,
                              Soprows, col_map_offd, col_1, col_n,
                              Sop_i, Sop_j, CF_marker, comm_pkg);
      if (newoff >= 0)
         full_off_procNodes = newoff + num_cols_A_offd;
      else
         return hypre_error_flag;

      /* Possibly add new points and new processors to the comm_pkg, all
       * processors need new_comm_pkg */
      /* AHB - create a new comm package just for extended info -
         this will work better with the assumed partition */
      hypre_ParCSRFindExtendCommPkg(A, newoff, found,
                                    &extend_comm_pkg);

      if (full_off_procNodes)
         CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);

      if (num_functions > 1 && full_off_procNodes > 0)
         dof_func_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);

      alt_insert_new_nodes(comm_pkg, extend_comm_pkg, CF_marker,
                           full_off_procNodes, CF_marker_offd);
      if (num_functions > 1)
         alt_insert_new_nodes(comm_pkg, extend_comm_pkg, dof_func,
                              full_off_procNodes, dof_func_offd);
   }

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Intialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1);
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1);

   if (n_fine)
   {
      fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
   }

   if (full_off_procNodes)
   {
      P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
      fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
      tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
   }

   initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
                   fine_to_coarse_offd, P_marker, P_marker_offd,
                   tmp_CF_marker_offd);

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   coarse_counter = 0;
   coarse_counter_offd = 0;

   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/
   for (i = 0; i < n_fine; i++)
   {
      P_diag_i[i] = jj_counter;
      if (num_procs > 1)
         P_offd_i[i] = jj_counter_offd;

      if (CF_marker[i] >= 0)
      {
         jj_counter++;
         fine_to_coarse[i] = coarse_counter;
         coarse_counter++;
      }
      /*--------------------------------------------------------------------
       * If i is an F-point, interpolation is from the C-points that
       * strongly influence i, or C-points that strongly influence F-points
       * that strongly influence i.
       *--------------------------------------------------------------------*/
      else
      {
         /* Initialize ccounter for each f point */
         ccounter = 0;
         ccounter_offd = 0;

         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         { /* search through diag to find all c neighbors */
            i1 = S_diag_j[jj];
            if (CF_marker[i1] > 0)
            { /* i1 is a C point; tag 2 marks "C-neighbor of i" for the
               * common-C test below */
               CF_marker[i1] = 2;
               if (P_marker[i1] < P_diag_i[i])
               {
                  P_marker[i1] = jj_counter;
                  jj_counter++;
               }
            }
         }
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            { /* search through offd to find all c neighbors */
               if (col_offd_S_to_A)
                  i1 = col_offd_S_to_A[S_offd_j[jj]];
               else
                  i1 = S_offd_j[jj];
               if (CF_marker_offd[i1] > 0)
               { /* i1 is a C point direct neighbor */
                  CF_marker_offd[i1] = 2;
                  if (P_marker_offd[i1] < P_offd_i[i])
                  {
                     tmp_CF_marker_offd[i1] = 1;
                     P_marker_offd[i1] = jj_counter_offd;
                     jj_counter_offd++;
                  }
               }
            }
         }
         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         { /* Search diag to find f neighbors and determine if common c point */
            i1 = S_diag_j[jj];
            if (CF_marker[i1] < 0)
            { /* i1 is a F point, loop through it's strong neighbors */
               common_c = 0;
               for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
               {
                  k1 = S_diag_j[kk];
                  if (CF_marker[k1] == 2)
                  {
                     common_c = 1;
                     break;
                  }
               }
               if (num_procs > 1 && common_c == 0)
               { /* no common c point yet, check offd */
                  for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
                  {
                     if (col_offd_S_to_A)
                        k1 = col_offd_S_to_A[S_offd_j[kk]];
                     else
                        k1 = S_offd_j[kk];
                     if (CF_marker_offd[k1] == 2)
                     {
                        common_c = 1;
                        break;
                     }
                  }
               }
               if (!common_c)
               { /* No common c point, extend the interp set */
                  for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
                  {
                     k1 = S_diag_j[kk];
                     if (CF_marker[k1] > 0)
                     {
                        if (P_marker[k1] < P_diag_i[i])
                        {
                           P_marker[k1] = jj_counter;
                           jj_counter++;
                        }
                     }
                  }
                  if (num_procs > 1)
                  {
                     for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
                     {
                        if (col_offd_S_to_A)
                           k1 = col_offd_S_to_A[S_offd_j[kk]];
                        else
                           k1 = S_offd_j[kk];
                        if (CF_marker_offd[k1] > 0)
                        {
                           if (P_marker_offd[k1] < P_offd_i[i])
                           {
                              tmp_CF_marker_offd[k1] = 1;
                              P_marker_offd[k1] = jj_counter_offd;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
         }
         /* Look at off diag strong connections of i */
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            {
               i1 = S_offd_j[jj];
               if (col_offd_S_to_A)
                  i1 = col_offd_S_to_A[i1];
               if (CF_marker_offd[i1] < 0)
               { /* F point; look at neighbors of i1. Sop contains global col
                  * numbers and entries that could be in S_diag or S_offd or
                  * neither. */
                  common_c = 0;
                  for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
                  { /* Check if common c */
                     k1 = Sop_j[kk];
                     if (k1 >= col_1 && k1 < col_n)
                     { /* In S_diag */
                        loc_col = k1 - col_1;
                        if (CF_marker[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                     else
                     {
                        /* negative Sop entries encode offd position as -k1-1 */
                        loc_col = -k1 - 1;
                        if (CF_marker_offd[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                  }
                  if (!common_c)
                  {
                     for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
                     { /* Check if common c */
                        k1 = Sop_j[kk];
                        if (k1 >= col_1 && k1 < col_n)
                        { /* In S_diag */
                           loc_col = k1 - col_1;
                           if (CF_marker[loc_col] > 0)
                           {
                              if (P_marker[loc_col] < P_diag_i[i])
                              {
                                 P_marker[loc_col] = jj_counter;
                                 jj_counter++;
                              }
                           }
                        }
                        else
                        {
                           loc_col = -k1 - 1;
                           if (CF_marker_offd[loc_col] > 0)
                           {
                              if (P_marker_offd[loc_col] < P_offd_i[i])
                              {
                                 P_marker_offd[loc_col] = jj_counter_offd;
                                 tmp_CF_marker_offd[loc_col] = 1;
                                 jj_counter_offd++;
                              }
                           }
                        }
                     }
                  }
               }
            }
         }
         /* Restore temporary tag 2 back to ordinary C-point marker 1 */
         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         { /* search through diag to find all c neighbors */
            i1 = S_diag_j[jj];
            if (CF_marker[i1] == 2)
               CF_marker[i1] = 1;
         }
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            { /* search through offd to find all c neighbors */
               if (col_offd_S_to_A)
                  i1 = col_offd_S_to_A[S_offd_j[jj]];
               else
                  i1 = S_offd_j[jj];
               if (CF_marker_offd[i1] == 2)
               { /* i1 is a C point direct neighbor */
                  CF_marker_offd[i1] = 1;
               }
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/
   P_diag_size = jj_counter;
   P_offd_size = jj_counter_offd;

   if (P_diag_size)
   {
      P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size);
      P_diag_data = hypre_CTAlloc(double, P_diag_size);
   }
   if (P_offd_size)
   {
      P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size);
      P_offd_data = hypre_CTAlloc(double, P_offd_size);
   }
   P_diag_i[n_fine] = jj_counter;
   P_offd_i[n_fine] = jj_counter_offd;

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   ccounter = start_indexing;
   ccounter_offd = start_indexing;

   /* Fine to coarse mapping: temporarily shift to global C indices so the
    * exchange delivers global numbers to the off-proc map */
   if (num_procs > 1)
   {
      for (i = 0; i < n_fine; i++)
         fine_to_coarse[i] += my_first_cpt;
      alt_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
                           full_off_procNodes,
                           fine_to_coarse_offd);
      for (i = 0; i < n_fine; i++)
         fine_to_coarse[i] -= my_first_cpt;
   }

   for (i = 0; i < n_fine; i++)
      P_marker[i] = -1;
   for (i = 0; i < full_off_procNodes; i++)
      P_marker_offd[i] = -1;

   /*-----------------------------------------------------------------------
    * Loop over fine grid points.
    *-----------------------------------------------------------------------*/
   jj_begin_row_offd = 0;
   for (i = 0; i < n_fine; i++)
   {
      jj_begin_row = jj_counter;
      if (num_procs > 1)
         jj_begin_row_offd = jj_counter_offd;

      /*--------------------------------------------------------------------
       * If i is a c-point, interpolation is the identity.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] >= 0)
      {
         P_diag_j[jj_counter] = fine_to_coarse[i];
         P_diag_data[jj_counter] = one;
         jj_counter++;
      }
      /*--------------------------------------------------------------------
       * If i is an F-point, build interpolation.
       *--------------------------------------------------------------------*/
      else if (CF_marker[i] != -3)
      {
         ccounter = 0;
         ccounter_offd = 0;
         strong_f_marker--;

         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         { /* Search C points only */
            i1 = S_diag_j[jj];
            /*--------------------------------------------------------------
             * If neighbor i1 is a C-point, set column number in P_diag_j
             * and initialize interpolation weight to zero.
             *--------------------------------------------------------------*/
            if (CF_marker[i1] > 0)
            {
               CF_marker[i1] = 2;
               if (P_marker[i1] < jj_begin_row)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter] = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = zero;
                  jj_counter++;
               }
            }
         }
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            {
               if (col_offd_S_to_A)
                  i1 = col_offd_S_to_A[S_offd_j[jj]];
               else
                  i1 = S_offd_j[jj];
               if (CF_marker_offd[i1] > 0)
               {
                  CF_marker_offd[i1] = 2;
                  if (P_marker_offd[i1] < jj_begin_row_offd)
                  {
                     P_marker_offd[i1] = jj_counter_offd;
                     P_offd_j[jj_counter_offd] = i1;
                     P_offd_data[jj_counter_offd] = zero;
                     jj_counter_offd++;
                  }
               }
            }
         }
         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         { /* Search through F points */
            i1 = S_diag_j[jj];
            if (CF_marker[i1] == -1)
            {
               P_marker[i1] = strong_f_marker;
               common_c = 0;
               for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
               {
                  k1 = S_diag_j[kk];
                  if (CF_marker[k1] == 2)
                  {
                     common_c = 1;
                     break;
                  }
               }
               if (num_procs > 1 && common_c == 0)
               { /* no common c point yet, check offd */
                  for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
                  {
                     if (col_offd_S_to_A)
                        k1 = col_offd_S_to_A[S_offd_j[kk]];
                     else
                        k1 = S_offd_j[kk];
                     if (CF_marker_offd[k1] == 2)
                     {
                        common_c = 1;
                        break;
                     }
                  }
               }
               if (!common_c)
               { /* No common c point, extend the interp set */
                  for (kk = S_diag_i[i1]; kk < S_diag_i[i1 + 1]; kk++)
                  {
                     k1 = S_diag_j[kk];
                     if (CF_marker[k1] >= 0)
                     {
                        if (P_marker[k1] < jj_begin_row)
                        {
                           P_marker[k1] = jj_counter;
                           P_diag_j[jj_counter] = fine_to_coarse[k1];
                           P_diag_data[jj_counter] = zero;
                           jj_counter++;
                        }
                     }
                  }
                  if (num_procs > 1)
                  {
                     for (kk = S_offd_i[i1]; kk < S_offd_i[i1 + 1]; kk++)
                     {
                        if (col_offd_S_to_A)
                           k1 = col_offd_S_to_A[S_offd_j[kk]];
                        else
                           k1 = S_offd_j[kk];
                        if (CF_marker_offd[k1] >= 0)
                        {
                           if (P_marker_offd[k1] < jj_begin_row_offd)
                           {
                              P_marker_offd[k1] = jj_counter_offd;
                              P_offd_j[jj_counter_offd] = k1;
                              P_offd_data[jj_counter_offd] = zero;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
         }
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            {
               i1 = S_offd_j[jj];
               if (col_offd_S_to_A)
                  i1 = col_offd_S_to_A[i1];
               if (CF_marker_offd[i1] == -1)
               { /* F points that are off proc */
                  P_marker_offd[i1] = strong_f_marker;
                  common_c = 0;
                  for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
                  { /* Check if common c */
                     k1 = Sop_j[kk];
                     if (k1 >= col_1 && k1 < col_n)
                     { /* In S_diag */
                        loc_col = k1 - col_1;
                        if (CF_marker[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                     else
                     {
                        loc_col = -k1 - 1;
                        if (CF_marker_offd[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                  }
                  if (!common_c)
                  {
                     for (kk = Sop_i[i1]; kk < Sop_i[i1 + 1]; kk++)
                     {
                        k1 = Sop_j[kk];
                        /* Find local col number */
                        if (k1 >= col_1 && k1 < col_n)
                        {
                           loc_col = k1 - col_1;
                           if (CF_marker[loc_col] > 0)
                           {
                              if (P_marker[loc_col] < jj_begin_row)
                              {
                                 P_marker[loc_col] = jj_counter;
                                 P_diag_j[jj_counter] = fine_to_coarse[loc_col];
                                 P_diag_data[jj_counter] = zero;
                                 jj_counter++;
                              }
                           }
                        }
                        else
                        {
                           loc_col = -k1 - 1;
                           if (CF_marker_offd[loc_col] > 0)
                           {
                              if (P_marker_offd[loc_col] < jj_begin_row_offd)
                              {
                                 P_marker_offd[loc_col] = jj_counter_offd;
                                 P_offd_j[jj_counter_offd] = loc_col;
                                 P_offd_data[jj_counter_offd] = zero;
                                 jj_counter_offd++;
                              }
                           }
                        }
                     }
                  }
               }
            }
         }
         /* Restore temporary tag 2 back to ordinary C-point marker 1 */
         for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
         { /* Search C points only */
            i1 = S_diag_j[jj];
            if (CF_marker[i1] == 2)
            {
               CF_marker[i1] = 1;
            }
         }
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
            {
               if (col_offd_S_to_A)
                  i1 = col_offd_S_to_A[S_offd_j[jj]];
               else
                  i1 = S_offd_j[jj];
               if (CF_marker_offd[i1] == 2)
               {
                  CF_marker_offd[i1] = 1;
               }
            }
         }

         jj_end_row = jj_counter;
         jj_end_row_offd = jj_counter_offd;

         diagonal = A_diag_data[A_diag_i[i]];

         for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)
         { /* i1 is a c-point and strongly influences i, accumulate
            * a_(i,i1) into interpolation weight */
            i1 = A_diag_j[jj];
            if (P_marker[i1] >= jj_begin_row)
            {
               P_diag_data[P_marker[i1]] += A_diag_data[jj];
            }
            else if (P_marker[i1] == strong_f_marker)
            {
               sum = zero;
               /* BUG FIX: sgn must be re-initialized for every strong
                * F-point row i1. Previously a -1 left over from an earlier
                * row was carried into rows with a positive diagonal,
                * corrupting the sign filter below. The A_offd branch of
                * this routine (and the sibling Build*Interp routines)
                * already reset it. */
               sgn = 1;
               if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
               /* Loop over row of A for point i1 and calculate the sum
                * of the connections to c-points that strongly influence i. */
               for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1 + 1]; jj1++)
               {
                  i2 = A_diag_j[jj1];
                  if (P_marker[i2] >= jj_begin_row && (sgn * A_diag_data[jj1]) < 0)
                     sum += A_diag_data[jj1];
               }
               if (num_procs > 1)
               {
                  for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
                  {
                     i2 = A_offd_j[jj1];
                     if (P_marker_offd[i2] >= jj_begin_row_offd &&
                         (sgn * A_offd_data[jj1]) < 0)
                        sum += A_offd_data[jj1];
                  }
               }
               if (sum != 0)
               {
                  distribute = A_diag_data[jj] / sum;
                  /* Loop over row of A for point i1 and do the distribution */
                  for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1 + 1]; jj1++)
                  {
                     i2 = A_diag_j[jj1];
                     if (P_marker[i2] >= jj_begin_row && (sgn * A_diag_data[jj1]) < 0)
                        P_diag_data[P_marker[i2]] +=
                           distribute * A_diag_data[jj1];
                  }
                  if (num_procs > 1)
                  {
                     for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1 + 1]; jj1++)
                     {
                        i2 = A_offd_j[jj1];
                        if (P_marker_offd[i2] >= jj_begin_row_offd &&
                            (sgn * A_offd_data[jj1]) < 0)
                           P_offd_data[P_marker_offd[i2]] +=
                              distribute * A_offd_data[jj1];
                     }
                  }
               }
               else
                  diagonal += A_diag_data[jj];
            }
            /* neighbor i1 weakly influences i, accumulate a_(i,i1) into
             * diagonal */
            else if (CF_marker[i1] != -3)
            {
               if (num_functions == 1 || dof_func[i] == dof_func[i1])
                  diagonal += A_diag_data[jj];
            }
         }
         if (num_procs > 1)
         {
            for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
            {
               i1 = A_offd_j[jj];
               if (P_marker_offd[i1] >= jj_begin_row_offd)
                  P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
               else if (P_marker_offd[i1] == strong_f_marker)
               {
                  sum = zero;
                  sgn = 1;
                  if (A_ext_data[A_ext_i[i1]] < 0) sgn = -1;
                  for (jj1 = A_ext_i[i1] + 1; jj1 < A_ext_i[i1 + 1]; jj1++)
                  {
                     k1 = A_ext_j[jj1];
                     if (k1 >= col_1 && k1 < col_n)
                     { /* diag */
                        loc_col = k1 - col_1;
                        if (P_marker[loc_col] >= jj_begin_row && (sgn * A_ext_data[jj1]) < 0)
                           sum += A_ext_data[jj1];
                     }
                     else
                     {
                        loc_col = -k1 - 1;
                        if (P_marker_offd[loc_col] >= jj_begin_row_offd &&
                            (sgn * A_ext_data[jj1]) < 0)
                           sum += A_ext_data[jj1];
                     }
                  }
                  if (sum != 0)
                  {
                     distribute = A_offd_data[jj] / sum;
                     for (jj1 = A_ext_i[i1] + 1; jj1 < A_ext_i[i1 + 1]; jj1++)
                     {
                        k1 = A_ext_j[jj1];
                        if (k1 >= col_1 && k1 < col_n)
                        { /* diag */
                           loc_col = k1 - col_1;
                           if (P_marker[loc_col] >= jj_begin_row &&
                               (sgn * A_ext_data[jj1]) < 0)
                              P_diag_data[P_marker[loc_col]] += distribute *
                                 A_ext_data[jj1];
                        }
                        else
                        {
                           loc_col = -k1 - 1;
                           if (P_marker_offd[loc_col] >= jj_begin_row_offd &&
                               (sgn * A_ext_data[jj1]) < 0)
                              P_offd_data[P_marker_offd[loc_col]] += distribute *
                                 A_ext_data[jj1];
                        }
                     }
                  }
                  else
                     diagonal += A_offd_data[jj];
               }
               else if (CF_marker_offd[i1] != -3)
               {
                  if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                     diagonal += A_offd_data[jj];
               }
            }
         }
         if (diagonal)
         {
            /* Normalize: w_ij = -(accumulated) / a_ii-equivalent */
            for (jj = jj_begin_row; jj < jj_end_row; jj++)
               P_diag_data[jj] /= -diagonal;
            for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
               P_offd_data[jj] /= -diagonal;
         }
      }
      strong_f_marker--;
   }

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* This builds col_map, col_map should be monotone increasing and contain
    * global numbers. */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      /* P_marker is re-used here as a presence flag over off-proc columns */
      hypre_TFree(P_marker);
      if (full_off_procNodes)
         P_marker = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
      for (i = 0; i < full_off_procNodes; i++)
         P_marker[i] = 0;
      num_cols_P_offd = 0;
      for (i = 0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            if (tmp_CF_marker_offd[index] >= 0)
            {
               num_cols_P_offd++;
               P_marker[index] = 1;
            }
         }
      }

      if (num_cols_P_offd)
         col_map_offd_P = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd);
      index = 0;
      for (i = 0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index] == 0) index++;
         col_map_offd_P[i] = index++;
      }
      for (i = 0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(col_map_offd_P,
                                          P_offd_j[i],
                                          num_cols_P_offd);
      index = 0;
      for (i = 0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index] == 0) index++;
         col_map_offd_P[i] = fine_to_coarse_offd[index];
         index++;
      }

      /* Sort the col_map_offd_P and P_offd_j correctly */
      for (i = 0; i < num_cols_P_offd; i++)
         P_marker[i] = col_map_offd_P[i];

      /* Check if sort actually changed anything */
      if (hypre_ssort(col_map_offd_P, num_cols_P_offd))
      {
         for (i = 0; i < P_offd_size; i++)
            for (j = 0; j < num_cols_P_offd; j++)
               if (P_marker[P_offd_j[i]] == col_map_offd_P[j])
               {
                  P_offd_j[i] = j;
                  j = num_cols_P_offd;
               }
      }
   }

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }

   hypre_MatvecCommPkgCreate(P);

   for (i = 0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;

   *P_ptr = P;

   /* Deallocate memory */
   hypre_TFree(fine_to_coarse);
   hypre_TFree(P_marker);

   if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(Sop);
      hypre_CSRMatrixDestroy(A_ext);
      hypre_TFree(fine_to_coarse_offd);
      hypre_TFree(P_marker_offd);
      hypre_TFree(CF_marker_offd);
      hypre_TFree(tmp_CF_marker_offd);
      if (num_functions > 1)
         hypre_TFree(dof_func_offd);
      hypre_TFree(found);
      hypre_MatvecCommPkgDestroy(extend_comm_pkg);
   }

   return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildFF1Interp
* Comment: Only use FF when there is no common c point.
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGBuildFF1Interp
 *
 * Builds the "FF1" interpolation operator P for BoomerAMG.  Each F point
 * interpolates from the C points that strongly influence it; when a
 * strongly connected F neighbor shares no common C point with the row's
 * F point, the interpolation set is extended by exactly ONE C point taken
 * from that neighbor's strong connections (the first one found).
 *
 * Parameters:
 *   A               - fine-grid operator (ParCSR)
 *   CF_marker       - C/F splitting: >= 0 marks C points, < 0 F points.
 *                     Entries equal to -3 are skipped during interpolation
 *                     and reset to -1 before returning.  Entries are
 *                     temporarily overwritten with 2 while scanning a row
 *                     and restored to 1 afterwards.
 *   S               - strength-of-connection matrix (pattern only)
 *   num_cpts_global - coarse-point partitioning information
 *   num_functions,
 *   dof_func        - number of PDE functions and per-dof function ids
 *                     (weak connections between different functions are
 *                     not lumped into the diagonal)
 *   debug_flag      - unused in this routine
 *   trunc_factor,
 *   max_elmts       - truncation parameters applied to P after assembly
 *   col_offd_S_to_A - optional map from S's offd columns to A's offd
 *                     columns (may be NULL)
 *   P_ptr           - output: the new ParCSR interpolation matrix
 *
 * Returns hypre_error_flag.
 *
 * Structure: a first pass counts the entries of each row of P (sizing
 * P_diag/P_offd), then a second pass fills in column indices and computes
 * the classical interpolation weights, distributing strong-F connections
 * over common C points.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildFF1Interp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
                              hypre_ParCSRMatrix *S, HYPRE_Int *num_cpts_global,
                              HYPRE_Int num_functions, HYPRE_Int *dof_func,
                              HYPRE_Int debug_flag,
                              double trunc_factor, HYPRE_Int max_elmts,
                              HYPRE_Int *col_offd_S_to_A,
                              hypre_ParCSRMatrix **P_ptr)
{
   /* Communication Variables */
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int my_id, num_procs;

   /* Variables to store input variables */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   double *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   double *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);

   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   /* [col_1, col_n) is this rank's range of globally owned columns;
    * global indices in this range belong to the diag part */
   HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int col_n = col_1 + local_numrows;
   HYPRE_Int total_global_cpts, my_first_cpt;

   /* Variables to store strong connection matrix info */
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);

   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);

   /* Interpolation matrix P */
   hypre_ParCSRMatrix *P;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;

   double *P_diag_data = NULL;
   HYPRE_Int *P_diag_i, *P_diag_j = NULL;
   double *P_offd_data = NULL;
   HYPRE_Int *P_offd_i, *P_offd_j = NULL;

   HYPRE_Int *col_map_offd_P = NULL;
   HYPRE_Int P_diag_size;
   HYPRE_Int P_offd_size;
   /* P_marker / P_marker_offd record, per column, either the position in
    * the current row of P or the current strong-F tag (see second pass) */
   HYPRE_Int *P_marker = NULL;
   HYPRE_Int *P_marker_offd = NULL;
   HYPRE_Int *CF_marker_offd = NULL;
   /* set to 1 for every offd column that actually receives a P entry */
   HYPRE_Int *tmp_CF_marker_offd = NULL;
   HYPRE_Int *dof_func_offd = NULL;
   HYPRE_Int ccounter_offd;
   HYPRE_Int common_c;

   /* Full row information for columns of A that are off diag*/
   hypre_CSRMatrix *A_ext;
   double *A_ext_data;
   HYPRE_Int *A_ext_i;
   HYPRE_Int *A_ext_j;

   HYPRE_Int *fine_to_coarse = NULL;
   HYPRE_Int *fine_to_coarse_offd = NULL;
   HYPRE_Int *found = NULL;

   HYPRE_Int num_cols_P_offd;
   HYPRE_Int newoff, loc_col;
   HYPRE_Int A_ext_rows, full_off_procNodes;

   /* Sop holds the rows of S for off-processor points; its column array
    * stores on-processor columns as global indices in [col_1, col_n) and
    * off-processor columns encoded as -(local offd index) - 1 (decoded
    * below via loc_col = -k1 - 1) */
   hypre_CSRMatrix *Sop;
   HYPRE_Int *Sop_i;
   HYPRE_Int *Sop_j;
   HYPRE_Int Soprows;

   /* Variables to keep count of interpolatory points */
   HYPRE_Int jj_counter, jj_counter_offd;
   HYPRE_Int jj_begin_row, jj_end_row;
   HYPRE_Int jj_begin_row_offd = 0;
   HYPRE_Int jj_end_row_offd = 0;
   HYPRE_Int coarse_counter, coarse_counter_offd;

   /* Interpolation weight variables */
   double sum, diagonal, distribute;
   /* strong_f_marker is decremented every row so the tag placed on strong
    * F neighbors is unique per row and stale tags are never mistaken */
   HYPRE_Int strong_f_marker = -2;
   HYPRE_Int sgn = 1;

   /* Loop variables */
   HYPRE_Int index;
   HYPRE_Int start_indexing = 0;
   HYPRE_Int i, i1, i2, j, jj, kk, k1, jj1;
   HYPRE_Int ccounter;
   HYPRE_Int found_c = 0;

   /* Definitions */
   double zero = 0.0;
   double one = 1.0;

   hypre_ParCSRCommPkg *extend_comm_pkg = NULL;

   /* BEGIN */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* only the last rank knows the global coarse-point count; broadcast it */
   my_first_cpt = num_cpts_global[0];
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
   my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
#endif

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   /* Set up off processor information (specifically for neighbors of
    * neighbors */
   newoff = 0;
   full_off_procNodes = 0;
   if (num_procs > 1)
   {
      /*----------------------------------------------------------------------
       * Get the off processors rows for A and S, associated with columns in
       * A_offd and S_offd.
       *---------------------------------------------------------------------*/
      A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1);
      A_ext_i = hypre_CSRMatrixI(A_ext);
      A_ext_j = hypre_CSRMatrixJ(A_ext);
      A_ext_data = hypre_CSRMatrixData(A_ext);
      A_ext_rows = hypre_CSRMatrixNumRows(A_ext);

      Sop = hypre_ParCSRMatrixExtractBExt(S,A,0);
      Sop_i = hypre_CSRMatrixI(Sop);
      Sop_j = hypre_CSRMatrixJ(Sop);
      Soprows = hypre_CSRMatrixNumRows(Sop);

      /* Find nodes that are neighbors of neighbors, not found in offd */
      newoff = new_offd_nodes(&found, A_ext_rows, A_ext_i, A_ext_j,
                              Soprows, col_map_offd, col_1, col_n,
                              Sop_i, Sop_j, CF_marker, comm_pkg);
      if(newoff >= 0)
         full_off_procNodes = newoff + num_cols_A_offd;
      else
         return hypre_error_flag; /* new_offd_nodes signalled failure */

      /* Possibly add new points and new processors to the comm_pkg, all
       * processors need new_comm_pkg */

      /* AHB - create a new comm package just for extended info -
         this will work better with the assumed partition*/
      hypre_ParCSRFindExtendCommPkg(A, newoff, found,
                                    &extend_comm_pkg);

      if (full_off_procNodes)
         CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);

      if (num_functions > 1 && full_off_procNodes > 0)
         dof_func_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);

      /* fetch CF_marker (and dof_func) for all extended off-proc nodes */
      alt_insert_new_nodes(comm_pkg, extend_comm_pkg, CF_marker,
                           full_off_procNodes, CF_marker_offd);

      if(num_functions > 1)
         alt_insert_new_nodes(comm_pkg, extend_comm_pkg, dof_func,
                              full_off_procNodes, dof_func_offd);
   }

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Intialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);

   if (n_fine)
   {
      fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
   }

   if (full_off_procNodes)
   {
      P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
      fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
      tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
   }

   initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
                   fine_to_coarse_offd, P_marker, P_marker_offd,
                   tmp_CF_marker_offd);

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   coarse_counter = 0;
   coarse_counter_offd = 0;

   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/
   for (i = 0; i < n_fine; i++)
   {
      P_diag_i[i] = jj_counter;
      if (num_procs > 1)
         P_offd_i[i] = jj_counter_offd;

      if (CF_marker[i] >= 0)
      {
         /* C point: one identity entry; record its coarse index */
         jj_counter++;
         fine_to_coarse[i] = coarse_counter;
         coarse_counter++;
      }

      /*--------------------------------------------------------------------
       * If i is an F-point, interpolation is from the C-points that
       * strongly influence i, or C-points that stronly influence F-points
       * that strongly influence i.
       *--------------------------------------------------------------------*/
      else
      {
         /* Initialize ccounter for each f point */
         ccounter = 0;
         ccounter_offd = 0;

         for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
         { /* search through diag to find all c neighbors */
            i1 = S_diag_j[jj];
            if (CF_marker[i1] > 0)
            { /* i1 is a C point; tag it with 2 so the common-C scan below
                 can recognize direct C neighbors of row i */
               CF_marker[i1] = 2;
               /* P_marker < P_diag_i[i] means i1 not yet counted for row i */
               if (P_marker[i1] < P_diag_i[i])
               {
                  P_marker[i1] = jj_counter;
                  jj_counter++;
               }
            }
         }

         if(num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            { /* search through offd to find all c neighbors */
               if(col_offd_S_to_A)
                  i1 = col_offd_S_to_A[S_offd_j[jj]];
               else
                  i1 = S_offd_j[jj];
               if(CF_marker_offd[i1] > 0)
               { /* i1 is a C point direct neighbor */
                  CF_marker_offd[i1] = 2;
                  if(P_marker_offd[i1] < P_offd_i[i])
                  {
                     tmp_CF_marker_offd[i1] = 1;
                     P_marker_offd[i1] = jj_counter_offd;
                     jj_counter_offd++;
                  }
               }
            }
         }

         for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
         { /* Search diag to find f neighbors and determine if common c point */
            i1 = S_diag_j[jj];
            if (CF_marker[i1] < 0)
            { /* i1 is a F point, loop through it's strong neighbors */
               common_c = 0;
               for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
               {
                  k1 = S_diag_j[kk];
                  if (CF_marker[k1] == 2)
                  { /* k1 was tagged above: common C point of i and i1 */
                     common_c = 1;
                     break;
                  }
               }
               if(num_procs > 1 && common_c == 0)
               { /* no common c point yet, check offd */
                  for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
                  {
                     if(col_offd_S_to_A)
                        k1 = col_offd_S_to_A[S_offd_j[kk]];
                     else
                        k1 = S_offd_j[kk];
                     if (CF_marker_offd[k1] == 2)
                     { /* k1 is a c point check if it is common */
                        common_c = 1;
                        break;
                     }
                  }
               }
               if(!common_c)
               { /* No common c point, extend the interp set by the FIRST
                    unused C neighbor of i1 (the "FF1" rule) */
                  found_c = 0;
                  for(kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
                  {
                     k1 = S_diag_j[kk];
                     if(CF_marker[k1] > 0)
                     {
                        if(P_marker[k1] < P_diag_i[i])
                        {
                           P_marker[k1] = jj_counter;
                           jj_counter++;
                           found_c = 1;
                           break;
                        }
                     }
                  }
                  if(num_procs > 1 && !found_c)
                  {
                     for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
                     {
                        if(col_offd_S_to_A)
                           k1 = col_offd_S_to_A[S_offd_j[kk]];
                        else
                           k1 = S_offd_j[kk];
                        if (CF_marker_offd[k1] > 0)
                        {
                           if(P_marker_offd[k1] < P_offd_i[i])
                           {
                              tmp_CF_marker_offd[k1] = 1;
                              P_marker_offd[k1] = jj_counter_offd;
                              jj_counter_offd++;
                              break;
                           }
                        }
                     }
                  }
               }
            }
         }
         /* Look at off diag strong connections of i */
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            {
               i1 = S_offd_j[jj];
               if(col_offd_S_to_A)
                  i1 = col_offd_S_to_A[i1];
               if (CF_marker_offd[i1] < 0)
               { /* F point; look at neighbors of i1. Sop contains global col
                  * numbers and entries that could be in S_diag or S_offd or
                  * neither. */
                  common_c = 0;
                  for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
                  { /* Check if common c */
                     k1 = Sop_j[kk];
                     if(k1 >= col_1 && k1 < col_n)
                     { /* In S_diag */
                        loc_col = k1-col_1;
                        if(CF_marker[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                     else
                     {
                        /* negative Sop_j entries encode offd columns */
                        loc_col = -k1 - 1;
                        if(CF_marker_offd[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                  }
                  if(!common_c)
                  {
                     for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
                     { /* Check if common c */
                        k1 = Sop_j[kk];
                        if(k1 >= col_1 && k1 < col_n)
                        { /* In S_diag */
                           loc_col = k1-col_1;
                           if(CF_marker[loc_col] > 0)
                           {
                              if(P_marker[loc_col] < P_diag_i[i])
                              {
                                 P_marker[loc_col] = jj_counter;
                                 jj_counter++;
                                 break;
                              }
                           }
                        }
                        else
                        {
                           loc_col = -k1 - 1;
                           if(CF_marker_offd[loc_col] > 0)
                           {
                              if(P_marker_offd[loc_col] < P_offd_i[i])
                              {
                                 P_marker_offd[loc_col] = jj_counter_offd;
                                 tmp_CF_marker_offd[loc_col] = 1;
                                 jj_counter_offd++;
                                 break;
                              }
                           }
                        }
                     }
                  }
               }
            }
         }
         /* restore the temporary "2" tags placed on direct C neighbors */
         for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
         { /* search through diag to find all c neighbors */
            i1 = S_diag_j[jj];
            if (CF_marker[i1] == 2)
               CF_marker[i1] = 1;
         }
         if(num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            { /* search through offd to find all c neighbors */
               if(col_offd_S_to_A)
                  i1 = col_offd_S_to_A[S_offd_j[jj]];
               else
                  i1 = S_offd_j[jj];
               if(CF_marker_offd[i1] == 2)
               { /* i1 is a C point direct neighbor */
                  CF_marker_offd[i1] = 1;
               }
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate  arrays.
    *-----------------------------------------------------------------------*/
   P_diag_size = jj_counter;
   P_offd_size = jj_counter_offd;

   if (P_diag_size)
   {
      P_diag_j    = hypre_CTAlloc(HYPRE_Int, P_diag_size);
      P_diag_data = hypre_CTAlloc(double, P_diag_size);
   }

   if (P_offd_size)
   {
      P_offd_j    = hypre_CTAlloc(HYPRE_Int, P_offd_size);
      P_offd_data = hypre_CTAlloc(double, P_offd_size);
   }

   P_diag_i[n_fine] = jj_counter;
   P_offd_i[n_fine] = jj_counter_offd;

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   ccounter = start_indexing;
   ccounter_offd = start_indexing;

   /* Fine to coarse mapping */
   if(num_procs > 1)
   {
      /* shift to global coarse indices for the exchange, then shift back */
      for (i = 0; i < n_fine; i++)
         fine_to_coarse[i] += my_first_cpt;

      alt_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
                           full_off_procNodes,
                           fine_to_coarse_offd);

      for (i = 0; i < n_fine; i++)
         fine_to_coarse[i] -= my_first_cpt;
   }

   /* reset markers for the second (fill) pass */
   for (i = 0; i < n_fine; i++)
      P_marker[i] = -1;

   for (i = 0; i < full_off_procNodes; i++)
      P_marker_offd[i] = -1;

   /*-----------------------------------------------------------------------
    * Loop over fine grid points.
    *-----------------------------------------------------------------------*/
   jj_begin_row_offd = 0;
   for (i = 0; i < n_fine; i++)
   {
      jj_begin_row = jj_counter;
      if(num_procs > 1)
         jj_begin_row_offd = jj_counter_offd;

      /*--------------------------------------------------------------------
       * If i is a c-point, interpolation is the identity.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] >= 0)
      {
         P_diag_j[jj_counter]    = fine_to_coarse[i];
         P_diag_data[jj_counter] = one;
         jj_counter++;
      }

      /*--------------------------------------------------------------------
       * If i is an F-point, build interpolation.
       *--------------------------------------------------------------------*/
      else if (CF_marker[i] != -3)
      {
         ccounter = 0;
         ccounter_offd = 0;
         strong_f_marker--;

         /* entry selection mirrors the counting pass exactly so that the
          * row occupies precisely the slots counted for it */
         for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
         { /* Search C points only */
            i1 = S_diag_j[jj];

            /*--------------------------------------------------------------
             * If neighbor i1 is a C-point, set column number in P_diag_j
             * and initialize interpolation weight to zero.
             *--------------------------------------------------------------*/
            if (CF_marker[i1] > 0)
            {
               CF_marker[i1] = 2;
               if (P_marker[i1] < jj_begin_row)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter]    = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = zero;
                  jj_counter++;
               }
            }
         }

         if ( num_procs > 1)
         {
            for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            {
               if(col_offd_S_to_A)
                  i1 = col_offd_S_to_A[S_offd_j[jj]];
               else
                  i1 = S_offd_j[jj];
               if ( CF_marker_offd[i1] > 0)
               {
                  CF_marker_offd[i1] = 2;
                  if(P_marker_offd[i1] < jj_begin_row_offd)
                  {
                     P_marker_offd[i1] = jj_counter_offd;
                     P_offd_j[jj_counter_offd] = i1;
                     P_offd_data[jj_counter_offd] = zero;
                     jj_counter_offd++;
                  }
               }
            }
         }

         for(jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
         { /* Search through F points */
            i1 = S_diag_j[jj];
            if(CF_marker[i1] == -1)
            {
               /* tag i1 so the weight loop below can distribute its
                * connection over the row's C points */
               P_marker[i1] = strong_f_marker;
               common_c = 0;
               for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
               {
                  k1 = S_diag_j[kk];
                  if (CF_marker[k1] == 2)
                  {
                     common_c = 1;
                     break;
                  }
               }
               if(num_procs > 1 && common_c == 0)
               { /* no common c point yet, check offd */
                  for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
                  {
                     if(col_offd_S_to_A)
                        k1 = col_offd_S_to_A[S_offd_j[kk]];
                     else
                        k1 = S_offd_j[kk];
                     if (CF_marker_offd[k1] == 2)
                     { /* k1 is a c point check if it is common */
                        common_c = 1;
                        break;
                     }
                  }
               }
               if(!common_c)
               { /* No common c point, extend the interp set */
                  found_c = 0;
                  for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
                  {
                     k1 = S_diag_j[kk];
                     if (CF_marker[k1] >= 0)
                     {
                        if(P_marker[k1] < jj_begin_row)
                        {
                           P_marker[k1] = jj_counter;
                           P_diag_j[jj_counter] = fine_to_coarse[k1];
                           P_diag_data[jj_counter] = zero;
                           jj_counter++;
                           found_c = 1;
                           break;
                        }
                     }
                  }
                  if(num_procs > 1 && !found_c)
                  {
                     for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
                     {
                        if(col_offd_S_to_A)
                           k1 = col_offd_S_to_A[S_offd_j[kk]];
                        else
                           k1 = S_offd_j[kk];
                        if(CF_marker_offd[k1] >= 0)
                        {
                           if(P_marker_offd[k1] < jj_begin_row_offd)
                           {
                              P_marker_offd[k1] = jj_counter_offd;
                              P_offd_j[jj_counter_offd] = k1;
                              P_offd_data[jj_counter_offd] = zero;
                              jj_counter_offd++;
                              break;
                           }
                        }
                     }
                  }
               }
            }
         }

         if ( num_procs > 1)
         {
            for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            {
               i1 = S_offd_j[jj];
               if(col_offd_S_to_A)
                  i1 = col_offd_S_to_A[i1];
               if(CF_marker_offd[i1] == -1)
               { /* F points that are off proc */
                  P_marker_offd[i1] = strong_f_marker;
                  common_c = 0;
                  for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
                  { /* Check if common c */
                     k1 = Sop_j[kk];
                     if(k1 >= col_1 && k1 < col_n)
                     { /* In S_diag */
                        loc_col = k1-col_1;
                        if(CF_marker[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                     else
                     {
                        loc_col = -k1 - 1;
                        if(CF_marker_offd[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                  }
                  if(!common_c)
                  {
                     for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
                     {
                        k1 = Sop_j[kk];
                        /* Find local col number */
                        if(k1 >= col_1 && k1 < col_n)
                        {
                           loc_col = k1-col_1;
                           if(CF_marker[loc_col] > 0)
                           {
                              if(P_marker[loc_col] < jj_begin_row)
                              {
                                 P_marker[loc_col] = jj_counter;
                                 P_diag_j[jj_counter] = fine_to_coarse[loc_col];
                                 P_diag_data[jj_counter] = zero;
                                 jj_counter++;
                                 break;
                              }
                           }
                        }
                        else
                        {
                           loc_col = -k1 - 1;
                           if(CF_marker_offd[loc_col] > 0)
                           {
                              if(P_marker_offd[loc_col] < jj_begin_row_offd)
                              {
                                 P_marker_offd[loc_col] = jj_counter_offd;
                                 P_offd_j[jj_counter_offd]=loc_col;
                                 P_offd_data[jj_counter_offd] = zero;
                                 jj_counter_offd++;
                                 break;
                              }
                           }
                        }
                     }
                  }
               }
            }
         }

         /* restore the temporary "2" tags before computing weights */
         for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
         { /* Search C points only */
            i1 = S_diag_j[jj];

            /*--------------------------------------------------------------
             * If neighbor i1 is a C-point, set column number in P_diag_j
             * and initialize interpolation weight to zero.
             *--------------------------------------------------------------*/
            if (CF_marker[i1] == 2)
            {
               CF_marker[i1] = 1;
            }
         }
         if ( num_procs > 1)
         {
            for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            {
               if(col_offd_S_to_A)
                  i1 = col_offd_S_to_A[S_offd_j[jj]];
               else
                  i1 = S_offd_j[jj];
               if ( CF_marker_offd[i1] == 2)
               {
                  CF_marker_offd[i1] = 1;
               }
            }
         }

         jj_end_row = jj_counter;
         jj_end_row_offd = jj_counter_offd;

         /* assumes the diagonal entry is stored first in each row of
          * A_diag (hypre's ParCSR convention) */
         diagonal = A_diag_data[A_diag_i[i]];

         for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
         { /* i1 is a c-point and strongly influences i, accumulate
            * a_(i,i1) into interpolation weight */
            i1 = A_diag_j[jj];
            if (P_marker[i1] >= jj_begin_row)
            {
               P_diag_data[P_marker[i1]] += A_diag_data[jj];
            }
            else if(P_marker[i1] == strong_f_marker)
            {
               /* strong F neighbor: distribute a_(i,i1) over the C points
                * of the row, weighted by i1's connections of the opposite
                * sign to its diagonal */
               sum = zero;
               if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
               /* Loop over row of A for point i1 and calculate the sum
                * of the connections to c-points that strongly incluence i. */
               for(jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
               {
                  i2 = A_diag_j[jj1];
                  if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
                     sum += A_diag_data[jj1];
               }
               if(num_procs > 1)
               {
                  for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
                  {
                     i2 = A_offd_j[jj1];
                     if(P_marker_offd[i2] >= jj_begin_row_offd &&
                        (sgn*A_offd_data[jj1]) < 0)
                        sum += A_offd_data[jj1];
                  }
               }
               if(sum != 0)
               {
                  distribute = A_diag_data[jj]/sum;
                  /* Loop over row of A for point i1 and do the distribution */
                  for(jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
                  {
                     i2 = A_diag_j[jj1];
                     if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
                        P_diag_data[P_marker[i2]] +=
                           distribute*A_diag_data[jj1];
                  }
                  if(num_procs > 1)
                  {
                     for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
                     {
                        i2 = A_offd_j[jj1];
                        if(P_marker_offd[i2] >= jj_begin_row_offd &&
                           (sgn*A_offd_data[jj1]) < 0)
                           P_offd_data[P_marker_offd[i2]] +=
                              distribute*A_offd_data[jj1];
                     }
                  }
               }
               else
                  /* nothing to distribute to: lump into the diagonal */
                  diagonal += A_diag_data[jj];
            }
            /* neighbor i1 weakly influences i, accumulate a_(i,i1) into
             * diagonal */
            else if (CF_marker[i1] != -3)
            {
               if(num_functions == 1 || dof_func[i] == dof_func[i1])
                  diagonal += A_diag_data[jj];
            }
         }
         if(num_procs > 1)
         {
            for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
            {
               i1 = A_offd_j[jj];
               if(P_marker_offd[i1] >= jj_begin_row_offd)
                  P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
               else if(P_marker_offd[i1] == strong_f_marker)
               {
                  /* off-proc strong F neighbor: same distribution using the
                   * communicated full row A_ext (skipping its diagonal) */
                  sum = zero;
                  sgn = 1;
                  if(A_ext_data[A_ext_i[i1]] < 0) sgn = -1;
                  for(jj1 = A_ext_i[i1]+1; jj1 < A_ext_i[i1+1]; jj1++)
                  {
                     k1 = A_ext_j[jj1];
                     if(k1 >= col_1 && k1 < col_n)
                     { /* diag */
                        loc_col = k1 - col_1;
                        if(P_marker[loc_col] >= jj_begin_row && (sgn*A_ext_data[jj1])<0)
                           sum += A_ext_data[jj1];
                     }
                     else
                     {
                        loc_col = -k1 - 1;
                        if(P_marker_offd[loc_col] >= jj_begin_row_offd &&
                           (sgn*A_ext_data[jj1]) < 0)
                           sum += A_ext_data[jj1];
                     }
                  }
                  if(sum != 0)
                  {
                     distribute = A_offd_data[jj] / sum;
                     for(jj1 = A_ext_i[i1]+1; jj1 < A_ext_i[i1+1]; jj1++)
                     {
                        k1 = A_ext_j[jj1];
                        if(k1 >= col_1 && k1 < col_n)
                        { /* diag */
                           loc_col = k1 - col_1;
                           if(P_marker[loc_col] >= jj_begin_row &&
                              (sgn*A_ext_data[jj1]) < 0)
                              P_diag_data[P_marker[loc_col]] += distribute*
                                 A_ext_data[jj1];
                        }
                        else
                        {
                           loc_col = -k1 - 1;
                           if(P_marker_offd[loc_col] >= jj_begin_row_offd &&
                              (sgn*A_ext_data[jj1]) < 0)
                              P_offd_data[P_marker_offd[loc_col]] += distribute*
                                 A_ext_data[jj1];
                        }
                     }
                  }
                  else
                     diagonal += A_offd_data[jj];
               }
               else if (CF_marker_offd[i1] != -3)
               {
                  if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                     diagonal += A_offd_data[jj];
               }
            }
         }
         /* final weights: w = -a / diagonal (classical interpolation) */
         if (diagonal)
         {
            for(jj = jj_begin_row; jj < jj_end_row; jj++)
               P_diag_data[jj] /= -diagonal;
            for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
               P_offd_data[jj] /= -diagonal;
         }
      }
      strong_f_marker--;
   }

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   /* row starts are shared with A; P must not free them */
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      /* truncation may reallocate; refresh all local views */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* This builds col_map, col_map should be monotone increasing and contain
    * global numbers. */
   num_cols_P_offd = 0;
   if(P_offd_size)
   {
      /* reuse P_marker (resized to full_off_procNodes) as a 0/1 flag of
       * which offd columns are referenced by P_offd_j */
      hypre_TFree(P_marker);
      if (full_off_procNodes)
         P_marker = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
      for (i=0; i < full_off_procNodes; i++)
         P_marker[i] = 0;

      num_cols_P_offd = 0;
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            if(tmp_CF_marker_offd[index] >= 0)
            {
               num_cols_P_offd++;
               P_marker[index] = 1;
            }
         }
      }

      if (num_cols_P_offd)
         col_map_offd_P = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd);

      /* first: compress to local indices (flagged positions in order) */
      index = 0;
      for(i = 0; i < num_cols_P_offd; i++)
      {
         while( P_marker[index] == 0) index++;
         col_map_offd_P[i] = index++;
      }
      for(i = 0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(col_map_offd_P,
                                          P_offd_j[i],
                                          num_cols_P_offd);

      /* second: replace the local indices by global coarse numbers */
      index = 0;
      for(i = 0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index] == 0) index++;
         col_map_offd_P[i] = fine_to_coarse_offd[index];
         index++;
      }

      /* Sort the col_map_offd_P and P_offd_j correctly */
      for(i = 0; i < num_cols_P_offd; i++)
         P_marker[i] = col_map_offd_P[i];

      /* Check if sort actually changed anything */
      if(hypre_ssort(col_map_offd_P,num_cols_P_offd))
      {
         /* remap P_offd_j to the sorted column positions */
         for(i = 0; i < P_offd_size; i++)
            for(j = 0; j < num_cols_P_offd; j++)
               if(P_marker[P_offd_j[i]] == col_map_offd_P[j])
               {
                  P_offd_j[i] = j;
                  j = num_cols_P_offd; /* terminate inner loop */
               }
      }
   }

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }

   hypre_MatvecCommPkgCreate(P);

   /* reset the special -3 F markers to ordinary F points */
   for (i=0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;

   *P_ptr = P;

   /* Deallocate memory */
   hypre_TFree(fine_to_coarse);
   hypre_TFree(P_marker);
   /*hypre_TFree(clist);*/

   if (num_procs > 1)
   {
      /*hypre_TFree(clist_offd);*/
      hypre_CSRMatrixDestroy(Sop);
      hypre_CSRMatrixDestroy(A_ext);
      hypre_TFree(fine_to_coarse_offd);
      hypre_TFree(P_marker_offd);
      hypre_TFree(CF_marker_offd);
      hypre_TFree(tmp_CF_marker_offd);
      if(num_functions > 1)
         hypre_TFree(dof_func_offd);
      hypre_TFree(found);

      hypre_MatvecCommPkgDestroy(extend_comm_pkg);
   }

   return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildExtInterp
* Comment:
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildExtInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_Int *num_cpts_global,
HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
double trunc_factor, HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
double *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
double *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int col_n = col_1 + local_numrows;
HYPRE_Int total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
double *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
double *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
HYPRE_Int *col_map_offd_P = NULL;
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
double *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_Int *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_Int *fine_to_coarse_offd = NULL;
HYPRE_Int *found = NULL;
HYPRE_Int num_cols_P_offd;
HYPRE_Int newoff, loc_col;
HYPRE_Int A_ext_rows, full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_Int *Sop_j;
HYPRE_Int Soprows, sgn = 1;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter, coarse_counter_offd;
/* Interpolation weight variables */
double sum, diagonal, distribute;
HYPRE_Int strong_f_marker = -2;
/* Loop variables */
HYPRE_Int index;
HYPRE_Int start_indexing = 0;
HYPRE_Int i, i1, i2, j, jj, kk, k1, jj1;
/* Definitions */
double zero = 0.0;
double one = 1.0;
double wall_time;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
* neighbors */
newoff = 0;
full_off_procNodes = 0;
if (num_procs > 1)
{
/*----------------------------------------------------------------------
* Get the off processors rows for A and S, associated with columns in
* A_offd and S_offd.
*---------------------------------------------------------------------*/
A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1);
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
A_ext_rows = hypre_CSRMatrixNumRows(A_ext);
Sop = hypre_ParCSRMatrixExtractBExt(S,A,0);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixJ(Sop);
Soprows = hypre_CSRMatrixNumRows(Sop);
/* Find nodes that are neighbors of neighbors, not found in offd */
newoff = new_offd_nodes(&found, A_ext_rows, A_ext_i, A_ext_j,
Soprows, col_map_offd, col_1, col_n,
Sop_i, Sop_j, CF_marker, comm_pkg);
if(newoff >= 0)
full_off_procNodes = newoff + num_cols_A_offd;
else
return hypre_error_flag;
/* Possibly add new points and new processors to the comm_pkg, all
* processors need new_comm_pkg */
/* AHB - create a new comm package just for extended info -
this will work better with the assumed partition*/
hypre_ParCSRFindExtendCommPkg(A, newoff, found,
&extend_comm_pkg);
if (full_off_procNodes)
CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
if (num_functions > 1 && full_off_procNodes > 0)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
alt_insert_new_nodes(comm_pkg, extend_comm_pkg, CF_marker,
full_off_procNodes, CF_marker_offd);
if(num_functions > 1)
alt_insert_new_nodes(comm_pkg, extend_comm_pkg, dof_func,
full_off_procNodes, dof_func_offd);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
if (n_fine)
{
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
}
initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
coarse_counter_offd = 0;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
P_diag_i[i] = jj_counter;
if (num_procs > 1)
P_offd_i[i] = jj_counter_offd;
if (CF_marker[i] >= 0)
{
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i, or C-points that strongly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{ /* i1 is a C point */
if (P_marker[i1] < P_diag_i[i])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{ /* i1 is a F point, loop through it's strong neighbors */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if(P_marker[k1] < P_diag_i[i])
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] >= 0)
{
if(P_marker_offd[k1] < P_offd_i[i])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if (CF_marker_offd[i1] >= 0)
{
if(P_marker_offd[i1] < P_offd_i[i])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{ /* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
k1 = Sop_j[kk];
if(k1 >= col_1 && k1 < col_n)
{ /* In S_diag */
loc_col = k1-col_1;
if(CF_marker[loc_col] >= 0)
{
if(P_marker[loc_col] < P_diag_i[i])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
}
else
{
loc_col = -k1 - 1;
if(CF_marker_offd[loc_col] >= 0)
{
if(P_marker_offd[loc_col] < P_offd_i[i])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
}
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d determine structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
if (debug_flag== 4) wall_time = time_getWallclockSeconds();
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size);
P_diag_data = hypre_CTAlloc(double, P_diag_size);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size);
P_offd_data = hypre_CTAlloc(double, P_offd_size);
}
P_diag_i[n_fine] = jj_counter;
P_offd_i[n_fine] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/* Fine to coarse mapping */
if(num_procs > 1)
{
for (i = 0; i < n_fine; i++)
fine_to_coarse[i] += my_first_cpt;
alt_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes,
fine_to_coarse_offd);
for (i = 0; i < n_fine; i++)
fine_to_coarse[i] -= my_first_cpt;
}
for (i = 0; i < n_fine; i++)
P_marker[i] = -1;
for (i = 0; i < full_off_procNodes; i++)
P_marker_offd[i] = -1;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
jj_begin_row = jj_counter;
jj_begin_row_offd = jj_counter_offd;
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if(P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if(CF_marker_offd[k1] >= 0)
{
if(P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if ( CF_marker_offd[i1] >= 0)
{
if(P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
k1 = Sop_j[kk];
/* Find local col number */
if(k1 >= col_1 && k1 < col_n)
{
loc_col = k1-col_1;
if(CF_marker[loc_col] >= 0)
{
if(P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
else
{
loc_col = -k1 - 1;
if(CF_marker_offd[loc_col] >= 0)
{
if(P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* i1 is a c-point and strongly influences i, accumulate
* a_(i,i1) into interpolation weight */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if(P_marker[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly influence i. */
for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if((P_marker[i2] >= jj_begin_row ) && (sgn*A_diag_data[jj1]) < 0)
sum += A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
sum += A_offd_data[jj1];
}
}
if(sum != 0)
{
distribute = A_diag_data[jj]/sum;
/* Loop over row of A for point i1 and do the distribution */
for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute*A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute*A_offd_data[jj1];
}
}
}
else
{
diagonal += A_diag_data[jj];
}
}
/* neighbor i1 weakly influences i, accumulate a_(i,i1) into
* diagonal */
else if (CF_marker[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
if(num_procs > 1)
{
for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if(P_marker_offd[i1] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
else if(P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
if(A_ext_data[A_ext_i[i1]] < 0) sgn = -1;
for(jj1 = A_ext_i[i1]+1; jj1 < A_ext_i[i1+1]; jj1++)
{
k1 = A_ext_j[jj1];
if(k1 >= col_1 && k1 < col_n)
{ /* diag */
loc_col = k1 - col_1;
if((P_marker[loc_col] >= jj_begin_row )
&& (sgn*A_ext_data[jj1]) < 0)
sum += A_ext_data[jj1];
}
else
{
loc_col = -k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd &&
(sgn*A_ext_data[jj1]) < 0)
sum += A_ext_data[jj1];
}
}
if(sum != 0)
{
distribute = A_offd_data[jj] / sum;
for(jj1 = A_ext_i[i1]+1; jj1 < A_ext_i[i1+1]; jj1++)
{
k1 = A_ext_j[jj1];
if(k1 >= col_1 && k1 < col_n)
{ /* diag */
loc_col = k1 - col_1;
if(P_marker[loc_col] >= jj_begin_row &&
(sgn*A_ext_data[jj1]) < 0)
P_diag_data[P_marker[loc_col]] += distribute*
A_ext_data[jj1];
}
else
{
loc_col = -k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd &&
(sgn*A_ext_data[jj1]) < 0)
P_offd_data[P_marker_offd[loc_col]] += distribute*
A_ext_data[jj1];
}
}
}
else
{
diagonal += A_offd_data[jj];
}
}
else if (CF_marker_offd[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
if (diagonal)
{
for(jj = jj_begin_row; jj < jj_end_row; jj++)
P_diag_data[jj] /= -diagonal;
for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
P_offd_data[jj] /= -diagonal;
}
}
strong_f_marker--;
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d fill structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
num_cols_P_offd = 0;
if(P_offd_size)
{
hypre_TFree(P_marker);
if (full_off_procNodes)
P_marker = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
for (i=0; i < full_off_procNodes; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
if(tmp_CF_marker_offd[index] >= 0)
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
}
if (num_cols_P_offd)
col_map_offd_P = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd);
index = 0;
for(i = 0; i < num_cols_P_offd; i++)
{
while( P_marker[index] == 0) index++;
col_map_offd_P[i] = index++;
}
for(i = 0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(col_map_offd_P,
P_offd_j[i],
num_cols_P_offd);
index = 0;
for(i = 0; i < num_cols_P_offd; i++)
{
while (P_marker[index] == 0) index++;
col_map_offd_P[i] = fine_to_coarse_offd[index];
index++;
}
/* Sort the col_map_offd_P and P_offd_j correctly */
for(i = 0; i < num_cols_P_offd; i++)
P_marker[i] = col_map_offd_P[i];
/* Check if sort actually changed anything */
if(hypre_ssort(col_map_offd_P,num_cols_P_offd))
{
for(i = 0; i < P_offd_size; i++)
for(j = 0; j < num_cols_P_offd; j++)
if(P_marker[P_offd_j[i]] == col_map_offd_P[j])
{
P_offd_j[i] = j;
j = num_cols_P_offd;
}
}
hypre_TFree(P_marker);
}
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_MatvecCommPkgCreate(P);
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse);
hypre_TFree(P_marker);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd);
hypre_TFree(P_marker_offd);
hypre_TFree(CF_marker_offd);
hypre_TFree(tmp_CF_marker_offd);
if(num_functions > 1)
hypre_TFree(dof_func_offd);
hypre_TFree(found);
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
|
reduce.c | /*
Copyright (c) 2013, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/*******************************************************************
NAME: reduce
PURPOSE: This program tests the efficiency with which a collection of
vectors that are distributed among the threads can be added in
elementwise fashion. The number of vectors per thread is two,
so that a reduction will take place even if the code runs on
just a single thread.
USAGE: The program takes as input the number of threads, the length
of the vectors, the number of times the reduction is repeated,
plus, optionally, the type of reduction algorithm . The default
algorithm is binary tree reduction with point-to-point
synchronization.
Note that vector reduction is not currently available in C
in the OpenMP standard (version 2.5).
<progname> <# threads> <# iterations> <vector length> [<algorithm>]
The output consists of diagnostics to make sure the
algorithm worked, and of timing statistics.
FUNCTIONS CALLED:
Other than OpenMP or standard C functions, the following
functions are used in this program:
wtime()
bail_out()
NOTES: The long-optimal algorithm is based on a distributed memory
algorithm described in:
Collective Communication; Theory, Practice, and Experience by
Chan, Heimlich, Purkayastha, Van de Geijn (to Appear). This
is a two-phase, multi-stage algorithm. In the first phase,
partial sums of the vectors are built by each thread locally.
In the second phase the partial sums are collected on the
master thread. In the distributed-memory algorithm the second
phase also has multiple stages. In the shared-memory algorithm
it is more efficient to let each thread write its contribution
into the master thread vector in a single stage.
HISTORY: Written by Rob Van der Wijngaart, March 2006.
*******************************************************************/
#include <par-res-kern_general.h>
#include <par-res-kern_omp.h>
#define LINEAR 11
#define BINARY_BARRIER 12
#define BINARY_P2P 13
#define LONG_OPTIMAL 14
#define NONE 15
#define LOCAL 16
#define VEC0(id,i) vector[(id )*(vector_length)+i]
#define VEC1(id,i) vector[(id+nthread)*(vector_length)+i]
/* define shorthand for flag with cache line padding */
#define LINEWORDS 16
#define flag(i) flag[(i)*LINEWORDS]
int main(int argc, char ** argv)
{
  int    my_ID;           /* Thread ID                                       */
  long   vector_length;   /* length of vectors to be aggregated              */
  long   total_length;    /* bytes needed to store reduction vectors         */
  double reduce_time,     /* timing parameters                               */
         avgtime;
  double epsilon=1.e-8;   /* error tolerance                                 */
  int    group_size,      /* size of aggregating half of thread pool         */
         old_size,        /* group size in previous binary tree iteration    */
         i, id, iter, stage; /* dummies                                      */
  double element_value;   /* reference element value for final vector        */
  char   *algorithm;      /* reduction algorithm selector                    */
  int    intalgorithm;    /* integer encoding of algorithm selector          */
  int    iterations;      /* number of times the reduction is carried out    */
  int    flag[MAX_THREADS*LINEWORDS]; /* used for pairwise synchronizations  */
  int    start[MAX_THREADS],
         end[MAX_THREADS];/* segments of vectors for bucket algorithm        */
  long   segment_size;
  int    my_donor, my_segment;
  int    nthread_input,   /* thread parameters                               */
         nthread;
  double RESTRICT *vector;/* vector pair to be reduced                       */
  int    num_error=0;     /* flag that signals that requested and obtained
                             numbers of threads are the same                 */

/*****************************************************************************
** process and test input parameters
******************************************************************************/

  if (argc != 4 && argc != 5){
    printf("Usage: %s <# threads> <# iterations> <vector length> ", *argv);
    /* fixed typo: message used to read "<alghorithm>" */
    printf("[<algorithm>]\n");
    printf("Algorithm: linear, binary-barrier, binary-p2p, or long-optimal\n");
    return(EXIT_FAILURE);
  }

  /* Take number of threads to request from command line */
  nthread_input = atoi(*++argv);
  if ((nthread_input < 1) || (nthread_input > MAX_THREADS)) {
    printf("ERROR: Invalid number of threads: %d\n", nthread_input);
    exit(EXIT_FAILURE);
  }
  omp_set_num_threads(nthread_input);

  iterations = atoi(*++argv);
  if (iterations < 1){
    printf("ERROR: Iterations must be positive : %d \n", iterations);
    exit(EXIT_FAILURE);
  }

  vector_length = atol(*++argv);
  if (vector_length < 1){
    /* vector_length is a long: use %ld (plain %d is undefined behavior
       on platforms where long is wider than int)                        */
    printf("ERROR: vector length must be >= 1 : %ld \n",vector_length);
    exit(EXIT_FAILURE);
  }

  /* two vectors of vector_length doubles per thread */
  total_length = 2*nthread_input*sizeof(double)*vector_length;
  vector = (double *) malloc(total_length);
  if (!vector) {
    printf("Could not allocate space for vectors\n");
    exit(EXIT_FAILURE);
  }

  algorithm = "binary-p2p";
  if (argc == 5) algorithm = *++argv;

  intalgorithm = NONE;
  if (!strcmp(algorithm,"linear" )) intalgorithm = LINEAR;
  if (!strcmp(algorithm,"binary-barrier")) intalgorithm = BINARY_BARRIER;
  if (!strcmp(algorithm,"binary-p2p" )) intalgorithm = BINARY_P2P;
  if (!strcmp(algorithm,"long-optimal" )) intalgorithm = LONG_OPTIMAL;
  if (intalgorithm == NONE) {
    printf("Wrong algorithm: %s; choose linear, binary-barrier, ", algorithm);
    printf("binary-p2p, or long-optimal\n");
    exit(EXIT_FAILURE);
  }
  else {
    /* with one thread the local partial sum already is the full reduction */
    if (nthread_input == 1) intalgorithm = LOCAL;
  }

  #pragma omp parallel private(i, old_size, group_size, my_ID, iter, start, end, \
                               segment_size, stage, id, my_donor, my_segment)
  {

  my_ID = omp_get_thread_num();

  #pragma omp master
  {
  nthread = omp_get_num_threads();
  printf("OpenMP Vector Reduction\n");
  if (nthread != nthread_input) {
    num_error = 1;
    printf("ERROR: number of requested threads %d does not equal ",
           nthread_input);
    printf("number of spawned threads %d\n", nthread);
  }
  else {
    printf("Number of threads = %d\n",nthread_input);
    /* vector_length is a long: %ld, not %d */
    printf("Vector length = %ld\n", vector_length);
    printf("Reduction algorithm = %s\n", algorithm);
    printf("Number of iterations = %d\n", iterations);
  }
  }
  bail_out(num_error);

  for (iter=0; iter<=iterations; iter++) {

    /* start timer after a warmup iteration */
    if (iter == 1) {
      #pragma omp barrier
      #pragma omp master
      {
        reduce_time = wtime();
      }
    }

    /* in case of the long-optimal algorithm we need a barrier before the
       reinitialization to make sure that we don't overwrite parts of the
       vector before other threads are done with those parts */
    if (intalgorithm == LONG_OPTIMAL) {
      #pragma omp barrier
    }

    /* initialize the arrays, assuming first-touch memory placement */
    for (i=0; i<vector_length; i++) {
      VEC0(my_ID,i) = (double)(my_ID+1);
      VEC1(my_ID,i) = (double)(my_ID+1+nthread);
    }

    if (intalgorithm == BINARY_P2P) {
      /* we need a barrier before setting all flags to zero, to avoid
         zeroing some that are still in use in a previous iteration */
      #pragma omp barrier
      flag(my_ID) = 0;
      /* we also need a barrier after setting the flags, to make sure each is
         visible to all threads, and to synchronize before the timer starts */
      #pragma omp barrier
    }

    /* do actual reduction */

    /* first do the "local" part, which is the same for all algorithms */
    for (i=0; i<vector_length; i++) {
      VEC0(my_ID,i) += VEC1(my_ID,i);
    }

    /* now do the "non-local" part */

    switch (intalgorithm) {

    case LOCAL:
       break;

    case LINEAR:
       /* master thread accumulates all per-thread partial sums serially */
       #pragma omp barrier
       #pragma omp master
       {
         for (id=1; id<nthread; id++) {
           for (i=0; i<vector_length; i++) {
             VEC0(0,i) += VEC0(id,i);
           }
         }
       }
       break;

    case BINARY_BARRIER:

      group_size = nthread;

      while (group_size >1) {
        /* barrier to make sure threads have completed their updates before
           the results are being read */
        #pragma omp barrier
        old_size = group_size;
        group_size = (group_size+1)/2;

        /* Threads in "first half" of group aggregate data from threads in
           second half; must make sure the counterpart is within old group.
           If group size is odd, the last thread in the group does not have
           a counterpart. */
        if (my_ID < group_size && my_ID+group_size<old_size) {
          for (i=0; i<vector_length; i++) {
            VEC0(my_ID,i) += VEC0(my_ID+group_size,i);
          }
        }
      }
      break;

    case BINARY_P2P:

      group_size = nthread;

      while (group_size >1) {
        old_size = group_size;
        group_size = (group_size+1)/2;

        /* synchronize between each pair of threads that collaborate to
           aggregate a new subresult, to make sure the donor of the pair has
           updated its vector in the previous round before it is being read */
        if (my_ID < group_size && my_ID+group_size<old_size) {
          while (flag(my_ID+group_size) == 0) {
            #pragma omp flush
          }
          /* make sure I read the latest version of vector from memory */
          #pragma omp flush
          for (i=0; i<vector_length; i++) {
            VEC0(my_ID,i) += VEC0(my_ID+group_size,i);
          }
        }
        else {
          if (my_ID < old_size) {
            /* I am a producer of data in this iteration; make sure my
               updated version can be seen by all threads */
            flag(my_ID) = 1;
            #pragma omp flush
          }
        }
      }
      break;

    case LONG_OPTIMAL:

      /* compute starts and ends of subvectors to be passed among threads */
      segment_size = (vector_length+nthread-1)/nthread;
      for (id=0; id<nthread; id++) {
        start[id] = segment_size*id;
        end[id]   = MIN(vector_length,segment_size*(id+1));
      }

      /* first do the Bucket Reduce Scatter in nthread-1 stages */
      my_donor = (my_ID-1+nthread)%nthread;
      for (stage=1; stage<nthread; stage++) {
        #pragma omp barrier
        my_segment = (my_ID-stage+nthread)%nthread;
        for (i=start[my_segment]; i<end[my_segment]; i++) {
          VEC0(my_ID,i) += VEC0(my_donor,i);
        }
      }
      /* next, each thread pushes its contribution into the master thread
         vector; no need to synchronize, because of the push model */
      my_segment = (my_ID+1)%nthread;
      if (my_ID != 0)
        for (i=start[my_segment]; i<end[my_segment]; i++) {
          VEC0(0,i) = VEC0(my_ID,i);
        }
      break;

    } /* end of algorithm switch statement */

  } /* end of iter loop */

  #pragma omp barrier
  #pragma omp master
  {
    reduce_time = wtime() - reduce_time;
  }

  } /* end of OpenMP parallel region */

  /* verify correctness: element value is Sum_{id=1..2*nthread} id */
  element_value = (double)nthread*(2.0*(double)nthread+1.0);

  for (i=0; i<vector_length; i++) {
    if (ABS(VEC0(0,i) - element_value) >= epsilon) {
       printf("First error at i=%d; value: %lf; reference value: %lf\n",
              i, VEC0(0,i), element_value);
       exit(EXIT_FAILURE);
    }
  }

  printf("Solution validates\n");
#ifdef VERBOSE
  printf("Element verification value: %lf\n", element_value);
#endif
  avgtime = reduce_time/iterations;
  printf("Rate (MFlops/s): %lf Avg time (s): %lf\n",
         1.0E-06 * (2.0*nthread-1.0)*vector_length/avgtime, avgtime);

  exit(EXIT_SUCCESS);
}
|
matrix.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M AAA TTTTT RRRR IIIII X X %
% MM MM A A T R R I X X %
% M M M AAAAA T RRRR I X %
% M M A A T R R I X X %
% M M A A T R R IIIII X X %
% %
% %
% MagickCore Matrix Methods %
% %
% Software Design %
% Cristy %
% August 2007 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/matrix.h"
#include "magick/memory_.h"
#include "magick/pixel-private.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/thread-private.h"
#include "magick/utility.h"
/*
Typedef declaration.
*/
/*
  A columns x rows cache of fixed-size elements (stride bytes each),
  backed either by heap / anonymous-mapped memory or, when memory is
  unavailable, by a disk file (see AcquireMatrixInfo()).
*/
struct _MatrixInfo
{
  CacheType
    type;                 /* MemoryCache or disk-backed cache */

  size_t
    columns,              /* matrix width in elements */
    rows,                 /* matrix height in elements */
    stride;               /* bytes per element */

  MagickSizeType
    length;               /* total bytes: columns*rows*stride */

  MagickBooleanType
    mapped,               /* MagickTrue if elements came from MapBlob() */
    synchronize;          /* honor MAGICK_SYNCHRONIZE env var (preallocate
                             disk blocks when extending the cache file) */

  char
    path[MaxTextExtent];  /* disk cache file path — presumably set when the
                             disk-backed cache is created; confirm in the
                             code that opens the file */

  int
    file;                 /* disk cache file descriptor; -1 if none */

  void
    *elements;            /* element storage when memory-backed */

  SemaphoreInfo
    *semaphore;           /* serializes lseek()+write() on platforms
                             without pwrite() */

  size_t
    signature;            /* validity marker (MagickSignature) */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMatrixInfo() allocates the ImageInfo structure.
%
% The format of the AcquireMatrixInfo method is:
%
% MatrixInfo *AcquireMatrixInfo(const size_t columns,const size_t rows,
% const size_t stride,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o columns: the matrix columns.
%
% o rows: the matrix rows.
%
% o stride: the matrix stride.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Return the smaller of two unsigned sizes.
*/
static inline MagickSizeType MagickMin(const MagickSizeType x,
  const MagickSizeType y)
{
  return(x < y ? x : y);
}
#if defined(SIGBUS)
/*
  SIGBUS handler installed by SetMatrixExtent(): a bus error while touching
  the matrix cache (e.g. the disk filled up under a sparse file) raises a
  fatal cache exception.  The signal number parameter is unused.
  NOTE(review): ThrowFatalException presumably aborts and does not return —
  confirm against the exception-private.h macro definition.
*/
static void MatrixSignalHandler(int status)
{
  ThrowFatalException(CacheFatalError,"UnableToExtendMatrixCache");
}
#endif
/*
  Write `length` bytes from `buffer` to the matrix disk file starting at
  byte `offset`.  Returns the number of bytes actually written (which may
  be less than `length` on a hard error), or -1 if the initial seek fails.
  Short writes are continued from where they stopped; writes interrupted
  by EINTR are retried.

  Without pwrite() the file position is shared state, so the lseek()+write()
  sequence is serialized with the matrix semaphore; with pwrite() each call
  carries its own offset and no locking is needed.
*/
static inline MagickOffsetType WriteMatrixElements(
  const MatrixInfo *restrict matrix_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  LockSemaphoreInfo(matrix_info->semaphore);
  if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
    {
      UnlockSemaphoreInfo(matrix_info->semaphore);
      return((MagickOffsetType) -1);
    }
#endif
  count=0;
  /* chunk each write to at most SSIZE_MAX bytes, as required by POSIX */
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX));
#else
    count=pwrite(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        /* zero the increment so a retry re-attempts the same range;
           any error other than EINTR aborts, returning bytes written
           so far.  NOTE(review): a persistent zero-byte write with
           errno == EINTR would loop here — confirm acceptable. */
        count=0;
        if (errno != EINTR)
          break;
      }
  }
#if !defined(MAGICKCORE_HAVE_PWRITE)
  UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
  return(i);
}
/*
  Grow the matrix disk file so it is at least `length` bytes long.
  Returns MagickTrue on success (including when the file already has
  sufficient extent) and MagickFalse on failure.
*/
static MagickBooleanType SetMatrixExtent(MatrixInfo *restrict matrix_info,
  MagickSizeType length)
{
  MagickOffsetType
    count,
    extent,
    offset;

  /* reject lengths that do not round-trip through a signed offset */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(matrix_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    return(MagickTrue);
  /* writing a single zero byte at length-1 implicitly extends the file
     (possibly as a sparse file) */
  extent=(MagickOffsetType) length-1;
  count=WriteMatrixElements(matrix_info,extent,1,(const unsigned char *) "");
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
  if (matrix_info->synchronize != MagickFalse)
    {
      int
        status;

      /* actually reserve the disk blocks so later writes to the mapping
         cannot fail for lack of space */
      status=posix_fallocate(matrix_info->file,offset+1,extent-offset);
      if (status != 0)
        return(MagickFalse);
    }
#endif
#if defined(SIGBUS)
  /* a bus error while touching the (possibly sparse) extent is fatal */
  (void) signal(SIGBUS,MatrixSignalHandler);
#endif
  return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue);
}
/*
  AcquireMatrixInfo() allocates a columns x rows matrix whose elements are
  `stride` bytes each.  Backing storage is chosen by available resources:
  heap memory, then an anonymous memory map, then a disk file (optionally
  memory-mapped).  Returns NULL on failure; errors are reported through
  `exception`.

  Fix: the original overflow check divided by `rows` and `stride` without
  guarding against zero dimensions, dividing by zero when either was 0.
*/
MagickExport MatrixInfo *AcquireMatrixInfo(const size_t columns,
  const size_t rows,const size_t stride,ExceptionInfo *exception)
{
  char
    *synchronize;

  MagickBooleanType
    status;

  MatrixInfo
    *matrix_info;

  matrix_info=(MatrixInfo *) AcquireMagickMemory(sizeof(*matrix_info));
  if (matrix_info == (MatrixInfo *) NULL)
    return((MatrixInfo *) NULL);
  (void) ResetMagickMemory(matrix_info,0,sizeof(*matrix_info));
  matrix_info->signature=MagickSignature;
  matrix_info->columns=columns;
  matrix_info->rows=rows;
  matrix_info->stride=stride;
  matrix_info->semaphore=AllocateSemaphoreInfo();
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      matrix_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  /*
    Reject zero dimensions (the overflow check below would divide by zero)
    and products that overflow MagickSizeType.
  */
  matrix_info->length=(MagickSizeType) columns*rows*stride;
  if ((columns == 0) || (rows == 0) || (stride == 0) ||
      (matrix_info->columns != (size_t) (matrix_info->length/rows/stride)))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'","matrix cache");
      return(DestroyMatrixInfo(matrix_info));
    }
  /*
    First choice: plain heap memory (or an anonymous map as fallback).
  */
  matrix_info->type=MemoryCache;
  status=AcquireMagickResource(AreaResource,matrix_info->length);
  if ((status != MagickFalse) &&
      (matrix_info->length == (MagickSizeType) ((size_t) matrix_info->length)))
    {
      status=AcquireMagickResource(MemoryResource,matrix_info->length);
      if (status != MagickFalse)
        {
          matrix_info->mapped=MagickFalse;
          matrix_info->elements=AcquireMagickMemory((size_t)
            matrix_info->length);
          if (matrix_info->elements == NULL)
            {
              matrix_info->mapped=MagickTrue;
              matrix_info->elements=MapBlob(-1,IOMode,0,(size_t)
                matrix_info->length);
            }
          if (matrix_info->elements == (unsigned short *) NULL)
            RelinquishMagickResource(MemoryResource,matrix_info->length);
        }
    }
  /*
    Fallback: a unique temporary disk file, memory-mapped when permitted.
  */
  matrix_info->file=(-1);
  if (matrix_info->elements == (unsigned short *) NULL)
    {
      status=AcquireMagickResource(DiskResource,matrix_info->length);
      if (status == MagickFalse)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
            "CacheResourcesExhausted","`%s'","matrix cache");
          return(DestroyMatrixInfo(matrix_info));
        }
      matrix_info->type=DiskCache;
      (void) AcquireMagickResource(MemoryResource,matrix_info->length);
      matrix_info->file=AcquireUniqueFileResource(matrix_info->path);
      if (matrix_info->file == -1)
        return(DestroyMatrixInfo(matrix_info));
      status=AcquireMagickResource(MapResource,matrix_info->length);
      if (status != MagickFalse)
        {
          status=SetMatrixExtent(matrix_info,matrix_info->length);
          if (status != MagickFalse)
            {
              matrix_info->elements=(void *) MapBlob(matrix_info->file,IOMode,0,
                (size_t) matrix_info->length);
              if (matrix_info->elements != NULL)
                matrix_info->type=MapCache;
              else
                RelinquishMagickResource(MapResource,matrix_info->length);
            }
        }
    }
  return(matrix_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMagickMatrix() allocates and returns a matrix in the form of an
% array of pointers to an array of doubles, with all values pre-set to zero.
%
% This is used to generate the two dimensional matrix, and the vectors
% required for the GaussJordanElimination() method below, solving some
% system of simultaneous equations.
%
% The format of the AcquireMagickMatrix method is:
%
% double **AcquireMagickMatrix(const size_t number_rows,
% const size_t size)
%
% A description of each parameter follows:
%
% o number_rows: the number pointers for the array of pointers
% (first dimension).
%
% o size: the size of the array of doubles each pointer points to
% (second dimension).
%
*/
/*
  AcquireMagickMatrix() returns a number_rows array of row pointers, each
  addressing `size` doubles initialized to 0.0, or NULL on allocation
  failure (in which case any partial allocation is released).
*/
MagickExport double **AcquireMagickMatrix(const size_t number_rows,
  const size_t size)
{
  double
    **rows;

  ssize_t
    r,
    c;

  rows=(double **) AcquireQuantumMemory(number_rows,sizeof(*rows));
  if (rows == (double **) NULL)
    return((double **) NULL);
  for (r=0; r < (ssize_t) number_rows; r++)
  {
    rows[r]=(double *) AcquireQuantumMemory(size,sizeof(**rows));
    if (rows[r] == (double *) NULL)
      {
        /* unwind the rows allocated so far, then the pointer array */
        while (--r >= 0)
          rows[r]=(double *) RelinquishMagickMemory(rows[r]);
        rows=(double **) RelinquishMagickMemory(rows);
        return((double **) NULL);
      }
    for (c=0; c < (ssize_t) size; c++)
      rows[r][c]=0.0;
  }
  return(rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyMatrixInfo() dereferences a matrix, deallocating memory associated
% with the matrix.
%
% The format of the DestroyImage method is:
%
% MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
{
  assert(matrix_info != (MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickSignature);
  LockSemaphoreInfo(matrix_info->semaphore);
  switch (matrix_info->type)
  {
    case MemoryCache:
    {
      /* mapped==MagickTrue means the heap allocation failed at acquire time
         and an anonymous MapBlob() was used instead */
      if (matrix_info->mapped == MagickFalse)
        matrix_info->elements=RelinquishMagickMemory(matrix_info->elements);
      else
        {
          (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
          matrix_info->elements=(unsigned short *) NULL;
        }
      RelinquishMagickResource(MemoryResource,matrix_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
      matrix_info->elements=NULL;
      RelinquishMagickResource(MapResource,matrix_info->length);
    }
    /* no break: a map-cache matrix is disk-backed (see AcquireMatrixInfo),
       so fall through to close and remove its temporary file as well */
    case DiskCache:
    {
      if (matrix_info->file != -1)
        (void) close(matrix_info->file);
      (void) RelinquishUniqueFileResource(matrix_info->path);
      RelinquishMagickResource(DiskResource,matrix_info->length);
      break;
    }
    default:
      break;
  }
  UnlockSemaphoreInfo(matrix_info->semaphore);
  DestroySemaphoreInfo(&matrix_info->semaphore);
  return((MatrixInfo *) RelinquishMagickMemory(matrix_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a u s s J o r d a n E l i m i n a t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussJordanElimination() returns a matrix in reduced row echelon form,
% while simultaneously reducing and thus solving the augumented results
% matrix.
%
% See also http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
% The format of the GaussJordanElimination method is:
%
% MagickBooleanType GaussJordanElimination(double **matrix,
% double **vectors,const size_t rank,const size_t number_vectors)
%
% A description of each parameter follows:
%
% o matrix: the matrix to be reduced, as an 'array of row pointers'.
%
% o vectors: the additional matrix augmenting the matrix for row reduction.
% Producing an 'array of column vectors'.
%
% o rank: The size of the matrix (both rows and columns). Also represents
% the number terms that need to be solved.
%
% o number_vectors: Number of vector columns, augmenting the above matrix.
% Usually 1, but can be more for more complex equation solving.
%
% Note that the 'matrix' is given as a 'array of row pointers' of rank size.
% That is values can be assigned as matrix[row][column] where 'row' is
% typically the equation, and 'column' is the term of the equation.
% That is the matrix is in the form of a 'row first array'.
%
% However 'vectors' is a 'array of column pointers' which can have any number
% of columns, with each column array the same 'rank' size as 'matrix'.
%
% This allows for simpler handling of the results, especially is only one
% column 'vector' is all that is required to produce the desired solution.
%
% For example, the 'vectors' can consist of a pointer to a simple array of
% doubles, when only one set of simultaneous equations is to be solved from
% the given set of coefficient weighted terms.
%
% double **matrix = AcquireMagickMatrix(8UL,8UL);
% double coefficents[8];
% ...
% GaussJordanElimination(matrix, &coefficents, 8UL, 1UL);
%
% However by specifying more 'columns' (as an 'array of vector columns'), you
% can use this function to solve a set of 'separable' equations.
%
% For example a distortion function where u = U(x,y) v = V(x,y)
% And the functions U() and V() have separate coefficents, but are being
% generated from a common x,y->u,v data set.
%
% Another example is generation of a color gradient from a set of colors at
% specific coordients, such as a list x,y -> r,g,b,a.
%
% You can also use the 'vectors' to generate an inverse of the given 'matrix'
% though as a 'column first array' rather than a 'row first array'. For
% details see http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
*/
/*
  GaussJordanElimination() reduces `matrix` (rank x rank, an array of row
  pointers) to reduced row echelon form while applying the same operations
  to the `number_vectors` column vectors in `vectors`, solving the
  associated linear systems in place.  Returns MagickFalse if the matrix is
  singular or the pivot bookkeeping is inconsistent.

  Fix: the original returned directly from inside the elimination loop on
  singularity or a duplicate pivot, leaking the columns/rows/pivots work
  buffers; all exits now release them.
*/
MagickExport MagickBooleanType GaussJordanElimination(double **matrix,
  double **vectors,const size_t rank,const size_t number_vectors)
{
#define GaussJordanSwap(x,y) \
{ \
  if ((x) != (y)) \
    { \
      (x)+=(y); \
      (y)=(x)-(y); \
      (x)=(x)-(y); \
    } \
}

  double
    max,
    scale;

  MagickBooleanType
    status;

  register ssize_t
    i,
    j,
    k;

  ssize_t
    column,
    *columns,
    *pivots,
    row,
    *rows;

  columns=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*columns));
  rows=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*rows));
  pivots=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*pivots));
  if ((rows == (ssize_t *) NULL) || (columns == (ssize_t *) NULL) ||
      (pivots == (ssize_t *) NULL))
    {
      if (pivots != (ssize_t *) NULL)
        pivots=(ssize_t *) RelinquishMagickMemory(pivots);
      if (columns != (ssize_t *) NULL)
        columns=(ssize_t *) RelinquishMagickMemory(columns);
      if (rows != (ssize_t *) NULL)
        rows=(ssize_t *) RelinquishMagickMemory(rows);
      return(MagickFalse);
    }
  (void) ResetMagickMemory(columns,0,rank*sizeof(*columns));
  (void) ResetMagickMemory(rows,0,rank*sizeof(*rows));
  (void) ResetMagickMemory(pivots,0,rank*sizeof(*pivots));
  status=MagickTrue;
  column=0;
  row=0;
  for (i=0; i < (ssize_t) rank; i++)
  {
    /*
      Full pivoting: pick the largest element in any unpivoted row/column.
    */
    max=0.0;
    for (j=0; j < (ssize_t) rank; j++)
      if (pivots[j] != 1)
        {
          for (k=0; k < (ssize_t) rank; k++)
            if (pivots[k] != 0)
              {
                if (pivots[k] > 1)
                  {
                    status=MagickFalse;  /* duplicate pivot: malformed state */
                    break;
                  }
              }
            else
              if (fabs(matrix[j][k]) >= max)
                {
                  max=fabs(matrix[j][k]);
                  row=j;
                  column=k;
                }
          if (status == MagickFalse)
            break;
        }
    if (status == MagickFalse)
      break;
    pivots[column]++;
    if (row != column)
      {
        /* swap the pivot row into place, in both matrix and vectors */
        for (k=0; k < (ssize_t) rank; k++)
          GaussJordanSwap(matrix[row][k],matrix[column][k]);
        for (k=0; k < (ssize_t) number_vectors; k++)
          GaussJordanSwap(vectors[k][row],vectors[k][column]);
      }
    rows[i]=row;
    columns[i]=column;
    if (matrix[column][column] == 0.0)
      {
        status=MagickFalse;  /* singularity */
        break;
      }
    /*
      Normalize the pivot row, then eliminate the pivot column from all
      other rows (and the corresponding vector entries).
    */
    scale=PerceptibleReciprocal(matrix[column][column]);
    matrix[column][column]=1.0;
    for (j=0; j < (ssize_t) rank; j++)
      matrix[column][j]*=scale;
    for (j=0; j < (ssize_t) number_vectors; j++)
      vectors[j][column]*=scale;
    for (j=0; j < (ssize_t) rank; j++)
      if (j != column)
        {
          scale=matrix[j][column];
          matrix[j][column]=0.0;
          for (k=0; k < (ssize_t) rank; k++)
            matrix[j][k]-=scale*matrix[column][k];
          for (k=0; k < (ssize_t) number_vectors; k++)
            vectors[k][j]-=scale*vectors[k][column];
        }
  }
  if (status != MagickFalse)
    for (j=(ssize_t) rank-1; j >= 0; j--)
      if (columns[j] != rows[j])
        for (i=0; i < (ssize_t) rank; i++)
          GaussJordanSwap(matrix[i][rows[j]],matrix[i][columns[j]]);
  pivots=(ssize_t *) RelinquishMagickMemory(pivots);
  rows=(ssize_t *) RelinquishMagickMemory(rows);
  columns=(ssize_t *) RelinquishMagickMemory(columns);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x C o l u m n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixColumns() returns the number of columns in the matrix.
%
% The format of the GetMatrixColumns method is:
%
% size_t GetMatrixColumns(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/* Simple accessor; asserts validate the matrix signature in debug builds. */
MagickExport size_t GetMatrixColumns(const MatrixInfo *matrix_info)
{
  assert(matrix_info != (MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickSignature);
  return(matrix_info->columns);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixElement() returns the specified element in the matrix.
%
% The format of the GetMatrixElement method is:
%
% MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix columns.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: return the matrix element in this buffer.
%
*/
/*
  EdgeX() clamps x into the valid column range [0,columns-1]
  (replicate-edge policy for out-of-range reads).
*/
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  const ssize_t
    last=(ssize_t) columns-1;

  if (x < 0L)
    return(0L);
  return(x > last ? last : x);
}
/*
  EdgeY() clamps y into the valid row range [0,rows-1]
  (replicate-edge policy for out-of-range reads).
*/
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  const ssize_t
    last=(ssize_t) rows-1;

  if (y < 0L)
    return(0L);
  return(y > last ? last : y);
}
/*
  ReadMatrixElements() reads `length` bytes into `buffer` from the matrix
  disk file starting at byte `offset`, looping over short reads and
  retrying on EINTR.  Returns the number of bytes actually read, or -1 if
  the initial seek fails.  Mirrors WriteMatrixElements() above.
*/
static inline MagickOffsetType ReadMatrixElements(
  const MatrixInfo *restrict matrix_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  /*
    Without pread() the seek+read pair is not atomic, so serialize access
    to the shared file descriptor with the matrix semaphore.
  */
  LockSemaphoreInfo(matrix_info->semaphore);
  if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
    {
      UnlockSemaphoreInfo(matrix_info->semaphore);
      return((MagickOffsetType) -1);
    }
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX));
#else
    count=pread(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)  /* retry only interrupted reads */
          break;
      }
  }
#if !defined(MAGICKCORE_HAVE_PREAD)
  UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
  return(i);
}
MagickExport MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,void *value)
{
  MagickOffsetType
    count,
    i;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickSignature);
  /*
    Out-of-range coordinates are clamped to the nearest edge element here
    (unlike SetMatrixElement(), which rejects them outright).
  */
  i=(MagickOffsetType) EdgeY(y,matrix_info->rows)*matrix_info->columns+
    EdgeX(x,matrix_info->columns);
  if (matrix_info->type != DiskCache)
    {
      /* memory- or map-backed matrix: copy directly from elements */
      (void) memcpy(value,(unsigned char *) matrix_info->elements+i*
        matrix_info->stride,matrix_info->stride);
      return(MagickTrue);
    }
  count=ReadMatrixElements(matrix_info,i*matrix_info->stride,
    matrix_info->stride,(unsigned char *) value);
  if (count != (MagickOffsetType) matrix_info->stride)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x R o w s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixRows() returns the number of rows in the matrix.
%
% The format of the GetMatrixRows method is:
%
% size_t GetMatrixRows(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/* Simple accessor; asserts validate the matrix signature in debug builds. */
MagickExport size_t GetMatrixRows(const MatrixInfo *matrix_info)
{
  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickSignature);
  return(matrix_info->rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e a s t S q u a r e s A d d T e r m s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LeastSquaresAddTerms() adds one set of terms and associate results to the
% given matrix and vectors for solving using least-squares function fitting.
%
% The format of the AcquireMagickMatrix method is:
%
% void LeastSquaresAddTerms(double **matrix,double **vectors,
% const double *terms,const double *results,const size_t rank,
% const size_t number_vectors);
%
% A description of each parameter follows:
%
% o matrix: the square matrix to add given terms/results to.
%
% o vectors: the result vectors to add terms/results to.
%
% o terms: the pre-calculated terms (without the unknown coefficent
% weights) that forms the equation being added.
%
% o results: the result(s) that should be generated from the given terms
% weighted by the yet-to-be-solved coefficents.
%
% o rank: the rank or size of the dimensions of the square matrix.
% Also the length of vectors, and number of terms being added.
%
% o number_vectors: Number of result vectors, and number or results being
% added. Also represents the number of separable systems of equations
% that is being solved.
%
% Example of use...
%
% 2 dimensional Affine Equations (which are separable)
% c0*x + c2*y + c4*1 => u
% c1*x + c3*y + c5*1 => v
%
% double **matrix = AcquireMagickMatrix(3UL,3UL);
% double **vectors = AcquireMagickMatrix(2UL,3UL);
% double terms[3], results[2];
% ...
% for each given x,y -> u,v
% terms[0] = x;
% terms[1] = y;
% terms[2] = 1;
% results[0] = u;
% results[1] = v;
% LeastSquaresAddTerms(matrix,vectors,terms,results,3UL,2UL);
% ...
% if ( GaussJordanElimination(matrix,vectors,3UL,2UL) ) {
% c0 = vectors[0][0];
% c2 = vectors[0][1];
% c4 = vectors[0][2];
% c1 = vectors[1][0];
% c3 = vectors[1][1];
% c5 = vectors[1][2];
% }
% else
% printf("Matrix unsolvable\n);
% RelinquishMagickMatrix(matrix,3UL);
% RelinquishMagickMatrix(vectors,2UL);
%
*/
/*
  LeastSquaresAddTerms() accumulates one observation into the normal
  equations: matrix gains the outer product terms*terms', and each result
  vector gains results[v]*terms (column-major vectors, see banner above).
*/
MagickExport void LeastSquaresAddTerms(double **matrix,double **vectors,
  const double *terms,const double *results,const size_t rank,
  const size_t number_vectors)
{
  ssize_t
    u,
    v;

  for (u=0; u < (ssize_t) rank; u++)
  {
    for (v=0; v < (ssize_t) rank; v++)
      matrix[v][u]+=terms[v]*terms[u];
    for (v=0; v < (ssize_t) number_vectors; v++)
      vectors[v][u]+=results[v]*terms[u];
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a t r i x T o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MatrixToImage() returns a matrix as an image. The matrix elements must be
% of type double otherwise nonsense is returned.
%
% The format of the MatrixToImage method is:
%
% Image *MatrixToImage(const MatrixInfo *matrix_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MatrixToImage() renders the matrix as a grayscale image, linearly scaling
  element values into [0,QuantumRange].  The elements must be doubles
  (stride >= sizeof(double)), otherwise NULL is returned.

  Fixes: the pixel pointer `q` is now advanced on every column even when an
  element cannot be read (previously a failed read left q behind, shifting
  every subsequent pixel in the row); the AcquireImage() result is also
  guarded before use.
*/
MagickExport Image *MatrixToImage(const MatrixInfo *matrix_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    max_value,
    min_value,
    scale_factor,
    value;

  Image
    *image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if (matrix_info->stride < sizeof(double))
    return((Image *) NULL);
  /*
    Determine range of matrix.
  */
  (void) GetMatrixElement(matrix_info,0,0,&value);
  min_value=value;
  max_value=value;
  for (y=0; y < (ssize_t) matrix_info->rows; y++)
  {
    register ssize_t
      x;

    for (x=0; x < (ssize_t) matrix_info->columns; x++)
    {
      if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
        continue;  /* skip unreadable elements (disk-cache I/O error) */
      if (value < min_value)
        min_value=value;
      else
        if (value > max_value)
          max_value=value;
    }
  }
  /*
    Map [min,max] onto [0,QuantumRange]; a constant nonzero matrix maps to
    QuantumRange, an all-zero matrix to 0.
  */
  if ((min_value == 0.0) && (max_value == 0.0))
    scale_factor=0;
  else
    if (min_value == max_value)
      {
        scale_factor=(double) QuantumRange/min_value;
        min_value=0;
      }
    else
      scale_factor=(double) QuantumRange/(max_value-min_value);
  /*
    Convert matrix to image.
  */
  image=AcquireImage((ImageInfo *) NULL);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  image->columns=matrix_info->columns;
  image->rows=matrix_info->rows;
  image->colorspace=GRAYColorspace;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      value;

    register PixelPacket
      *q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetMatrixElement(matrix_info,x,y,&value) != MagickFalse)
        {
          value=scale_factor*(value-min_value);
          q->red=ClampToQuantum(value);
          q->green=q->red;
          q->blue=q->red;
        }
      q++;  /* always advance so pixels stay aligned with columns */
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N u l l M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NullMatrix() sets all elements of the matrix to zero.
%
% The format of the NullMatrix method is:
%
% MagickBooleanType *NullMatrix(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/*
  NullMatrix() sets all elements of the matrix to zero.  Returns MagickTrue
  on success, MagickFalse if a disk-cache write fails.

  Fix: the disk path previously looped the full matrix length once per row,
  writing rows*length bytes (rows-fold too many) and bloating the backing
  file; it now writes exactly matrix_info->length zero bytes.
*/
MagickExport MagickBooleanType NullMatrix(MatrixInfo *matrix_info)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

  unsigned char
    value;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickSignature);
  if (matrix_info->type != DiskCache)
    {
      /* memory- or map-backed matrix: zero the buffer directly */
      (void) ResetMagickMemory(matrix_info->elements,0,(size_t)
        matrix_info->length);
      return(MagickTrue);
    }
  value=0;
  (void) lseek(matrix_info->file,0,SEEK_SET);
  for (i=0; i < (MagickOffsetType) matrix_info->length; i++)
  {
    count=write(matrix_info->file,&value,sizeof(value));
    if (count != (ssize_t) sizeof(value))
      break;
  }
  return(i < (MagickOffsetType) matrix_info->length ? MagickFalse :
    MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e l i n q u i s h M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RelinquishMagickMatrix() frees the previously acquired matrix (array of
% pointers to arrays of doubles).
%
% The format of the RelinquishMagickMatrix method is:
%
% double **RelinquishMagickMatrix(double **matrix,
% const size_t number_rows)
%
% A description of each parameter follows:
%
% o matrix: the matrix to relinquish
%
% o number_rows: the first dimension of the acquired matrix (number of
% pointers)
%
*/
/*
  RelinquishMagickMatrix() frees a matrix allocated by
  AcquireMagickMatrix(): each of the number_rows row arrays, then the
  pointer array itself.  Always returns NULL; a NULL matrix is a no-op.
*/
MagickExport double **RelinquishMagickMatrix(double **matrix,
  const size_t number_rows)
{
  ssize_t
    r;

  if (matrix == (double **) NULL)
    return((double **) NULL);
  for (r=(ssize_t) number_rows-1; r >= 0; r--)
    matrix[r]=(double *) RelinquishMagickMemory(matrix[r]);
  return((double **) RelinquishMagickMemory(matrix));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetMatrixElement() sets the specified element in the matrix.
%
% The format of the SetMatrixElement method is:
%
% MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix columns.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: set the matrix element to this value.
%
*/
MagickExport MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,const void *value)
{
  MagickOffsetType
    count,
    i;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickSignature);
  /*
    Out-of-range coordinates are rejected here (unlike GetMatrixElement(),
    which clamps them to the nearest edge element).
  */
  i=(MagickOffsetType) y*matrix_info->columns+x;
  if ((i < 0) ||
      ((MagickSizeType) (i*matrix_info->stride) >= matrix_info->length))
    return(MagickFalse);
  if (matrix_info->type != DiskCache)
    {
      /* memory- or map-backed matrix: copy directly into elements */
      (void) memcpy((unsigned char *) matrix_info->elements+i*
        matrix_info->stride,value,matrix_info->stride);
      return(MagickTrue);
    }
  count=WriteMatrixElements(matrix_info,i*matrix_info->stride,
    matrix_info->stride,(unsigned char *) value);
  if (count != (MagickOffsetType) matrix_info->stride)
    return(MagickFalse);
  return(MagickTrue);
}
|
dropout-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file dropout-inl.h
* \brief
* \author Bing Xu, Da Zheng, Hang Zhang
*/
#ifndef MXNET_OPERATOR_NN_DROPOUT_INL_H_
#define MXNET_OPERATOR_NN_DROPOUT_INL_H_
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../random/sampler.h"
#include "../tensor/elemwise_binary_broadcast_op.h"
#if defined(USE_MKL) && defined(_OPENMP)
#include <omp.h>
#include <mkl_vml_functions.h>
#include <mkl_vsl.h>
#endif // USE_MKL && _OPENMP
namespace dropout {
enum DropoutOpInputs {kData};             // forward input: the tensor to drop out
enum DropoutOpOutputs {kOut, kMask};      // forward outputs: dropped data and the scaled mask
enum DropoutOpForwardResource {kRandom};  // requested resource: parallel random generator
enum DropoutOpMode {kTraining, kAlways};  // apply dropout only in training, or always
}  // namespace dropout
namespace mxnet {
namespace op {
const int MAX_DIM = 5;
struct DropoutParam : public dmlc::Parameter<DropoutParam> {
  float p;     // fraction of the input dropped during training (keep prob = 1 - p)
  int mode;    // dropout::kTraining or dropout::kAlways
  TShape axes; // axes along which one mask value is shared (variational dropout);
               // empty means standard per-element dropout
  DMLC_DECLARE_PARAMETER(DropoutParam) {
    DMLC_DECLARE_FIELD(p).set_default(0.5)
    .set_range(0, 1)
    .describe("Fraction of the input that gets dropped out during training time.");
    DMLC_DECLARE_FIELD(mode)
    .add_enum("training", dropout::kTraining)
    .add_enum("always", dropout::kAlways)
    .set_default(dropout::kTraining)
    .describe("Whether to only turn on dropout during training or to also turn on for inference.");
    DMLC_DECLARE_FIELD(axes).set_default(TShape())
    .describe("Axes for variational dropout kernel.");
  }
};  // struct DropoutParam
template<typename xpu, typename DType>
class DropoutOp {
#if defined(USE_MKL) && defined(_OPENMP)
  // Fill r[0..n) with Bernoulli(p) samples (0/1 ints) using MKL VSL, one
  // independent stream per OpenMP thread over a contiguous slice of r.
  // NOTE(review): `gen` is a cpu generator while Impl is instantiated with
  // xpu; this code is only reachable on the cpu path (USE_MKL) — confirm.
  static void BernoulliGenerate(common::random::RandGenerator<cpu, DType> gen,
                                int n, double p, int* r) {
    typename RandGenerator<xpu, DType>::Impl genImpl(&gen, 1);
    // Derive the base seed from the op's RNG so runs are seeded consistently.
    const int seed = 17 + genImpl.rand() % 4096;  // NOLINT(runtime/threadsafe_fn)
    const int nthr = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
#pragma omp parallel num_threads(nthr)
    {
      // Each thread fills its slice [my_offset, my_offset + my_amount).
      const int ithr = omp_get_thread_num();
      const int avg_amount = (n + nthr - 1) / nthr;
      const int my_offset = ithr * avg_amount;
      const int my_amount = std::min(my_offset + avg_amount, n) - my_offset;
      if (my_amount > 0) {
        VSLStreamStatePtr stream;
        vslNewStream(&stream, VSL_BRNG_MCG31, seed + my_offset);
        vslSkipAheadStream(stream, my_offset);
        viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, stream, my_amount, r + my_offset, p);
        vslDeleteStream(&stream);
      }
    }
  }
  // MKL forward pass: out = data * mask / pkeep, with the mask drawn by
  // MKL's Bernoulli generator.  Returns false (caller falls back to the
  // generic DropoutKernel) when MKL cannot be used for this DType.
  static bool MSHADOW_CINLINE MKLForward(mshadow::Stream<cpu> *s, RandGenerator<cpu, DType> *pgen,
                                         const double pkeep,
                                         const std::vector<TBlob> &in_data,
                                         const std::vector<TBlob> &out_data) {
    // BernoulliGenerate expects an array of int, so for types smaller than int, the mask buffer
    // will be too small, so we can't use MKL in those cases
    if (sizeof(DType) >= sizeof(int)) {
      Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
      Tensor<xpu, 2, DType> data = in_data[dropout::kData].FlatTo2D<xpu, DType>(s);
      Tensor<xpu, 2, DType> out = out_data[dropout::kOut].FlatTo2D<xpu, DType>(s);
      DType *outptr = out.dptr_;
      DType *dataptr = data.dptr_;
      // The DType mask buffer is reinterpreted as int storage for the 0/1
      // Bernoulli draws; MKLBackward reads it back the same way.
      auto maskptr = reinterpret_cast<int *>(mask.dptr_);
      int count = mask.shape_[0] * mask.shape_[1];
      BernoulliGenerate(*pgen, count, pkeep, maskptr);
      const float pk_1 = 1.0f / pkeep;  // inverted-dropout scale factor
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
      for (int i = 0; i < count; ++i) {
        outptr[i] = dataptr[i] * maskptr[i] * pk_1;
      }
      return true;
    }
    return false;
  }
  // MKL backward pass: in_grad = out_grad * mask / pkeep, reading the int
  // mask written by MKLForward.  Returns false when MKL was not usable for
  // this DType (caller falls back to the generic path).
  static bool MSHADOW_CINLINE MKLBackward(mshadow::Stream<cpu> *s, const double pkeep,
                                          const std::vector<TBlob> &in_grad,
                                          const std::vector<TBlob> &out_data,
                                          const std::vector<TBlob> &out_grad) {
    if (sizeof(DType) >= sizeof(int)) {
      Tensor<xpu, 2, DType> grad = out_grad[dropout::kOut].FlatTo2D<xpu, DType>(s);
      Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
      Tensor<xpu, 2, DType> gdata = in_grad[dropout::kData].FlatTo2D<xpu, DType>(s);
      DType *ingradptr = gdata.dptr_;
      const DType *outgradptr = grad.dptr_;
      // mask holds raw 0/1 ints (see MKLForward), not pre-scaled DType values
      auto maskptr = reinterpret_cast<int *>(mask.dptr_);
      int count = mask.shape_[0] * mask.shape_[1];
      const float pk_1 = 1.0f / pkeep;
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
      for (int i = 0; i < count; ++i) {
        ingradptr[i] = outgradptr[i] * maskptr[i] * pk_1;
      }
      return true;
    }
    return false;
  }
#ifdef __CUDACC__
  // GPU builds never use MKL: these overloads exist only so the call sites
  // in Forward/Backward compile for gpu streams; returning false always
  // routes execution to the generic RNG kernels.
  static bool MSHADOW_CINLINE MKLForward(mshadow::Stream<gpu> *s, RandGenerator<gpu, DType> *pgen,
                                         const double pkeep,
                                         const std::vector<TBlob> &in_data,
                                         const std::vector<TBlob> &out_data) {
    return false;
  }
  static bool MSHADOW_CINLINE MKLBackward(mshadow::Stream<gpu> *s, const double pkeep,
                                          const std::vector<TBlob> &in_grad,
                                          const std::vector<TBlob> &out_data,
                                          const std::vector<TBlob> &out_grad) {
    return false;
  }
#endif  // __CUDACC__
#else  // #if defined(USE_MKL) && defined(_OPENMP)
  // MKL (or OpenMP) unavailable: always report failure so callers take the
  // portable DropoutKernel/BernoulliKernel paths instead.
  static bool MSHADOW_CINLINE MKLForward(mshadow::Stream<xpu> *s, RandGenerator<xpu, DType> *pgen,
                                         const double pkeep,
                                         const std::vector<TBlob> &in_data,
                                         const std::vector<TBlob> &out_data) {
    return false;
  }
  static bool MSHADOW_CINLINE MKLBackward(mshadow::Stream<xpu> *s, const double pkeep,
                                          const std::vector<TBlob> &in_grad,
                                          const std::vector<TBlob> &out_data,
                                          const std::vector<TBlob> &out_grad) {
    return false;
  }
#endif  // #if defined(USE_MKL) && defined(_OPENMP)
public:
  /*!
   * \brief Dropout kernel, compute dropout tensor
   */
  struct DropoutKernel {
    /*!
     * \brief Dropout kernel function
     * \param id Thread number (0-based representing count)
     * \param gen Random number generator
     * \param N Total number of items in the output
     * \param step Step between items, related to parallelism
     * \param dropout_out Output dropout values
     * \param mask_out Output mask (is multiplied to create dropout output, may be 0)
     * \param input_data Input data to perform the dropout on
     * \param pkeep Dropout rate (keep when the generated random number is less than this value)
     */
    MSHADOW_XINLINE static void Map(int id,
                                    RandGenerator<xpu, DType> gen,
                                    const int N,
                                    const int step,
                                    DType *dropout_out,
                                    DType *mask_out,
                                    const DType *input_data,
                                    const real_t pkeep) {
      RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, {
        const real_t rand_num = static_cast<real_t>(genImpl.uniform());
        // Inverted dropout: the mask is (rand < pkeep ? 1 : 0) scaled by
        // 1/pkeep so the output's expected value matches the input.
        mask_out[i] = mshadow_op::threshold::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep);
        dropout_out[i] = input_data[i] * mask_out[i];
      });
    }
  };
  struct BernoulliKernel {
    /*! \brief Bernoulli kernel for generating mask
     *  Writes only the scaled keep/drop mask (no data multiply); used by the
     *  variational-dropout path, which broadcasts the mask over the data.
     */
    MSHADOW_XINLINE static void Map(int id,
                                    RandGenerator<xpu, DType> gen,
                                    const int N,
                                    const int step,
                                    DType *mask_out,
                                    const real_t pkeep) {
      RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, {
        const real_t rand_num = static_cast<real_t>(genImpl.uniform());
        // Same inverted-dropout scaling as DropoutKernel above.
        mask_out[i] = mshadow_op::threshold::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep);
      });
    }
  };
  /*! \brief Capture operator parameters; pkeep_ is the keep probability (1 - p). */
  void Init(const DropoutParam &param) {
    this->pkeep_ = 1.0f - param.p;
    this->mode_ = static_cast<dropout::DropoutOpMode>(param.mode);
    this->axes_ = param.axes;
  }
/*!
* \brief Forward pass.
* in_data:  [kData] input tensor.
* out_data: [kOut] dropout output; in training also [kMask], the scaled mask.
* In training (or mode kAlways) a mask is sampled and applied; otherwise the
* input is copied (or accumulated per `req`) through unchanged.
*/
void Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data) {
if (req[dropout::kOut] != kNullOp) {
CHECK_EQ(in_data.size(), 1U);
if (ctx.is_train) {
CHECK_EQ(out_data.size(), 2U);
}
Stream<xpu> *s = ctx.get_stream<xpu>();
const TBlob &out = out_data[dropout::kOut];
if (ctx.is_train || this->mode_ == dropout::kAlways) {
RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
CHECK_NOTNULL(pgen);
// Axis-restricted dropout never uses the MKL path (short-circuit below);
// otherwise MKL is tried first and this branch is the generic fallback.
if (this->axes_.ndim() != 0 || !MKLForward(s, pgen, this->pkeep_, in_data, out_data)) {
const TBlob &mask = out_data[dropout::kMask];
CHECK(req[dropout::kOut] != kAddTo);
if (this->axes_.ndim() == 0) {
// standard case for dropout
LaunchRNG<DropoutKernel, xpu>(s, pgen, out.Size(),
out.dptr<DType>(),
mask.dptr<DType>(),
in_data[dropout::kData].dptr<DType>(),
this->pkeep_);
return;
}
// initialize the mask
LaunchRNG<BernoulliKernel, xpu>(s, pgen, mask.Size(),
mask.dptr<DType>(),
this->pkeep_);
// broadcast mul
TShape new_lshape, new_rshape, new_oshape;
int ndim = BinaryBroadcastShapeCompact(in_data[dropout::kData].shape_,
mask.shape_, out.shape_,
&new_lshape, &new_rshape, &new_oshape);
if (!ndim) {
// shapes already agree: plain element-wise multiply
MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
s, out.Size(), out.dptr<DType>(), in_data[dropout::kData].dptr<DType>(),
mask.dptr<DType>());
});
} else {
// broadcast the (smaller) mask over the data shape
BROADCAST_NDIM_SWITCH(ndim, NDim, {
mshadow::Shape<NDim> oshape = new_oshape.get<NDim>();
mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>());
mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>());
mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, DType,
mshadow_op::mul>, xpu>::
template LaunchEx(s, new_oshape.Size(), req[dropout::kOut],
lstride, rstride, oshape,
in_data[dropout::kData].dptr<DType>(),
mask.dptr<DType>(), out.dptr<DType>());
});
}
}
} else {
// inference: dropout is the identity
const TBlob& data = in_data[dropout::kData];
if (req[dropout::kOut] == kWriteTo) {
mxnet_op::copy(s, out, data);
} else {
MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch(
s, out.Size(), out.dptr<DType>(), data.dptr<DType>());
});
}
}
}
}
/*!
* \brief Backward pass: in_grad = out_grad * mask.
* out_grad: [kOut] upstream gradient.
* out_data: [kMask] the (already 1/pkeep-scaled) mask saved by Forward.
* in_grad:  [kData] gradient w.r.t. the input.
* Outside training (and mode != kAlways) the gradient passes through
* unchanged, mirroring the identity Forward.
*/
void Backward(const OpContext &ctx,
              const std::vector<TBlob> &out_grad,
              const std::vector<TBlob> &out_data,
              const std::vector<OpReqType> &req,
              const std::vector<TBlob> &in_grad) {
  using namespace mshadow;
  using namespace mshadow::expr;
  Stream<xpu> *s = ctx.get_stream<xpu>();
  if (ctx.is_train || mode_ == dropout::kAlways) {
    // Axis-restricted dropout never uses the MKL path (short-circuit below);
    // otherwise MKL is tried first and this branch is the generic fallback.
    if (this->axes_.ndim() != 0 || !MKLBackward(s, this->pkeep_, in_grad, out_data, out_grad)) {
      const TBlob &gdata = in_grad[dropout::kData];
      const TBlob &grad = out_grad[dropout::kOut];
      const TBlob &mask = out_data[dropout::kMask];
      if (this->axes_.ndim() == 0) {
        // standard case for dropout
        CHECK_EQ(grad.Size(), mask.Size());
        MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
          mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
            s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>());
        });
        return;
      }
      // broadcast mul
      TShape new_lshape, new_rshape, new_oshape;
      int ndim = BinaryBroadcastShapeCompact(grad.shape_,
                                             mask.shape_, gdata.shape_,
                                             &new_lshape, &new_rshape, &new_oshape);
      if (!ndim) {
        MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
          mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
            s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>());
        });
      } else {
        BROADCAST_NDIM_SWITCH(ndim, NDim, {
          mshadow::Shape<NDim> oshape = new_oshape.get<NDim>();
          mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>());
          mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>());
          // NOTE(review): this previously passed req[0]; use the named index
          // for consistency with the non-broadcast branches above (kData is
          // the first entry, so req[0] and req[dropout::kData] should be the
          // same slot — confirm against the dropout enum).
          mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, DType,
                                                             mshadow_op::mul>, xpu>::
          template LaunchEx(s, new_oshape.Size(), req[dropout::kData],
                            lstride, rstride, oshape,
                            grad.dptr<DType>(), mask.dptr<DType>(), gdata.dptr<DType>());
        });
      }
    }
  } else {
    // not training: gradient passes straight through
    const TBlob& gdata = in_grad[dropout::kData];
    const TBlob& grad = out_grad[dropout::kOut];
    if (req[dropout::kData] == kWriteTo) {
      mxnet_op::copy(s, gdata, grad);
    } else {
      MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch(
          s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>());
      });
    }
  }
}
private:
/*! \brief Dropout rate (keep when the generated random number is less than this value) */
real_t pkeep_;
/*! \brief Dropout mode */
dropout::DropoutOpMode mode_;
/*! \brief Axes for the broadcast mask; empty means standard element-wise dropout */
TShape axes_;
}; // class DropoutOp
/*! \brief Functional entry point for the Dropout forward pass. */
template<typename xpu>
void DropoutCompute(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const DropoutParam& param = nnvm::get<DropoutParam>(attrs.parsed);
MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
// one operator instance per (thread, DType), re-initialized on every call
static thread_local DropoutOp<xpu, DType> op;
op.Init(param);
op.Forward(ctx, inputs, req, outputs);
});
}
/*! \brief Functional entry point for the Dropout backward pass.
*  inputs[0] is the output gradient and inputs[1] the mask saved by the
*  forward pass; outputs[0] receives the input gradient.
*/
template<typename xpu>
void DropoutGradCompute(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const DropoutParam& param = nnvm::get<DropoutParam>(attrs.parsed);
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1);
CHECK_EQ(req.size(), 1);
// repack the flat input list into the out_grad/out_data layout Backward expects
std::vector<TBlob> out_grads(2);
std::vector<TBlob> out_data(2);
out_grads[dropout::kOut] = inputs[0];
out_data[dropout::kMask] = inputs[1];
MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
// one operator instance per (thread, DType), re-initialized on every call
static thread_local DropoutOp<xpu, DType> op;
op.Init(param);
op.Backward(ctx, out_grads, out_data, req, outputs);
});
}
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_NN_DROPOUT_INL_H_
|
GB_unop__isfinite_bool_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__isfinite_bool_fc32)
// op(A') function: GB (_unop_tran__isfinite_bool_fc32)
// C type: bool
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = (aij)
// unaryop: cij = GB_cisfinitef (aij)
// type of the entries of the input matrix A
#define GB_ATYPE \
GxB_FC32_t
// type of the entries of the output matrix C
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
// access the p-th entry of the C value array
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_cisfinitef (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = (aij) ; \
Cx [pC] = GB_cisfinitef (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISFINITE || GxB_NO_BOOL || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = isfinite (Ax [p]) for all anz entries, in parallel; handles both
// the full/sparse case (Ab == NULL) and the bitmap case (Ab marks presence).
GrB_Info GB (_unop_apply__isfinite_bool_fc32)
(
bool *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
// full/sparse case: every one of the anz entries is present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = (aij) ;
Cx [p] = GB_cisfinitef (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = (aij) ;
Cx [p] = GB_cisfinitef (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = isfinite (A') : the transpose loop body is textually included from
// GB_unop_transpose.c, driven by the GB_* macros defined above.
GrB_Info GB (_unop_tran__isfinite_bool_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces, // workspaces for the parallel transpose
const int64_t *restrict A_slice, // how A is sliced across threads
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
main.c | /**
* @file main.c
* @brief
*
*
* @author Yu Li, liyu@tjufe.edu.cn
*
* Created: 2020/9/13
* Revision: none
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "ops.h"
int TestAppCCS (int argc, char *argv[]);
int TestAppLAPACK(int argc, char *argv[]);
int TestAppHYPRE (int argc, char *argv[]);
int TestAppPHG (int argc, char *argv[]);
int TestAppSLEPC (int argc, char *argv[]);
int TestAppPAS_LAPACK(int argc, char *argv[]);
int TestAppPAS_CCS (int argc, char *argv[]);
/**
 * @brief Test driver: runs one of the TestApp* entry points declared above.
 *        Only TestAppCCS is currently enabled; the others are kept commented
 *        out for manual selection when building against other backends.
 */
int main(int argc, char *argv[])
{
#if OPS_USE_MEMWATCH
/* enable memwatch heap statistics collection (level 2) */
mwStatistics( 2 );
#endif
#if OPS_USE_OMP
/* print one line per OpenMP thread to confirm the parallel setup */
#pragma omp parallel num_threads(OMP_NUM_THREADS)
{
int id = omp_get_thread_num();
printf("%d thread\n",id);
}
#endif
//TestAppLAPACK(argc, argv);
TestAppCCS(argc, argv);
//TestAppHYPRE(argc, argv);
//TestAppPHG(argc, argv);
//TestAppSLEPC(argc, argv);
/* create a PAS matrix to test */
//TestAppPAS_LAPACK(argc, argv);
//TestAppPAS_CCS (argc, argv);
//TestAppPAS_SLEPC (argc, argv);
return 0;
}
|
PlasticStrainMapping.h | /******************************************************************************
* SOFA, Simulation Open-Framework Architecture, development version *
* (c) 2006-2017 INRIA, USTL, UJF, CNRS, MGH *
* *
* This program is free software; you can redistribute it and/or modify it *
* under the terms of the GNU Lesser General Public License as published by *
* the Free Software Foundation; either version 2.1 of the License, or (at *
* your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, but WITHOUT *
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or *
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License *
* for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with this program. If not, see <http://www.gnu.org/licenses/>. *
*******************************************************************************
* Authors: The SOFA Team and external contributors (see Authors.txt) *
* *
* Contact information: contact@sofa-framework.org *
******************************************************************************/
#ifndef SOFA_COMPONENT_MAPPING_PlasticStrainMAPPING_H
#define SOFA_COMPONENT_MAPPING_PlasticStrainMAPPING_H
#include <Flexible/config.h>
#include "BaseStrainMapping.h"
#include "PlasticStrainJacobianBlock.h"
#include "../types/StrainTypes.h"
#include <sofa/helper/OptionsGroup.h>
#include <sofa/helper/IndexOpenMP.h>
namespace sofa
{
namespace component
{
namespace mapping
{
/// Decompose the total strain to an elastic strain + a plastic strain
///
/// @author Matthieu Nesme
///
template <class TStrain>
class PlasticStrainMapping : public BaseStrainMappingT<defaulttype::PlasticStrainJacobianBlock<TStrain> >
{
public:
typedef defaulttype::PlasticStrainJacobianBlock<TStrain> BlockType;
typedef BaseStrainMappingT<BlockType> Inherit;
typedef typename Inherit::Real Real;
SOFA_CLASS(SOFA_TEMPLATE(PlasticStrainMapping,TStrain), SOFA_TEMPLATE(BaseStrainMappingT,BlockType));
/// @name Different ways to decompose the strain
//@{
enum PlasticMethod { ADDITION=0, MULTIPLICATION, NB_PlasticMethod }; ///< ADDITION -> Müller method (faster), MULTIPLICATION -> Fedkiw method [Irving04]
Data<helper::OptionsGroup> f_method;
//@}
/// @name Plasticity parameters such as "Interactive Virtual Materials", Muller & Gross, GI 2004
//@{
Data<helper::vector<Real> > _max;
Data<helper::vector<Real> > _yield;
helper::vector<Real> _squaredYield; ///< cache of _yield[i]^2, rebuilt in reinit()
Data<helper::vector<Real> > _creep; ///< this parameter is different from the article, here it includes the multiplication by dt
//@}
/// Rebuild the squared-yield cache, then delegate to the base reinit.
virtual void reinit()
{
_squaredYield.resize(_yield.getValue().size());
for(size_t i=0;i<_yield.getValue().size();i++) _squaredYield[i] = _yield.getValue()[i] * _yield.getValue()[i];
Inherit::reinit();
}
/// Reset the plastic state accumulated in every Jacobian block.
virtual void reset()
{
//serr<<"PlasticStrainMapping::reset"<<sendl;
Inherit::reset();
for( size_t i=0 ; i<this->jacobian.size() ; i++ )
this->jacobian[i].reset();
}
protected:
PlasticStrainMapping( core::State<TStrain>* from = NULL, core::State<TStrain>* to = NULL )
: Inherit ( from, to )
, f_method ( initData ( &f_method,"method","" ) )
, _max(initData(&_max,helper::vector<Real>((int)1,(Real)0.1f),"max","Plastic Max Threshold (2-norm of the strain)"))
, _yield(initData(&_yield,helper::vector<Real>((int)1,(Real)0.0001f),"yield","Plastic Yield Threshold (2-norm of the strain)"))
, _creep(initData(&_creep,helper::vector<Real>((int)1,(Real)1.f),"creep","Plastic Creep Factor * dt [0,1]. 1 <-> pure plastic ; <1 <-> visco-plastic (warning depending on dt)"))
{
helper::OptionsGroup Options;
Options.setNbItems( NB_PlasticMethod );
Options.setItemName( ADDITION, "addition" );
Options.setItemName( MULTIPLICATION, "multiplication" );
Options.setSelectedItem( ADDITION );
f_method.setValue( Options );
}
virtual ~PlasticStrainMapping() { }
/// Apply the selected plastic decomposition block by block.
/// Per-block parameters (_max, _yield^2, _creep) fall back to entry 0 when
/// the user supplied a single value for the whole object.
virtual void apply( const core::MechanicalParams * /*mparams*/ , Data<typename Inherit::OutVecCoord>& dOut, const Data<typename Inherit::InVecCoord>& dIn )
{
helper::ReadAccessor<Data<typename Inherit::InVecCoord> > inpos (*this->fromModel->read(core::ConstVecCoordId::position()));
helper::ReadAccessor<Data<typename Inherit::OutVecCoord> > outpos (*this->toModel->read(core::ConstVecCoordId::position()));
if(inpos.size()!=outpos.size()) this->resizeOut();
typename Inherit::OutVecCoord& out = *dOut.beginWriteOnly();
const typename Inherit::InVecCoord& in = dIn.getValue();
switch( f_method.getValue().getSelectedId() )
{
case MULTIPLICATION:
{
#ifdef _OPENMP
#pragma omp parallel for if (this->d_parallel.getValue())
#endif
for(sofa::helper::IndexOpenMP<unsigned int>::type i=0 ; i<this->jacobian.size() ; i++ )
{
out[i] = typename Inherit::OutCoord();
Real Max=(_max.getValue().size()<=i)?_max.getValue()[0]:_max.getValue()[i],SquaredYield=(_squaredYield.size()<=i)?_squaredYield[0]:_squaredYield[i] ,Creep=(_creep.getValue().size()<=i)?_creep.getValue()[0]:_creep.getValue()[i];
this->jacobian[i].addapply_multiplication( out[i], in[i], Max, SquaredYield, Creep );
}
break;
}
case ADDITION:
{
#ifdef _OPENMP
#pragma omp parallel for if (this->d_parallel.getValue())
#endif
for(sofa::helper::IndexOpenMP<unsigned int>::type i=0 ; i<this->jacobian.size() ; i++ )
{
out[i] = typename Inherit::OutCoord();
Real Max=(_max.getValue().size()<=i)?_max.getValue()[0]:_max.getValue()[i],SquaredYield=(_squaredYield.size()<=i)?_squaredYield[0]:_squaredYield[i] ,Creep=(_creep.getValue().size()<=i)?_creep.getValue()[0]:_creep.getValue()[i];
this->jacobian[i].addapply_addition( out[i], in[i], Max, SquaredYield, Creep );
}
break;
}
}
dOut.endEdit();
}
}; // class PlasticStrainMapping
} // namespace mapping
} // namespace component
} // namespace sofa
#endif // SOFA_COMPONENT_MAPPING_PlasticStrainMAPPING_H
|
print.c | /*
Copyright (c) 2010-2011, Jun Namikawa <jnamika@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include "utils.h"
#include "print.h"
#include "mregn.h"
#include "entropy.h"
#include "mregn_lyapunov.h"
/* Opens `size` files whose names derive from template_filename: the first
 * "XXXXXX" in the template is overwritten with a zero-padded 6-digit index;
 * if the template contains no "XXXXXX", ".NNNNNN" is appended instead.
 * Exits the program if any fopen fails. */
static void fopen_array (
FILE **fp_array,
int size,
const char *template_filename,
const char *mode)
{
char str[7], *filename, *p;
int length = strlen(template_filename);
MALLOC(filename, length + 1);
strcpy(filename, template_filename);
p = strstr(filename, "XXXXXX");
if (p == NULL) {
/* no placeholder: grow the buffer and append "." + 6 digits + NUL */
REALLOC(filename, length + 8);
filename[length] = '.';
filename[length + 7] = '\0';
p = filename + length + 1;
}
for (int i = 0; i < size; i++) {
/* overwrite the placeholder with the zero-padded index */
snprintf(str, sizeof(str), "%.6d", i);
memmove(p, str, 6);
fp_array[i] = fopen(filename, mode);
if (fp_array[i] == NULL) {
print_error_msg();
goto error;
}
}
FREE(filename);
return;
error:
exit(EXIT_FAILURE);
}
/* Opens every output stream requested by the I/O settings in `gp`.
 * A stream (or stream array) is opened only when its filename is non-empty;
 * otherwise the corresponding member is set to NULL so callers can test it.
 * Per-series state files are opened as arrays of rnn->series_num streams via
 * fopen_array().  On any failure an error is printed and the program exits. */
void init_output_files (
const struct general_parameters *gp,
const struct recurrent_neural_network *rnn,
struct output_files *fp_list,
const char *mode)
{
fp_list->array_size = rnn->series_num;
if (strlen(gp->iop.state_filename) > 0) {
MALLOC(fp_list->fp_wstate_array, fp_list->array_size);
fopen_array(fp_list->fp_wstate_array, fp_list->array_size,
gp->iop.state_filename, mode);
} else {
fp_list->fp_wstate_array = NULL;
}
if (strlen(gp->iop.mre_state_filename) > 0) {
MALLOC(fp_list->fp_wmre_state_array, fp_list->array_size);
fopen_array(fp_list->fp_wmre_state_array, fp_list->array_size,
gp->iop.mre_state_filename, mode);
} else {
fp_list->fp_wmre_state_array = NULL;
}
if (strlen(gp->iop.closed_state_filename) > 0) {
MALLOC(fp_list->fp_wclosed_state_array, fp_list->array_size);
fopen_array(fp_list->fp_wclosed_state_array, fp_list->array_size,
gp->iop.closed_state_filename, mode);
} else {
fp_list->fp_wclosed_state_array = NULL;
}
if (strlen(gp->iop.closed_mre_state_filename) > 0) {
MALLOC(fp_list->fp_wclosed_mre_state_array, fp_list->array_size);
fopen_array(fp_list->fp_wclosed_mre_state_array, fp_list->array_size,
gp->iop.closed_mre_state_filename, mode);
} else {
fp_list->fp_wclosed_mre_state_array = NULL;
}
/* the remaining outputs are single streams */
if (strlen(gp->iop.weight_filename) > 0) {
fp_list->fp_wweight = fopen(gp->iop.weight_filename, mode);
if (fp_list->fp_wweight == NULL) goto error;
} else {
fp_list->fp_wweight = NULL;
}
if (strlen(gp->iop.threshold_filename) > 0) {
fp_list->fp_wthreshold = fopen(gp->iop.threshold_filename, mode);
if (fp_list->fp_wthreshold == NULL) goto error;
} else {
fp_list->fp_wthreshold = NULL;
}
if (strlen(gp->iop.tau_filename) > 0) {
fp_list->fp_wtau = fopen(gp->iop.tau_filename, mode);
if (fp_list->fp_wtau == NULL) goto error;
} else {
fp_list->fp_wtau = NULL;
}
if (strlen(gp->iop.init_filename) > 0) {
fp_list->fp_winit = fopen(gp->iop.init_filename, mode);
if (fp_list->fp_winit == NULL) goto error;
} else {
fp_list->fp_winit = NULL;
}
/* adaptive-learning-rate log is only opened when the feature is enabled */
if (strlen(gp->iop.adapt_lr_filename) > 0 && gp->mp.use_adaptive_lr) {
fp_list->fp_wadapt_lr = fopen(gp->iop.adapt_lr_filename, mode);
if (fp_list->fp_wadapt_lr == NULL) goto error;
} else {
fp_list->fp_wadapt_lr = NULL;
}
if (strlen(gp->iop.error_filename) > 0) {
fp_list->fp_werror = fopen(gp->iop.error_filename, mode);
if (fp_list->fp_werror == NULL) goto error;
} else {
fp_list->fp_werror = NULL;
}
if (strlen(gp->iop.closed_error_filename) > 0) {
fp_list->fp_wclosed_error = fopen(gp->iop.closed_error_filename, mode);
if (fp_list->fp_wclosed_error == NULL) goto error;
} else {
fp_list->fp_wclosed_error = NULL;
}
if (strlen(gp->iop.lyapunov_filename) > 0) {
fp_list->fp_wlyapunov = fopen(gp->iop.lyapunov_filename, mode);
if (fp_list->fp_wlyapunov == NULL) goto error;
} else {
fp_list->fp_wlyapunov = NULL;
}
if (strlen(gp->iop.entropy_filename) > 0) {
fp_list->fp_wentropy = fopen(gp->iop.entropy_filename, mode);
if (fp_list->fp_wentropy == NULL) goto error;
} else {
fp_list->fp_wentropy = NULL;
}
return;
error:
print_error_msg();
exit(EXIT_FAILURE);
}
/* Closes every stream opened by init_output_files() and releases the
 * per-series stream arrays.  NULL members (disabled outputs) are skipped. */
void free_output_files (struct output_files *fp_list)
{
if (fp_list->fp_wstate_array) {
for (int i = 0; i < fp_list->array_size; i++) {
fclose(fp_list->fp_wstate_array[i]);
}
FREE(fp_list->fp_wstate_array);
}
if (fp_list->fp_wmre_state_array) {
for (int i = 0; i < fp_list->array_size; i++) {
fclose(fp_list->fp_wmre_state_array[i]);
}
FREE(fp_list->fp_wmre_state_array);
}
if (fp_list->fp_wclosed_state_array) {
for (int i = 0; i < fp_list->array_size; i++) {
fclose(fp_list->fp_wclosed_state_array[i]);
}
FREE(fp_list->fp_wclosed_state_array);
}
if (fp_list->fp_wclosed_mre_state_array) {
for (int i = 0; i < fp_list->array_size; i++) {
fclose(fp_list->fp_wclosed_mre_state_array[i]);
}
FREE(fp_list->fp_wclosed_mre_state_array);
}
if (fp_list->fp_wweight) {
fclose(fp_list->fp_wweight);
}
if (fp_list->fp_wthreshold) {
fclose(fp_list->fp_wthreshold);
}
if (fp_list->fp_wtau) {
fclose(fp_list->fp_wtau);
}
if (fp_list->fp_winit) {
fclose(fp_list->fp_winit);
}
if (fp_list->fp_wadapt_lr) {
fclose(fp_list->fp_wadapt_lr);
}
if (fp_list->fp_werror) {
fclose(fp_list->fp_werror);
}
if (fp_list->fp_wclosed_error) {
fclose(fp_list->fp_wclosed_error);
}
if (fp_list->fp_wlyapunov) {
fclose(fp_list->fp_wlyapunov);
}
if (fp_list->fp_wentropy) {
fclose(fp_list->fp_wentropy);
}
}
/* Writes the training/analysis settings as '#'-prefixed header lines. */
static void print_general_parameters (
FILE *fp,
const struct general_parameters *gp)
{
fprintf(fp, "# seed = %lu\n", gp->mp.seed);
fprintf(fp, "# epoch_size = %ld\n", gp->mp.epoch_size);
if (gp->mp.use_adaptive_lr) {
fprintf(fp, "# use_adaptive_lr\n");
}
fprintf(fp, "# rho = %f\n", gp->mp.rho);
fprintf(fp, "# momentum = %f\n", gp->mp.momentum);
fprintf(fp, "# mre_delay_length = %d\n", gp->mp.mre_delay_length);
fprintf(fp, "# gn_delay_length = %d\n", gp->mp.gn_delay_length);
fprintf(fp, "# lambda = %f\n", gp->mp.lambda);
fprintf(fp, "# alpha = %f\n", gp->mp.alpha);
fprintf(fp, "# truncate_length = %d\n", gp->ap.truncate_length);
fprintf(fp, "# block_length = %d\n", gp->ap.block_length);
fprintf(fp, "# divide_num = %d\n", gp->ap.divide_num);
fprintf(fp, "# lyapunov_spectrum_size = %d\n",
gp->ap.lyapunov_spectrum_size);
}
/* Writes the RNN architecture as '#'-prefixed header lines: layer sizes,
 * output type (with softmax group membership), fixed-parameter flags,
 * per-target lengths, and the connection ranges of each weight matrix. */
static void print_rnn_parameters (
FILE *fp,
const struct recurrent_neural_network *rnn)
{
fprintf(fp, "# in_state_size = %d\n", rnn->rnn_p.in_state_size);
fprintf(fp, "# c_state_size = %d\n", rnn->rnn_p.c_state_size);
fprintf(fp, "# out_state_size = %d\n", rnn->rnn_p.out_state_size);
if (rnn->rnn_p.output_type == STANDARD_TYPE) {
fprintf(fp, "# output_type = STANDARD_TYPE\n");
} else if (rnn->rnn_p.output_type == SOFTMAX_TYPE) {
fprintf(fp, "# output_type = SOFTMAX_TYPE\n");
/* list the output units belonging to each softmax group */
for (int c = 0; c < rnn->rnn_p.softmax_group_num; c++) {
fprintf(fp, "# group%d = ", c);
for (int i = 0; i < rnn->rnn_p.out_state_size; i++) {
if (rnn->rnn_p.softmax_group_id[i] == c) {
fprintf(fp, "%d,", i);
}
}
fprintf(fp, "\n");
}
}
if (rnn->rnn_p.fixed_weight) {
fprintf(fp, "# fixed_weight\n");
}
if (rnn->rnn_p.fixed_threshold) {
fprintf(fp, "# fixed_threshold\n");
}
if (rnn->rnn_p.fixed_tau) {
fprintf(fp, "# fixed_tau\n");
}
if (rnn->rnn_p.fixed_init_c_state) {
fprintf(fp, "# fixed_init_c_state\n");
}
fprintf(fp, "# target_num = %d\n", rnn->series_num);
for (int i = 0; i < rnn->series_num; i++) {
fprintf(fp, "# target %d\tlength = %d\n", i, rnn->rnn_s[i].length);
}
fprintf(fp, "# prior_strength = %f\n", rnn->rnn_p.prior_strength);
const struct rnn_parameters *rnn_p = &rnn->rnn_p;
for (int i = 0; i < rnn_p->c_state_size; i++) {
fprintf(fp, "# const_init_c[%d] = %d\n", i, rnn_p->const_init_c[i]);
}
/* connection lists are terminated by an entry with begin == -1 */
for (int i = 0; i < rnn_p->c_state_size; i++) {
fprintf(fp, "# connection_weight_ci[%d] = ", i);
int I = 0;
while (rnn_p->connection_ci[i][I].begin != -1) {
int begin = rnn_p->connection_ci[i][I].begin;
int end = rnn_p->connection_ci[i][I].end;
fprintf(fp, "(%d,%d)", begin, end);
I++;
}
fprintf(fp, "\n");
}
for (int i = 0; i < rnn_p->c_state_size; i++) {
fprintf(fp, "# connection_weight_cc[%d] = ", i);
int I = 0;
while (rnn_p->connection_cc[i][I].begin != -1) {
int begin = rnn_p->connection_cc[i][I].begin;
int end = rnn_p->connection_cc[i][I].end;
fprintf(fp, "(%d,%d)", begin, end);
I++;
}
fprintf(fp, "\n");
}
for (int i = 0; i < rnn_p->out_state_size; i++) {
fprintf(fp, "# connection_weight_oc[%d] = ", i);
int I = 0;
while (rnn_p->connection_oc[i][I].begin != -1) {
int begin = rnn_p->connection_oc[i][I].begin;
int end = rnn_p->connection_oc[i][I].end;
fprintf(fp, "(%d,%d)", begin, end);
I++;
}
fprintf(fp, "\n");
}
}
/* Writes one line: the epoch followed by every weight (input-to-context,
 * context-to-context, then context-to-output), tab-separated. */
static void print_rnn_weight (
FILE *fp,
long epoch,
const struct rnn_parameters *rnn_p)
{
fprintf(fp, "%ld", epoch);
for (int i = 0; i < rnn_p->c_state_size; i++) {
for (int j = 0; j < rnn_p->in_state_size; j++) {
fprintf(fp, "\t%f", rnn_p->weight_ci[i][j]);
}
for (int j = 0; j < rnn_p->c_state_size; j++) {
fprintf(fp, "\t%f", rnn_p->weight_cc[i][j]);
}
}
for (int i = 0; i < rnn_p->out_state_size; i++) {
for (int j = 0; j < rnn_p->c_state_size; j++) {
fprintf(fp, "\t%f", rnn_p->weight_oc[i][j]);
}
}
fprintf(fp, "\n");
}
/* Writes one line: the epoch followed by every context-neuron and
 * output-neuron threshold, tab-separated. */
static void print_rnn_threshold (
        FILE *fp,
        long epoch,
        const struct rnn_parameters *rnn_p)
{
    fprintf(fp, "%ld", epoch);
    for (int n = 0; n < rnn_p->c_state_size; n++)
        fprintf(fp, "\t%f", rnn_p->threshold_c[n]);
    for (int n = 0; n < rnn_p->out_state_size; n++)
        fprintf(fp, "\t%f", rnn_p->threshold_o[n]);
    fprintf(fp, "\n");
}
/* Writes one line: the epoch followed by each context neuron's time
 * constant tau, tab-separated. */
static void print_rnn_tau (
        FILE *fp,
        long epoch,
        const struct rnn_parameters *rnn_p)
{
    fprintf(fp, "%ld", epoch);
    int n = 0;
    while (n < rnn_p->c_state_size) {
        fprintf(fp, "\t%g", rnn_p->tau[n]);
        n++;
    }
    fprintf(fp, "\n");
}
/* Writes the initial context state of every training series: an epoch
 * header line, then one tab-separated line per series. */
static void print_rnn_init (
FILE *fp,
long epoch,
const struct recurrent_neural_network *rnn)
{
fprintf(fp, "# epoch = %ld\n", epoch);
for (int i = 0; i < rnn->series_num; i++) {
fprintf(fp, "%d", i);
for (int j = 0; j < rnn->rnn_p.c_state_size; j++) {
fprintf(fp, "\t%f", rnn->rnn_s[i].init_c_inter_state[j]);
}
fprintf(fp, "\n");
}
}
/* Writes one record: the epoch number and the current adaptive learning
 * rate, tab-separated and newline-terminated. */
static void print_adapt_lr (
        FILE *fp,
        long epoch,
        double adapt_lr)
{
    fprintf(fp, "%ld", epoch);
    fprintf(fp, "\t%f\n", adapt_lr);
}
/* Writes one line: the epoch followed by, for each series, the gating
 * network error and the mixture-of-experts error, each normalized per
 * time step and per output unit.  Per-series errors are computed in
 * parallel, each thread touching only its own array slot. */
static void print_mregn_error (
FILE *fp,
long epoch,
const struct mixture_of_rnn_experts *mre,
const struct recurrent_neural_network *gn)
{
double gn_error[gn->series_num];
double mre_error[mre->series_num];
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < gn->series_num; i++) {
gn_error[i] = rnn_get_error(gn->rnn_s + i);
gn_error[i] /= gn->rnn_s[i].length * gn->rnn_p.out_state_size;
mre_error[i] = mre_get_error(mre->mre_s + i);
mre_error[i] /= mre->mre_s[i].length * mre->out_state_size;
}
fprintf(fp, "%ld", epoch);
for (int i = 0; i < gn->series_num; i++) {
fprintf(fp, "\t%g\t%g", gn_error[i], mre_error[i]);
}
fprintf(fp, "\n");
}
/* Writes one line per time step: step index, then for each output unit the
 * teacher and produced values, then the internal context states. */
static void print_rnn_state (
FILE *fp,
const struct rnn_state *rnn_s)
{
for (int n = 0; n < rnn_s->length; n++) {
fprintf(fp, "%d", n);
for (int i = 0; i < rnn_s->rnn_p->out_state_size; i++) {
fprintf(fp, "\t%f", rnn_s->teach_state[n][i]);
fprintf(fp, "\t%f", rnn_s->out_state[n][i]);
}
for (int i = 0; i < rnn_s->rnn_p->c_state_size; i++) {
//fprintf(fp, "\t%f", rnn_s->c_state[n][i]);
fprintf(fp, "\t%f", rnn_s->c_inter_state[n][i]);
}
fprintf(fp, "\n");
}
}
/* Dumps the state of every series, one file per series.  The loop is
 * parallel-safe because each iteration writes only to its own stream
 * fp_array[i]. */
static void print_rnn_state_forall (
FILE **fp_array,
long epoch,
const struct recurrent_neural_network *rnn)
{
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < rnn->series_num; i++) {
fprintf(fp_array[i], "# epoch = %ld\n", epoch);
fprintf(fp_array[i], "# target:%d\n", i);
print_rnn_state(fp_array[i], rnn->rnn_s + i);
}
}
/* Writes one line per time step: step index, then for each output unit the
 * teacher value and the mixture output.  The teacher sequence is read from
 * expert 0 (all experts share the same teacher).  No-op if there are no
 * experts. */
static void print_mre_state (
FILE *fp,
const struct mre_state *mre_s)
{
if (mre_s->mre->expert_num <= 0) return;
const struct rnn_state *rnn_s = mre_s->expert_rnn_s[0];
for (int n = 0; n < mre_s->length; n++) {
fprintf(fp, "%d", n);
for (int i = 0; i < mre_s->mre->out_state_size; i++) {
fprintf(fp, "\t%f\t%f", rnn_s->teach_state[n][i],
mre_s->out_state[n][i]);
}
fprintf(fp, "\n");
}
}
/* Dumps the mixture state of every series, one file per series; each
 * parallel iteration writes only to its own stream fp_array[i]. */
static void print_mre_state_forall (
FILE **fp_array,
long epoch,
const struct mixture_of_rnn_experts *mre)
{
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < mre->series_num; i++) {
fprintf(fp_array[i], "# epoch = %ld\n", epoch);
fprintf(fp_array[i], "# target:%d\n", i);
print_mre_state(fp_array[i], mre->mre_s + i);
}
}
/* Computes the Lyapunov spectrum of one (mixture, gating-network) pair into
 * `spectrum` (spectrum_size entries).  Series not longer than
 * truncate_length yield an all-zero spectrum instead.  Exits the program if
 * the spectrum computation fails. */
static void compute_lyapunov_spectrum_of_mregn_state (
const struct mre_state *mre_s,
const struct rnn_state *gn_s,
int spectrum_size,
int mre_delay_length,
int gn_delay_length,
int truncate_length,
double *spectrum)
{
if (gn_s->length > truncate_length && mre_s->length > truncate_length) {
struct mregn_lyapunov_info ml_info;
init_mregn_lyapunov_info(&ml_info, mre_s, gn_s, mre_delay_length,
gn_delay_length, truncate_length);
spectrum = mregn_lyapunov_spectrum(&ml_info, spectrum, spectrum_size);
if (spectrum == NULL) {
print_error_msg();
exit(EXIT_FAILURE);
}
free_mregn_lyapunov_info(&ml_info);
} else {
/* series too short to truncate transients: report zeros */
for (int i = 0; i < spectrum_size; i++) {
spectrum[i] = 0;
}
}
}
/* Writes one line: the epoch followed by the Lyapunov spectrum of every
 * series.  A negative or too-large spectrum_size is clamped to the system's
 * total dimension (delayed outputs + gate inputs + all context states). */
static void print_lyapunov_spectrum_of_mregn (
FILE *fp,
long epoch,
const struct mixture_of_rnn_experts *mre,
const struct recurrent_neural_network *gn,
int spectrum_size,
int mre_delay_length,
int gn_delay_length,
int truncate_length)
{
int max_num;
// decides spectrum_size which is the number to evaluate Lyapunov exponents
max_num = mre->out_state_size * mre_delay_length;
max_num += mre->expert_num * gn_delay_length;
for (int i = 0; i < mre->expert_num; i++) {
max_num += mre->expert_rnn[i].rnn_p.c_state_size;
}
max_num += gn->rnn_p.c_state_size;
if (max_num < spectrum_size || spectrum_size < 0) {
spectrum_size = max_num;
}
if (spectrum_size <= 0) return;
double **spectrum = NULL;
MALLOC2(spectrum, gn->series_num, spectrum_size);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < gn->series_num; i++) {
compute_lyapunov_spectrum_of_mregn_state(mre->mre_s + i, gn->rnn_s + i,
spectrum_size, mre_delay_length, gn_delay_length,
truncate_length, spectrum[i]);
}
fprintf(fp, "%ld", epoch);
for (int i = 0; i < gn->series_num; i++) {
for (int j = 0; j < spectrum_size; j++) {
fprintf(fp, "\t%f", spectrum[i][j]);
}
}
fprintf(fp, "\n");
FREE2(spectrum);
}
/* Assigns an index to a value with respect to indexed hypercubes in the
 * R^dimension space: [min, max] is split into divide_num equal bins and
 * the bin index containing x is returned.  Values past the last bin
 * boundary are clamped to divide_num - 1. */
static inline int f2symbol (
        const double x,
        double min,
        double max,
        int divide_num)
{
    const double bin_width = (max - min) / divide_num;
    double upper = min;
    for (int k = 0; k < divide_num; k++) {
        upper += bin_width;
        if (x <= upper || k == divide_num - 1) {
            return k;
        }
    }
    return 0; /* only reached when divide_num <= 0 */
}
/* Discretizes the teacher and output sequences (dropping the first
 * truncate_length steps), then computes their Kullback-Leibler divergence,
 * per-block-length entropies and the generation rate.  Series not longer
 * than truncate_length yield all-zero results.  The symbol value range is
 * [-1,1] for STANDARD_TYPE outputs and [0,1] otherwise. */
static void compute_kl_divergence_of_rnn_state (
const struct rnn_state *rnn_s,
int truncate_length,
int block_length,
int divide_num,
double *kl_div,
double *entropy_t,
double *entropy_o,
double *gen_rate)
{
if (rnn_s->length > truncate_length) {
double min, max;
int **sequence_t, **sequence_o;
struct block_frequency bf_t, bf_o;
const int length = rnn_s->length - truncate_length;
if (rnn_s->rnn_p->output_type == STANDARD_TYPE) {
min = -1.0; max = 1.0;
} else {
min = 0.0; max = 1.0;
}
MALLOC2(sequence_t, length, rnn_s->rnn_p->out_state_size);
MALLOC2(sequence_o, length, rnn_s->rnn_p->out_state_size);
/* symbolize both sequences, skipping the transient prefix */
for (int n = 0; n < length; n++) {
int N = n + truncate_length;
for (int i = 0; i < rnn_s->rnn_p->out_state_size; i++) {
sequence_t[n][i] = f2symbol(rnn_s->teach_state[N][i],
min, max, divide_num);
sequence_o[n][i] = f2symbol(rnn_s->out_state[N][i], min,
max, divide_num);
}
}
init_block_frequency(&bf_t, (const int* const*)sequence_t,
rnn_s->rnn_p->out_state_size, length, block_length);
init_block_frequency(&bf_o, (const int* const*)sequence_o,
rnn_s->rnn_p->out_state_size, length, block_length);
*kl_div = kullback_leibler_divergence(&bf_t, &bf_o);
*entropy_t = block_entropy(&bf_t) / block_length;
*entropy_o = block_entropy(&bf_o) / block_length;
*gen_rate = generation_rate(&bf_t, &bf_o);
free_block_frequency(&bf_t);
free_block_frequency(&bf_o);
FREE2(sequence_t);
FREE2(sequence_o);
} else {
*kl_div = 0;
*entropy_t = 0;
*entropy_o = 0;
*gen_rate = 0;
}
}
/* Computes KL-divergence statistics for every sequence of the network (in
 * parallel when OpenMP is available) and appends one line to fp: the epoch,
 * then (kl_div, gen_rate, entropy_teacher, entropy_output) per sequence. */
static void print_kl_divergence_of_rnn (
        FILE *fp,
        long epoch,
        const struct recurrent_neural_network *rnn,
        int truncate_length,
        int block_length,
        int divide_num)
{
    const int num = rnn->series_num;
    double kl_div[num], entropy_t[num], entropy_o[num], gen_rate[num];
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int s = 0; s < num; s++) {
        compute_kl_divergence_of_rnn_state(rnn->rnn_s + s, truncate_length,
                block_length, divide_num, kl_div + s, entropy_t + s,
                entropy_o + s, gen_rate + s);
    }
    fprintf(fp, "%ld", epoch);
    for (int s = 0; s < num; s++) {
        fprintf(fp, "\t%g\t%g\t%g\t%g", kl_div[s], gen_rate[s], entropy_t[s],
                entropy_o[s]);
    }
    fprintf(fp, "\n");
}
/* Returns nonzero when output is enabled at this epoch.  With
 * use_logscale_interval the effective interval is the decade of the epoch
 * (1, 10, 100, ...) capped at pi->interval, so early epochs are logged
 * densely and later ones sparsely; otherwise pi->interval is used directly.
 * The epoch must also lie inside [pi->init, pi->end]. */
static int enable_print (
        long epoch,
        const struct print_interval *pi)
{
    long step = pi->interval;
    if (pi->use_logscale_interval) {
        long decade = 1;
        while (epoch >= 10 * decade) {
            decade *= 10;
        }
        /* effective step = min(decade, pi->interval) */
        if (decade < pi->interval) {
            step = decade;
        }
    }
    return (epoch % step) == 0 && epoch >= pi->init && epoch <= pi->end;
}
/* Writes the trainable parameters (weights, thresholds, time constants,
 * initial states) and the adaptive learning rate to their respective output
 * files.  Each file is written only when its pointer is open AND its print
 * interval enables output at this epoch (see enable_print). */
static void print_parameters_with_epoch (
        long epoch,
        const struct general_parameters *gp,
        const struct recurrent_neural_network *rnn,
        struct output_files *fp_list)
{
    if (fp_list->fp_wweight &&
            enable_print(epoch, &gp->iop.interval_for_weight_file)) {
        print_rnn_weight(fp_list->fp_wweight, epoch, &rnn->rnn_p);
    }
    if (fp_list->fp_wthreshold &&
            enable_print(epoch, &gp->iop.interval_for_threshold_file)) {
        print_rnn_threshold(fp_list->fp_wthreshold, epoch, &rnn->rnn_p);
    }
    if (fp_list->fp_wtau &&
            enable_print(epoch, &gp->iop.interval_for_tau_file)) {
        print_rnn_tau(fp_list->fp_wtau, epoch, &rnn->rnn_p);
    }
    if (fp_list->fp_winit &&
            enable_print(epoch, &gp->iop.interval_for_init_file)) {
        print_rnn_init(fp_list->fp_winit, epoch, rnn);
    }
    if (fp_list->fp_wadapt_lr &&
            enable_print(epoch, &gp->iop.interval_for_adapt_lr_file)) {
        print_adapt_lr(fp_list->fp_wadapt_lr, epoch, gp->inp.adapt_lr);
        /* flushed immediately so the learning-rate trace can be watched
         * while training runs (the other files are flushed elsewhere) */
        fflush(fp_list->fp_wadapt_lr);
    }
}
/* Runs the open-loop forward dynamics (at most once, memoized via the
 * compute_forward_dynamics flag) and writes the error, the gate-network
 * state and the mixture-of-experts state to whichever of the three output
 * targets are open and enabled at this epoch. */
static void print_open_loop_data_with_epoch (
        long epoch,
        const struct general_parameters *gp,
        struct mixture_of_rnn_experts *mre,
        struct recurrent_neural_network *rnn,
        struct output_files *fp_list)
{
    /* the forward pass is shared by all three outputs below; this flag
     * makes sure it is computed only once per call */
    int compute_forward_dynamics = 0;
    if (fp_list->fp_werror &&
            enable_print(epoch, &gp->iop.interval_for_error_file)) {
        if (!compute_forward_dynamics) {
            mregn_forward_dynamics_forall(mre, rnn, gp->mp.gn_delay_length);
            compute_forward_dynamics = 1;
        }
        print_mregn_error(fp_list->fp_werror, epoch, mre, rnn);
        fflush(fp_list->fp_werror);
    }
    if (fp_list->fp_wstate_array &&
            enable_print(epoch, &gp->iop.interval_for_state_file)) {
        if (!compute_forward_dynamics) {
            mregn_forward_dynamics_forall(mre, rnn, gp->mp.gn_delay_length);
            compute_forward_dynamics = 1;
        }
        print_rnn_state_forall(fp_list->fp_wstate_array, epoch, rnn);
        /* blank line separates successive epochs in each state file */
        for (int i = 0; i < fp_list->array_size; i++) {
            fprintf(fp_list->fp_wstate_array[i], "\n");
        }
    }
    if (fp_list->fp_wmre_state_array &&
            enable_print(epoch, &gp->iop.interval_for_mre_state_file)) {
        if (!compute_forward_dynamics) {
            mregn_forward_dynamics_forall(mre, rnn, gp->mp.gn_delay_length);
            compute_forward_dynamics = 1;
        }
        print_mre_state_forall(fp_list->fp_wmre_state_array, epoch, mre);
        for (int i = 0; i < fp_list->array_size; i++) {
            fprintf(fp_list->fp_wmre_state_array[i], "\n");
        }
    }
}
/* Runs the closed-loop forward dynamics (at most once, memoized via the
 * compute_forward_dynamics flag) and writes the closed-loop error, the
 * network/expert states, the Lyapunov spectrum and the KL-divergence
 * statistics to whichever output targets are open and enabled at this
 * epoch. */
static void print_closed_loop_data_with_epoch (
        long epoch,
        const struct general_parameters *gp,
        struct mixture_of_rnn_experts *mre,
        struct recurrent_neural_network *rnn,
        struct output_files *fp_list)
{
    /* the closed-loop forward pass is shared by all outputs below; this
     * flag makes sure it is computed only once per call */
    int compute_forward_dynamics = 0;
    if (fp_list->fp_wclosed_error &&
            enable_print(epoch, &gp->iop.interval_for_closed_error_file)) {
        if (!compute_forward_dynamics) {
            mregn_forward_dynamics_in_closed_loop_forall(mre, rnn,
                    gp->mp.mre_delay_length, gp->mp.gn_delay_length);
            compute_forward_dynamics = 1;
        }
        print_mregn_error(fp_list->fp_wclosed_error, epoch, mre, rnn);
        fflush(fp_list->fp_wclosed_error);
    }
    if (fp_list->fp_wclosed_state_array &&
            enable_print(epoch, &gp->iop.interval_for_closed_state_file)) {
        if (!compute_forward_dynamics) {
            mregn_forward_dynamics_in_closed_loop_forall(mre, rnn,
                    gp->mp.mre_delay_length, gp->mp.gn_delay_length);
            compute_forward_dynamics = 1;
        }
        print_rnn_state_forall(fp_list->fp_wclosed_state_array, epoch, rnn);
        /* blank line separates successive epochs in each state file */
        for (int i = 0; i < fp_list->array_size; i++) {
            fprintf(fp_list->fp_wclosed_state_array[i], "\n");
        }
    }
    if (fp_list->fp_wclosed_mre_state_array &&
            enable_print(epoch, &gp->iop.interval_for_closed_mre_state_file)) {
        if (!compute_forward_dynamics) {
            mregn_forward_dynamics_in_closed_loop_forall(mre, rnn,
                    gp->mp.mre_delay_length, gp->mp.gn_delay_length);
            compute_forward_dynamics = 1;
        }
        print_mre_state_forall(fp_list->fp_wclosed_mre_state_array, epoch, mre);
        for (int i = 0; i < fp_list->array_size; i++) {
            fprintf(fp_list->fp_wclosed_mre_state_array[i], "\n");
        }
    }
    if (fp_list->fp_wlyapunov &&
            enable_print(epoch, &gp->iop.interval_for_lyapunov_file)) {
        if (!compute_forward_dynamics) {
            mregn_forward_dynamics_in_closed_loop_forall(mre, rnn,
                    gp->mp.mre_delay_length, gp->mp.gn_delay_length);
            compute_forward_dynamics = 1;
        }
        print_lyapunov_spectrum_of_mregn(fp_list->fp_wlyapunov, epoch, mre, rnn,
                gp->ap.lyapunov_spectrum_size, gp->mp.mre_delay_length,
                gp->mp.gn_delay_length, gp->ap.truncate_length);
        fflush(fp_list->fp_wlyapunov);
    }
    if (fp_list->fp_wentropy &&
            enable_print(epoch, &gp->iop.interval_for_entropy_file)) {
        if (!compute_forward_dynamics) {
            mregn_forward_dynamics_in_closed_loop_forall(mre, rnn,
                    gp->mp.mre_delay_length, gp->mp.gn_delay_length);
            compute_forward_dynamics = 1;
        }
        print_kl_divergence_of_rnn(fp_list->fp_wentropy, epoch, rnn,
                gp->ap.truncate_length, gp->ap.block_length,
                gp->ap.divide_num);
        fflush(fp_list->fp_wentropy);
    }
}
/* Writes a header (file-type banner, general parameters, network
 * parameters) to every output file that is open, once at the start of
 * training.  Array-valued targets (per-sequence state files) get one
 * header per file in the array. */
void print_training_main_begin (
        const struct general_parameters *gp,
        const struct recurrent_neural_network *rnn,
        struct output_files *fp_list)
{
    /* per-sequence state file arrays */
    if (fp_list->fp_wstate_array) {
        for (int i = 0; i < fp_list->array_size; i++) {
            fprintf(fp_list->fp_wstate_array[i], "# STATE FILE\n");
            print_general_parameters(fp_list->fp_wstate_array[i], gp);
            print_rnn_parameters(fp_list->fp_wstate_array[i], rnn);
        }
    }
    if (fp_list->fp_wmre_state_array) {
        for (int i = 0; i < fp_list->array_size; i++) {
            fprintf(fp_list->fp_wmre_state_array[i], "# MRE STATE FILE\n");
            print_general_parameters(fp_list->fp_wmre_state_array[i], gp);
            print_rnn_parameters(fp_list->fp_wmre_state_array[i], rnn);
        }
    }
    if (fp_list->fp_wclosed_state_array) {
        for (int i = 0; i < fp_list->array_size; i++) {
            fprintf(fp_list->fp_wclosed_state_array[i], "# STATE FILE\n");
            print_general_parameters(fp_list->fp_wclosed_state_array[i], gp);
            print_rnn_parameters(fp_list->fp_wclosed_state_array[i], rnn);
        }
    }
    if (fp_list->fp_wclosed_mre_state_array) {
        for (int i = 0; i < fp_list->array_size; i++) {
            fprintf(fp_list->fp_wclosed_mre_state_array[i],
                    "# MRE STATE FILE\n");
            print_general_parameters(fp_list->fp_wclosed_mre_state_array[i],
                    gp);
            print_rnn_parameters(fp_list->fp_wclosed_mre_state_array[i], rnn);
        }
    }
    /* single-file targets */
    if (fp_list->fp_wweight) {
        fprintf(fp_list->fp_wweight, "# WEIGHT FILE\n");
        print_general_parameters(fp_list->fp_wweight, gp);
        print_rnn_parameters(fp_list->fp_wweight, rnn);
    }
    if (fp_list->fp_wthreshold) {
        fprintf(fp_list->fp_wthreshold, "# THRESHOLD FILE\n");
        print_general_parameters(fp_list->fp_wthreshold, gp);
        print_rnn_parameters(fp_list->fp_wthreshold, rnn);
    }
    if (fp_list->fp_wtau) {
        fprintf(fp_list->fp_wtau, "# TAU FILE\n");
        print_general_parameters(fp_list->fp_wtau, gp);
        print_rnn_parameters(fp_list->fp_wtau, rnn);
    }
    if (fp_list->fp_winit) {
        fprintf(fp_list->fp_winit, "# INIT FILE\n");
        print_general_parameters(fp_list->fp_winit, gp);
        print_rnn_parameters(fp_list->fp_winit, rnn);
    }
    if (fp_list->fp_wadapt_lr) {
        fprintf(fp_list->fp_wadapt_lr, "# ADAPT_LR FILE\n");
        print_general_parameters(fp_list->fp_wadapt_lr, gp);
        print_rnn_parameters(fp_list->fp_wadapt_lr, rnn);
    }
    if (fp_list->fp_werror) {
        fprintf(fp_list->fp_werror, "# ERROR FILE\n");
        print_general_parameters(fp_list->fp_werror, gp);
        print_rnn_parameters(fp_list->fp_werror, rnn);
    }
    if (fp_list->fp_wclosed_error) {
        fprintf(fp_list->fp_wclosed_error, "# ERROR FILE\n");
        print_general_parameters(fp_list->fp_wclosed_error, gp);
        print_rnn_parameters(fp_list->fp_wclosed_error, rnn);
    }
    if (fp_list->fp_wlyapunov) {
        fprintf(fp_list->fp_wlyapunov, "# LYAPUNOV FILE\n");
        print_general_parameters(fp_list->fp_wlyapunov, gp);
        print_rnn_parameters(fp_list->fp_wlyapunov, rnn);
    }
    if (fp_list->fp_wentropy) {
        fprintf(fp_list->fp_wentropy, "# ENTROPY FILE\n");
        print_general_parameters(fp_list->fp_wentropy, gp);
        print_rnn_parameters(fp_list->fp_wentropy, rnn);
    }
}
/* Per-epoch output hook for the training loop: writes parameters, then
 * open-loop data, then closed-loop data.  Each helper decides internally
 * which files to write based on its print intervals. */
void print_training_main_loop (
        long epoch,
        const struct general_parameters *gp,
        struct mixture_of_rnn_experts *mre,
        struct recurrent_neural_network *rnn,
        struct output_files *fp_list)
{
    print_parameters_with_epoch(epoch, gp, rnn, fp_list);
    print_open_loop_data_with_epoch(epoch, gp, mre, rnn, fp_list);
    print_closed_loop_data_with_epoch(epoch, gp, mre, rnn, fp_list);
}
|
opi.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
/* Monte-Carlo estimate of pi: throw Ntrials random points into [-1,1]^2 and
 * count how many land inside the unit circle.
 *
 * Fixes over the original:
 *  - Q2c: the per-thread drand48 states are now seeded inside a parallel
 *    region.  The original called omp_get_thread_num() outside any parallel
 *    region (always 0), so only drandData[0] was seeded and every other
 *    thread consumed uninitialized RNG state.
 *  - Ntotal is now in the reduction clause; it was previously incremented
 *    by all threads without synchronization (data race).
 *  - The in-loop progress print was removed: it divided by n at n == 0 and
 *    read racy, thread-partial counters, so the printed estimates were
 *    meaningless and the output racy.
 *  - argv[1] is validated before use. */
int main(int argc, char **argv) {
    // Q2b: the number of threads to run with comes from argv
    if (argc < 2) {
        fprintf(stderr, "usage: %s <num_threads>\n", argv[0]);
        return 1;
    }
    int Nthreads = atoi(argv[1]);
    if (Nthreads < 1) Nthreads = 1;
    // tell OpenMP to use Nthreads threads
    omp_set_num_threads(Nthreads);

    struct drand48_data *drandData =
        (struct drand48_data*) malloc(Nthreads * sizeof(struct drand48_data));
    if (drandData == NULL) {
        fprintf(stderr, "allocation failure\n");
        return 1;
    }

    // Q2c: each thread initializes its own entry in drandData, seeded by
    // its thread number so the streams are distinct and reproducible
    #pragma omp parallel
    {
        int rank = omp_get_thread_num();
        srand48_r((long int) rank, drandData + rank);
    }

    long long int Ntrials = 10000000;
    // running tallies, combined per-thread via the reduction clause
    long long int Ntotal = 0;
    long long int Ncircle = 0;

    double startTime = omp_get_wtime();
    #pragma omp parallel for reduction(+:Ncircle,Ntotal)
    for (long long int n = 0; n < Ntrials; n++) {
        int rank = omp_get_thread_num();
        double rand1, rand2;
        // generate two random numbers from this thread's own stream
        drand48_r(drandData + rank, &rand1);
        drand48_r(drandData + rank, &rand2);
        double x = -1 + 2 * rand1;  // shift to [-1,1]
        double y = -1 + 2 * rand2;
        // x*x + y*y <= 1 is equivalent to sqrt(x*x+y*y) <= 1 and cheaper
        if (x * x + y * y <= 1) Ncircle++;
        Ntotal++;
    }
    double endTime = omp_get_wtime();

    double pi = 4.0 * Ncircle / (double) (Ntotal);
    printf("Our final estimate of pi is %g \n", pi);
    printf("Total time is %g \n", endTime - startTime);
    free(drandData);
    return 0;
}
|
main.c |
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include "omp.h"
/* pthread entry point: interprets ptr as a C string, prints it followed by
 * " \n", and returns NULL. */
void *print_message_function(void *ptr)
{
    const char *text = (const char *) ptr;
    printf("%s \n", text);
    return (void *) 0;
}
/* Sums a small array in parallel.
 *
 * BUG FIX: the original accumulated into a shared `volatile int sum` from
 * every OpenMP thread simultaneously.  `volatile` is not a synchronization
 * primitive, so that was a data race with an unspecified result.  A
 * reduction clause gives each thread a private partial sum and combines
 * them deterministically.  The result is still unused, as in the original. */
void loop()
{
    int array[] = { 1, 2, 3, 4 };
    int sum = 0;
#pragma omp parallel for reduction(+:sum)
    for (unsigned int i = 0; i < 4; ++i)
    {
        sum += array[i];
    }
    (void) sum;  // intentionally unused, matching the original's behavior
}
/* Spawns two pthreads that each print a label, joins them in creation
 * order, then reports the pthread_create return codes. */
int main()
{
    pthread_t workers[2];
    char *labels[2] = { "Thread 1", "Thread 2" };
    int rc[2];
    for (int i = 0; i < 2; i++) {
        rc[i] = pthread_create(&workers[i], NULL, print_message_function,
                (void *) labels[i]);
    }
    for (int i = 0; i < 2; i++) {
        pthread_join(workers[i], NULL);
    }
    printf("Thread 1 returns: %d\n", rc[0]);
    printf("Thread 2 returns: %d\n", rc[1]);
    return 0;
}
|
jacobi.c | //
// Implementation of the iterative Jacobi method.
//
// Given a known, diagonally dominant matrix A and a known vector b, we aim to
// to find the vector x that satisfies the following equation:
//
// Ax = b
//
// We first split the matrix A into the diagonal D and the remainder R:
//
// (D + R)x = b
//
// We then rearrange to form an iterative solution:
//
// x' = (b - Rx) / D
//
// More information:
// -> https://en.wikipedia.org/wiki/Jacobi_method
//
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <x86intrin.h>
#include <immintrin.h>
#include <omp.h>
static int N;
static int MAX_ITERATIONS;
static int SEED;
static double CONVERGENCE_THRESHOLD;
const int NUM_THREADS = 16;
#define SEPARATOR "------------------------------------\n"
// Return the current time in seconds since the Epoch
double get_timestamp();
// Parse command line arguments to set solver parameters
void parse_arguments(int argc, char *argv[]);
// Run the Jacobi solver.
// Returns the number of iterations performed.
//
// Fixes over the original:
//  - `d` (the precomputed diagonal reciprocals) was never freed: one leak
//    of N floats per call.
//  - x and xtmp are swapped locally each iteration, so after an ODD number
//    of iterations the newest iterate lived in the caller's xtmp buffer and
//    the caller read a stale solution from x.  The final iterate is now
//    copied back into the caller's x buffer when needed.
int run(float* A, float* b, float* x, float* xtmp) {
  int itr;
  float diff;
  float sqdiff;
  float *ptrtmp;
  float *x_out = x;  // the buffer the caller reads the solution from
  float convergence_threshold_square =
      CONVERGENCE_THRESHOLD * CONVERGENCE_THRESHOLD;
  // Precompute 1/diagonal so the hot loop multiplies instead of divides.
  float* d = malloc(N * sizeof(float));
  for (int row = 0; row < N; row++) d[row] = 1 / A[row * N + row];
  int row, ridx, col;
  float dot = 0.0;
  // Loop until converged or maximum iterations reached
  itr = 0;
  do {
    // Perform Jacobi iteration: xtmp = (b - R*x) / D
    #pragma omp parallel for private(dot, ridx, col)
    for (row = 0; row < N; row++) {
      ridx = row * N;
      dot = 0.0;
      // off-diagonal dot product, skipping the diagonal element
      for (col = 0; col < row; col++) dot += A[ridx + col] * x[col];
      for (col = row + 1; col < N; col++) dot += A[ridx + col] * x[col];
      xtmp[row] = (b[row] - dot) * d[row];
    }
    // Swap pointers: x now holds the newest iterate
    ptrtmp = x;
    x = xtmp;
    xtmp = ptrtmp;
    // Check for convergence (xtmp holds the previous iterate after the swap)
    sqdiff = 0.0;
    for (row = 0; row < N; row++) {
      diff = xtmp[row] - x[row];
      sqdiff += diff * diff;
    }
    itr++;
  } while ((itr < MAX_ITERATIONS) && (sqdiff > convergence_threshold_square));
  // Make sure the caller's x buffer holds the final iterate
  if (x != x_out) memcpy(x_out, x, N * sizeof(float));
  free(d);  // was leaked in the original
  return itr;
}
/* Builds a random diagonally dominant system Ax = b, runs the Jacobi
 * solver, and reports the residual error and timings.
 *
 * BUG FIX: the staging buffer `ta` was allocated and never freed; it is
 * released as soon as its contents have been copied into A. */
int main(int argc, char *argv[]) {
  parse_arguments(argc, argv);
  float *A = malloc(N*N*sizeof(float));
  float *ta = malloc(N*N*sizeof(float));
  float *b = malloc(N*sizeof(float));
  float *x = malloc(N*sizeof(float));
  float *xtmp = malloc(N*sizeof(float));
  printf(SEPARATOR);
  printf("Matrix size: %dx%d\n", N, N);
  printf("Maximum iterations: %d\n", MAX_ITERATIONS);
  printf("Convergence threshold: %lf\n", CONVERGENCE_THRESHOLD);
  printf(SEPARATOR);
  double total_start = get_timestamp();
  // Initialize data: random entries, then add the row sum to the diagonal
  // to make A diagonally dominant (guarantees Jacobi convergence)
  srand(SEED);
  int row, col;
  for (row = 0; row < N; row++) {
    int ridx = row * N;
    float rowsum = 0.0;
    for (col = 0; col < N; col++) {
      float value = rand()/(float)RAND_MAX;
      ta[ridx + col] = value;
      rowsum += value;
    }
    ta[ridx + row] += rowsum;
    b[row] = rand()/(float)RAND_MAX;
    x[row] = 0.0;
  }
  omp_set_num_threads(NUM_THREADS);
  // Parallel first-touch copy of the staging buffer into A
  #pragma omp parallel for private(col)
  for (row = 0; row < N; row++) {
    for (col = 0; col < N; col++)
      A[row * N + col] = ta[row * N + col];
  }
  free(ta);  // staging buffer no longer needed (was leaked in the original)
  // Run Jacobi solver
  double solve_start = get_timestamp();
  int itr = run(A, b, x, xtmp);
  double solve_end = get_timestamp();
  // Check error of final solution: err = ||b - A*x||
  double err = 0.0;
  for (int row = 0, ridx = 0; row < N; row++, ridx += N) {
    double tmp = 0.0;
    for (int col = 0; col < N; col++) {
      tmp += A[ridx + col] * x[col];
    }
    tmp = b[row] - tmp;
    err += tmp*tmp;
  }
  err = sqrt(err);
  double total_end = get_timestamp();
  printf("Solution error = %lf\n", err);
  printf("Iterations = %d\n", itr);
  printf("Total runtime = %lf seconds\n", (total_end-total_start));
  printf("Solver runtime = %lf seconds\n", (solve_end-solve_start));
  if (itr == MAX_ITERATIONS)
    printf("WARNING: solution did not converge\n");
  printf(SEPARATOR);
  free(A);
  free(b);
  free(x);
  free(xtmp);
  return 0;
}
// Return the current time in seconds since the Epoch, with
// microsecond resolution.
double get_timestamp() {
  struct timeval now;
  gettimeofday(&now, NULL);
  return now.tv_sec + now.tv_usec * 1e-6;
}
// Parse a base-10 integer from str.
// Returns -1 when str has trailing non-numeric characters; otherwise the
// parsed value (an empty string parses as 0, as before).
//
// BUG FIX: the original used strtoul for a SIGNED parse, so negative inputs
// wrapped through unsigned long and reached int via an implementation-
// defined conversion.  strtol parses the sign directly and portably.
int parse_int(const char *str) {
  char *next;
  long value = strtol(str, &next, 10);
  return strlen(next) ? -1 : (int) value;
}
// Parse a floating-point number from str.
// Returns -1 when str has trailing non-numeric characters; otherwise the
// parsed value.
double parse_double(const char *str) {
  char *end;
  double value = strtod(str, &end);
  return (*end != '\0') ? -1 : value;
}
// Parse command line arguments to set solver parameters.
// Recognized flags: -c/--convergence, -i/--iterations, -n/--norder,
// -s/--seed, -h/--help.  Exits with status 1 on any invalid value or
// unknown flag, and with status 0 after printing the help text.
void parse_arguments(int argc, char *argv[]) {
  // Set default values
  N = 1000;
  MAX_ITERATIONS = 20000;
  CONVERGENCE_THRESHOLD = 0.0001;
  SEED = 0;
  for (int i = 1; i < argc; i++) {
    if (!strcmp(argv[i], "--convergence") || !strcmp(argv[i], "-c")) {
      // ++i consumes the flag's value; the parsers return -1 (negative)
      // on malformed input, so "< 0" rejects both missing and bad values
      if (++i >= argc || (CONVERGENCE_THRESHOLD = parse_double(argv[i])) < 0) {
        printf("Invalid convergence threshold\n");
        exit(1);
      }
    } else if (!strcmp(argv[i], "--iterations") || !strcmp(argv[i], "-i")) {
      if (++i >= argc || (MAX_ITERATIONS = parse_int(argv[i])) < 0) {
        printf("Invalid number of iterations\n");
        exit(1);
      }
    } else if (!strcmp(argv[i], "--norder") || !strcmp(argv[i], "-n")) {
      if (++i >= argc || (N = parse_int(argv[i])) < 0) {
        printf("Invalid matrix order\n");
        exit(1);
      }
    } else if (!strcmp(argv[i], "--seed") || !strcmp(argv[i], "-s")) {
      if (++i >= argc || (SEED = parse_int(argv[i])) < 0) {
        printf("Invalid seed\n");
        exit(1);
      }
    } else if (!strcmp(argv[i], "--help") || !strcmp(argv[i], "-h")) {
      printf("\n");
      printf("Usage: ./jacobi [OPTIONS]\n\n");
      printf("Options:\n");
      printf("  -h  --help               Print this message\n");
      printf("  -c  --convergence  C     Set convergence threshold\n");
      printf("  -i  --iterations   I     Set maximum number of iterations\n");
      printf("  -n  --norder       N     Set maxtrix order\n");
      printf("  -s  --seed         S     Set random number seed\n");
      printf("\n");
      exit(0);
    } else {
      printf("Unrecognized argument '%s' (try '--help')\n", argv[i]);
      exit(1);
    }
  }
}
|
ocp_nlp_sqp.c | /*
* Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren, Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor, Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan, Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
*
* This file is part of acados.
*
* The 2-Clause BSD License
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "acados/ocp_nlp/ocp_nlp_sqp.h"
// external
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#if defined(ACADOS_WITH_OPENMP)
#include <omp.h>
#endif
// blasfeo
#include "blasfeo/include/blasfeo_d_aux.h"
#include "blasfeo/include/blasfeo_d_aux_ext_dep.h"
#include "blasfeo/include/blasfeo_d_blas.h"
// acados
#include "acados/ocp_nlp/ocp_nlp_common.h"
#include "acados/ocp_nlp/ocp_nlp_dynamics_cont.h"
#include "acados/ocp_nlp/ocp_nlp_reg_common.h"
#include "acados/ocp_qp/ocp_qp_common.h"
#include "acados/sim/sim_common.h"
#include "acados/utils/math.h"
#include "acados/utils/mem.h"
#include "acados/utils/print.h"
#include "acados/utils/timing.h"
#include "acados/utils/types.h"
/************************************************
* options
************************************************/
/* Returns the number of bytes needed to hold an ocp_nlp_sqp_opts struct
 * together with the option structs of the QP solver, the regularization
 * module, and the per-stage dynamics (N), cost (N+1) and constraints (N+1)
 * submodules.
 * NOTE: the accounting here must stay in exact sync with
 * ocp_nlp_sqp_opts_assign, which carves this memory up in the same order. */
int ocp_nlp_sqp_opts_calculate_size(void *config_, void *dims_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;
    int N = dims->N;
    int size = 0;
    size += sizeof(ocp_nlp_sqp_opts);
    size += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver);
    size += config->regularize->opts_calculate_size();
    // dynamics: pointer array plus one opts struct per shooting interval
    size += N * sizeof(void *);
    for (int ii = 0; ii < N; ii++)
    {
        size += dynamics[ii]->opts_calculate_size(dynamics[ii], dims->dynamics[ii]);
    }
    // cost: pointer array plus one opts struct per stage (including stage N)
    size += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        size += cost[ii]->opts_calculate_size(cost[ii], dims->cost[ii]);
    }
    // constraints: pointer array plus one opts struct per stage
    size += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        size += constraints[ii]->opts_calculate_size(constraints[ii], dims->constraints[ii]);
    }
    return size;
}
/* Lays out an ocp_nlp_sqp_opts struct and all submodule option structs
 * inside raw_memory and returns a pointer to the top-level struct.
 * raw_memory must provide at least ocp_nlp_sqp_opts_calculate_size bytes;
 * the carving order here mirrors that function exactly, which the closing
 * assert checks. */
void *ocp_nlp_sqp_opts_assign(void *config_, void *dims_, void *raw_memory)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;
    int N = dims->N;
    char *c_ptr = (char *) raw_memory;
    // top-level opts struct sits at the start of the block
    ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) c_ptr;
    c_ptr += sizeof(ocp_nlp_sqp_opts);
    opts->qp_solver_opts = qp_solver->opts_assign(qp_solver, dims->qp_solver, c_ptr);
    c_ptr += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver);
    opts->regularize = config->regularize->opts_assign(c_ptr);
    c_ptr += config->regularize->opts_calculate_size();
    // dynamics: pointer array, then one opts struct per shooting interval
    opts->dynamics = (void **) c_ptr;
    c_ptr += N * sizeof(void *);
    for (int ii = 0; ii < N; ii++)
    {
        opts->dynamics[ii] = dynamics[ii]->opts_assign(dynamics[ii], dims->dynamics[ii], c_ptr);
        c_ptr += dynamics[ii]->opts_calculate_size(dynamics[ii], dims->dynamics[ii]);
    }
    // cost: pointer array, then one opts struct per stage (including N)
    opts->cost = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        opts->cost[ii] = cost[ii]->opts_assign(cost[ii], dims->cost[ii], c_ptr);
        c_ptr += cost[ii]->opts_calculate_size(cost[ii], dims->cost[ii]);
    }
    // constraints: pointer array, then one opts struct per stage
    opts->constraints = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        opts->constraints[ii] =
            constraints[ii]->opts_assign(constraints[ii], dims->constraints[ii], c_ptr);
        c_ptr += constraints[ii]->opts_calculate_size(constraints[ii], dims->constraints[ii]);
    }
    // verify we did not run past the declared size
    assert((char *) raw_memory + ocp_nlp_sqp_opts_calculate_size(config, dims) >= c_ptr);
    return opts;
}
/* Fills an assigned ocp_nlp_sqp_opts with default values: SQP-level
 * settings (iteration limit, tolerances, workspace reuse, thread count),
 * then delegates to every submodule's own opts_initialize_default.  The
 * SQP tolerances are also pushed down to the QP solver so the two layers
 * agree. */
void ocp_nlp_sqp_opts_initialize_default(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;
    ocp_nlp_reg_config *regularize = config->regularize;
    int ii;
    int N = dims->N;
    // SQP opts
    opts->max_iter = 20;
    opts->tol_stat = 1e-8;
    opts->tol_eq = 1e-8;
    opts->tol_ineq = 1e-8;
    opts->tol_comp = 1e-8;
    opts->reuse_workspace = 1;
#if defined(ACADOS_WITH_OPENMP)
    opts->num_threads = ACADOS_NUM_THREADS;
#endif
    opts->ext_qp_res = 0;
    opts->qp_warm_start = 0;
    // submodules opts
    // qp solver
    qp_solver->opts_initialize_default(qp_solver, dims->qp_solver, opts->qp_solver_opts);
    // overwrite the QP solver's own default tolerances with the SQP ones
    qp_solver->opts_set(qp_solver, opts->qp_solver_opts, "tol_stat", &opts->tol_stat);
    qp_solver->opts_set(qp_solver, opts->qp_solver_opts, "tol_eq", &opts->tol_eq);
    qp_solver->opts_set(qp_solver, opts->qp_solver_opts, "tol_ineq", &opts->tol_ineq);
    qp_solver->opts_set(qp_solver, opts->qp_solver_opts, "tol_comp", &opts->tol_comp);
    // regularization
    regularize->opts_initialize_default(regularize, dims->regularize, opts->regularize);
    // dynamics
    for (ii = 0; ii < N; ii++)
    {
        dynamics[ii]->opts_initialize_default(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
    }
    // cost
    for (ii = 0; ii <= N; ii++)
    {
        cost[ii]->opts_initialize_default(cost[ii], dims->cost[ii], opts->cost[ii]);
    }
    // constraints
    for (ii = 0; ii <= N; ii++)
    {
        constraints[ii]->opts_initialize_default(constraints[ii], dims->constraints[ii],
                                                 opts->constraints[ii]);
    }
    return;
}
/* Propagates option changes to every submodule by calling its opts_update:
 * the QP solver, the N dynamics modules, and the N+1 cost and constraints
 * modules.  The SQP-level fields themselves are not touched here. */
void ocp_nlp_sqp_opts_update(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;
    int ii;
    int N = dims->N;
    qp_solver->opts_update(qp_solver, dims->qp_solver, opts->qp_solver_opts);
    // dynamics
    for (ii = 0; ii < N; ii++)
    {
        dynamics[ii]->opts_update(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
    }
    // cost
    for (ii = 0; ii <= N; ii++)
    {
        cost[ii]->opts_update(cost[ii], dims->cost[ii], opts->cost[ii]);
    }
    // constraints
    for (ii = 0; ii <= N; ii++)
    {
        constraints[ii]->opts_update(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
    }
    return;
}
/* Sets one named option.  Fields prefixed "qp_" are forwarded to the QP
 * solver (with the prefix stripped); everything else is an SQP-level
 * option.  The tolerance fields are mirrored into the QP solver so both
 * layers use the same stopping criteria.  Unknown fields abort.
 *
 * BUG FIX: when `field` contains no '_', ptr_module stays NULL and the
 * original passed NULL to strcmp (undefined behavior).  The module
 * comparison is now guarded by a NULL check. */
void ocp_nlp_sqp_opts_set(void *config_, void *opts_, const char *field, void* value)
{
    ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) opts_;
    ocp_nlp_config *config = config_;
    int ii;
    char module[MAX_STR_LEN];
    char *ptr_module = NULL;
    int module_length = 0;
    // extract module name: the part of `field` before the first '_'
    char *char_ = strchr(field, '_');
    if(char_!=NULL)
    {
        module_length = char_-field;
        for(ii=0; ii<module_length; ii++)
            module[ii] = field[ii];
        module[module_length] = '\0'; // add end of string
        ptr_module = module;
    }
    // pass options to QP module (guard against fields without a '_' prefix,
    // for which ptr_module is NULL)
    if(ptr_module!=NULL && !strcmp(ptr_module, "qp"))
    {
        config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, field+module_length+1, value);
        if(!strcmp(field, "qp_warm_start"))
        {
            // keep a local copy so the SQP loop can toggle warm starting
            int* i_ptr = (int *) value;
            opts->qp_warm_start = *i_ptr;
        }
    }
    else // nlp opts
    {
        if (!strcmp(field, "max_iter"))
        {
            int* max_iter = (int *) value;
            opts->max_iter = *max_iter;
        }
        else if (!strcmp(field, "reuse_workspace"))
        {
            int* reuse_workspace = (int *) value;
            opts->reuse_workspace = *reuse_workspace;
        }
        else if (!strcmp(field, "num_threads"))
        {
            int* num_threads = (int *) value;
            opts->num_threads = *num_threads;
        }
        else if (!strcmp(field, "tol_stat")) // TODO rename !!!
        {
            double* tol_stat = (double *) value;
            opts->tol_stat = *tol_stat;
            // pass to QP too
            config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "tol_stat", value);
        }
        else if (!strcmp(field, "tol_eq")) // TODO rename !!!
        {
            double* tol_eq = (double *) value;
            opts->tol_eq = *tol_eq;
            // pass to QP too
            config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "tol_eq", value);
        }
        else if (!strcmp(field, "tol_ineq")) // TODO rename !!!
        {
            double* tol_ineq = (double *) value;
            opts->tol_ineq = *tol_ineq;
            // pass to QP too
            config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "tol_ineq", value);
        }
        else if (!strcmp(field, "tol_comp")) // TODO rename !!!
        {
            double* tol_comp = (double *) value;
            opts->tol_comp = *tol_comp;
            // pass to QP too
            config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "tol_comp", value);
        }
        else if (!strcmp(field, "exact_hess"))
        {
            int N = config->N;
            // cost
            for (ii=0; ii<=N; ii++)
                config->cost[ii]->opts_set(config->cost[ii], opts->cost[ii], "exact_hess", value);
            // dynamics
            for (ii=0; ii<N; ii++)
                config->dynamics[ii]->opts_set(config->dynamics[ii], opts->dynamics[ii], "compute_hess", value);
            // constraints TODO disabled for now as prevents convergence !!!
            // for (ii=0; ii<=N; ii++)
            //     config->constraints[ii]->opts_set(config->constraints[ii], opts->constraints[ii], "compute_hess", value);
        }
        else if (!strcmp(field, "ext_qp_res"))
        {
            int* ext_qp_res = (int *) value;
            opts->ext_qp_res = *ext_qp_res;
        }
        else
        {
            printf("\nerror: ocp_nlp_sqp_opts_set: wrong field: %s\n", field);
            exit(1);
        }
    }
    return;
}
/* Forwards a per-stage option to the dynamics submodule at `stage`. */
void ocp_nlp_sqp_dynamics_opts_set(void *config_, void *opts_, int stage,
                                   const char *field, void *value)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    config->dynamics[stage]->opts_set(config->dynamics[stage],
                                      opts->dynamics[stage], field, value);
    return;
}
/* Forwards a per-stage option to the cost submodule at `stage`. */
void ocp_nlp_sqp_cost_opts_set(void *config_, void *opts_, int stage,
                               const char *field, void *value)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    config->cost[stage]->opts_set(config->cost[stage], opts->cost[stage],
                                  field, value);
    return;
}
/* Forwards a per-stage option to the constraints submodule at `stage`.
 * The cast to (char *) matches the submodule's opts_set signature. */
void ocp_nlp_sqp_constraints_opts_set(void *config_, void *opts_, int stage,
                                      const char *field, void *value)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_constraints_config *con = config->constraints[stage];
    con->opts_set(con, opts->constraints[stage], (char *) field, value);
    return;
}
/************************************************
* memory
************************************************/
/* Returns the number of bytes needed for the SQP solver's persistent
 * memory: the top-level struct, QP solver and regularization memory,
 * per-stage submodule memories, the NLP residuals and memory, and the
 * per-iteration statistics table (6 columns, plus 4 when external QP
 * residuals are enabled).
 * NOTE: the accounting must stay in exact sync with
 * ocp_nlp_sqp_memory_assign. */
int ocp_nlp_sqp_memory_calculate_size(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;
    // extract dims
    int N = dims->N;
    // ocp_nlp_cost_dims **cost_dims = dims->cost;
    // int ny;
    int size = 0;
    size += sizeof(ocp_nlp_sqp_memory);
    // qp solver
    size += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
    // regularization
    size += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize);
    // dynamics: pointer array plus one memory block per shooting interval
    size += N * sizeof(void *);
    for (int ii = 0; ii < N; ii++)
    {
        size += dynamics[ii]->memory_calculate_size(dynamics[ii], dims->dynamics[ii],
                                                    opts->dynamics[ii]);
    }
    // cost: pointer array plus one memory block per stage (including N)
    size += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        size += cost[ii]->memory_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
    }
    // constraints: pointer array plus one memory block per stage
    size += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        size += constraints[ii]->memory_calculate_size(constraints[ii], dims->constraints[ii],
                                                       opts->constraints[ii]);
    }
    // nlp res
    size += ocp_nlp_res_calculate_size(dims);
    // nlp mem
    size += ocp_nlp_memory_calculate_size(config, dims);
    // stat: one row per SQP iteration (plus the initial point)
    int stat_m = opts->max_iter+1;
    int stat_n = 6;
    if(opts->ext_qp_res)
        stat_n += 4;
    size += stat_n*stat_m*sizeof(double);
    size += 8; // initial align
    // make_int_multiple_of(64, &size);
    return size;
}
/* Lays out the SQP solver's persistent memory inside raw_memory and
 * returns a pointer to the top-level ocp_nlp_sqp_memory.  raw_memory must
 * provide at least ocp_nlp_sqp_memory_calculate_size bytes; the carving
 * order mirrors that function, which the closing assert checks. */
void *ocp_nlp_sqp_memory_assign(void *config_, void *dims_, void *opts_, void *raw_memory)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;
    char *c_ptr = (char *) raw_memory;
    // extract dims
    int N = dims->N;
    // ocp_nlp_cost_dims **cost_dims = dims->cost;
    // int ny;
    // initial align
    align_char_to(8, &c_ptr);
    ocp_nlp_sqp_memory *mem = (ocp_nlp_sqp_memory *) c_ptr;
    c_ptr += sizeof(ocp_nlp_sqp_memory);
    // QP solver
    mem->qp_solver_mem =
        qp_solver->memory_assign(qp_solver, dims->qp_solver, opts->qp_solver_opts, c_ptr);
    c_ptr += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
    // regularization
    mem->regularize_mem = config->regularize->memory_assign(config->regularize, dims->regularize, opts->regularize, c_ptr);
    c_ptr += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize);
    // nlp res
    mem->nlp_res = ocp_nlp_res_assign(dims, c_ptr);
    c_ptr += mem->nlp_res->memsize;
    // nlp mem
    mem->nlp_mem = ocp_nlp_memory_assign(config, dims, c_ptr);
    c_ptr += ocp_nlp_memory_calculate_size(config, dims);
    // dynamics: pointer array, then one memory block per shooting interval
    mem->dynamics = (void **) c_ptr;
    c_ptr += N * sizeof(void *);
    for (int ii = 0; ii < N; ii++)
    {
        mem->dynamics[ii] = dynamics[ii]->memory_assign(dynamics[ii], dims->dynamics[ii],
                                                        opts->dynamics[ii], c_ptr);
        c_ptr += dynamics[ii]->memory_calculate_size(dynamics[ii], dims->dynamics[ii],
                                                     opts->dynamics[ii]);
    }
    // cost: pointer array, then one memory block per stage (including N)
    mem->cost = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        mem->cost[ii] = cost[ii]->memory_assign(cost[ii], dims->cost[ii], opts->cost[ii], c_ptr);
        c_ptr += cost[ii]->memory_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
    }
    // constraints: pointer array, then one memory block per stage
    mem->constraints = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        mem->constraints[ii] = constraints[ii]->memory_assign(
            constraints[ii], dims->constraints[ii], opts->constraints[ii], c_ptr);
        c_ptr += constraints[ii]->memory_calculate_size(constraints[ii], dims->constraints[ii],
                                                        opts->constraints[ii]);
    }
    // stat table: (max_iter+1) rows x 6 columns (+4 with external QP residuals)
    mem->stat = (double *) c_ptr;
    mem->stat_m = opts->max_iter+1;
    mem->stat_n = 6;
    if(opts->ext_qp_res)
        mem->stat_n += 4;
    c_ptr += mem->stat_m*mem->stat_n*sizeof(double);
    mem->status = ACADOS_READY;
    // verify we did not run past the declared size
    assert((char *) raw_memory + ocp_nlp_sqp_memory_calculate_size(config, dims, opts) >= c_ptr);
    return mem;
}
/************************************************
* workspace
************************************************/
// Returns the number of workspace bytes needed by the SQP solver. Must stay in
// sync with ocp_nlp_sqp_cast_workspace, which carves the buffer in the same
// order. With opts->reuse_workspace (and no OpenMP), submodule workspaces are
// overlapped, so only the largest one is counted instead of the sum.
int ocp_nlp_sqp_workspace_calculate_size(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    // loop index
    int ii;

    // extract dims
    int N = dims->N;
    int *nx = dims->nx;
    int *nu = dims->nu;
    int *nz = dims->nz;

    int size = 0;
    int size_tmp = 0;
    int tmp;

    // sqp workspace struct itself
    size += sizeof(ocp_nlp_sqp_work);

    // array of pointers
    // cost
    size += (N + 1) * sizeof(void *);
    // dynamics
    size += N * sizeof(void *);
    // constraints
    size += (N + 1) * sizeof(void *);

    // qp in
    size += ocp_qp_in_calculate_size(qp_solver, dims->qp_solver);

    // qp out
    size += ocp_qp_out_calculate_size(qp_solver, dims->qp_solver);

    // extra QP residual structures only when debugging residuals are requested
    if(opts->ext_qp_res)
    {
        // qp res
        size += ocp_qp_res_calculate_size(dims->qp_solver);

        // qp res ws
        size += ocp_qp_res_workspace_calculate_size(dims->qp_solver);
    }

    if (opts->reuse_workspace)
    {
#if defined(ACADOS_WITH_OPENMP)
        // with OpenMP the submodules may run concurrently, so each needs its
        // own workspace: sum all contributions (no overlap possible)
        // qp solver
        size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
            opts->qp_solver_opts);
        // dynamics
        for (ii = 0; ii < N; ii++)
        {
            size += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii],
                opts->dynamics[ii]);
        }
        // cost
        for (ii = 0; ii <= N; ii++)
        {
            size += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
        }
        // constraints
        for (ii = 0; ii <= N; ii++)
        {
            size += constraints[ii]->workspace_calculate_size(constraints[ii],
                dims->constraints[ii], opts->constraints[ii]);
        }
#else
        // sequential execution: all submodule workspaces share one region,
        // so only the maximum over all submodules is added
        // qp solver
        tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
        size_tmp = tmp > size_tmp ? tmp : size_tmp;
        // dynamics
        for (ii = 0; ii < N; ii++)
        {
            tmp = dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }
        // cost
        for (ii = 0; ii <= N; ii++)
        {
            tmp = cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }
        // constraints
        for (ii = 0; ii <= N; ii++)
        {
            tmp = constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }
        size += size_tmp;
#endif
    }
    else
    {
        // no reuse: every submodule gets a dedicated workspace region
        // qp solver
        size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
            opts->qp_solver_opts);
        // dynamics
        for (ii = 0; ii < N; ii++)
        {
            size += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii],
                opts->dynamics[ii]);
        }
        // cost
        for (ii = 0; ii <= N; ii++)
        {
            size += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
        }
        // constraints
        for (ii = 0; ii <= N; ii++)
        {
            size += constraints[ii]->workspace_calculate_size(constraints[ii],
                dims->constraints[ii], opts->constraints[ii]);
        }
    }

    // dzduxt: per-stage (nu+nx) x nz sensitivity matrices of algebraic vars
    size += (N+1)*sizeof(struct blasfeo_dmat);
    for(ii=0; ii<=N; ii++)
        size += blasfeo_memsize_dmat(nu[ii]+nx[ii], nz[ii]);

    // z_alg: per-stage algebraic variable vectors
    size += (N+1)*sizeof(struct blasfeo_dvec);
    for(ii=0; ii<=N; ii++)
        size += blasfeo_memsize_dvec(nz[ii]);

    size += 1*8;  // blasfeo_str align
    size += 1*64; // blasfeo_mem align

    return size;
}
// TODO(all): introduce member "memsize" in all structures to make on-line cast cheaper (i.e. avoid
// to calculate size on-line)
// TODO(all): introduce member "memsize" in all structures to make on-line cast cheaper (i.e. avoid
// to calculate size on-line)
// Re-derives all pointers inside the raw workspace buffer. Called at the start
// of every solve/precompute since the buffer address may change between calls.
// Carving order and byte counts must mirror ocp_nlp_sqp_workspace_calculate_size;
// the trailing assert checks we stayed within the advertised size.
static void ocp_nlp_sqp_cast_workspace(void *config_, ocp_nlp_dims *dims, ocp_nlp_sqp_work *work,
    ocp_nlp_sqp_memory *mem, ocp_nlp_sqp_opts *opts)
{
    ocp_nlp_config *config = (ocp_nlp_config *) config_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    // loop index
    int ii;

    // extract dims
    int N = dims->N;
    int *nx = dims->nx;
    int *nu = dims->nu;
    int *nz = dims->nz;

    // sqp workspace struct sits at the start of the buffer
    char *c_ptr = (char *) work;
    c_ptr += sizeof(ocp_nlp_sqp_work);

    // array of pointers
    //
    work->dynamics = (void **) c_ptr;
    c_ptr += N * sizeof(void *);
    //
    work->cost = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);
    //
    work->constraints = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);

    // qp in
    work->qp_in = ocp_qp_in_assign(qp_solver, dims->qp_solver, c_ptr);
    c_ptr += ocp_qp_in_calculate_size(qp_solver, dims->qp_solver);

    // qp out
    work->qp_out = ocp_qp_out_assign(qp_solver, dims->qp_solver, c_ptr);
    c_ptr += ocp_qp_out_calculate_size(qp_solver, dims->qp_solver);

    // optional QP residual structures (debug feature)
    if(opts->ext_qp_res)
    {
        // qp res
        work->qp_res = ocp_qp_res_assign(dims->qp_solver, c_ptr);
        c_ptr += ocp_qp_res_calculate_size(dims->qp_solver);

        // qp res ws
        work->qp_res_ws = ocp_qp_res_workspace_assign(dims->qp_solver, c_ptr);
        c_ptr += ocp_qp_res_workspace_calculate_size(dims->qp_solver);
    }

    if (opts->reuse_workspace)
    {
#if defined(ACADOS_WITH_OPENMP)
        // concurrent execution: each submodule gets a disjoint region
        // qp solver
        work->qp_work = (void *) c_ptr;
        c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
            opts->qp_solver_opts);
        // dynamics
        for (int ii = 0; ii < N; ii++)
        {
            work->dynamics[ii] = c_ptr;
            c_ptr += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii],
                opts->dynamics[ii]);
        }
        // cost
        for (int ii = 0; ii <= N; ii++)
        {
            work->cost[ii] = c_ptr;
            c_ptr += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
        }
        // constraints
        for (int ii = 0; ii <= N; ii++)
        {
            work->constraints[ii] = c_ptr;
            c_ptr += constraints[ii]->workspace_calculate_size(constraints[ii],
                dims->constraints[ii], opts->constraints[ii]);
        }
#else
        // sequential execution: all submodule workspaces alias the SAME region
        // starting at c_ptr; only the largest size is skipped afterwards
        int size_tmp = 0;
        int tmp;
        // qp solver
        work->qp_work = (void *) c_ptr;
        tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
        size_tmp = tmp > size_tmp ? tmp : size_tmp;
        // dynamics
        for (int ii = 0; ii < N; ii++)
        {
            work->dynamics[ii] = c_ptr;
            tmp = dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }
        // cost
        for (int ii = 0; ii <= N; ii++)
        {
            work->cost[ii] = c_ptr;
            tmp = cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }
        // constraints
        for (int ii = 0; ii <= N; ii++)
        {
            work->constraints[ii] = c_ptr;
            tmp = constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }
        c_ptr += size_tmp;
#endif
    }
    else
    {
        // dedicated workspace per submodule
        // qp solver
        work->qp_work = (void *) c_ptr;
        c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
            opts->qp_solver_opts);
        // dynamics
        for (int ii = 0; ii < N; ii++)
        {
            work->dynamics[ii] = c_ptr;
            c_ptr += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii],
                opts->dynamics[ii]);
        }
        // cost
        for (int ii = 0; ii <= N; ii++)
        {
            work->cost[ii] = c_ptr;
            c_ptr += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
        }
        // constraints
        for (int ii = 0; ii <= N; ii++)
        {
            work->constraints[ii] = c_ptr;
            c_ptr += constraints[ii]->workspace_calculate_size(constraints[ii],
                dims->constraints[ii], opts->constraints[ii]);
        }
    }

    // blasfeo_str align
    align_char_to(8, &c_ptr);

    // dzduxt
    work->dzduxt = (struct blasfeo_dmat *) c_ptr;
    c_ptr += (N+1)*sizeof(struct blasfeo_dmat);

    // z_alg
    work->z_alg = (struct blasfeo_dvec *) c_ptr;
    c_ptr += (N+1)*sizeof(struct blasfeo_dvec);

    // blasfeo_mem align (matrix/vector payloads require 64-byte alignment)
    align_char_to(64, &c_ptr);

    // dzduxt
    for(ii=0; ii<=N; ii++)
    {
        blasfeo_create_dmat(nu[ii]+nx[ii], nz[ii], work->dzduxt+ii, c_ptr);
        c_ptr += blasfeo_memsize_dmat(nu[ii]+nx[ii], nz[ii]);
    }

    // z_alg
    for(ii=0; ii<=N; ii++)
    {
        blasfeo_create_dvec(nz[ii], work->z_alg+ii, c_ptr);
        c_ptr += blasfeo_memsize_dvec(nz[ii]);
    }

    // assert & return
    assert((char *) work + ocp_nlp_sqp_workspace_calculate_size(config, dims, opts) >= c_ptr);

    return;
}
/************************************************
* functions
************************************************/
// Run the per-stage initialize hook of every submodule (cost, dynamics,
// constraints) once before the SQP iterations start. Stages are independent,
// so the loop is trivially parallel under OpenMP.
static void initialize_qp(void *config_, ocp_nlp_dims *dims, ocp_nlp_in *nlp_in,
    ocp_nlp_out *nlp_out, ocp_nlp_sqp_opts *opts, ocp_nlp_sqp_memory *mem,
    ocp_nlp_sqp_work *work)
{
    ocp_nlp_config *config = (ocp_nlp_config *) config_;

    int N = dims->N;

#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (int stage = 0; stage <= N; stage++)
    {
        // cost module
        config->cost[stage]->initialize(config->cost[stage], dims->cost[stage],
            nlp_in->cost[stage], opts->cost[stage], mem->cost[stage], work->cost[stage]);
        // dynamics module (only defined on stages 0 .. N-1)
        if (stage < N)
            config->dynamics[stage]->initialize(config->dynamics[stage], dims->dynamics[stage],
                nlp_in->dynamics[stage], opts->dynamics[stage],
                mem->dynamics[stage], work->dynamics[stage]);
        // constraints module
        config->constraints[stage]->initialize(config->constraints[stage],
            dims->constraints[stage], nlp_in->constraints[stage], opts->constraints[stage],
            mem->constraints[stage], work->constraints[stage]);
    }

    return;
}
// Evaluates the multiple-shooting Lagrangian stage by stage (Hessian, gradients,
// constraint/dynamics residuals) and collects the results into the NLP memory,
// from where sqp_update_qp_vectors copies them into the QP. The first loop is
// stage-parallel; the second loop also reads stage i-1 data, which is safe here
// because the first loop has fully completed before it starts.
static void linearize_update_qp_matrices(void *config_, ocp_nlp_dims *dims, ocp_nlp_in *nlp_in,
    ocp_nlp_out *nlp_out, ocp_nlp_sqp_opts *opts,
    ocp_nlp_sqp_memory *mem, ocp_nlp_sqp_work *work)
{
    ocp_nlp_config *config = (ocp_nlp_config *) config_;

    // loop index
    int i;

    // extract dims
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    int *nu = dims->nu;
    int *ni = dims->ni;

    ocp_nlp_memory *nlp_mem = mem->nlp_mem;

    /* stage-wise multiple shooting lagrangian evaluation */
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (i = 0; i <= N; i++)
    {
        // init Hessian to 0 (each submodule below adds its own contribution)
        blasfeo_dgese(nu[i] + nx[i], nu[i] + nx[i], 0.0, work->qp_in->RSQrq+i, 0, 0);

        // dynamics
        if (i < N)
            config->dynamics[i]->update_qp_matrices(config->dynamics[i], dims->dynamics[i],
                nlp_in->dynamics[i], opts->dynamics[i], mem->dynamics[i], work->dynamics[i]);

        // cost
        config->cost[i]->update_qp_matrices(config->cost[i], dims->cost[i], nlp_in->cost[i],
            opts->cost[i], mem->cost[i], work->cost[i]);

        // constraints
        config->constraints[i]->update_qp_matrices(config->constraints[i], dims->constraints[i],
            nlp_in->constraints[i], opts->constraints[i], mem->constraints[i], work->constraints[i]);
    }

    /* collect stage-wise evaluations */
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (i=0; i <= N; i++)
    {
        // nlp mem: cost_grad
        struct blasfeo_dvec *cost_grad = config->cost[i]->memory_get_grad_ptr(mem->cost[i]);
        blasfeo_dveccp(nv[i], cost_grad, 0, nlp_mem->cost_grad + i, 0);

        // nlp mem: dyn_fun (shooting-gap residual, stages 0 .. N-1)
        if (i < N)
        {
            struct blasfeo_dvec *dyn_fun
                = config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]);
            blasfeo_dveccp(nx[i + 1], dyn_fun, 0, nlp_mem->dyn_fun + i, 0);
        }

        // nlp mem: dyn_adj
        if (i < N)
        {
            struct blasfeo_dvec *dyn_adj
                = config->dynamics[i]->memory_get_adj_ptr(mem->dynamics[i]);
            blasfeo_dveccp(nu[i] + nx[i], dyn_adj, 0, nlp_mem->dyn_adj + i, 0);
        }
        else
        {
            // terminal stage has no dynamics: zero its adjoint contribution
            blasfeo_dvecse(nu[N] + nx[N], 0.0, nlp_mem->dyn_adj + N, 0);
        }
        // add the adjoint contribution of the PREVIOUS stage's dynamics w.r.t. x_i
        // (reads mem->dynamics[i-1], written by the first loop, not this one)
        if (i > 0)
        {
            struct blasfeo_dvec *dyn_adj
                = config->dynamics[i-1]->memory_get_adj_ptr(mem->dynamics[i-1]);
            blasfeo_daxpy(nx[i], 1.0, dyn_adj, nu[i-1]+nx[i-1], nlp_mem->dyn_adj+i, nu[i],
                nlp_mem->dyn_adj+i, nu[i]);
        }

        // nlp mem: ineq_fun (lower and upper parts, hence 2*ni)
        struct blasfeo_dvec *ineq_fun =
            config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]);
        blasfeo_dveccp(2 * ni[i], ineq_fun, 0, nlp_mem->ineq_fun + i, 0);
        // nlp mem: ineq_adj
        struct blasfeo_dvec *ineq_adj =
            config->constraints[i]->memory_get_adj_ptr(mem->constraints[i]);
        blasfeo_dveccp(nv[i], ineq_adj, 0, nlp_mem->ineq_adj + i, 0);
    }

    // TODO(all): still to clean !!!!!!!!!!!!!
    for (i = 0; i <= N; i++)
    {
        // TODO(rien) where should the update happen??? move to qp update ???
        // TODO(all): fix and move where appropriate
        //        if(i<N)
        //        {
        //            ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i];
        //            sim_opts *opts = dynamics_opts->sim_solver;
        //            if (opts->scheme != NULL && opts->scheme->type != exact)
        //            {
        //                for (int_t j = 0; j < nx; j++)
        //                    BLASFEO_DVECEL(nlp_mem->cost_grad+i, nu+j) += work->sim_out[i]->grad[j];
        //                for (int_t j = 0; j < nu; j++)
        //                    BLASFEO_DVECEL(nlp_mem->cost_grad+i, j) += work->sim_out[i]->grad[nx+j];
        //            }
        //        }
    }

    return;
}
// update QP rhs for SQP (step prim var, abs dual var)
// TODO(all): move in dynamics, cost, constraints modules ???
// Copy the freshly evaluated NLP quantities (cost gradient, dynamics residual,
// inequality residual) from the NLP memory into the QP right-hand side.
// Updates: g (gradient), b (dynamics), d (inequalities) for every stage.
static void sqp_update_qp_vectors(void *config_, ocp_nlp_dims *dims, ocp_nlp_in *nlp_in,
    ocp_nlp_out *nlp_out, ocp_nlp_sqp_opts *opts,
    ocp_nlp_sqp_memory *mem, ocp_nlp_sqp_work *work)
{
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    // int *nu = dims->nu;
    int *ni = dims->ni;

    ocp_nlp_memory *nlp_mem = mem->nlp_mem;

#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (int stage = 0; stage <= N; stage++)
    {
        // g: cost gradient
        blasfeo_dveccp(nv[stage], nlp_mem->cost_grad + stage, 0, work->qp_in->rqz + stage, 0);

        // b: dynamics residual (stages 0 .. N-1 only)
        if (stage < N)
            blasfeo_dveccp(nx[stage + 1], nlp_mem->dyn_fun + stage, 0, work->qp_in->b + stage, 0);

        // d: inequality residual (lower and upper parts)
        blasfeo_dveccp(2 * ni[stage], nlp_mem->ineq_fun + stage, 0, work->qp_in->d + stage, 0);
    }

    return;
}
// Apply the QP solution to the NLP iterate: take a full Newton-type step in
// the primal variables and overwrite (rather than step) the dual variables
// and the algebraic variables.
// TODO(all): fix and move the adjoint seeding of the integrators where
// appropriate (S_adj from -qp_out->pi).
static void sqp_update_variables(void *config_, ocp_nlp_dims *dims, ocp_nlp_out *nlp_out,
    ocp_nlp_sqp_opts *opts, ocp_nlp_sqp_memory *mem,
    ocp_nlp_sqp_work *work)
{
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    // int *nu = dims->nu;
    int *ni = dims->ni;
    int *nz = dims->nz;

#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (int stage = 0; stage <= N; stage++)
    {
        // (full) step in the primal variables: ux += delta_ux (no line search)
        blasfeo_daxpy(nv[stage], 1.0, work->qp_out->ux + stage, 0, nlp_out->ux + stage, 0,
            nlp_out->ux + stage, 0);

        // absolute update of the dual variables from the QP multipliers
        if (stage < N)
            blasfeo_dveccp(nx[stage + 1], work->qp_out->pi + stage, 0, nlp_out->pi + stage, 0);
        blasfeo_dveccp(2 * ni[stage], work->qp_out->lam + stage, 0, nlp_out->lam + stage, 0);
        blasfeo_dveccp(2 * ni[stage], work->qp_out->t + stage, 0, nlp_out->t + stage, 0);

        // algebraic variables (defined on dynamics stages 0 .. N-1 only)
        if (stage < N)
            blasfeo_dveccp(nz[stage], work->z_alg + stage, 0, nlp_out->z + stage, 0);
    }

    return;
}
// Simple fixed-step Gauss-Newton based SQP routine
// Simple fixed-step Gauss-Newton based SQP routine.
//
// Per iteration: linearize the NLP, check the KKT residuals against the
// tolerances, regularize the Hessian, solve the QP, and apply a full step.
// Statistics for iteration k are stored row-wise in mem->stat:
//   [res_g res_b res_d res_m qp_status qp_iter (+4 QP residual norms if ext_qp_res)].
// Returns ACADOS_SUCCESS on convergence, ACADOS_MAXITER if max_iter is
// reached, or ACADOS_QP_FAILURE if the QP solver fails.
int ocp_nlp_sqp(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
    void *opts_, void *mem_, void *work_)
{
    // acados timer
    acados_timer timer0, timer1;

    // start timer
    acados_tic(&timer0);

    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_sqp_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    ocp_nlp_out *nlp_out = nlp_out_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;

    ocp_nlp_sqp_work *work = work_;
    // workspace pointers must be re-derived from the raw buffer on every call
    ocp_nlp_sqp_cast_workspace(config, dims, work, mem, opts);

    // zero timers
    double total_time = 0.0;
    mem->time_qp_sol = 0.0;
    mem->time_lin = 0.0;
    mem->time_reg = 0.0;
    mem->time_tot = 0.0;

    // extract dims
    int N = dims->N;

    int ii;

    int qp_iter = 0;
    int qp_status = 0;

#if defined(ACADOS_WITH_OPENMP)
    // backup number of threads
    int num_threads_bkp = omp_get_num_threads();
    // set number of threads
    omp_set_num_threads(opts->num_threads);
    #pragma omp parallel
    { // beginning of parallel region
#endif

    // alias to dynamics_memory: wire the submodule memories to the iterate
    // (nlp_out) and to the QP matrices they should write into
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    for (ii = 0; ii < N; ii++)
    {
        config->dynamics[ii]->memory_set_ux_ptr(nlp_out->ux+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_ux1_ptr(nlp_out->ux+ii+1, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_pi_ptr(nlp_out->pi+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_BAbt_ptr(work->qp_in->BAbt+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_RSQrq_ptr(work->qp_in->RSQrq+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_dzduxt_ptr(work->dzduxt+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_z_guess_ptr(nlp_out->z+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_z_alg_ptr(work->z_alg+ii, mem->dynamics[ii]);
    }

    // alias to cost_memory
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    for (ii = 0; ii <= N; ii++)
    {
        config->cost[ii]->memory_set_ux_ptr(nlp_out->ux + ii, mem->cost[ii]);
        config->cost[ii]->memory_set_z_alg_ptr(work->z_alg+ii, mem->cost[ii]);
        config->cost[ii]->memory_set_dzdux_tran_ptr(work->dzduxt+ii, mem->cost[ii]);
        config->cost[ii]->memory_set_RSQrq_ptr(work->qp_in->RSQrq + ii, mem->cost[ii]);
        config->cost[ii]->memory_set_Z_ptr(work->qp_in->Z + ii, mem->cost[ii]);
    }

    // alias to constraints_memory
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    for (ii = 0; ii <= N; ii++)
    {
        config->constraints[ii]->memory_set_ux_ptr(nlp_out->ux+ii, mem->constraints[ii]);
        config->constraints[ii]->memory_set_lam_ptr(nlp_out->lam+ii, mem->constraints[ii]);
        config->constraints[ii]->memory_set_DCt_ptr(work->qp_in->DCt+ii, mem->constraints[ii]);
        config->constraints[ii]->memory_set_RSQrq_ptr(work->qp_in->RSQrq+ii, mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxb_ptr(work->qp_in->idxb[ii], mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxs_ptr(work->qp_in->idxs[ii], mem->constraints[ii]);
    }

    // alias to regularize memory
    config->regularize->memory_set_RSQrq_ptr(dims->regularize, work->qp_in->RSQrq, mem->regularize_mem);
    config->regularize->memory_set_rq_ptr(dims->regularize, work->qp_in->rqz, mem->regularize_mem);
    config->regularize->memory_set_BAbt_ptr(dims->regularize, work->qp_in->BAbt, mem->regularize_mem);
    config->regularize->memory_set_b_ptr(dims->regularize, work->qp_in->b, mem->regularize_mem);
    config->regularize->memory_set_idxb_ptr(dims->regularize, work->qp_in->idxb, mem->regularize_mem);
    config->regularize->memory_set_DCt_ptr(dims->regularize, work->qp_in->DCt, mem->regularize_mem);
    config->regularize->memory_set_ux_ptr(dims->regularize, work->qp_out->ux, mem->regularize_mem);
    config->regularize->memory_set_pi_ptr(dims->regularize, work->qp_out->pi, mem->regularize_mem);
    config->regularize->memory_set_lam_ptr(dims->regularize, work->qp_out->lam, mem->regularize_mem);

    // copy sampling times into dynamics model
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    for (ii = 0; ii < N; ii++)
    {
        config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
    }

#if defined(ACADOS_WITH_OPENMP)
    } // end of parallel region
#endif

    // initialize QP
    initialize_qp(config, dims, nlp_in, nlp_out, opts, mem, work);

    // main sqp loop
    int sqp_iter = 0;
    for (; sqp_iter < opts->max_iter; sqp_iter++)
    {
        // printf("\n------- sqp iter %d (max_iter %d) --------\n", sqp_iter, opts->max_iter);
        // if(sqp_iter==2)
        //     exit(1);

        // start timer
        acados_tic(&timer1);

        // linearizate NLP and update QP matrices
        linearize_update_qp_matrices(config, dims, nlp_in, nlp_out, opts, mem, work);

        // stop timer
        mem->time_lin += acados_toc(&timer1);

        // update QP rhs for SQP (step prim var, abs dual var)
        sqp_update_qp_vectors(config, dims, nlp_in, nlp_out, opts, mem, work);

        // compute nlp residuals and their overall infinity norm
        ocp_nlp_res_compute(dims, nlp_in, nlp_out, mem->nlp_res, mem->nlp_mem);
        nlp_out->inf_norm_res = mem->nlp_res->inf_norm_res_g;
        nlp_out->inf_norm_res = (mem->nlp_res->inf_norm_res_b > nlp_out->inf_norm_res) ?
            mem->nlp_res->inf_norm_res_b :
            nlp_out->inf_norm_res;
        nlp_out->inf_norm_res = (mem->nlp_res->inf_norm_res_d > nlp_out->inf_norm_res) ?
            mem->nlp_res->inf_norm_res_d :
            nlp_out->inf_norm_res;
        nlp_out->inf_norm_res = (mem->nlp_res->inf_norm_res_m > nlp_out->inf_norm_res) ?
            mem->nlp_res->inf_norm_res_m :
            nlp_out->inf_norm_res;

        // save statistics (qp_status/qp_iter are from the PREVIOUS iteration)
        if (sqp_iter < mem->stat_m)
        {
            mem->stat[mem->stat_n*sqp_iter+0] = mem->nlp_res->inf_norm_res_g;
            mem->stat[mem->stat_n*sqp_iter+1] = mem->nlp_res->inf_norm_res_b;
            mem->stat[mem->stat_n*sqp_iter+2] = mem->nlp_res->inf_norm_res_d;
            mem->stat[mem->stat_n*sqp_iter+3] = mem->nlp_res->inf_norm_res_m;
            mem->stat[mem->stat_n*sqp_iter+4] = qp_status;
            mem->stat[mem->stat_n*sqp_iter+5] = qp_iter;
        }

        // exit conditions on residuals
        // NOTE: bitwise & on comparison results — equivalent to && here since
        // each operand is 0 or 1 (only short-circuiting is lost)
        if ((mem->nlp_res->inf_norm_res_g < opts->tol_stat) &
            (mem->nlp_res->inf_norm_res_b < opts->tol_eq) &
            (mem->nlp_res->inf_norm_res_d < opts->tol_ineq) &
            (mem->nlp_res->inf_norm_res_m < opts->tol_comp))
        {
            // printf("%d sqp iterations\n", sqp_iter);
            // print_ocp_qp_in(work->qp_in);

            // save sqp iterations number
            mem->sqp_iter = sqp_iter;
            nlp_out->sqp_iter = sqp_iter;

            // stop timer
            total_time += acados_toc(&timer0);

            // save time
            nlp_out->total_time = total_time;
            mem->time_tot = total_time;

#if defined(ACADOS_WITH_OPENMP)
            // restore number of threads
            omp_set_num_threads(num_threads_bkp);
#endif
            mem->status = ACADOS_SUCCESS;
            return mem->status;
        }

        // start timer
        acados_tic(&timer1);

        // regularize Hessian
        config->regularize->regularize_hessian(config->regularize, dims->regularize, opts->regularize, mem->regularize_mem);

        // stop timer
        mem->time_reg += acados_toc(&timer1);

        // printf("\n------- qp_in (sqp iter %d) --------\n", sqp_iter);
        // print_ocp_qp_in(work->qp_in);
        // if(sqp_iter==1)
        //     exit(1);

        // no warm start at first iteration (restored after the QP solve below)
        if(sqp_iter==0)
        {
            int tmp_int = 0;
            config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "warm_start", &tmp_int);
        }

        // start timer
        acados_tic(&timer1);

        // TODO move qp_out in memory !!!!! (it has to be preserved to do warm start)
        qp_status = qp_solver->evaluate(qp_solver, work->qp_in, work->qp_out, opts->qp_solver_opts, mem->qp_solver_mem, work->qp_work);

        // stop timer
        mem->time_qp_sol += acados_toc(&timer1);

        // start timer
        acados_tic(&timer1);

        // compute correct dual solution in case of Hessian regularization
        config->regularize->correct_dual_sol(config->regularize, dims->regularize, opts->regularize, mem->regularize_mem);

        // stop timer
        mem->time_reg += acados_toc(&timer1);

        // restore default warm start
        if(sqp_iter==0)
        {
            config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "warm_start", &opts->qp_warm_start);
        }

        // TODO move into QP solver memory ???
        nlp_out->qp_iter = ((ocp_qp_info *) work->qp_out->misc)->num_iter;
        qp_iter = ((ocp_qp_info *) work->qp_out->misc)->num_iter;

        // compute external QP residuals (for debugging)
        if(opts->ext_qp_res)
        {
            ocp_qp_res_compute(work->qp_in, work->qp_out, work->qp_res, work->qp_res_ws);
            // stored in the NEXT iteration's stat row, columns 6..9
            if (sqp_iter+1 < mem->stat_m)
                ocp_qp_res_compute_nrm_inf(work->qp_res, mem->stat+(mem->stat_n*(sqp_iter+1)+6));
            // printf("\nsqp_iter %d, res %e %e %e %e\n", sqp_iter, inf_norm_qp_res[0], inf_norm_qp_res[1], inf_norm_qp_res[2], inf_norm_qp_res[3]);
        }

        // printf("\n------- qp_out (sqp iter %d) ---------\n", sqp_iter);
        // print_ocp_qp_out(work->qp_out);
        // if(sqp_iter==1)
        //     exit(1);

        // QP failure: ACADOS_MAXITER from the QP is tolerated, anything else aborts
        if ((qp_status!=ACADOS_SUCCESS) & (qp_status!=ACADOS_MAXITER))
        {
            //   print_ocp_qp_in(work->qp_in);

            // save sqp iterations number
            mem->sqp_iter = sqp_iter;
            nlp_out->sqp_iter = sqp_iter;

            // stop timer
            total_time += acados_toc(&timer0);

            // save time
            mem->time_tot = total_time;
            nlp_out->total_time = total_time;

            printf("QP solver returned error status %d in iteration %d\n", qp_status, sqp_iter);
#if defined(ACADOS_WITH_OPENMP)
            // restore number of threads
            omp_set_num_threads(num_threads_bkp);
#endif
            mem->status = ACADOS_QP_FAILURE;
            return mem->status;
        }

        sqp_update_variables(config, dims, nlp_out, opts, mem, work);

        // ocp_nlp_dims_print(nlp_out->dims);
        // ocp_nlp_out_print(nlp_out);
        // exit(1);

        // ??? @rien
        //        for (int_t i = 0; i < N; i++)
        //        {
        //            ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i];
        //            sim_opts *opts = dynamics_opts->sim_solver;
        //            if (opts->scheme == NULL)
        //                continue;
        //            opts->sens_adj = (opts->scheme->type != exact);
        //            if (nlp_in->freezeSens) {
        //                // freeze inexact sensitivities after first SQP iteration !!
        //                opts->scheme->freeze = true;
        //            }
        //        }
    }

    // stop timer
    total_time += acados_toc(&timer0);

    // ocp_nlp_out_print(nlp_out);

    // save sqp iterations number
    mem->sqp_iter = sqp_iter;
    nlp_out->sqp_iter = sqp_iter;

    // save time
    mem->time_tot = total_time;
    nlp_out->total_time = total_time;

    // printf("%d sqp iterations\n", sqp_iter);
    // print_ocp_qp_in(work->qp_in);

    // maximum number of iterations reached
#if defined(ACADOS_WITH_OPENMP)
    // restore number of threads
    omp_set_num_threads(num_threads_bkp);
#endif
    mem->status = ACADOS_MAXITER;
    return mem->status;
}
// One-time precomputations before the first call to ocp_nlp_sqp: sets the
// sampling times "T" in every dynamics model and runs each dynamics module's
// own precompute hook (e.g. integrator setup).
// Returns ACADOS_SUCCESS, or the first failing module's status.
int ocp_nlp_sqp_precompute(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
    void *opts_, void *mem_, void *work_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_sqp_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    // ocp_nlp_out *nlp_out = nlp_out_;

    // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;

    ocp_nlp_sqp_work *work = work_;
    // workspace pointers must be re-derived from the raw buffer on every call
    ocp_nlp_sqp_cast_workspace(config, dims, work, mem, opts);

    // extract dims
    int N = dims->N;

    int status = ACADOS_SUCCESS;

    int ii;

    // TODO(all): consistency checks (currently not implemented)
    // TODO(all): flag to enable/disable checks
    for (ii = 0; ii <= N; ii++)
    {
        // TODO(all): check that ns in opt_var == ns in constraints
    }

    // precompute
    for (ii = 0; ii < N; ii++)
    {
        // set T
        config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);

        // dynamics precompute
        status = config->dynamics[ii]->precompute(config->dynamics[ii], dims->dynamics[ii],
            nlp_in->dynamics[ii], opts->dynamics[ii],
            mem->dynamics[ii], work->dynamics[ii]);
        if (status != ACADOS_SUCCESS) return status;
    }

    return status;
}
void ocp_nlp_sqp_get(void *config_, void *mem_, const char *field, void *return_value_)
{
// ocp_nlp_config *config = config_;
ocp_nlp_sqp_memory *mem = mem_;
if (!strcmp("sqp_iter", field))
{
int *value = return_value_;
*value = mem->sqp_iter;
}
else if (!strcmp("status", field))
{
int *value = return_value_;
*value = mem->status;
}
else if (!strcmp("time_tot", field) || !strcmp("tot_time", field))
{
double *value = return_value_;
*value = mem->time_tot;
}
else if (!strcmp("time_qp_sol", field) || !strcmp("time_qp", field))
{
double *value = return_value_;
*value = mem->time_qp_sol;
}
else if (!strcmp("time_lin", field))
{
double *value = return_value_;
*value = mem->time_lin;
}
else if (!strcmp("time_reg", field))
{
double *value = return_value_;
*value = mem->time_reg;
}
else if (!strcmp("nlp_res", field))
{
ocp_nlp_res **value = return_value_;
*value = mem->nlp_res;
}
else if (!strcmp("stat", field))
{
double **value = return_value_;
*value = mem->stat;
}
else if (!strcmp("stat_m", field))
{
int *value = return_value_;
*value = mem->stat_m;
}
else if (!strcmp("stat_n", field))
{
int *value = return_value_;
*value = mem->stat_n;
}
else
{
printf("\nerror: field %s not available in ocp_nlp_sqp_get\n", field);
exit(1);
}
}
// Populate an ocp_nlp_config with the SQP solver's function-pointer table.
// (Function designators decay to pointers, so the explicit & is omitted.)
void ocp_nlp_sqp_config_initialize_default(void *config_)
{
    ocp_nlp_config *config = (ocp_nlp_config *) config_;

    // options
    config->opts_calculate_size = ocp_nlp_sqp_opts_calculate_size;
    config->opts_assign = ocp_nlp_sqp_opts_assign;
    config->opts_initialize_default = ocp_nlp_sqp_opts_initialize_default;
    config->opts_update = ocp_nlp_sqp_opts_update;
    config->opts_set = ocp_nlp_sqp_opts_set;
    config->dynamics_opts_set = ocp_nlp_sqp_dynamics_opts_set;
    config->cost_opts_set = ocp_nlp_sqp_cost_opts_set;
    config->constraints_opts_set = ocp_nlp_sqp_constraints_opts_set;
    // memory and workspace
    config->memory_calculate_size = ocp_nlp_sqp_memory_calculate_size;
    config->memory_assign = ocp_nlp_sqp_memory_assign;
    config->workspace_calculate_size = ocp_nlp_sqp_workspace_calculate_size;
    // solver entry points
    config->evaluate = ocp_nlp_sqp;
    config->config_initialize_default = ocp_nlp_sqp_config_initialize_default;
    config->precompute = ocp_nlp_sqp_precompute;
    config->get = ocp_nlp_sqp_get;

    return;
}
|
path.c | #include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <omp.h>
#include "mt19937p.h"
//ldoc on
/**
* # The basic recurrence
*
* At the heart of the method is the following basic recurrence.
* If $l_{ij}^s$ represents the length of the shortest path from
* $i$ to $j$ that can be attained in at most $2^s$ steps, then
* $$
* l_{ij}^{s+1} = \min_k \{ l_{ik}^s + l_{kj}^s \}.
* $$
* That is, the shortest path of at most $2^{s+1}$ hops that connects
* $i$ to $j$ consists of two segments of length at most $2^s$, one
* from $i$ to $k$ and one from $k$ to $j$. Compare this with the
* following formula to compute the entries of the square of a
* matrix $A$:
* $$
* a_{ij}^2 = \sum_k a_{ik} a_{kj}.
* $$
* These two formulas are identical, save for the niggling detail that
* the latter has addition and multiplication where the former has min
* and addition. But the basic pattern is the same, and all the
* tricks we learned when discussing matrix multiplication apply -- or
* at least, they apply in principle. I'm actually going to be lazy
* in the implementation of `square`, which computes one step of
* this basic recurrence. I'm not trying to do any clever blocking.
* You may choose to be more clever in your assignment, but it is not
* required.
*
* The return value for `square` is true if `l` and `lnew` are
* identical, and false otherwise.
*/
// One step of the min-plus "matrix squaring" recurrence:
//   lnew[i][j] = min(lnew[i][j], min_k l[i][k] + l[k][j])
// where matrices are stored column-major (entry (i,j) at index j*n+i).
// Returns 1 if nothing changed (fixed point reached), 0 otherwise. The
// reduction(&&) combines the per-thread convergence flags.
int square(int n, // Number of nodes
           int* restrict l, // Partial distance at step s
           int* restrict lnew) // Partial distance at step s+1
{
    int done = 1;
    #pragma omp parallel for shared(l, lnew) reduction(&& : done)
    for (int col = 0; col < n; ++col) {
        for (int row = 0; row < n; ++row) {
            int best = lnew[col*n + row];
            for (int mid = 0; mid < n; ++mid) {
                // path row -> mid -> col
                int via = l[mid*n + row] + l[col*n + mid];
                if (via < best) {
                    best = via;
                    done = 0;
                }
            }
            lnew[col*n + row] = best;
        }
    }
    return done;
}
/**
*
* The value $l_{ij}^0$ is almost the same as the $(i,j)$ entry of
* the adjacency matrix, except for one thing: by convention, the
* $(i,j)$ entry of the adjacency matrix is zero when there is no
* edge between $i$ and $j$; but in this case, we want $l_{ij}^0$
* to be "infinite". It turns out that it is adequate to make
* $l_{ij}^0$ longer than the longest possible shortest path; if
* edges are unweighted, $n+1$ is a fine proxy for "infinite."
* The functions `infinitize` and `deinfinitize` convert back
* and forth between the zero-for-no-edge and $n+1$-for-no-edge
* conventions.
*/
// Convert zero-for-no-edge to (n+1)-for-no-edge: n+1 exceeds any possible
// unweighted shortest-path length, so it serves as "infinity".
static inline void infinitize(int n, int* l)
{
    const int no_edge = n + 1;
    const int total = n * n;
    for (int idx = 0; idx < total; ++idx)
        l[idx] = (l[idx] == 0) ? no_edge : l[idx];
}
// Inverse of infinitize: map the (n+1) "infinity" sentinel back to zero.
static inline void deinfinitize(int n, int* l)
{
    const int no_edge = n + 1;
    const int total = n * n;
    for (int idx = 0; idx < total; ++idx)
        l[idx] = (l[idx] == no_edge) ? 0 : l[idx];
}
/**
*
* Of course, any loop-free path in a graph with $n$ nodes can
* at most pass through every node in the graph. Therefore,
* once $2^s \geq n$, the quantity $l_{ij}^s$ is actually
* the length of the shortest path of any number of hops. This means
* we can compute the shortest path lengths for all pairs of nodes
* in the graph by $\lceil \lg n \rceil$ repeated squaring operations.
*
* The `shortest_path` routine attempts to save a little bit of work
* by only repeatedly squaring until two successive matrices are the
* same (as indicated by the return value of the `square` routine).
*/
/*
 * Compute all-pairs shortest path lengths in place: on entry `l` is the
 * n-by-n adjacency matrix (0 = no edge); on exit it holds the shortest
 * path length between every pair (0 if unreachable).  Uses repeated
 * min-plus squaring via `square` until a fixed point is reached.
 */
void shortest_paths(int n, int* restrict l)
{
    // Generate l_{ij}^0 from adjacency matrix representation
    infinitize(n, l);
    for (int i = 0; i < n*n; i += n+1)
        l[i] = 0;  // distance to self is always zero

    // Repeated squaring until nothing changes
    int* restrict lnew = (int*) calloc(n*n, sizeof(int));
    if (lnew == NULL) {
        // calloc was previously unchecked; fail loudly instead of
        // dereferencing NULL in memcpy below.
        fprintf(stderr, "shortest_paths: out of memory\n");
        exit(-1);
    }
    memcpy(lnew, l, n*n * sizeof(int));
    for (int done = 0; !done; ) {
        done = square(n, l, lnew);
        memcpy(l, lnew, n*n * sizeof(int));
    }
    free(lnew);
    deinfinitize(n, l);
}
/**
* # The random graph model
*
* Of course, we need to run the shortest path algorithm on something!
* For the sake of keeping things interesting, let's use a simple random graph
* model to generate the input data. The $G(n,p)$ model simply includes each
* possible edge with probability $p$, drops it otherwise -- doesn't get much
* simpler than that. We use a thread-safe version of the Mersenne twister
* random number generator in lieu of coin flips.
*/
/*
 * Build an Erdos-Renyi G(n,p) adjacency matrix: each off-diagonal entry
 * is 1 with probability p, 0 otherwise; the diagonal is forced to 0.
 * Uses a fixed Mersenne-twister seed so runs are reproducible.
 * Returns a heap-allocated n*n matrix the caller must free().
 */
int* gen_graph(int n, double p)
{
    int* l = calloc(n*n, sizeof(int));
    if (l == NULL) {
        // calloc was previously unchecked; the write loop below would
        // dereference NULL on allocation failure.
        fprintf(stderr, "gen_graph: out of memory\n");
        exit(-1);
    }
    struct mt19937p state;
    sgenrand(10302011UL, &state);
    for (int j = 0; j < n; ++j) {
        for (int i = 0; i < n; ++i)
            l[j*n+i] = (genrand(&state) < p);
        l[j*n+j] = 0;  // no self-loops
    }
    return l;
}
/**
* # Result checks
*
* Simple tests are always useful when tuning code, so I have included
* two of them. Since this computation doesn't involve floating point
* arithmetic, we should get bitwise identical results from run to
* run, even if we do optimizations that change the associativity of
* our computations. The function `fletcher16` computes a simple
* [simple checksum][wiki-fletcher] over the output of the
* `shortest_paths` routine, which we can then use to quickly tell
* whether something has gone wrong. The `write_matrix` routine
* actually writes out a text representation of the matrix, in case we
* want to load it into MATLAB to compare results.
*
* [wiki-fletcher]: http://en.wikipedia.org/wiki/Fletcher's_checksum
*/
/*
 * Fletcher-16 style checksum over `count` ints: two running sums modulo
 * 255, packed into one integer (high byte = second-order sum).
 */
int fletcher16(int* data, int count)
{
    int lo = 0;  /* running sum of the values        */
    int hi = 0;  /* running sum of the partial sums  */
    for (int k = 0; k < count; ++k) {
        lo = (lo + data[k]) % 255;
        hi = (hi + lo) % 255;
    }
    return (hi << 8) | lo;
}
/*
 * Dump an n-by-n matrix (column-major storage a[j*n+i]) to `fname` as
 * whitespace-separated rows, one matrix row per text line.  Exits on
 * failure to open the file.
 */
void write_matrix(const char* fname, int n, int* a)
{
    FILE* fp = fopen(fname, "w+");
    if (!fp) {
        fprintf(stderr, "Could not open output file: %s\n", fname);
        exit(-1);
    }
    for (int i = 0; i < n; ++i) {
        /* Row i is the i-th entry of every column. */
        for (int j = 0; j < n; ++j) {
            fprintf(fp, "%d ", a[j*n+i]);
        }
        fprintf(fp, "\n");
    }
    fclose(fp);
}
/**
* # The `main` event
*/
const char* usage =
"path.x -- Parallel all-pairs shortest path on a random graph\n"
"Flags:\n"
" - n -- number of nodes (200)\n"
" - p -- probability of including edges (0.05)\n"
" - i -- file name where adjacency matrix should be stored (none)\n"
" - o -- file name where output matrix should be stored (none)\n";
/*
 * Driver: parse flags, generate a random graph, run shortest_paths,
 * and report timing plus a Fletcher checksum of the result.
 */
int main(int argc, char** argv)
{
    int n    = 200;            // Number of nodes
    double p = 0.05;           // Edge probability
    const char* ifname = NULL; // Adjacency matrix file name
    const char* ofname = NULL; // Distance matrix file name

    // Option processing
    extern char* optarg;
    const char* optstring = "hn:d:p:o:i:";  // NOTE: 'd' is accepted but unused
    int c;
    while ((c = getopt(argc, argv, optstring)) != -1) {
        switch (c) {
        case 'h':
            fprintf(stderr, "%s", usage);
            return -1;
        case 'n': n = atoi(optarg); break;
        case 'p': p = atof(optarg); break;
        case 'o': ofname = optarg; break;
        case 'i': ifname = optarg; break;
        }
    }

    // Graph generation + output
    int* l = gen_graph(n, p);
    if (ifname)
        write_matrix(ifname, n, l);

    // Time the shortest paths code
    double t0 = omp_get_wtime();
    shortest_paths(n, l);
    double t1 = omp_get_wtime();

    // OMP_NUM_THREADS may be unset, in which case getenv returns NULL and
    // the old atoi(getenv(...)) crashed.  Fall back to the runtime's count.
    const char* nthreads_env = getenv("OMP_NUM_THREADS");
    int nthreads = nthreads_env ? atoi(nthreads_env) : omp_get_max_threads();
    printf("== OpenMP with %d threads\n", nthreads);
    printf("n: %d\n", n);
    printf("p: %g\n", p);
    printf("Time: %g\n", t1-t0);
    printf("Check: %X\n", fletcher16(l, n*n));

    // Generate output file
    if (ofname)
        write_matrix(ofname, n, l);

    // Clean up
    free(l);
    return 0;
}
|
tools.h | #ifndef YGGTOOLS_H_
#define YGGTOOLS_H_
#ifdef _MSC_VER
#ifndef _CRT_SECURE_NO_WARNINGS
#define _CRT_SECURE_NO_WARNINGS 1
#endif
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <errno.h>
#include <time.h>
#ifdef USE_OSR_YGG
struct complex_float{
float re;
float im;
};
struct complex_double{
double re;
double im;
};
struct complex_long_double{
long double re;
long double im;
};
typedef struct complex_float complex_float;
typedef struct complex_double complex_double;
typedef struct complex_long_double complex_long_double;
#define creal(x) x.re
#define crealf(x) x.re
#define creall(x) x.re
#define cimag(x) x.im
#define cimagf(x) x.im
#define cimagl(x) x.im
#else /*USE_OSR_YGG*/
#ifdef _MSC_VER
#ifdef __cplusplus
#include <complex>
typedef std::complex<float> complex_float;
typedef std::complex<double> complex_double;
typedef std::complex<long double> complex_long_double;
#ifndef creal
#define creal(x) x.real()
#define crealf(x) x.real()
#define creall(x) x.real()
#define cimag(x) x.imag()
#define cimagf(x) x.imag()
#define cimagl(x) x.imag()
#endif /*creal*/
#else /*__cplusplus*/
#include <complex.h>
typedef _Fcomplex complex_float;
typedef _Dcomplex complex_double;
typedef _Lcomplex complex_long_double;
#define print_complex(x) printf("%lf+%lfj\n", (double)(x._Val[0]), (double)(x._Val[1]))
#endif /*__cplusplus*/
#else // Unix
#ifdef __cplusplus
#include <complex>
typedef std::complex<float> complex_float;
typedef std::complex<double> complex_double;
typedef std::complex<long double> complex_long_double;
#ifndef creal
#define creal(x) x.real()
#define crealf(x) x.real()
#define creall(x) x.real()
#define cimag(x) x.imag()
#define cimagf(x) x.imag()
#define cimagl(x) x.imag()
#endif /*creal*/
#else /*__cplusplus*/
#include <complex.h>
typedef float _Complex complex_float;
typedef double _Complex complex_double;
typedef long double _Complex complex_long_double;
#endif /*__cplusplus*/
#endif /*Unix*/
#endif /*USE_OSR_YGG*/
#ifndef print_complex
#define print_complex(x) printf("%lf+%lfj\n", (double)creal(x), (double)cimag(x))
#endif
#ifdef __cplusplus /* If this is a C++ compiler, use C linkage */
extern "C" {
#endif
#include <math.h> // Required to prevent error when using mingw on windows
#ifdef _DEBUG
#undef _DEBUG
#include <Python.h>
#include <numpy/arrayobject.h>
#include <numpy/ndarrayobject.h>
#include <numpy/npy_common.h>
#define _DEBUG
#else
#include <Python.h>
#include <numpy/arrayobject.h>
#include <numpy/ndarrayobject.h>
#include <numpy/npy_common.h>
#endif
typedef struct complex_float_t {
float re;
float im;
} complex_float_t;
typedef struct complex_double_t {
double re;
double im;
} complex_double_t;
typedef struct complex_long_double_t {
long double re;
long double im;
} complex_long_double_t;
// Platform specific
#ifdef _WIN32
#include "regex/regex_win32.h"
#include "getline_win32.h"
#else
#include "regex_posix.h"
#endif
#ifdef _MSC_VER
#include "windows_stdint.h" // Use local copy for MSVC support
// Prevent windows.h from including winsock.h
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
#include <process.h>
#define ygg_getpid _getpid
#define sleep(tsec) Sleep(1000*tsec)
#define usleep(usec) Sleep(usec/1000)
#else
#include <stdint.h>
#include <unistd.h>
#define ygg_getpid getpid
#endif
#define STRBUFF 100
/*! @brief Maximum message size. */
#ifdef IPCDEF
#define YGG_MSG_MAX 2048
#else
#define YGG_MSG_MAX 1048576
#endif
/*! @brief End of file message. */
#define YGG_MSG_EOF "EOF!!!"
/*! @brief End of client message. */
#define YGG_CLIENT_EOF "YGG_END_CLIENT"
/*! @brief Reasonable size for buffer. */
#define YGG_MSG_BUF 2048
/*! @brief Sleep time in micro-seconds */
#define YGG_SLEEP_TIME ((int)250000)
/*! @brief Size for buffers to contain names of Python objects. */
#define PYTHON_NAME_SIZE 1000
/*! @brief Define old style names for compatibility. */
#define PSI_MSG_MAX YGG_MSG_MAX
#define PSI_MSG_BUF YGG_MSG_BUF
#define PSI_MSG_EOF YGG_MSG_EOF
#ifdef PSI_DEBUG
#define YGG_DEBUG PSI_DEBUG
#endif
static int _ygg_error_flag = 0;
/*! @brief Define macros to allow counts of variables. */
// https://codecraft.co/2014/11/25/variadic-macros-tricks/
#ifdef _MSC_VER
// https://stackoverflow.com/questions/48710758/how-to-fix-variadic-macro-related-issues-with-macro-overloading-in-msvc-mic
#define MSVC_BUG(MACRO, ARGS) MACRO ARGS // name to remind that bug fix is due to MSVC :-)
#define _GET_NTH_ARG_2(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, N, ...) N
#define _GET_NTH_ARG(...) MSVC_BUG(_GET_NTH_ARG_2, (__VA_ARGS__))
#define COUNT_VARARGS(...) _GET_NTH_ARG("ignored", ##__VA_ARGS__, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
#define VA_MACRO(MACRO, ...) MSVC_BUG(CONCATE, (MACRO, COUNT_VARARGS(__VA_ARGS__)))(__VA_ARGS__)
#else
#define _GET_NTH_ARG(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, N, ...) N
#define COUNT_VARARGS(...) _GET_NTH_ARG("ignored", ##__VA_ARGS__, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
#endif
#define UNUSED(arg) ((void)&(arg))
/*! @brief Memory to allow thread association to be set via macro. */
static int global_thread_id = -1;
#define ASSOCIATED_WITH_THREAD(COMM, THREAD) global_thread_id = THREAD; COMM; global_thread_id = -1;
#ifdef _OPENMP
#pragma omp threadprivate(global_thread_id)
#endif
/*!
@brief Get an unsigned long seed from the least significant 32bits of a pointer.
@param[in] ptr Pointer that should be turned into a seed.
@return Unsigned long seed.
*/
static inline
unsigned long ptr2seed(void *ptr) {
  // Reinterpret the address as an integer and keep only its least
  // significant 32 bits, which vary the most between allocations.
  const uint64_t bits = (uint64_t)ptr;
  return (unsigned long)(bits & 0xFFFFFFFFLL);
};
/*! @brief Structure used to wrap va_list and allow pointer passing.
@param va va_list Wrapped variable argument list.
*/
typedef struct va_list_t {
  va_list va;     //!< Wrapped traditional variable argument list.
  int using_ptrs; //!< If nonzero, arguments come from ptrs, not va.
  void **ptrs;    //!< Array of argument pointers (not owned by this struct).
  int nptrs;      //!< Number of entries in ptrs.
  int iptr;       //!< Index of the next entry in ptrs to consume.
  int for_fortran; //!< Nonzero when the list comes via the Fortran interface
                   //!< (presumably; set by callers outside this header -- TODO confirm).
} va_list_t;
/*! @brief Structure used to wrap Python objects. */
typedef struct python_t {
  char name[PYTHON_NAME_SIZE]; //!< Name of the Python object (assumed an
                               //!< import path -- TODO confirm with callers).
  void *args;   //!< Opaque positional arguments (NULL when unused).
  void *kwargs; //!< Opaque keyword arguments (NULL when unused).
  PyObject *obj; //!< Underlying CPython object (NULL until populated).
} python_t;
/*!
@brief Get the ID for the current thread (if inside one).
@returns int Thread ID.
*/
static inline
int get_thread_id() {
  // An id explicitly associated via ASSOCIATED_WITH_THREAD wins over any
  // runtime-detected id.
  if (global_thread_id >= 0)
    return global_thread_id;
#ifdef _OPENMP
  // Inside an OpenMP parallel region, report the OpenMP thread number.
  if (omp_in_parallel())
    return omp_get_thread_num();
#endif
  // Serial execution (or no OpenMP support): thread 0.
  return 0;
};
/*!
@brief Initialize a structure to contain a Python object.
@returns python_t New Python object structure.
*/
static inline
python_t init_python() {
  // All pointers NULL, name set to the empty string.
  python_t fresh;
  fresh.obj = NULL;
  fresh.kwargs = NULL;
  fresh.args = NULL;
  fresh.name[0] = '\0';
  return fresh;
};
/*!
@brief Initialize Numpy arrays if it is not initalized.
@returns int 0 if successful, other values indicate errors.
*/
static inline
int init_numpy_API() {
  int out = 0;
#ifdef _OPENMP
#pragma omp critical (numpy)
  {
#endif
    // _import_array populates PyArray_API; the NULL check makes this
    // idempotent and the critical section prevents concurrent init.
    if (PyArray_API == NULL) {
      if (_import_array() < 0) {
        PyErr_Print();
        out = -2;  // numpy could not be imported
      }
    }
#ifdef _OPENMP
  }
#endif
  return out;
};
/*!
@brief Initialize Python if it is not initialized.
@returns int 0 if successful, other values indicate errors.
*/
static inline
int init_python_API() {
  int out = 0;
#ifdef _OPENMP
#pragma omp critical (python)
  {
#endif
    if (!(Py_IsInitialized())) {
      // Allow overriding the interpreter executable via YGG_PYTHON_EXEC;
      // the program name must be set *before* Py_Initialize.
      char *name = getenv("YGG_PYTHON_EXEC");
      if (name != NULL) {
	wchar_t *wname = Py_DecodeLocale(name, NULL);
	if (wname == NULL) {
	  printf("Error decoding YGG_PYTHON_EXEC\n");
	  out = -1;
	} else {
	  Py_SetProgramName(wname);
	  PyMem_RawFree(wname);
	}
      }
      if (out >= 0) {
	Py_Initialize();
	if (!(Py_IsInitialized()))
	  out = -1;
      }
    }
    // Numpy is initialized whenever Python init succeeded (or was already
    // done by another caller).
    if (out >= 0) {
      out = init_numpy_API();
    }
#ifdef _OPENMP
  }
#endif
  return out;
};
//==============================================================================
/*!
Logging
  Aliases are set at compile-time based on the value of YGG_DEBUG: a value
  <= 10 enables the error, info and debug aliases; <= 20 enables error and
  info; <= 40 enables only error; larger values disable all logging. If
  YGG_DEBUG is not defined, only error messages are printed. If YGG_DEBUG is
  changed, any code including this header must be recompiled for the change
  to take effect.
*/
//==============================================================================
/*!
@brief Print a log message.
Prints a formatted message, prepending it with the process id and appending
it with a newline.
@param[in] prefix a constant character pointer to the prefix that should
preceed the message and process id.
@param[in] fmt a constant character pointer to a format string.
@param[in] ap va_list of arguments to be formatted in the format string.
*/
static inline
void yggLog(const char* prefix, const char* fmt, va_list ap) {
  // Emits "<PREFIX>: <pid>:<thread-id> <formatted message>\n" and flushes
  // immediately so interleaved multi-process logs stay readable.
  fprintf(stdout, "%s: %d:%d ", prefix, ygg_getpid(), get_thread_id());
  vfprintf(stdout, fmt, ap);
  fputc('\n', stdout);
  fflush(stdout);
};
/*!
@brief Print an info log message.
Prints a formatted message, prepending it with INFO and the process id. A
newline character is added to the end of the message.
@param[in] fmt a constant character pointer to a format string.
@param[in] ... arguments to be formatted in the format string.
*/
static inline
void yggInfo(const char* fmt, ...) {
  // Varargs front-end for yggLog with the "INFO" prefix.
  va_list args;
  va_start(args, fmt);
  yggLog("INFO", fmt, args);
  va_end(args);
};
/*!
@brief Print an debug log message.
Prints a formatted message, prepending it with DEBUG and the process id. A
newline character is added to the end of the message.
@param[in] fmt a constant character pointer to a format string.
@param[in] ... arguments to be formatted in the format string.
*/
static inline
void yggDebug(const char* fmt, ...) {
  // Varargs front-end for yggLog with the "DEBUG" prefix.
  va_list args;
  va_start(args, fmt);
  yggLog("DEBUG", fmt, args);
  va_end(args);
};
/*!
@brief Print an error log message from a variable argument list.
Prints a formatted message, prepending it with ERROR and the process id. A
newline character is added to the end of the message.
@param[in] fmt a constant character pointer to a format string.
@param[in] ap va_list Variable argument list.
@param[in] ... arguments to be formatted in the format string.
*/
static inline
void yggError_va(const char* fmt, va_list ap) {
  yggLog("ERROR", fmt, ap);
  // Record globally that at least one error has been logged.
  _ygg_error_flag = 1;
};
/*!
@brief Print an error log message.
Prints a formatted message, prepending it with ERROR and the process id. A
newline character is added to the end of the message.
@param[in] fmt a constant character pointer to a format string.
@param[in] ... arguments to be formatted in the format string.
*/
static inline
void yggError(const char* fmt, ...) {
  // Varargs front-end for yggError_va (logs and sets the error flag).
  va_list args;
  va_start(args, fmt);
  yggError_va(fmt, args);
  va_end(args);
};
#ifdef YGG_DEBUG
#if YGG_DEBUG <= 10
#define ygglog_error yggError
#define ygglog_info yggInfo
#define ygglog_debug yggDebug
#elif YGG_DEBUG <= 20
#define ygglog_error yggError
#define ygglog_info yggInfo
#define ygglog_debug while (0) yggDebug
#elif YGG_DEBUG <= 40
#define ygglog_error yggError
#define ygglog_info while (0) yggInfo
#define ygglog_debug while (0) yggDebug
#else
#define ygglog_error while (0) yggError
#define ygglog_info while (0) yggInfo
#define ygglog_debug while (0) yggDebug
#endif
#else
#define ygglog_error yggError
#define ygglog_info while (0) yggInfo
#define ygglog_debug while (0) yggDebug
#endif
/*!
@brief Get the length (in bytes) of a character array containing 4 byte
unicode characters.
@param[in] strarg char* Pointer to character array.
@returns size_t Length of strarg in bytes.
*/
static inline
size_t strlen4(char* strarg) {
  if(!strarg)
    return 0; //strarg is NULL pointer
  char* str = strarg;
  // Advance one 4-byte code unit at a time until a unit whose FIRST byte is
  // zero is found; assumes the terminator is aligned on a 4-byte boundary.
  for(;*str;str+=4)
    ; // empty body
  return (str - strarg); // length in bytes, a multiple of 4
}
/*!
@brief Called snprintf and realloc buffer if the formatted string is
larger than the provided buffer.
@param[in] dst char** Pointer to buffer where formatted message
should be stored.
@param[in,out] max_len size_t* Pointer to maximum size of buffer
that will be modified when the buffer is reallocated.
@param[in,out] offset size_t* Pointer to offset in buffer where the
formatted message should be stored. This will be updated to the end
of the updated message.
@param[in] format_str const char* Format string that should be used.
@param[in] ... Additional arguments are passed to snprintf as
parameters for formatting.
@returns int -1 if there is an error, otherwise the number of new
characters written to the buffer.
*/
static inline
int snprintf_realloc(char** dst, size_t* max_len, size_t* offset,
		     const char* format_str, ...) {
  va_list arglist;
  va_start(arglist, format_str);
  int fmt_len = 0;
  while (1) {
    // vsnprintf consumes its list, so format from a fresh copy each pass.
    va_list arglist_copy;
    va_copy(arglist_copy, arglist);
    fmt_len = vsnprintf(dst[0] + offset[0],
			max_len[0] - offset[0],
			format_str, arglist_copy);
    va_end(arglist_copy);  // required pairing for va_copy (was missing)
    if (fmt_len < 0) {
      // Encoding error from vsnprintf; previously this fell into the else
      // branch and corrupted offset by adding a negative length.
      ygglog_error("snprintf_realloc: Error formatting string.");
      fmt_len = -1;
      break;
    }
    // vsnprintf needs fmt_len+1 bytes including the NUL; ">=" also catches
    // the exact-fit case that ">" silently accepted as truncated output.
    if (fmt_len >= (int)(max_len[0] - offset[0])) {
      max_len[0] = max_len[0] + fmt_len + 1;
      char* temp = (char*)realloc(dst[0], max_len[0]);
      if (temp == NULL) {
	ygglog_error("snprintf_realloc: Error reallocating buffer.");
	fmt_len = -1;
	break;
      }
      dst[0] = temp;
    } else {
      // The formatted text fit; advance past it.
      offset[0] = offset[0] + fmt_len;
      break;
    }
  }
  va_end(arglist);
  return fmt_len;
};
/*!
@brief Check if a character array matches a message and is non-zero length.
@param[in] pattern constant character pointer to string that should be checked.
@param[in] buf constant character pointer to string that should be checked.
@returns int 1 if buf matches pattern, 0 otherwise.
*/
static inline
int not_empty_match(const char *pattern, const char *buf) {
if (buf == NULL)
return 0;
if (buf[0] == '\0')
return 0;
if (strcmp(buf, pattern) == 0) {
return 1;
} else {
return 0;
}
};
/*!
@brief Check if a character array matches the internal EOF message.
@param[in] buf constant character pointer to string that should be checked.
@returns int 1 if buf is the EOF message, 0 otherwise.
*/
static inline
int is_eof(const char *buf) {
  // Delegates to not_empty_match, so a zero-length buffer is never EOF.
  return not_empty_match(YGG_MSG_EOF, buf);
};
/*!
@brief Check if a character array matches "recv".
@param[in] buf constant character pointer to string that should be checked.
@returns int 1 if buf is the "recv" message, 0 otherwise.
*/
static inline
int is_recv(const char *buf) {
  // Exact, non-empty match against the literal "recv".
  return not_empty_match("recv", buf);
};
/*!
@brief Check if a character array matches "send".
@param[in] buf constant character pointer to string that should be checked.
@returns int 1 if buf is the "send" message, 0 otherwise.
*/
static inline
int is_send(const char *buf) {
  // Exact, non-empty match against the literal "send".
  return not_empty_match("send", buf);
};
/*!
@brief Initialize a variable argument list from an existing va_list.
@returns va_list_t New variable argument list structure.
*/
static inline
va_list_t init_va_list() {
  // Traditional va_list mode: the pointer-array fields are all cleared.
  // The wrapped va member itself is left for the caller to va_start.
  va_list_t out;
  out.for_fortran = 0;
  out.iptr = 0;
  out.nptrs = 0;
  out.ptrs = NULL;
  out.using_ptrs = 0;
  return out;
};
/*! Initialize a variable argument list from an array of pointers.
@param[in] nptrs int Number of pointers.
@param[in] ptrs void** Array of pointers.
@returns va_list_t New variable argument list structure.
*/
static inline
va_list_t init_va_ptrs(const int nptrs, void** ptrs) {
  // Pointer mode: arguments are read from ptrs[0..nptrs-1], starting at
  // index 0.  The array is referenced, not copied.
  va_list_t out;
  out.for_fortran = 0;
  out.iptr = 0;
  out.nptrs = nptrs;
  out.ptrs = ptrs;
  out.using_ptrs = 1;
  return out;
};
/*! Finalize a variable argument list.
@param[in] va_list_t Variable argument list.
*/
static inline
void end_va_list(va_list_t *ap) {
  // Only a real va_list needs va_end; pointer-array mode owns nothing.
  if (!(ap->using_ptrs)) {
    va_end(ap->va);
  }
};
/*! Copy a variable argument list.
@param[in] va_list_t Variable argument list structure to copy.
@returns va_list_t New variable argument list structure.
*/
static inline
va_list_t copy_va_list(va_list_t ap) {
  va_list_t out;
  if (ap.using_ptrs) {
    // Pointer mode: the copy aliases the same pointer array and preserves
    // the current read position.
    out = init_va_ptrs(ap.nptrs, ap.ptrs);
    out.iptr = ap.iptr;
  } else {
    // va_list must be duplicated with va_copy; the caller is responsible
    // for releasing the copy via end_va_list.
    out = init_va_list();
    va_copy(out.va, ap.va);
  }
  out.for_fortran = ap.for_fortran;
  return out;
};
/*! @brief Method for skipping a number of bytes in the argument list.
@param[in] ap va_list_t* Structure containing variable argument list.
@param[in] nbytes size_t Number of bytes that should be skipped.
*/
static inline
void va_list_t_skip(va_list_t *ap, size_t nbytes) {
  if (ap->using_ptrs) {
    // Pointer mode: skipping is just advancing the cursor.
    ap->iptr++;
  } else {
    // va_arg needs a type, so pick one whose size matches nbytes.
    if (nbytes == sizeof(void*)) {
      va_arg(ap->va, void*);
    } else if (nbytes == sizeof(size_t)) {
      va_arg(ap->va, size_t);
    } else if (nbytes == sizeof(char*)) {
      va_arg(ap->va, char*);
    } else {
      // %zu is the conversion for size_t; the previous %ld is undefined
      // behavior where sizeof(size_t) != sizeof(long) (e.g. 64-bit Windows,
      // which this header explicitly supports).
      printf("WARNING: Cannot get argument of size %zu.\n", nbytes);
      va_arg(ap->va, void*);
      // va_arg(ap->va, char[nbytes]);
    }
  }
};
#ifdef __cplusplus /* If this is a C++ compiler, end C linkage */
}
#endif
#endif /*YGGTOOLS_H_*/
|
hello_omp.c | #include <stdio.h>
#include <omp.h>
int main(int argc, char** argv){
    // Each OpenMP thread prints its own id; output order is unspecified.
    #pragma omp parallel
    {
        int tid = omp_get_thread_num();
        printf("Hello from process: %d\n", tid);
    }
    return 0;
}
|
GB_binop__eq_fc64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__eq_fc64)
// A.*B function (eWiseMult): GB (_AemultB_08__eq_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__eq_fc64)
// A.*B function (eWiseMult): GB (_AemultB_04__eq_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_fc64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__eq_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__eq_fc64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_fc64)
// C=scalar+B GB (_bind1st__eq_fc64)
// C=scalar+B' GB (_bind1st_tran__eq_fc64)
// C=A+scalar GB (_bind2nd__eq_fc64)
// C=A'+scalar GB (_bind2nd_tran__eq_fc64)
// C type: bool
// A type: GxB_FC64_t
// A pattern? 0
// B type: GxB_FC64_t
// B pattern? 0
// BinaryOp: cij = GB_FC64_eq (aij, bij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = (creal (GBX (Ax, pA, A_iso)) != 0) || (cimag (GBX (Ax, pA, A_iso)) != 0)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = (creal (GBX (Bx, pB, B_iso)) != 0) || (cimag (GBX (Bx, pB, B_iso)) != 0)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC64_eq (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EQ || GxB_NO_FC64 || GxB_NO_EQ_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// NOTE: auto-generated kernel (Generator/*); do not hand-edit.
// C = A+B with the EQ_FC64 operator, all three matrices dense, no accumulator.
void GB (_Cdense_ewise3_noaccum__eq_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads          // number of OpenMP threads to use
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// NOTE: auto-generated kernel (Generator/*); do not hand-edit.
GrB_Info GB (_Cdense_accumB__eq_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The template is compiled out (#if 0) for this operator, so the
    // kernel is a successful no-op.
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// NOTE: auto-generated kernel (Generator/*); do not hand-edit.
GrB_Info GB (_Cdense_accumb__eq_fc64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The template is compiled out (#if 0) for this operator, so the
    // kernel is a successful no-op.
    #if 0
    {
        // get the scalar b for C += b, of type GxB_FC64_t
        GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// NOTE: auto-generated kernel (Generator/*); do not hand-edit.
// eWiseAdd / eWiseUnion: C=A+B, C<M>=A+B, or C<!M>=A+B with EQ_FC64.
GrB_Info GB (_AaddB__eq_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    GxB_FC64_t alpha_scalar ;
    GxB_FC64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion substitutes alpha/beta for entries missing from A/B.
        alpha_scalar = (*((GxB_FC64_t *) alpha_scalar_in)) ;
        beta_scalar = (*((GxB_FC64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// NOTE: auto-generated kernel (Generator/*); do not hand-edit.
// eWiseMult with EQ_FC64 where C is sparse/hypersparse.
GrB_Info GB (_AemultB_08__eq_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// NOTE: auto-generated kernel (Generator/*); do not hand-edit.
// eWiseMult with EQ_FC64 when A is sparse/hyper and B is bitmap/full.
GrB_Info GB (_AemultB_02__eq_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for EQ (commutative), so the #else branch below
    // is the one that compiles.
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// NOTE: auto-generated kernel (Generator/*); do not hand-edit.
// eWiseMult C<M>=A.*B with EQ_FC64, M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_04__eq_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// NOTE: auto-generated kernel (Generator/*); do not hand-edit.
// eWiseMult with EQ_FC64 where C is bitmap.
GrB_Info GB (_AemultB_bitmap__eq_fc64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// NOTE: auto-generated kernel (Generator/*); do not hand-edit.
// Cx [p] = GB_FC64_eq (x, Bx [p]) for every entry present in B.
GrB_Info GB (_bind1st__eq_fc64)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // presence bitmap of B, consulted via GBB
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
    GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip positions with no entry in B
        if (!GBB (Bb, p)) continue ;
        GxB_FC64_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_FC64_eq (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// NOTE: auto-generated kernel (Generator/*); do not hand-edit.
// Cx [p] = GB_FC64_eq (Ax [p], y) for every entry present in A.
GrB_Info GB (_bind2nd__eq_fc64)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // presence bitmap of A, consulted via GBB
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
    GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip positions with no entry in A
        if (!GBB (Ab, p)) continue ;
        GxB_FC64_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_FC64_eq (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry action consumed by GB_unop_transpose.c:
// despite the "CAST" name there is no typecast; it applies z = (x == aij).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_eq (x, aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar x bound
// to its first argument.  Workspaces and A_slice partition the transpose work
// across nthreads threads (see GB_unop_transpose.c).
GrB_Info GB (_bind1st_tran__eq_fc64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE (generated code; definition is the same for this operator)
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_eq (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__eq_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_for_schedule_guided.c | <ompts:test>
<ompts:testdescription>Test which checks the guided option of the omp for schedule directive.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp for schedule(guided)</ompts:directive>
<ompts:dependences>omp flush,omp for nowait,omp critical,omp single</ompts:dependences>
<ompts:testcode>
/* Test for guided scheduling
 * Ensure threads get chunks in an interleaved manner first
* Then judge the chunk sizes are decreasing to a stable value
* Modified by Chunhua Liao
* For example, 100 iteration on 2 threads, chunksize 7
* one line for each dispatch, 0/1 means thread id
* 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 24
* 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 18
* 0 0 0 0 0 0 0 0 0 0 0 0 0 0 14
* 1 1 1 1 1 1 1 1 1 1 10
* 0 0 0 0 0 0 0 0 8
* 1 1 1 1 1 1 1 7
* 0 0 0 0 0 0 0 7
* 1 1 1 1 1 1 1 7
* 0 0 0 0 0 5
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"
//#define NUMBER_OF_THREADS 10
#define CFSMAX_SIZE 1000
#define MAX_TIME 0.005
/* SLEEPTIME is the sleep quantum handed to my_sleep() inside the timed loop
 * below.  The original guard put the #define inside the #ifdef, so SLEEPTIME
 * was only (re)defined when it was already defined — leaving it undefined
 * otherwise and breaking the later my_sleep(SLEEPTIME) call.  Undefine any
 * prior value, then always define it. */
#ifdef SLEEPTIME
#undef SLEEPTIME
#endif
#define SLEEPTIME 0.0001
/* Checks that `schedule(guided)` hands out chunks whose sizes decay
 * geometrically, by tracing which thread ran each iteration and then
 * reconstructing the dispatched chunk sizes from that trace.
 * Returns 1 on success, 0 on failure (or when fewer than 2 threads exist).
 * Fix over the original: tids[] and chunksizes[] are now freed on every
 * return path (both were leaked before). */
int <ompts:testcode:functionname>omp_for_schedule_guided</ompts:testcode:functionname> (FILE * logFile)
{
    <ompts:orphan:vars>
    int * tids;
    int * chunksizes;
    int notout;
    int maxiter;
    </ompts:orphan:vars>
    int NUMBER_OF_THREADS;
    int threads;
    int i;
    int result;

    /* tids[j] records which thread executed loop iteration j; the extra
     * slot tids[CFSMAX_SIZE] is used as a -1 sentinel during evaluation. */
    tids = (int *) malloc (sizeof (int) * (CFSMAX_SIZE + 1));
    maxiter = 0;
    result = 1;
    notout = 1;

    /* Testing if enough threads are available for this check. */
#pragma omp parallel
    {
#pragma omp single
        {
            threads = omp_get_num_threads ();
            NUMBER_OF_THREADS = threads;
        } /* end of single */
    } /* end of parallel */

    if (threads < 2) {
        printf ("This test only works with at least two threads .\n");
        fprintf (logFile, "This test only works with at least two threads. Available were only %d thread(s).\n", threads);
        free (tids);    /* was leaked on this early-exit path */
        return (0);
    } /* end if */

    /* Now the real parallel work:
     *
     * Each thread will start immediately with the first chunk.
     */
#pragma omp parallel shared(tids,maxiter)
    { /* begin of parallel */
        <ompts:orphan>
        double count;
        int tid;
        int j;

        tid = omp_get_thread_num ();

        /* Each iteration stalls (up to MAX_TIME) until another thread has
         * claimed a later iteration, which serializes the chunk dispatch
         * order so the chunk sizes can be reconstructed from tids[]. */
#pragma omp for nowait <ompts:check>schedule(guided)</ompts:check>
        for(j = 0; j < CFSMAX_SIZE; ++j)
        {
            count = 0.;
#pragma omp flush(maxiter)
            if (j > maxiter)
            {
#pragma omp critical
                {
                    maxiter = j;
                } /* end of critical */
            }
            /*printf ("thread %d sleeping\n", tid);*/
#pragma omp flush(maxiter,notout)
            while (notout && (count < MAX_TIME) && (maxiter == j))
            {
#pragma omp flush(maxiter,notout)
                my_sleep (SLEEPTIME);
                count += SLEEPTIME;
#ifdef VERBOSE
                printf(".");
#endif
            }
#ifdef VERBOSE
            if (count > 0.) printf(" waited %lf s\n", count);
#endif
            /*printf ("thread %d awake\n", tid);*/
            tids[j] = tid;
#ifdef VERBOSE
            printf("%d finished by %d\n",j,tid);
#endif
        } /* end of for */
        notout = 0;
#pragma omp flush(maxiter,notout)
        </ompts:orphan>
    } /* end of parallel */

    /*******************************************************
     * evaluation of the values                             *
     *******************************************************/
    {
        int determined_chunksize = 1;
        int last_threadnr = tids[0];
        int global_chunknr = 0;
        int local_chunknr[NUMBER_OF_THREADS];
        int openwork = CFSMAX_SIZE;
        int expected_chunk_size;
        double c = 1;

        for (i = 0; i < NUMBER_OF_THREADS; i++)
            local_chunknr[i] = 0;
        /* Sentinel that differs from every thread id: guarantees the final
         * chunk is closed by the scans below. */
        tids[CFSMAX_SIZE] = -1;

        /*
         * determine the number of global chunks
         */
        /*fprintf(logFile,"# global_chunknr thread local_chunknr chunksize\n"); */
        for(i = 1; i <= CFSMAX_SIZE; ++i)
        {
            if (last_threadnr==tids[i]) {
                determined_chunksize++;
            }
            else
            {
                /* fprintf (logFile, "%d\t%d\t%d\t%d\n", global_chunknr,last_threadnr, local_chunknr[last_threadnr], m); */
                global_chunknr++;
                local_chunknr[last_threadnr]++;
                last_threadnr = tids[i];
                determined_chunksize = 1;
            }
        }
        /* now allocate the memory for saving the sizes of the global chunks */
        chunksizes = (int*)malloc(global_chunknr * sizeof(int));
        /*
         * Evaluate the sizes of the global chunks
         */
        global_chunknr = 0;
        determined_chunksize = 1;
        last_threadnr = tids[0];
        for (i = 1; i <= CFSMAX_SIZE; ++i)
        {
            /* If the threadnumber was the same as before increase the detected chunksize for this chunk
             * otherwise set the detected chunksize again to one and save the number of the next thread in last_threadnr.
             */
            if (last_threadnr == tids[i]) {
                determined_chunksize++;
            }
            else {
                chunksizes[global_chunknr] = determined_chunksize;
                global_chunknr++;
                local_chunknr[last_threadnr]++;
                last_threadnr = tids[i];
                determined_chunksize = 1;
            }
        }
#ifdef VERBOSE
        fprintf (logFile, "found\texpected\tconstant\n");
#endif
        /* identify the constant c for the exponential decrease of the chunksize */
        expected_chunk_size = openwork / threads;
        c = (double) chunksizes[0] / expected_chunk_size;

        for (i = 0; i < global_chunknr; i++)
        {
            /* calculate the new expected chunksize */
            if (expected_chunk_size > 1)
                expected_chunk_size = c * openwork / threads;
#ifdef VERBOSE
            fprintf (logFile, "%8d\t%8d\t%lf\n", chunksizes[i], expected_chunk_size, c * chunksizes[i]/expected_chunk_size);
#endif
            /* check if chunksize is inside the rounding errors */
            if (abs (chunksizes[i] - expected_chunk_size) >= 2) {
                result = 0;
#ifndef VERBOSE
                fprintf (logFile, "Chunksize differed from expected value: %d instead of %d\n", chunksizes[i], expected_chunk_size);
                free (chunksizes);    /* was leaked on this early-exit path */
                free (tids);
                return 0;
#endif
            } /* end if */
#ifndef VERBOSE
            if (expected_chunk_size - chunksizes[i] < 0 )
                fprintf (logFile, "Chunksize did not decrease: %d instead of %d\n", chunksizes[i],expected_chunk_size);
#endif
            /* calculating the remaining amount of work */
            openwork -= chunksizes[i];
        }
        free (chunksizes);    /* was leaked before */
    }
    free (tids);    /* was leaked before */
    return result;
}
</ompts:testcode>
</ompts:test>
|
GB_binop__minus_fp64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__minus_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__minus_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__minus_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__minus_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_fp64)
// A*D function (colscale): GB (_AxD__minus_fp64)
// D*A function (rowscale): GB (_DxB__minus_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__minus_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__minus_fp64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_fp64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_fp64)
// C=scalar+B GB (_bind1st__minus_fp64)
// C=scalar+B' GB (_bind1st_tran__minus_fp64)
// C=A+scalar GB (_bind2nd__minus_fp64)
// C=A'+scalar GB (_bind2nd_tran__minus_fp64)
// C type: double
// A type: double
// A pattern? 0
// B type: double
// B pattern? 0
// BinaryOp: cij = (aij - bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x - y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINUS || GxB_NO_FP64 || GxB_NO_MINUS_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += op(A,B) applied elementwise, where op is fp64 MINUS (see GB_BINOP)
// and all three matrices are entirely dense; nthreads sets the parallelism.
// The loop body lives in the included template.
void GB (_Cdense_ewise3_accum__minus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = op(A,B) applied elementwise (no accumulation), where op is fp64 MINUS
// and all three matrices are entirely dense; nthreads sets the parallelism.
// The loop body lives in the included template.
void GB (_Cdense_ewise3_noaccum__minus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense C with the fp64 MINUS
// operator.  B_ek_slicing/B_ntasks/B_nthreads partition the entries of B
// across tasks.  Compiled out (GrB_NO_VALUE) when GB_DISABLE is set.
GrB_Info GB (_Cdense_accumB__minus_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with the fp64 MINUS
// operator; p_bwork points at the scalar, nthreads sets the parallelism.
// Compiled out (GrB_NO_VALUE) when GB_DISABLE is set.
// Fix over the generated original: it returned GrB_SUCCESS both inside the
// inner scope and again after it, leaving the second return unreachable;
// a single return now follows the template.
GrB_Info GB (_Cdense_accumb__minus_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D using the fp64
// MINUS operator; A_ek_slicing/A_ntasks/A_nthreads partition the entries of
// A.  Cx is a typed view of C->x for the template.
GrB_Info GB (_AxD__minus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D using the fp64
// MINUS operator, with nthreads-way parallelism.  Cx is a typed view of
// C->x for the template.
GrB_Info GB (_DxB__minus_fp64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, with op = fp64 MINUS.
// C_to_M/C_to_A/C_to_B map vectors of C to those of M/A/B; TaskList
// describes the parallel tasks.  When is_eWiseUnion is true the alpha/beta
// scalars replace missing entries of A/B (GxB_eWiseUnion semantics); they
// are only read in that case.  The GB_WERK workspaces declared here are
// released by GB_FREE_WORKSPACE after the template runs.
GrB_Info GB (_AaddB__minus_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
double alpha_scalar ;
double beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((double *) alpha_scalar_in)) ;
beta_scalar = (*((double *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B, C<M> = A.*B, or C<!M> = A.*B, with op =
// fp64 MINUS, where C is sparse or hypersparse.  C_to_M/C_to_A/C_to_B map
// vectors of C to those of M/A/B, and TaskList describes the parallel
// tasks.  All of the work is done by the included meta-template.
GrB_Info GB (_AemultB_08__minus_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B with op = fp64 MINUS, where A is
// sparse/hyper and B is bitmap/full.  flipxy asks for z = op(y,x); because
// GB_BINOP_FLIP is 0 for this operator (the flip is resolved by the caller,
// e.g. by rewriting z=minus(y,x) as z=rminus(x,y)), only the unflipped
// template branch is compiled below.
GrB_Info GB (_AemultB_02__minus_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant.  For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with op = fp64 MINUS, where M is
// sparse/hyper and both A and B are bitmap/full.  Cp_kfirst and the
// M_ek_slicing arrays partition the work by the entries of M.
GrB_Info GB (_AemultB_04__minus_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B with op = fp64 MINUS,
// where C is held as a bitmap.  M_ek_slicing/M_ntasks/M_nthreads partition
// the mask work and C_nthreads sets the parallelism for C.
GrB_Info GB (_AemultB_bitmap__minus_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply z = (x - bij) with the scalar x bound to the first
// argument of the operator.  Bb is the bitmap of B (NULL if B is full), bnz
// is the number of entries, and the loop is statically scheduled over
// nthreads threads.
GrB_Info GB (_bind1st__minus_fp64)
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// view the untyped inputs with their concrete types
double *Bx = (double *) Bx_input ;
double x = (*((double *) x_input)) ;
double *Cx = (double *) Cx_output ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
    // entries absent from the bitmap of B are skipped
    if (!GBB (Bb, k)) continue ;
    double bij = GBX (Bx, k, false) ;
    Cx [k] = (x - bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply z = (aij - y) with the scalar y bound to the second
// argument of the operator.  Ab is the bitmap of A (NULL if A is full), anz
// is the number of entries, and the loop is statically scheduled over
// nthreads threads.
GrB_Info GB (_bind2nd__minus_fp64)
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// view the untyped inputs with their concrete types
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
    // entries absent from the bitmap of A are skipped
    if (!GBB (Ab, k)) continue ;
    double aij = GBX (Ax, k, false) ;
    Cx [k] = (aij - y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry action consumed by GB_unop_transpose.c:
// despite the "CAST" name there is no typecast; it applies z = (x - aij).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x - aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar x bound
// to its first argument.  Workspaces and A_slice partition the transpose work
// across nthreads threads (see GB_unop_transpose.c).
GrB_Info GB (_bind1st_tran__minus_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE (generated code; definition is the same for this operator)
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry action consumed by GB_unop_transpose.c:
// no typecast is performed; it applies z = (aij - y).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - y) ; \
}
// C = op (A', y): transpose A and apply the operator with the scalar y bound
// to its second argument.  Workspaces and A_slice partition the transpose
// work across nthreads threads (see GB_unop_transpose.c).
GrB_Info GB (_bind2nd_tran__minus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
convolution_1x1_int8.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// 1x1 stride-1 int8 convolution: a 1x1 kernel over the whole feature map is
// exactly a matrix multiply, so flatten the spatial dims of a shallow copy of
// the input (w*h columns, 1 row) and delegate to the im2col sgemm kernel.
static void conv1x1s1_sgemm_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
    const int spatial = bottom_blob.w * bottom_blob.h;

    // shallow copy: only the header is rewritten, the data is shared
    Mat bottom_im2col = bottom_blob;
    bottom_im2col.w = spatial;
    bottom_im2col.h = 1;

    im2col_sgemm_int8_sse(bottom_im2col, top_blob, kernel, opt);
}
// 1x1 stride-2 int8 convolution: shrink the input by sampling every other
// pixel in x and y into a compact outw x outh blob, then run the stride-1
// sgemm path on the shrunken input.
static void conv1x1s2_sgemm_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
// after reading outw pixels at stride 2 we are 2*outw into the row;
// tailstep skips the rest of that row plus the entire next (odd) row
const int tailstep = w - 2 * outw + w;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
// NOTE(review): create() failure is not checked here — presumably callers
// guarantee the workspace allocation succeeds; confirm.
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const signed char* r0 = bottom_blob.channel(p);
signed char* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
int j = 0;
// copy every other input pixel, unrolled by 4 then by 2
for (; j + 3 < outw; j += 4)
{
outptr[0] = r0[0];
outptr[1] = r0[2];
outptr[2] = r0[4];
outptr[3] = r0[6];
r0 += 8;
outptr += 4;
}
for (; j + 1 < outw; j += 2)
{
outptr[0] = r0[0];
outptr[1] = r0[2];
r0 += 4;
outptr += 2;
}
for (; j < outw; j++)
{
outptr[0] = r0[0];
r0 += 2;
outptr += 1;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_int8_sse(bottom_blob_shrinked, top_blob, kernel, opt);
}
// Naive 1x1 stride-1 int8 convolution: for each output channel p, zero the
// int32 accumulator plane, then accumulate the weighted sum over input
// channels — 8 channels per pass, then a scalar tail.  Weights are laid out
// as kernel[p * inch + q] (one int8 weight per (out,in) channel pair).
static void conv1x1s1_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// NOTE(review): the weight blob is viewed as float* here but re-cast to
// signed char* below — presumably the Mat stores int8 weights; confirm.
const float* kernel = _kernel;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
out0.fill(0);
int q = 0;
// main pass: accumulate 8 input channels at a time
for (; q + 7 < inch; q += 8)
{
int* outptr0 = out0;
const signed char* kernel0 = (const signed char*)kernel + p * inch + q;
const signed char* r0 = bottom_blob.channel(q);
const signed char* r1 = bottom_blob.channel(q + 1);
const signed char* r2 = bottom_blob.channel(q + 2);
const signed char* r3 = bottom_blob.channel(q + 3);
const signed char* r4 = bottom_blob.channel(q + 4);
const signed char* r5 = bottom_blob.channel(q + 5);
const signed char* r6 = bottom_blob.channel(q + 6);
const signed char* r7 = bottom_blob.channel(q + 7);
int size = outw * outh;
int remain = size;
for (; remain > 0; remain--)
{
//ToDo Neon
int sum0 = (int)*r0 * (int)kernel0[0] + (int)*r1 * (int)kernel0[1] + (int)*r2 * (int)kernel0[2] + (int)*r3 * (int)kernel0[3] + (int)*r4 * (int)kernel0[4] + (int)*r5 * (int)kernel0[5] + (int)*r6 * (int)kernel0[6] + (int)*r7 * (int)kernel0[7];
*outptr0 += sum0;
r0++;
r1++;
r2++;
r3++;
r4++;
r5++;
r6++;
r7++;
outptr0++;
}
}
// tail pass: remaining input channels one at a time
for (; q < inch; q++)
{
int* outptr0 = out0;
const signed char* r0 = bottom_blob.channel(q);
const signed char* kernel0 = (const signed char*)kernel + p * inch + q;
const signed char k0 = kernel0[0];
int size = outw * outh;
int remain = size;
for (; remain > 0; remain--)
{
int sum0 = (int)(*r0) * (int)k0;
*outptr0 += sum0;
r0++;
outptr0++;
}
}
}
}
// Naive 1x1 stride-2 int8 convolution: same accumulation scheme as
// conv1x1s1_int8_sse, but the input pointers advance by 2 per output pixel
// and skip a full extra row at the end of each output row (tailstep), so
// odd input rows/columns are never read.
static void conv1x1s2_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// skip the remainder of the current input row plus the next (odd) row
const int tailstep = w - 2 * outw + w;
const signed char* kernel = _kernel;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
out0.fill(0);
int q = 0;
// main pass: accumulate 8 input channels at a time
for (; q + 7 < inch; q += 8)
{
int* outptr0 = out0;
const signed char* kernel0 = (const signed char*)kernel + p * inch + q;
const signed char* r0 = bottom_blob.channel(q);
const signed char* r1 = bottom_blob.channel(q + 1);
const signed char* r2 = bottom_blob.channel(q + 2);
const signed char* r3 = bottom_blob.channel(q + 3);
const signed char* r4 = bottom_blob.channel(q + 4);
const signed char* r5 = bottom_blob.channel(q + 5);
const signed char* r6 = bottom_blob.channel(q + 6);
const signed char* r7 = bottom_blob.channel(q + 7);
for (int i = 0; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
//ToDo Neon
int sum0 = (int)*r0 * (int)kernel0[0] + (int)*r1 * (int)kernel0[1] + (int)*r2 * (int)kernel0[2] + (int)*r3 * (int)kernel0[3] + (int)*r4 * (int)kernel0[4] + (int)*r5 * (int)kernel0[5] + (int)*r6 * (int)kernel0[6] + (int)*r7 * (int)kernel0[7];
*outptr0 += sum0;
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
r4 += 2;
r5 += 2;
r6 += 2;
r7 += 2;
outptr0++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
r5 += tailstep;
r6 += tailstep;
r7 += tailstep;
}
}
// tail pass: remaining input channels one at a time
for (; q < inch; q++)
{
int* outptr0 = out0;
const signed char* r0 = bottom_blob.channel(q);
const signed char* kernel0 = (const signed char*)kernel + p * inch + q;
for (int i = 0; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
//ToDo Neon
int sum0 = (int)*r0 * (int)kernel0[0];
*outptr0 += sum0;
r0 += 2;
outptr0++;
}
r0 += tailstep;
}
}
}
}
|
vision.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V V IIIII SSSSS IIIII OOO N N %
% V V I SS I O O NN N %
% V V I SSS I O O N N N %
% V V I SS I O O N NN %
% V IIIII SSSSS IIIII OOO N N %
% %
% %
% MagickCore Computer Vision Methods %
% %
% Software Design %
% Cristy %
% September 2014 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/constitute.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/montage.h"
#include "MagickCore/morphology.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/opencl-private.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resource_.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/vision.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n n e c t e d C o m p o n e n t s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConnectedComponentsImage() returns the connected-components of the image
% uniquely labeled. The returned connected components image colors member
% defines the number of unique objects. Choose from 4 or 8-way connectivity.
%
% You are responsible for freeing the connected components objects resources
%  with this statement:                                                       %
%
% objects = (CCObjectInfo *) RelinquishMagickMemory(objects);
%
% The format of the ConnectedComponentsImage method is:
%
% Image *ConnectedComponentsImage(const Image *image,
% const size_t connectivity,CCObjectInfo **objects,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o connectivity: how many neighbors to visit, choose from 4 or 8.
%
% o objects: return the attributes of each unique object.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  qsort() comparator ordering CCObjectInfo records by decreasing area.

  The previous implementation returned the truncated difference
  (int)(q->area-(ssize_t)p->area): the asymmetric cast and the narrowing
  to int can overflow/truncate for large areas, producing an inconsistent
  ordering (undefined behavior for qsort).  An explicit three-way compare
  is correct for any magnitude and preserves the descending order.
*/
static int CCObjectInfoCompare(const void *x,const void *y)
{
  CCObjectInfo
    *p,
    *q;

  p=(CCObjectInfo *) x;
  q=(CCObjectInfo *) y;
  if (q->area > p->area)
    return(1);
  if (q->area < p->area)
    return(-1);
  return(0);
}
MagickExport Image *ConnectedComponentsImage(const Image *image,
const size_t connectivity,CCObjectInfo **objects,ExceptionInfo *exception)
{
#define ConnectedComponentsImageTag "ConnectedComponents/Image"
CacheView
*component_view,
*image_view,
*object_view;
CCObjectInfo
*object;
char
*c;
const char
*artifact,
*metrics[CCMaxMetrics];
double
max_threshold,
min_threshold;
Image
*component_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MatrixInfo
*equivalences;
RectangleInfo
bounding_box;
ssize_t
i;
size_t
size;
ssize_t
background_id,
connect4[2][2] = { { -1, 0 }, { 0, -1 } },
connect8[4][2] = { { -1, -1 }, { -1, 0 }, { -1, 1 }, { 0, -1 } },
dx,
dy,
first,
last,
n,
step,
y;
/*
Initialize connected components image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (objects != (CCObjectInfo **) NULL)
*objects=(CCObjectInfo *) NULL;
component_image=CloneImage(image,0,0,MagickTrue,exception);
if (component_image == (Image *) NULL)
return((Image *) NULL);
component_image->depth=MAGICKCORE_QUANTUM_DEPTH;
if (AcquireImageColormap(component_image,MaxColormapSize,exception) == MagickFalse)
{
component_image=DestroyImage(component_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Initialize connected components equivalences.
*/
size=image->columns*image->rows;
if (image->columns != (size/image->rows))
{
component_image=DestroyImage(component_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
equivalences=AcquireMatrixInfo(size,1,sizeof(ssize_t),exception);
if (equivalences == (MatrixInfo *) NULL)
{
component_image=DestroyImage(component_image);
return((Image *) NULL);
}
for (n=0; n < (ssize_t) (image->columns*image->rows); n++)
(void) SetMatrixElement(equivalences,n,0,&n);
object=(CCObjectInfo *) AcquireQuantumMemory(MaxColormapSize,sizeof(*object));
if (object == (CCObjectInfo *) NULL)
{
equivalences=DestroyMatrixInfo(equivalences);
component_image=DestroyImage(component_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) memset(object,0,MaxColormapSize*sizeof(*object));
for (i=0; i < (ssize_t) MaxColormapSize; i++)
{
object[i].id=i;
object[i].bounding_box.x=(ssize_t) image->columns;
object[i].bounding_box.y=(ssize_t) image->rows;
GetPixelInfo(image,&object[i].color);
}
/*
Find connected components.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
for (n=0; n < (ssize_t) (connectivity > 4 ? 4 : 2); n++)
{
if (status == MagickFalse)
continue;
dx=connectivity > 4 ? connect8[n][1] : connect4[n][1];
dy=connectivity > 4 ? connect8[n][0] : connect4[n][0];
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y-1,image->columns,3,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
p+=GetPixelChannels(image)*image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
{
PixelInfo
pixel,
target;
ssize_t
neighbor_offset,
obj,
offset,
ox,
oy,
root;
/*
Is neighbor an authentic pixel and a different color than the pixel?
*/
GetPixelInfoPixel(image,p,&pixel);
if (((x+dx) < 0) || ((x+dx) >= (ssize_t) image->columns) ||
((y+dy) < 0) || ((y+dy) >= (ssize_t) image->rows))
{
p+=GetPixelChannels(image);
continue;
}
neighbor_offset=dy*(GetPixelChannels(image)*image->columns)+dx*
GetPixelChannels(image);
GetPixelInfoPixel(image,p+neighbor_offset,&target);
if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
{
p+=GetPixelChannels(image);
continue;
}
/*
Resolve this equivalence.
*/
offset=y*image->columns+x;
neighbor_offset=dy*image->columns+dx;
ox=offset;
status=GetMatrixElement(equivalences,ox,0,&obj);
while (obj != ox)
{
ox=obj;
status=GetMatrixElement(equivalences,ox,0,&obj);
}
oy=offset+neighbor_offset;
status=GetMatrixElement(equivalences,oy,0,&obj);
while (obj != oy)
{
oy=obj;
status=GetMatrixElement(equivalences,oy,0,&obj);
}
if (ox < oy)
{
status=SetMatrixElement(equivalences,oy,0,&ox);
root=ox;
}
else
{
status=SetMatrixElement(equivalences,ox,0,&oy);
root=oy;
}
ox=offset;
status=GetMatrixElement(equivalences,ox,0,&obj);
while (obj != root)
{
status=GetMatrixElement(equivalences,ox,0,&obj);
status=SetMatrixElement(equivalences,ox,0,&root);
}
oy=offset+neighbor_offset;
status=GetMatrixElement(equivalences,oy,0,&obj);
while (obj != root)
{
status=GetMatrixElement(equivalences,oy,0,&obj);
status=SetMatrixElement(equivalences,oy,0,&root);
}
status=SetMatrixElement(equivalences,y*image->columns+x,0,&root);
p+=GetPixelChannels(image);
}
}
}
/*
Label connected components.
*/
n=0;
component_view=AcquireAuthenticCacheView(component_image,exception);
for (y=0; y < (ssize_t) component_image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(component_view,0,y,component_image->columns,
1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) component_image->columns; x++)
{
ssize_t
id,
offset;
offset=y*image->columns+x;
status=GetMatrixElement(equivalences,offset,0,&id);
if (id != offset)
status=GetMatrixElement(equivalences,id,0,&id);
else
{
id=n++;
if (id >= (ssize_t) MaxColormapSize)
break;
}
status=SetMatrixElement(equivalences,offset,0,&id);
if (x < object[id].bounding_box.x)
object[id].bounding_box.x=x;
if (x >= (ssize_t) object[id].bounding_box.width)
object[id].bounding_box.width=(size_t) x;
if (y < object[id].bounding_box.y)
object[id].bounding_box.y=y;
if (y >= (ssize_t) object[id].bounding_box.height)
object[id].bounding_box.height=(size_t) y;
object[id].color.red+=QuantumScale*GetPixelRed(image,p);
object[id].color.green+=QuantumScale*GetPixelGreen(image,p);
object[id].color.blue+=QuantumScale*GetPixelBlue(image,p);
if (image->alpha_trait != UndefinedPixelTrait)
object[id].color.alpha+=QuantumScale*GetPixelAlpha(image,p);
if (image->colorspace == CMYKColorspace)
object[id].color.black+=QuantumScale*GetPixelBlack(image,p);
object[id].centroid.x+=x;
object[id].centroid.y+=y;
object[id].area++;
SetPixelIndex(component_image,(Quantum) id,q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(component_image);
}
if (n > (ssize_t) MaxColormapSize)
break;
if (SyncCacheViewAuthenticPixels(component_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
progress++;
proceed=SetImageProgress(image,ConnectedComponentsImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
component_view=DestroyCacheView(component_view);
image_view=DestroyCacheView(image_view);
equivalences=DestroyMatrixInfo(equivalences);
if (n > (ssize_t) MaxColormapSize)
{
object=(CCObjectInfo *) RelinquishMagickMemory(object);
component_image=DestroyImage(component_image);
ThrowImageException(ResourceLimitError,"TooManyObjects");
}
background_id=0;
min_threshold=0.0;
max_threshold=0.0;
component_image->colors=(size_t) n;
for (i=0; i < (ssize_t) component_image->colors; i++)
{
object[i].bounding_box.width-=(object[i].bounding_box.x-1);
object[i].bounding_box.height-=(object[i].bounding_box.y-1);
object[i].color.red/=(QuantumScale*object[i].area);
object[i].color.green/=(QuantumScale*object[i].area);
object[i].color.blue/=(QuantumScale*object[i].area);
if (image->alpha_trait != UndefinedPixelTrait)
object[i].color.alpha/=(QuantumScale*object[i].area);
if (image->colorspace == CMYKColorspace)
object[i].color.black/=(QuantumScale*object[i].area);
object[i].centroid.x/=object[i].area;
object[i].centroid.y/=object[i].area;
max_threshold+=object[i].area;
if (object[i].area > object[background_id].area)
background_id=i;
}
max_threshold+=MagickEpsilon;
n=(-1);
artifact=GetImageArtifact(image,"connected-components:background-id");
if (artifact != (const char *) NULL)
background_id=(ssize_t) StringToLong(artifact);
artifact=GetImageArtifact(image,"connected-components:area-threshold");
if (artifact != (const char *) NULL)
{
/*
Merge any object not within the min and max area threshold.
*/
(void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
for (i=0; i < (ssize_t) component_image->colors; i++)
if (((object[i].area < min_threshold) ||
(object[i].area >= max_threshold)) && (i != background_id))
object[i].merge=MagickTrue;
}
artifact=GetImageArtifact(image,"connected-components:keep-colors");
if (artifact != (const char *) NULL)
{
const char
*p;
/*
Keep selected objects based on color, merge others.
*/
for (i=0; i < (ssize_t) component_image->colors; i++)
object[i].merge=MagickTrue;
for (p=artifact; ; )
{
char
color[MagickPathExtent];
PixelInfo
pixel;
const char
*q;
for (q=p; *q != '\0'; q++)
if (*q == ';')
break;
(void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1,
MagickPathExtent));
(void) QueryColorCompliance(color,AllCompliance,&pixel,exception);
for (i=0; i < (ssize_t) component_image->colors; i++)
if (IsFuzzyEquivalencePixelInfo(&object[i].color,&pixel) != MagickFalse)
object[i].merge=MagickFalse;
if (*q == '\0')
break;
p=q+1;
}
}
artifact=GetImageArtifact(image,"connected-components:keep-ids");
if (artifact == (const char *) NULL)
artifact=GetImageArtifact(image,"connected-components:keep");
if (artifact != (const char *) NULL)
{
/*
Keep selected objects based on id, merge others.
*/
for (i=0; i < (ssize_t) component_image->colors; i++)
object[i].merge=MagickTrue;
for (c=(char *) artifact; *c != '\0'; )
{
while ((isspace((int) ((unsigned char) *c)) != 0) || (*c == ','))
c++;
first=(ssize_t) strtol(c,&c,10);
if (first < 0)
first+=(ssize_t) component_image->colors;
last=first;
while (isspace((int) ((unsigned char) *c)) != 0)
c++;
if (*c == '-')
{
last=(ssize_t) strtol(c+1,&c,10);
if (last < 0)
last+=(ssize_t) component_image->colors;
}
step=(ssize_t) (first > last ? -1 : 1);
for ( ; first != (last+step); first+=step)
object[first].merge=MagickFalse;
}
}
artifact=GetImageArtifact(image,"connected-components:keep-top");
if (artifact != (const char *) NULL)
{
CCObjectInfo
*top_objects;
ssize_t
top_ids;
/*
Keep top objects.
*/
top_ids=(ssize_t) StringToLong(artifact);
top_objects=(CCObjectInfo *) AcquireQuantumMemory(component_image->colors,
sizeof(*top_objects));
if (top_objects == (CCObjectInfo *) NULL)
{
object=(CCObjectInfo *) RelinquishMagickMemory(object);
component_image=DestroyImage(component_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) memcpy(top_objects,object,component_image->colors*sizeof(*object));
qsort((void *) top_objects,component_image->colors,sizeof(*top_objects),
CCObjectInfoCompare);
for (i=top_ids+1; i < (ssize_t) component_image->colors; i++)
object[top_objects[i].id].merge=MagickTrue;
top_objects=(CCObjectInfo *) RelinquishMagickMemory(top_objects);
}
artifact=GetImageArtifact(image,"connected-components:remove-colors");
if (artifact != (const char *) NULL)
{
const char
*p;
/*
Remove selected objects based on color, keep others.
*/
for (p=artifact; ; )
{
char
color[MagickPathExtent];
PixelInfo
pixel;
const char
*q;
for (q=p; *q != '\0'; q++)
if (*q == ';')
break;
(void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1,
MagickPathExtent));
(void) QueryColorCompliance(color,AllCompliance,&pixel,exception);
for (i=0; i < (ssize_t) component_image->colors; i++)
if (IsFuzzyEquivalencePixelInfo(&object[i].color,&pixel) != MagickFalse)
object[i].merge=MagickTrue;
if (*q == '\0')
break;
p=q+1;
}
}
artifact=GetImageArtifact(image,"connected-components:remove-ids");
if (artifact == (const char *) NULL)
artifact=GetImageArtifact(image,"connected-components:remove");
if (artifact != (const char *) NULL)
for (c=(char *) artifact; *c != '\0'; )
{
/*
Remove selected objects based on id, keep others.
*/
while ((isspace((int) ((unsigned char) *c)) != 0) || (*c == ','))
c++;
first=(ssize_t) strtol(c,&c,10);
if (first < 0)
first+=(ssize_t) component_image->colors;
last=first;
while (isspace((int) ((unsigned char) *c)) != 0)
c++;
if (*c == '-')
{
last=(ssize_t) strtol(c+1,&c,10);
if (last < 0)
last+=(ssize_t) component_image->colors;
}
step=(ssize_t) (first > last ? -1 : 1);
for ( ; first != (last+step); first+=step)
object[first].merge=MagickTrue;
}
artifact=GetImageArtifact(image,"connected-components:perimeter-threshold");
if (artifact != (const char *) NULL)
{
/*
Merge any object not within the min and max perimeter threshold.
*/
(void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
metrics[++n]="perimeter";
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic) shared(status) \
magick_number_threads(component_image,component_image,component_image->colors,1)
#endif
for (i=0; i < (ssize_t) component_image->colors; i++)
{
CacheView
*component_view;
RectangleInfo
bounding_box;
size_t
pattern[4] = { 1, 0, 0, 0 };
ssize_t
y;
/*
Compute perimeter of each object.
*/
if (status == MagickFalse)
continue;
component_view=AcquireAuthenticCacheView(component_image,exception);
bounding_box=object[i].bounding_box;
for (y=(-1); y < (ssize_t) bounding_box.height+1; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,bounding_box.x-1,
bounding_box.y+y,bounding_box.width+2,2,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=(-1); x < (ssize_t) bounding_box.width+1; x++)
{
Quantum
pixels[4];
ssize_t
v;
size_t
foreground;
/*
An Algorithm for Calculating Objects’ Shape Features in Binary
Images, Lifeng He, Yuyan Chao.
*/
foreground=0;
for (v=0; v < 2; v++)
{
ssize_t
u;
for (u=0; u < 2; u++)
{
ssize_t
offset;
offset=v*(bounding_box.width+2)*
GetPixelChannels(component_image)+u*
GetPixelChannels(component_image);
pixels[2*v+u]=GetPixelIndex(component_image,p+offset);
if ((ssize_t) pixels[2*v+u] == i)
foreground++;
}
}
if (foreground == 1)
pattern[1]++;
else
if (foreground == 2)
{
if ((((ssize_t) pixels[0] == i) &&
((ssize_t) pixels[3] == i)) ||
(((ssize_t) pixels[1] == i) &&
((ssize_t) pixels[2] == i)))
pattern[0]++; /* diagonal */
else
pattern[2]++;
}
else
if (foreground == 3)
pattern[3]++;
p+=GetPixelChannels(component_image);
}
}
component_view=DestroyCacheView(component_view);
object[i].metric[n]=ceil(MagickSQ1_2*pattern[1]+1.0*pattern[2]+
MagickSQ1_2*pattern[3]+MagickSQ2*pattern[0]-0.5);
}
for (i=0; i < (ssize_t) component_image->colors; i++)
if (((object[i].metric[n] < min_threshold) ||
(object[i].metric[n] >= max_threshold)) && (i != background_id))
object[i].merge=MagickTrue;
}
artifact=GetImageArtifact(image,"connected-components:circularity-threshold");
if (artifact != (const char *) NULL)
{
/*
Merge any object not within the min and max circularity threshold.
*/
(void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
metrics[++n]="circularity";
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic) shared(status) \
magick_number_threads(component_image,component_image,component_image->colors,1)
#endif
for (i=0; i < (ssize_t) component_image->colors; i++)
{
CacheView
*component_view;
RectangleInfo
bounding_box;
size_t
pattern[4] = { 1, 0, 0, 0 };
ssize_t
y;
/*
Compute the perimeter of each object; converted to circularity
(4*pi*area/perimeter^2) below.
*/
if (status == MagickFalse)
continue;
component_view=AcquireAuthenticCacheView(component_image,exception);
bounding_box=object[i].bounding_box;
for (y=(-1); y < (ssize_t) bounding_box.height; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,bounding_box.x-1,
bounding_box.y+y,bounding_box.width+2,2,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=(-1); x < (ssize_t) bounding_box.width; x++)
{
Quantum
pixels[4];
ssize_t
v;
size_t
foreground;
/*
An Algorithm for Calculating Objects’ Shape Features in Binary
Images, Lifeng He, Yuyan Chao.
*/
foreground=0;
for (v=0; v < 2; v++)
{
ssize_t
u;
for (u=0; u < 2; u++)
{
ssize_t
offset;
offset=v*(bounding_box.width+2)*
GetPixelChannels(component_image)+u*
GetPixelChannels(component_image);
pixels[2*v+u]=GetPixelIndex(component_image,p+offset);
if ((ssize_t) pixels[2*v+u] == i)
foreground++;
}
}
if (foreground == 1)
pattern[1]++;
else
if (foreground == 2)
{
if ((((ssize_t) pixels[0] == i) &&
((ssize_t) pixels[3] == i)) ||
(((ssize_t) pixels[1] == i) &&
((ssize_t) pixels[2] == i)))
pattern[0]++; /* diagonal */
else
pattern[2]++;
}
else
if (foreground == 3)
pattern[3]++;
p+=GetPixelChannels(component_image);
}
}
component_view=DestroyCacheView(component_view);
object[i].metric[n]=ceil(MagickSQ1_2*pattern[1]+1.0*pattern[2]+
MagickSQ1_2*pattern[3]+MagickSQ2*pattern[0]-0.5);
object[i].metric[n]=4.0*MagickPI*object[i].area/(object[i].metric[n]*
object[i].metric[n]);
}
for (i=0; i < (ssize_t) component_image->colors; i++)
if (((object[i].metric[n] < min_threshold) ||
(object[i].metric[n] >= max_threshold)) && (i != background_id))
object[i].merge=MagickTrue;
}
artifact=GetImageArtifact(image,"connected-components:diameter-threshold");
if (artifact != (const char *) NULL)
{
/*
Merge any object not within the min and max diameter threshold.
*/
(void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
metrics[++n]="diameter";
for (i=0; i < (ssize_t) component_image->colors; i++)
{
object[i].metric[n]=ceil(sqrt(4.0*object[i].area/MagickPI)-0.5);
if (((object[i].metric[n] < min_threshold) ||
(object[i].metric[n] >= max_threshold)) && (i != background_id))
object[i].merge=MagickTrue;
}
}
artifact=GetImageArtifact(image,"connected-components:major-axis-threshold");
if (artifact != (const char *) NULL)
{
/*
Merge any object not within the min and max ellipse major threshold.
*/
(void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
metrics[++n]="major-axis";
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic) shared(status) \
magick_number_threads(component_image,component_image,component_image->colors,1)
#endif
for (i=0; i < (ssize_t) component_image->colors; i++)
{
CacheView
*component_view;
double
M00 = 0.0,
M01 = 0.0,
M02 = 0.0,
M10 = 0.0,
M11 = 0.0,
M20 = 0.0;
PointInfo
centroid = { 0.0, 0.0 };
RectangleInfo
bounding_box;
const Quantum
*magick_restrict p;
ssize_t
x;
ssize_t
y;
/*
Compute ellipse major axis of each object.
*/
if (status == MagickFalse)
continue;
component_view=AcquireAuthenticCacheView(component_image,exception);
bounding_box=object[i].bounding_box;
for (y=0; y < (ssize_t) bounding_box.height; y++)
{
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
bounding_box.y+y,bounding_box.width,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) bounding_box.width; x++)
{
if ((ssize_t) GetPixelIndex(component_image,p) == i)
{
M00++;
M10+=x;
M01+=y;
}
p+=GetPixelChannels(component_image);
}
}
centroid.x=M10*PerceptibleReciprocal(M00);
centroid.y=M01*PerceptibleReciprocal(M00);
for (y=0; y < (ssize_t) bounding_box.height; y++)
{
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
bounding_box.y+y,bounding_box.width,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) bounding_box.width; x++)
{
if ((ssize_t) GetPixelIndex(component_image,p) == i)
{
M11+=(x-centroid.x)*(y-centroid.y);
M20+=(x-centroid.x)*(x-centroid.x);
M02+=(y-centroid.y)*(y-centroid.y);
}
p+=GetPixelChannels(component_image);
}
}
component_view=DestroyCacheView(component_view);
object[i].metric[n]=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)+
sqrt(4.0*M11*M11+(M20-M02)*(M20-M02))));
}
for (i=0; i < (ssize_t) component_image->colors; i++)
if (((object[i].metric[n] < min_threshold) ||
(object[i].metric[n] >= max_threshold)) && (i != background_id))
object[i].merge=MagickTrue;
}
artifact=GetImageArtifact(image,"connected-components:minor-axis-threshold");
if (artifact != (const char *) NULL)
{
/*
Merge any object not within the min and max ellipse minor threshold.
*/
(void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
metrics[++n]="minor-axis";
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic) shared(status) \
magick_number_threads(component_image,component_image,component_image->colors,1)
#endif
for (i=0; i < (ssize_t) component_image->colors; i++)
{
CacheView
*component_view;
double
M00 = 0.0,
M01 = 0.0,
M02 = 0.0,
M10 = 0.0,
M11 = 0.0,
M20 = 0.0;
PointInfo
centroid = { 0.0, 0.0 };
RectangleInfo
bounding_box;
const Quantum
*magick_restrict p;
ssize_t
x;
ssize_t
y;
/*
Compute ellipse minor axis of each object.
*/
if (status == MagickFalse)
continue;
component_view=AcquireAuthenticCacheView(component_image,exception);
bounding_box=object[i].bounding_box;
for (y=0; y < (ssize_t) bounding_box.height; y++)
{
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
bounding_box.y+y,bounding_box.width,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) bounding_box.width; x++)
{
if ((ssize_t) GetPixelIndex(component_image,p) == i)
{
M00++;
M10+=x;
M01+=y;
}
p+=GetPixelChannels(component_image);
}
}
centroid.x=M10*PerceptibleReciprocal(M00);
centroid.y=M01*PerceptibleReciprocal(M00);
for (y=0; y < (ssize_t) bounding_box.height; y++)
{
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
bounding_box.y+y,bounding_box.width,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) bounding_box.width; x++)
{
if ((ssize_t) GetPixelIndex(component_image,p) == i)
{
M11+=(x-centroid.x)*(y-centroid.y);
M20+=(x-centroid.x)*(x-centroid.x);
M02+=(y-centroid.y)*(y-centroid.y);
}
p+=GetPixelChannels(component_image);
}
}
component_view=DestroyCacheView(component_view);
object[i].metric[n]=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)-
sqrt(4.0*M11*M11+(M20-M02)*(M20-M02))));
}
for (i=0; i < (ssize_t) component_image->colors; i++)
if (((object[i].metric[n] < min_threshold) ||
(object[i].metric[n] >= max_threshold)) && (i != background_id))
object[i].merge=MagickTrue;
}
artifact=GetImageArtifact(image,
"connected-components:eccentricity-threshold");
if (artifact != (const char *) NULL)
{
/*
Merge any object not within the min and max eccentricity threshold.
*/
(void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
metrics[++n]="eccentricy";
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic) shared(status) \
magick_number_threads(component_image,component_image,component_image->colors,1)
#endif
for (i=0; i < (ssize_t) component_image->colors; i++)
{
CacheView
*component_view;
double
M00 = 0.0,
M01 = 0.0,
M02 = 0.0,
M10 = 0.0,
M11 = 0.0,
M20 = 0.0;
PointInfo
centroid = { 0.0, 0.0 },
ellipse_axis = { 0.0, 0.0 };
RectangleInfo
bounding_box;
const Quantum
*magick_restrict p;
ssize_t
x;
ssize_t
y;
/*
Compute eccentricity of each object.
*/
if (status == MagickFalse)
continue;
component_view=AcquireAuthenticCacheView(component_image,exception);
bounding_box=object[i].bounding_box;
for (y=0; y < (ssize_t) bounding_box.height; y++)
{
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
bounding_box.y+y,bounding_box.width,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) bounding_box.width; x++)
{
if ((ssize_t) GetPixelIndex(component_image,p) == i)
{
M00++;
M10+=x;
M01+=y;
}
p+=GetPixelChannels(component_image);
}
}
centroid.x=M10*PerceptibleReciprocal(M00);
centroid.y=M01*PerceptibleReciprocal(M00);
for (y=0; y < (ssize_t) bounding_box.height; y++)
{
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
bounding_box.y+y,bounding_box.width,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) bounding_box.width; x++)
{
if ((ssize_t) GetPixelIndex(component_image,p) == i)
{
M11+=(x-centroid.x)*(y-centroid.y);
M20+=(x-centroid.x)*(x-centroid.x);
M02+=(y-centroid.y)*(y-centroid.y);
}
p+=GetPixelChannels(component_image);
}
}
component_view=DestroyCacheView(component_view);
ellipse_axis.x=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)+
sqrt(4.0*M11*M11+(M20-M02)*(M20-M02))));
ellipse_axis.y=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)-
sqrt(4.0*M11*M11+(M20-M02)*(M20-M02))));
object[i].metric[n]=sqrt(1.0-(ellipse_axis.y*ellipse_axis.y*
PerceptibleReciprocal(ellipse_axis.x*ellipse_axis.x)));
}
for (i=0; i < (ssize_t) component_image->colors; i++)
if (((object[i].metric[n] < min_threshold) ||
(object[i].metric[n] >= max_threshold)) && (i != background_id))
object[i].merge=MagickTrue;
}
artifact=GetImageArtifact(image,"connected-components:angle-threshold");
if (artifact != (const char *) NULL)
{
/*
Merge any object not within the min and max ellipse angle threshold.
*/
(void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
metrics[++n]="angle";
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic) shared(status) \
magick_number_threads(component_image,component_image,component_image->colors,1)
#endif
for (i=0; i < (ssize_t) component_image->colors; i++)
{
CacheView
*component_view;
double
M00 = 0.0,
M01 = 0.0,
M02 = 0.0,
M10 = 0.0,
M11 = 0.0,
M20 = 0.0;
PointInfo
centroid = { 0.0, 0.0 };
RectangleInfo
bounding_box;
const Quantum
*magick_restrict p;
ssize_t
x;
ssize_t
y;
/*
Compute ellipse angle of each object.
*/
if (status == MagickFalse)
continue;
component_view=AcquireAuthenticCacheView(component_image,exception);
bounding_box=object[i].bounding_box;
for (y=0; y < (ssize_t) bounding_box.height; y++)
{
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
bounding_box.y+y,bounding_box.width,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) bounding_box.width; x++)
{
if ((ssize_t) GetPixelIndex(component_image,p) == i)
{
M00++;
M10+=x;
M01+=y;
}
p+=GetPixelChannels(component_image);
}
}
centroid.x=M10*PerceptibleReciprocal(M00);
centroid.y=M01*PerceptibleReciprocal(M00);
for (y=0; y < (ssize_t) bounding_box.height; y++)
{
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
bounding_box.y+y,bounding_box.width,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) bounding_box.width; x++)
{
if ((ssize_t) GetPixelIndex(component_image,p) == i)
{
M11+=(x-centroid.x)*(y-centroid.y);
M20+=(x-centroid.x)*(x-centroid.x);
M02+=(y-centroid.y)*(y-centroid.y);
}
p+=GetPixelChannels(component_image);
}
}
component_view=DestroyCacheView(component_view);
object[i].metric[n]=RadiansToDegrees(1.0/2.0*atan(2.0*M11*
PerceptibleReciprocal(M20-M02)));
if (fabs(M11) < 0.0)
{
if ((fabs(M20-M02) >= 0.0) && ((M20-M02) < 0.0))
object[i].metric[n]+=90.0;
}
else
if (M11 < 0.0)
{
if (fabs(M20-M02) >= 0.0)
{
if ((M20-M02) < 0.0)
object[i].metric[n]+=90.0;
else
object[i].metric[n]+=180.0;
}
}
else
if ((fabs(M20-M02) >= 0.0) && ((M20-M02) < 0.0))
object[i].metric[n]+=90.0;
}
for (i=0; i < (ssize_t) component_image->colors; i++)
if (((object[i].metric[n] < min_threshold) ||
(object[i].metric[n] >= max_threshold)) && (i != background_id))
object[i].merge=MagickTrue;
}
/*
Merge each object flagged above (by the area, shape, or keep/remove
criteria) into its largest adjacent neighbor.
*/
component_view=AcquireAuthenticCacheView(component_image,exception);
object_view=AcquireVirtualCacheView(component_image,exception);
for (i=0; i < (ssize_t) component_image->colors; i++)
{
ssize_t
j;
size_t
id;
if (status == MagickFalse)
continue;
if ((object[i].merge == MagickFalse) || (i == background_id))
continue; /* keep object */
/*
Merge this object.
*/
for (j=0; j < (ssize_t) component_image->colors; j++)
object[j].census=0;
bounding_box=object[i].bounding_box;
for (y=0; y < (ssize_t) bounding_box.height; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
bounding_box.y+y,bounding_box.width,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) bounding_box.width; x++)
{
ssize_t
n;
if (status == MagickFalse)
continue;
j=(ssize_t) GetPixelIndex(component_image,p);
if (j == i)
for (n=0; n < (ssize_t) (connectivity > 4 ? 4 : 2); n++)
{
const Quantum
*p;
/*
Compute area of adjacent objects.
*/
if (status == MagickFalse)
continue;
dx=connectivity > 4 ? connect8[n][1] : connect4[n][1];
dy=connectivity > 4 ? connect8[n][0] : connect4[n][0];
p=GetCacheViewVirtualPixels(object_view,bounding_box.x+x+dx,
bounding_box.y+y+dy,1,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
j=(ssize_t) GetPixelIndex(component_image,p);
if (j != i)
object[j].census++;
}
p+=GetPixelChannels(component_image);
}
}
/*
Merge with object of greatest adjacent area.
*/
id=0;
for (j=1; j < (ssize_t) component_image->colors; j++)
if (object[j].census > object[id].census)
id=(size_t) j;
object[i].area=0.0;
for (y=0; y < (ssize_t) bounding_box.height; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(component_view,bounding_box.x,
bounding_box.y+y,bounding_box.width,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) bounding_box.width; x++)
{
if ((ssize_t) GetPixelIndex(component_image,q) == i)
SetPixelIndex(component_image,(Quantum) id,q);
q+=GetPixelChannels(component_image);
}
if (SyncCacheViewAuthenticPixels(component_view,exception) == MagickFalse)
status=MagickFalse;
}
}
object_view=DestroyCacheView(object_view);
component_view=DestroyCacheView(component_view);
artifact=GetImageArtifact(image,"connected-components:mean-color");
if (IsStringTrue(artifact) != MagickFalse)
{
/*
Replace object with mean color.
*/
for (i=0; i < (ssize_t) component_image->colors; i++)
component_image->colormap[i]=object[i].color;
}
(void) SyncImage(component_image,exception);
artifact=GetImageArtifact(image,"connected-components:verbose");
if ((IsStringTrue(artifact) != MagickFalse) ||
(objects != (CCObjectInfo **) NULL))
{
/*
Report statistics on each unique object.
*/
for (i=0; i < (ssize_t) component_image->colors; i++)
{
object[i].bounding_box.width=0;
object[i].bounding_box.height=0;
object[i].bounding_box.x=(ssize_t) component_image->columns;
object[i].bounding_box.y=(ssize_t) component_image->rows;
object[i].centroid.x=0;
object[i].centroid.y=0;
object[i].census=object[i].area == 0.0 ? 0.0 : 1.0;
object[i].area=0;
}
component_view=AcquireVirtualCacheView(component_image,exception);
for (y=0; y < (ssize_t) component_image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,0,y,component_image->columns,
1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) component_image->columns; x++)
{
size_t
id;
id=(size_t) GetPixelIndex(component_image,p);
if (x < object[id].bounding_box.x)
object[id].bounding_box.x=x;
if (x > (ssize_t) object[id].bounding_box.width)
object[id].bounding_box.width=(size_t) x;
if (y < object[id].bounding_box.y)
object[id].bounding_box.y=y;
if (y > (ssize_t) object[id].bounding_box.height)
object[id].bounding_box.height=(size_t) y;
object[id].centroid.x+=x;
object[id].centroid.y+=y;
object[id].area++;
p+=GetPixelChannels(component_image);
}
}
for (i=0; i < (ssize_t) component_image->colors; i++)
{
object[i].bounding_box.width-=(object[i].bounding_box.x-1);
object[i].bounding_box.height-=(object[i].bounding_box.y-1);
object[i].centroid.x=object[i].centroid.x/object[i].area;
object[i].centroid.y=object[i].centroid.y/object[i].area;
}
component_view=DestroyCacheView(component_view);
qsort((void *) object,component_image->colors,sizeof(*object),
CCObjectInfoCompare);
if (objects == (CCObjectInfo **) NULL)
{
ssize_t
j;
artifact=GetImageArtifact(image,
"connected-components:exclude-header");
if (IsStringTrue(artifact) == MagickFalse)
{
(void) fprintf(stdout,"Objects (");
artifact=GetImageArtifact(image,
"connected-components:exclude-ids");
if (IsStringTrue(artifact) == MagickFalse)
(void) fprintf(stdout,"id: ");
(void) fprintf(stdout,"bounding-box centroid area mean-color");
for (j=0; j <= n; j++)
(void) fprintf(stdout," %s",metrics[j]);
(void) fprintf(stdout,"):\n");
}
for (i=0; i < (ssize_t) component_image->colors; i++)
if (object[i].census > 0.0)
{
char
mean_color[MagickPathExtent];
GetColorTuple(&object[i].color,MagickFalse,mean_color);
(void) fprintf(stdout," ");
artifact=GetImageArtifact(image,
"connected-components:exclude-ids");
if (IsStringTrue(artifact) == MagickFalse)
(void) fprintf(stdout,"%.20g: ",(double) object[i].id);
(void) fprintf(stdout,
"%.20gx%.20g%+.20g%+.20g %.1f,%.1f %.*g %s",(double)
object[i].bounding_box.width,(double)
object[i].bounding_box.height,(double)
object[i].bounding_box.x,(double) object[i].bounding_box.y,
object[i].centroid.x,object[i].centroid.y,
GetMagickPrecision(),(double) object[i].area,mean_color);
for (j=0; j <= n; j++)
(void) fprintf(stdout," %.*g",GetMagickPrecision(),
object[i].metric[j]);
(void) fprintf(stdout,"\n");
}
}
}
if (objects == (CCObjectInfo **) NULL)
object=(CCObjectInfo *) RelinquishMagickMemory(object);
else
*objects=object;
return(component_image);
}
|
target_array_extension.c | // --------------------------------------------------
// Check extends before
// --------------------------------------------------
// RUN: %libomptarget-compile-aarch64-unknown-linux-gnu \
// RUN: -fopenmp-version=51 -DEXTENDS=BEFORE
// RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \
// RUN: | %fcheck-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu \
// RUN: -fopenmp-version=51 -DEXTENDS=BEFORE
// RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \
// RUN: | %fcheck-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu \
// RUN: -fopenmp-version=51 -DEXTENDS=BEFORE
// RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \
// RUN: | %fcheck-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-x86_64-pc-linux-gnu \
// RUN: -fopenmp-version=51 -DEXTENDS=BEFORE
// RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \
// RUN: | %fcheck-x86_64-pc-linux-gnu
// --------------------------------------------------
// Check extends after
// --------------------------------------------------
// RUN: %libomptarget-compile-aarch64-unknown-linux-gnu \
// RUN: -fopenmp-version=51 -DEXTENDS=AFTER
// RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \
// RUN: | %fcheck-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu \
// RUN: -fopenmp-version=51 -DEXTENDS=AFTER
// RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \
// RUN: | %fcheck-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu \
// RUN: -fopenmp-version=51 -DEXTENDS=AFTER
// RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \
// RUN: | %fcheck-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-x86_64-pc-linux-gnu \
// RUN: -fopenmp-version=51 -DEXTENDS=AFTER
// RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \
// RUN: | %fcheck-x86_64-pc-linux-gnu
// END.
#include <stdio.h>
// Sub-array geometry: depending on -DEXTENDS, the LARGE mapping extends
// either before (BEFORE) or after (AFTER) the SMALL mapping, so the two
// share exactly one end point.
#define BEFORE 0
#define AFTER 1
#define SIZE 100
#if EXTENDS == BEFORE
# define SMALL_BEG (SIZE-2)
# define SMALL_END SIZE
# define LARGE_BEG 0
# define LARGE_END SIZE
#elif EXTENDS == AFTER
# define SMALL_BEG 0
# define SMALL_END 2
# define LARGE_BEG 0
# define LARGE_END SIZE
#else
# error EXTENDS undefined
#endif
#define SMALL_SIZE (SMALL_END-SMALL_BEG)
#define LARGE_SIZE (LARGE_END-LARGE_BEG)
// OpenMP array-section syntax is [begin : length], not [begin : end].
#define SMALL SMALL_BEG:SMALL_SIZE
#define LARGE LARGE_BEG:LARGE_SIZE
int main() {
int arr[SIZE];
// Log host address/size of both sub-arrays so FileCheck can capture them
// and match the runtime diagnostics emitted below.
// CHECK: addr=0x[[#%x,SMALL_ADDR:]], size=[[#%u,SMALL_BYTES:]]
fprintf(stderr, "addr=%p, size=%ld\n", &arr[SMALL_BEG],
SMALL_SIZE * sizeof arr[0]);
// CHECK: addr=0x[[#%x,LARGE_ADDR:]], size=[[#%u,LARGE_BYTES:]]
fprintf(stderr, "addr=%p, size=%ld\n", &arr[LARGE_BEG],
LARGE_SIZE * sizeof arr[0]);
// Mapping a smaller section contained in an existing larger mapping is
// legal, so no runtime message is expected here.
// CHECK-NOT: Libomptarget
#pragma omp target data map(alloc: arr[LARGE])
{
#pragma omp target map(present, tofrom: arr[SMALL])
;
}
// CHECK: arr is present
fprintf(stderr, "arr is present\n");
// Mapping a larger section over an existing smaller mapping must fail:
// the requested range would extend the device allocation, and 'present'
// requires the whole range to already be mapped. The run is expected to
// abort here (this is a %libomptarget-run-fail test).
// CHECK: Libomptarget message: explicit extension not allowed: host address specified is 0x{{0*}}[[#LARGE_ADDR]] ([[#LARGE_BYTES]] bytes), but device allocation maps to host at 0x{{0*}}[[#SMALL_ADDR]] ([[#SMALL_BYTES]] bytes)
// CHECK: Libomptarget message: device mapping required by 'present' map type modifier does not exist for host address 0x{{0*}}[[#LARGE_ADDR]] ([[#LARGE_BYTES]] bytes)
// CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory
#pragma omp target data map(alloc: arr[SMALL])
{
#pragma omp target map(present, tofrom: arr[LARGE])
;
}
// This must never print: the process should have died above.
// CHECK-NOT: arr is present
fprintf(stderr, "arr is present\n");
return 0;
}
|
GB_unaryop__minv_uint64_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint64_bool
// op(A') function: GB_tran__minv_uint64_bool
// C type: uint64_t
// A type: bool
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 64)
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 64) ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply cij = minv(aij) entrywise over anz entries, casting bool -> uint64_t
// first (see GB_CAST_OP above). Returns GrB_NO_VALUE when this kernel is
// compiled out via GB_DISABLE, letting the caller fall back to generic code.
GrB_Info GB_unop__minv_uint64_bool
(
uint64_t *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Entries are independent, so a static schedule over p is safe.
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the transpose loop body lives in the shared template
// GB_unaryop_transpose.c, specialized here via the GB_* macros defined above.
GrB_Info GB_tran__minv_uint64_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Phase 2 of the two-phase transpose (phase 1 computed Rowcounts).
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Tigran.c | #include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include <math.h>
struct tablo {
int * tab;
int size;
};
#define MINGLOBAL -2147483640
/* Return the larger of two ints. */
int max(int a, int b){
return (a > b) ? a : b;
}
/* Dump a tablo (size header followed by its elements) to stdout. */
void printArray(struct tablo * tmp) {
printf("---- Array of size %i ---- \n", tmp->size);
for (int idx = 0; idx < tmp->size; ++idx) {
printf("%i ", tmp->tab[idx]);
}
printf("\n");
}
// Upsweep ("montee") of a sum-scan tree: place the source values at the
// leaves of a 2*size heap-indexed array, then fill each internal node with
// the sum of its two children, level by level from leaves to root.
// Assumes source->size is a power of two and destination holds 2*size ints.
void montee(struct tablo * source, struct tablo * destination) {
destination->tab[0] = 0;
for (int p = 0; p < source->size; p++) {
destination->tab[p + source->size] = source->tab[p];
}
// Nodes within one level are independent, so each level fills in parallel.
for (int l = log2(source->size) - 1; l >= 0; l--) {
int inf = pow(2, l);
int sup = pow(2, l + 1);
#pragma omp parallel for
for (int j = inf; j < sup; j++) {
destination->tab[j] = destination->tab[2 * j] + destination->tab[2 * j + 1];
}
}
}
// Upsweep for a max-scan: identical tree construction to montee(), but each
// internal node holds the max of its children, and the unused slot 0 is
// seeded with the MINGLOBAL sentinel instead of 0.
void monteeMAX(struct tablo * source, struct tablo * destination) {
destination->tab[0] = MINGLOBAL;
for (int p = 0; p < source->size; p++) {
destination->tab[p + source->size] = source->tab[p];
}
for (int l = log2(source->size) - 1; l >= 0; l--) {
int inf = pow(2, l);
int sup = pow(2, l + 1);
#pragma omp parallel for
for (int j = inf; j < sup; j++) {
destination->tab[j] = max(destination->tab[2 * j] , destination->tab[2 * j + 1]);
}
}
}
//Descente suffix de la meme maniere que prefix sauf qu'on utilise la symétrie au lieu de faire un reverse de façon naif
//
//Du coup on descends dans la arbre si on est le fils droit on prends la valeur du pere sinon on prends la valeur du pere + la valeur de son frere dans l'autre arbre
//
// Downsweep for a *suffix* sum (mirror image of the prefix downsweep):
// walking the tree top-down, a right child (odd index) inherits its parent's
// value, while a left child takes parent + its right sibling's upsweep value
// (a->tab[j+1]). b->tab ends up holding the exclusive suffix sums.
void descenteSuffixe(struct tablo * a, struct tablo * b) {
b->tab[1]=0;
for(int l=1;l<=log2(a->size/2);l++){
int inf = pow(2,l);
int sup = pow(2,l+1);
#pragma omp parallel for
for(int j = sup-1;j>=inf;j--){
if(j%2==1){
b->tab[j]=b->tab[(j-1)/2];
}
else{b->tab[j]=b->tab[j/2]+a->tab[j+1];}
}
}
}//Descente suffix de la meme maniere que prefix sauf qu'on utilise la symétrie au lieu de faire un reverse de façon naif
//
//Du coup on descends dans la arbre si on est le fils droit on prends la valeur du pere sinon on prends le max de la valeur du pere et la valeur de son frere dans l'autre arbre
//
// Suffix-max downsweep: same traversal as descenteSuffixe(), but a left
// child combines with max() instead of +, and the root is seeded with the
// MINGLOBAL sentinel (identity of max) instead of 0.
void descenteSuffixeMAX(struct tablo * a, struct tablo * b) {
b->tab[1]=MINGLOBAL;
for(int l=1;l<=log2(a->size/2);l++){
int inf = pow(2,l);
int sup = pow(2,l+1);
#pragma omp parallel for
for(int j = sup-1;j>=inf;j--){
if(j%2==1){
b->tab[j]=b->tab[(j-1)/2];
}
else{b->tab[j]=max(b->tab[j/2],a->tab[j+1]);}
}
}
}
//Descente prefix
//
//Du coup on descends dans la arbre si on est le fils gauche on prends la valeur du pere sinon on prends la valeur du pere + la valeur de son frere dans l'autre arbre
//
// Prefix-sum downsweep: top-down, a left child (even index) inherits its
// parent's value, while a right child takes parent + its left sibling's
// upsweep value (a->tab[j-1]). c->tab ends up holding exclusive prefix sums.
void descente(struct tablo * a, struct tablo * c) {
c->tab[1]=0;
for(int l=1;l<=log2(a->size/2);l++){
int inf = pow(2,l);
int sup = pow(2,l+1);
#pragma omp parallel for
for(int j = inf;j<sup;j++){
if(j%2==0){
c->tab[j]=c->tab[j / 2];
}
else{ c->tab[j]= c->tab[(j - 1) / 2] + a->tab[j - 1];}
}
}
}
//Descente prefix
//
//Du coup on descends dans la arbre si on est le fils gauche on prends la valeur du pere sinon on prends le mac de la valeur du pere et la valeur de son frere dans l'autre arbre
//
// Prefix-max downsweep: same traversal as descente(), but a right child
// combines with max() instead of +, and the root is seeded with MINGLOBAL.
void descenteMAX(struct tablo * a, struct tablo * c) {
c->tab[1]=MINGLOBAL;
for(int l=1;l<=log2(a->size/2);l++){
int inf = pow(2,l);
int sup = pow(2,l+1);
#pragma omp parallel for
for(int j = inf;j<sup;j++){
if(j%2==0){
c->tab[j]=c->tab[j / 2];
}
else{ c->tab[j]= max(c->tab[(j - 1) / 2] , a->tab[j - 1]);}
}
}
}
/* Final scan pass: over the leaf level of the tree (indices
 * [size, 2*size)), fold each upsweep value a->tab[j] into the downsweep
 * result b->tab[j], turning the exclusive scan into an inclusive one. */
void final(struct tablo * a, struct tablo *b) {
int leafStart = pow(2,log2(a->size/2));
int leafEnd = 2 * leafStart;
#pragma omp parallel for
for (int leaf = leafStart; leaf < leafEnd; leaf++){
b->tab[leaf] += a->tab[leaf];
}
}
/* Max-scan counterpart of final(): combine upsweep and downsweep leaves
 * with max() instead of +. */
void finalMAX(struct tablo * a, struct tablo *b) {
int leafStart = pow(2,log2(a->size/2));
int leafEnd = 2 * leafStart;
#pragma omp parallel for
for (int leaf = leafStart; leaf < leafEnd; leaf++){
b->tab[leaf] = max(b->tab[leaf], a->tab[leaf]);
}
}
// Read `size` integers from the text file named by `threads` into a freshly
// allocated s->tab (whitespace-separated, as parsed by fscanf "%d").
// Fix: the original released the stream with free(myFile), which is
// undefined behavior on a FILE* returned by fopen() — it must be fclose().
// Also guard against fopen() failing and against a short file.
void generateArray(struct tablo * s,char * threads,int size){
s->size=size;
s->tab=malloc(s->size*sizeof(int));
FILE *myFile = fopen(threads, "r");
if (myFile == NULL) {
fprintf(stderr, "generateArray: cannot open %s\n", threads);
exit(1);
}
for (int i = 0; i < size; i++)
{
if (fscanf(myFile, "%d", &s->tab[i]) != 1) {
s->tab[i] = 0; /* short file: pad remaining slots with zeros */
}
}
fclose(myFile); /* was free(myFile): UB on a FILE* */
}
//l'algorithm Perumalla et al est vraiment rapide en parallel
// Parallel inclusive prefix-sum scan (up/down-sweep tree): build the upsweep
// tree `a`, the downsweep tree `b`, combine them with final(), then copy the
// leaf level into dest. Assumes source->size is a power of two and dest->tab
// holds source->size ints.
void scan_prefixe(struct tablo *source, struct tablo *dest){
struct tablo * a = malloc(sizeof(struct tablo));
a->tab = malloc(source->size*2*sizeof(int));
a->size =source->size*2;
#pragma omp parallel for
for (int i = 0; i < a->size; i++) {
a->tab[i] = 0;
}
montee(source, a);
struct tablo * b = malloc(sizeof(struct tablo));
b->tab= malloc(source->size*2*sizeof(int));
b->size=source->size*2;
#pragma omp parallel for
for (int i = 0; i < b->size; i++) {
b->tab[i] = 0;
}
descente(a, b);
final(a,b);
// Leaves of the combined tree live at [size, 2*size): copy them out.
#pragma omp parallel for
for (int i=0; i < dest->size; i++){
dest->tab[i]=b->tab[i + source->size];
}
free(a->tab);
free(b->tab);
free(a);
free(b);
}
// Parallel inclusive suffix-sum scan, mirroring scan_prefixe().
// Fix: the original copied the *uninitialized* contents of the freshly
// malloc'd b->tab into dest at this point — a read of indeterminate memory
// whose result was overwritten later anyway. Zero-initialize b instead,
// exactly as scan_prefixe() does.
void scan_suffixe(struct tablo *source, struct tablo *dest){
struct tablo * a = malloc(sizeof(struct tablo));
a->tab = malloc(source->size*2*sizeof(int));
a->size =source->size*2;
#pragma omp parallel for
for (int i = 0; i < a->size; i++) {
a->tab[i] = 0;
}
montee(source, a);
struct tablo * b = malloc(sizeof(struct tablo));
b->tab= malloc(source->size*2*sizeof(int));
b->size=source->size*2;
#pragma omp parallel for
for (int i = 0; i < b->size; i++) {
b->tab[i] = 0;
}
descenteSuffixe(a, b);
final(a,b);
// Copy the leaf level [size, 2*size) of the combined tree into dest.
#pragma omp parallel for
for (int i=0; i < dest->size; i++){
dest->tab[i]=b->tab[i + source->size];
}
free(a->tab);
free(b->tab);
free(a);
free(b);
}
// Parallel inclusive prefix-max scan: same structure as scan_prefixe(), but
// trees are seeded with the MINGLOBAL sentinel (identity of max) and the
// MAX variants of the sweep routines are used.
void scan_prefixeMAX(struct tablo *source, struct tablo *dest){
struct tablo * a = malloc(sizeof(struct tablo));
a->tab = malloc(source->size*2*sizeof(int));
a->size =source->size*2;
#pragma omp parallel for
for (int i = 0; i < a->size; i++) {
a->tab[i] = MINGLOBAL;
}
monteeMAX(source, a);
struct tablo * b = malloc(sizeof(struct tablo));
b->tab= malloc(source->size*2*sizeof(int));
b->size=source->size*2;
#pragma omp parallel for
for (int i = 0; i < b->size; i++) {
b->tab[i] = MINGLOBAL;
}
descenteMAX(a, b);
finalMAX(a,b);
// Copy the leaf level [size, 2*size) of the combined tree into dest.
#pragma omp parallel for
for (int i=0; i < dest->size; i++){
dest->tab[i]=b->tab[i + source->size];
}
free(a->tab);
free(b->tab);
free(a);
free(b);
}
// Parallel inclusive suffix-max scan, mirroring scan_prefixeMAX().
// Fix: as in scan_suffixe(), the original read the *uninitialized* malloc'd
// b->tab into dest here (indeterminate memory, result discarded later).
// Initialize b with the MINGLOBAL sentinel instead, exactly as
// scan_prefixeMAX() does.
void scan_suffixeMAX(struct tablo *source, struct tablo *dest){
struct tablo * a = malloc(sizeof(struct tablo));
a->tab = malloc(source->size*2*sizeof(int));
a->size =source->size*2;
#pragma omp parallel for
for (int i = 0; i < a->size; i++) {
a->tab[i] = MINGLOBAL;
}
monteeMAX(source, a);
struct tablo * b = malloc(sizeof(struct tablo));
b->tab= malloc(source->size*2*sizeof(int));
b->size=source->size*2;
#pragma omp parallel for
for (int i = 0; i < b->size; i++) {
b->tab[i] = MINGLOBAL;
}
descenteSuffixeMAX(a, b);
finalMAX(a,b);
// Copy the leaf level [size, 2*size) of the combined tree into dest.
#pragma omp parallel for
for (int i=0; i < dest->size; i++){
dest->tab[i]=b->tab[i + source->size];
}
free(a->tab);
free(b->tab);
free(a);
free(b);
}
// Entry point: reads integers from the file named by argv[1], then — using
// prefix/suffix sum and max scans — computes per-index values M[i] and
// prints the maximum M value followed by the elements of the corresponding
// run. The M[i] formula (SMAX - PSUM + Q + PMAX - SSUM) is the classic
// scan-based maximum-subarray combination — presumably this implements the
// parallel maximum-sum-subsequence algorithm; the scans assume Q.size is a
// power of two (TODO confirm with the assignment spec).
int main(int argc, char **argv){
char * threads ="";
if (argc>1) {
threads= argv[1];}
else {printf("WARNING No input parameter exit status 1");
exit(1);}
struct tablo Q;
// First size the buffer from the byte length of the file (an upper bound on
// the number of integers it can contain), then shrink to the parsed count.
FILE* fileTest = fopen(threads, "r");
fseek(fileTest, 0L, SEEK_END);
Q.size = ftell(fileTest);
fseek(fileTest, 0L, SEEK_SET);
int count = 0;
Q.tab = malloc(Q.size * sizeof(int));
while (fscanf(fileTest, "%d", &Q.tab[count]) == 1) {
count++;
}
fclose(fileTest);
Q.size = count;
//printArray(&Q);
// PSUM[i] = inclusive prefix sum of Q up to i.
struct tablo * PSUM = malloc(sizeof(struct tablo));
PSUM->size =Q.size;
PSUM->tab=malloc(PSUM->size * sizeof(int));
#pragma omp parallel for
for (int i = 0; i < PSUM->size; i++) {
PSUM->tab[i] = 0;
}
scan_prefixe(&Q, PSUM);
//printArray(PSUM);
// SSUM[i] = inclusive suffix sum of Q from i.
struct tablo * SSUM = malloc(sizeof(struct tablo));
SSUM->size =Q.size;
SSUM->tab=malloc(SSUM->size * sizeof(int));
#pragma omp parallel for
for (int i = 0; i < SSUM->size; i++) {
SSUM->tab[i] = 0;
}
scan_suffixe(&Q, SSUM);
//printArray(SSUM);
// SMAX[i] = suffix max of the prefix sums.
struct tablo * SMAX = malloc(sizeof(struct tablo));
SMAX->size =Q.size;
SMAX->tab=malloc(SMAX->size * sizeof(int));
#pragma omp parallel for
for (int i = 0; i < SMAX->size; i++) {
SMAX->tab[i] = 0;
}
scan_suffixeMAX(PSUM, SMAX);
//printArray(SMAX);
// PMAX[i] = prefix max of the suffix sums.
struct tablo * PMAX = malloc(sizeof(struct tablo));
PMAX->size =Q.size;
PMAX->tab=malloc(PMAX->size * sizeof(int));
#pragma omp parallel for
// NOTE(review): loop bound uses SMAX->size; identical to PMAX->size here.
for (int i = 0; i < SMAX->size; i++) {
PMAX->tab[i] = 0;
}
scan_prefixeMAX(SSUM, PMAX);
//printArray(PMAX);
// M[i] combines the four scans into the best subarray sum through index i.
struct tablo * M = malloc(sizeof(struct tablo));
M->size =Q.size;
M->tab=malloc(M->size * sizeof(int));
#pragma omp parallel for
for (int i = 0; i < M->size; i++) {
M->tab[i] = 0;
}
#pragma omp parallel for
for (int i=0; i < Q.size; i++) {
M->tab[i] = SMAX->tab[i] - PSUM->tab[i] + Q.tab[i] + PMAX->tab[i] - SSUM->tab[i];
}
//printArray(M);
int MaxValue = MINGLOBAL;
int index = Q.size;
// STEP 6 with reduction
// On cherche d'abord la valeur maximal en utilisant reduction
#pragma omp parallel for reduction(max:MaxValue)
for (int indexIterator = 0; indexIterator < M->size; ++indexIterator) {
//printf("%d",omp_get_thread_num());
if (M->tab[indexIterator] > MaxValue) {
MaxValue = M->tab[indexIterator];
}
}
// Then find the smallest index holding that maximum value.
#pragma omp parallel for reduction(min:index)
for (int indexIterator = 0; indexIterator < M->size; ++indexIterator) {
//printf(" %d ",index);
if (M->tab[indexIterator] == MaxValue) {
if(index>indexIterator){
index=indexIterator;
}
}
}
// Starting from that first index, print Q elements while M stays equal to
// MaxValue (the run belonging to the maximal subarray).
int pointeur= index;
printf("%d",MaxValue);
while(pointeur < M->size){
if (!(M->tab[pointeur] == MaxValue)){
pointeur--;
break;
}
else {
if (pointeur == M->size-1){
printf(" %d", Q.tab[pointeur]);
break;
}
else {
if (pointeur <= M->size-1){
printf(" %d", Q.tab[pointeur]);
}pointeur++;}
}
}
free(Q.tab);
free(M->tab);
free(PSUM->tab);
free(SSUM->tab);
free(PMAX->tab);
free(SMAX->tab);
free(M);
free(PSUM);
free(SSUM);
free(PMAX);
free(SMAX);
}
|
control_tool.c | // RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7
#define TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN
#include "callback.h"
#include <omp.h>
int main()
{
// Single-threaded parallel region: record frame addresses, then trigger the
// ompt_callback_control_tool event via omp_control_tool (command=3 flush).
#pragma omp parallel num_threads(1)
{
print_frame_from_outlined_fn(1);
print_frame(0);
omp_control_tool(omp_control_tool_flush, 1, NULL);
print_current_address(0);
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_control_tool'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: __builtin_frame_address({{.}})=[[EXIT_FRAME:0x[0-f]*]]
// CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(0)=[[REENTER_FRAME:0x[0-f]*]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_control_tool: command=3, modifier=1, arg=[[NULL]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]*]], current_task_frame.exit=[[EXIT_FRAME]], current_task_frame.reenter=[[REENTER_FRAME]]
// CHECK-NEXT: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS]]
return 0;
}
|
pcptdesencryptecbcaomp.c | /*******************************************************************************
* Copyright 2002-2019 Intel Corporation
* All Rights Reserved.
*
* If this software was obtained under the Intel Simplified Software License,
* the following terms apply:
*
* The source code, information and material ("Material") contained herein is
* owned by Intel Corporation or its suppliers or licensors, and title to such
* Material remains with Intel Corporation or its suppliers or licensors. The
* Material contains proprietary information of Intel or its suppliers and
* licensors. The Material is protected by worldwide copyright laws and treaty
* provisions. No part of the Material may be used, copied, reproduced,
* modified, published, uploaded, posted, transmitted, distributed or disclosed
* in any way without Intel's prior express written permission. No license under
* any patent, copyright or other intellectual property rights in the Material
* is granted to or conferred upon you, either expressly, by implication,
* inducement, estoppel or otherwise. Any license under such intellectual
* property rights must be express and approved by Intel in writing.
*
* Unless otherwise agreed by Intel in writing, you may not remove or alter this
* notice or any other notice embedded in Materials by Intel or Intel's
* suppliers or licensors in any way.
*
*
* If this software was obtained under the Apache License, Version 2.0 (the
* "License"), the following terms apply:
*
* You may not use this file except in compliance with the License. You may
* obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
/*
// Name:
// ippsTDESEncryptECB
//
// Purpose:
// Cryptography Primitives.
// Encrypt byte data stream according to TDES.
//
//
*/
#include "owndefs.h"
#if defined ( _OPENMP )
#include "owncp.h"
#include "pcpdes.h"
#include "pcptool.h"
#include "omp.h"
/*F*
// Name:
// ippsTDESEncryptECB
//
// Purpose:
// Encrypt byte data stream according to TDES in ECB mode
// using OpenMP API.
//
// Returns:
// ippStsNoErr No errors, it's OK.
// ippStsNullPtrErr ( pCtx1 == NULL ) || ( pCtx2 == NULL ) ||
// ( pCtx3 == NULL ) || ( pSrc == NULL ) ||
// ( pDst == NULL )
// ippStsLengthErr srcLen < 1
// ippStsContextMatchErr ( pCtx1->idCtx != idCtxDES ) ||
// ( pCtx2->idCtx != idCtxDES ) ||
// ( pCtx3->idCtx != idCtxDES )
// ippStsUnderRunErr srcLen % 8
//
// Parameters:
// pSrc Pointer to the input ciphertext byte data stream.
// pDst Pointer to the output plaintext byte data stream.
// srcLen Ciphertext data stream length in bytes.
// pCtx1 Pointer to the IppsDESSpec context.
// pCtx2 Pointer to the IppsDESSpec context.
// pCtx3 Pointer to the IppsDESSpec context.
// padding Padding scheme indicator
*F*/
// Encrypt nBlocks 8-byte blocks with triple-DES using the three supplied
// round-key schedules, applied in order pRKey[0], pRKey[1], pRKey[2].
static
void TDES_EncECB_processing(const Ipp8u* pSrc, Ipp8u* pDst, int nBlocks,
const RoundKeyDES* pRKey[3])
{
/*
// encrypt block-by-block aligned streams
*/
// When both pointers are 8-byte aligned, the 64-bit bulk path can be used.
if( !(IPP_UINT_PTR(pSrc) & 0x7) && !(IPP_UINT_PTR(pDst) & 0x7)) {
ECB_TDES((const Ipp64u*)pSrc, (Ipp64u*)pDst, nBlocks, pRKey, DESspbox);
}
/*
// encrypt block-by-block misaligned streams
*/
// Otherwise stage each block through an aligned local before/after the
// three DES passes.
else {
Ipp64u block;
while(nBlocks) {
CopyBlock8(pSrc, &block);
block = Cipher_DES(block, pRKey[0], DESspbox);
block = Cipher_DES(block, pRKey[1], DESspbox);
block = Cipher_DES(block, pRKey[2], DESspbox);
CopyBlock8(&block, pDst);
pSrc += MBS_DES;
pDst += MBS_DES;
nBlocks--;
}
}
}
// TDES-ECB encryption entry point: validates arguments, builds the E-D-E key
// schedule (encrypt keys from ctx1/ctx3, decrypt keys from ctx2), and splits
// the block stream across OpenMP threads when that is worthwhile.
IPPFUN(IppStatus, ippsTDESEncryptECB,(const Ipp8u* pSrc, Ipp8u* pDst, int srcLen,
const IppsDESSpec* pCtx1,
const IppsDESSpec* pCtx2,
const IppsDESSpec* pCtx3,
IppsPadding padding))
{
/* test the pointers */
IPP_BAD_PTR2_RET(pSrc, pDst);
IPP_BAD_PTR3_RET(pCtx1, pCtx2, pCtx3);
/* align the contexts */
pCtx1 = (IppsDESSpec*)(IPP_ALIGNED_PTR(pCtx1, DES_ALIGNMENT));
pCtx2 = (IppsDESSpec*)(IPP_ALIGNED_PTR(pCtx2, DES_ALIGNMENT));
pCtx3 = (IppsDESSpec*)(IPP_ALIGNED_PTR(pCtx3, DES_ALIGNMENT));
/* test the contexts */
IPP_BADARG_RET(!DES_ID_TEST(pCtx1), ippStsContextMatchErr);
IPP_BADARG_RET(!DES_ID_TEST(pCtx2), ippStsContextMatchErr);
IPP_BADARG_RET(!DES_ID_TEST(pCtx3), ippStsContextMatchErr);
/* test the data stream length */
IPP_BADARG_RET((srcLen<1), ippStsLengthErr);
/* test the data stream integrity */
IPP_BADARG_RET((srcLen&(MBS_DES-1)), ippStsUnderRunErr);
IPP_UNREFERENCED_PARAMETER(padding);
{
int nBlocks = srcLen / MBS_DES;
// Cap the thread count so each thread gets at least
// TDES_MIN_BLK_PER_THREAD blocks of work.
int nThreads = IPP_MIN(IPPCP_GET_NUM_THREADS(), IPP_MAX(nBlocks/TDES_MIN_BLK_PER_THREAD, 1));
const RoundKeyDES* pRKey[3];
pRKey[0] = DES_EKEYS(pCtx1);
pRKey[1] = DES_DKEYS(pCtx2);
pRKey[2] = DES_EKEYS(pCtx3);
if(1==nThreads)
TDES_EncECB_processing(pSrc, pDst, nBlocks, pRKey);
else {
// Shared work-split sizes: written only by the master thread, then made
// visible to all threads by the barrier below.
int blksThreadReg;
int blksThreadTail;
#pragma omp parallel IPPCP_OMP_LIMIT_MAX_NUM_THREADS(nThreads)
{
#pragma omp master
{
nThreads = omp_get_num_threads();
blksThreadReg = nBlocks / nThreads;
blksThreadTail = blksThreadReg + nBlocks % nThreads;
}
#pragma omp barrier
{
// Each thread processes a contiguous slice; the last thread also takes
// the remainder blocks.
int id = omp_get_thread_num();
Ipp8u* pThreadSrc = (Ipp8u*)pSrc + id*blksThreadReg * MBS_DES;
Ipp8u* pThreadDst = (Ipp8u*)pDst + id*blksThreadReg * MBS_DES;
int blkThread = (id==(nThreads-1))? blksThreadTail : blksThreadReg;
TDES_EncECB_processing(pThreadSrc, pThreadDst, blkThread, pRKey);
}
}
}
return ippStsNoErr;
}
}
#endif /* #ifdef _OPENMP */
|
implicit_blender.c | /*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) Blender Foundation
* All rights reserved.
*/
/** \file
* \ingroup bph
*/
#include "implicit.h"
#ifdef IMPLICIT_SOLVER_BLENDER
# include "MEM_guardedalloc.h"
# include "DNA_scene_types.h"
# include "DNA_object_types.h"
# include "DNA_object_force_types.h"
# include "DNA_meshdata_types.h"
# include "DNA_texture_types.h"
# include "BLI_math.h"
# include "BLI_utildefines.h"
# include "BKE_cloth.h"
# include "BKE_collision.h"
# include "BKE_effect.h"
# include "BPH_mass_spring.h"
# ifdef __GNUC__
# pragma GCC diagnostic ignored "-Wtype-limits"
# endif
# ifdef _OPENMP
# define CLOTH_OPENMP_LIMIT 512
# endif
//#define DEBUG_TIME
# ifdef DEBUG_TIME
# include "PIL_time.h"
# endif
static float I[3][3] = {{1, 0, 0}, {0, 1, 0}, {0, 0, 1}};
static float ZERO[3][3] = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}};
# if 0
# define C99
# ifdef C99
# defineDO_INLINE inline
# else
# defineDO_INLINE static
# endif
# endif /* if 0 */
struct Cloth;
//////////////////////////////////////////
/* fast vector / matrix library, enhancements are welcome :) -dg */
/////////////////////////////////////////
/* DEFINITIONS */
typedef float lfVector[3];
typedef struct fmatrix3x3 {
float m[3][3]; /* 3x3 matrix */
unsigned int c, r; /* column and row number */
/* int pinned; // is this vertex allowed to move? */
float n1, n2, n3; /* three normal vectors for collision constrains */
unsigned int vcount; /* vertex count */
unsigned int scount; /* spring count */
} fmatrix3x3;
///////////////////////////
// float[3] vector
///////////////////////////
/* simple vector code */
/* STATUS: verified */
/* Scale a float[3]: to = from * scalar (component-wise). */
DO_INLINE void mul_fvector_S(float to[3], float from[3], float scalar)
{
for (int k = 0; k < 3; k++) {
to[k] = from[k] * scalar;
}
}
/* simple v^T * v product ("outer product") */
/* STATUS: HAS TO BE verified (*should* work) */
// Outer product: to[i][j] = vectorA[i] * vectorB[j] (each row is B scaled
// by one component of A).
DO_INLINE void mul_fvectorT_fvector(float to[3][3], float vectorA[3], float vectorB[3])
{
mul_fvector_S(to[0], vectorB, vectorA[0]);
mul_fvector_S(to[1], vectorB, vectorA[1]);
mul_fvector_S(to[2], vectorB, vectorA[2]);
}
/* simple v^T * v product with scalar ("outer product") */
/* STATUS: HAS TO BE verified (*should* work) */
// Scaled outer product: to = (A outer B) * aS.
DO_INLINE void mul_fvectorT_fvectorS(float to[3][3], float vectorA[3], float vectorB[3], float aS)
{
mul_fvectorT_fvector(to, vectorA, vectorB);
mul_fvector_S(to[0], to[0], aS);
mul_fvector_S(to[1], to[1], aS);
mul_fvector_S(to[2], to[2], aS);
}
# if 0
/* printf vector[3] on console: for debug output */
static void print_fvector(float m3[3])
{
printf("%f\n%f\n%f\n\n", m3[0], m3[1], m3[2]);
}
///////////////////////////
// long float vector float (*)[3]
///////////////////////////
/* print long vector on console: for debug output */
DO_INLINE void print_lfvector(float (*fLongVector)[3], unsigned int verts)
{
unsigned int i = 0;
for (i = 0; i < verts; i++) {
print_fvector(fLongVector[i]);
}
}
# endif
/* create long vector */
// Allocate a zero-initialized long vector of `verts` float[3] entries;
// caller frees with del_lfvector().
DO_INLINE lfVector *create_lfvector(unsigned int verts)
{
/* TODO: check if memory allocation was successful */
return (lfVector *)MEM_callocN(verts * sizeof(lfVector), "cloth_implicit_alloc_vector");
// return (lfVector *)cloth_aligned_malloc(&MEMORY_BASE, verts * sizeof(lfVector));
}
/* delete long vector */
// Free a long vector created by create_lfvector(); NULL is a no-op.
DO_INLINE void del_lfvector(float (*fLongVector)[3])
{
if (fLongVector != NULL) {
MEM_freeN(fLongVector);
// cloth_aligned_free(&MEMORY_BASE, fLongVector);
}
}
/* copy long vector */
// Copy `verts` float[3] entries from `from` to `to` (ranges must not overlap).
DO_INLINE void cp_lfvector(float (*to)[3], float (*from)[3], unsigned int verts)
{
memcpy(to, from, verts * sizeof(lfVector));
}
/* init long vector with float[3] */
// Fill every entry of a long vector with the same float[3] value.
DO_INLINE void init_lfvector(float (*fLongVector)[3], float vector[3], unsigned int verts)
{
unsigned int i = 0;
for (i = 0; i < verts; i++) {
copy_v3_v3(fLongVector[i], vector);
}
}
/* zero long vector with float[3] */
/* Zero out a long vector of `verts` float[3] entries.
 * Fix: memset's fill argument is an int; the original passed the float
 * literal 0.0f, which only worked via an implicit float->int conversion.
 * Pass the integer 0 explicitly (an all-zero byte pattern is 0.0f in
 * IEEE-754, so behavior is unchanged). */
DO_INLINE void zero_lfvector(float (*to)[3], unsigned int verts)
{
memset(to, 0, verts * sizeof(lfVector));
}
/* multiply long vector with scalar*/
// to[i] = fLongVector[i] * scalar for each of the `verts` entries.
DO_INLINE void mul_lfvectorS(float (*to)[3],
float (*fLongVector)[3],
float scalar,
unsigned int verts)
{
unsigned int i = 0;
for (i = 0; i < verts; i++) {
mul_fvector_S(to[i], fLongVector[i], scalar);
}
}
/* multiply long vector with scalar*/
/* A -= B * float */
// to[i] -= fLongVector[i] * scalar for each of the `verts` entries.
DO_INLINE void submul_lfvectorS(float (*to)[3],
float (*fLongVector)[3],
float scalar,
unsigned int verts)
{
unsigned int i = 0;
for (i = 0; i < verts; i++) {
VECSUBMUL(to[i], fLongVector[i], scalar);
}
}
/* dot product for big vector */
// Dot product of two long vectors: sum of per-entry 3D dot products.
// Kept serial on purpose — see the note below about non-determinism.
DO_INLINE float dot_lfvector(float (*fLongVectorA)[3],
float (*fLongVectorB)[3],
unsigned int verts)
{
long i = 0;
float temp = 0.0;
// XXX brecht, disabled this for now (first schedule line was already disabled),
// due to non-commutative nature of floating point ops this makes the sim give
// different results each time you run it!
// schedule(guided, 2)
//#pragma omp parallel for reduction(+: temp) if (verts > CLOTH_OPENMP_LIMIT)
for (i = 0; i < (long)verts; i++) {
temp += dot_v3v3(fLongVectorA[i], fLongVectorB[i]);
}
return temp;
}
/* A = B + C --> for big vector */
// to[i] = A[i] + B[i] for each of the `verts` entries.
DO_INLINE void add_lfvector_lfvector(float (*to)[3],
float (*fLongVectorA)[3],
float (*fLongVectorB)[3],
unsigned int verts)
{
unsigned int i = 0;
for (i = 0; i < verts; i++) {
add_v3_v3v3(to[i], fLongVectorA[i], fLongVectorB[i]);
}
}
/* A = B + C * float --> for big vector */
// to[i] = A[i] + B[i] * bS for each of the `verts` entries.
DO_INLINE void add_lfvector_lfvectorS(float (*to)[3],
float (*fLongVectorA)[3],
float (*fLongVectorB)[3],
float bS,
unsigned int verts)
{
unsigned int i = 0;
for (i = 0; i < verts; i++) {
VECADDS(to[i], fLongVectorA[i], fLongVectorB[i], bS);
}
}
/* A = B * float + C * float --> for big vector */
// to[i] = A[i] * aS + B[i] * bS for each of the `verts` entries.
DO_INLINE void add_lfvectorS_lfvectorS(float (*to)[3],
float (*fLongVectorA)[3],
float aS,
float (*fLongVectorB)[3],
float bS,
unsigned int verts)
{
unsigned int i = 0;
for (i = 0; i < verts; i++) {
VECADDSS(to[i], fLongVectorA[i], aS, fLongVectorB[i], bS);
}
}
/* A = B - C * float --> for big vector */
// to[i] = A[i] - B[i] * bS for each of the `verts` entries.
DO_INLINE void sub_lfvector_lfvectorS(float (*to)[3],
float (*fLongVectorA)[3],
float (*fLongVectorB)[3],
float bS,
unsigned int verts)
{
unsigned int i = 0;
for (i = 0; i < verts; i++) {
VECSUBS(to[i], fLongVectorA[i], fLongVectorB[i], bS);
}
}
/* A = B - C --> for big vector */
// to[i] = A[i] - B[i] for each of the `verts` entries.
DO_INLINE void sub_lfvector_lfvector(float (*to)[3],
float (*fLongVectorA)[3],
float (*fLongVectorB)[3],
unsigned int verts)
{
unsigned int i = 0;
for (i = 0; i < verts; i++) {
sub_v3_v3v3(to[i], fLongVectorA[i], fLongVectorB[i]);
}
}
///////////////////////////
// 3x3 matrix
///////////////////////////
# if 0
/* printf 3x3 matrix on console: for debug output */
static void print_fmatrix(float m3[3][3])
{
printf("%f\t%f\t%f\n", m3[0][0], m3[0][1], m3[0][2]);
printf("%f\t%f\t%f\n", m3[1][0], m3[1][1], m3[1][2]);
printf("%f\t%f\t%f\n\n", m3[2][0], m3[2][1], m3[2][2]);
}
static void print_sparse_matrix(fmatrix3x3 *m)
{
if (m) {
unsigned int i;
for (i = 0; i < m[0].vcount + m[0].scount; i++) {
printf("%d:\n", i);
print_fmatrix(m[i].m);
}
}
}
# endif
# if 0
static void print_lvector(lfVector *v, int numverts)
{
int i;
for (i = 0; i < numverts; ++i) {
if (i > 0) {
printf("\n");
}
printf("%f,\n", v[i][0]);
printf("%f,\n", v[i][1]);
printf("%f,\n", v[i][2]);
}
}
# endif
# if 0
static void print_bfmatrix(fmatrix3x3 *m)
{
int tot = m[0].vcount + m[0].scount;
int size = m[0].vcount * 3;
float *t = MEM_callocN(sizeof(float) * size * size, "bfmatrix");
int q, i, j;
for (q = 0; q < tot; ++q) {
int k = 3 * m[q].r;
int l = 3 * m[q].c;
for (j = 0; j < 3; ++j) {
for (i = 0; i < 3; ++i) {
// if (t[k + i + (l + j) * size] != 0.0f) {
// printf("warning: overwriting value at %d, %d\n", m[q].r, m[q].c);
// }
if (k == l) {
t[k + i + (k + j) * size] += m[q].m[i][j];
}
else {
t[k + i + (l + j) * size] += m[q].m[i][j];
t[l + j + (k + i) * size] += m[q].m[j][i];
}
}
}
}
for (j = 0; j < size; ++j) {
if (j > 0 && j % 3 == 0) {
printf("\n");
}
for (i = 0; i < size; ++i) {
if (i > 0 && i % 3 == 0) {
printf(" ");
}
implicit_print_matrix_elem(t[i + j * size]);
}
printf("\n");
}
MEM_freeN(t);
}
# endif
/* copy 3x3 matrix */
// Copy a 3x3 matrix row by row.
DO_INLINE void cp_fmatrix(float to[3][3], float from[3][3])
{
// memcpy(to, from, sizeof (float) * 9);
copy_v3_v3(to[0], from[0]);
copy_v3_v3(to[1], from[1]);
copy_v3_v3(to[2], from[2]);
}
/* copy 3x3 matrix */
// Set `to` to aS * identity (zero everything, then fill the diagonal).
DO_INLINE void initdiag_fmatrixS(float to[3][3], float aS)
{
cp_fmatrix(to, ZERO);
to[0][0] = aS;
to[1][1] = aS;
to[2][2] = aS;
}
# if 0
/* calculate determinant of 3x3 matrix */
DO_INLINE float det_fmatrix(float m[3][3])
{
return m[0][0] * m[1][1] * m[2][2] + m[1][0] * m[2][1] * m[0][2] + m[0][1] * m[1][2] * m[2][0] -
m[0][0] * m[1][2] * m[2][1] - m[0][1] * m[1][0] * m[2][2] - m[2][0] * m[1][1] * m[0][2];
}
DO_INLINE void inverse_fmatrix(float to[3][3], float from[3][3])
{
unsigned int i, j;
float d;
if ((d = det_fmatrix(from)) == 0) {
printf("can't build inverse");
exit(0);
}
for (i = 0; i < 3; i++) {
for (j = 0; j < 3; j++) {
int i1 = (i + 1) % 3;
int i2 = (i + 2) % 3;
int j1 = (j + 1) % 3;
int j2 = (j + 2) % 3;
/** Reverse indexes i&j to take transpose. */
to[j][i] = (from[i1][j1] * from[i2][j2] - from[i1][j2] * from[i2][j1]) / d;
/**
* <pre>
* if (i == j) {
* to[i][j] = 1.0f / from[i][j];
* }
* else {
* to[i][j] = 0;
* }
* </pre>
*/
}
}
}
# endif
/* 3x3 matrix multiplied by a scalar */
/* STATUS: verified */
// Scale a 3x3 matrix in place by `scalar`.
DO_INLINE void mul_fmatrix_S(float matrix[3][3], float scalar)
{
mul_fvector_S(matrix[0], matrix[0], scalar);
mul_fvector_S(matrix[1], matrix[1], scalar);
mul_fvector_S(matrix[2], matrix[2], scalar);
}
/* a vector multiplied by a 3x3 matrix */
/* STATUS: verified */
/* Row-vector times matrix: to = from * matrix, i.e. to[c] is the dot of
 * `from` with the matrix's column c. `to` must not alias `from`. */
DO_INLINE void mul_fvector_fmatrix(float *to, float *from, float matrix[3][3])
{
for (int c = 0; c < 3; c++) {
to[c] = matrix[0][c] * from[0] + matrix[1][c] * from[1] + matrix[2][c] * from[2];
}
}
/* 3x3 matrix multiplied by a vector */
/* STATUS: verified */
// Matrix times column vector: to[r] = dot(matrix row r, from).
DO_INLINE void mul_fmatrix_fvector(float *to, float matrix[3][3], float from[3])
{
to[0] = dot_v3v3(matrix[0], from);
to[1] = dot_v3v3(matrix[1], from);
to[2] = dot_v3v3(matrix[2], from);
}
/* 3x3 matrix addition with 3x3 matrix */
// to = A + B, row by row.
DO_INLINE void add_fmatrix_fmatrix(float to[3][3], float matrixA[3][3], float matrixB[3][3])
{
add_v3_v3v3(to[0], matrixA[0], matrixB[0]);
add_v3_v3v3(to[1], matrixA[1], matrixB[1]);
add_v3_v3v3(to[2], matrixA[2], matrixB[2]);
}
/* A -= B*x + C*y (3x3 matrix sub-addition with 3x3 matrix) */
// to -= A * aS + B * bS, row by row (see VECSUBADDSS).
DO_INLINE void subadd_fmatrixS_fmatrixS(
float to[3][3], float matrixA[3][3], float aS, float matrixB[3][3], float bS)
{
VECSUBADDSS(to[0], matrixA[0], aS, matrixB[0], bS);
VECSUBADDSS(to[1], matrixA[1], aS, matrixB[1], bS);
VECSUBADDSS(to[2], matrixA[2], aS, matrixB[2], bS);
}
/* Component-wise 3x3 matrix subtraction: to = matrixA - matrixB. */
DO_INLINE void sub_fmatrix_fmatrix(float to[3][3], float matrixA[3][3], float matrixB[3][3])
{
  int row;
  for (row = 0; row < 3; row++) {
    sub_v3_v3v3(to[row], matrixA[row], matrixB[row]);
  }
}
/////////////////////////////////////////////////////////////////
// special functions
/////////////////////////////////////////////////////////////////
/* Accumulate a matrix-vector product: to += matrix * from. */
/* STATUS: verified */
DO_INLINE void muladd_fmatrix_fvector(float to[3], float matrix[3][3], float from[3])
{
  int row;
  for (row = 0; row < 3; row++) {
    to[row] += dot_v3v3(matrix[row], from);
  }
}
/* Accumulate a transposed matrix-vector product: to += matrix^T * from. */
DO_INLINE void muladd_fmatrixT_fvector(float to[3], float matrix[3][3], float from[3])
{
  int col;
  for (col = 0; col < 3; col++) {
    to[col] += matrix[0][col] * from[0] + matrix[1][col] * from[1] + matrix[2][col] * from[2];
  }
}
/* Outer product of two vectors: row k of r is a scaled by b[k]. */
BLI_INLINE void outerproduct(float r[3][3], const float a[3], const float b[3])
{
  int k;
  for (k = 0; k < 3; k++) {
    mul_v3_v3fl(r[k], a, b[k]);
  }
}
/* Cross each row of m with v: r[k] = v x m[k]. */
BLI_INLINE void cross_m3_v3m3(float r[3][3], const float v[3], float m[3][3])
{
  int k;
  for (k = 0; k < 3; k++) {
    cross_v3_v3v3(r[k], v, m[k]);
  }
}
/* Build the skew-symmetric matrix of v, so that (r * x) == cross(v, x)
 * for the multiplication convention used by mul_m3_v3 here. */
BLI_INLINE void cross_v3_identity(float r[3][3], const float v[3])
{
  r[0][0] = 0.0f;
  r[0][1] = -v[2];
  r[0][2] = v[1];
  r[1][0] = v[2];
  r[1][1] = 0.0f;
  r[1][2] = -v[0];
  r[2][0] = -v[1];
  r[2][1] = v[0];
  r[2][2] = 0.0f;
}
/* Multiply-add of a scaled matrix: r += m * f, component-wise. */
BLI_INLINE void madd_m3_m3fl(float r[3][3], float m[3][3], float f)
{
  int i, j;
  for (i = 0; i < 3; i++) {
    for (j = 0; j < 3; j++) {
      r[i][j] += m[i][j] * f;
    }
  }
}
/////////////////////////////////////////////////////////////////
///////////////////////////
// SPARSE SYMMETRIC big matrix with 3x3 matrix entries
///////////////////////////
/* printf a big matrix on console: for debug output */
# if 0
/* Debug helper: dump every 3x3 block of a big matrix (vertex blocks first,
 * then spring blocks). Only compiled when the surrounding #if 0 is enabled. */
static void print_bfmatrix(fmatrix3x3 *m3)
{
  unsigned int i = 0;
  /* block counts are stored on element 0 */
  for (i = 0; i < m3[0].vcount + m3[0].scount; i++) {
    print_fmatrix(m3[i].m);
  }
}
# endif
/* Record the (row, column) position of one 3x3 block within the big matrix. */
BLI_INLINE void init_fmatrix(fmatrix3x3 *matrix, int r, int c)
{
  matrix->c = c;
  matrix->r = r;
}
/* create big matrix */
/* Allocate a zero-initialized big matrix of (verts + springs) 3x3 blocks.
 * The first `verts` blocks are diagonal (per-vertex) entries, initialized
 * here; the trailing `springs` blocks are off-diagonal entries positioned
 * later via init_fmatrix(). Block counts are stored on element 0. */
DO_INLINE fmatrix3x3 *create_bfmatrix(unsigned int verts, unsigned int springs)
{
  /* TODO: check if memory allocation was successful. */
  fmatrix3x3 *temp = (fmatrix3x3 *)MEM_callocN(sizeof(fmatrix3x3) * (verts + springs),
                                               "cloth_implicit_alloc_matrix");
  /* unsigned to match `verts`; the original `int` caused a signed/unsigned
   * comparison in the loop condition */
  unsigned int i;
  temp[0].vcount = verts;
  temp[0].scount = springs;
  /* vertex part of the matrix is diagonal blocks */
  for (i = 0; i < verts; ++i) {
    init_fmatrix(temp + i, i, i);
  }
  return temp;
}
/* delete big matrix */
/* Free a big matrix; NULL is accepted and ignored. */
DO_INLINE void del_bfmatrix(fmatrix3x3 *matrix)
{
  if (matrix == NULL) {
    return;
  }
  MEM_freeN(matrix);
}
/* copy big matrix */
/* Copies all blocks; `to` must be at least as large as `from`. */
DO_INLINE void cp_bfmatrix(fmatrix3x3 *to, fmatrix3x3 *from)
{
  /* TODO bounds checking */
  const unsigned int total = from[0].vcount + from[0].scount;
  memcpy(to, from, sizeof(fmatrix3x3) * total);
}
/* init big matrix */
/* Set every 3x3 block (vertex and spring entries alike) to m3. */
// slow in parallel
DO_INLINE void init_bfmatrix(fmatrix3x3 *matrix, float m3[3][3])
{
  const unsigned int total = matrix[0].vcount + matrix[0].scount;
  unsigned int i;
  for (i = 0; i < total; i++) {
    cp_fmatrix(matrix[i].m, m3);
  }
}
/* init the diagonal of big matrix */
/* Diagonal (vertex) blocks are set to m3; off-diagonal (spring) blocks are
 * cleared to zero. */
// slow in parallel
DO_INLINE void initdiag_bfmatrix(fmatrix3x3 *matrix, float m3[3][3])
{
  float zero[3][3] = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}};
  unsigned int i;
  for (i = 0; i < matrix[0].vcount; i++) {
    cp_fmatrix(matrix[i].m, m3);
  }
  for (i = matrix[0].vcount; i < matrix[0].vcount + matrix[0].scount; i++) {
    cp_fmatrix(matrix[i].m, zero);
  }
}
/* SPARSE SYMMETRIC multiply big matrix with long vector*/
/* STATUS: verified */
/* to = from * fLongVector. `from` stores the diagonal blocks plus the
 * lower-triangle (spring) blocks of a symmetric matrix; the mirrored
 * upper-triangle contribution is obtained by multiplying the spring blocks
 * transposed. */
DO_INLINE void mul_bfmatrix_lfvector(float (*to)[3], fmatrix3x3 *from, lfVector *fLongVector)
{
  unsigned int vcount = from[0].vcount;
  lfVector *temp = create_lfvector(vcount);
  zero_lfvector(to, vcount);
  /* The two sections accumulate into disjoint vectors (`to` vs `temp`),
   * so they can safely run in parallel; the results are summed below. */
# pragma omp parallel sections if (vcount > CLOTH_OPENMP_LIMIT)
  {
# pragma omp section
    {
      /* mirrored upper-triangle contribution from the spring blocks */
      for (unsigned int i = from[0].vcount; i < from[0].vcount + from[0].scount; i++) {
        /* This is the lower triangle of the sparse matrix,
         * therefore multiplication occurs with transposed submatrices. */
        muladd_fmatrixT_fvector(to[from[i].c], from[i].m, fLongVector[from[i].r]);
      }
    }
# pragma omp section
    {
      /* diagonal blocks plus stored lower-triangle blocks */
      for (unsigned int i = 0; i < from[0].vcount + from[0].scount; i++) {
        muladd_fmatrix_fvector(temp[from[i].r], from[i].m, fLongVector[from[i].c]);
      }
    }
  }
  add_lfvector_lfvector(to, to, temp, from[0].vcount);
  del_lfvector(temp);
}
/* SPARSE SYMMETRIC sub big matrix with big matrix*/
/* A -= B * float + C * float --> for big matrix */
/* VERIFIED */
/* Applied blockwise over every 3x3 block (diagonal and off-diagonal). */
DO_INLINE void subadd_bfmatrixS_bfmatrixS(
    fmatrix3x3 *to, fmatrix3x3 *from, float aS, fmatrix3x3 *matrix, float bS)
{
  const unsigned int total = matrix[0].vcount + matrix[0].scount;
  unsigned int i;
  for (i = 0; i < total; i++) {
    subadd_fmatrixS_fmatrixS(to[i].m, from[i].m, aS, matrix[i].m, bS);
  }
}
///////////////////////////////////////////////////////////////////
// simulator start
///////////////////////////////////////////////////////////////////
/* Complete state of one implicit mass-spring solver instance: system
 * matrices, force vectors and vertex motion state, all in root-frame
 * coordinates (see world_to_root_v3 / root_to_world_v3). */
typedef struct Implicit_Data {
  /* inputs */
  fmatrix3x3 *bigI;        /* identity (constant) */
  fmatrix3x3 *tfm;         /* local coordinate transform */
  fmatrix3x3 *M;           /* masses */
  lfVector *F;             /* forces */
  fmatrix3x3 *dFdV, *dFdX; /* force jacobians */
  int num_blocks;          /* number of off-diagonal blocks (springs) */
  /* motion state data */
  lfVector *X, *Xnew; /* positions */
  lfVector *V, *Vnew; /* velocities */
  /* internal solver data */
  lfVector *B;          /* B for A*dV = B */
  fmatrix3x3 *A;        /* A for A*dV = B */
  lfVector *dV;         /* velocity change (solution of A*dV = B) */
  lfVector *z;          /* target velocity in constrained directions */
  fmatrix3x3 *S;        /* filtering matrix for constraints */
  fmatrix3x3 *P, *Pinv; /* pre-conditioning matrix */
} Implicit_Data;
/* Allocate all solver state for `numverts` vertices and `numsprings` springs.
 * Matrices that hold no spring coupling (tfm, S) get diagonal blocks only. */
Implicit_Data *BPH_mass_spring_solver_create(int numverts, int numsprings)
{
  Implicit_Data *id = (Implicit_Data *)MEM_callocN(sizeof(Implicit_Data), "implicit vecmat");
  /* process diagonal elements */
  id->tfm = create_bfmatrix(numverts, 0);
  id->A = create_bfmatrix(numverts, numsprings);
  id->dFdV = create_bfmatrix(numverts, numsprings);
  id->dFdX = create_bfmatrix(numverts, numsprings);
  id->S = create_bfmatrix(numverts, 0);
  id->Pinv = create_bfmatrix(numverts, numsprings);
  id->P = create_bfmatrix(numverts, numsprings);
  id->bigI = create_bfmatrix(numverts, numsprings);  // TODO 0 springs
  id->M = create_bfmatrix(numverts, numsprings);
  id->X = create_lfvector(numverts);
  id->Xnew = create_lfvector(numverts);
  id->V = create_lfvector(numverts);
  id->Vnew = create_lfvector(numverts);
  id->F = create_lfvector(numverts);
  id->B = create_lfvector(numverts);
  id->dV = create_lfvector(numverts);
  id->z = create_lfvector(numverts);
  /* diagonal blocks of bigI start out as 3x3 identity */
  initdiag_bfmatrix(id->bigI, I);
  return id;
}
/* Release everything allocated by BPH_mass_spring_solver_create(). */
void BPH_mass_spring_solver_free(Implicit_Data *id)
{
  del_bfmatrix(id->tfm);
  del_bfmatrix(id->A);
  del_bfmatrix(id->dFdV);
  del_bfmatrix(id->dFdX);
  del_bfmatrix(id->S);
  del_bfmatrix(id->P);
  del_bfmatrix(id->Pinv);
  del_bfmatrix(id->bigI);
  del_bfmatrix(id->M);
  del_lfvector(id->X);
  del_lfvector(id->Xnew);
  del_lfvector(id->V);
  del_lfvector(id->Vnew);
  del_lfvector(id->F);
  del_lfvector(id->B);
  del_lfvector(id->dV);
  del_lfvector(id->z);
  MEM_freeN(id);
}
/* ==== Transformation from/to root reference frames ==== */
/* Transform a world-space vector into the vertex's root frame by applying
 * the transposed transform (inverse, assuming tfm is a pure rotation --
 * TODO confirm; see BPH_mass_spring_set_rest_transform). */
BLI_INLINE void world_to_root_v3(Implicit_Data *data, int index, float r[3], const float v[3])
{
  copy_v3_v3(r, v);
  mul_transposed_m3_v3(data->tfm[index].m, r);
}
/* Transform a root-frame vector back into world space: r = tfm * v. */
BLI_INLINE void root_to_world_v3(Implicit_Data *data, int index, float r[3], const float v[3])
{
  mul_v3_m3v3(r, data->tfm[index].m, v);
}
/* Transform a world-space matrix into the vertex's root frame: r = tfm^T * m. */
BLI_INLINE void world_to_root_m3(Implicit_Data *data, int index, float r[3][3], float m[3][3])
{
  float tfm_t[3][3];
  copy_m3_m3(tfm_t, data->tfm[index].m);
  transpose_m3(tfm_t);
  mul_m3_m3m3(r, tfm_t, m);
}
/* Transform a root-frame matrix back into world space: r = tfm * m. */
BLI_INLINE void root_to_world_m3(Implicit_Data *data, int index, float r[3][3], float m[3][3])
{
  mul_m3_m3m3(r, data->tfm[index].m, m);
}
/* ================================ */
/* Apply the per-vertex constraint filter: each vertex's vector in V is
 * multiplied by its 3x3 filter block S[i].m, projecting out constrained
 * degrees of freedom. */
DO_INLINE void filter(lfVector *V, fmatrix3x3 *S)
{
  const unsigned int numverts = S[0].vcount;
  unsigned int i;
  for (i = 0; i < numverts; i++) {
    mul_m3_v3(S[i].m, V[S[i].r]);
  }
}
/* this version of the CG algorithm does not work very well with partial constraints
* (where S has non-zero elements). */
# if 0
/* Legacy unpreconditioned conjugate-gradient solve of A*X = B with
 * constraint filtering (disabled by the surrounding #if 0; superseded by
 * the cg_filtered() below that takes an ImplicitSolverResult). */
static int cg_filtered(lfVector *ldV, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S)
{
  // Solves for unknown X in equation AX=B
  unsigned int conjgrad_loopcount = 0, conjgrad_looplimit = 100;
  float conjgrad_epsilon = 0.0001f /* , conjgrad_lasterror=0 */ /* UNUSED */;
  lfVector *q, *d, *tmp, *r;
  float s, starget, a, s_prev;
  unsigned int numverts = lA[0].vcount;
  q = create_lfvector(numverts);
  d = create_lfvector(numverts);
  tmp = create_lfvector(numverts);
  r = create_lfvector(numverts);
  // zero_lfvector(ldV, CLOTHPARTICLES);
  /* start from the constrained target velocities z */
  filter(ldV, S);
  add_lfvector_lfvector(ldV, ldV, z, numverts);
  // r = B - Mul(tmp, A, X); // just use B if X known to be zero
  cp_lfvector(r, lB, numverts);
  mul_bfmatrix_lfvector(tmp, lA, ldV);
  sub_lfvector_lfvector(r, r, tmp, numverts);
  filter(r, S);
  cp_lfvector(d, r, numverts);
  s = dot_lfvector(r, r, numverts);
  /* convergence target relative to the initial residual */
  starget = s * sqrtf(conjgrad_epsilon);
  while (s > starget && conjgrad_loopcount < conjgrad_looplimit) {
    // Mul(q, A, d); // q = A*d;
    mul_bfmatrix_lfvector(q, lA, d);
    filter(q, S);
    a = s / dot_lfvector(d, q, numverts);
    // X = X + d*a;
    add_lfvector_lfvectorS(ldV, ldV, d, a, numverts);
    // r = r - q*a;
    sub_lfvector_lfvectorS(r, r, q, a, numverts);
    s_prev = s;
    s = dot_lfvector(r, r, numverts);
    //d = r+d*(s/s_prev);
    add_lfvector_lfvectorS(d, r, d, (s / s_prev), numverts);
    filter(d, S);
    conjgrad_loopcount++;
  }
  /* conjgrad_lasterror = s; */ /* UNUSED */
  del_lfvector(q);
  del_lfvector(d);
  del_lfvector(tmp);
  del_lfvector(r);
  // printf("W/O conjgrad_loopcount: %d\n", conjgrad_loopcount);
  return conjgrad_loopcount <
         conjgrad_looplimit;  // true means we reached desired accuracy in given time - ie stable
}
# endif
/* Conjugate-gradient solve of A*dV = B with constraint filtering S and
 * initial guess z (target velocities in constrained directions).
 * Convergence is measured against |filter(B)|^2 scaled by epsilon^2.
 * Fills `result` with status, iteration count and relative error;
 * returns nonzero on convergence. */
static int cg_filtered(lfVector *ldV,
                       fmatrix3x3 *lA,
                       lfVector *lB,
                       lfVector *z,
                       fmatrix3x3 *S,
                       ImplicitSolverResult *result)
{
  // Solves for unknown X in equation AX=B
  unsigned int conjgrad_loopcount = 0, conjgrad_looplimit = 100;
  float conjgrad_epsilon = 0.01f;
  unsigned int numverts = lA[0].vcount;
  lfVector *fB = create_lfvector(numverts);
  lfVector *AdV = create_lfvector(numverts);
  lfVector *r = create_lfvector(numverts);
  lfVector *c = create_lfvector(numverts);
  lfVector *q = create_lfvector(numverts);
  lfVector *s = create_lfvector(numverts);
  float bnorm2, delta_new, delta_old, delta_target, alpha;
  cp_lfvector(ldV, z, numverts);
  /* d0 = filter(B)^T * P * filter(B) */
  cp_lfvector(fB, lB, numverts);
  filter(fB, S);
  bnorm2 = dot_lfvector(fB, fB, numverts);
  delta_target = conjgrad_epsilon * conjgrad_epsilon * bnorm2;
  /* r = filter(B - A * dV) */
  mul_bfmatrix_lfvector(AdV, lA, ldV);
  sub_lfvector_lfvector(r, lB, AdV, numverts);
  filter(r, S);
  /* c = filter(P^-1 * r) */
  cp_lfvector(c, r, numverts);
  filter(c, S);
  /* delta = r^T * c */
  delta_new = dot_lfvector(r, c, numverts);
# ifdef IMPLICIT_PRINT_SOLVER_INPUT_OUTPUT
  printf("==== A ====\n");
  print_bfmatrix(lA);
  printf("==== z ====\n");
  print_lvector(z, numverts);
  printf("==== B ====\n");
  print_lvector(lB, numverts);
  printf("==== S ====\n");
  print_bfmatrix(S);
# endif
  /* standard CG iteration; the preconditioner P^-1 is currently the
   * identity (s is just a copy of r below) */
  while (delta_new > delta_target && conjgrad_loopcount < conjgrad_looplimit) {
    mul_bfmatrix_lfvector(q, lA, c);
    filter(q, S);
    alpha = delta_new / dot_lfvector(c, q, numverts);
    add_lfvector_lfvectorS(ldV, ldV, c, alpha, numverts);
    add_lfvector_lfvectorS(r, r, q, -alpha, numverts);
    /* s = P^-1 * r */
    cp_lfvector(s, r, numverts);
    delta_old = delta_new;
    delta_new = dot_lfvector(r, s, numverts);
    add_lfvector_lfvectorS(c, s, c, delta_new / delta_old, numverts);
    filter(c, S);
    conjgrad_loopcount++;
  }
# ifdef IMPLICIT_PRINT_SOLVER_INPUT_OUTPUT
  printf("==== dV ====\n");
  print_lvector(ldV, numverts);
  printf("========\n");
# endif
  del_lfvector(fB);
  del_lfvector(AdV);
  del_lfvector(r);
  del_lfvector(c);
  del_lfvector(q);
  del_lfvector(s);
  // printf("W/O conjgrad_loopcount: %d\n", conjgrad_loopcount);
  result->status = conjgrad_loopcount < conjgrad_looplimit ? BPH_SOLVER_SUCCESS :
                                                             BPH_SOLVER_NO_CONVERGENCE;
  result->iterations = conjgrad_loopcount;
  result->error = bnorm2 > 0.0f ? sqrtf(delta_new / bnorm2) : 0.0f;
  return conjgrad_loopcount <
         conjgrad_looplimit;  // true means we reached desired accuracy in given time - ie stable
}
# if 0
// block diagonalizer
/* Build a block-Jacobi preconditioner: P takes the diagonal 3x3 blocks of A,
 * Pinv their inverses. (Disabled: inside #if 0.) */
DO_INLINE void BuildPPinv(fmatrix3x3 *lA, fmatrix3x3 *P, fmatrix3x3 *Pinv)
{
  unsigned int i = 0;
  // Take only the diagonal blocks of A
  // #pragma omp parallel for private(i) if (lA[0].vcount > CLOTH_OPENMP_LIMIT)
  for (i = 0; i < lA[0].vcount; i++) {
    // block diagonalizer
    cp_fmatrix(P[i].m, lA[i].m);
    inverse_fmatrix(Pinv[i].m, P[i].m);
  }
}
# if 0
// version 1.3
/* Preconditioned CG with block-Jacobi preconditioner P/Pinv built from the
 * diagonal blocks of A (disabled legacy variant). */
static int cg_filtered_pre(lfVector *dv,
                           fmatrix3x3 *lA,
                           lfVector *lB,
                           lfVector *z,
                           fmatrix3x3 *S,
                           fmatrix3x3 *P,
                           fmatrix3x3 *Pinv)
{
  unsigned int numverts = lA[0].vcount, iterations = 0, conjgrad_looplimit = 100;
  float delta0 = 0, deltaNew = 0, deltaOld = 0, alpha = 0;
  float conjgrad_epsilon = 0.0001;  // 0.2 is dt for steps=5
  lfVector *r = create_lfvector(numverts);
  lfVector *p = create_lfvector(numverts);
  lfVector *s = create_lfvector(numverts);
  lfVector *h = create_lfvector(numverts);
  BuildPPinv(lA, P, Pinv);
  /* start from the constrained target velocities */
  filter(dv, S);
  add_lfvector_lfvector(dv, dv, z, numverts);
  /* residual r = filter(B - A*dv) */
  mul_bfmatrix_lfvector(r, lA, dv);
  sub_lfvector_lfvector(r, lB, r, numverts);
  filter(r, S);
  /* search direction p = filter(Pinv * r) */
  mul_prevfmatrix_lfvector(p, Pinv, r);
  filter(p, S);
  deltaNew = dot_lfvector(r, p, numverts);
  delta0 = deltaNew * sqrt(conjgrad_epsilon);
# ifdef DEBUG_TIME
  double start = PIL_check_seconds_timer();
# endif
  while ((deltaNew > delta0) && (iterations < conjgrad_looplimit)) {
    iterations++;
    mul_bfmatrix_lfvector(s, lA, p);
    filter(s, S);
    alpha = deltaNew / dot_lfvector(p, s, numverts);
    add_lfvector_lfvectorS(dv, dv, p, alpha, numverts);
    add_lfvector_lfvectorS(r, r, s, -alpha, numverts);
    mul_prevfmatrix_lfvector(h, Pinv, r);
    filter(h, S);
    deltaOld = deltaNew;
    deltaNew = dot_lfvector(r, h, numverts);
    add_lfvector_lfvectorS(p, h, p, deltaNew / deltaOld, numverts);
    filter(p, S);
  }
# ifdef DEBUG_TIME
  double end = PIL_check_seconds_timer();
  printf("cg_filtered_pre time: %f\n", (float)(end - start));
# endif
  del_lfvector(h);
  del_lfvector(s);
  del_lfvector(p);
  del_lfvector(r);
  printf("iterations: %d\n", iterations);
  return iterations < conjgrad_looplimit;
}
# endif
// version 1.4
/* Preconditioned CG variant (disabled) with a modified start vector and
 * convergence measure based on bhat = S(b - A(I-S)z), per the comments in
 * the body. */
static int cg_filtered_pre(lfVector *dv,
                           fmatrix3x3 *lA,
                           lfVector *lB,
                           lfVector *z,
                           fmatrix3x3 *S,
                           fmatrix3x3 *P,
                           fmatrix3x3 *Pinv,
                           fmatrix3x3 *bigI)
{
  unsigned int numverts = lA[0].vcount, iterations = 0, conjgrad_looplimit = 100;
  float delta0 = 0, deltaNew = 0, deltaOld = 0, alpha = 0, tol = 0;
  lfVector *r = create_lfvector(numverts);
  lfVector *p = create_lfvector(numverts);
  lfVector *s = create_lfvector(numverts);
  lfVector *h = create_lfvector(numverts);
  lfVector *bhat = create_lfvector(numverts);
  lfVector *btemp = create_lfvector(numverts);
  BuildPPinv(lA, P, Pinv);
  /* bigI is rebuilt as (I - S) */
  initdiag_bfmatrix(bigI, I);
  sub_bfmatrix_Smatrix(bigI, bigI, S);
  // x = Sx_0+(I-S)z
  filter(dv, S);
  add_lfvector_lfvector(dv, dv, z, numverts);
  // b_hat = S(b-A(I-S)z)
  mul_bfmatrix_lfvector(r, lA, z);
  mul_bfmatrix_lfvector(bhat, bigI, r);
  sub_lfvector_lfvector(bhat, lB, bhat, numverts);
  // r = S(b-Ax)
  mul_bfmatrix_lfvector(r, lA, dv);
  sub_lfvector_lfvector(r, lB, r, numverts);
  filter(r, S);
  // p = SP^-1r
  mul_prevfmatrix_lfvector(p, Pinv, r);
  filter(p, S);
  // delta0 = bhat^TP^-1bhat
  mul_prevfmatrix_lfvector(btemp, Pinv, bhat);
  delta0 = dot_lfvector(bhat, btemp, numverts);
  // deltaNew = r^TP
  deltaNew = dot_lfvector(r, p, numverts);
# if 0
  filter(dv, S);
  add_lfvector_lfvector(dv, dv, z, numverts);
  mul_bfmatrix_lfvector(r, lA, dv);
  sub_lfvector_lfvector(r, lB, r, numverts);
  filter(r, S);
  mul_prevfmatrix_lfvector(p, Pinv, r);
  filter(p, S);
  deltaNew = dot_lfvector(r, p, numverts);
  delta0 = deltaNew * sqrt(conjgrad_epsilon);
# endif
# ifdef DEBUG_TIME
  double start = PIL_check_seconds_timer();
# endif
  /* fixed relative tolerance */
  tol = (0.01 * 0.2);
  while ((deltaNew > delta0 * tol * tol) && (iterations < conjgrad_looplimit)) {
    iterations++;
    mul_bfmatrix_lfvector(s, lA, p);
    filter(s, S);
    alpha = deltaNew / dot_lfvector(p, s, numverts);
    add_lfvector_lfvectorS(dv, dv, p, alpha, numverts);
    add_lfvector_lfvectorS(r, r, s, -alpha, numverts);
    mul_prevfmatrix_lfvector(h, Pinv, r);
    filter(h, S);
    deltaOld = deltaNew;
    deltaNew = dot_lfvector(r, h, numverts);
    add_lfvector_lfvectorS(p, h, p, deltaNew / deltaOld, numverts);
    filter(p, S);
  }
# ifdef DEBUG_TIME
  double end = PIL_check_seconds_timer();
  printf("cg_filtered_pre time: %f\n", (float)(end - start));
# endif
  del_lfvector(btemp);
  del_lfvector(bhat);
  del_lfvector(h);
  del_lfvector(s);
  del_lfvector(p);
  del_lfvector(r);
  // printf("iterations: %d\n", iterations);
  return iterations < conjgrad_looplimit;
}
# endif
/* Assemble and solve the implicit system for the velocity change dV over
 * time step dt, writing dV into data->dV and the advanced velocities into
 * data->Vnew. Per the subadd/add helpers used:
 *   A = M - dt*dFdV - dt^2*dFdX,  B = dt*F + dt^2 * dFdX * V
 * Returns true when the conjugate gradient solve converged. */
bool BPH_mass_spring_solve_velocities(Implicit_Data *data, float dt, ImplicitSolverResult *result)
{
  unsigned int numverts = data->dFdV[0].vcount;
  lfVector *dFdXmV = create_lfvector(numverts);
  zero_lfvector(data->dV, numverts);
  /* A = M - dt*dFdV - dt^2*dFdX (see subadd_bfmatrixS_bfmatrixS) */
  cp_bfmatrix(data->A, data->M);
  subadd_bfmatrixS_bfmatrixS(data->A, data->dFdV, dt, data->dFdX, (dt * dt));
  /* B = dt*F + dt^2 * (dFdX * V) */
  mul_bfmatrix_lfvector(dFdXmV, data->dFdX, data->V);
  add_lfvectorS_lfvectorS(data->B, data->F, dt, dFdXmV, (dt * dt), numverts);
# ifdef DEBUG_TIME
  double start = PIL_check_seconds_timer();
# endif
  /* Conjugate gradient algorithm to solve Ax=b. */
  cg_filtered(data->dV, data->A, data->B, data->z, data->S, result);
  // cg_filtered_pre(id->dV, id->A, id->B, id->z, id->S, id->P, id->Pinv, id->bigI);
# ifdef DEBUG_TIME
  double end = PIL_check_seconds_timer();
  printf("cg_filtered calc time: %f\n", (float)(end - start));
# endif
  // advance velocities
  add_lfvector_lfvector(data->Vnew, data->V, data->dV, numverts);
  del_lfvector(dFdXmV);
  return result->status == BPH_SOLVER_SUCCESS;
}
/* Advance positions by the newly solved velocities: Xnew = X + Vnew * dt. */
bool BPH_mass_spring_solve_positions(Implicit_Data *data, float dt)
{
  int numverts = data->M[0].vcount;
  // advance positions
  add_lfvector_lfvectorS(data->Xnew, data->X, data->Vnew, dt, numverts);
  return true;
}
/* Commit the stepped state: X <- Xnew, V <- Vnew. */
void BPH_mass_spring_apply_result(Implicit_Data *data)
{
  int numverts = data->M[0].vcount;
  cp_lfvector(data->X, data->Xnew, numverts);
  cp_lfvector(data->V, data->Vnew, numverts);
}
/* Set a vertex's mass block to mass * identity. */
void BPH_mass_spring_set_vertex_mass(Implicit_Data *data, int index, float mass)
{
  unit_m3(data->M[index].m);
  mul_m3_fl(data->M[index].m, mass);
}
/* Set the root-frame transform of a vertex; forced to identity (tfm ignored)
 * unless CLOTH_ROOT_FRAME is enabled. */
void BPH_mass_spring_set_rest_transform(Implicit_Data *data, int index, float tfm[3][3])
{
# ifdef CLOTH_ROOT_FRAME
  copy_m3_m3(data->tfm[index].m, tfm);
# else
  unit_m3(data->tfm[index].m);
  (void)tfm;
# endif
}
/* Set a vertex's position and velocity from world-space values (stored in
 * root-frame coordinates). */
void BPH_mass_spring_set_motion_state(Implicit_Data *data,
                                      int index,
                                      const float x[3],
                                      const float v[3])
{
  world_to_root_v3(data, index, data->X[index], x);
  world_to_root_v3(data, index, data->V[index], v);
}
/* Set a vertex position from a world-space value. */
void BPH_mass_spring_set_position(Implicit_Data *data, int index, const float x[3])
{
  world_to_root_v3(data, index, data->X[index], x);
}
/* Set a vertex velocity from a world-space value. */
void BPH_mass_spring_set_velocity(Implicit_Data *data, int index, const float v[3])
{
  world_to_root_v3(data, index, data->V[index], v);
}
/* Read back a vertex's position and/or velocity in world space;
 * NULL outputs are skipped. */
void BPH_mass_spring_get_motion_state(struct Implicit_Data *data,
                                      int index,
                                      float x[3],
                                      float v[3])
{
  if (x) {
    root_to_world_v3(data, index, x, data->X[index]);
  }
  if (v) {
    root_to_world_v3(data, index, v, data->V[index]);
  }
}
/* Read back a vertex position in world space. */
void BPH_mass_spring_get_position(struct Implicit_Data *data, int index, float x[3])
{
  root_to_world_v3(data, index, x, data->X[index]);
}
/* Read back the stepped (not yet committed) position in world space. */
void BPH_mass_spring_get_new_position(struct Implicit_Data *data, int index, float x[3])
{
  root_to_world_v3(data, index, x, data->Xnew[index]);
}
/* Overwrite the stepped position from a world-space value. */
void BPH_mass_spring_set_new_position(struct Implicit_Data *data, int index, const float x[3])
{
  world_to_root_v3(data, index, data->Xnew[index], x);
}
/* Read back the stepped (not yet committed) velocity in world space. */
void BPH_mass_spring_get_new_velocity(struct Implicit_Data *data, int index, float v[3])
{
  root_to_world_v3(data, index, v, data->Vnew[index]);
}
/* Overwrite the stepped velocity from a world-space value. */
void BPH_mass_spring_set_new_velocity(struct Implicit_Data *data, int index, const float v[3])
{
  world_to_root_v3(data, index, data->Vnew[index], v);
}
/* -------------------------------- */
/* Reserve the next off-diagonal (spring) block in every spring-bearing big
 * matrix and tag it with block position (v1, v2). Returns the array index of
 * the new block. Asserts when more springs are added than were allocated. */
static int BPH_mass_spring_add_block(Implicit_Data *data, int v1, int v2)
{
  int s = data->M[0].vcount + data->num_blocks; /* index from array start */
  BLI_assert(s < data->M[0].vcount + data->M[0].scount);
  ++data->num_blocks;
  /* tfm and S don't have spring entries (diagonal blocks only) */
  init_fmatrix(data->bigI + s, v1, v2);
  init_fmatrix(data->M + s, v1, v2);
  init_fmatrix(data->dFdX + s, v1, v2);
  init_fmatrix(data->dFdV + s, v1, v2);
  init_fmatrix(data->A + s, v1, v2);
  init_fmatrix(data->P + s, v1, v2);
  init_fmatrix(data->Pinv + s, v1, v2);
  return s;
}
/* Reset all constraints: every filter block becomes identity (vertex fully
 * unconstrained) and every constrained target velocity is cleared. */
void BPH_mass_spring_clear_constraints(Implicit_Data *data)
{
  const int numverts = data->S[0].vcount;
  int v;
  for (v = 0; v < numverts; ++v) {
    unit_m3(data->S[v].m);
    zero_v3(data->z[v]);
  }
}
/* Fully constrain a vertex (zero degrees of freedom): the filter block is
 * zeroed and dV becomes the target velocity in all directions. */
void BPH_mass_spring_add_constraint_ndof0(Implicit_Data *data, int index, const float dV[3])
{
  zero_m3(data->S[index].m);
  world_to_root_v3(data, index, data->z[index], dV);
}
/* Constrain a vertex to a single degree of freedom: the filter becomes
 * m = I - c1*c1^T - c2*c2^T, removing motion along c1 and c2, and dV is
 * accumulated into the target velocity. */
void BPH_mass_spring_add_constraint_ndof1(
    Implicit_Data *data, int index, const float c1[3], const float c2[3], const float dV[3])
{
  float m[3][3], p[3], q[3], u[3], cmat[3][3];
  world_to_root_v3(data, index, p, c1);
  mul_fvectorT_fvector(cmat, p, p);
  sub_m3_m3m3(m, I, cmat);
  world_to_root_v3(data, index, q, c2);
  mul_fvectorT_fvector(cmat, q, q);
  sub_m3_m3m3(m, m, cmat);
  /* XXX not sure but multiplication should work here */
  copy_m3_m3(data->S[index].m, m);
  // mul_m3_m3m3(data->S[index].m, data->S[index].m, m);
  world_to_root_v3(data, index, u, dV);
  add_v3_v3(data->z[index], u);
}
/* Constrain a vertex to two degrees of freedom: the filter becomes
 * m = I - c1*c1^T, removing motion along c1 only, and dV is accumulated
 * into the target velocity. */
void BPH_mass_spring_add_constraint_ndof2(Implicit_Data *data,
                                          int index,
                                          const float c1[3],
                                          const float dV[3])
{
  float m[3][3], p[3], u[3], cmat[3][3];
  world_to_root_v3(data, index, p, c1);
  mul_fvectorT_fvector(cmat, p, p);
  sub_m3_m3m3(m, I, cmat);
  copy_m3_m3(data->S[index].m, m);
  // mul_m3_m3m3(data->S[index].m, data->S[index].m, m);
  world_to_root_v3(data, index, u, dV);
  add_v3_v3(data->z[index], u);
}
/* Reset force accumulation for a new step: clear forces, zero the force
 * jacobians and release all spring blocks. */
void BPH_mass_spring_clear_forces(Implicit_Data *data)
{
  int numverts = data->M[0].vcount;
  zero_lfvector(data->F, numverts);
  init_bfmatrix(data->dFdX, ZERO);
  init_bfmatrix(data->dFdV, ZERO);
  data->num_blocks = 0;
}
/* Add the fictitious forces (Euler, Coriolis, centrifugal) and their force
 * jacobians for a vertex simulated in an accelerating/rotating root frame.
 * acceleration/omega/domega_dt are world-space; no-op unless
 * CLOTH_ROOT_FRAME is enabled. */
void BPH_mass_spring_force_reference_frame(Implicit_Data *data,
                                           int index,
                                           const float acceleration[3],
                                           const float omega[3],
                                           const float domega_dt[3],
                                           float mass)
{
# ifdef CLOTH_ROOT_FRAME
  float acc[3], w[3], dwdt[3];
  float f[3], dfdx[3][3], dfdv[3][3];
  float euler[3], coriolis[3], centrifugal[3], rotvel[3];
  float deuler[3][3], dcoriolis[3][3], dcentrifugal[3][3], drotvel[3][3];
  /* bring the frame motion into root space */
  world_to_root_v3(data, index, acc, acceleration);
  world_to_root_v3(data, index, w, omega);
  world_to_root_v3(data, index, dwdt, domega_dt);
  /* euler = dw/dt x X, coriolis = 2 w x V, centrifugal = w x (w x X) */
  cross_v3_v3v3(euler, dwdt, data->X[index]);
  cross_v3_v3v3(coriolis, w, data->V[index]);
  mul_v3_fl(coriolis, 2.0f);
  cross_v3_v3v3(rotvel, w, data->X[index]);
  cross_v3_v3v3(centrifugal, w, rotvel);
  sub_v3_v3v3(f, acc, euler);
  sub_v3_v3(f, coriolis);
  sub_v3_v3(f, centrifugal);
  mul_v3_fl(f, mass); /* F = m * a */
  /* jacobians of the cross products above, built as skew-symmetric matrices */
  cross_v3_identity(deuler, dwdt);
  cross_v3_identity(dcoriolis, w);
  mul_m3_fl(dcoriolis, 2.0f);
  cross_v3_identity(drotvel, w);
  cross_m3_v3m3(dcentrifugal, w, drotvel);
  add_m3_m3m3(dfdx, deuler, dcentrifugal);
  negate_m3(dfdx);
  mul_m3_fl(dfdx, mass);
  copy_m3_m3(dfdv, dcoriolis);
  negate_m3(dfdv);
  mul_m3_fl(dfdv, mass);
  /* accumulate into force and jacobian state */
  add_v3_v3(data->F[index], f);
  add_m3_m3m3(data->dFdX[index].m, data->dFdX[index].m, dfdx);
  add_m3_m3m3(data->dFdV[index].m, data->dFdV[index].m, dfdv);
# else
  (void)data;
  (void)index;
  (void)acceleration;
  (void)omega;
  (void)domega_dt;
# endif
}
/* Accumulate the gravitational force on one vertex. */
void BPH_mass_spring_force_gravity(Implicit_Data *data, int index, float mass, const float g[3])
{
  /* force = mass * acceleration (in this case: gravity) */
  float grav[3];
  world_to_root_v3(data, index, grav, g);
  mul_v3_fl(grav, mass);
  add_v3_v3(data->F[index], grav);
}
/* Accumulate a simple velocity-proportional drag force F += -drag * V on
 * every vertex, with the matching jacobian dFdV += -drag * I. */
void BPH_mass_spring_force_drag(Implicit_Data *data, float drag)
{
  const int numverts = data->M[0].vcount;
  int v;
  for (v = 0; v < numverts; v++) {
    float dfdv[3][3];
    /* NB: uses root space velocity, no need to transform */
    madd_v3_v3fl(data->F[v], data->V[v], -drag);
    copy_m3_m3(dfdv, I);
    mul_m3_fl(dfdv, -drag);
    add_m3_m3m3(data->dFdV[v].m, data->dFdV[v].m, dfdv);
  }
}
/* Accumulate an externally supplied force and its jacobians, converting
 * them from world space into root space first. */
void BPH_mass_spring_force_extern(
    struct Implicit_Data *data, int i, const float f[3], float dfdx[3][3], float dfdv[3][3])
{
  float f_root[3], dfdx_root[3][3], dfdv_root[3][3];
  world_to_root_v3(data, i, f_root, f);
  world_to_root_m3(data, i, dfdx_root, dfdx);
  world_to_root_m3(data, i, dfdv_root, dfdv);
  add_v3_v3(data->F[i], f_root);
  add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfdx_root);
  add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, dfdv_root);
}
/* Compute the unit normal of triangle (v1, v2, v3) into `nor` and return the
 * cross product's length (proportional to the triangle area). */
static float calc_nor_area_tri(float nor[3],
                               const float v1[3],
                               const float v2[3],
                               const float v3[3])
{
  float edge_a[3], edge_b[3];
  sub_v3_v3v3(edge_a, v1, v2);
  sub_v3_v3v3(edge_b, v2, v3);
  cross_v3_v3v3(nor, edge_a, edge_b);
  return normalize_v3(nor);
}
/* XXX does not support force jacobians yet, since the effector system does not provide them either
 */
/* Wind pressure on a triangle, distributed equally over its three corners:
 * each corner gets factor * dot(win, nor) along the face normal. */
void BPH_mass_spring_force_face_wind(
    Implicit_Data *data, int v1, int v2, int v3, const float (*winvec)[3])
{
  const float effector_scale = 0.02f;
  const int verts[3] = {v1, v2, v3};
  float win[3], nor[3], area;
  float factor;
  int k;
  /* calculate face normal and area */
  area = calc_nor_area_tri(nor, data->X[v1], data->X[v2], data->X[v3]);
  factor = effector_scale * area / 3.0f;
  for (k = 0; k < 3; k++) {
    world_to_root_v3(data, verts[k], win, winvec[verts[k]]);
    madd_v3_v3fl(data->F[verts[k]], nor, factor * dot_v3v3(win, nor));
  }
}
/* Wind force on one endpoint of an edge (hair-like) segment: the force is
 * along the wind vector, scaled by air density and the edge's effective
 * cross-section (a cylinder of the given radius seen at the wind angle).
 * Jacobians are not computed yet (parameters unused). */
static void edge_wind_vertex(const float dir[3],
                             float length,
                             float radius,
                             const float wind[3],
                             float f[3],
                             float UNUSED(dfdx[3][3]),
                             float UNUSED(dfdv[3][3]))
{
  const float density = 0.01f; /* XXX arbitrary value, corresponds to effect of air density */
  float cos_alpha, sin_alpha, cross_section;
  float windlen = len_v3(wind);
  if (windlen == 0.0f) {
    zero_v3(f);
    return;
  }
  /* angle of wind direction to edge */
  cos_alpha = dot_v3v3(wind, dir) / windlen;
  /* Clamp to [-1, 1]: rounding error can push |cos_alpha| slightly above 1,
   * which would make the sqrtf argument negative and produce NaN forces. */
  if (cos_alpha > 1.0f) {
    cos_alpha = 1.0f;
  }
  else if (cos_alpha < -1.0f) {
    cos_alpha = -1.0f;
  }
  sin_alpha = sqrtf(1.0f - cos_alpha * cos_alpha);
  cross_section = radius * ((float)M_PI * radius * sin_alpha + length * cos_alpha);
  mul_v3_v3fl(f, wind, density * cross_section);
}
/* Wind force on an edge (v1, v2): each endpoint is loaded independently with
 * its own wind sample and radius via edge_wind_vertex(). Jacobians are
 * computed by edge_wind_vertex but currently unused there. */
void BPH_mass_spring_force_edge_wind(
    Implicit_Data *data, int v1, int v2, float radius1, float radius2, const float (*winvec)[3])
{
  float win[3], dir[3], length;
  float f[3], dfdx[3][3], dfdv[3][3];
  /* unit edge direction from v2 towards v1 */
  sub_v3_v3v3(dir, data->X[v1], data->X[v2]);
  length = normalize_v3(dir);
  world_to_root_v3(data, v1, win, winvec[v1]);
  edge_wind_vertex(dir, length, radius1, win, f, dfdx, dfdv);
  add_v3_v3(data->F[v1], f);
  world_to_root_v3(data, v2, win, winvec[v2]);
  edge_wind_vertex(dir, length, radius2, win, f, dfdx, dfdv);
  add_v3_v3(data->F[v2], f);
}
/* Wind force on a single vertex: simply the wind vector scaled by an
 * arbitrary air density factor (radius currently unused). */
void BPH_mass_spring_force_vertex_wind(Implicit_Data *data,
                                       int v,
                                       float UNUSED(radius),
                                       const float (*winvec)[3])
{
  const float density = 0.01f; /* XXX arbitrary value, corresponds to effect of air density */
  float wind[3];
  float f[3];
  world_to_root_v3(data, v, wind, winvec[v]);
  mul_v3_v3fl(f, wind, density);
  add_v3_v3(data->F[v], f);
}
/* Position jacobian of a stretched spring along unit direction `dir`:
 *   to = k * ((I - dir*dir^T) * (L / length) - I)
 * computed in place by the step sequence below. */
BLI_INLINE void dfdx_spring(float to[3][3], const float dir[3], float length, float L, float k)
{
  // dir is unit length direction, rest is spring's restlength, k is spring constant.
  // return ( (I-outerprod(dir, dir))*Min(1.0f, rest/length) - I) * -k;
  outerproduct(to, dir, dir);
  sub_m3_m3m3(to, I, to);
  mul_m3_fl(to, (L / length));
  sub_m3_m3m3(to, to, I);
  mul_m3_fl(to, k);
}
/* unused */
# if 0
/* Position jacobian of inner spring damping, projected off the spring axis
 * (disabled legacy code inside #if 0). */
BLI_INLINE void dfdx_damp(float to[3][3],
                          const float dir[3],
                          float length,
                          const float vel[3],
                          float rest,
                          float damping)
{
  // inner spring damping vel is the relative velocity of the endpoints.
  // return (I-outerprod(dir, dir)) * (-damping * -(dot(dir, vel)/Max(length, rest)));
  mul_fvectorT_fvector(to, dir, dir);
  sub_fmatrix_fmatrix(to, I, to);
  mul_fmatrix_S(to, (-damping * -(dot_v3v3(dir, vel) / MAX2(length, rest))));
}
# endif
/* Velocity jacobian of spring damping: to = -damping * dir*dir^T, so damping
 * acts only along the spring axis. */
BLI_INLINE void dfdv_damp(float to[3][3], const float dir[3], float damping)
{
  // derivative of force wrt velocity
  outerproduct(to, dir, dir);
  mul_m3_fl(to, -damping);
}
/* Quartic polynomial in x = length / L used by the Choi/Ko-style bending
 * force (see fbstar). */
BLI_INLINE float fb(float length, float L)
{
  const float x = length / L;
  const float x2 = x * x;
  const float x3 = x2 * x;
  const float x4 = x3 * x;
  return (-11.541f * x4 + 34.193f * x3 - 39.083f * x2 + 23.116f * x - 9.713f);
}
/* Derivative of fb() with respect to x = length / L. */
BLI_INLINE float fbderiv(float length, float L)
{
  const float x = length / L;
  const float x2 = x * x;
  const float x3 = x2 * x;
  return (-46.164f * x3 + 102.579f * x2 - 78.166f * x + 23.116f);
}
/* Bending force magnitude: the larger of the polynomial force kb*fb() and a
 * linear fallback cb*(length - L). */
BLI_INLINE float fbstar(float length, float L, float kb, float cb)
{
  const float poly_force = kb * fb(length, L);
  const float linear_force = cb * (length - L);
  return (poly_force < linear_force) ? linear_force : poly_force;
}
// function to calculate bending spring force (taken from Choi & Co)
/* Derivative of fbstar() with respect to length, matching whichever branch
 * fbstar() selects for the same inputs. */
BLI_INLINE float fbstar_jacobi(float length, float L, float kb, float cb)
{
  const float poly_force = kb * fb(length, L);
  const float linear_force = cb * (length - L);
  return (poly_force < linear_force) ? -cb : -kb * fbderiv(length, L);
}
/* calculate elonglation */
/* Compute a spring's current geometry between vertices i and j:
 * r_extent = X[j] - X[i], r_vel = V[j] - V[i], r_length = |r_extent|,
 * r_dir = normalized extent (zero when degenerate). Always returns true;
 * the tearing code that could return false is disabled. */
BLI_INLINE bool spring_length(Implicit_Data *data,
                              int i,
                              int j,
                              float r_extent[3],
                              float r_dir[3],
                              float *r_length,
                              float r_vel[3])
{
  sub_v3_v3v3(r_extent, data->X[j], data->X[i]);
  sub_v3_v3v3(r_vel, data->V[j], data->V[i]);
  *r_length = len_v3(r_extent);
  if (*r_length > ALMOST_ZERO) {
# if 0
    if (length > L) {
      if ((clmd->sim_parms->flags & CSIMSETT_FLAG_TEARING_ENABLED) &&
          (((length - L) * 100.0f / L) > clmd->sim_parms->maxspringlen)) {
        // cut spring!
        s->flags |= CSPRING_FLAG_DEACTIVATE;
        return false;
      }
    }
# endif
    mul_v3_v3fl(r_dir, r_extent, 1.0f / (*r_length));
  }
  else {
    zero_v3(r_dir);
  }
  return true;
}
/* Accumulate a spring force and its jacobians between vertices i and j:
 * equal-and-opposite forces on the endpoints, jacobians added to both
 * diagonal blocks, and the cross term subtracted in the off-diagonal block
 * reserved for this spring. */
BLI_INLINE void apply_spring(
    Implicit_Data *data, int i, int j, const float f[3], float dfdx[3][3], float dfdv[3][3])
{
  int block_ij = BPH_mass_spring_add_block(data, i, j);
  add_v3_v3(data->F[i], f);
  sub_v3_v3(data->F[j], f);
  add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfdx);
  add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfdx);
  sub_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfdx);
  add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, dfdv);
  add_m3_m3m3(data->dFdV[j].m, data->dFdV[j].m, dfdv);
  sub_m3_m3m3(data->dFdV[block_ij].m, data->dFdV[block_ij].m, dfdv);
}
/* Linear stretch/compression spring between vertices i and j.
 * Tension: Hookean force (optionally clamped to clamp_force) with
 * damping_tension. Compression: either resisted linearly when
 * resist_compress is set, handled by the Choi/Ko-style model when
 * new_compress is set, or ignored. Returns false when no force applied. */
bool BPH_mass_spring_force_spring_linear(Implicit_Data *data,
                                         int i,
                                         int j,
                                         float restlen,
                                         float stiffness_tension,
                                         float damping_tension,
                                         float stiffness_compression,
                                         float damping_compression,
                                         bool resist_compress,
                                         bool new_compress,
                                         float clamp_force)
{
  float extent[3], length, dir[3], vel[3];
  float f[3], dfdx[3][3], dfdv[3][3];
  float damping = 0;
  // calculate elonglation
  spring_length(data, i, j, extent, dir, &length, vel);
  /* This code computes not only the force, but also its derivative.
   * Zero derivative effectively disables the spring for the implicit solver.
   * Thus length > restlen makes cloth unconstrained at the start of simulation. */
  if ((length >= restlen && length > 0) || resist_compress) {
    float stretch_force;
    damping = damping_tension;
    stretch_force = stiffness_tension * (length - restlen);
    if (clamp_force > 0.0f && stretch_force > clamp_force) {
      stretch_force = clamp_force;
    }
    mul_v3_v3fl(f, dir, stretch_force);
    dfdx_spring(dfdx, dir, length, restlen, stiffness_tension);
  }
  else if (new_compress) {
    /* This is based on the Choi and Ko bending model,
     * which works surprisingly well for compression. */
    float kb = stiffness_compression;
    float cb = kb; /* cb equal to kb seems to work, but a factor can be added if necessary */
    damping = damping_compression;
    mul_v3_v3fl(f, dir, fbstar(length, restlen, kb, cb));
    outerproduct(dfdx, dir, dir);
    mul_m3_fl(dfdx, fbstar_jacobi(length, restlen, kb, cb));
  }
  else {
    return false;
  }
  /* axial damping: proportional to the relative velocity along the spring */
  madd_v3_v3fl(f, dir, damping * dot_v3v3(vel, dir));
  dfdv_damp(dfdv, dir, damping);
  apply_spring(data, i, j, f, dfdx, dfdv);
  return true;
}
/* See "Stable but Responsive Cloth" (Choi, Ko 2005).
 * Compression-only bending spring: applies a force only while the segment
 * is shorter than its rest length; returns false otherwise. */
bool BPH_mass_spring_force_spring_bending(
    Implicit_Data *data, int i, int j, float restlen, float kb, float cb)
{
  float extent[3], length, dir[3], vel[3];

  /* Calculate elongation. */
  spring_length(data, i, j, extent, dir, &length, vel);

  if (!(length < restlen)) {
    /* Not compressed: this spring is inactive. */
    return false;
  }

  float f[3], dfdx[3][3], dfdv[3][3];

  mul_v3_v3fl(f, dir, fbstar(length, restlen, kb, cb));

  outerproduct(dfdx, dir, dir);
  mul_m3_fl(dfdx, fbstar_jacobi(length, restlen, kb, cb));

  /* XXX damping not supported */
  zero_m3(dfdv);

  apply_spring(data, i, j, f, dfdx, dfdv);
  return true;
}
/* Centroid of the len vertices indexed by inds. */
BLI_INLINE void poly_avg(lfVector *data, int *inds, int len, float r_avg[3])
{
  const float weight = 1.0f / (float)len;
  int k;

  zero_v3(r_avg);
  for (k = 0; k < len; k++) {
    madd_v3_v3fl(r_avg, data[inds[k]], weight);
  }
}
/* Normal of the triangle spanned by vertices i, j and the centroid of the
 * polygon described by inds. */
BLI_INLINE void poly_norm(lfVector *data, int i, int j, int *inds, int len, float r_dir[3])
{
  float center[3];

  poly_avg(data, inds, len, center);
  normal_tri_v3(r_dir, data[i], data[j], center);
}
/* Midpoint of the segment between vertices i and j. */
BLI_INLINE void edge_avg(lfVector *data, int i, int j, float r_avg[3])
{
  for (int axis = 0; axis < 3; axis++) {
    r_avg[axis] = (data[i][axis] + data[j][axis]) * 0.5f;
  }
}
/* Normalized direction of the edge between vertices i and j.
 * Note: the vector points from j toward i (data[i] - data[j]). */
BLI_INLINE void edge_norm(lfVector *data, int i, int j, float r_dir[3])
{
  sub_v3_v3v3(r_dir, data[i], data[j]);
  normalize_v3(r_dir);
}
/* Signed bend angle between the plane normals dir_a and dir_b, measured
 * around the hinge axis dir_e. */
BLI_INLINE float bend_angle(float dir_a[3], float dir_b[3], float dir_e[3])
{
  float cross_ab[3];
  float cos_theta, sin_theta;

  cos_theta = dot_v3v3(dir_a, dir_b);
  cross_v3_v3v3(cross_ab, dir_a, dir_b);
  sin_theta = dot_v3v3(cross_ab, dir_e);

  return atan2f(sin_theta, cos_theta);
}
/* Measure the bending state of the hinge edge (i, j) shared by the polygons
 * i_a and i_b: the polygon normals, the signed bend angle between them, and
 * the polygon velocities relative to the edge midpoint velocity. */
BLI_INLINE void spring_angle(Implicit_Data *data,
                             int i,
                             int j,
                             int *i_a,
                             int *i_b,
                             int len_a,
                             int len_b,
                             float r_dir_a[3],
                             float r_dir_b[3],
                             float *r_angle,
                             float r_vel_a[3],
                             float r_vel_b[3])
{
  float dir_e[3], vel_e[3];

  /* Polygon normals on either side of the shared edge. */
  poly_norm(data->X, j, i, i_a, len_a, r_dir_a);
  poly_norm(data->X, i, j, i_b, len_b, r_dir_b);

  /* Edge direction is the hinge axis for the signed angle. */
  edge_norm(data->X, i, j, dir_e);
  *r_angle = bend_angle(r_dir_a, r_dir_b, dir_e);

  /* Polygon velocities, made relative to the edge midpoint velocity. */
  poly_avg(data->V, i_a, len_a, r_vel_a);
  poly_avg(data->V, i_b, len_b, r_vel_b);
  edge_avg(data->V, i, j, vel_e);
  sub_v3_v3(r_vel_a, vel_e);
  sub_v3_v3(r_vel_b, vel_e);
}
/* Angular springs roughly based on the bending model proposed by Baraff and Witkin in "Large Steps
 * in Cloth Simulation". */
bool BPH_mass_spring_force_spring_angular(Implicit_Data *data,
                                          int i,
                                          int j,
                                          int *i_a,
                                          int *i_b,
                                          int len_a,
                                          int len_b,
                                          float restang,
                                          float stiffness,
                                          float damping)
{
  float angle, dir_a[3], dir_b[3], vel_a[3], vel_b[3];
  float f_a[3], f_b[3], f_e[3];
  float force;
  int x;

  /* Current bend angle, polygon normals and relative hinge velocities. */
  spring_angle(data, i, j, i_a, i_b, len_a, len_b, dir_a, dir_b, &angle, vel_a, vel_b);

  /* spring force */
  force = stiffness * (angle - restang);

  /* damping force */
  force += -damping * (dot_v3v3(vel_a, dir_a) + dot_v3v3(vel_b, dir_b));

  /* Distribute the restoring force over each polygon's vertices, along the
   * respective polygon normal (divided so the total per side equals force). */
  mul_v3_v3fl(f_a, dir_a, force / len_a);
  mul_v3_v3fl(f_b, dir_b, force / len_b);

  for (x = 0; x < len_a; x++) {
    add_v3_v3(data->F[i_a[x]], f_a);
  }
  for (x = 0; x < len_b; x++) {
    add_v3_v3(data->F[i_b[x]], f_b);
  }

  /* Counterforce on the hinge edge vertices keeps the total force balanced.
   * NOTE(review): no jacobian blocks are accumulated here, so this force
   * acts explicitly within the implicit solve. */
  mul_v3_v3fl(f_a, dir_a, force * 0.5f);
  mul_v3_v3fl(f_b, dir_b, force * 0.5f);
  add_v3_v3v3(f_e, f_a, f_b);

  sub_v3_v3(data->F[i], f_e);
  sub_v3_v3(data->F[j], f_e);

  return true;
}
/* Jacobian of a direction vector.
 * Basically the part of the differential orthogonal to the direction,
 * inversely proportional to the length of the edge.
 *
 * dD_ij/dx_i = -dD_ij/dx_j = (D_ij * D_ij^T - I) / len_ij
 */
BLI_INLINE void spring_grad_dir(
    Implicit_Data *data, int i, int j, float edge[3], float dir[3], float grad_dir[3][3])
{
  float len;

  sub_v3_v3v3(edge, data->X[j], data->X[i]);
  len = normalize_v3_v3(dir, edge);

  if (!(len > ALMOST_ZERO)) {
    /* Degenerate edge: no well-defined direction, jacobian vanishes. */
    zero_m3(grad_dir);
    return;
  }

  outerproduct(grad_dir, dir, dir);
  sub_m3_m3m3(grad_dir, I, grad_dir);
  mul_m3_fl(grad_dir, 1.0f / len);
}
/* Evaluate the hair bending force acting on vertex k of the triplet (i, j, k).
 *
 * The force pulls edge (j, k) toward the goal vector and damps the velocity
 * component orthogonal to that edge. To support finite-difference jacobian
 * estimation, the state of vertex q is virtually offset by dx (position) and
 * dv (velocity) before evaluation; pass zero vectors for the plain force. */
BLI_INLINE void spring_hairbend_forces(Implicit_Data *data,
                                       int i,
                                       int j,
                                       int k,
                                       const float goal[3],
                                       float stiffness,
                                       float damping,
                                       int q,
                                       const float dx[3],
                                       const float dv[3],
                                       float r_f[3])
{
  float edge_ij[3], dir_ij[3];
  float edge_jk[3], dir_jk[3];
  float vel_ij[3], vel_jk[3], vel_ortho[3];
  float f_bend[3], f_damp[3];
  float fk[3];
  float dist[3];

  zero_v3(fk);

  /* Edge (i, j), with the virtual position offset applied to vertex q. */
  sub_v3_v3v3(edge_ij, data->X[j], data->X[i]);
  if (q == i) {
    sub_v3_v3(edge_ij, dx);
  }
  if (q == j) {
    add_v3_v3(edge_ij, dx);
  }
  normalize_v3_v3(dir_ij, edge_ij);

  /* Edge (j, k), same virtual offset scheme. */
  sub_v3_v3v3(edge_jk, data->X[k], data->X[j]);
  if (q == j) {
    sub_v3_v3(edge_jk, dx);
  }
  if (q == k) {
    add_v3_v3(edge_jk, dx);
  }
  normalize_v3_v3(dir_jk, edge_jk);

  /* Relative velocities, with the virtual velocity offset applied to q.
   * NOTE(review): dir_ij and vel_ij are computed but not used below. */
  sub_v3_v3v3(vel_ij, data->V[j], data->V[i]);
  if (q == i) {
    sub_v3_v3(vel_ij, dv);
  }
  if (q == j) {
    add_v3_v3(vel_ij, dv);
  }

  sub_v3_v3v3(vel_jk, data->V[k], data->V[j]);
  if (q == j) {
    sub_v3_v3(vel_jk, dv);
  }
  if (q == k) {
    add_v3_v3(vel_jk, dv);
  }

  /* bending force */
  sub_v3_v3v3(dist, goal, edge_jk);
  mul_v3_v3fl(f_bend, dist, stiffness);
  add_v3_v3(fk, f_bend);

  /* damping force: damp only the velocity component orthogonal to the edge. */
  madd_v3_v3v3fl(vel_ortho, vel_jk, dir_jk, -dot_v3v3(vel_jk, dir_jk));
  mul_v3_v3fl(f_damp, vel_ortho, damping);
  sub_v3_v3(fk, f_damp);

  copy_v3_v3(r_f, fk);
}
/* Finite Differences method for estimating the jacobian of the force.
 *
 * Central-difference estimate of d(force on k)/d(position of q): each column a
 * is (f(x + delta/2 * e_a) - f(x - delta/2 * e_a)) / delta. */
BLI_INLINE void spring_hairbend_estimate_dfdx(Implicit_Data *data,
                                              int i,
                                              int j,
                                              int k,
                                              const float goal[3],
                                              float stiffness,
                                              float damping,
                                              int q,
                                              float dfdx[3][3])
{
  const float delta = 0.00001f; // TODO find a good heuristic for this
  float dvec_null[3][3], dvec_pos[3][3], dvec_neg[3][3];
  float f[3];
  int a, b;

  /* Rows of dvec_pos/dvec_neg are the +/- half-delta offsets along each axis;
   * dvec_null rows are zero vectors (velocity is not perturbed here). */
  zero_m3(dvec_null);
  unit_m3(dvec_pos);
  mul_m3_fl(dvec_pos, delta * 0.5f);
  copy_m3_m3(dvec_neg, dvec_pos);
  negate_m3(dvec_neg);

  /* XXX TODO offset targets to account for position dependency */
  for (a = 0; a < 3; ++a) {
    spring_hairbend_forces(
        data, i, j, k, goal, stiffness, damping, q, dvec_pos[a], dvec_null[a], f);
    copy_v3_v3(dfdx[a], f);

    spring_hairbend_forces(
        data, i, j, k, goal, stiffness, damping, q, dvec_neg[a], dvec_null[a], f);
    sub_v3_v3(dfdx[a], f);

    for (b = 0; b < 3; ++b) {
      dfdx[a][b] /= delta;
    }
  }
}
/* Finite Differences method for estimating the jacobian of the force.
 *
 * Central-difference estimate of d(force on k)/d(velocity of q); the mirror
 * of spring_hairbend_estimate_dfdx with the perturbation applied to dv. */
BLI_INLINE void spring_hairbend_estimate_dfdv(Implicit_Data *data,
                                              int i,
                                              int j,
                                              int k,
                                              const float goal[3],
                                              float stiffness,
                                              float damping,
                                              int q,
                                              float dfdv[3][3])
{
  const float delta = 0.00001f; // TODO find a good heuristic for this
  float dvec_null[3][3], dvec_pos[3][3], dvec_neg[3][3];
  float f[3];
  int a, b;

  /* Rows of dvec_pos/dvec_neg are the +/- half-delta offsets along each axis;
   * dvec_null rows are zero vectors (position is not perturbed here). */
  zero_m3(dvec_null);
  unit_m3(dvec_pos);
  mul_m3_fl(dvec_pos, delta * 0.5f);
  copy_m3_m3(dvec_neg, dvec_pos);
  negate_m3(dvec_neg);

  /* XXX TODO offset targets to account for position dependency */
  for (a = 0; a < 3; ++a) {
    spring_hairbend_forces(
        data, i, j, k, goal, stiffness, damping, q, dvec_null[a], dvec_pos[a], f);
    copy_v3_v3(dfdv[a], f);

    spring_hairbend_forces(
        data, i, j, k, goal, stiffness, damping, q, dvec_null[a], dvec_neg[a], f);
    sub_v3_v3(dfdv[a], f);

    for (b = 0; b < 3; ++b) {
      dfdv[a][b] /= delta;
    }
  }
}
/* Angular spring that pulls the vertex toward the local target
 * See "Artistic Simulation of Curly Hair" (Pixar technical memo #12-03a)
 */
bool BPH_mass_spring_force_spring_bending_hair(Implicit_Data *data,
                                               int i,
                                               int j,
                                               int k,
                                               const float target[3],
                                               float stiffness,
                                               float damping)
{
  float goal[3];
  float fj[3], fk[3];
  float dfj_dxi[3][3], dfj_dxj[3][3], dfk_dxi[3][3], dfk_dxj[3][3], dfk_dxk[3][3];
  float dfj_dvi[3][3], dfj_dvj[3][3], dfk_dvi[3][3], dfk_dvj[3][3], dfk_dvk[3][3];

  const float vecnull[3] = {0.0f, 0.0f, 0.0f};

  int block_ij = BPH_mass_spring_add_block(data, i, j);
  int block_jk = BPH_mass_spring_add_block(data, j, k);
  int block_ik = BPH_mass_spring_add_block(data, i, k);

  /* Target is given in world space; convert to the root frame of vertex j. */
  world_to_root_v3(data, j, goal, target);

  /* Force on vertex k at the unperturbed state, and its counterforce on j. */
  spring_hairbend_forces(data, i, j, k, goal, stiffness, damping, k, vecnull, vecnull, fk);
  negate_v3_v3(fj, fk); /* counterforce */

  /* Position jacobians of fk, estimated by finite differences; the fj rows
   * follow by negation since fj == -fk. */
  spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, i, dfk_dxi);
  spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, j, dfk_dxj);
  spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, k, dfk_dxk);
  copy_m3_m3(dfj_dxi, dfk_dxi);
  negate_m3(dfj_dxi);
  copy_m3_m3(dfj_dxj, dfk_dxj);
  negate_m3(dfj_dxj);

  /* Velocity jacobians, same scheme. */
  spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, i, dfk_dvi);
  spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, j, dfk_dvj);
  spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, k, dfk_dvk);
  copy_m3_m3(dfj_dvi, dfk_dvi);
  negate_m3(dfj_dvi);
  copy_m3_m3(dfj_dvj, dfk_dvj);
  negate_m3(dfj_dvj);

  /* add forces and jacobians to the solver data */

  add_v3_v3(data->F[j], fj);
  add_v3_v3(data->F[k], fk);

  add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfj_dxj);
  add_m3_m3m3(data->dFdX[k].m, data->dFdX[k].m, dfk_dxk);

  add_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfj_dxi);
  add_m3_m3m3(data->dFdX[block_jk].m, data->dFdX[block_jk].m, dfk_dxj);
  add_m3_m3m3(data->dFdX[block_ik].m, data->dFdX[block_ik].m, dfk_dxi);

  add_m3_m3m3(data->dFdV[j].m, data->dFdV[j].m, dfj_dvj);
  add_m3_m3m3(data->dFdV[k].m, data->dFdV[k].m, dfk_dvk);

  add_m3_m3m3(data->dFdV[block_ij].m, data->dFdV[block_ij].m, dfj_dvi);
  add_m3_m3m3(data->dFdV[block_jk].m, data->dFdV[block_jk].m, dfk_dvj);
  add_m3_m3m3(data->dFdV[block_ik].m, data->dFdV[block_ik].m, dfk_dvi);

  /* XXX analytical calculation of derivatives below is incorrect.
   * This proved to be difficult, but for now just using the finite difference method for
   * estimating the jacobians should be sufficient.
   */
#  if 0
  float edge_ij[3], dir_ij[3], grad_dir_ij[3][3];
  float edge_jk[3], dir_jk[3], grad_dir_jk[3][3];
  float dist[3], vel_jk[3], vel_jk_ortho[3], projvel[3];
  float target[3];
  float tmp[3][3];
  float fi[3], fj[3], fk[3];
  float dfi_dxi[3][3], dfj_dxi[3][3], dfj_dxj[3][3], dfk_dxi[3][3], dfk_dxj[3][3], dfk_dxk[3][3];
  float dfdvi[3][3];

  // TESTING
  damping = 0.0f;

  zero_v3(fi);
  zero_v3(fj);
  zero_v3(fk);
  zero_m3(dfi_dxi);
  zero_m3(dfj_dxi);
  zero_m3(dfk_dxi);
  zero_m3(dfk_dxj);
  zero_m3(dfk_dxk);

  /* jacobian of direction vectors */
  spring_grad_dir(data, i, j, edge_ij, dir_ij, grad_dir_ij);
  spring_grad_dir(data, j, k, edge_jk, dir_jk, grad_dir_jk);

  sub_v3_v3v3(vel_jk, data->V[k], data->V[j]);

  /* bending force */
  mul_v3_v3fl(target, dir_ij, restlen);
  sub_v3_v3v3(dist, target, edge_jk);
  mul_v3_v3fl(fk, dist, stiffness);

  /* damping force */
  madd_v3_v3v3fl(vel_jk_ortho, vel_jk, dir_jk, -dot_v3v3(vel_jk, dir_jk));
  madd_v3_v3fl(fk, vel_jk_ortho, damping);

  /* XXX this only holds true as long as we assume straight rest shape!
   * eventually will become a bit more involved since the opposite segment
   * gets its own target, under condition of having equal torque on both sides.
   */
  copy_v3_v3(fi, fk);

  /* counterforce on the middle point */
  sub_v3_v3(fj, fi);
  sub_v3_v3(fj, fk);

  /* === derivatives === */

  madd_m3_m3fl(dfk_dxi, grad_dir_ij, stiffness * restlen);

  madd_m3_m3fl(dfk_dxj, grad_dir_ij, -stiffness * restlen);
  madd_m3_m3fl(dfk_dxj, I, stiffness);

  madd_m3_m3fl(dfk_dxk, I, -stiffness);

  copy_m3_m3(dfi_dxi, dfk_dxk);
  negate_m3(dfi_dxi);

  /* dfj_dfi == dfi_dfj due to symmetry,
   * dfi_dfj == dfk_dfj due to fi == fk
   * XXX see comment above on future bent rest shapes
   */
  copy_m3_m3(dfj_dxi, dfk_dxj);

  /* dfj_dxj == -(dfi_dxj + dfk_dxj) due to fj == -(fi + fk) */
  sub_m3_m3m3(dfj_dxj, dfj_dxj, dfj_dxi);
  sub_m3_m3m3(dfj_dxj, dfj_dxj, dfk_dxj);

  /* add forces and jacobians to the solver data */
  add_v3_v3(data->F[i], fi);
  add_v3_v3(data->F[j], fj);
  add_v3_v3(data->F[k], fk);

  add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfi_dxi);
  add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfj_dxj);
  add_m3_m3m3(data->dFdX[k].m, data->dFdX[k].m, dfk_dxk);

  add_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfj_dxi);
  add_m3_m3m3(data->dFdX[block_jk].m, data->dFdX[block_jk].m, dfk_dxj);
  add_m3_m3m3(data->dFdX[block_ik].m, data->dFdX[block_ik].m, dfk_dxi);
#  endif

  return true;
}
/* Goal spring pulling vertex i toward a goal position/velocity given in
 * world space. Returns false when the vertex is already (almost) at the goal,
 * in which case no force is applied. */
bool BPH_mass_spring_force_spring_goal(Implicit_Data *data,
                                       int i,
                                       const float goal_x[3],
                                       const float goal_v[3],
                                       float stiffness,
                                       float damping)
{
  float root_goal_x[3], root_goal_v[3], extent[3], length, dir[3], vel[3];
  float f[3], dfdx[3][3], dfdv[3][3];

  /* goal is in world space */
  world_to_root_v3(data, i, root_goal_x, goal_x);
  world_to_root_v3(data, i, root_goal_v, goal_v);

  sub_v3_v3v3(extent, root_goal_x, data->X[i]);
  sub_v3_v3v3(vel, root_goal_v, data->V[i]);
  length = normalize_v3_v3(dir, extent);

  if (length > ALMOST_ZERO) {
    mul_v3_v3fl(f, dir, stiffness * length);

    /* Ascher & Boxman, p.21: Damping only during elongation
     * something wrong with it... */
    madd_v3_v3fl(f, dir, damping * dot_v3v3(vel, dir));

    dfdx_spring(dfdx, dir, length, 0.0f, stiffness);
    dfdv_damp(dfdv, dir, damping);

    add_v3_v3(data->F[i], f);
    add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfdx);
    add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, dfdv);

    return true;
  }
  else {
    return false;
  }
}
#endif /* IMPLICIT_SOLVER_BLENDER */
|
Graph.h | /*
* Graph.h
*
* Created on: 01.06.2014
* Author: Christian Staudt (christian.staudt@kit.edu), Klara Reichard (klara.reichard@gmail.com), Marvin Ritter (marvin.ritter@gmail.com)
*/
#ifndef GRAPH_H_
#define GRAPH_H_
#include <algorithm>
#include <vector>
#include <stack>
#include <queue>
#include <utility>
#include <stdexcept>
#include <functional>
#include <unordered_set>
#include "../Globals.h"
#include "Coordinates.h"
#include "../viz/Point.h"
#include "../auxiliary/Random.h"
#include "../auxiliary/FunctionTraits.h"
#include "../auxiliary/Log.h"
namespace NetworKit {
/**
* A weighted edge used for the graph constructor with
* initializer list syntax.
*/
struct WeightedEdge {
node u, v;
edgeweight weight;
WeightedEdge(node u, node v, edgeweight w) : u(u), v(v), weight(w) {
}
};
/** Orders weighted edges ascending by weight only (endpoints are ignored). */
inline bool operator<(const WeightedEdge& lhs, const WeightedEdge& rhs) {
	return lhs.weight < rhs.weight;
}
/** An unweighted edge; with sorted == true the endpoints are normalized so
 * that u <= v (useful for undirected edge sets). */
struct Edge {
	node u, v;

	Edge(node _u, node _v, bool sorted = false) {
		const bool swap = sorted && (_v < _u);
		u = swap ? _v : _u;
		v = swap ? _u : _v;
	}
};
/** Two edges are equal iff both endpoints match in order (u with u, v with v). */
inline bool operator==(const Edge& lhs, const Edge& rhs) {
	return (lhs.u == rhs.u) && (lhs.v == rhs.v);
}
}
namespace std {
template<>
struct hash<NetworKit::Edge> {
	/**
	 * Hash an edge by combining the hashes of its endpoints.
	 *
	 * Uses the boost::hash_combine mixing step instead of a plain XOR:
	 * XOR is symmetric, so hash(u, v) == hash(v, u) even though equality is
	 * ordered, and it maps every self-loop (v, v) to 0 — both cause
	 * systematic collisions in unordered containers.
	 */
	size_t operator()(const NetworKit::Edge& e) const {
		const size_t h1 = hash_node(e.u);
		const size_t h2 = hash_node(e.v);
		return h1 ^ (h2 + 0x9e3779b9 + (h1 << 6) + (h1 >> 2));
	}

	hash<NetworKit::node> hash_node;
};
}
namespace NetworKit {
/**
* @ingroup graph
* A graph (with optional weights) and parallel iterator methods.
*/
class Graph final {
friend class ParallelPartitionCoarsening;
friend class GraphBuilder;
private:
// graph attributes
count id; //!< unique graph id, starts at 0
std::string name; //!< name of the graph, initially G#ID
// scalars
count n; //!< current number of nodes
count m; //!< current number of edges
count storedNumberOfSelfLoops; //!< current number of self loops, edges which have the same origin and target
node z; //!< current upper bound of node ids, z will be the id of the next node
edgeid omega; //!< current upper bound of edge ids, will be the id of the next edge
count t; //!< current time step
bool weighted; //!< true if the graph is weighted, false otherwise
bool directed; //!< true if the graph is directed, false otherwise
bool edgesIndexed; //!< true if edge ids have been assigned
// per node data
std::vector<bool> exists; //!< exists[v] is true if node v has not been removed from the graph
Coordinates<float> coordinates; //!< coordinates of nodes (if present)
std::vector<count> inDeg; //!< only used for directed graphs, number of edges incoming per node
std::vector<count> outDeg; //!< degree of every node, zero if node was removed. For directed graphs only outgoing edges count
std::vector< std::vector<node> > inEdges; //!< only used for directed graphs, inEdges[v] contains all nodes u that have an edge (u, v)
std::vector< std::vector<node> > outEdges; //!< (outgoing) edges, for each edge (u, v) v is saved in outEdges[u] and for undirected also u in outEdges[v]
std::vector< std::vector<edgeweight> > inEdgeWeights; //!< only used for directed graphs, same schema as inEdges
std::vector< std::vector<edgeweight> > outEdgeWeights; //!< same schema (and same order!) as outEdges
std::vector< std::vector<edgeid> > inEdgeIds; //!< only used for directed graphs, same schema as inEdges
std::vector< std::vector<edgeid> > outEdgeIds; //!< same schema (and same order!) as outEdges
/**
* Returns the next unique graph id.
*/
count getNextGraphId();
/**
 * Returns the index of node u in the array of incoming edges of node v. (For directed graphs inEdges is searched, while for undirected graphs outEdges is searched, which gives the same result as indexInOutEdgeArray.)
 */
index indexInInEdgeArray(node v, node u) const;
/**
* Returns the index of node v in the array of outgoing edges of node u.
*/
index indexInOutEdgeArray(node u, node v) const;
/**
* Returns the edge weight of the outgoing edge of index i in the outgoing edges of node u
* @param u The node
* @param i The index
* @return The weight of the outgoing edge or defaultEdgeWeight if the graph is unweighted
*/
template<bool hasWeights>
inline edgeweight getOutEdgeWeight(node u, index i) const;
/**
* Returns the edge weight of the incoming edge of index i in the incoming edges of node u
*
* @param u The node
* @param i The index in the incoming edge array
* @return The weight of the incoming edge
*/
template<bool hasWeights>
inline edgeweight getInEdgeWeight(node u, index i) const;
/**
* Returns the edge id of the edge of index i in the outgoing edges of node u
*
* @param u The node
* @param i The index in the outgoing edges
* @return The edge id
*/
template<bool graphHasEdgeIds>
inline edgeid getOutEdgeId(node u, index i) const;
/**
* Returns the edge id of the edge of index i in the incoming edges of node u
*
* @param u The node
* @param i The index in the incoming edges of u
* @return The edge id
*/
template<bool graphHasEdgeIds>
inline edgeid getInEdgeId(node u, index i) const;
/**
* @brief Returns if the edge (u, v) shall be used in the iteration of all edgesIndexed
*
* @param u The source node of the edge
* @param v The target node of the edge
* @return If the node shall be used, i.e. if v is not none and in the undirected case if u >= v
*/
template<bool graphIsDirected>
inline bool useEdgeInIteration(node u, node v) const;
/**
* @brief Implementation of the for loop for outgoing edges of u
*
* Note: If all (valid) outgoing edges shall be considered, graphIsDirected needs to be set to true
*
* @param u The node
* @param handle The handle that shall be executed for each edge
* @return void
*/
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline void forOutEdgesOfImpl(node u, L handle) const;
/**
* @brief Implementation of the for loop for incoming edges of u
*
* For undirected graphs, this is the same as forOutEdgesOfImpl but u and v are changed in the handle
*
* @param u The node
* @param handle The handle that shall be executed for each edge
* @return void
*/
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline void forInEdgesOfImpl(node u, L handle) const;
/**
* @brief Implementation of the for loop for all edges, @see forEdges
*
* @param handle The handle that shall be executed for all edges
* @return void
*/
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline void forEdgeImpl(L handle) const;
/**
* @brief Parallel implementation of the for loop for all edges, @see parallelForEdges
*
* @param handle The handle that shall be executed for all edges
* @return void
*/
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline void parallelForEdgesImpl(L handle) const;
/**
* @brief Summation variant of the parallel for loop for all edges, @see parallelSumForEdges
*
* @param handle The handle that shall be executed for all edges
* @return void
*/
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline double parallelSumForEdgesImpl(L handle) const;
/*
* In the following definition, Aux::FunctionTraits is used in order to only execute lambda functions
* with the appropriate parameters. The decltype-return type is used for determining the return type of
* the lambda (needed for summation) but also determines if the lambda accepts the correct number of parameters.
 * Otherwise the return type declaration fails and the function is excluded from overload resolution.
* Then there are multiple possible lambdas with three (third parameter id or weight) and two (second parameter
* can be second node id or edge weight for neighbor iterators). This is checked using Aux::FunctionTraits and
* std::enable_if. std::enable_if only defines the type member when the given bool is true, this bool comes from
* std::is_same which compares two types. The function traits give either the parameter type or if it is out of bounds
* they define type as void.
*/
/**
 * Triggers a static assert error when no other method is chosen. Because of the use of "..." as
 * arguments, the priority of this method is lower than the priority of the other methods. This
 * method avoids ugly and unreadable template substitution error messages from the other
 * declarations.
 */
template<class F, void* = (void*)0>
typename Aux::FunctionTraits<F>::result_type edgeLambda(F&f, ...) const {
	// the strange condition is used in order to delay the evaluation of the static assert to the
	// moment when this function is actually used (instantiation), instead of at declaration time
	static_assert(! std::is_same<F, F>::value, "Your lambda does not support the required parameters or the parameters have the wrong type.");
	return std::declval<typename Aux::FunctionTraits<F>::result_type>(); // use the correct return type (this won't compile)
}
/**
 * Calls the given function f if its fourth argument is of the type edgeid and its third argument
 * is of type edgeweight.
 * Note that the decltype check alone is not enough, as edgeweight can be implicitly converted to
 * node; the std::is_same checks pin the exact parameter types.
 */
template < class F,
           typename std::enable_if <
               (Aux::FunctionTraits<F>::arity >= 3) &&
               std::is_same<edgeweight, typename Aux::FunctionTraits<F>::template arg<2>::type>::value &&
               std::is_same<edgeid, typename Aux::FunctionTraits<F>::template arg<3>::type>::value
               >::type * = (void*)0 >
auto edgeLambda(F &f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(u, v, ew, id)) {
	return f(u, v, ew, id);
}
/**
 * Calls the given function f if its third argument is of the type edgeid; the edge weight is
 * discarded.
 * Note that the decltype check alone is not enough, as edgeweight can be implicitly converted to
 * node.
 */
template<class F,
         typename std::enable_if<
             (Aux::FunctionTraits<F>::arity >= 2) &&
             std::is_same<edgeid, typename Aux::FunctionTraits<F>::template arg<2>::type>::value &&
             std::is_same<node, typename Aux::FunctionTraits<F>::template arg<1>::type>::value /* prevent f(v, weight, eid) */
             >::type* = (void*)0>
auto edgeLambda(F&f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(u, v, id)) {
	return f(u, v, id);
}
/**
 * Calls the given function f if its third argument is of type edgeweight; the edge id is
 * discarded.
 * Note that the decltype check alone is not enough, as node can be implicitly converted to
 * edgeweight.
 */
template<class F,
         typename std::enable_if<
             (Aux::FunctionTraits<F>::arity >= 2) &&
             std::is_same<edgeweight, typename Aux::FunctionTraits<F>::template arg<2>::type>::value
             >::type* = (void*)0>
auto edgeLambda(F&f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(u, v, ew)) {
	return f(u, v, ew);
}
/**
 * Calls the given function f if it has only two arguments and the second argument is of type
 * node; the edge weight and the edge id are discarded.
 * Note that the decltype check alone is not enough, as edgeweight can be implicitly converted to
 * node.
 */
template<class F,
         typename std::enable_if<
             (Aux::FunctionTraits<F>::arity >= 1) &&
             std::is_same<node, typename Aux::FunctionTraits<F>::template arg<1>::type>::value
             >::type* = (void*)0>
auto edgeLambda(F&f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(u, v)) {
	return f(u, v);
}
/**
 * Calls the given function f if it has only two arguments and the second argument is of type
 * edgeweight; the first node and the edge id are discarded.
 * Note that the decltype check alone is not enough, as edgeweight can be implicitly converted to
 * node. The decltype probe uses u, which has the same type as the v actually passed.
 */
template<class F,
         typename std::enable_if<
             (Aux::FunctionTraits<F>::arity >= 1) &&
             std::is_same<edgeweight, typename Aux::FunctionTraits<F>::template arg<1>::type>::value
             >::type* = (void*)0>
auto edgeLambda(F&f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(u, ew)) {
	return f(v, ew);
}
/**
 * Calls the given function f if it has only one argument; the first node id, the edge weight and
 * the edge id are discarded. This is the lowest-priority constrained overload.
 */
template<class F,
         void* = (void*)0>
auto edgeLambda(F&f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(v)) {
	return f(v);
}
/**
 * Calls the given BFS handle with the distance parameter; selected by overload
 * resolution when the handle accepts (node, count).
 */
template <class F>
auto callBFSHandle(F &f, node u, count dist) const -> decltype(f(u, dist)) {
	return f(u, dist);
}
/**
 * Calls the given BFS handle without the distance parameter; selected by overload
 * resolution when the handle accepts only (node).
 */
template <class F>
auto callBFSHandle(F &f, node u, count dist) const -> decltype(f(u)) {
	return f(u);
}
public:
/**
* Create a graph of @a n nodes. The graph has assignable edge weights if @a weighted is set to <code>true</code>.
* If @a weighted is set to <code>false</code> each edge has edge weight 1.0 and any other weight assignment will
* be ignored.
* @param n Number of nodes.
* @param weighted If set to <code>true</code>, the graph has edge weights.
* @param directed If set to @c true, the graph will be directed.
*/
Graph(count n = 0, bool weighted = false, bool directed = false);
Graph(const Graph& G, bool weighted, bool directed);
/**
* Generate a weighted graph from a list of edges. (Useful for small
* graphs in unit tests that you do not want to read from a file.)
*
* @param[in] edges list of weighted edges
*/
Graph(std::initializer_list<WeightedEdge> edges);
/**
* Create a graph as copy of @a other.
* @param other The graph to copy.
*/
Graph(const Graph& other) = default;
/** Default move constructor */
Graph(Graph&& other) = default;
/** Default destructor */
~Graph() = default;
/** Default move assignment operator */
Graph& operator=(Graph&& other) = default;
/** Default copy assignment operator */
Graph& operator=(const Graph& other) = default;
/** EDGE IDS **/
/**
* Initially assign integer edge identifiers.
*
* @param force Force re-indexing of edges even if they have already been indexed
*/
void indexEdges(bool force = false);
/**
 * Checks if edges have been indexed (see indexEdges()).
 *
 * @return @c true if edges have been indexed, @c false otherwise.
 */
bool hasEdgeIds() const { return edgesIndexed; }
/**
* Get the id of the given edge.
*/
edgeid edgeId(node u, node v) const;
/**
 * Get an upper bound for the edge ids in the graph; assigned edge ids are
 * strictly smaller than this bound.
 * @return An upper bound for the edge ids.
 */
index upperEdgeIdBound() const { return omega; }
/** GRAPH INFORMATION **/
/**
 * Get the ID of this graph. The ID is a unique unsigned integer given to
 * every graph on construction.
 * @return The unique id of this graph.
 */
count getId() const { return id; }
/**
* Return the type of the graph.
* Graph: not weighted, undirected
* WeightedGraph: weighted, undirected
* DirectedGraph: not weighted, directed
* WeightedDirectedGraph: weighted, directed
*/
std::string typ() const;
/**
* Try to save some memory by shrinking internal data structures of the graph. Only run this
* once you finished editing the graph. Otherwise it will cause unnecessary reallocation of
* memory.
*/
void shrinkToFit();
/**
 * Compacts the adjacency arrays by re-using no longer needed slots from deleted edges.
 */
void compactEdges();
/**
* Sorts the adjacency arrays by node id. While the running time is linear this
* temporarily duplicates the memory.
*/
void sortEdges();
/**
 * Set the name of the graph to @a name (initially G#ID).
 * @param name The new name.
 */
void setName(std::string name) { this->name = name; }
/**
 * Returns the name of the graph.
 * @return The name of the graph.
 */
std::string getName() const { return name; }
/**
* Returns a string representation of the graph.
* @return A string representation.
*/
std::string toString() const;
/* COPYING */
/*
* Copies all nodes to a new graph
* @return graph with the same nodes.
*/
Graph copyNodes() const;
/* NODE MODIFIERS */
/**
* Add a new node to the graph and return it.
* @return The new node.
*/
node addNode();
/**
* DEPRECATED: Coordinates should be handled outside the Graph class
* like general node attributes.
*
 * Add a new node to the graph with coordinates @a x and @a y and return it.
*/
// TODO: remove method
// [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]]
node addNode(float x, float y);
/**
* Remove an isolated node @a v from the graph.
*
 * @param v Node.
* @note Although it would be convenient to remove all incident edges at the same time,
* this causes complications for dynamic applications. Therefore, removeNode is an
* atomic event. All incident edges need to be removed first and an exception is thrown
* otherwise.
*/
void removeNode(node v);
/**
 * Check if node @a v exists in the graph.
 *
 * @param v Node.
 * @return @c true if @a v exists, @c false otherwise (including ids at or
 *         above the current upper bound z and removed nodes).
 */
bool hasNode(node v) const { return (v < z) && this->exists[v]; }
/**
* Restores a previously deleted node @a v with its previous id in the graph.
*
* @param v Node.
*
*/
void restoreNode(node v);
// SET OPERATIONS
/**
* Appends another graph to this graph as a new subgraph. Performs node
* id remapping.
* @param G [description]
*/
void append(const Graph& G);
/**
* Modifies this graph to be the union of it and another graph.
* Nodes with the same ids are identified with each other.
* @param G [description]
*/
void merge(const Graph& G);
// SUBGRAPHS
Graph subgraphFromNodes(const std::unordered_set<node>& nodes) const;
/** NODE PROPERTIES **/
/**
 * Returns the number of outgoing neighbors of @a v.
 *
 * @param v Node.
 * @return The number of outgoing neighbors; zero if @a v was removed.
 */
count degree(node v) const { return outDeg[v]; }
/**
 * Get the number of incoming neighbors of @a v.
 *
 * @param v Node.
 * @return The number of incoming neighbors.
 * @note If the graph is not directed, the outgoing degree is returned instead,
 *       since inDeg is only maintained for directed graphs.
 */
count degreeIn(node v) const { return directed ? inDeg[v] : outDeg[v]; }
/**
 * Get the number of outgoing neighbors of @a v (same as degree()).
 *
 * @param v Node.
 * @return The number of outgoing neighbors.
 */
count degreeOut(node v) const { return outDeg[v]; }
/**
 * Check whether @a v is isolated, i.e. its degree is 0 (for directed graphs,
 * both the in- and out-degree must be 0).
 * @param v Node.
 * @return @c true if the node is isolated.
 */
bool isIsolated(node v) const { return outDeg[v] == 0 && (!directed || inDeg[v] == 0); }
/**
* Returns the weighted degree of @a v.
*
* @param v Node.
* @return Weighted degree of @a v.
* @note For directed graphs this is the sum of weights of all outgoing edges of @a v.
*/
edgeweight weightedDegree(node v) const;
/**
* Returns the volume of the @a v, which is the weighted degree with self-loops counted twice.
*
* @param v Node.
* @return The volume of the @a v.
*/
edgeweight volume(node v) const;
/**
* Returns a random node of the graph.
* @return A random node.
*/
node randomNode() const;
/**
* Returns a random neighbor of @a u and @c none if degree is zero.
*
* @param u Node.
* @return A random neighbor of @a u.
*/
node randomNeighbor(node u) const;
/* EDGE MODIFIERS */
/**
* Insert an edge between the nodes @a u and @a v. If the graph is weighted you can optionally
* set a weight for this edge. The default weight is 1.0.
* Note: Multi-edges are not supported and will NOT be handled consistently by the graph data
* structure.
* @param u Endpoint of edge.
* @param v Endpoint of edge.
* @param weight Optional edge weight.
*/
void addEdge(node u, node v, edgeweight ew = defaultEdgeWeight);
/**
* Removes the undirected edge {@a u,@a v}.
* @param u Endpoint of edge.
* @param v Endpoint of edge.
*/
void removeEdge(node u, node v);
/**
* Removes all self-loops in the graph.
*/
void removeSelfLoops();
/**
* Changes the edges {@a s1, @a t1} into {@a s1, @a t2} and the edge {@a s2, @a t2} into {@a s2, @a t1}.
*
* If there are edge weights or edge ids, they are preserved. Note that no check is performed if the swap is actually possible, i.e. does not generate duplicate edges.
*
* @param s1 The first source
* @param t1 The first target
* @param s2 The second source
* @param t2 The second target
*/
void swapEdge(NetworKit::node s1, NetworKit::node t1, NetworKit::node s2, NetworKit::node t2);
/**
* Checks if undirected edge {@a u,@a v} exists in the graph.
* @param u Endpoint of edge.
* @param v Endpoint of edge.
* @return <code>true</code> if the edge exists, <code>false</code> otherwise.
*/
bool hasEdge(node u, node v) const;
/**
* Returns a random edge. By default a random node u is chosen and then some random neighbor v. So the probability of choosing (u, v) highly
* depends on the degree of u.
* Setting uniformDistribution to true, will give you a real uniform distributed edge, but will be very slow. So only use uniformDistribution
* for single calls outside of any loops.
*/
std::pair<node, node> randomEdge(bool uniformDistribution = false) const;
/**
* Returns a vector with nr random edges. The edges are chosen uniform random.
*/
std::vector< std::pair<node, node> > randomEdges(count nr) const;
/* GLOBAL PROPERTIES */
/**
* Returns <code>true</code> if this graph supports edge weights other than 1.0.
* @return <code>true</code> if this graph supports edge weights other than 1.0.
*/
bool isWeighted() const { return weighted; }
/**
* Return @c true if this graph supports directed edges.
* @return @c true if this graph supports directed edges.
*/
bool isDirected() const { return directed; }
/**
* Return <code>true</code> if graph contains no nodes.
* @return <code>true</code> if graph contains no nodes.
*/
bool isEmpty() const { return n == 0; }
/**
* Return the number of nodes in the graph.
* @return The number of nodes.
*/
count numberOfNodes() const { return n; }
/**
* Return the number of edges in the graph.
* @return The number of edges.
*/
count numberOfEdges() const { return m; }
/**
* @return a pair (n, m) where n is the number of nodes and m is the number of edges
*/
std::pair<count, count> const size() { return {n, m}; };
/**
* @return the density of the graph
*/
double density() const {
count n = numberOfNodes();
count m = numberOfEdges();
count loops = numberOfSelfLoops();
m -= loops;
double d;
if (isDirected()) {
d = m / (double) (n * (n-1));
} else {
d = (2 * m) / (double) (n * (n-1));
}
return d;
}
/**
* Return the number of loops {v,v} in the graph.
* @return The number of loops.
* @note This involves calculation, so store result if needed multiple times.
*/
count numberOfSelfLoops() const;
/**
* Get an upper bound for the node ids in the graph.
* @return An upper bound for the node ids.
*/
index upperNodeIdBound() const { return z; }
/**
* Check for invalid graph states, such as multi-edges.
* @return False if the graph is in invalid state.
*/
bool checkConsistency() const;
/* DYNAMICS */
/**
* Trigger a time step - increments counter.
*/
void timeStep() { t++; }
/**
* Get time step counter.
* @return Time step counter.
*/
count time() { return t; }
/* COORDINATES */
/**
* DEPRECATED: Coordinates should be handled outside the Graph class
* like general node attributes.
*
* Sets the coordinate of @a v to @a value.
*
* @param v Node.
* @param value The coordinate of @a v.
*/
// TODO: remove method
// [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]]
void setCoordinate(node v, Point<float> value) { coordinates.setCoordinate(v, value); }
/**
* DEPRECATED: Coordinates should be handled outside the Graph class
* like general node attributes.
*
* Get the coordinate of @a v.
* @param v Node.
* @return The coordinate of @a v.
*/
// TODO: remove method
// [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]]
Point<float>& getCoordinate(node v) { return coordinates.getCoordinate(v); }
/**
* DEPRECATED: Coordinates should be handled outside the Graph class
* like general node attributes.
*
* Get minimum coordinate of all coordinates with respect to dimension @a dim.
* @param dim The dimension to search for minimum.
* @return The minimum coordinate in dimension @a dim.
*/
// TODO: remove method
// [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]]
float minCoordinate(count dim) { return coordinates.minCoordinate(dim); }
/**
* DEPRECATED: Coordinates should be handled outside the Graph class
* like general node attributes.
*
* Get maximum coordinate of all coordinates with respect to dimension @a dim.
* @param dim The dimension to search for maximum.
* @return The maximum coordinate in dimension @a dim.
*/
// TODO: remove method
// [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]]
float maxCoordinate(count dim) { return coordinates.maxCoordinate(dim); }
/**
* DEPRECATED: Coordinates should be handled outside the Graph class
* like general node attributes.
*
* Initializes the coordinates for the nodes in graph.
* @note This has to be called once and before you set coordinates. Call this method again if new nodes have
* been added.
*/
// TODO: remove method
// [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]]
void initCoordinates() { coordinates.init(z); }
/* EDGE ATTRIBUTES */
/**
* Return edge weight of edge {@a u,@a v}. Returns 0 if edge does not exist.
* BEWARE: Running time is \Theta(deg(u))!
*
* @param u Endpoint of edge.
* @param v Endpoint of edge.
* @return Edge weight of edge {@a u,@a v} or 0 if edge does not exist.
*/
edgeweight weight(node u, node v) const;
/**
* Set the weight of an edge. If the edge does not exist,
* it will be inserted.
*
* @param[in] u endpoint of edge
* @param[in] v endpoint of edge
* @param[in] ew edge weight
*/
void setWeight(node u, node v, edgeweight ew);
/**
* Increase the weight of an edge. If the edge does not exist,
* it will be inserted.
*
* @param[in] u endpoint of edge
* @param[in] v endpoint of edge
* @param[in] ew edge weight
*/
void increaseWeight(node u, node v, edgeweight ew);
/* SUMS */
/**
* Returns the sum of all edge weights.
* @return The sum of all edge weights.
*/
edgeweight totalEdgeWeight() const;
/* Collections */
/**
* Get list of all nodes.
* @return List of all nodes.
*/
std::vector<node> nodes() const;
/**
* Get list of edges as node pairs.
* @return List of edges as node pairs.
*/
std::vector<std::pair<node, node> > edges() const;
/**
* Get list of neighbors of @a u.
*
* @param u Node.
* @return List of neighbors of @a u.
*/
std::vector<node> neighbors(node u) const;
/* Derivative Graphs */
/**
* Return an undirected version of this graph.
*
* @return undirected graph.
*/
Graph toUndirected() const;
/**
* Return an unweighted version of this graph.
*
* @return unweighted graph.
*/
Graph toUnweighted() const;
/**
* Return the transpose of this graph. The graph must be directed.
*
* @return transpose of the graph.
*/
Graph transpose() const;
/* NODE ITERATORS */
/**
* Iterate over all nodes of the graph and call @a handle (lambda closure).
*
* @param handle Takes parameter <code>(node)</code>.
*/
template<typename L> void forNodes(L handle) const;
/**
* Iterate randomly over all nodes of the graph and call @a handle (lambda closure).
*
* @param handle Takes parameter <code>(node)</code>.
*/
template<typename L> void parallelForNodes(L handle) const;
/** Iterate over all nodes of the graph and call @a handle (lambda closure) as long as @a condition remains true.
* This allows for breaking from a node loop.
*
* @param condition Returning <code>false</code> breaks the loop.
* @param handle Takes parameter <code>(node)</code>.
*/
template<typename C, typename L> void forNodesWhile(C condition, L handle) const;
/**
* Iterate randomly over all nodes of the graph and call @a handle (lambda closure).
*
* @param handle Takes parameter <code>(node)</code>.
*/
template<typename L> void forNodesInRandomOrder(L handle) const;
/**
* Iterate in parallel over all nodes of the graph and call handler (lambda closure).
* Using schedule(guided) to remedy load-imbalances due to e.g. unequal degree distribution.
*
* @param handle Takes parameter <code>(node)</code>.
*/
template<typename L> void balancedParallelForNodes(L handle) const;
/**
* Iterate over all undirected pairs of nodes and call @a handle (lambda closure).
*
* @param handle Takes parameters <code>(node, node)</code>.
*/
template<typename L> void forNodePairs(L handle) const;
/**
* Iterate over all undirected pairs of nodes in parallel and call @a handle (lambda closure).
*
* @param handle Takes parameters <code>(node, node)</code>.
*/
template<typename L> void parallelForNodePairs(L handle) const;
/* EDGE ITERATORS */
/**
* Iterate over all edges of the const graph and call @a handle (lambda closure).
*
* @param handle Takes parameters <code>(node, node)</code>, <code>(node, node, edgweight)</code>, <code>(node, node, edgeid)</code> or <code>(node, node, edgeweight, edgeid)</code>.
*/
template<typename L> void forEdges(L handle) const;
/**
* Iterate in parallel over all edges of the const graph and call @a handle (lambda closure).
*
* @param handle Takes parameters <code>(node, node)</code> or <code>(node, node, edgweight)</code>, <code>(node, node, edgeid)</code> or <code>(node, node, edgeweight, edgeid)</code>.
*/
template<typename L> void parallelForEdges(L handle) const;
/* NEIGHBORHOOD ITERATORS */
/**
* Iterate over all neighbors of a node and call @a handle (lamdba closure).
*
* @param u Node.
* @param handle Takes parameter <code>(node)</code> or <code>(node, edgeweight)</code> which is a neighbor of @a u.
* @note For directed graphs only outgoing edges from @a u are considered.
* A node is its own neighbor if there is a self-loop.
*
*/
template<typename L> void forNeighborsOf(node u, L handle) const;
/**
* Iterate over all incident edges of a node and call @a handle (lamdba closure).
*
* @param u Node.
* @param handle Takes parameters <code>(node, node)</code>, <code>(node, node, edgeweight)</code>, <code>(node, node, edgeid)</code> or <code>(node, node, edgeweight, edgeid)</code> where the first node is @a u and the second is a neighbor of @a u.
* @note For undirected graphs all edges incident to @a u are also outgoing edges.
*/
template<typename L> void forEdgesOf(node u, L handle) const;
/**
* Iterate over all neighbors of a node and call handler (lamdba closure).
* For directed graphs only incoming edges from u are considered.
*/
template<typename L> void forInNeighborsOf(node u, L handle) const;
/**
* Iterate over all incoming edges of a node and call handler (lamdba closure).
* @note For undirected graphs all edges incident to u are also incoming edges.
*
* Handle takes parameters (u, v) or (u, v, w) where w is the edge weight.
*/
template<typename L> void forInEdgesOf(node u, L handle) const;
/* REDUCTION ITERATORS */
/**
* Iterate in parallel over all nodes and sum (reduce +) the values returned by the handler
*/
template<typename L> double parallelSumForNodes(L handle) const;
/**
* Iterate in parallel over all edges and sum (reduce +) the values returned by the handler
*/
template<typename L> double parallelSumForEdges(L handle) const;
/* GRAPH SEARCHES */
/**
* Iterate over nodes in breadth-first search order starting from r until connected component
* of r has been visited.
*
* @param r Node.
* @param handle Takes parameter <code>(node)</code>.
*/
template<typename L> void BFSfrom(node r, L handle) const;
template<typename L> void BFSfrom(const std::vector<node> &startNodes, L handle) const;
template<typename L> void BFSEdgesFrom(node r, L handle) const;
/**
* Iterate over nodes in depth-first search order starting from r until connected component
* of r has been visited.
*
* @param r Node.
* @param handle Takes parameter <code>(node)</code>.
*/
template<typename L> void DFSfrom(node r, L handle) const;
template<typename L> void DFSEdgesFrom(node r, L handle) const;
};
/* NODE ITERATORS */
template<typename L>
void Graph::forNodes(L handle) const {
for (node v = 0; v < z; ++v) {
if (exists[v]) {
handle(v);
}
}
}
template<typename L>
void Graph::parallelForNodes(L handle) const {
#pragma omp parallel for
for (node v = 0; v < z; ++v) {
if (exists[v]) {
handle(v);
}
}
}
template<typename C, typename L>
void Graph::forNodesWhile(C condition, L handle) const {
for (node v = 0; v < z; ++v) {
if (exists[v]) {
if (!condition()) {
break;
}
handle(v);
}
}
}
template<typename L>
void Graph::forNodesInRandomOrder(L handle) const {
std::vector<node> randVec = nodes();
std::shuffle(randVec.begin(), randVec.end(), Aux::Random::getURNG());
for (node v : randVec) {
handle(v);
}
}
template<typename L>
void Graph::balancedParallelForNodes(L handle) const {
#pragma omp parallel for schedule(guided) // TODO: define min block size (and test it!)
for (node v = 0; v < z; ++v) {
if (exists[v]) {
handle(v);
}
}
}
template<typename L>
void Graph::forNodePairs(L handle) const {
for (node u = 0; u < z; ++u) {
if (exists[u]) {
for (node v = u + 1; v < z; ++v) {
if (exists[v]) {
handle(u, v);
}
}
}
}
}
template<typename L>
void Graph::parallelForNodePairs(L handle) const {
#pragma omp parallel for schedule(guided)
for (node u = 0; u < z; ++u) {
if (exists[u]) {
for (node v = u + 1; v < z; ++v) {
if (exists[v]) {
handle(u, v);
}
}
}
}
}
/* EDGE ITERATORS */
/* HELPERS */
/* Edge-weight accessors dispatched at compile time on the graph's
 * weightedness: the primary template reads the stored per-edge weight,
 * while the <false> specialization ignores its arguments and returns the
 * constant defaultEdgeWeight, so unweighted iteration pays no lookup cost. */
template<bool hasWeights> // implementation for weighted == true
inline edgeweight Graph::getOutEdgeWeight(node u, index i) const {
// weight of the i-th stored out-edge of u
return outEdgeWeights[u][i];
}
template<> // implementation for weighted == false
inline edgeweight Graph::getOutEdgeWeight<false>(node, index) const {
return defaultEdgeWeight;
}
template<bool hasWeights> // implementation for weighted == true
inline edgeweight Graph::getInEdgeWeight(node u, index i) const {
// weight of the i-th stored in-edge of u
return inEdgeWeights[u][i];
}
template<> // implementation for weighted == false
inline edgeweight Graph::getInEdgeWeight<false>(node, index) const {
return defaultEdgeWeight;
}
/* Edge-id accessors, same compile-time dispatch pattern as the weight
 * accessors above: the <false> specialization returns a dummy id of 0 when
 * the graph has no indexed edges, avoiding any lookup. */
template<bool graphHasEdgeIds> // implementation for hasEdgeIds == true
inline edgeid Graph::getOutEdgeId(node u, index i) const {
// id of the i-th stored out-edge of u
return outEdgeIds[u][i];
}
template<> // implementation for hasEdgeIds == false
inline edgeid Graph::getOutEdgeId<false>(node, index) const {
return 0;
}
template<bool graphHasEdgeIds> // implementation for hasEdgeIds == true
inline edgeid Graph::getInEdgeId(node u, index i) const {
// id of the i-th stored in-edge of u
return inEdgeIds[u][i];
}
template<> // implementation for hasEdgeIds == false
inline edgeid Graph::getInEdgeId<false>(node, index) const {
return 0;
}
/* Decides whether the stored adjacency slot (u, v) should be visited by an
 * edge iteration.
 * Directed: visit every real slot (v == none marks a deleted edge).
 * Undirected: each edge is stored at both endpoints, so visit only when
 * u >= v to report it exactly once; since none is the largest node value,
 * deleted slots also fail u >= v (see comment in parallelSumForEdgesImpl). */
template<bool graphIsDirected> // implementation for graphIsDirected == true
inline bool Graph::useEdgeInIteration(node u, node v) const {
return v != none;
}
template<> // implementation for graphIsDirected == false
inline bool Graph::useEdgeInIteration<false>(node u, node v) const {
return u >= v;
}
/* Core loop over the stored out-edges of u, with all graph properties as
 * compile-time flags. useEdgeInIteration filters deleted slots and, for
 * undirected graphs, makes sure each edge is reported from one endpoint only.
 * edgeLambda (defined elsewhere in this header) presumably adapts handle to
 * whichever (node, node[, weight][, id]) signature it accepts — confirm
 * against its definition. Weight/id lookups compile to constants when the
 * graph is unweighted / has no edge ids. */
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline void Graph::forOutEdgesOfImpl(node u, L handle) const {
for (index i = 0; i < outEdges[u].size(); ++i) {
node v = outEdges[u][i];
if (useEdgeInIteration<graphIsDirected>(u, v)) {
edgeLambda<L>(handle, u, v, getOutEdgeWeight<hasWeights>(u, i), getOutEdgeId<graphHasEdgeIds>(u, i));
}
}
}
/* Loop over the in-edges of u. For a directed graph these live in the
 * separate inEdges/inEdgeWeights/inEdgeIds arrays; for an undirected graph
 * in-edges coincide with out-edges, so the out-adjacency is scanned instead.
 * Both branches use useEdgeInIteration<true>, i.e. only the deleted-slot
 * filter (v != none): every incident edge of u is reported, with no
 * once-per-edge deduplication. */
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline void Graph::forInEdgesOfImpl(node u, L handle) const {
if (graphIsDirected) {
for (index i = 0; i < inEdges[u].size(); i++) {
node v = inEdges[u][i];
if (useEdgeInIteration<true>(u, v)) {
edgeLambda<L>(handle, u, v, getInEdgeWeight<hasWeights>(u, i), getInEdgeId<graphHasEdgeIds>(u, i));
}
}
} else {
// undirected: the out-adjacency already contains all incident edges
for (index i = 0; i < outEdges[u].size(); ++i) {
node v = outEdges[u][i];
if (useEdgeInIteration<true>(u, v)) {
edgeLambda<L>(handle, u, v, getOutEdgeWeight<hasWeights>(u, i), getOutEdgeId<graphHasEdgeIds>(u, i));
}
}
}
}
/* Visit every edge of the graph by scanning each node id's out-adjacency;
 * forOutEdgesOfImpl handles hole-skipping and once-per-edge semantics. */
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline void Graph::forEdgeImpl(L handle) const {
    for (node u = 0; u < z; ++u)
        forOutEdgesOfImpl<graphIsDirected, hasWeights, graphHasEdgeIds, L>(u, handle);
}
/* Parallel counterpart of forEdgeImpl: the per-source-node edge scans are
 * distributed over OpenMP threads; guided scheduling evens out skewed
 * degree distributions. The handler must be safe to call concurrently. */
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline void Graph::parallelForEdgesImpl(L handle) const {
#pragma omp parallel for schedule(guided)
for (node u = 0; u < z; ++u) {
forOutEdgesOfImpl<graphIsDirected, hasWeights, graphHasEdgeIds, L>(u, handle);
}
}
/* Sum the handler's return value over all edges, parallelized over source
 * nodes with an OpenMP +-reduction on sum. Each edge contributes exactly
 * once (see useEdgeInIteration). The handler must be safe to call
 * concurrently and its result convertible to double. */
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline double Graph::parallelSumForEdgesImpl(L handle) const {
double sum = 0.0;
#pragma omp parallel for reduction(+:sum)
for (node u = 0; u < z; ++u) {
for (index i = 0; i < outEdges[u].size(); ++i) {
node v = outEdges[u][i];
// undirected, do not iterate over edges twice
// {u, v} instead of (u, v); if v == none, u > v is not fulfilled
if (useEdgeInIteration<graphIsDirected>(u, v)) {
sum += edgeLambda<L>(handle, u, v, getOutEdgeWeight<hasWeights>(u, i), getOutEdgeId<graphHasEdgeIds>(u, i));
}
}
}
return sum;
}
/* Dispatch to the matching forEdgeImpl instantiation. The switch key packs
 * the three runtime graph properties into a 3-bit index (bit 0: weighted,
 * bit 1: directed, bit 2: edge ids indexed) so the hot per-edge loop runs
 * with compile-time-constant flags and dead branches optimized away. */
template<typename L>
void Graph::forEdges(L handle) const {
switch (weighted + 2 * directed + 4 * edgesIndexed) {
case 0: // unweighted, undirected, no edgeIds
forEdgeImpl<false, false, false, L>(handle);
break;
case 1: // weighted, undirected, no edgeIds
forEdgeImpl<false, true, false, L>(handle);
break;
case 2: // unweighted, directed, no edgeIds
forEdgeImpl<true, false, false, L>(handle);
break;
case 3: // weighted, directed, no edgeIds
forEdgeImpl<true, true, false, L>(handle);
break;
case 4: // unweighted, undirected, with edgeIds
forEdgeImpl<false, false, true, L>(handle);
break;
case 5: // weighted, undirected, with edgeIds
forEdgeImpl<false, true, true, L>(handle);
break;
case 6: // unweighted, directed, with edgeIds
forEdgeImpl<true, false, true, L>(handle);
break;
case 7: // weighted, directed, with edgeIds
forEdgeImpl<true, true, true, L>(handle);
break;
}
}
/* Parallel variant of forEdges: same 3-bit (weighted, directed, edgesIndexed)
 * dispatch, delegating to parallelForEdgesImpl. The handler must be safe to
 * call concurrently. */
template<typename L>
void Graph::parallelForEdges(L handle) const {
switch (weighted + 2 * directed + 4 * edgesIndexed) {
case 0: // unweighted, undirected, no edgeIds
parallelForEdgesImpl<false, false, false, L>(handle);
break;
case 1: // weighted, undirected, no edgeIds
parallelForEdgesImpl<false, true, false, L>(handle);
break;
case 2: // unweighted, directed, no edgeIds
parallelForEdgesImpl<true, false, false, L>(handle);
break;
case 3: // weighted, directed, no edgeIds
parallelForEdgesImpl<true, true, false, L>(handle);
break;
case 4: // unweighted, undirected, with edgeIds
parallelForEdgesImpl<false, false, true, L>(handle);
break;
case 5: // weighted, undirected, with edgeIds
parallelForEdgesImpl<false, true, true, L>(handle);
break;
case 6: // unweighted, directed, with edgeIds
parallelForEdgesImpl<true, false, true, L>(handle);
break;
case 7: // weighted, directed, with edgeIds
parallelForEdgesImpl<true, true, true, L>(handle);
break;
}
}
/* NEIGHBORHOOD ITERATORS */
/**
 * Call @a handle for each (out-)neighbor of @a u; the neighbors are the
 * heads of u's stored out-edges, so this simply delegates to forEdgesOf.
 */
template<typename L>
void Graph::forNeighborsOf(node u, L handle) const {
    this->forEdgesOf(u, handle);
}
/* Iterate over the stored out-edges of u. Dispatches on (weighted,
 * edgesIndexed) packed into a 2-bit index; graphIsDirected is passed as
 * true in every case so that only the v != none filter applies and every
 * stored out-edge of u is reported, even in undirected graphs. */
template<typename L>
void Graph::forEdgesOf(node u, L handle) const {
switch (weighted + 2 * edgesIndexed) {
case 0: //not weighted, no edge ids
forOutEdgesOfImpl<true, false, false, L>(u, handle);
break;
case 1: //weighted, no edge ids
forOutEdgesOfImpl<true, true, false, L>(u, handle);
break;
case 2: //not weighted, with edge ids
forOutEdgesOfImpl<true, false, true, L>(u, handle);
break;
case 3: //weighted, with edge ids
forOutEdgesOfImpl<true, true, true, L>(u, handle);
break;
}
}
/**
 * Call @a handle for each in-neighbor of @a u; in-neighbors are the tails of
 * u's incoming edges, so this simply delegates to forInEdgesOf.
 */
template<typename L>
void Graph::forInNeighborsOf(node u, L handle) const {
    this->forInEdgesOf(u, handle);
}
/* Iterate over the incoming edges of u, dispatching on the 3-bit
 * (weighted, directed, edgesIndexed) index to a forInEdgesOfImpl
 * instantiation with compile-time flags. For undirected graphs all
 * incident edges of u are reported (in-edges == out-edges). */
template<typename L>
void Graph::forInEdgesOf(node u, L handle) const {
switch (weighted + 2 * directed + 4 * edgesIndexed) {
case 0: //unweighted, undirected, no edge ids
forInEdgesOfImpl<false, false, false, L>(u, handle);
break;
case 1: //weighted, undirected, no edge ids
forInEdgesOfImpl<false, true, false, L>(u, handle);
break;
case 2: //unweighted, directed, no edge ids
forInEdgesOfImpl<true, false, false, L>(u, handle);
break;
case 3: //weighted, directed, no edge ids
forInEdgesOfImpl<true, true, false, L>(u, handle);
break;
case 4: //unweighted, undirected, with edge ids
forInEdgesOfImpl<false, false, true, L>(u, handle);
break;
case 5: //weighted, undirected, with edge ids
forInEdgesOfImpl<false, true, true, L>(u, handle);
break;
case 6: //unweighted, directed, with edge ids
forInEdgesOfImpl<true, false, true, L>(u, handle);
break;
case 7: //weighted, directed, with edge ids
forInEdgesOfImpl<true, true, true, L>(u, handle);
break;
}
}
/* REDUCTION ITERATORS */
template<typename L>
double Graph::parallelSumForNodes(L handle) const {
double sum = 0.0;
#pragma omp parallel for reduction(+:sum)
for (node v = 0; v < z; ++v) {
if (exists[v]) {
sum += handle(v);
}
}
return sum;
}
/* Sum the handler over all edges in parallel, dispatching on the 3-bit
 * (weighted, directed, edgesIndexed) index to the matching
 * parallelSumForEdgesImpl instantiation. Each edge contributes once. */
template<typename L>
double Graph::parallelSumForEdges(L handle) const {
double sum = 0.0;
switch (weighted + 2 * directed + 4 * edgesIndexed) {
case 0: // unweighted, undirected, no edge ids
sum = parallelSumForEdgesImpl<false, false, false, L>(handle);
break;
case 1: // weighted, undirected, no edge ids
sum = parallelSumForEdgesImpl<false, true, false, L>(handle);
break;
case 2: // unweighted, directed, no edge ids
sum = parallelSumForEdgesImpl<true, false, false, L>(handle);
break;
case 3: // weighted, directed, no edge ids
sum = parallelSumForEdgesImpl<true, true, false, L>(handle);
break;
case 4: // unweighted, undirected, with edge ids
sum = parallelSumForEdgesImpl<false, false, true, L>(handle);
break;
case 5: // weighted, undirected, with edge ids
sum = parallelSumForEdgesImpl<false, true, true, L>(handle);
break;
case 6: // unweighted, directed, with edge ids
sum = parallelSumForEdgesImpl<true, false, true, L>(handle);
break;
case 7: // weighted, directed, with edge ids
sum = parallelSumForEdgesImpl<true, true, true, L>(handle);
break;
}
return sum;
}
/* GRAPH SEARCHES */
/**
 * Single-source BFS: wraps the root in a one-element start set and
 * delegates to the multi-source overload.
 */
template<typename L>
void Graph::BFSfrom(node r, L handle) const {
    const std::vector<node> startNodes{r};
    BFSfrom(startNodes, handle);
}
/* Multi-source BFS over the connected component(s) of the start nodes.
 * All start nodes begin at distance 0; q holds the current frontier, qNext
 * the next one, and dist the current depth. callBFSHandle (defined
 * elsewhere) presumably forwards (u) or (u, dist) depending on the
 * handler's arity — confirm against its definition.
 * NOTE(review): the do-while reads q.front() before checking emptiness, so
 * this assumes startNodes is non-empty — confirm callers guarantee that. */
template<typename L>
void Graph::BFSfrom(const std::vector<node> &startNodes, L handle) const {
std::vector<bool> marked(z);
std::queue<node> q, qNext;
count dist = 0;
// enqueue start nodes
for (node u : startNodes) {
q.push(u);
marked[u] = true;
}
do {
node u = q.front();
q.pop();
// apply function
callBFSHandle(handle, u, dist);
forNeighborsOf(u, [&](node v) {
if (!marked[v]) {
qNext.push(v);
marked[v] = true;
}
});
// current frontier exhausted: advance to the next level
if (q.empty() && !qNext.empty()) {
q.swap(qNext);
++dist;
}
} while (!q.empty());
}
/* BFS from r that reports edges instead of nodes: handle(u, v, w, eid) is
 * called once for every tree edge, i.e. the edge over which v is first
 * discovered. Non-tree edges are not reported. */
template<typename L>
void Graph::BFSEdgesFrom(node r, L handle) const {
std::vector<bool> marked(z);
std::queue<node> q;
q.push(r); // enqueue root
marked[r] = true;
do {
node u = q.front();
q.pop();
// apply function
forNeighborsOf(u, [&](node, node v, edgeweight w, edgeid eid) {
if (!marked[v]) {
handle(u, v, w, eid);
q.push(v);
marked[v] = true;
}
});
} while (!q.empty());
}
/**
 * Iterative DFS from @a r: call @a handle(u) for every node reachable from
 * @a r, each exactly once (nodes are marked when pushed, not when popped).
 */
template<typename L>
void Graph::DFSfrom(node r, L handle) const {
    std::vector<bool> visited(z);
    std::stack<node> pending;
    visited[r] = true;
    pending.push(r); // start at the root
    while (!pending.empty()) {
        const node u = pending.top();
        pending.pop();
        handle(u);
        forNeighborsOf(u, [&](node v) {
            if (visited[v])
                return;
            visited[v] = true;
            pending.push(v);
        });
    }
}
/* DFS from r that reports edges instead of nodes: handle(u, v) is called
 * once for every tree edge, i.e. the edge over which v is first discovered.
 * Non-tree edges are not reported; the root itself triggers no call. */
template<typename L>
void Graph::DFSEdgesFrom(node r, L handle) const {
std::vector<bool> marked(z);
std::stack<node> s;
s.push(r); // enqueue root
marked[r] = true;
do {
node u = s.top();
s.pop();
// apply function
forNeighborsOf(u, [&](node v) {
if (!marked[v]) {
handle(u, v);
s.push(v);
marked[v] = true;
}
});
} while (!s.empty());
}
} /* namespace NetworKit */
#endif /* GRAPH_H_ */
|
integratePlanarOrbit.c | /*
Wrappers around the C integration code for planar Orbits
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <bovy_coords.h>
#include <bovy_symplecticode.h>
#include <bovy_rk.h>
#include <leung_dop853.h>
#include <integrateFullOrbit.h>
//Potentials
#include <galpy_potentials.h>
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
#ifndef ORBITS_CHUNKSIZE
#define ORBITS_CHUNKSIZE 1
#endif
//Macros to export functions in DLL on different OS
#if defined(_WIN32)
#define EXPORT __declspec(dllexport)
#elif defined(__GNUC__)
#define EXPORT __attribute__((visibility("default")))
#else
// Just do nothing?
#define EXPORT
#endif
/*
Function Declarations
*/
/* Evaluate the planar force in rectangular coordinates at time t for state q,
   writing into the output array; last two args are the potential count and
   the potential list. */
void evalPlanarRectForce(double, double *, double *,
int, struct potentialArg *);
/* Time derivative of the planar state (x, y, vx, vy) in rectangular
   coordinates, for use by the ODE integrators. */
void evalPlanarRectDeriv(double, double *, double *,
int, struct potentialArg *);
/* Derivative evaluation for the (x, v) + derivative (dx, dv) extended state
   used when integrating orbits together with their tangent map. */
void evalPlanarRectDeriv_dxdv(double, double *, double *,
int, struct potentialArg *);
/* Set up the interpolation splines for a planar moving-object potential from
   the raw argument array. */
void initPlanarMovingObjectSplines(struct potentialArg *, double ** pot_args);
/*
Actual functions
*/
void parse_leapFuncArgs(int npot,struct potentialArg * potentialArgs,
int ** pot_type,
double ** pot_args){
int ii,jj;
int nr;
init_potentialArgs(npot,potentialArgs);
for (ii=0; ii < npot; ii++){
switch ( *(*pot_type)++ ) {
case 0: //LogarithmicHaloPotential, 4 arguments
potentialArgs->potentialEval= &LogarithmicHaloPotentialEval;
potentialArgs->planarRforce= &LogarithmicHaloPotentialPlanarRforce;
potentialArgs->planarphiforce= &LogarithmicHaloPotentialPlanarphiforce;
potentialArgs->planarR2deriv= &LogarithmicHaloPotentialPlanarR2deriv;
potentialArgs->planarphi2deriv= &LogarithmicHaloPotentialPlanarphi2deriv;
potentialArgs->planarRphideriv= &LogarithmicHaloPotentialPlanarRphideriv;
potentialArgs->nargs= 4;
break;
case 1: //DehnenBarPotential, 6 arguments
potentialArgs->planarRforce= &DehnenBarPotentialPlanarRforce;
potentialArgs->planarphiforce= &DehnenBarPotentialPlanarphiforce;
potentialArgs->planarR2deriv= &DehnenBarPotentialPlanarR2deriv;
potentialArgs->planarphi2deriv= &DehnenBarPotentialPlanarphi2deriv;
potentialArgs->planarRphideriv= &DehnenBarPotentialPlanarRphideriv;
potentialArgs->nargs= 6;
break;
case 2: //TransientLogSpiralPotential, 8 arguments
potentialArgs->planarRforce= &TransientLogSpiralPotentialRforce;
potentialArgs->planarphiforce= &TransientLogSpiralPotentialphiforce;
potentialArgs->nargs= 8;
break;
case 3: //SteadyLogSpiralPotential, 8 arguments
potentialArgs->planarRforce= &SteadyLogSpiralPotentialRforce;
potentialArgs->planarphiforce= &SteadyLogSpiralPotentialphiforce;
potentialArgs->nargs= 8;
break;
case 4: //EllipticalDiskPotential, 6 arguments
potentialArgs->planarRforce= &EllipticalDiskPotentialRforce;
potentialArgs->planarphiforce= &EllipticalDiskPotentialphiforce;
potentialArgs->planarR2deriv= &EllipticalDiskPotentialR2deriv;
potentialArgs->planarphi2deriv= &EllipticalDiskPotentialphi2deriv;
potentialArgs->planarRphideriv= &EllipticalDiskPotentialRphideriv;
potentialArgs->nargs= 6;
break;
case 5: //MiyamotoNagaiPotential, 3 arguments
potentialArgs->potentialEval= &MiyamotoNagaiPotentialEval;
potentialArgs->planarRforce= &MiyamotoNagaiPotentialPlanarRforce;
potentialArgs->planarphiforce= &ZeroPlanarForce;
potentialArgs->planarR2deriv= &MiyamotoNagaiPotentialPlanarR2deriv;
potentialArgs->planarphi2deriv= &ZeroPlanarForce;
potentialArgs->planarRphideriv= &ZeroPlanarForce;
potentialArgs->nargs= 3;
break;
case 6: //LopsidedDiskPotential, 4 arguments
potentialArgs->planarRforce= &LopsidedDiskPotentialRforce;
potentialArgs->planarphiforce= &LopsidedDiskPotentialphiforce;
potentialArgs->planarR2deriv= &LopsidedDiskPotentialR2deriv;
potentialArgs->planarphi2deriv= &LopsidedDiskPotentialphi2deriv;
potentialArgs->planarRphideriv= &LopsidedDiskPotentialRphideriv;
potentialArgs->nargs= 4;
break;
case 7: //PowerSphericalPotential, 2 arguments
potentialArgs->potentialEval= &PowerSphericalPotentialEval;
potentialArgs->planarRforce= &PowerSphericalPotentialPlanarRforce;
potentialArgs->planarphiforce= &ZeroPlanarForce;
potentialArgs->planarR2deriv= &PowerSphericalPotentialPlanarR2deriv;
potentialArgs->planarphi2deriv= &ZeroPlanarForce;
potentialArgs->planarRphideriv= &ZeroPlanarForce;
potentialArgs->nargs= 2;
break;
case 8: //HernquistPotential, 2 arguments
potentialArgs->potentialEval= &HernquistPotentialEval;
potentialArgs->planarRforce= &HernquistPotentialPlanarRforce;
potentialArgs->planarphiforce= &ZeroPlanarForce;
potentialArgs->planarR2deriv= &HernquistPotentialPlanarR2deriv;
potentialArgs->planarphi2deriv= &ZeroPlanarForce;
potentialArgs->planarRphideriv= &ZeroPlanarForce;
potentialArgs->nargs= 2;
break;
case 9: //NFWPotential, 2 arguments
potentialArgs->potentialEval= &NFWPotentialEval;
potentialArgs->planarRforce= &NFWPotentialPlanarRforce;
potentialArgs->planarphiforce= &ZeroPlanarForce;
potentialArgs->planarR2deriv= &NFWPotentialPlanarR2deriv;
potentialArgs->planarphi2deriv= &ZeroPlanarForce;
potentialArgs->planarRphideriv= &ZeroPlanarForce;
potentialArgs->nargs= 2;
break;
case 10: //JaffePotential, 2 arguments
potentialArgs->potentialEval= &JaffePotentialEval;
potentialArgs->planarRforce= &JaffePotentialPlanarRforce;
potentialArgs->planarphiforce= &ZeroPlanarForce;
potentialArgs->planarR2deriv= &JaffePotentialPlanarR2deriv;
potentialArgs->planarphi2deriv= &ZeroPlanarForce;
potentialArgs->planarRphideriv= &ZeroPlanarForce;
potentialArgs->nargs= 2;
break;
case 11: //DoubleExponentialDiskPotential, XX arguments
potentialArgs->potentialEval= &DoubleExponentialDiskPotentialEval;
potentialArgs->planarRforce= &DoubleExponentialDiskPotentialPlanarRforce;
potentialArgs->planarphiforce= &ZeroPlanarForce;
//potentialArgs->planarR2deriv= &DoubleExponentialDiskPotentialPlanarR2deriv;
potentialArgs->planarphi2deriv= &ZeroPlanarForce;
potentialArgs->planarRphideriv= &ZeroPlanarForce;
//Look at pot_args to figure out the number of arguments
potentialArgs->nargs= (int) (8 + 2 * *(*pot_args+5) + 4 * ( *(*pot_args+4) + 1 ));
break;
case 12: //FlattenedPowerPotential, 4 arguments
potentialArgs->potentialEval= &FlattenedPowerPotentialEval;
potentialArgs->planarRforce= &FlattenedPowerPotentialPlanarRforce;
potentialArgs->planarphiforce= &ZeroPlanarForce;
potentialArgs->planarR2deriv= &FlattenedPowerPotentialPlanarR2deriv;
potentialArgs->planarphi2deriv= &ZeroPlanarForce;
potentialArgs->planarRphideriv= &ZeroPlanarForce;
potentialArgs->nargs= 3;
break;
case 14: //IsochronePotential, 2 arguments
potentialArgs->potentialEval= &IsochronePotentialEval;
potentialArgs->planarRforce= &IsochronePotentialPlanarRforce;
potentialArgs->planarphiforce= &ZeroPlanarForce;
potentialArgs->planarR2deriv= &IsochronePotentialPlanarR2deriv;
potentialArgs->planarphi2deriv= &ZeroPlanarForce;
potentialArgs->planarRphideriv= &ZeroPlanarForce;
potentialArgs->nargs= 2;
break;
case 15: //PowerSphericalPotentialwCutoff, 3 arguments
potentialArgs->potentialEval= &PowerSphericalPotentialwCutoffEval;
potentialArgs->planarRforce= &PowerSphericalPotentialwCutoffPlanarRforce;
potentialArgs->planarphiforce= &ZeroPlanarForce;
potentialArgs->planarR2deriv= &PowerSphericalPotentialwCutoffPlanarR2deriv;
potentialArgs->planarphi2deriv= &ZeroPlanarForce;
potentialArgs->planarRphideriv= &ZeroPlanarForce;
potentialArgs->nargs= 3;
break;
case 16: //KuzminKutuzovStaeckelPotential, 3 arguments
potentialArgs->potentialEval= &KuzminKutuzovStaeckelPotentialEval;
potentialArgs->planarRforce= &KuzminKutuzovStaeckelPotentialPlanarRforce;
potentialArgs->planarphiforce= &ZeroPlanarForce;
potentialArgs->planarR2deriv= &KuzminKutuzovStaeckelPotentialPlanarR2deriv;
potentialArgs->planarphi2deriv= &ZeroPlanarForce;
potentialArgs->planarRphideriv= &ZeroPlanarForce;
potentialArgs->nargs= 3;
break;
case 17: //PlummerPotential, 2 arguments
potentialArgs->potentialEval= &PlummerPotentialEval;
potentialArgs->planarRforce= &PlummerPotentialPlanarRforce;
potentialArgs->planarphiforce= &ZeroPlanarForce;
potentialArgs->planarR2deriv= &PlummerPotentialPlanarR2deriv;
potentialArgs->planarphi2deriv= &ZeroPlanarForce;
potentialArgs->planarRphideriv= &ZeroPlanarForce;
potentialArgs->nargs= 2;
break;
case 18: //PseudoIsothermalPotential, 2 arguments
potentialArgs->potentialEval= &PseudoIsothermalPotentialEval;
potentialArgs->planarRforce= &PseudoIsothermalPotentialPlanarRforce;
potentialArgs->planarphiforce= &ZeroPlanarForce;
potentialArgs->planarR2deriv= &PseudoIsothermalPotentialPlanarR2deriv;
potentialArgs->planarphi2deriv= &ZeroPlanarForce;
potentialArgs->planarRphideriv= &ZeroPlanarForce;
potentialArgs->nargs= 2;
break;
case 19: //KuzminDiskPotential, 2 arguments
potentialArgs->potentialEval= &KuzminDiskPotentialEval;
potentialArgs->planarRforce= &KuzminDiskPotentialPlanarRforce;
potentialArgs->planarphiforce= &ZeroPlanarForce;
potentialArgs->planarR2deriv= &KuzminDiskPotentialPlanarR2deriv;
potentialArgs->planarphi2deriv= &ZeroPlanarForce;
potentialArgs->planarRphideriv= &ZeroPlanarForce;
potentialArgs->nargs= 2;
break;
case 20: //BurkertPotential, 2 arguments
potentialArgs->potentialEval= &BurkertPotentialEval;
potentialArgs->planarRforce= &BurkertPotentialPlanarRforce;
potentialArgs->planarphiforce= &ZeroPlanarForce;
potentialArgs->planarR2deriv= &BurkertPotentialPlanarR2deriv;
potentialArgs->planarphi2deriv= &ZeroPlanarForce;
potentialArgs->planarRphideriv= &ZeroPlanarForce;
potentialArgs->nargs= 2;
break;
case 21: // TriaxialHernquistPotential, lots of arguments
potentialArgs->planarRforce = &EllipsoidalPotentialPlanarRforce;
potentialArgs->planarphiforce = &EllipsoidalPotentialPlanarphiforce;
//potentialArgs->planarR2deriv = &EllipsoidalPotentialPlanarR2deriv;
//potentialArgs->planarphi2deriv = &EllipsoidalPotentialPlanarphi2deriv;
//potentialArgs->planarRphideriv = &EllipsoidalPotentialPlanarRphideriv;
// Also assign functions specific to EllipsoidalPotential
potentialArgs->psi= &TriaxialHernquistPotentialpsi;
potentialArgs->mdens= &TriaxialHernquistPotentialmdens;
potentialArgs->mdensDeriv= &TriaxialHernquistPotentialmdensDeriv;
potentialArgs->nargs = (int) (21 + *(*pot_args+7) + 2 * *(*pot_args
+ (int) (*(*pot_args+7) + 20)));
break;
case 22: // TriaxialNFWPotential, lots of arguments
potentialArgs->planarRforce = &EllipsoidalPotentialPlanarRforce;
potentialArgs->planarphiforce = &EllipsoidalPotentialPlanarphiforce;
//potentialArgs->planarR2deriv = &EllipsoidalPotentialPlanarR2deriv;
//potentialArgs->planarphi2deriv = &EllipsoidalPotentialPlanarphi2deriv;
//potentialArgs->planarRphideriv = &EllipsoidalPotentialPlanarRphideriv;
// Also assign functions specific to EllipsoidalPotential
potentialArgs->psi= &TriaxialNFWPotentialpsi;
potentialArgs->mdens= &TriaxialNFWPotentialmdens;
potentialArgs->mdensDeriv= &TriaxialNFWPotentialmdensDeriv;
potentialArgs->nargs = (int) (21 + *(*pot_args+7) + 2 * *(*pot_args
+ (int) (*(*pot_args+7) + 20)));
break;
case 23: // TriaxialJaffePotential, lots of arguments
potentialArgs->planarRforce = &EllipsoidalPotentialPlanarRforce;
potentialArgs->planarphiforce = &EllipsoidalPotentialPlanarphiforce;
//potentialArgs->planarR2deriv = &EllipsoidalPotentialPlanarR2deriv;
//potentialArgs->planarphi2deriv = &EllipsoidalPotentialPlanarphi2deriv;
//potentialArgs->planarRphideriv = &EllipsoidalPotentialPlanarRphideriv;
// Also assign functions specific to EllipsoidalPotential
potentialArgs->psi= &TriaxialJaffePotentialpsi;
potentialArgs->mdens= &TriaxialJaffePotentialmdens;
potentialArgs->mdensDeriv= &TriaxialJaffePotentialmdensDeriv;
potentialArgs->nargs = (int) (21 + *(*pot_args+7) + 2 * *(*pot_args
+ (int) (*(*pot_args+7) + 20)));
break;
case 24: //SCFPotential, many arguments
potentialArgs->potentialEval= &SCFPotentialEval;
potentialArgs->planarRforce= &SCFPotentialPlanarRforce;
potentialArgs->planarphiforce= &SCFPotentialPlanarphiforce;
potentialArgs->planarR2deriv= &SCFPotentialPlanarR2deriv;
potentialArgs->planarphi2deriv= &SCFPotentialPlanarphi2deriv;
potentialArgs->planarRphideriv= &SCFPotentialPlanarRphideriv;
potentialArgs->nargs= (int) (5 + (1 + *(*pot_args + 1)) * *(*pot_args+2) * *(*pot_args+3)* *(*pot_args+4) + 7);
break;
case 25: //SoftenedNeedleBarPotential, 13 arguments
potentialArgs->potentialEval= &SoftenedNeedleBarPotentialEval;
potentialArgs->planarRforce= &SoftenedNeedleBarPotentialPlanarRforce;
potentialArgs->planarphiforce= &SoftenedNeedleBarPotentialPlanarphiforce;
potentialArgs->nargs= (int) 13;
break;
case 26: //DiskSCFPotential, nsigma+3 arguments
potentialArgs->potentialEval= &DiskSCFPotentialEval;
potentialArgs->planarRforce= &DiskSCFPotentialPlanarRforce;
potentialArgs->planarphiforce= &ZeroPlanarForce;
potentialArgs->nargs= (int) **pot_args + 3;
break;
case 27: // SpiralArmsPotential, 10 arguments + array of Cs
potentialArgs->planarRforce = &SpiralArmsPotentialPlanarRforce;
potentialArgs->planarphiforce = &SpiralArmsPotentialPlanarphiforce;
potentialArgs->planarR2deriv = &SpiralArmsPotentialPlanarR2deriv;
potentialArgs->planarphi2deriv = &SpiralArmsPotentialPlanarphi2deriv;
potentialArgs->planarRphideriv = &SpiralArmsPotentialPlanarRphideriv;
potentialArgs->nargs = (int) 10 + **pot_args;
break;
case 28: //CosmphiDiskPotential, 9 arguments
potentialArgs->planarRforce= &CosmphiDiskPotentialRforce;
potentialArgs->planarphiforce= &CosmphiDiskPotentialphiforce;
potentialArgs->planarR2deriv= &CosmphiDiskPotentialR2deriv;
potentialArgs->planarphi2deriv= &CosmphiDiskPotentialphi2deriv;
potentialArgs->planarRphideriv= &CosmphiDiskPotentialRphideriv;
potentialArgs->nargs= 9;
break;
case 29: //HenonHeilesPotential, 1 argument
potentialArgs->planarRforce= &HenonHeilesPotentialRforce;
potentialArgs->planarphiforce= &HenonHeilesPotentialphiforce;
potentialArgs->planarR2deriv= &HenonHeilesPotentialR2deriv;
potentialArgs->planarphi2deriv= &HenonHeilesPotentialphi2deriv;
potentialArgs->planarRphideriv= &HenonHeilesPotentialRphideriv;
potentialArgs->nargs= 1;
break;
case 30: // PerfectEllipsoidPotential, lots of arguments
potentialArgs->planarRforce = &EllipsoidalPotentialPlanarRforce;
potentialArgs->planarphiforce = &EllipsoidalPotentialPlanarphiforce;
//potentialArgs->planarR2deriv = &EllipsoidalPotentialPlanarR2deriv;
//potentialArgs->planarphi2deriv = &EllipsoidalPotentialPlanarphi2deriv;
//potentialArgs->planarRphideriv = &EllipsoidalPotentialPlanarRphideriv;
// Also assign functions specific to EllipsoidalPotential
potentialArgs->psi= &PerfectEllipsoidPotentialpsi;
potentialArgs->mdens= &PerfectEllipsoidPotentialmdens;
potentialArgs->mdensDeriv= &PerfectEllipsoidPotentialmdensDeriv;
potentialArgs->nargs = (int) (21 + *(*pot_args+7) + 2 * *(*pot_args
+ (int) (*(*pot_args+7) + 20)));
break;
// 31: KGPotential
// 32: IsothermalDiskPotential
case 33: //DehnenCoreSphericalpotential
potentialArgs->potentialEval= &DehnenCoreSphericalPotentialEval;
potentialArgs->planarRforce= &DehnenCoreSphericalPotentialPlanarRforce;
potentialArgs->planarphiforce= &ZeroPlanarForce;
potentialArgs->planarR2deriv= &DehnenCoreSphericalPotentialPlanarR2deriv;
potentialArgs->planarphi2deriv= &ZeroPlanarForce;
potentialArgs->planarRphideriv= &ZeroPlanarForce;
potentialArgs->nargs= 2;
break;
case 34: //DehnenSphericalpotential
potentialArgs->potentialEval= &DehnenSphericalPotentialEval;
potentialArgs->planarRforce= &DehnenSphericalPotentialPlanarRforce;
potentialArgs->planarphiforce= &ZeroPlanarForce;
potentialArgs->planarR2deriv= &DehnenSphericalPotentialPlanarR2deriv;
potentialArgs->planarphi2deriv= &ZeroPlanarForce;
potentialArgs->planarRphideriv= &ZeroPlanarForce;
potentialArgs->nargs= 3;
break;
case 35: //HomogeneousSpherePotential, 3 arguments
potentialArgs->potentialEval= &HomogeneousSpherePotentialEval;
potentialArgs->planarRforce= &HomogeneousSpherePotentialPlanarRforce;
potentialArgs->planarphiforce= &ZeroPlanarForce;
potentialArgs->planarR2deriv= &HomogeneousSpherePotentialPlanarR2deriv;
potentialArgs->planarphi2deriv= &ZeroPlanarForce;
potentialArgs->planarRphideriv= &ZeroPlanarForce;
potentialArgs->nargs= 3;
break;
case 36: //interpSphericalPotential, XX arguments
// Set up 1 spline in potentialArgs
potentialArgs->nspline1d= 1;
potentialArgs->spline1d= (gsl_spline **) \
malloc ( potentialArgs->nspline1d*sizeof ( gsl_spline *) );
potentialArgs->acc1d= (gsl_interp_accel **) \
malloc ( potentialArgs->nspline1d * sizeof ( gsl_interp_accel * ) );
// allocate accelerator
*potentialArgs->acc1d= gsl_interp_accel_alloc();
// Set up interpolater
nr= (int) **pot_args;
*potentialArgs->spline1d= gsl_spline_alloc(gsl_interp_cspline,nr);
gsl_spline_init(*potentialArgs->spline1d,*pot_args+1,*pot_args+1+nr,nr);
*pot_args+= 2*nr+1;
// Bind forces
potentialArgs->potentialEval= &SphericalPotentialEval;
potentialArgs->planarRforce = &SphericalPotentialPlanarRforce;
potentialArgs->planarphiforce= &ZeroPlanarForce;
potentialArgs->planarR2deriv= &SphericalPotentialPlanarR2deriv;
potentialArgs->planarphi2deriv= &ZeroPlanarForce;
potentialArgs->planarRphideriv= &ZeroPlanarForce;
// Also assign functions specific to SphericalPotential
potentialArgs->revaluate= &interpSphericalPotentialrevaluate;
potentialArgs->rforce= &interpSphericalPotentialrforce;
potentialArgs->r2deriv= &interpSphericalPotentialr2deriv;
potentialArgs->nargs = (int) 6;
potentialArgs->requiresVelocity= false;
break;
case 37: // TriaxialGaussianPotential, lots of arguments
potentialArgs->planarRforce = &EllipsoidalPotentialPlanarRforce;
potentialArgs->planarphiforce = &EllipsoidalPotentialPlanarphiforce;
//potentialArgs->planarR2deriv = &EllipsoidalPotentialPlanarR2deriv;
//potentialArgs->planarphi2deriv = &EllipsoidalPotentialPlanarphi2deriv;
//potentialArgs->planarRphideriv = &EllipsoidalPotentialPlanarRphideriv;
// Also assign functions specific to EllipsoidalPotential
potentialArgs->psi= &TriaxialGaussianPotentialpsi;
potentialArgs->mdens= &TriaxialGaussianPotentialmdens;
potentialArgs->mdensDeriv= &TriaxialGaussianPotentialmdensDeriv;
potentialArgs->nargs = (int) (21 + *(*pot_args+7) + 2 * *(*pot_args
+ (int) (*(*pot_args+7) + 20)));
break;
//////////////////////////////// WRAPPERS /////////////////////////////////////
case -1: //DehnenSmoothWrapperPotential
potentialArgs->potentialEval= &DehnenSmoothWrapperPotentialEval;
potentialArgs->planarRforce= &DehnenSmoothWrapperPotentialPlanarRforce;
potentialArgs->planarphiforce= &DehnenSmoothWrapperPotentialPlanarphiforce;
potentialArgs->planarR2deriv= &DehnenSmoothWrapperPotentialPlanarR2deriv;
potentialArgs->planarphi2deriv= &DehnenSmoothWrapperPotentialPlanarphi2deriv;
potentialArgs->planarRphideriv= &DehnenSmoothWrapperPotentialPlanarRphideriv;
potentialArgs->nargs= (int) 4;
break;
case -2: //SolidBodyRotationWrapperPotential
potentialArgs->planarRforce= &SolidBodyRotationWrapperPotentialPlanarRforce;
potentialArgs->planarphiforce= &SolidBodyRotationWrapperPotentialPlanarphiforce;
potentialArgs->planarR2deriv= &SolidBodyRotationWrapperPotentialPlanarR2deriv;
potentialArgs->planarphi2deriv= &SolidBodyRotationWrapperPotentialPlanarphi2deriv;
potentialArgs->planarRphideriv= &SolidBodyRotationWrapperPotentialPlanarRphideriv;
potentialArgs->nargs= (int) 3;
break;
case -4: //CorotatingRotationWrapperPotential
potentialArgs->planarRforce= &CorotatingRotationWrapperPotentialPlanarRforce;
potentialArgs->planarphiforce= &CorotatingRotationWrapperPotentialPlanarphiforce;
potentialArgs->planarR2deriv= &CorotatingRotationWrapperPotentialPlanarR2deriv;
potentialArgs->planarphi2deriv= &CorotatingRotationWrapperPotentialPlanarphi2deriv;
potentialArgs->planarRphideriv= &CorotatingRotationWrapperPotentialPlanarRphideriv;
potentialArgs->nargs= (int) 5;
break;
case -5: //GaussianAmplitudeWrapperPotential
potentialArgs->planarRforce= &GaussianAmplitudeWrapperPotentialPlanarRforce;
potentialArgs->planarphiforce= &GaussianAmplitudeWrapperPotentialPlanarphiforce;
potentialArgs->planarR2deriv= &GaussianAmplitudeWrapperPotentialPlanarR2deriv;
potentialArgs->planarphi2deriv= &GaussianAmplitudeWrapperPotentialPlanarphi2deriv;
potentialArgs->planarRphideriv= &GaussianAmplitudeWrapperPotentialPlanarRphideriv;
potentialArgs->nargs= (int) 3;
break;
case -6: //MovingObjectPotential
potentialArgs->planarRforce= &MovingObjectPotentialPlanarRforce;
potentialArgs->planarphiforce= &MovingObjectPotentialPlanarphiforce;
potentialArgs->nargs= (int) 3;
break;
}
int setupSplines = *(*pot_type-1) == -6 ? 1 : 0;
if ( *(*pot_type-1) < 0) { // Parse wrapped potential for wrappers
potentialArgs->nwrapped= (int) *(*pot_args)++;
potentialArgs->wrappedPotentialArg= \
(struct potentialArg *) malloc ( potentialArgs->nwrapped \
* sizeof (struct potentialArg) );
parse_leapFuncArgs(potentialArgs->nwrapped,
potentialArgs->wrappedPotentialArg,
pot_type,pot_args);
}
if (setupSplines) initPlanarMovingObjectSplines(potentialArgs, pot_args);
potentialArgs->args= (double *) malloc( potentialArgs->nargs * sizeof(double));
for (jj=0; jj < potentialArgs->nargs; jj++){
*(potentialArgs->args)= *(*pot_args)++;
potentialArgs->args++;
}
potentialArgs->args-= potentialArgs->nargs;
potentialArgs++;
}
potentialArgs-= npot;
}
// Integrate nobj planar orbits through npot potentials in parallel (OpenMP).
// yo holds 4 phase-space values per orbit (presumably (R,vR,vT,phi) — confirm
// against the Python caller), converted in place to rectangular coordinates
// before integration; t are the nt output times; dt is the fixed sub-step for
// the fixed-step integrators; rtol/atol the tolerances of adaptive steppers;
// result receives nt x 4 values per orbit (converted back to polar);
// err receives one status code per orbit; odeint_type selects the integrator.
EXPORT void integratePlanarOrbit(int nobj,
double *yo,
int nt,
double *t,
int npot,
int * pot_type,
double * pot_args,
double dt,
double rtol,
double atol,
double *result,
int * err,
int odeint_type){
//Set up the forces, first count
int ii,jj;
int dim;
int max_threads;
int * thread_pot_type;
double * thread_pot_args;
// never spawn more threads than there are orbits to integrate
max_threads= ( nobj < omp_get_max_threads() ) ? nobj : omp_get_max_threads();
// Because potentialArgs may cache, safest to have one / thread
struct potentialArg * potentialArgs= (struct potentialArg *) malloc ( max_threads * npot * sizeof (struct potentialArg) );
#pragma omp parallel for schedule(static,1) private(ii,thread_pot_type,thread_pot_args) num_threads(max_threads)
for (ii=0; ii < max_threads; ii++) {
thread_pot_type= pot_type; // need to make thread-private pointers, bc
thread_pot_args= pot_args; // these pointers are changed in parse_...
parse_leapFuncArgs(npot,potentialArgs+ii*npot,
&thread_pot_type,&thread_pot_args);
}
//Integrate
// odeint_func is the stepper driver, odeint_deriv_func the right-hand side
// evaluated in rectangular coordinates; dim is the phase-space dimension the
// stepper sees: 2 (force-only, symplectic steppers) or 4 (full RK derivative)
void (*odeint_func)(void (*func)(double, double *, double *,
int, struct potentialArg *),
int,
double *,
int, double, double *,
int, struct potentialArg *,
double, double,
double *,int *);
void (*odeint_deriv_func)(double, double *, double *,
int,struct potentialArg *);
// NOTE(review): no default case, so odeint_func/dim remain uninitialized for
// an out-of-range odeint_type; presumably callers only pass 0-6 — confirm
switch ( odeint_type ) {
case 0: //leapfrog
odeint_func= &leapfrog;
odeint_deriv_func= &evalPlanarRectForce;
dim= 2;
break;
case 1: //RK4
odeint_func= &bovy_rk4;
odeint_deriv_func= &evalPlanarRectDeriv;
dim= 4;
break;
case 2: //RK6
odeint_func= &bovy_rk6;
odeint_deriv_func= &evalPlanarRectDeriv;
dim= 4;
break;
case 3: //symplec4
odeint_func= &symplec4;
odeint_deriv_func= &evalPlanarRectForce;
dim= 2;
break;
case 4: //symplec6
odeint_func= &symplec6;
odeint_deriv_func= &evalPlanarRectForce;
dim= 2;
break;
case 5: //DOPR54
odeint_func= &bovy_dopr54;
odeint_deriv_func= &evalPlanarRectDeriv;
dim= 4;
break;
case 6: //DOP853
odeint_func= &dop853;
odeint_deriv_func= &evalPlanarRectDeriv;
dim= 4;
break;
}
#pragma omp parallel for schedule(dynamic,ORBITS_CHUNKSIZE) private(ii,jj) num_threads(max_threads)
for (ii=0; ii < nobj; ii++) {
// integrate in rectangular coordinates, then convert every output step back
polar_to_rect_galpy(yo+4*ii);
odeint_func(odeint_deriv_func,dim,yo+4*ii,nt,dt,t,
npot,potentialArgs+omp_get_thread_num()*npot,rtol,atol,
result+4*nt*ii,err+ii);
for (jj= 0; jj < nt; jj++)
rect_to_polar_galpy(result+4*jj+4*nt*ii);
}
//Free allocated memory
#pragma omp parallel for schedule(static,1) private(ii) num_threads(max_threads)
for (ii=0; ii < max_threads; ii++)
free_potentialArgs(npot,potentialArgs+ii*npot);
free(potentialArgs);
//Done!
}
// Integrate a single planar orbit together with its tangent-vector
// perturbation (dx/dv), i.e. an 8-dimensional phase-space point; the RHS
// evalPlanarRectDeriv_dxdv also propagates the variational equations.
// Parameters mirror integratePlanarOrbit, minus nobj (single orbit only).
EXPORT void integratePlanarOrbit_dxdv(double *yo,
int nt,
double *t,
int npot,
int * pot_type,
double * pot_args,
double dt,
double rtol,
double atol,
double *result,
int * err,
int odeint_type){
//Set up the forces, first count
int dim;
// single orbit, single thread: one set of potentialArgs suffices
struct potentialArg * potentialArgs= (struct potentialArg *) malloc ( npot * sizeof (struct potentialArg) );
parse_leapFuncArgs(npot,potentialArgs,&pot_type,&pot_args);
//Integrate
void (*odeint_func)(void (*func)(double, double *, double *,
int, struct potentialArg *),
int,
struct potentialArg — see below for the supported steppers
double *,
int, double, double *,
int, struct potentialArg *,
double, double,
double *,int *);
void (*odeint_deriv_func)(double, double *, double *,
int,struct potentialArg *);
// only non-symplectic steppers support the dxdv derivative; dim is always 8
// NOTE(review): no default case — odeint_func/dim stay uninitialized for any
// other odeint_type (e.g. 0, 3, 4); confirm callers restrict the choice
switch ( odeint_type ) {
case 1: //RK4
odeint_func= &bovy_rk4;
odeint_deriv_func= &evalPlanarRectDeriv_dxdv;
dim= 8;
break;
case 2: //RK6
odeint_func= &bovy_rk6;
odeint_deriv_func= &evalPlanarRectDeriv_dxdv;
dim= 8;
break;
case 5: //DOPR54
odeint_func= &bovy_dopr54;
odeint_deriv_func= &evalPlanarRectDeriv_dxdv;
dim= 8;
break;
case 6: //DOP853
odeint_func= &dop853;
odeint_deriv_func= &evalPlanarRectDeriv_dxdv;
dim= 8;
break;
}
odeint_func(odeint_deriv_func,dim,yo,nt,dt,t,npot,potentialArgs,rtol,atol,
result,err);
//Free allocated memory
free_potentialArgs(npot,potentialArgs);
free(potentialArgs);
//Done!
}
// Evaluate the planar force at rectangular position q = (x, y) and write the
// rectangular force components into a[0], a[1]. Used by the symplectic
// steppers, which only need forces (not the full phase-space derivative).
void evalPlanarRectForce(double t, double *q, double *a,
                         int nargs, struct potentialArg * potentialArgs){
  // convert the rectangular position to polar coordinates
  double x= q[0];
  double y= q[1];
  double R= sqrt(x*x+y*y);
  double cosphi= x/R;
  double sinphi= y/R;
  double phi= acos(x/R);
  // acos only covers [0,pi]; reflect for points below the x-axis
  if ( y < 0. ) phi= 2.*M_PI-phi;
  // evaluate the polar forces and rotate them to rectangular components
  double Rforce= calcPlanarRforce(R,phi,t,nargs,potentialArgs);
  double phiforce= calcPlanarphiforce(R,phi,t,nargs,potentialArgs);
  a[0]= cosphi*Rforce-1./R*sinphi*phiforce;
  a[1]= sinphi*Rforce+1./R*cosphi*phiforce;
}
// Full phase-space derivative for q = (x, y, vx, vy): velocities are copied
// through to a[0], a[1] and the rectangular force goes into a[2], a[3].
// Used by the Runge-Kutta family of steppers.
void evalPlanarRectDeriv(double t, double *q, double *a,
                         int nargs, struct potentialArg * potentialArgs){
  // position derivatives are simply the velocities
  a[0]= q[2];
  a[1]= q[3];
  // convert the rectangular position to polar coordinates
  double x= q[0];
  double y= q[1];
  double R= sqrt(x*x+y*y);
  double cosphi= x/R;
  double sinphi= y/R;
  double phi= acos(x/R);
  // acos only covers [0,pi]; reflect for points below the x-axis
  if ( y < 0. ) phi= 2.*M_PI-phi;
  // velocity derivatives: polar forces rotated to rectangular components
  double Rforce= calcPlanarRforce(R,phi,t,nargs,potentialArgs);
  double phiforce= calcPlanarphiforce(R,phi,t,nargs,potentialArgs);
  a[2]= cosphi*Rforce-1./R*sinphi*phiforce;
  a[3]= sinphi*Rforce+1./R*cosphi*phiforce;
}
// Right-hand side for combined orbit + variational-equation integration:
// q = (x,y,vx,vy,dx,dy,dvx,dvy) and a receives dq/dt. The force Jacobian
// (dFxdx, dFxdy, dFydx, dFydy) is assembled from the polar first and second
// derivatives via the chain rule and applied to the perturbation (dx,dy).
void evalPlanarRectDeriv_dxdv(double t, double *q, double *a,
int nargs, struct potentialArg * potentialArgs){
double sinphi, cosphi, x, y, phi,R,Rforce,phiforce;
double R2deriv, phi2deriv, Rphideriv, dFxdx, dFxdy, dFydx, dFydy;
//first two derivatives are just the velocities
*a++= *(q+2);
*a++= *(q+3);
//Rest is force
//q is rectangular so calculate R and phi
x= *q;
y= *(q+1);
R= sqrt(x*x+y*y);
phi= acos(x/R);
sinphi= y/R;
cosphi= x/R;
// acos only covers [0,pi]; reflect for points below the x-axis
if ( y < 0. ) phi= 2.*M_PI-phi;
//Calculate the forces
Rforce= calcPlanarRforce(R,phi,t,nargs,potentialArgs);
phiforce= calcPlanarphiforce(R,phi,t,nargs,potentialArgs);
// rotate the polar force to rectangular components
*a++= cosphi*Rforce-1./R*sinphi*phiforce;
*a++= sinphi*Rforce+1./R*cosphi*phiforce;
//dx derivatives are just dv
*a++= *(q+6);
*a++= *(q+7);
//for the dv derivatives we need also R2deriv, phi2deriv, and Rphideriv
R2deriv= calcPlanarR2deriv(R,phi,t,nargs,potentialArgs);
phi2deriv= calcPlanarphi2deriv(R,phi,t,nargs,potentialArgs);
Rphideriv= calcPlanarRphideriv(R,phi,t,nargs,potentialArgs);
//..and dFxdx, dFxdy, dFydx, dFydy
dFxdx= -cosphi*cosphi*R2deriv
+2.*cosphi*sinphi/R/R*phiforce
+sinphi*sinphi/R*Rforce
+2.*sinphi*cosphi/R*Rphideriv
-sinphi*sinphi/R/R*phi2deriv;
dFxdy= -sinphi*cosphi*R2deriv
+(sinphi*sinphi-cosphi*cosphi)/R/R*phiforce
-cosphi*sinphi/R*Rforce
-(cosphi*cosphi-sinphi*sinphi)/R*Rphideriv
+cosphi*sinphi/R/R*phi2deriv;
dFydx= -cosphi*sinphi*R2deriv
+(sinphi*sinphi-cosphi*cosphi)/R/R*phiforce
+(sinphi*sinphi-cosphi*cosphi)/R*Rphideriv
-sinphi*cosphi/R*Rforce
+sinphi*cosphi/R/R*phi2deriv;
dFydy= -sinphi*sinphi*R2deriv
-2.*sinphi*cosphi/R/R*phiforce
-2.*sinphi*cosphi/R*Rphideriv
+cosphi*cosphi/R*Rforce
-cosphi*cosphi/R/R*phi2deriv;
// tangent-vector propagation: d(dv)/dt = J_F . (dx, dy)
*a++= dFxdx * *(q+4) + dFxdy * *(q+5);
*a= dFydx * *(q+4) + dFydy * *(q+5);
}
// Build the two cubic splines x(t), y(t) describing a moving object's
// trajectory and attach them (plus GSL accelerators) to potentialArgs.
// The argument array is laid out as [nPts, t_0..t_{nPts-1}, x_0.., y_0.., ...];
// *pot_args is advanced past these 1+3*nPts values so the caller's generic
// argument-copy loop picks up the remaining scalar arguments.
void initPlanarMovingObjectSplines(struct potentialArg * potentialArgs, double ** pot_args){
gsl_interp_accel *x_accel_ptr = gsl_interp_accel_alloc();
gsl_interp_accel *y_accel_ptr = gsl_interp_accel_alloc();
int nPts = (int) **pot_args;
gsl_spline *x_spline = gsl_spline_alloc(gsl_interp_cspline, nPts);
gsl_spline *y_spline = gsl_spline_alloc(gsl_interp_cspline, nPts);
double * t_arr = *pot_args+1;
double * x_arr = t_arr+1*nPts;
double * y_arr = t_arr+2*nPts;
double * t= (double *) malloc ( nPts * sizeof (double) );
// NOTE(review): to/tf are read from slots just past the three nPts-long
// arrays, i.e. from among the scalar arguments that follow; confirm this
// matches the argument packing on the Python side
double tf = *(t_arr+3*nPts+2);
double to = *(t_arr+3*nPts+1);
int ii;
// rescale the time grid to [0,1] before building the splines
for (ii=0; ii < nPts; ii++)
*(t+ii) = (t_arr[ii]-to)/(tf-to);
gsl_spline_init(x_spline, t, x_arr, nPts);
gsl_spline_init(y_spline, t, y_arr, nPts);
// hand ownership of the splines and accelerators over to potentialArgs
potentialArgs->nspline1d= 2;
potentialArgs->spline1d= (gsl_spline **) malloc ( 2*sizeof ( gsl_spline *) );
potentialArgs->acc1d= (gsl_interp_accel **) \
malloc ( 2 * sizeof ( gsl_interp_accel * ) );
*potentialArgs->spline1d = x_spline;
*potentialArgs->acc1d = x_accel_ptr;
*(potentialArgs->spline1d+1)= y_spline;
*(potentialArgs->acc1d+1)= y_accel_ptr;
// skip the consumed count + arrays; the scalars remain for the caller
*pot_args = *pot_args+ (int) (1+3*nPts);
free(t);
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file mxnet_node.h
* \brief implement mxnet nodes
*/
#ifndef MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#define MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#include <mxnet/c_api.h>
#include <mxnet/kvstore.h>
#include <ps/ps.h>
#include <queue>
#include <string>
#include <mutex>
#include <condition_variable>
#include <memory>
#include <functional>
#include <future>
#include <vector>
#include "../profiler/profiler.h"
#include "../operator/tensor/elemwise_binary_op-inl.h"
#include "../operator/tensor/init_op.h"
namespace mxnet {
namespace kvstore {
// maintain same order in frontend.
// Control commands delivered through the SimpleApp channel; the numeric
// head value of a request indexes into this enum (see CommandHandle).
enum class CommandType {
kController, kSetMultiPrecision, kStopServer, kSyncMode,
kSetGradientCompression, kSetProfilerParams
};
// Kinds of key-value data requests (see DataHandleEx); paired with a dtype
// via GetCommandType/DepairDataHandleType into the ps-lite cmd field.
enum class RequestType {
kDefaultPushPull, kRowSparsePushPull, kCompressedPushPull
};
// Decoded form of a data request's cmd field: which push/pull variant it is
// plus the dtype of the payload.
struct DataHandleType {
RequestType requestType;
int dtype;
};
/*!
* Uses Cantor pairing function to generate a unique number given two numbers.
* This number can also be inverted to find the unique pair whose Cantor value is this number.
* Ref: https://en.wikipedia.org/wiki/Pairing_function#Cantor_pairing_function
* \param requestType RequestType
* \param dtype integer
* \return Cantor value of arguments
*/
// Parameter renamed d -> dtype to match the documented parameter name above.
static int GetCommandType(RequestType requestType, int dtype) {
  int m = static_cast<int>(requestType);
  // Cantor pairing of (m, dtype); inverted by DepairDataHandleType below.
  return (((m + dtype) * (m + dtype + 1)) / 2) + dtype;
}
/*!
* Unpairs Cantor value and finds the two integers used to pair.
* Then returns DataHandleType object with those numbers.
* \param cmd DataHandleCommand generated by GetCommandType function
* \return DataHandleType
*/
// Invert the Cantor pairing applied by GetCommandType: recover the diagonal
// index w, then the coordinates (requestType, dtype) within that diagonal.
static DataHandleType DepairDataHandleType(int cmd) {
  const int w = std::floor((std::sqrt(8 * cmd + 1) - 1)/2);
  // index of the first Cantor value on diagonal w
  const int diag_start = ((w * w) + w) / 2;
  const int dtype = cmd - diag_start;
  const int req = w - dtype;
  CHECK_GE(req, 0);
  CHECK_GE(dtype, 0);
  DataHandleType type;
  type.requestType = static_cast<RequestType>(req);
  type.dtype = dtype;
  return type;
}
/**
 * \brief Executor owns a FIFO of closures: the thread that calls \ref Start
 * drains the queue and runs each closure, while other threads submit work
 * through \ref Exec and block until their closure has finished.
 */
class Executor {
 public:
  /**
   * \brief type of function that can be submitted for execution
   */
  typedef std::function<void()> Func;

  /**
   * \brief run the event loop on the calling thread; returns only after
   * \ref Stop has been called (from this or another thread)
   */
  void Start() {
    std::unique_lock<std::mutex> lk(mu_);
    bool keep_running = true;
    while (keep_running) {
      // sleep until at least one task is queued
      cond_.wait(lk, [this] { return !queue_.empty(); });
      Task task = std::move(queue_.front());
      queue_.pop();
      // run (and signal) the task without holding the lock
      lk.unlock();
      if (task.fn) {
        task.fn();
      } else {
        // an empty function object is the stop sentinel posted by Stop()
        keep_running = false;
      }
      task.done->set_value();
      if (keep_running) lk.lock();
    }
  }

  /**
   * \brief submit \a func to the \ref Start thread and wait until it has
   * run; safe to call concurrently from multiple threads
   */
  void Exec(const Func& func) {
    Task task(func);
    std::future<void> finished = task.done->get_future();
    {
      std::lock_guard<std::mutex> guard(mu_);
      queue_.push(std::move(task));
      cond_.notify_one();
    }
    finished.wait();
  }

  /**
   * \brief ask the \ref Start thread to exit; blocks until it has seen the
   * request. threadsafe
   */
  void Stop() {
    Exec(Func());
  }

 private:
  // one queued closure plus the promise used to signal its completion
  struct Task {
    explicit Task(const Func& func)
        : fn(func), done(std::make_shared<std::promise<void>>()) { }
    Func fn;
    std::shared_ptr<std::promise<void>> done;
  };
  std::queue<Task> queue_;
  std::mutex mu_;
  std::condition_variable cond_;
};
class KVStoreDistServer {
public:
KVStoreDistServer() {
using namespace std::placeholders;
// ps-lite server with char payloads (0 is presumably the app id — confirm
// against the ps-lite KVServer constructor)
ps_server_ = new ps::KVServer<char>(0);
// simple (control) requests are routed to CommandHandle ...
static_cast<ps::SimpleApp*>(ps_server_)->set_request_handle(
std::bind(&KVStoreDistServer::CommandHandle, this, _1, _2));
// ... and key-value (data) requests to DataHandleEx
ps_server_->set_request_handle(
std::bind(&KVStoreDistServer::DataHandleEx, this, _1, _2, _3));
// async by default; the frontend switches to sync via kSyncMode
sync_mode_ = false;
gradient_compression_ = std::make_shared<GradientCompression>();
// verbose logging of row-sparse handling, off unless the env var is set
log_verbose_ = dmlc::GetEnv("MXNET_KVSTORE_DIST_ROW_SPARSE_VERBOSE", false);
}
~KVStoreDistServer() {
// reset the profiler state (0 — presumably "not running"; confirm the enum)
// before tearing down the ps-lite server
profiler::Profiler::Get()->SetState(profiler::Profiler::ProfilerState(0));
delete ps_server_;
}
// Register the callback invoked (on the executor thread) for kController
// commands; must be a non-empty function.
void set_controller(const KVStore::Controller& controller) {
CHECK(controller);
controller_ = controller;
}
// Register the callback that merges a received update into a stored value
// (see ApplyUpdates); must be a non-empty function.
void set_updater(const KVStore::Updater& updater) {
CHECK(updater);
updater_ = updater;
}
/**
 * \brief run the executor loop on the calling thread; blocks until the
 * \a kStopServer command stops the executor (see CommandHandle)
 */
void Run() {
exec_.Start();
}
private:
// Per-key accumulation state for pushes that have not yet been applied.
struct UpdateBuf {
// push requests buffered until the update is applied and responses are sent
std::vector<ps::KVMeta> request;
// accumulated (merged) gradient handed to the updater in sync mode
NDArray merged;
// temp_array is used to cast received values as float32 for computation if required
NDArray temp_array;
};
// Handle a control command from the frontend (see CommandType) and always
// acknowledge it with a response.
void CommandHandle(const ps::SimpleData& recved, ps::SimpleApp* app) {
  const CommandType cmd = static_cast<CommandType>(recved.head);
  if (cmd == CommandType::kStopServer) {
    exec_.Stop();
  } else if (cmd == CommandType::kSyncMode) {
    sync_mode_ = true;
  } else if (cmd == CommandType::kSetGradientCompression) {
    gradient_compression_->DecodeParams(recved.body);
  } else if (cmd == CommandType::kSetProfilerParams) {
    // last char is the type of profiler command
    ProcessServerProfilerCommands(
        static_cast<KVStoreServerProfilerCommand>(recved.body.back() - '0'),
        recved.body);
  } else if (cmd == CommandType::kSetMultiPrecision) {
    // uses value 1 for message id from frontend
    if (!multi_precision_) {
      multi_precision_ = true;
      CreateMultiPrecisionCopies();
    }
  } else if (cmd == CommandType::kController) {
    // this uses value 0 for message id from frontend;
    // let the main thread execute ctrl, which is necessary for python
    exec_.Exec([this, recved]() {
      CHECK(controller_);
      controller_(recved.head, recved.body);
    });
  }
  app->Response(recved);
}
/*
 * For keys already initialized, if necessary create a float32 master copy
 * (stored_realt) and a float32 merge buffer. This path is only taken if, by
 * some wrong usage of kvstore, keys were initialized before the optimizer
 * was set (i.e. before multi-precision mode was switched on).
 */
void CreateMultiPrecisionCopies() {
for (auto const &stored_entry : store_) {
const int key = stored_entry.first;
const NDArray &stored = stored_entry.second;
// only non-float32 keys need a separate float32 master copy
if (stored.dtype() != mshadow::kFloat32) {
auto &stored_realt = store_realt_[key];
if (stored.storage_type() == kRowSparseStorage) {
stored_realt = NDArray(kRowSparseStorage, stored.shape(), stored.ctx(),
true, mshadow::kFloat32);
} else {
stored_realt = NDArray(stored.shape(), stored.ctx(), false, mshadow::kFloat32);
}
// re-create the merge buffer in float32 as well, preserving its storage type
auto &update = update_buf_[key];
if (!update.merged.is_none()) {
if (update.merged.storage_type() == kRowSparseStorage) {
update.merged = NDArray(kRowSparseStorage, update.merged.shape(), update.merged.ctx(),
true, mshadow::kFloat32);
} else {
update.merged = NDArray(update.merged.shape(), update.merged.ctx(), false,
mshadow::kFloat32);
}
}
// switching precision while pushes are buffered would lose those pushes
CHECK(update.request.size() == 0)
<< ps::MyRank() << "Multiprecision mode can not be set while pushes are underway."
<< "Please set optimizer before pushing keys." << key << " " << update.request.size();
CopyFromTo(stored, stored_realt);
}
}
// block until all the async casts above have completed
for (auto const &stored_realt_entry : store_realt_) {
stored_realt_entry.second.WaitToRead();
}
}
// Dispatch a profiler command sent by a worker. kSetConfig carries the whole
// config string (minus the trailing command-type char); the other commands
// encode a single digit argument as the first character of the body.
void ProcessServerProfilerCommands(KVStoreServerProfilerCommand type, const std::string& body) {
  if (type == KVStoreServerProfilerCommand::kSetConfig) {
    // strip the trailing command-type character before parsing
    SetProfilerConfig(body.substr(0, body.size() - 1));
  } else {
    const int arg = static_cast<int>(body.front() - '0');
    if (type == KVStoreServerProfilerCommand::kState) {
      MXSetProfilerState(arg);
    } else if (type == KVStoreServerProfilerCommand::kPause) {
      MXProfilePause(arg);
    } else if (type == KVStoreServerProfilerCommand::kDump) {
      MXDumpProfile(arg);
    }
  }
}
/*
 * \brief Parse and apply a profiler configuration sent by a worker.
 * \param params_str comma-separated "key:value" pairs; a "filename" value is
 *        prefixed with the server rank so ranks do not overwrite each other
 */
void SetProfilerConfig(std::string params_str) {
  std::vector<std::string> elems;
  mxnet::kvstore::split(params_str, ',', std::back_inserter(elems));
  // Own the key/value strings here instead of hand-managed new[]/delete[]
  // buffers. reserve() up front guarantees no reallocation, so the c_str()
  // pointers collected below stay valid until MXSetProfilerConfig returns.
  std::vector<std::string> key_storage;
  std::vector<std::string> val_storage;
  key_storage.reserve(elems.size());
  val_storage.reserve(elems.size());
  std::vector<const char*> ckeys;
  std::vector<const char*> cvals;
  ckeys.reserve(elems.size());
  cvals.reserve(elems.size());
  for (size_t i = 0; i < elems.size(); i++) {
    std::vector<std::string> parts;
    mxnet::kvstore::split(elems[i], ':', std::back_inserter(parts));
    CHECK_EQ(parts.size(), 2) << "Improper profiler config passed from worker";
    CHECK(!parts[0].empty()) << "ProfilerConfig parameter is empty";
    CHECK(!parts[1].empty()) << "ProfilerConfig value is empty for parameter "<< parts[0];
    if (parts[0] == "filename") {
      // make the profile output file unique per server rank
      parts[1] = "rank" + std::to_string(ps::MyRank()) + "_" + parts[1];
    }
    key_storage.push_back(std::move(parts[0]));
    val_storage.push_back(std::move(parts[1]));
    ckeys.push_back(key_storage.back().c_str());
    cvals.push_back(val_storage.back().c_str());
  }
  MXSetProfilerConfig(elems.size(), &ckeys[0], &cvals[0]);
}
// Entry point for all key-value requests: decode the Cantor-paired cmd field
// and forward to the handler for that request variant.
void DataHandleEx(const ps::KVMeta& req_meta,
                  const ps::KVPairs<char>& req_data,
                  ps::KVServer<char>* server) {
  const DataHandleType type = DepairDataHandleType(req_meta.cmd);
  if (type.requestType == RequestType::kRowSparsePushPull) {
    DataHandleRowSparse(type, req_meta, req_data, server);
  } else if (type.requestType == RequestType::kCompressedPushPull) {
    DataHandleCompressed(type, req_meta, req_data, server);
  } else if (type.requestType == RequestType::kDefaultPushPull) {
    DataHandleDefault(type, req_meta, req_data, server);
  }
}
// A separate float32 master copy exists only in multi-precision mode and only
// for keys whose payload dtype is not already float32.
inline bool has_multi_precision_copy(const DataHandleType type) {
  if (!multi_precision_) return false;
  return type.dtype != mshadow::kFloat32;
}
/*
 * \brief Apply the accumulated update for `key` in default push/pull mode and
 * then immediately push the new value back to the workers (auto-pull).
 * In sync mode the update is applied only once every worker has pushed
 * (request.size() == NumWorkers()); in async mode every push applies at once.
 * \param storev per-key version counter, bumped once per applied update.
 *        NOTE(review): presumably a reference to store_v_[key], which is read
 *        again below when responding — confirm at the call site.
 */
inline void ApplyUpdatesDefault(const DataHandleType type, const int key,
UpdateBuf *update_buf, int& storev,
const ps::KVMeta& req_meta, const ps::KVPairs<char> &req_data,
ps::KVServer<char>* server) {
if (!sync_mode_ || update_buf->request.size() == (size_t) ps::NumWorkers()) {
// let the main thread to execute updater_, which is necessary for python
auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key];
auto& update = sync_mode_ ? update_buf->merged : update_buf->temp_array;
if (updater_) {
exec_.Exec([this, key, &update, &stored](){
CHECK(updater_);
updater_(key, update, &stored);
});
} else {
CHECK(sync_mode_) << "Updater needs to be set for async mode";
// if no updater, just copy
CopyFromTo(update_buf->merged, &stored);
}
if (log_verbose_) {
LOG(INFO) << "sent response to " << update_buf->request.size() << " workers";
}
update_buf->request.clear();
// bump this key's version so pullers can detect the change
storev++;
// keep the low-precision copy in sync with the float32 master copy
if (has_multi_precision_copy(type)) CopyFromTo(stored, store_[key]);
stored.WaitToRead();
// answer the buffered pushes with the fresh value, no separate pull needed
DefaultAutoPull(type, key, store_v_[key], req_meta, req_data, server);
} else {
// still waiting for more workers; make sure the merge has finished
update_buf->merged.WaitToRead();
}
}
/*
 * \brief Apply the accumulated update for `key` and acknowledge the buffered
 * push requests. In sync mode this runs once all workers have pushed
 * (request.size() == NumWorkers()); in async mode it runs for every push.
 */
inline void ApplyUpdates(const DataHandleType type, const int key,
UpdateBuf *update_buf, ps::KVServer<char>* server) {
if (!sync_mode_ || update_buf->request.size() == (size_t) ps::NumWorkers()) {
// let the main thread to execute updater_, which is necessary for python
auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key];
auto& update = sync_mode_ ? update_buf->merged : update_buf->temp_array;
if (updater_) {
exec_.Exec([this, key, &update, &stored](){
CHECK(updater_);
updater_(key, update, &stored);
});
} else {
CHECK(sync_mode_) << "Updater needs to be set for async mode";
// if no updater, just copy
CopyFromTo(update_buf->merged, &stored);
}
if (log_verbose_) {
LOG(INFO) << "sent response to " << update_buf->request.size() << " workers";
}
// acknowledge every worker whose push was merged into this update
for (const auto& req : update_buf->request) {
server->Response(req);
}
update_buf->request.clear();
// keep the low-precision copy in sync with the float32 master copy
if (has_multi_precision_copy(type)) CopyFromTo(stored, store_[key]);
stored.WaitToRead();
} else {
// still waiting for more workers; make sure the merge has finished
update_buf->merged.WaitToRead();
}
}
// Recover the row ids of a row-sparse request: keys[0] holds the master key
// and keys[1..num_rows] encode master_key + row_id, so subtracting the
// master key yields each row id. Results go into indices[0..num_rows-1].
void DecodeRowIds(const ps::SArray<ps::Key> &keys, int64_t *indices,
                  const int64_t master_key, const int64_t num_rows) {
  indices[0] = 0;
  for (int64_t j = 1; j <= num_rows; ++j) {
    const int decoded = DecodeKey(keys[j]);
    indices[j - 1] = decoded - master_key;
  }
}
/*
 * \brief Accumulate a received row-sparse gradient into updateBuf->merged
 * (merged += recved), casting the received values to float32 first when a
 * multi-precision copy is in use. Blocks until the result is readable.
 */
void AccumulateRowSparseGrads(const DataHandleType type,
const NDArray& recved,
UpdateBuf* updateBuf) {
NDArray out(kRowSparseStorage, updateBuf->merged.shape(), Context(), true,
has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
// cast to float32 via the temp buffer when multi-precision is on
if (has_multi_precision_copy(type)) CopyFromTo(recved, updateBuf->temp_array);
const NDArray& to_merge = has_multi_precision_copy(type) ? updateBuf->temp_array : recved;
// accumulate row_sparse gradients
using namespace mshadow;
// async engine op computing out = to_merge + merged (row-sparse add);
// reads to_merge and merged, writes out
Engine::Get()->PushAsync(
[to_merge, updateBuf, out](RunContext ctx, Engine::CallbackOnComplete on_complete) {
op::ElemwiseBinaryOp::ComputeEx<cpu, op::mshadow_op::plus>(
{}, {}, {to_merge, updateBuf->merged}, {kWriteTo}, {out});
on_complete();
}, to_merge.ctx(), {to_merge.var(), updateBuf->merged.var()}, {out.var()},
FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
// copy the sum back into merged and wait for it to become valid
CopyFromTo(out, &(updateBuf->merged), 0);
updateBuf->merged.WaitToRead();
}
/**
 * \brief respond to a worker pulling selected rows of a row_sparse weight.
 *
 * keys[0] is the master key (answered with length 0); keys[1..num_rows]
 * each encode master_key + row_id. The requested rows are gathered from the
 * stored server-side copy into the response value blob.
 */
void RowSparsePullResponse(const DataHandleType type,
                           const int master_key,
                           const size_t num_rows,
                           const ps::KVMeta& req_meta,
                           const ps::KVPairs<char>& req_data,
                           ps::KVServer<char>* server) {
  if (log_verbose_) LOG(INFO) << "pull: " << master_key;
  ps::KVPairs<char> response;
  if (num_rows == 0) {
    // no rows requested: answer every key with a zero-length value
    std::vector<int> lens(req_data.keys.size(), 0);
    response.keys = req_data.keys;
    response.lens.CopyFrom(lens.begin(), lens.end());
    server->Response(req_meta, response);
    return;
  }
  const NDArray& stored = store_[master_key];
  if (has_multi_precision_copy(type)) stored.WaitToRead();
  CHECK(!stored.is_none()) << "init " << master_key << " first";
  auto shape = stored.shape();
  auto unit_len = shape.ProdShape(1, shape.ndim());  // elements per row
  const int num_bytes = mshadow::mshadow_sizeof(type.dtype);
  const int unit_size = unit_len * num_bytes;        // bytes per row
  const char* data = static_cast<char *> (stored.data().dptr_);
  auto len = num_rows * unit_size;
  // concat values
  response.vals.resize(len);
  // rows are independent, so the copies can run in parallel.
  // NOTE(review): an unsigned loop index on `omp parallel for` requires
  // OpenMP 3.0+ — confirm the minimum compiler this must build with.
  #pragma omp parallel for
  for (size_t i = 1; i <= num_rows; i++) {
    int key = DecodeKey(req_data.keys[i]);
    int64_t row_id = key - master_key;
    const auto src = data + row_id * unit_size;
    auto begin = (i - 1) * unit_size;
    auto end = i * unit_size;
    response.vals.segment(begin, end).CopyFrom(src, unit_size);
  }
  // setup response
  response.keys = req_data.keys;
  std::vector<int> lens(req_data.keys.size(), unit_len);
  lens[0] = 0;  // the master key itself carries no data
  response.lens.CopyFrom(lens.begin(), lens.end());
  server->Response(req_meta, response);
}
/**
 * \brief create and fill the server-side copy of a row_sparse weight from the
 *        very first push.
 *
 * The received values are wrapped without copy, then copied (or cast, in
 * multi-precision mode) on the engine into a freshly allocated row_sparse
 * NDArray whose row index is populated with every row.
 */
void InitRowSparseStored(const DataHandleType type,
                         const int master_key,
                         const size_t num_rows,
                         const ps::KVMeta& req_meta,
                         const ps::KVPairs<char>& req_data,
                         ps::KVServer<char>* server) {
  // in multi-precision mode the master copy lives in store_realt_ as fp32
  auto& stored = has_multi_precision_copy(type) ? store_realt_[master_key] : store_[master_key];
  int dtype = type.dtype;
  int num_bytes = mshadow::mshadow_sizeof(dtype);
  auto unit_len = req_data.lens[1] / num_bytes;  // elements per row
  CHECK_GT(unit_len, 0);
  size_t ds[] = {num_rows, (size_t) unit_len};
  TShape dshape(ds, ds + 2);
  CHECK_EQ(req_data.vals.size(), num_rows * unit_len * num_bytes);
  // wrap the request payload in a TBlob (no copy yet)
  TBlob recv_blob;
  MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
    recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask);
  })
  NDArray recved = NDArray(recv_blob, 0);
  stored = NDArray(kRowSparseStorage, dshape, Context(), true,
                   has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
  if (has_multi_precision_copy(type)) {
    store_[master_key] = NDArray(kRowSparseStorage, dshape, Context(), true, type.dtype);
  }
  Engine::Get()->PushAsync(
    [this, recved, stored, type](RunContext ctx, Engine::CallbackOnComplete on_complete) {
      NDArray rsp = stored;
      stored.CheckAndAlloc({mshadow::Shape1(recved.shape()[0])});
      mshadow::Stream<cpu> *s = ctx.get_stream<cpu>();
      using namespace mxnet::op;
      // mark every row as present in the row_sparse index
      nnvm::dim_t nnr = rsp.shape()[0];
      MSHADOW_IDX_TYPE_SWITCH(rsp.aux_type(rowsparse::kIdx), IType, {
        IType* idx = rsp.aux_data(rowsparse::kIdx).dptr<IType>();
        mxnet_op::Kernel<PopulateFullIdxRspKernel, cpu>::Launch(s, nnr, idx);
      });
      TBlob rsp_data = rsp.data();
      // copies or casts as appropriate
      ndarray::Copy<cpu, cpu>(recved.data(), &rsp_data, Context(), Context(), RunContext());
      on_complete();
    }, recved.ctx(), {recved.var()}, {stored.var()},
    FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
  if (has_multi_precision_copy(type)) {
    CopyFromTo(stored, store_[master_key]);
    store_[master_key].WaitToRead();
  }
  // block until the engine copy completes: req_data's memory may be freed
  // as soon as this handler returns
  stored.WaitToRead();
  server->Response(req_meta);
}
/**
 * \brief handle a push/pull request for a row_sparse weight.
 *
 * keys[0] carries the master key; the remaining keys encode the pushed/pulled
 * row ids. The first push initializes the stored weight; later pushes are
 * merged (sync mode) or applied directly (async mode).
 */
void DataHandleRowSparse(const DataHandleType type, const ps::KVMeta& req_meta,
                         const ps::KVPairs<char>& req_data,
                         ps::KVServer<char>* server) {
  int master_key = DecodeKey(req_data.keys[0]);
  auto num_rows = req_data.keys.size() - 1;
  auto& stored = store_[master_key];
  if (req_meta.push) {
    CHECK_GT(req_data.lens.size(), 0) << "req_data.lens cannot be empty";
    CHECK_EQ(req_data.lens[0], 0);  // the master key carries no data
    if (stored.is_none()) {
      if (log_verbose_) LOG(INFO) << "initial push: " << master_key;
      // initialization
      CHECK_GT(num_rows, 0) << "init with empty data is not supported";
      InitRowSparseStored(type, master_key, num_rows, req_meta, req_data, server);
      return;
    } else {
      if (log_verbose_) LOG(INFO) << "push: " << master_key << " " << req_data.keys;
      auto& updates = update_buf_[master_key];
      // lazily allocate the merge / fp32-cast buffers on the first push after init
      if (sync_mode_ && updates.merged.is_none()) {
        updates.merged = NDArray(kRowSparseStorage, stored.shape(), Context(), true,
                                 has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
      }
      if (has_multi_precision_copy(type) && updates.temp_array.is_none()) {
        updates.temp_array = NDArray(kRowSparseStorage, stored.shape(), Context(), false,
                                     mshadow::kFloat32);
      }
      if (num_rows == 0) {
        // a push with no rows means a zero gradient from this worker
        if (sync_mode_) {
          if (updates.request.empty()) {
            // reset to zeros
            int merged_dtype = has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype;
            updates.merged = NDArray(kRowSparseStorage, stored.shape(), Context(),
                                     true, merged_dtype);
          } // else nothing to aggregate
          updates.request.push_back(req_meta);
          ApplyUpdates(type, master_key, &updates, server);
        } else {
          server->Response(req_meta);
        }
      } else {
        auto unit_len = req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype);
        CHECK_GT(unit_len, 0);
        // indices
        std::vector<int64_t> indices(num_rows);
        DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows);
        // data
        TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask);
        size_t ds[] = {(size_t) num_rows, (size_t) unit_len};
        TShape dshape(ds, ds + 2);
        TBlob recv_blob;
        MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, {
          recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()),
                            dshape, cpu::kDevMask);
        })
        // row_sparse NDArray wrapping the request payload (no copy yet)
        NDArray recved(kRowSparseStorage, stored.shape(), recv_blob, {idx_blob}, 0);
        if (updates.request.empty()) {
          // first push of this round overwrites the buffer
          if (sync_mode_) {
            CopyFromTo(recved, updates.merged);
          } else {
            if (has_multi_precision_copy(type)) {
              CopyFromTo(recved, updates.temp_array);
            } else {
              updates.temp_array = recved;
            }
          }
        } else {
          // subsequent pushes of the same round are accumulated
          CHECK(sync_mode_);
          AccumulateRowSparseGrads(type, recved, &updates);
        }
        updates.request.push_back(req_meta);
        ApplyUpdates(type, master_key, &updates, server);
      }
    }
  } else {
    // pull
    RowSparsePullResponse(type, master_key, num_rows, req_meta, req_data, server);
  }
}
/**
 * \brief respond to a pull request for a dense (default-storage) weight.
 *
 * Sends the whole stored array for \a key back to the requesting worker.
 */
void DefaultStorageResponse(const DataHandleType type,
                            const int key,
                            const ps::KVMeta& req_meta,
                            const ps::KVPairs<char> &req_data,
                            ps::KVServer<char>* server) {
  ps::KVPairs<char> response;
  const NDArray& stored = store_[key];
  CHECK(!stored.is_none()) << "init " << key << " first";
  // as server returns when store_realt is ready in this case
  if (has_multi_precision_copy(type)) stored.WaitToRead();
  auto len = stored.shape().Size() * mshadow::mshadow_sizeof(stored.dtype());
  response.keys = req_data.keys;
  response.lens = {len};
  // TODO(mli) try to remove this CopyFrom
  response.vals.CopyFrom(static_cast<const char*>(stored.data().dptr_), len);
  server->Response(req_meta, response);
}
/** add by cqq, respose all worker nodes */
/**
 * \brief push the updated dense weight for \a key to all worker nodes.
 *
 * Like DefaultStorageResponse, but delivered via AutoPullUpdate together with
 * the new version number instead of answering a single pull request.
 */
void DefaultAutoPull(const DataHandleType type,
                     const int key,
                     const int version,
                     const ps::KVMeta& req_meta,
                     const ps::KVPairs<char> &req_data,
                     ps::KVServer<char>* server) {
  CHECK(type.requestType == RequestType::kDefaultPushPull);
  ps::KVPairs<char> response;
  const NDArray& stored = store_[key];
  CHECK(!stored.is_none()) << "init " << key << " first";
  // as server returns when store_realt is ready in this case
  if (has_multi_precision_copy(type)) stored.WaitToRead();
  auto len = stored.shape().Size() * mshadow::mshadow_sizeof(stored.dtype());
  response.keys = req_data.keys;
  response.lens = {len};
  // TODO(mli) try to remove this CopyFrom
  response.vals.CopyFrom(static_cast<const char*>(stored.data().dptr_), len);
  server->AutoPullUpdate(version, req_meta, response);
  //LOG(INFO) << "Auto pull key: " << req_meta.key << " to all worker.";
}
/**
 * \brief handle a push/pull request for a gradient-compressed weight (fp32 only).
 *
 * On push, req_data carries two keys: keys[0] encodes the original
 * (uncompressed) size and keys[1] the real key. The compressed payload is
 * dequantized (into the store on first push, the merge buffer, or the per-key
 * decompression buffer) before being accumulated or applied.
 */
void DataHandleCompressed(const DataHandleType type,
                          const ps::KVMeta& req_meta,
                          const ps::KVPairs<char> &req_data,
                          ps::KVServer<char>* server) {
  CHECK_EQ(type.dtype, mshadow::kFloat32)
    << "Gradient compression is currently supported for fp32 only";
  if (req_meta.push) {
    // there used several WaitToRead, this is because \a recved's memory
    // could be deallocated when this function returns. so we need to make sure
    // the operators with \a NDArray are actually finished
    // first for dummy key which represents original size of array, whose len is 0
    CHECK_EQ(req_data.keys.size(), (size_t)2);
    CHECK_EQ(req_data.lens.size(), (size_t)2);
    CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[1]);
    int original_size = DecodeKey(req_data.keys[0]);
    int key = DecodeKey(req_data.keys[1]);
    auto& stored = store_[key];
    size_t ds[] = {(size_t)req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype)};
    TShape dshape(ds, ds + 1);
    TBlob recv_blob(reinterpret_cast<real_t*>(req_data.vals.data()), dshape, cpu::kDevMask);
    NDArray recved = NDArray(recv_blob, 0);
    // Fix: bind by reference so the lazily allocated buffer is cached in
    // decomp_buf_. The previous by-value `NDArray decomp_buf = decomp_buf_[key];`
    // never wrote the allocation back into the map, so the map entry stayed
    // none and a fresh buffer was allocated on every push.
    auto& decomp_buf = decomp_buf_[key];
    dshape = TShape{(int64_t) original_size};
    if (decomp_buf.is_none()) {
      decomp_buf = NDArray(dshape, Context());
    }
    if (stored.is_none()) {
      // very first push: allocate the store and dequantize directly into it
      stored = NDArray(dshape, Context());
      gradient_compression_->Dequantize(recved, &stored, 0);
      server->Response(req_meta);
      stored.WaitToRead();
    } else if (sync_mode_) {
      // synced push
      auto& merged = update_buf_[key];
      if (merged.merged.is_none()) {
        merged.merged = NDArray(dshape, Context());
      }
      if (merged.request.size() == 0) {
        // first push of this round overwrites the merge buffer
        gradient_compression_->Dequantize(recved, &merged.merged, 0);
      } else {
        // subsequent pushes are dequantized into a scratch buffer and added
        gradient_compression_->Dequantize(recved, &decomp_buf, 0);
        merged.merged += decomp_buf;
      }
      merged.request.push_back(req_meta);
      ApplyUpdates(type, key, &merged, server);
    } else {
      // async push
      gradient_compression_->Dequantize(recved, &decomp_buf, 0);
      exec_.Exec([this, key, &decomp_buf, &stored]() {
        CHECK(updater_);
        updater_(key, decomp_buf, &stored);
      });
      server->Response(req_meta);
      stored.WaitToRead();
    }
  } else { // pull
    CHECK_EQ(req_data.keys.size(), (size_t)1);
    CHECK_EQ(req_data.lens.size(), (size_t)0);
    int key = DecodeKey(req_data.keys[0]);
    DefaultStorageResponse(type, key, req_meta, req_data, server);
  }
}
/**
 * \brief handle a push/pull request for a dense (default-storage) weight.
 *
 * Pushes are acknowledged immediately, then merged (sync mode) or stashed
 * (async mode) before the update is applied; the first push initializes the
 * store and auto-pushes version 0 to the workers.
 */
void DataHandleDefault(const DataHandleType type, const ps::KVMeta& req_meta,
                       const ps::KVPairs<char> &req_data,
                       ps::KVServer<char>* server) {
  // do some check
  CHECK_EQ(req_data.keys.size(), (size_t)1);
  if (req_meta.push) {
    CHECK_EQ(req_data.lens.size(), (size_t)1);
    CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[0]);
  }
  int key = DecodeKey(req_data.keys[0]);
  auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key];
  // there used several WaitToRead, this is because \a recved's memory
  // could be deallocated when this function returns. so we need to make sure
  // the operators with \a NDArray are actually finished
  if (req_meta.push) {
    server->Response(req_meta);
    size_t ds[] = {(size_t) req_data.lens[0] / mshadow::mshadow_sizeof(type.dtype)};
    TShape dshape(ds, ds + 1); //tensor.shape, tensor.shape+tensor.dim
    // wrap the request payload in an NDArray (no copy yet)
    TBlob recv_blob;
    MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, {
      recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask);
    })
    NDArray recved = NDArray(recv_blob, 0);
    if (stored.is_none()) {
      // initialization
      stored = NDArray(dshape, Context(), false,
                       has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
      CopyFromTo(recved, &stored, 0);
      if (has_multi_precision_copy(type)) {
        auto& stored_dtype = store_[key];
        stored_dtype = NDArray(dshape, Context(), false, type.dtype);
        CopyFromTo(stored, stored_dtype); //store in store_
        stored_dtype.WaitToRead();
      }
      stored.WaitToRead();
      store_v_[key] = 0;  // version counter starts at 0 after initialization
      DefaultAutoPull(type, key, store_v_[key], req_meta, req_data, server);
    } else {
      auto &updates = update_buf_[key];
      // lazily allocate the merge / fp32-cast buffers
      if (sync_mode_ && updates.merged.is_none()) {
        updates.merged = NDArray(dshape, Context(), false,
                                 has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
      }
      if (has_multi_precision_copy(type) && updates.temp_array.is_none()) {
        updates.temp_array = NDArray(dshape, Context(), false, mshadow::kFloat32);
      }
      if (updates.request.empty()) {
        // first push of this round overwrites the buffer
        if (sync_mode_) {
          CopyFromTo(recved, updates.merged);
        } else {
          if (has_multi_precision_copy(type)) {
            CopyFromTo(recved, updates.temp_array);
          } else {
            updates.temp_array = recved;
          }
        }
      } else {
        // subsequent pushes of the same round are accumulated
        CHECK(sync_mode_);
        if (has_multi_precision_copy(type)) {
          CopyFromTo(recved, updates.temp_array);
          updates.merged += updates.temp_array;
        } else {
          updates.merged += recved;
        }
      }
      updates.request.push_back(req_meta);
      // ApplyUpdatesDefault is defined earlier in this file (outside this view);
      // it also bumps store_v_ and auto-pushes the result to the workers
      ApplyUpdatesDefault(type, key, &updates, store_v_[key], req_meta, req_data, server);
    }
  } else {
    DefaultStorageResponse(type, key, req_meta, req_data, server); //pull
  }
}
/**
 * \brief convert a raw ps key into this server's local, zero-based key space.
 * \param key the raw ps::Key from a request
 * \return key minus the beginning of this server's key range
 */
int DecodeKey(ps::Key key) {
  auto kr = ps::Postoffice::Get()->GetServerKeyRanges()[ps::MyRank()];
  return key - kr.begin();
}
/**
* \brief user defined mode for push
*/
bool sync_mode_;
KVStore::Controller controller_;
KVStore::Updater updater_;
/**
* \brief store_ contains the value at kvstore for each key
*/
std::unordered_map<int, NDArray> store_;
std::unordered_map<int, NDArray> store_realt_;
/** \brief added by cqq: version counter for each key in store_ */
std::unordered_map<int, int> store_v_;
/**
 * \brief update_buf_ is a buffer used if sync_mode is true. It holds
 * values from different workers being merged. The store will be updated
 * to this value once values from all workers have been pushed into this buffer.
 */
std::unordered_map<int, UpdateBuf> update_buf_;
/**
* \brief decomp_buf_ is a buffer into which compressed values are
* decompressed before merging to the store. used when compress_!='none'
*/
std::unordered_map<int, NDArray> decomp_buf_;
Executor exec_;
ps::KVServer<char>* ps_server_;
// whether to LOG verbose information
bool log_verbose_;
/*
* \brief whether to use multi precision mode.
* in multi precision mode, all weights are stored as float32.
* any gradient received will be cast to float32 before accumulation and updating of weights.
*/
bool multi_precision_;
/**
* \brief gradient compression object.
* starts with none, used after SetGradientCompression sets the type
* currently there is no support for unsetting gradient compression
*/
std::shared_ptr<kvstore::GradientCompression> gradient_compression_;
};
} // namespace kvstore
} // namespace mxnet
#endif // MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
|
for-2.c | /* { dg-do compile } */
/* Verifies that a clause duplicated on '#pragma omp for' (nowait, ordered)
   is diagnosed, while a single occurrence is accepted.  */
void foo()
{
  int i;
#pragma omp for nowait
  for (i = 0; i < 10; ++i) ;
#pragma omp for nowait nowait /* { dg-error "too many" } */
  for (i = 0; i < 10; ++i) ;
#pragma omp for ordered
  for (i = 0; i < 10; ++i) ;
#pragma omp for ordered ordered /* { dg-error "too many" } */
  for (i = 0; i < 10; ++i) ;
}
|
HybridAdoptorBase.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
// File created by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
//////////////////////////////////////////////////////////////////////////////////////
/** @file HybridAdoptorBase.h
*
* Hybrid adoptor base class
*/
#ifndef QMCPLUSPLUS_HYBRID_ADOPTOR_BASE_H
#define QMCPLUSPLUS_HYBRID_ADOPTOR_BASE_H
#include <Particle/DistanceTableData.h>
#include <QMCWaveFunctions/lcao/SoaSphericalTensor.h>
#include <spline2/MultiBspline1D.hpp>
#include <Numerics/SmoothFunctions.hpp>
namespace qmcplusplus
{
/** Orbitals centered on one atom, stored in SoA layout.
 *
 * Holds one 1D multi-spline over the radial functions of all bands plus the
 * spherical-harmonic machinery (SoaSphericalTensor) needed to evaluate
 * value / gradient / laplacian of every orbital around this center.
 */
template<typename ST>
struct AtomicOrbitalSoA
{
  static const int D = 3;
  using AtomicSplineType = typename bspline_traits<ST, 1>::SplineType;
  using AtomicBCType = typename bspline_traits<ST, 1>::BCType;
  using AtomicSingleSplineType = UBspline_1d_d;
  using PointType = TinyVector<ST, D>;
  using value_type = ST;

  using vContainer_type = aligned_vector<ST>;

  // near core cutoff
  ST rmin;
  // far from core cutoff, rmin_sqrt>=rmin
  ST rmin_sqrt;
  // cutoff: radius inside which this center is used;
  // cutoff_buffer..cutoff: smoothing buffer region;
  // spline_radius: extent of the radial spline grid;
  // non_overlapping_radius: radius guaranteed free of other centers
  ST cutoff, cutoff_buffer, spline_radius, non_overlapping_radius;
  int spline_npoints, BaseN;
  // NumBands: number of orbitals; Npad: NumBands padded for SIMD alignment
  int NumBands, Npad;
  // center position
  PointType pos;
  // lmax: max angular momentum; lm_tot = (lmax+1)^2 combined (l,m) channels
  const int lmax, lm_tot;
  SoaSphericalTensor<ST> Ylm;
  // l value of each (l,m) channel, as ST for vectorized arithmetic
  vContainer_type l_vals;
  // scratch: r^-l per (l,m) channel, filled in evaluate_vgl
  vContainer_type r_power_minus_l;
  ///1D spline of radial functions of all the orbitals
  std::shared_ptr<MultiBspline1D<ST>> SplineInst;

  // per-channel radial value/gradient/laplacian scratch, Npad entries per lm
  vContainer_type localV, localG, localL;

  AtomicOrbitalSoA(int Lmax)
    : lmax(Lmax), lm_tot((Lmax + 1) * (Lmax + 1)), Ylm(Lmax)
  {
    r_power_minus_l.resize(lm_tot);
    l_vals.resize(lm_tot);
    // l_vals[lm] = l for every m of that l (lm index = l*l + l + m)
    for (int l = 0; l <= lmax; l++)
      for (int m = -l; m <= l; m++)
        l_vals[l * (l + 1) + m] = l;
    // rmin chosen so r^lmax does not underflow; floor at machine epsilon
    rmin = std::exp(std::log(std::numeric_limits<ST>::min()) / std::max(Lmax, 1));
    rmin = std::max(rmin, std::numeric_limits<ST>::epsilon());
    rmin_sqrt = std::max(rmin, std::sqrt(std::numeric_limits<ST>::epsilon()));
  }

  /// allocate per-band storage for Nb bands and (re)create the radial spline
  inline void resizeStorage(size_t Nb)
  {
    NumBands = Nb;
    Npad = getAlignedSize<ST>(Nb);
    localV.resize(Npad * lm_tot);
    localG.resize(Npad * lm_tot);
    localL.resize(Npad * lm_tot);
    create_spline();
  }

  /// broadcast the radial spline table to all ranks
  void bcast_tables(Communicate* comm) { chunked_bcast(comm, SplineInst->getSplinePtr()); }
  /// gather radial spline tables distributed over ranks
  void gather_tables(Communicate* comm, std::vector<int>& offset) { gatherv(comm, SplineInst->getSplinePtr(), Npad, offset); }

  /// set the center position and all radii/grid parameters
  template<typename PT, typename VT>
  inline void set_info(const PT& R,
                       const VT& cutoff_in,
                       const VT& cutoff_buffer_in,
                       const VT& spline_radius_in,
                       const VT& non_overlapping_radius_in,
                       const int spline_npoints_in)
  {
    pos[0] = R[0];
    pos[1] = R[1];
    pos[2] = R[2];
    cutoff = cutoff_in;
    cutoff_buffer = cutoff_buffer_in;
    spline_radius = spline_radius_in;
    spline_npoints = spline_npoints_in;
    non_overlapping_radius = non_overlapping_radius_in;
    BaseN = spline_npoints + 2;  // grid points plus boundary points
  }

  /// allocate the 1D multi-spline: flat at r=0, natural at spline_radius
  inline void create_spline()
  {
    AtomicBCType bc;
    bc.lCode = FLAT;
    bc.rCode = NATURAL;
    Ugrid grid;
    grid.start = 0.0;
    grid.end = spline_radius;
    grid.num = spline_npoints;
    SplineInst = std::make_shared<MultiBspline1D<ST>>();
    SplineInst->create(grid, bc, lm_tot * Npad);
  }

  /// zero all spline coefficients
  inline void flush_zero() { SplineInst->flush_zero(); }

  /// copy one single-band radial spline into band ispline of channel lm
  inline void set_spline(AtomicSingleSplineType* spline, int lm, int ispline)
  {
    SplineInst->copy_spline(spline, lm * Npad + ispline, 0, BaseN);
  }

  /// read the radial spline from hdf5; false if lmax/radius/npoints mismatch
  bool read_splines(hdf_archive& h5f)
  {
    einspline_engine<AtomicSplineType> bigtable(SplineInst->getSplinePtr());
    int lmax_in, spline_npoints_in;
    ST spline_radius_in;
    bool success = true;
    success = success && h5f.readEntry(lmax_in, "l_max");
    success = success && h5f.readEntry(spline_radius_in, "spline_radius");
    success = success && h5f.readEntry(spline_npoints_in, "spline_npoints");
    if (lmax_in != lmax)
      return false;
    if (spline_radius_in != spline_radius)
      return false;
    if (spline_npoints_in != spline_npoints)
      return false;
    return success && h5f.readEntry(bigtable, "radial_spline");
  }

  /// write the radial spline and its parameters to hdf5
  bool write_splines(hdf_archive& h5f)
  {
    bool success = true;
    success = success && h5f.writeEntry(spline_radius, "spline_radius");
    success = success && h5f.writeEntry(spline_npoints, "spline_npoints");
    success = success && h5f.writeEntry(lmax, "l_max");
    success = success && h5f.writeEntry(pos, "position");
    einspline_engine<AtomicSplineType> bigtable(SplineInst->getSplinePtr());
    success = success && h5f.writeEntry(bigtable, "radial_spline");
    return success;
  }

  //evaluate only V
  /** accumulate myV[ib] = sum_lm Ylm(rhat) * radial_lm(r)[ib]
   * \param r distance to the center, \param dr displacement, \param myV output values
   */
  template<typename VV>
  inline void evaluate_v(const ST& r, const PointType& dr, VV& myV)
  {
    // at r ~ 0 the direction is undefined; use the z axis
    if (r > std::numeric_limits<ST>::epsilon())
      Ylm.evaluateV(dr[0] / r, dr[1] / r, dr[2] / r);
    else
      Ylm.evaluateV(0, 0, 1);
    const ST* restrict Ylm_v = Ylm[0];
    constexpr ST czero(0);
    ST* restrict val = myV.data();
    ST* restrict local_val = localV.data();
    std::fill(myV.begin(), myV.end(), czero);
    SplineInst->evaluate(r, localV);
    for (size_t lm = 0; lm < lm_tot; lm++)
    {
#pragma omp simd aligned(val, local_val)
      for (size_t ib = 0; ib < myV.size(); ib++)
        val[ib] += Ylm_v[lm] * local_val[ib];
      local_val += Npad;  // next (l,m) channel
    }
  }

  /** evaluate values for multiple displacements sharing the same distance r
   * (e.g. quadrature points); one row of multi_myV per displacement.
   * NOTE(review): Ylm is evaluated with -dr/r — the displacement sign
   * convention differs from evaluate_v; confirm against the caller.
   */
  template<typename DISPL, typename VM>
  inline void evaluateValues(const DISPL& Displacements, const int center_idx, const ST& r, VM& multi_myV)
  {
    if (r <= std::numeric_limits<ST>::epsilon())
      Ylm.evaluateV(0, 0, 1);
    const ST* restrict Ylm_v = Ylm[0];
    const size_t m = multi_myV.cols();
    constexpr ST czero(0);
    std::fill(multi_myV.begin(), multi_myV.end(), czero);
    // radial part depends only on r, shared by all displacements
    SplineInst->evaluate(r, localV);
    for (int ivp = 0; ivp < Displacements.size(); ivp++)
    {
      PointType dr = Displacements[ivp][center_idx];
      if (r > std::numeric_limits<ST>::epsilon())
        Ylm.evaluateV(-dr[0] / r, -dr[1] / r, -dr[2] / r);
      ST* restrict val = multi_myV[ivp];
      ST* restrict local_val = localV.data();
      for (size_t lm = 0; lm < lm_tot; lm++)
      {
#pragma omp simd aligned(val, local_val)
        for (size_t ib = 0; ib < m; ib++)
          val[ib] += Ylm_v[lm] * local_val[ib];
        local_val += Npad;
      }
    }
  }

  //evaluate VGL
  /** accumulate value, gradient and laplacian of all orbitals at (r, dr).
   * Three regimes: r > rmin_sqrt (regular formulas), rmin < r <= rmin_sqrt
   * (divergent laplacian terms suppressed), r <= rmin (on top of the ion:
   * only the s channel value/laplacian and p-channel gradients survive).
   */
  template<typename VV, typename GV>
  inline void evaluate_vgl(const ST& r, const PointType& dr, VV& myV, GV& myG, VV& myL)
  {
    ST drx, dry, drz, rhatx, rhaty, rhatz, rinv;
    if (r > rmin)
    {
      rinv = 1.0 / r;
    }
    else
    {
      rinv = 0;  // suppress 1/r terms near the core
    }
    drx = dr[0];
    dry = dr[1];
    drz = dr[2];
    rhatx = drx * rinv;
    rhaty = dry * rinv;
    rhatz = drz * rinv;

    Ylm.evaluateVGL(drx, dry, drz);
    const ST* restrict Ylm_v = Ylm[0];
    const ST* restrict Ylm_gx = Ylm[1];
    const ST* restrict Ylm_gy = Ylm[2];
    const ST* restrict Ylm_gz = Ylm[3];

    ST* restrict g0 = myG.data(0);
    ST* restrict g1 = myG.data(1);
    ST* restrict g2 = myG.data(2);
    constexpr ST czero(0), cone(1), chalf(0.5);
    std::fill(myV.begin(), myV.end(), czero);
    std::fill(g0, g0 + Npad, czero);
    std::fill(g1, g1 + Npad, czero);
    std::fill(g2, g2 + Npad, czero);
    std::fill(myL.begin(), myL.end(), czero);
    ST* restrict val = myV.data();
    ST* restrict lapl = myL.data();
    ST* restrict local_val = localV.data();
    ST* restrict local_grad = localG.data();
    ST* restrict local_lapl = localL.data();

    SplineInst->evaluate_vgl(r, localV, localG, localL);

    if (r > rmin_sqrt)
    {
      // far from core
      // fill r^-l per channel, used to rescale Ylm and its gradient
      r_power_minus_l[0] = cone;
      ST r_power_temp = cone;
      for (int l = 1; l <= lmax; l++)
      {
        r_power_temp *= rinv;
        for (int m = -l, lm = l * l; m <= l; m++, lm++)
          r_power_minus_l[lm] = r_power_temp;
      }
      for (size_t lm = 0; lm < lm_tot; lm++)
      {
        const ST& l_val = l_vals[lm];
        const ST& r_power = r_power_minus_l[lm];
        const ST Ylm_rescale = Ylm_v[lm] * r_power;
        const ST rhat_dot_G = (rhatx * Ylm_gx[lm] + rhaty * Ylm_gy[lm] + rhatz * Ylm_gz[lm]) * r_power;
#pragma omp simd aligned(val, g0, g1, g2, lapl, local_val, local_grad, local_lapl)
        for (size_t ib = 0; ib < myV.size(); ib++)
        {
          const ST local_v = local_val[ib];
          const ST local_g = local_grad[ib];
          const ST local_l = local_lapl[ib];
          // value
          const ST Vpart = l_val * rinv * local_v;
          val[ib] += Ylm_rescale * local_v;

          // grad
          const ST factor1 = local_g * Ylm_rescale;
          const ST factor2 = local_v * r_power;
          const ST factor3 = -Vpart * Ylm_rescale;
          g0[ib] += factor1 * rhatx + factor2 * Ylm_gx[lm] + factor3 * rhatx;
          g1[ib] += factor1 * rhaty + factor2 * Ylm_gy[lm] + factor3 * rhaty;
          g2[ib] += factor1 * rhatz + factor2 * Ylm_gz[lm] + factor3 * rhatz;

          // laplacian
          lapl[ib] += (local_l + (local_g * (2 - l_val) - Vpart) * rinv) * Ylm_rescale + (local_g - Vpart) * rhat_dot_G;
        }
        local_val += Npad;
        local_grad += Npad;
        local_lapl += Npad;
      }
    }
    else if (r > rmin)
    {
      // the possibility of reaching here is very very low
      std::cout << "Warning: an electron is very close to an ion, distance=" << r << " be careful!" << std::endl;
      // near core, kill divergence in the laplacian
      r_power_minus_l[0] = cone;
      ST r_power_temp = cone;
      for (int l = 1; l <= lmax; l++)
      {
        r_power_temp *= rinv;
        for (int m = -l, lm = l * l; m <= l; m++, lm++)
          r_power_minus_l[lm] = r_power_temp;
      }
      for (size_t lm = 0; lm < lm_tot; lm++)
      {
        const ST& l_val = l_vals[lm];
        const ST& r_power = r_power_minus_l[lm];
        const ST Ylm_rescale = Ylm_v[lm] * r_power;
        const ST rhat_dot_G = (Ylm_gx[lm] * rhatx + Ylm_gy[lm] * rhaty + Ylm_gz[lm] * rhatz) * r_power * r;
#pragma omp simd aligned(val, g0, g1, g2, lapl, local_val, local_grad, local_lapl)
        for (size_t ib = 0; ib < myV.size(); ib++)
        {
          const ST local_v = local_val[ib];
          const ST local_g = local_grad[ib];
          const ST local_l = local_lapl[ib];
          // value
          const ST Vpart = Ylm_rescale * local_v;
          val[ib] += Vpart;

          // grad
          const ST factor1 = local_g * Ylm_rescale;
          const ST factor2 = local_v * r_power;
          const ST factor3 = -l_val * Vpart * rinv;
          g0[ib] += factor1 * rhatx + factor2 * Ylm_gx[lm] + factor3 * rhatx;
          g1[ib] += factor1 * rhaty + factor2 * Ylm_gy[lm] + factor3 * rhaty;
          g2[ib] += factor1 * rhatz + factor2 * Ylm_gz[lm] + factor3 * rhatz;

          // laplacian
          lapl[ib] += local_l * (cone - chalf * l_val) * (3 * Ylm_rescale + rhat_dot_G);
        }
        local_val += Npad;
        local_grad += Npad;
        local_lapl += Npad;
      }
    }
    else
    {
      std::cout << "Warning: an electron is on top of an ion!" << std::endl;
      // strictly zero
      // only the s channel (lm = 0) contributes to value and laplacian
#pragma omp simd aligned(val, lapl, local_val, local_lapl)
      for (size_t ib = 0; ib < myV.size(); ib++)
      {
        // value
        val[ib] = Ylm_v[0] * local_val[ib];
        // laplacian
        lapl[ib] = local_lapl[ib] * static_cast<ST>(3) * Ylm_v[0];
      }
      local_val += Npad;
      local_grad += Npad;
      local_lapl += Npad;
      if (lm_tot > 0)
      {
        //std::cout << std::endl;
        // gradients come from the p channels (lm = 1..3).
        // NOTE(review): the loop reads lm up to 3 but the guard only checks
        // lm_tot > 0 — confirm lm_tot >= 4 (lmax >= 1) whenever this branch
        // can be reached.
        for (size_t lm = 1; lm < 4; lm++)
        {
#pragma omp simd aligned(g0, g1, g2, local_grad)
          for (size_t ib = 0; ib < myV.size(); ib++)
          {
            const ST local_g = local_grad[ib];
            // grad
            g0[ib] += local_g * Ylm_gx[lm];
            g1[ib] += local_g * Ylm_gy[lm];
            g2[ib] += local_g * Ylm_gz[lm];
          }
          local_grad += Npad;
        }
      }
    }
  }

  /// value/gradient/hessian — not implemented for atomic orbitals
  template<typename VV, typename GV, typename HT>
  void evaluate_vgh(const ST& r, const PointType& dr, VV& myV, GV& myG, HT& myH)
  {
    //Needed to do tensor product here
    APP_ABORT("AtomicOrbitalSoA::evaluate_vgh");
  }
};
/** adoptor class to match
*
*/
template<typename ST>
struct HybridAdoptorBase
{
static const int D = 3;
using PointType = typename AtomicOrbitalSoA<ST>::PointType;
using RealType = typename DistanceTableData::RealType;
// atomic centers
std::vector<AtomicOrbitalSoA<ST>> AtomicCenters;
///table index
int myTableID;
//mapping supercell to primitive cell
std::vector<int> Super2Prim;
// r, dr for distance table
RealType dist_r;
DistanceTableData::PosType dist_dr;
// for APBC
PointType r_image;
// smooth function derivatives
RealType f, df_dr, d2f_dr2;
/// smoothing schemes
enum class smoothing_schemes
{
CONSISTENT = 0,
SMOOTHALL,
SMOOTHPARTIAL
} smooth_scheme;
/// smoothing function
smoothing_functions smooth_func_id;
HybridAdoptorBase() {}
/// register the ion-electron distance table and the supercell-to-primitive-cell center mapping
void set_info(const ParticleSet& ions, ParticleSet& els, const std::vector<int>& mapping)
{
  myTableID = els.addTable(ions, DT_SOA);
  Super2Prim = mapping;
}
/// resize every atomic center's storage for Nb bands and report total spline memory
inline void resizeStorage(size_t Nb)
{
  size_t SplineCoefsBytes = 0;
  for (int ic = 0; ic < AtomicCenters.size(); ic++)
  {
    AtomicCenters[ic].resizeStorage(Nb);
    SplineCoefsBytes += AtomicCenters[ic].SplineInst->sizeInByte();
  }
  app_log() << "MEMORY " << SplineCoefsBytes / (1 << 20) << " MB allocated "
            << "for the atomic radial splines in hybrid orbital representation" << std::endl;
}
/// broadcast the radial spline tables of every atomic center
void bcast_tables(Communicate* comm)
{
  for (auto& center : AtomicCenters)
    center.bcast_tables(comm);
}
/// gather the radial spline tables of every atomic center; no-op on a single rank
void gather_atomic_tables(Communicate* comm, std::vector<int>& offset)
{
  if (comm->size() == 1)
    return;
  for (auto& center : AtomicCenters)
    center.gather_tables(comm, offset);
}
/// zero out the radial spline coefficients of every atomic center
inline void flush_zero()
{
  for (auto& center : AtomicCenters)
    center.flush_zero();
}
/// read all atomic radial splines from an hdf5 archive; false on any mismatch or read failure
bool read_splines(hdf_archive& h5f)
{
  bool success = true;
  size_t ncenter;
  success = success && h5f.push("atomic_centers", false);
  success = success && h5f.readEntry(ncenter, "number_of_centers");
  if (!success)
    return success;
  if (ncenter != AtomicCenters.size())
    success = false;
  // read splines of each center
  for (int ic = 0; ic < AtomicCenters.size(); ic++)
  {
    std::ostringstream gname;
    gname << "center_" << ic;  // group "center_<ic>" holds this center's spline
    success = success && h5f.push(gname.str().c_str(), false);
    success = success && AtomicCenters[ic].read_splines(h5f);
    h5f.pop();
  }
  h5f.pop();
  return success;
}
/// write all atomic radial splines into an hdf5 archive, one group per center
bool write_splines(hdf_archive& h5f)
{
  bool success = true;
  int ncenter = AtomicCenters.size();
  success = success && h5f.push("atomic_centers", true);
  success = success && h5f.writeEntry(ncenter, "number_of_centers");
  // write splines of each center
  for (int ic = 0; ic < AtomicCenters.size(); ic++)
  {
    std::ostringstream gname;
    gname << "center_" << ic;  // group "center_<ic>" holds this center's spline
    success = success && h5f.push(gname.str().c_str(), true);
    success = success && AtomicCenters[ic].write_splines(h5f);
    h5f.pop();
  }
  h5f.pop();
  return success;
}
/** compute the twisted-boundary-condition sign accumulator for position r.
 * Maps r - r_image (r_image set by a prior evaluate_* call) to primitive-cell
 * unit coordinates and accumulates HalfG[i] * round(shift[i]).
 */
template<typename Cell>
inline int get_bc_sign(const PointType& r, const Cell& PrimLattice, TinyVector<int, D>& HalfG)
{
  int bc_sign = 0;
  PointType shift_unit = PrimLattice.toUnit(r - r_image);
  for (int i = 0; i < D; i++)
  {
    ST img = round(shift_unit[i]);
    bc_sign += HalfG[i] * (int)img;
  }
  return bc_sign;
}
//evaluate only V
template<typename VV>
inline RealType evaluate_v(const ParticleSet& P, const int iat, VV& myV)
{
const auto& ei_dist = P.getDistTable(myTableID);
const int center_idx = ei_dist.get_first_neighbor(iat, dist_r, dist_dr, P.activePtcl == iat);
if (center_idx < 0)
abort();
auto& myCenter = AtomicCenters[Super2Prim[center_idx]];
if (dist_r < myCenter.cutoff)
{
PointType dr(-dist_dr[0], -dist_dr[1], -dist_dr[2]);
r_image = myCenter.pos + dr;
myCenter.evaluate_v(dist_r, dr, myV);
return smooth_function(myCenter.cutoff_buffer, myCenter.cutoff, dist_r);
}
return RealType(-1);
}
/* check if the batched algorithm is safe to operate
 * @param VP virtual particle set
 * @return true if it is safe
 *
 * When the reference electron in the NLPP evaluation is at a distance larger than the non-overlapping radius of the reference center,
 * some quadrature points may get their SPOs evaluated from the nearest center, which is not the reference center.
 * The batched algorithm forces the evaluation on the reference center and introduces some error.
 * In this case, the non-batched algorithm should be used.
 */
/// true when the reference electron is within the reference center's non-overlapping radius
bool is_batched_safe(const VirtualParticleSet& VP)
{
  const int center_idx = VP.refSourcePtcl;
  auto& myCenter = AtomicCenters[Super2Prim[center_idx]];
  return VP.refPS.getDistTable(myTableID).Distances[VP.refPtcl][center_idx] < myCenter.non_overlapping_radius;
}
// C2C, C2R cases
template<typename VM>
inline RealType evaluateValuesC2X(const VirtualParticleSet& VP, VM& multi_myV)
{
const int center_idx = VP.refSourcePtcl;
dist_r = VP.refPS.getDistTable(myTableID).Distances[VP.refPtcl][center_idx];
auto& myCenter = AtomicCenters[Super2Prim[center_idx]];
if (dist_r < myCenter.cutoff)
{
myCenter.evaluateValues(VP.getDistTable(myTableID).Displacements, center_idx, dist_r, multi_myV);
return smooth_function(myCenter.cutoff_buffer, myCenter.cutoff, dist_r);
}
return RealType(-1);
}
// R2R case
template<typename VM, typename Cell, typename SV>
inline RealType evaluateValuesR2R(const VirtualParticleSet& VP,
const Cell& PrimLattice,
TinyVector<int, D>& HalfG,
VM& multi_myV,
SV& bc_signs)
{
const int center_idx = VP.refSourcePtcl;
dist_r = VP.refPS.getDistTable(myTableID).Distances[VP.refPtcl][center_idx];
auto& myCenter = AtomicCenters[Super2Prim[center_idx]];
if (dist_r < myCenter.cutoff)
{
const auto& displ = VP.getDistTable(myTableID).Displacements;
for (int ivp = 0; ivp < VP.getTotalNum(); ivp++)
{
r_image = myCenter.pos - displ[ivp][center_idx];
bc_signs[ivp] = get_bc_sign(VP.R[ivp], PrimLattice, HalfG);
;
}
myCenter.evaluateValues(displ, center_idx, dist_r, multi_myV);
return smooth_function(myCenter.cutoff_buffer, myCenter.cutoff, dist_r);
}
return RealType(-1);
}
//evaluate only VGL
template<typename VV, typename GV>
inline RealType evaluate_vgl(const ParticleSet& P, const int iat, VV& myV, GV& myG, VV& myL)
{
const auto& ei_dist = P.getDistTable(myTableID);
const int center_idx = ei_dist.get_first_neighbor(iat, dist_r, dist_dr, P.activePtcl == iat);
if (center_idx < 0)
abort();
auto& myCenter = AtomicCenters[Super2Prim[center_idx]];
if (dist_r < myCenter.cutoff)
{
PointType dr(-dist_dr[0], -dist_dr[1], -dist_dr[2]);
r_image = myCenter.pos + dr;
myCenter.evaluate_vgl(dist_r, dr, myV, myG, myL);
return smooth_function(myCenter.cutoff_buffer, myCenter.cutoff, dist_r);
}
return RealType(-1);
}
//evaluate only VGH
template<typename VV, typename GV, typename HT>
inline RealType evaluate_vgh(const ParticleSet& P, const int iat, VV& myV, GV& myG, HT& myH)
{
  // Nearest ion center; members dist_r/dist_dr receive distance/displacement.
  const auto& table = P.getDistTable(myTableID);
  const int ctr = table.get_first_neighbor(iat, dist_r, dist_dr, P.activePtcl == iat);
  if (ctr < 0)
    abort(); // no neighbor center: broken invariant, cannot continue
  auto& center = AtomicCenters[Super2Prim[ctr]];
  // Outside the atomic cutoff: -1 tells the caller to use the spline result.
  if (!(dist_r < center.cutoff))
    return RealType(-1);
  // dr points from the particle toward the center (sign flip of dist_dr).
  PointType dr(-dist_dr[0], -dist_dr[1], -dist_dr[2]);
  r_image = center.pos + dr;
  center.evaluate_vgh(dist_r, dr, myV, myG, myH);
  // Blending weight for the buffer region.
  return smooth_function(center.cutoff_buffer, center.cutoff, dist_r);
}
// interpolate buffer region, value only
template<typename VV>
inline void interpolate_buffer_v(VV& psi, const VV& psi_AO) const
{
const RealType cone(1);
for (size_t i = 0; i < psi.size(); i++)
psi[i] = psi_AO[i] * f + psi[i] * (cone - f);
}
// interpolate buffer region, value, gradients and laplacian
// Blend value/gradient/Laplacian of the atomic-orbital (psi_AO, ...) and
// spline (psi, ...) representations inside the buffer region, using the
// smoothing weight f and its radial derivatives df_dr, d2f_dr2 (members set
// by smooth_function; dist_r/dist_dr are the distance/displacement members).
// The update order d2psi -> dpsi -> psi matters: each line consumes the
// not-yet-blended values of the lower derivatives.
template<typename VV, typename GV>
inline void interpolate_buffer_vgl(VV& psi, GV& dpsi, VV& d2psi, const VV& psi_AO, const GV& dpsi_AO, const VV& d2psi_AO) const
{
const RealType cone(1), ctwo(2);
const RealType rinv(1.0 / dist_r);
if(smooth_scheme == smoothing_schemes::CONSISTENT)
for (size_t i = 0; i < psi.size(); i++)
{ // psi, dpsi, d2psi are all consistent
// Full product-rule expansion: cross terms keep the Laplacian consistent
// with the blended value and gradient.
d2psi[i] = d2psi_AO[i] * f + d2psi[i] * (cone - f) +
df_dr * rinv * ctwo * dot(dpsi[i] - dpsi_AO[i], dist_dr) +
(psi_AO[i] - psi[i]) * (d2f_dr2 + ctwo * rinv * df_dr);
dpsi[i] = dpsi_AO[i] * f + dpsi[i] * (cone - f) +
df_dr * rinv * dist_dr * (psi[i] - psi_AO[i]);
psi[i] = psi_AO[i] * f + psi[i] * (cone - f);
}
else if(smooth_scheme == smoothing_schemes::SMOOTHALL)
// Plain linear blend of each quantity; derivatives of f are ignored.
for (size_t i = 0; i < psi.size(); i++)
{
d2psi[i] = d2psi_AO[i] * f + d2psi[i] * (cone - f);
dpsi[i] = dpsi_AO[i] * f + dpsi[i] * (cone - f);
psi[i] = psi_AO[i] * f + psi[i] * (cone - f);
}
else if(smooth_scheme == smoothing_schemes::SMOOTHPARTIAL)
for (size_t i = 0; i < psi.size(); i++)
{ // dpsi, d2psi are consistent but psi is not.
d2psi[i] = d2psi_AO[i] * f + d2psi[i] * (cone - f) +
df_dr * rinv * ctwo * dot(dpsi[i] - dpsi_AO[i], dist_dr);
dpsi[i] = dpsi_AO[i] * f + dpsi[i] * (cone - f);
psi[i] = psi_AO[i] * f + psi[i] * (cone - f);
}
else
throw std::runtime_error("Unknown smooth scheme!");
}
// Compute the smoothing weight for radius r between cutoff_buffer and
// cutoff. Side effects: members f, df_dr and d2f_dr2 receive the weight and
// its first/second radial derivatives (chain-rule `scale` factors applied),
// for later use by the interpolate_buffer_* methods.
inline RealType smooth_function(const ST& cutoff_buffer, const ST& cutoff, const RealType r)
{
const RealType cone(1);
// Fully inside the buffer: pure atomic-orbital region, weight 1.
// NOTE(review): df_dr/d2f_dr2 are left untouched on this path — confirm
// callers never consume them when 1 is returned.
if (r < cutoff_buffer)
return cone;
// Map r from [cutoff_buffer, cutoff] onto x in [0, 1].
const RealType scale = cone / (cutoff - cutoff_buffer);
const RealType x = (r - cutoff_buffer) * scale;
f = smoothing(smooth_func_id, x, df_dr, d2f_dr2);
df_dr *= scale;
d2f_dr2 *= scale*scale;
return f;
}
};
} // namespace qmcplusplus
#endif
|
relic_cp_rsa.c | /*
* RELIC is an Efficient LIbrary for Cryptography
* Copyright (c) 2009 RELIC Authors
*
* This file is part of RELIC. RELIC is legal property of its developers,
* whose names are not listed here. Please refer to the COPYRIGHT file
* for contact information.
*
* RELIC is free software; you can redistribute it and/or modify it under the
* terms of the version 2.1 (or later) of the GNU Lesser General Public License
* as published by the Free Software Foundation; or version 2.0 of the Apache
* License as published by the Apache Software Foundation. See the LICENSE files
* for more details.
*
* RELIC is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
* A PARTICULAR PURPOSE. See the LICENSE files for more details.
*
* You should have received a copy of the GNU Lesser General Public or the
* Apache License along with RELIC. If not, see <https://www.gnu.org/licenses/>
* or <https://www.apache.org/licenses/>.
*/
/**
* @file
*
* Implementation of the RSA cryptosystem.
*
* @ingroup cp
*/
#include <string.h>
#include "relic_core.h"
#include "relic_conf.h"
#include "relic_rand.h"
#include "relic_bn.h"
#include "relic_util.h"
#include "relic_cp.h"
#include "relic_md.h"
#include "relic_multi.h"
/*============================================================================*/
/* Private definitions */
/*============================================================================*/
/**
* Length of chosen padding scheme.
*/
#if CP_RSAPD == PKCS1
#define RSA_PAD_LEN (11)
#elif CP_RSAPD == PKCS2
#define RSA_PAD_LEN (2 * RLC_MD_LEN + 2)
#else
#define RSA_PAD_LEN (2)
#endif
/**
* Identifier for encrypted messages.
*/
#define RSA_PUB (02)
/**
* Identifier for signed messages.
*/
#define RSA_PRV (01)
/**
* Byte used as padding unit.
*/
#define RSA_PAD (0xFF)
/**
* Byte used as padding unit in PSS signatures.
*/
#define RSA_PSS (0xBC)
/**
* Identifier for encryption.
*/
#define RSA_ENC 1
/**
* Identifier for decryption.
*/
#define RSA_DEC 2
/**
* Identifier for signature.
*/
#define RSA_SIG 3
/**
* Identifier for verification.
*/
#define RSA_VER 4
/**
* Identifier for second encryption step.
*/
#define RSA_ENC_FIN 5
/**
 * Identifier for second signing step.
*/
#define RSA_SIG_FIN 6
/**
* Identifier for signature of a precomputed hash.
*/
#define RSA_SIG_HASH 7
/**
* Identifier for verification of a precomputed hash.
*/
#define RSA_VER_HASH 8
#if CP_RSAPD == BASIC
/**
* Applies or removes simple encryption padding.
*
* @param[out] m - the buffer to pad.
* @param[out] p_len - the number of added pad bytes.
* @param[in] m_len - the message length in bytes.
* @param[in] k_len - the key length in bytes.
* @param[in] operation - flag to indicate the operation type.
* @return RLC_ERR if errors occurred, RLC_OK otherwise.
*/
static int pad_basic(bn_t m, int *p_len, int m_len, int k_len, int operation) {
	uint8_t pad = 0;
	int result = RLC_ERR;
	bn_t t;

	RLC_TRY {
		bn_null(t);
		bn_new(t);

		switch (operation) {
			case RSA_ENC:
			case RSA_SIG:
			case RSA_SIG_HASH:
				/* EB = 00 | FF | D. */
				/* The leading 00 byte is implicit: shifting zero keeps the
				 * value below the modulus; then append the FF marker. */
				bn_zero(m);
				bn_lsh(m, m, 8);
				bn_add_dig(m, m, RSA_PAD);
				/* Make room for the real message. */
				bn_lsh(m, m, m_len * 8);
				result = RLC_OK;
				break;
			case RSA_DEC:
			case RSA_VER:
			case RSA_VER_HASH:
				/* EB = 00 | FF | D. */
				/* Unpadding: require a zero top byte, then scan down past
				 * zero bytes until the FF marker is found.
				 * NOTE(review): this scan is data-dependent (not constant
				 * time) — acceptable only for the BASIC (toy) padding. */
				m_len = k_len - 1;
				bn_rsh(t, m, 8 * m_len);
				if (bn_is_zero(t)) {
					*p_len = 1;
					do {
						(*p_len)++;
						m_len--;
						bn_rsh(t, m, 8 * m_len);
						pad = (uint8_t)t->dp[0];
					} while (pad == 0 && m_len > 0);
					if (pad == RSA_PAD) {
						result = RLC_OK;
					}
					/* Keep only the message bytes below the padding. */
					bn_mod_2b(m, m, (k_len - *p_len) * 8);
				}
				break;
		}
	}
	RLC_CATCH_ANY {
		result = RLC_ERR;
	}
	RLC_FINALLY {
		bn_free(t);
	}
	return result;
}
#endif
#if CP_RSAPD == PKCS1
/**
* ASN.1 identifier of the hash function SHA-224.
*/
static const uint8_t sh224_id[] =
{ 0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65,
0x03, 0x04, 0x02, 0x04, 0x05, 0x00, 0x04, 0x1c };
/**
* ASN.1 identifier of the hash function SHA-256.
*/
static const uint8_t sh256_id[] =
{ 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65,
0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20 };
/**
* ASN.1 identifier of the hash function SHA-384.
*/
static const uint8_t sh384_id[] =
{ 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65,
0x03, 0x04, 0x02, 0x02, 0x05, 0x00, 0x04, 0x30 };
/**
* ASN.1 identifier of the hash function SHA-512.
*/
static const uint8_t sh512_id[] =
{ 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65,
0x03, 0x04, 0x02, 0x03, 0x05, 0x00, 0x04, 0x40 };
/**
* Returns a pointer to the ASN.1 identifier of a hash function according to the
* PKCS#1 v1.5 padding standard.
*
* @param[in] md - the hash function.
* @param[in, out] len - the length of the identifier.
* @return The pointer to the hash function identifier.
*/
static uint8_t *hash_id(int md, int *len) {
	/* Resolve the PKCS#1 v1.5 ASN.1 DigestInfo prefix for hash `md`;
	 * *len receives the prefix length. Throws ERR_NO_VALID (and yields
	 * NULL) for unsupported hashes. */
	const uint8_t *id = NULL;

	if (md == SH224) {
		*len = sizeof(sh224_id);
		id = sh224_id;
	} else if (md == SH256) {
		*len = sizeof(sh256_id);
		id = sh256_id;
	} else if (md == SH384) {
		*len = sizeof(sh384_id);
		id = sh384_id;
	} else if (md == SH512) {
		*len = sizeof(sh512_id);
		id = sh512_id;
	} else {
		RLC_THROW(ERR_NO_VALID);
	}
	return (uint8_t *)id;
}
/**
* Applies or removes a PKCS#1 v1.5 encryption padding.
*
* @param[out] m - the buffer to pad.
* @param[out] p_len - the number of added pad bytes.
* @param[in] m_len - the message length in bytes.
* @param[in] k_len - the key length in bytes.
* @param[in] operation - flag to indicate the operation type.
* @return RLC_ERR if errors occurred, RLC_OK otherwise.
*/
static int pad_pkcs1(bn_t m, int *p_len, int m_len, int k_len, int operation) {
	uint8_t *id, pad = 0;
	int len, result = RLC_ERR;
	bn_t t;

	bn_null(t);

	RLC_TRY {
		bn_new(t);
		switch (operation) {
			case RSA_ENC:
				/* EB = 00 | 02 | PS | 00 | D. */
				/* PS is random and never zero, so the 00 separator before
				 * the data is unambiguous. */
				bn_zero(m);
				bn_lsh(m, m, 8);
				bn_add_dig(m, m, RSA_PUB);
				*p_len = k_len - 3 - m_len;
				for (int i = 0; i < *p_len; i++) {
					bn_lsh(m, m, 8);
					do {
						rand_bytes(&pad, 1);
					} while (pad == 0);
					bn_add_dig(m, m, pad);
				}
				bn_lsh(m, m, 8);
				bn_add_dig(m, m, 0);
				/* Make room for the real message. */
				bn_lsh(m, m, m_len * 8);
				result = RLC_OK;
				break;
			case RSA_DEC:
				/* Strip EB = 00 | 02 | PS | 00 | D.
				 * NOTE(review): the byte-scan is data-dependent, a classic
				 * Bleichenbacher-oracle surface — callers must not expose
				 * timing/result distinctions. */
				m_len = k_len - 1;
				bn_rsh(t, m, 8 * m_len);
				if (bn_is_zero(t)) {
					*p_len = m_len;
					m_len--;
					bn_rsh(t, m, 8 * m_len);
					pad = (uint8_t)t->dp[0];
					if (pad == RSA_PUB) {
						/* Skip the random PS until the 00 separator. */
						do {
							m_len--;
							bn_rsh(t, m, 8 * m_len);
							pad = (uint8_t)t->dp[0];
						} while (pad != 0 && m_len > 0);
						/* Remove padding and trailing zero. */
						*p_len -= (m_len - 1);
						bn_mod_2b(m, m, (k_len - *p_len) * 8);
						result = (m_len > 0 ? RLC_OK : RLC_ERR);
					}
				}
				break;
			case RSA_SIG:
				/* EB = 00 | 01 | PS | 00 | D, with D = DigestInfo | H(M). */
				id = hash_id(MD_MAP, &len);
				bn_zero(m);
				bn_lsh(m, m, 8);
				bn_add_dig(m, m, RSA_PRV);
				*p_len = k_len - 3 - m_len - len;
				for (int i = 0; i < *p_len; i++) {
					bn_lsh(m, m, 8);
					bn_add_dig(m, m, RSA_PAD);
				}
				bn_lsh(m, m, 8);
				bn_add_dig(m, m, 0);
				/* Append the ASN.1 hash identifier before the digest. */
				bn_lsh(m, m, 8 * len);
				bn_read_bin(t, id, len);
				bn_add(m, m, t);
				/* Make room for the real message. */
				bn_lsh(m, m, m_len * 8);
				result = RLC_OK;
				break;
			case RSA_SIG_HASH:
				/* EB = 00 | 01 | PS | 00 | D — caller supplies a raw digest,
				 * so no DigestInfo prefix is inserted. */
				bn_zero(m);
				bn_lsh(m, m, 8);
				bn_add_dig(m, m, RSA_PRV);
				*p_len = k_len - 3 - m_len;
				for (int i = 0; i < *p_len; i++) {
					bn_lsh(m, m, 8);
					bn_add_dig(m, m, RSA_PAD);
				}
				bn_lsh(m, m, 8);
				bn_add_dig(m, m, 0);
				/* Make room for the real message. */
				bn_lsh(m, m, m_len * 8);
				result = RLC_OK;
				break;
			case RSA_VER:
				m_len = k_len - 1;
				bn_rsh(t, m, 8 * m_len);
				if (bn_is_zero(t)) {
					m_len--;
					bn_rsh(t, m, 8 * m_len);
					pad = (uint8_t)t->dp[0];
					if (pad == RSA_PRV) {
						/* counter tracks PS length; at least 8 FF bytes are
						 * required below, rejecting short-pad forgeries. */
						int counter = 0;
						do {
							counter++;
							m_len--;
							bn_rsh(t, m, 8 * m_len);
							pad = (uint8_t)t->dp[0];
						} while (pad == RSA_PAD && m_len > 0);
						/* Remove padding and trailing zero. */
						id = hash_id(MD_MAP, &len);
						m_len -= len;
						bn_rsh(t, m, m_len * 8);
						/* Constant-time compare of the DigestInfo prefix. */
						int r = 0;
						for (int i = 0; i < len; i++) {
							pad = (uint8_t)t->dp[0];
							r |= pad ^ id[len - i - 1];
							bn_rsh(t, t, 8);
						}
						*p_len = k_len - m_len;
						bn_mod_2b(m, m, m_len * 8);
						if (r == 0 && m_len > 0 && counter >= 8) {
							result = RLC_OK;
						}
					}
				}
				break;
			case RSA_VER_HASH:
				/* Same as RSA_VER but without the DigestInfo check. */
				m_len = k_len - 1;
				bn_rsh(t, m, 8 * m_len);
				if (bn_is_zero(t)) {
					m_len--;
					bn_rsh(t, m, 8 * m_len);
					pad = (uint8_t)t->dp[0];
					if (pad == RSA_PRV) {
						int counter = 0;
						do {
							counter++;
							m_len--;
							bn_rsh(t, m, 8 * m_len);
							pad = (uint8_t)t->dp[0];
						} while (pad == RSA_PAD && m_len > 0);
						/* Remove padding and trailing zero. */
						*p_len = k_len - m_len;
						bn_mod_2b(m, m, m_len * 8);
						if (m_len > 0 && counter >= 8) {
							result = RLC_OK;
						}
					}
				}
				break;
		}
	}
	RLC_CATCH_ANY {
		result = RLC_ERR;
	}
	RLC_FINALLY {
		bn_free(t);
	}
	return result;
}
#endif
#if CP_RSAPD == PKCS2
/**
* Applies or removes a PKCS#1 v2.1 encryption padding.
*
* @param[out] m - the buffer to pad.
* @param[out] p_len - the number of added pad bytes.
* @param[in] m_len - the message length in bytes.
* @param[in] k_len - the key length in bytes.
* @param[in] operation - flag to indicate the operation type.
* @return RLC_ERR if errors occurred, RLC_OK otherwise.
*/
static int pad_pkcs2(bn_t m, int *p_len, int m_len, int k_len, int operation) {
	uint8_t pad, h1[RLC_MD_LEN], h2[RLC_MD_LEN];
	/* MSVC does not allow dynamic stack arrays */
	/* NOTE(review): the RLC_ALLOCA result is not NULL-checked before use —
	 * confirm RLC_ALLOCA cannot fail (alloca) in all build configs. */
	uint8_t *mask = RLC_ALLOCA(uint8_t, k_len);
	int result = RLC_ERR;
	bn_t t;

	bn_null(t);

	RLC_TRY {
		bn_new(t);
		switch (operation) {
			case RSA_ENC:
				/* DB = lHash | PS | 01 | D. */
				/* lHash = H("") since no label is supported here. */
				md_map(h1, NULL, 0);
				bn_read_bin(m, h1, RLC_MD_LEN);
				*p_len = k_len - 2 * RLC_MD_LEN - 2 - m_len;
				bn_lsh(m, m, *p_len * 8);
				bn_lsh(m, m, 8);
				bn_add_dig(m, m, 0x01);
				/* Make room for the real message. */
				bn_lsh(m, m, m_len * 8);
				result = RLC_OK;
				break;
			case RSA_ENC_FIN:
				/* EB = 00 | maskedSeed | maskedDB. */
				/* OAEP: mask DB with MGF(seed), then mask the seed with
				 * MGF(maskedDB). */
				rand_bytes(h1, RLC_MD_LEN);
				md_mgf(mask, k_len - RLC_MD_LEN - 1, h1, RLC_MD_LEN);
				bn_read_bin(t, mask, k_len - RLC_MD_LEN - 1);
				for (int i = 0; i < t->used; i++) {
					m->dp[i] ^= t->dp[i];
				}
				bn_write_bin(mask, k_len - RLC_MD_LEN - 1, m);
				md_mgf(h2, RLC_MD_LEN, mask, k_len - RLC_MD_LEN - 1);
				for (int i = 0; i < RLC_MD_LEN; i++) {
					h1[i] ^= h2[i];
				}
				bn_read_bin(t, h1, RLC_MD_LEN);
				bn_lsh(t, t, 8 * (k_len - RLC_MD_LEN - 1));
				bn_add(t, t, m);
				bn_copy(m, t);
				result = RLC_OK;
				break;
			case RSA_DEC:
				/* Undo OAEP: unmask the seed, then unmask DB, then check
				 * lHash and locate the 01 separator. */
				m_len = k_len - 1;
				bn_rsh(t, m, 8 * m_len);
				if (bn_is_zero(t)) {
					m_len -= RLC_MD_LEN;
					bn_rsh(t, m, 8 * m_len);
					bn_write_bin(h1, RLC_MD_LEN, t);
					bn_mod_2b(m, m, 8 * m_len);
					bn_write_bin(mask, m_len, m);
					md_mgf(h2, RLC_MD_LEN, mask, m_len);
					for (int i = 0; i < RLC_MD_LEN; i++) {
						h1[i] ^= h2[i];
					}
					md_mgf(mask, k_len - RLC_MD_LEN - 1, h1, RLC_MD_LEN);
					bn_read_bin(t, mask, k_len - RLC_MD_LEN - 1);
					for (int i = 0; i < t->used; i++) {
						m->dp[i] ^= t->dp[i];
					}
					m_len -= RLC_MD_LEN;
					bn_rsh(t, m, 8 * m_len);
					bn_write_bin(h2, RLC_MD_LEN, t);
					md_map(h1, NULL, 0);
					/* pad accumulates the lHash comparison (0 iff equal). */
					pad = 0;
					for (int i = 0; i < RLC_MD_LEN; i++) {
						pad |= h1[i] ^ h2[i];
					}
					bn_mod_2b(m, m, 8 * m_len);
					*p_len = bn_size_bin(m);
					(*p_len)--;
					bn_rsh(t, m, *p_len * 8);
					if (pad == 0 && bn_cmp_dig(t, 1) == RLC_EQ) {
						result = RLC_OK;
					}
					bn_mod_2b(m, m, *p_len * 8);
					*p_len = k_len - *p_len;
				}
				break;
			case RSA_SIG:
			case RSA_SIG_HASH:
				/* M' = 00 00 00 00 00 00 00 00 | H(M). */
				bn_zero(m);
				bn_lsh(m, m, 64);
				/* Make room for the real message. */
				bn_lsh(m, m, RLC_MD_LEN * 8);
				result = RLC_OK;
				break;
			case RSA_SIG_FIN:
				/* PSS encode: EM = maskedDB | H | BC, with empty salt. */
				memset(mask, 0, 8);
				bn_write_bin(mask + 8, RLC_MD_LEN, m);
				md_map(h1, mask, RLC_MD_LEN + 8);
				bn_read_bin(m, h1, RLC_MD_LEN);
				md_mgf(mask, k_len - RLC_MD_LEN - 1, h1, RLC_MD_LEN);
				bn_read_bin(t, mask, k_len - RLC_MD_LEN - 1);
				/* DB = PS | 01, so flip the low bit of the mask. */
				t->dp[0] ^= 0x01;
				/* m_len is now the size in bits of the modulus. */
				bn_lsh(t, t, 8 * RLC_MD_LEN);
				bn_add(m, t, m);
				bn_lsh(m, m, 8);
				bn_add_dig(m, m, RSA_PSS);
				/* Clear the bits above modlen-1 so EM < n. */
				for (int i = m_len - 1; i < 8 * k_len; i++) {
					bn_set_bit(m, i, 0);
				}
				result = RLC_OK;
				break;
			case RSA_VER:
			case RSA_VER_HASH:
				/* PSS verify: check trailer BC, unmask DB, and return H in
				 * m for the caller to compare. m_len holds modulus bits. */
				bn_mod_2b(t, m, 8);
				pad = (uint8_t)t->dp[0];
				if (pad == RSA_PSS) {
					int r = 1;
					for (int i = m_len; i < 8 * k_len; i++) {
						if (bn_get_bit(m, i) != 0) {
							r = 0;
						}
					}
					bn_rsh(m, m, 8);
					bn_mod_2b(t, m, 8 * RLC_MD_LEN);
					bn_write_bin(h2, RLC_MD_LEN, t);
					bn_rsh(m, m, 8 * RLC_MD_LEN);
					/* h1 also gets H (still in t): H seeds the MGF that
					 * unmasks DB, while h2 keeps a copy for the caller. */
					bn_write_bin(h1, RLC_MD_LEN, t);
					md_mgf(mask, k_len - RLC_MD_LEN - 1, h1, RLC_MD_LEN);
					bn_read_bin(t, mask, k_len - RLC_MD_LEN - 1);
					for (int i = 0; i < t->used; i++) {
						m->dp[i] ^= t->dp[i];
					}
					m->dp[0] ^= 0x01;
					for (int i = m_len - 1; i < 8 * k_len; i++) {
						bn_set_bit(m, i - ((RLC_MD_LEN + 1) * 8), 0);
					}
					/* After unmasking, DB must be all zero (empty salt). */
					if (r == 1 && bn_is_zero(m)) {
						result = RLC_OK;
					}
					bn_read_bin(m, h2, RLC_MD_LEN);
					*p_len = k_len - RLC_MD_LEN;
				}
				break;
		}
	}
	RLC_CATCH_ANY {
		result = RLC_ERR;
	}
	RLC_FINALLY {
		bn_free(t);
	}
	RLC_FREE(mask);
	return result;
}
#endif
/*============================================================================*/
/* Public definitions */
/*============================================================================*/
/*
 * Generate an RSA key pair with |n| = bits and e = 65537.
 * Fix: result was initialized to RLC_OK, so when gcd(e, phi(n)) != 1 the
 * function fell through without computing a valid d and still reported
 * success. Initialize to RLC_ERR and report success only on the path that
 * actually produces the inverse.
 */
int cp_rsa_gen(rsa_t pub, rsa_t prv, int bits) {
	bn_t t, r;
	int result = RLC_ERR;

	if (pub == NULL || prv == NULL || bits == 0) {
		return RLC_ERR;
	}

	bn_null(t);
	bn_null(r);

	RLC_TRY {
		bn_new(t);
		bn_new(r);

		/* Generate different primes p and q. */
		do {
			bn_gen_prime(prv->crt->p, bits / 2);
			bn_gen_prime(prv->crt->q, bits / 2);
		} while (bn_cmp(prv->crt->p, prv->crt->q) == RLC_EQ);

		/* Swap p and q so that p is smaller. */
		if (bn_cmp(prv->crt->p, prv->crt->q) != RLC_LT) {
			bn_copy(t, prv->crt->p);
			bn_copy(prv->crt->p, prv->crt->q);
			bn_copy(prv->crt->q, t);
		}
		/* n = pq. */
		bn_mul(pub->crt->n, prv->crt->p, prv->crt->q);
		bn_copy(prv->crt->n, pub->crt->n);
		/* Work with p-1 and q-1 in place; restored on success below. */
		bn_sub_dig(prv->crt->p, prv->crt->p, 1);
		bn_sub_dig(prv->crt->q, prv->crt->q, 1);
		/* phi(n) = (p - 1)(q - 1). */
		bn_mul(t, prv->crt->p, prv->crt->q);

		/* Fixed public exponent e = 2^16 + 1 = 65537. */
		bn_set_2b(pub->e, 16);
		bn_add_dig(pub->e, pub->e, 1);

#if !defined(CP_CRT)
		/* d = e^(-1) mod phi(n). */
		bn_gcd_ext(r, prv->d, NULL, pub->e, t);
		if (bn_sign(prv->d) == RLC_NEG) {
			bn_add(prv->d, prv->d, t);
		}
		/* The inverse exists only when gcd(e, phi(n)) = 1. */
		if (bn_cmp_dig(r, 1) == RLC_EQ) {
			/* Restore p and q. */
			bn_add_dig(prv->crt->p, prv->crt->p, 1);
			bn_add_dig(prv->crt->q, prv->crt->q, 1);
			result = RLC_OK;
		}
#else
		/* d = e^(-1) mod phi(n). */
		bn_gcd_ext(r, prv->d, NULL, pub->e, t);
		if (bn_sign(prv->d) == RLC_NEG) {
			bn_add(prv->d, prv->d, t);
		}
		if (bn_cmp_dig(r, 1) == RLC_EQ) {
			/* dP = d mod (p - 1). */
			bn_mod(prv->crt->dp, prv->d, prv->crt->p);
			/* dQ = d mod (q - 1). */
			bn_mod(prv->crt->dq, prv->d, prv->crt->q);
			/* Restore p and q. */
			bn_add_dig(prv->crt->p, prv->crt->p, 1);
			bn_add_dig(prv->crt->q, prv->crt->q, 1);
			/* qInv = q^(-1) mod p. */
			bn_mod_inv(prv->crt->qi, prv->crt->q, prv->crt->p);
			result = RLC_OK;
		}
#endif /* CP_CRT */
	}
	RLC_CATCH_ANY {
		result = RLC_ERR;
	}
	RLC_FINALLY {
		bn_free(t);
		bn_free(r);
	}
	return result;
}
/*
 * RSA encryption of in[0..in_len) into out, applying the configured padding.
 * Fix: the original computed bn_size_bin(pub->crt->n) BEFORE the
 * `pub == NULL` test, dereferencing a possibly-NULL key handle.
 */
int cp_rsa_enc(uint8_t *out, int *out_len, uint8_t *in, int in_len, rsa_t pub) {
	bn_t m, eb;
	int size, pad_len, result = RLC_OK;

	bn_null(m);
	bn_null(eb);

	/* Validate the key handle before touching it. */
	if (pub == NULL || in_len <= 0) {
		return RLC_ERR;
	}
	size = bn_size_bin(pub->crt->n);
	/* The message plus mandatory padding must fit in one modulus block. */
	if (in_len > (size - RSA_PAD_LEN)) {
		return RLC_ERR;
	}

	RLC_TRY {
		bn_new(m);
		bn_new(eb);
		bn_zero(m);
		bn_zero(eb);

#if CP_RSAPD == BASIC
		if (pad_basic(eb, &pad_len, in_len, size, RSA_ENC) == RLC_OK) {
#elif CP_RSAPD == PKCS1
		if (pad_pkcs1(eb, &pad_len, in_len, size, RSA_ENC) == RLC_OK) {
#elif CP_RSAPD == PKCS2
		if (pad_pkcs2(eb, &pad_len, in_len, size, RSA_ENC) == RLC_OK) {
#endif
			/* Drop the message into the room left by the padding step. */
			bn_read_bin(m, in, in_len);
			bn_add(eb, eb, m);

#if CP_RSAPD == PKCS2
			/* OAEP needs a second pass once the message is in place. */
			pad_pkcs2(eb, &pad_len, in_len, size, RSA_ENC_FIN);
#endif
			/* c = eb^e mod n. */
			bn_mxp(eb, eb, pub->e, pub->crt->n);

			if (size <= *out_len) {
				*out_len = size;
				memset(out, 0, *out_len);
				bn_write_bin(out, size, eb);
			} else {
				result = RLC_ERR;
			}
		} else {
			result = RLC_ERR;
		}
	}
	RLC_CATCH_ANY {
		result = RLC_ERR;
	}
	RLC_FINALLY {
		bn_free(m);
		bn_free(eb);
	}
	return result;
}
/*
 * RSA decryption of in[0..in_len) into out, removing the configured padding.
 * Fix: the original computed bn_size_bin(prv->crt->n) BEFORE the
 * `prv == NULL` test, dereferencing a possibly-NULL key handle.
 */
int cp_rsa_dec(uint8_t *out, int *out_len, uint8_t *in, int in_len, rsa_t prv) {
	bn_t m, eb;
	int size, pad_len, result = RLC_OK;

	bn_null(m);
	bn_null(eb);

	/* Validate the key handle before touching it. */
	if (prv == NULL) {
		return RLC_ERR;
	}
	size = bn_size_bin(prv->crt->n);
	/* The ciphertext must be exactly one modulus wide. */
	if (in_len != size || in_len < RSA_PAD_LEN) {
		return RLC_ERR;
	}

	RLC_TRY {
		bn_new(m);
		bn_new(eb);

		bn_read_bin(eb, in, in_len);

#if !defined(CP_CRT)
		/* Plain private exponentiation: m = c^d mod n. */
		bn_mxp(eb, eb, prv->d, prv->crt->n);
#else
		bn_copy(m, eb);

#if MULTI == OPENMP
		omp_set_num_threads(CORES);
#pragma omp parallel copyin(core_ctx) firstprivate(prv)
		{
#pragma omp sections
			{
#pragma omp section
				{
#endif
					/* m1 = c^dP mod p. */
					bn_mxp(eb, eb, prv->crt->dp, prv->crt->p);
#if MULTI == OPENMP
				}
#pragma omp section
				{
#endif
					/* m2 = c^dQ mod q. */
					bn_mxp(m, m, prv->crt->dq, prv->crt->q);
#if MULTI == OPENMP
				}
			}
		}
#endif
		/* CRT recombination: m = m2 + q * (qInv * (m1 - m2) mod p). */
		/* m1 = m1 - m2 mod p. */
		bn_sub(eb, eb, m);
		while (bn_sign(eb) == RLC_NEG) {
			bn_add(eb, eb, prv->crt->p);
		}
		bn_mod(eb, eb, prv->crt->p);
		/* m1 = qInv(m1 - m2) mod p. */
		bn_mul(eb, eb, prv->crt->qi);
		bn_mod(eb, eb, prv->crt->p);
		/* m = m2 + m1 * q. */
		bn_mul(eb, eb, prv->crt->q);
		bn_add(eb, eb, m);
#endif /* CP_CRT */

		/* A valid plaintext block is always smaller than the modulus. */
		if (bn_cmp(eb, prv->crt->n) != RLC_LT) {
			result = RLC_ERR;
		}

#if CP_RSAPD == BASIC
		if (pad_basic(eb, &pad_len, in_len, size, RSA_DEC) == RLC_OK) {
#elif CP_RSAPD == PKCS1
		if (pad_pkcs1(eb, &pad_len, in_len, size, RSA_DEC) == RLC_OK) {
#elif CP_RSAPD == PKCS2
		if (pad_pkcs2(eb, &pad_len, in_len, size, RSA_DEC) == RLC_OK) {
#endif
			size = size - pad_len;
			if (size <= *out_len) {
				memset(out, 0, size);
				bn_write_bin(out, size, eb);
				*out_len = size;
			} else {
				result = RLC_ERR;
			}
		} else {
			result = RLC_ERR;
		}
	}
	RLC_CATCH_ANY {
		result = RLC_ERR;
	}
	RLC_FINALLY {
		bn_free(m);
		bn_free(eb);
	}
	return result;
}
/* Sign msg (or, when hash != 0, a precomputed digest) with the private key,
 * writing the signature to sig/sig_len. Returns RLC_OK or RLC_ERR. */
int cp_rsa_sig(uint8_t *sig, int *sig_len, uint8_t *msg, int msg_len, int hash, rsa_t prv) {
	bn_t m, eb;
	int pad_len, size, result = RLC_OK;
	uint8_t h[RLC_MD_LEN];

	if (prv == NULL || msg_len < 0) {
		return RLC_ERR;
	}

	/* hash == 0: sign H(msg); hash != 0: msg already holds the digest. */
	pad_len = (!hash ? RLC_MD_LEN : msg_len);

#if CP_RSAPD == PKCS2
	/* PSS: the encoded message is one bit shorter than the modulus. */
	size = bn_bits(prv->crt->n) - 1;
	size = (size / 8) + (size % 8 > 0);
	if (pad_len > (size - 2)) {
		return RLC_ERR;
	}
#else
	size = bn_size_bin(prv->crt->n);
	if (pad_len > (size - RSA_PAD_LEN)) {
		return RLC_ERR;
	}
#endif

	bn_null(m);
	bn_null(eb);

	RLC_TRY {
		bn_new(m);
		bn_new(eb);
		bn_zero(m);
		bn_zero(eb);

		int operation = (!hash ? RSA_SIG : RSA_SIG_HASH);

#if CP_RSAPD == BASIC
		if (pad_basic(eb, &pad_len, pad_len, size, operation) == RLC_OK) {
#elif CP_RSAPD == PKCS1
		if (pad_pkcs1(eb, &pad_len, pad_len, size, operation) == RLC_OK) {
#elif CP_RSAPD == PKCS2
		if (pad_pkcs2(eb, &pad_len, pad_len, size, operation) == RLC_OK) {
#endif
			/* Place the digest in the room the padding step left. */
			if (!hash) {
				md_map(h, msg, msg_len);
				bn_read_bin(m, h, RLC_MD_LEN);
				bn_add(eb, eb, m);
			} else {
				bn_read_bin(m, msg, msg_len);
				bn_add(eb, eb, m);
			}

#if CP_RSAPD == PKCS2
			/* PSS needs a second encoding pass; pad_pkcs2 interprets the
			 * third argument as the modulus size in bits here. */
			pad_pkcs2(eb, &pad_len, bn_bits(prv->crt->n), size, RSA_SIG_FIN);
#endif

			bn_copy(m, eb);

#if !defined(CP_CRT)
			/* s = eb^d mod n. */
			bn_mxp(eb, eb, prv->d, prv->crt->n);
#else /* CP_CRT */
#if MULTI == OPENMP
			omp_set_num_threads(CORES);
#pragma omp parallel copyin(core_ctx) firstprivate(prv)
			{
#pragma omp sections
				{
#pragma omp section
					{
#endif
						/* m1 = c^dP mod p. */
						bn_mxp(eb, eb, prv->crt->dp, prv->crt->p);
#if MULTI == OPENMP
					}
#pragma omp section
					{
#endif
						/* m2 = c^dQ mod q. */
						bn_mxp(m, m, prv->crt->dq, prv->crt->q);
#if MULTI == OPENMP
					}
				}
			}
#endif
			/* CRT recombination: s = m2 + q * (qInv * (m1 - m2) mod p). */
			/* m1 = m1 - m2 mod p. */
			bn_sub(eb, eb, m);
			while (bn_sign(eb) == RLC_NEG) {
				bn_add(eb, eb, prv->crt->p);
			}
			bn_mod(eb, eb, prv->crt->p);
			/* m1 = qInv(m1 - m2) mod p. */
			bn_mul(eb, eb, prv->crt->qi);
			bn_mod(eb, eb, prv->crt->p);
			/* m = m2 + m1 * q. */
			bn_mul(eb, eb, prv->crt->q);
			bn_add(eb, eb, m);
			bn_mod(eb, eb, prv->crt->n);
#endif /* CP_CRT */

			size = bn_size_bin(prv->crt->n);
			if (size <= *sig_len) {
				memset(sig, 0, size);
				bn_write_bin(sig, size, eb);
				*sig_len = size;
			} else {
				result = RLC_ERR;
			}
		} else {
			result = RLC_ERR;
		}
	}
	RLC_CATCH_ANY {
		/* NOTE(review): unlike the other routines this rethrows instead of
		 * setting result = RLC_ERR — confirm the asymmetry is intended. */
		RLC_THROW(ERR_CAUGHT);
	}
	RLC_FINALLY {
		bn_free(m);
		bn_free(eb);
	}
	return result;
}
/*
 * Verify an RSA signature over msg (or a precomputed digest when hash != 0).
 * Returns 1 for a valid signature, 0 otherwise.
 * Fixes: the original allocated h1/h2 before validating pub/msg_len and then
 * returned without freeing them on the `pub == NULL || msg_len < 0` path and
 * on both oversized-pad_len paths, leaking both buffers (and sizing the
 * allocations from an unvalidated, possibly negative msg_len). Validate
 * first, and free on every early exit.
 */
int cp_rsa_ver(uint8_t *sig, int sig_len, uint8_t *msg, int msg_len, int hash, rsa_t pub) {
	bn_t m, eb;
	int size, pad_len, result;
	uint8_t *h1, *h2;

	/* We suppose that the signature is invalid. */
	result = 0;

	/* Validate arguments before allocating, so nothing can leak. */
	if (pub == NULL || msg_len < 0) {
		return 0;
	}

	h1 = RLC_ALLOCA(uint8_t, RLC_MAX(msg_len, RLC_MD_LEN) + 8);
	h2 = RLC_ALLOCA(uint8_t, RLC_MAX(msg_len, RLC_MD_LEN));
	if (h1 == NULL || h2 == NULL) {
		RLC_FREE(h1);
		RLC_FREE(h2);
		return 0;
	}

	/* hash == 0: msg is the message; hash != 0: msg is already a digest. */
	pad_len = (!hash ? RLC_MD_LEN : msg_len);

#if CP_RSAPD == PKCS2
	/* PSS: the encoded message is one bit shorter than the modulus. */
	size = bn_bits(pub->crt->n) - 1;
	if (size % 8 == 0) {
		size = size / 8 - 1;
	} else {
		size = bn_size_bin(pub->crt->n);
	}
	if (pad_len > (size - 2)) {
		RLC_FREE(h1);
		RLC_FREE(h2);
		return 0;
	}
#else
	size = bn_size_bin(pub->crt->n);
	if (pad_len > (size - RSA_PAD_LEN)) {
		RLC_FREE(h1);
		RLC_FREE(h2);
		return 0;
	}
#endif

	bn_null(m);
	bn_null(eb);

	RLC_TRY {
		bn_new(m);
		bn_new(eb);

		/* Recover the encoded message: eb = sig^e mod n. */
		bn_read_bin(eb, sig, sig_len);
		bn_mxp(eb, eb, pub->e, pub->crt->n);

		int operation = (!hash ? RSA_VER : RSA_VER_HASH);

#if CP_RSAPD == BASIC
		if (pad_basic(eb, &pad_len, RLC_MD_LEN, size, operation) == RLC_OK) {
#elif CP_RSAPD == PKCS1
		if (pad_pkcs1(eb, &pad_len, RLC_MD_LEN, size, operation) == RLC_OK) {
#elif CP_RSAPD == PKCS2
		if (pad_pkcs2(eb, &pad_len, bn_bits(pub->crt->n), size, operation) == RLC_OK) {
#endif

#if CP_RSAPD == PKCS2
			/* PSS: recompute H' = H(00..0 | digest) and compare with the H
			 * recovered from the encoded message. */
			memset(h1, 0, 8);
			if (!hash) {
				md_map(h1 + 8, msg, msg_len);
				md_map(h2, h1, RLC_MD_LEN + 8);
				memset(h1, 0, RLC_MD_LEN);
				bn_write_bin(h1, size - pad_len, eb);
				/* Everything went ok, so signature status is changed. */
				result = util_cmp_const(h1, h2, RLC_MD_LEN);
			} else {
				memcpy(h1 + 8, msg, msg_len);
				md_map(h2, h1, RLC_MD_LEN + 8);
				memset(h1, 0, msg_len);
				bn_write_bin(h1, size - pad_len, eb);
				/* Everything went ok, so signature status is changed. */
				result = util_cmp_const(h1, h2, msg_len);
			}
#else
			/* Compare the recovered digest with the expected one in
			 * constant time. */
			memset(h1, 0, RLC_MAX(msg_len, RLC_MD_LEN));
			bn_write_bin(h1, size - pad_len, eb);
			if (!hash) {
				md_map(h2, msg, msg_len);
				/* Everything went ok, so signature status is changed. */
				result = util_cmp_const(h1, h2, RLC_MD_LEN);
			} else {
				/* Everything went ok, so signature status is changed. */
				result = util_cmp_const(h1, msg, msg_len);
			}
#endif
			result = (result == RLC_EQ ? 1 : 0);
		} else {
			result = 0;
		}
	}
	RLC_CATCH_ANY {
		result = 0;
	}
	RLC_FINALLY {
		bn_free(m);
		bn_free(eb);
		RLC_FREE(h1);
		RLC_FREE(h2);
	}
	return result;
}
|
lchol_csc_inspector.h | //
//
#include <cstdio>
#include <vector>
#include <assert.h>
#include<set>
#undef MIN
#define MIN(x,y) ((x) < (y) ? (x) : (y))
#undef MAX
#define MAX(x,y) ((x) > (y) ? (x) : (y))
/*
* Computes the DAG of dependency after simplification
*/
void lchol_csc_inspector(int n, int *prunePtr, int *pruneSet, std::vector<std::vector<int>>& DAG){
#define pruneSet(Out_2,Out_4) pruneSet[Out_4]
#define prunePtr(Out_2) prunePtr[Out_2]
#define prunePtr_(Out_2) prunePtr[Out_2 + 1]
#define s0(Out_2,Out_4,In_2) DAG[In_2].push_back( Out_2 );
 // Omega-generated code.
 // FIX: the original ran this loop under `#pragma omp parallel for`, but
 // distinct t1 iterations can map to the same t3 = pruneSet[t2], so two
 // threads could call push_back on the same DAG[t3] vector concurrently — a
 // data race (std::vector is not thread-safe). Run sequentially; this also
 // keeps the adjacency lists in deterministic, ascending-t1 order.
 for(int t1 = 1; t1 <= n-1; t1++) {
  for(int t2 = prunePtr(t1); t2 <= prunePtr_(t1)-1; t2++) {
   // Keep only valid parents t3 = pruneSet[t2] with 0 <= t3 < t1.
   if (pruneSet(t1,t2) >= 0 && t1 >= pruneSet(t1,t2)+1) {
    int t3=pruneSet(t1,t2);
    s0(t1,t2,t3);
   }
  }
 }
}
/*
* Computes the DAG of dependency after simplification
void lch_csc_inspector(int n, int *prunePtr, int *pruneSet, std::vector<std::set<int>>& DAG){
#define pruneSet(Out_2,Out_4) pruneSet[Out_4]
#define prunePtr(Out_2) prunePtr[Out_2]
#define prunePtr_(Out_2) prunePtr[Out_2 + 1]
#define s0(Out_2,Out_4,In_2) DAG[In_2].insert( Out_2 );
// Omega generated Code Generated
#pragma omp parallel for schedule(auto)
for(int t1 = 1; t1 <= n-1; t1++) {
for(int t2 = prunePtr(t1); t2 <= prunePtr_(t1)-1; t2++) {
if (pruneSet(t1,t2) >= 0 && t1 >= pruneSet(t1,t2)+1) {
int t3=pruneSet(t1,t2);
s0(t1,t2,t3);
}
}
}
}
*/
|
GB_binop__first_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__first_int32
// A.*B function (eWiseMult): GB_AemultB__first_int32
// A*D function (colscale): GB_AxD__first_int32
// D*A function (rowscale): GB_DxB__first_int32
// C+=B function (dense accum): GB_Cdense_accumB__first_int32
// C+=b function (dense accum): GB_Cdense_accumb__first_int32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__first_int32
// C=scalar+B GB_bind1st__first_int32
// C=scalar+B' GB_bind1st_tran__first_int32
// C=A+scalar (none)
// C=A'+scalar (none)
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = aij
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
;
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = x ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FIRST || GxB_NO_INT32 || GxB_NO_FIRST_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, B are all dense; specialized for FIRST over int32
// via the GB_* macros defined earlier in this file (cij = aij).
GrB_Info GB_Cdense_ewise3_noaccum__first_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // Operator compiled out by GxB_NO_* controls: caller falls back to the
    // generic kernel.
    return (GrB_NO_VALUE) ;
    #else
    // The shared template does the work, parameterized by the GB_* macros.
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B (accumulate sparse B into dense C). The template is compiled out
// (#if 0) for this operator, so the function only satisfies the generated
// interface and performs no work.
GrB_Info GB_Cdense_accumB__first_int32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (accumulate a scalar into dense C). As with accumB above, the
// template is compiled out (#if 0) for this operator; the function exists
// only to satisfy the generated interface.
GrB_Info GB_Cdense_accumb__first_int32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale by a diagonal matrix D; with FIRST (GB_BINOP z = x)
// the entries of D only select the pattern.
GrB_Info GB_AxD__first_int32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    // The shared colscale template fills Cx using the GB_* macros above.
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale by a diagonal matrix D; specialized for FIRST over
// int32 via the GB_* macros above (GB_BINOP keeps the first operand).
GrB_Info GB_DxB__first_int32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    // The shared rowscale template fills Cx using the GB_* macros above.
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the set union of patterns,
// specialized for FIRST over int32 (cij = aij where both are present).
GrB_Info GB_AaddB__first_int32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The shared add template does the work, driven by the GB_* macros.
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Auto-generated element-wise "multiply" (set intersection of patterns) with
// the FIRST operator.  Same task/mapping scheme as eWiseAdd above; the loop
// body comes from the included template.
GrB_Info GB_AemultB__first_int32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Auto-generated: Cx [p] = FIRST (x, Bx [p]) == x for all p, so Bx is never
// read.  The stray "; ;" below is the generator's placeholder for fetching
// the (unused) B entry.
GrB_Info GB_bind1st__first_int32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
; ;
// FIRST(x, bij) ignores bij: broadcast the scalar into C
Cx [p] = x ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Disabled (#if 0): FIRST(aij, y) == aij, so bind2nd with FIRST is a plain
// copy of A and is handled elsewhere; the generator emits no named function
// for it (hence "(none)").
#if 0
GrB_Info (none)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t aij = Ax [p] ;
Cx [p] = aij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// Auto-generated: C = FIRST (x, A') == x broadcast over the pattern of A',
// so the cast macro ignores the A entry.
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = x ; \
}
GrB_Info GB_bind1st_tran__first_int32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// Disabled (#if 0): FIRST(aij, y) == aij, so bind2nd-transpose with FIRST is
// just a transpose of A and needs no dedicated kernel.
#if 0
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = aij ; \
}
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
// closes the "#ifndef GBCOMPACT" guard opened at the top of the file
#endif
// vector.h
#ifndef VECTOR_H
#define VECTOR_H
#include <fstream>
#ifdef WINDOWS
#include <string>
#else
#include <cstring>
#endif
#include "../declare_structures.h"
#include "../../timer.h"
/// Class for dense vector
/// Dense vector of elements of type floating_type.  Owns its storage unless
/// constructed from (or pointed at) external data, in which case the caller
/// retains ownership (_externAlloc).  Sizes use the project-wide INTM integer
/// type; BLAS-backed operations delegate to the cblas_* wrappers.
template<typename floating_type> class Vector {
friend class SpMatrix<floating_type>;
friend class Matrix<floating_type>;
friend class SpVector<floating_type>;
public:
typedef floating_type value_type;
typedef floating_type element;
/// Empty constructor
Vector();
/// Constructor. Create a new vector of size n
Vector(INTM n);
/// Constructor with existing data (non-owning view; X is not copied)
Vector(floating_type* X, INTM n);
/// Copy constructor (deep copy; explicit to avoid accidental copies)
explicit Vector<floating_type>(const Vector<floating_type>& vec);
/// Destructor
virtual ~Vector();
/// Accessors
/// Print the vector to std::cout
inline void print(const char* name) const;
/// write the vector to a text file (size on the first line, then values)
inline void dump(const std::string& name) const;
/// returns the index of the largest value
inline INTM max() const;
/// returns the index of the minimum value
inline INTM min() const;
/// returns the maximum value
inline floating_type maxval() const;
/// returns the minimum value
inline floating_type minval() const;
/// returns the index of the value with largest magnitude
inline INTM fmax() const;
/// returns the index of the value with smallest magnitude
inline INTM fmin() const;
/// returns the maximum magnitude
inline floating_type fmaxval() const;
/// returns the minimum magnitude
inline floating_type fminval() const;
/// returns a reference to X[index]
inline floating_type& operator[](const INTM index);
/// returns X[index]
inline floating_type operator[](const INTM index) const;
/// make a copy of x
inline void copy(const Vector<floating_type>& x);
/// make this a non-owning view of x's data (no copy)
inline void copyRef(const Vector<floating_type>& x);
/// returns the size of the vector
inline int n() const { return _n; };
/// returns the size of the vector
inline int size() const { return _n; };
/// returns a modifiable reference of the data, DANGEROUS
inline floating_type* rawX() const { return _X; };
/// change artificially the size of the vector, DANGEROUS
inline void fakeSize(const INTM n) { _n = n; };
/// generate logarithmically spaced values
inline void logspace(const INTM n, const floating_type a, const floating_type b);
/// returns the number of non-zero entries
inline INTM nnz() const;
/// Modifiers
/// Set all values to zero
inline void setZeros();
/// resize the vector
inline void resize(const INTM n, const bool set_zeros = true);
/// change the data of the vector
inline void setPointer(floating_type* X, const INTM n);
inline void setData(floating_type* X, const INTM n) { this->setPointer(X, n); };
inline void refData(const Vector<floating_type>& vec) { this->setPointer(vec.rawX(), vec.n()); };
inline void refSubVec(INTM i, INTM n, Vector<floating_type>& mat) const { mat.setData(_X + i, n); };
//inline void print(const char* name) const;
inline void print(const std::string& name) const;
/// put a random permutation of size n (for integral vectors)
inline void randperm(int n);
/// fill with random integers in [0, n) (for integral vectors)
inline void randi(int n);
/// put random values in the vector (White Gaussian Noise)
inline void setAleat();
/// clear the vector
inline void clear();
/// performs soft-thresholding of the vector
inline void softThrshold(const floating_type nu);
inline void fastSoftThrshold(const floating_type nu);
inline void fastSoftThrshold(Vector<floating_type>& out, const floating_type nu) const;
inline void softThrsholdScal(Vector<floating_type>& out, const floating_type nu, const floating_type s);
inline void hardThrshold(const floating_type nu);
/// A <- max(A, nu) element-wise
inline void thrsmax(const floating_type nu);
/// A <- min(A, nu) element-wise
inline void thrsmin(const floating_type nu);
/// A <- clamp(A, -nu, nu) element-wise
inline void thrsabsmin(const floating_type nu);
/// zero out entries with magnitude below nu
inline void thrshold(const floating_type nu);
/// zero out negative entries
inline void thrsPos();
/// set each value of the vector to val
inline void set(const floating_type val);
inline void setn(const INTM n) { _n = n; }; //DANGEROUS
inline bool alltrue() const;
inline bool allfalse() const;
/// Algebric operations
/// returns ||A||_2
inline floating_type nrm2() const;
/// returns ||A||_2^2
inline floating_type nrm2sq() const;
/// returns A'x
inline floating_type dot(const Vector<floating_type>& x) const;
/// returns A'x, when x is sparse
template <typename I>
inline floating_type dot(const SpVector<floating_type, I>& x) const;
/// A <- A + a*x
inline void add(const Vector<floating_type>& x, const floating_type a = 1.0);
/// A <- A + a*x
template <typename I>
inline void add(const SpVector<floating_type, I>& x, const floating_type a = 1.0);
/// adds a to each value in the vector
inline void add(const floating_type a);
/// A <- b*A + a*x
inline void add_scal(const Vector<floating_type>& x, const floating_type a = 1.0, const floating_type b = 0);
/// A <- b*A + a*x
template <typename I>
inline void add_scal(const SpVector<floating_type, I>& x, const floating_type a = 1.0, const floating_type b = 0);
/// A <- A - x
inline void sub(const Vector<floating_type>& x);
/// A <- A - x (x sparse)
template <typename I>
inline void sub(const SpVector<floating_type, I>& x);
/// A <- A ./ x
inline void div(const Vector<floating_type>& x);
/// A <- x ./ y
inline void div(const Vector<floating_type>& x, const Vector<floating_type>& y);
/// A <- x .^ 2
inline void sqr(const Vector<floating_type>& x);
/// A <- A .^ 2 (in place)
inline void sqr();
/// A <- sqrt(x)
inline void Sqrt(const Vector<floating_type>& x);
/// A <- sqrt(A) (in place)
inline void Sqrt();
/// A <- 1 ./ sqrt(x)
inline void Invsqrt(const Vector<floating_type>& x);
/// A <- 1 ./ sqrt(A) (in place)
inline void Invsqrt();
/// A <- 1./x
inline void inv(const Vector<floating_type>& x);
/// A <- 1./A
inline void inv();
/// A <- x .* y
inline void mult(const Vector<floating_type>& x, const Vector<floating_type>& y);
inline void mult_elementWise(const Vector<floating_type>& B, Vector<floating_type>& C) const { C.mult(*this, B); };
/// normalize the vector
inline void normalize();
/// scale down to the l2 ball of radius thrs (no-op if already inside)
inline void normalize2(const floating_type thrs = 1.0);
/// whiten
inline void whiten(Vector<floating_type>& mean, const bool pattern = false);
/// whiten
inline void whiten(Vector<floating_type>& mean, const
Vector<floating_type>& mask);
/// whiten
inline void whiten(const INTM V);
/// returns the mean of the vector
inline floating_type mean() const;
inline floating_type abs_mean() const;
inline floating_type mean_non_uniform(const Vector<floating_type>& qi) const;
/// returns the standard deviation
inline floating_type std();
/// compute the Kuhlback-Leiber divergence
inline floating_type KL(const Vector<floating_type>& X);
/// undo whitening (add the means back)
inline void unwhiten(Vector<floating_type>& mean, const bool pattern = false);
/// scale the vector by a
inline void scal(const floating_type a);
/// A <- -A
inline void neg();
/// replace each value by its exponential
inline void exp();
/// replace each value by its logarithm
inline void log();
/// replace each value by its absolute value
inline void abs_vec();
/// replace each value by its exponential
inline void logexp();
/// replace each value by its exponential
inline floating_type softmax(const int y);
inline floating_type logsumexp();
/// computes the sum of the magnitudes of the vector
inline floating_type asum() const;
inline floating_type lzero() const;
/// compute the sum of the differences
inline floating_type afused() const;
/// returns the sum of the vector
inline floating_type sum() const;
/// puts in signs, the sign of each point in the vector
inline void sign(Vector<floating_type>& signs) const;
/// projects the vector onto the l1 ball of radius thrs,
/// returns true if the returned vector is null
inline void l1project(Vector<floating_type>& out, const floating_type thrs, const bool simplex = false) const;
inline void l1project_weighted(Vector<floating_type>& out, const Vector<floating_type>& weights, const floating_type thrs, const bool residual = false) const;
inline void l1l2projectb(Vector<floating_type>& out, const floating_type thrs, const floating_type gamma, const bool pos = false,
const int mode = 1);
inline void sparseProject(Vector<floating_type>& out, const floating_type thrs, const int mode = 1, const floating_type lambda_1 = 0,
const floating_type lambda_2 = 0, const floating_type lambda_3 = 0, const bool pos = false);
inline void project_sft(const Vector<int>& labels, const int clas);
inline void project_sft_binary(const Vector<floating_type>& labels);
/// projects the vector onto the elastic-net (l1+l2) ball of radius thrs
inline void l1l2project(Vector<floating_type>& out, const floating_type thrs, const floating_type gamma, const bool pos = false) const;
inline void fusedProject(Vector<floating_type>& out, const floating_type lambda_1, const floating_type lambda_2, const int itermax);
inline void fusedProjectHomotopy(Vector<floating_type>& out, const floating_type lambda_1, const floating_type lambda_2, const floating_type lambda_3 = 0,
const bool penalty = true);
/// sort a copy of the vector into out; mode selects the order
inline void sort(Vector<floating_type>& out, const bool mode) const;
/// sort the vector
inline void sort(const bool mode);
//// sort the vector
inline void sort2(Vector<floating_type>& out, Vector<INTM>& key, const bool mode) const;
/// sort the vector
inline void sort2(Vector<INTM>& key, const bool mode);
/// Conversion
/// make a sparse copy
inline void toSparse(SpVector<floating_type>& vec) const;
/// extract the rows of a matrix corresponding to a binary mask
inline void copyMask(Vector<floating_type>& out, Vector<bool>& mask) const;
inline void getIndices(Vector<int>& ind) const { }; // irrelevant for dense vectors
template <typename I>
inline void refIndices(Vector<I>& ind) const { }; // irrelevant for dense vectors
private:
/// = operator,
Vector<floating_type>& operator=(const Vector<floating_type>& vec);
/// if the data has been externally allocated
bool _externAlloc;
/// data
floating_type* _X;
/// size of the vector
INTM _n;
};
/* ***********************************
* Implementation of the class Vector
* ***********************************/
/// Empty constructor: zero-length, no storage, nothing to free
template <typename floating_type> Vector<floating_type>::Vector() :
_externAlloc(true), _X(NULL), _n(0) { };
/// Constructor. Create a new vector of size n (contents uninitialized)
template <typename floating_type> Vector<floating_type>::Vector(INTM n) :
_externAlloc(false), _n(n) {
// new[] guarded by omp critical — presumably works around a non-thread-safe
// allocator in multi-threaded callers; TODO confirm this is still needed
#pragma omp critical
{
_X = new floating_type[_n];
}
};
/// Constructor with existing data: non-owning view, X is not copied or freed
template <typename floating_type> Vector<floating_type>::Vector(floating_type* X, INTM n) :
_externAlloc(true), _X(X), _n(n) { };
/// Copy constructor: deep copy of vec's data (declared explicit in the class)
template <typename floating_type> Vector<floating_type>::Vector(const Vector<floating_type>& vec) :
_externAlloc(false), _n(vec._n) {
#pragma omp critical
{
_X = new floating_type[_n];
}
cblas_copy<floating_type>(_n, vec._X, 1, _X, 1);
};
/// Destructor: releases owned storage via clear()
template <typename floating_type> Vector<floating_type>::~Vector() {
clear();
};
/// Print the vector through the logging facility (stderr), one line of values
template <typename floating_type> inline void Vector<floating_type>::print(const std::string& name) const {
logging(logERROR) << name;
logging(logERROR) << _n;
for (INTM j = 0; j < _n; ++j) {
fprintf(stderr, "%10.5g ", static_cast<double>(_X[j]));
}
fprintf(stderr, "\n ");
};
/// Write the vector to a text file: size on the first line, then the values
template <typename floating_type> inline void Vector<floating_type>::dump(const std::string& name) const {
std::ofstream f;
const char* cname = name.c_str();
f.open(cname);
f.precision(20);
logging(logERROR) << name;
f << _n << std::endl;
for (INTM j = 0; j < _n; ++j) {
f << static_cast<double>(_X[j]) << " ";
}
f << std::endl;
f.close();
};
/// Print a double vector to stdout: "name, size" then the values
template <> inline void Vector<double>::print(const char* name) const {
printf("%s, %d\n", name, (int)_n);
for (INTM idx = 0; idx < _n; ++idx) printf("%g ", _X[idx]);
printf("\n");
};
/// Print a float vector to stdout: "name, size" then the values
template <> inline void Vector<float>::print(const char* name) const {
printf("%s, %d\n", name, (int)_n);
for (INTM idx = 0; idx < _n; ++idx) printf("%g ", _X[idx]);
printf("\n");
};
/// Print an int vector to stdout: "name, size" then the values
template <> inline void Vector<int>::print(const char* name) const {
printf("%s, %d\n", name, (int)_n);
for (INTM idx = 0; idx < _n; ++idx) printf("%d ", _X[idx]);
printf("\n");
};
/// Print a bool vector to stdout as 0/1 values
template <> inline void Vector<bool>::print(const char* name) const {
printf("%s, %d\n", name, (int)_n);
for (INTM idx = 0; idx < _n; ++idx) printf("%d ", _X[idx] ? 1 : 0);
printf("\n");
};
/// returns the index of the largest value (first occurrence on ties)
template <typename floating_type> inline INTM Vector<floating_type>::max() const {
INTM best = 0;
for (INTM j = 1; j < _n; ++j) {
if (_X[j] > _X[best]) {
best = j;
}
}
return best;
};
/// returns the index of the minimum value (first occurrence on ties)
template <typename floating_type> inline INTM Vector<floating_type>::min() const {
INTM best = 0;
for (INTM j = 1; j < _n; ++j) {
if (_X[j] < _X[best]) {
best = j;
}
}
return best;
};
/// returns the maximum value (value at index max())
template <typename floating_type> inline floating_type Vector<floating_type>::maxval() const {
return _X[this->max()];
};
/// returns the minimum value (value at index min())
template <typename floating_type> inline floating_type Vector<floating_type>::minval() const {
return _X[this->min()];
};
/// returns the maximum magnitude (|value| at index fmax())
template <typename floating_type> inline floating_type Vector<floating_type>::fmaxval() const {
return fabs(_X[this->fmax()]);
};
/// returns the minimum magnitude (|value| at index fmin())
template <typename floating_type> inline floating_type Vector<floating_type>::fminval() const {
return fabs(_X[this->fmin()]);
};
/// Fill with n values logarithmically spaced between a and b (inclusive).
/// NOTE(review): divides by (n - 1) — n must be >= 2 or step is inf/NaN.
template <typename floating_type>
inline void Vector<floating_type>::logspace(const INTM n, const floating_type a, const floating_type b) {
floating_type first = log10(a);
floating_type last = log10(b);
floating_type step = (last - first) / (n - 1);
this->resize(n);
// accumulate the exponents, then exponentiate in a second pass
_X[0] = first;
for (INTM i = 1; i < _n; ++i)
_X[i] = _X[i - 1] + step;
for (INTM i = 0; i < _n; ++i)
_X[i] = pow(floating_type(10.0), _X[i]);
}
/// returns the number of entries different from the default value (zero)
template <typename floating_type>
inline INTM Vector<floating_type>::nnz() const {
INTM count = 0;
for (INTM i = 0; i < _n; ++i) {
if (_X[i] != floating_type()) {
count++;
}
}
return count;
};
/// generate logarithmically spaced values
/// Integer specialization: takes a real logspace and rounds down, forcing the
/// sequence to be strictly increasing and keeping both endpoints exact.
template <>
inline void Vector<INTM>::logspace(const INTM n, const INTM a, const INTM b) {
Vector<double> tmp(n);
tmp.logspace(n, double(a), double(b));
this->resize(n);
_X[0] = a;
_X[n - 1] = b;
for (INTM i = 1; i < _n - 1; ++i) {
INTM candidate = static_cast<INTM>(floor(static_cast<double>(tmp[i])));
// bump to previous+1 when rounding would create a duplicate
_X[i] = candidate > _X[i - 1] ? candidate : _X[i - 1] + 1;
}
}
/// returns the index of the value with largest magnitude (BLAS iamax)
template <typename floating_type> inline INTM Vector<floating_type>::fmax() const {
return cblas_iamax<floating_type>(_n, _X, 1);
};
/// returns the index of the value with smallest magnitude (BLAS iamin)
template <typename floating_type> inline INTM Vector<floating_type>::fmin() const {
return cblas_iamin<floating_type>(_n, _X, 1);
};
/// returns a reference to X[i]; bounds checked only in debug builds (assert)
template <typename floating_type> inline floating_type& Vector<floating_type>::operator[] (const INTM i) {
assert(i >= 0 && i < _n);
return _X[i];
};
/// returns X[i]; bounds checked only in debug builds (assert)
template <typename floating_type> inline floating_type Vector<floating_type>::operator[] (const INTM i) const {
assert(i >= 0 && i < _n);
return _X[i];
};
/// make a deep copy of x (no-op when both share the same buffer)
template <typename floating_type> inline void Vector<floating_type>::copy(const Vector<floating_type>& x) {
if (_X != x._X) {
this->resize(x.n());
//cblas_copy<floating_type>(_n,x._X,1,_X,1);
// memcpy is valid here for the trivially-copyable scalar types used
memcpy(_X, x._X, _n * sizeof(floating_type));
}
};
/// make this a non-owning view of x's data (frees any owned storage first)
template <typename floating_type> inline void Vector<floating_type>::copyRef(const Vector<floating_type>& x) {
this->setData(x.rawX(), x.n());
};
/// Set all values to zero (memset — assumes all-zero-bytes == value zero)
template <typename floating_type> inline void Vector<floating_type>::setZeros() {
memset(_X, 0, _n * sizeof(floating_type));
};
/// resize the vector; new storage is zeroed when set_zeros is true.
/// NOTE(review): when the size is unchanged this returns immediately and the
/// existing contents are kept even if set_zeros was requested — callers rely
/// on this, so it is documented rather than changed.
template <typename floating_type> inline void Vector<floating_type>::resize(const INTM n, const bool set_zeros) {
if (_n == n)
return;
clear();
// allocator guarded by omp critical — presumably for thread safety; TODO confirm
#pragma omp critical
{
_X = new floating_type[n];
}
_n = n;
_externAlloc = false;
if (set_zeros)
this->setZeros();
};
/// change the data of the vector to an external, caller-owned buffer
template <typename floating_type> inline void Vector<floating_type>::setPointer(floating_type* X, const INTM n) {
clear();
_externAlloc = true;
_X = X;
_n = n;
};
/// fill with random integers in [0, n)
/// NOTE(review): random() % n has modulo bias for n not dividing RAND_MAX+1
template <> inline void Vector<int>::randi(int n) {
for (int i = 0; i < _n; ++i)
_X[i] = static_cast<int>(random() % n);
};
/// put a random permutation of size n (Fisher–Yates-style draw without
/// replacement from a shrinking table)
template <> inline void Vector<int>::randperm(int n) {
resize(n);
Vector<int> table(n);
for (int i = 0; i < n; ++i)
table[i] = i;
int size = n;
for (int i = 0; i < n; ++i) {
const int ind = random() % size;
_X[i] = table[ind];
// move the last unused entry into the slot just consumed
table[ind] = table[size - 1];
--size;
}
};
/// put random values in the vector (white Gaussian Noise)
template <typename floating_type> inline void Vector<floating_type>::setAleat() {
for (INTM i = 0; i < _n; ++i) _X[i] = normalDistrib<floating_type>();
};
/// clear the vector: free owned storage and reset to the empty state
template <typename floating_type> inline void Vector<floating_type>::clear() {
if (!_externAlloc)
delete[](_X);
_n = 0;
_X = NULL;
_externAlloc = true;
};
/// Soft-thresholding: shrink each entry toward zero by nu, zeroing |x| <= nu
template <typename floating_type> inline void Vector<floating_type>::softThrshold(const floating_type nu) {
for (INTM i = 0; i < _n; ++i) {
const floating_type v = _X[i];
if (v > nu) {
_X[i] = v - nu;
}
else if (v < -nu) {
_X[i] = v + nu;
}
else {
_X[i] = 0;
}
}
};
/// in-place soft-thresholding via the fastSoftThrs helper
template <typename floating_type> inline void Vector<floating_type>::fastSoftThrshold(const floating_type nu) {
//#pragma omp parallel for
for (INTM i = 0; i < _n; ++i)
{
_X[i] = fastSoftThrs(_X[i], nu);
}
};
/// out-of-place soft-thresholding: output <- softthrs(this, nu)
template <typename floating_type> inline void Vector<floating_type>::fastSoftThrshold(Vector<floating_type>& output, const floating_type nu) const {
// resize without zeroing: every entry is overwritten below
output.resize(_n, false);
//#pragma omp parallel for
for (INTM i = 0; i < _n; ++i)
output[i] = fastSoftThrs(_X[i], nu);
};
/// out <- s * softthrs(this, nu): soft-thresholding followed by scaling.
/// NOTE(review): out is assumed to be pre-sized to _n by the caller.
template <typename floating_type> inline void Vector<floating_type>::softThrsholdScal(Vector<floating_type>& out, const floating_type nu, const floating_type s) {
floating_type* Y = out.rawX();
for (INTM i = 0; i < _n; ++i) {
if (_X[i] > nu) {
Y[i] = s * (_X[i] - nu);
}
else if (_X[i] < -nu) {
Y[i] = s * (_X[i] + nu);
}
else {
Y[i] = 0;
}
}
};
/// Hard-thresholding: zero every entry whose magnitude is not above nu
/// (negated-OR form kept so NaN entries are zeroed, as in the original)
template <typename floating_type> inline void Vector<floating_type>::hardThrshold(const floating_type nu) {
for (INTM i = 0; i < _n; ++i) {
const bool keep = (_X[i] > nu) || (_X[i] < -nu);
if (!keep) {
_X[i] = 0;
}
}
};
/// A <- max(A, nu): clamp each entry from below
template <typename floating_type> inline void Vector<floating_type>::thrsmax(const floating_type nu) {
for (INTM i = 0; i < _n; ++i) {
_X[i] = (_X[i] < nu) ? nu : _X[i];
}
}
/// A <- min(A, nu): clamp each entry from above
template <typename floating_type> inline void Vector<floating_type>::thrsmin(const floating_type nu) {
for (INTM i = 0; i < _n; ++i) {
_X[i] = (_X[i] < nu) ? _X[i] : nu;
}
}
/// A <- clamp(A, -nu, nu): limit each entry's magnitude to nu
template <typename floating_type> inline void Vector<floating_type>::thrsabsmin(const floating_type nu) {
for (INTM i = 0; i < _n; ++i) {
const floating_type capped = (_X[i] < nu) ? _X[i] : nu;
_X[i] = (capped > -nu) ? capped : -nu;
}
}
/// zero out entries with magnitude strictly below nu
template <typename floating_type> inline void Vector<floating_type>::thrshold(const floating_type nu) {
for (INTM i = 0; i < _n; ++i) {
_X[i] = (abs<floating_type>(_X[i]) < nu) ? floating_type(0) : _X[i];
}
}
/// zero out negative entries (project onto the non-negative orthant)
template <typename floating_type> inline void Vector<floating_type>::thrsPos() {
for (INTM i = 0; i < _n; ++i) {
_X[i] = (_X[i] < 0) ? floating_type(0) : _X[i];
}
};
/// true iff every entry is true (empty vector counts as all-true)
template <>
inline bool Vector<bool>::alltrue() const {
INTM i = 0;
while (i < _n && _X[i]) ++i;
return i == _n;
};
/// true iff every entry is false (empty vector counts as all-false)
template <>
inline bool Vector<bool>::allfalse() const {
INTM i = 0;
while (i < _n && !_X[i]) ++i;
return i == _n;
};
/// set each value of the vector to val
template <typename floating_type> inline void Vector<floating_type>::set(const floating_type val) {
floating_type* ptr = _X;
for (INTM i = 0; i < _n; ++i) {
ptr[i] = val;
}
};
/// returns ||A||_2 (BLAS nrm2)
template <typename floating_type> inline floating_type Vector<floating_type>::nrm2() const {
return cblas_nrm2<floating_type>(_n, _X, 1);
};
/// returns ||A||_2^2 (self dot product)
template <typename floating_type> inline floating_type Vector<floating_type>::nrm2sq() const {
return cblas_dot<floating_type>(_n, _X, 1, _X, 1);
};
/// returns A'x (BLAS dot); sizes must match
template <typename floating_type> inline floating_type Vector<floating_type>::dot(const Vector<floating_type>& x) const {
assert(_n == x._n);
return cblas_dot<floating_type>(_n, _X, 1, x._X, 1);
};
/// returns A'x, when x is sparse: gather-multiply over x's stored entries
// NOTE(review): loop index type is INTT while the rest of the file uses
// INTM — presumably both are the same width; verify against misc headers
template <typename floating_type>
template <typename I>
inline floating_type Vector<floating_type>::dot(const SpVector<floating_type, I>& x) const {
floating_type sum = 0;
const I* r = x.rawR();
const floating_type* v = x.rawX();
for (INTT i = 0; i < x._L; ++i) {
sum += _X[r[i]] * v[i];
}
return sum;
//return cblas_doti<floating_type>(x._L,x._v,x._r,_X);
};
/// A <- A + a*x (BLAS axpy); sizes must match
template <typename floating_type> inline void Vector<floating_type>::add(const Vector<floating_type>& x, const floating_type a) {
assert(_n == x._n);
cblas_axpy<floating_type>(_n, a, x._X, 1, _X, 1);
};
/// A <- b*A + a*x (BLAS axpby); sizes must match
template <typename floating_type> inline void Vector<floating_type>::add_scal(const Vector<floating_type>& x, const floating_type a, const floating_type b) {
assert(_n == x._n);
cblas_axpby<floating_type>(_n, a, x._X, 1, b, _X, 1);
};
/// A <- A + a*x, x sparse: scatter-add over x's stored entries
/// (a == 1 fast path avoids the per-entry multiply)
template <typename floating_type>
template <typename I>
inline void Vector<floating_type>::add(const SpVector<floating_type, I>& x,
const floating_type a) {
if (a == 1.0) {
for (INTM i = 0; i < x._L; ++i)
_X[x._r[i]] += x._v[i];
}
else {
for (INTM i = 0; i < x._L; ++i)
_X[x._r[i]] += a * x._v[i];
}
};
/// A <- b*A + a*x, x sparse: scale A first, then scatter-add a*x
template <typename floating_type>
template <typename I>
inline void Vector<floating_type>::add_scal(const SpVector<floating_type, I>& x,
const floating_type a, const floating_type b) {
if (b != floating_type(1.0)) {
// b == 0 uses setZeros rather than scal(0) (also clears NaN/Inf entries)
if (b == 0) {
this->setZeros();
}
else {
this->scal(b);
}
}
if (a == floating_type(1.0)) {
for (I i = 0; i < x._L; ++i)
_X[x._r[i]] += x._v[i];
}
else {
for (I i = 0; i < x._L; ++i)
_X[x._r[i]] += a * x._v[i];
}
};
/// adds the scalar a to every entry of the vector
template <typename floating_type> inline void Vector<floating_type>::add(const floating_type a) {
for (INTM k = 0; k < _n; ++k) {
_X[k] = _X[k] + a;
}
};
/// A <- A - x (vectorized helper); sizes must match
template <typename floating_type> inline void Vector<floating_type>::sub(const Vector<floating_type>& x) {
assert(_n == x._n);
vSub<floating_type>(_n, _X, x._X, _X);
};
/// A <- A - x, x sparse: scatter-subtract over x's stored entries
template <typename floating_type>
template <typename I>
inline void Vector<floating_type>::sub(const SpVector<floating_type, I>& x) {
for (INTM i = 0; i < x._L; ++i)
_X[x._r[i]] -= x._v[i];
};
/// A <- A ./ x element-wise; sizes must match
template <typename floating_type> inline void Vector<floating_type>::div(const Vector<floating_type>& x) {
assert(_n == x._n);
vDiv<floating_type>(_n, _X, x._X, _X);
};
/// A <- x ./ y element-wise
/// NOTE(review): only x's size is asserted; y is assumed at least as long
template <typename floating_type> inline void Vector<floating_type>::div(const Vector<floating_type>& x, const Vector<floating_type>& y) {
assert(_n == x._n);
vDiv<floating_type>(_n, x._X, y._X, _X);
};
/// A <- x .^ 2
template <typename floating_type> inline void Vector<floating_type>::sqr(const Vector<floating_type>& x) {
this->resize(x._n);
vSqr<floating_type>(_n, x._X, _X);
}
/// A <- A .^ 2 (in place)
template <typename floating_type> inline void Vector<floating_type>::sqr() {
vSqr<floating_type>(_n, _X, _X);
}
/// A <- 1 ./ sqrt(x)
template <typename floating_type> inline void Vector<floating_type>::Invsqrt(const Vector<floating_type>& x) {
this->resize(x._n);
vInvSqrt<floating_type>(_n, x._X, _X);
}
/// A <- sqrt(x)
template <typename floating_type> inline void Vector<floating_type>::Sqrt(const Vector<floating_type>& x) {
this->resize(x._n);
vSqrt<floating_type>(_n, x._X, _X);
}
/// A <- 1 ./ sqrt(A) (in place)
template <typename floating_type> inline void Vector<floating_type>::Invsqrt() {
vInvSqrt<floating_type>(_n, _X, _X);
}
/// A <- sqrt(A) (in place)
template <typename floating_type> inline void Vector<floating_type>::Sqrt() {
vSqrt<floating_type>(_n, _X, _X);
}
/// A <- 1./x
template <typename floating_type> inline void Vector<floating_type>::inv(const Vector<floating_type>& x) {
this->resize(x.n());
vInv<floating_type>(_n, x._X, _X);
};
/// A <- 1./A
template <typename floating_type> inline void Vector<floating_type>::inv() {
vInv<floating_type>(_n, _X, _X);
};
/// A <- x .* y element-wise; resizes this vector to x's length first.
/// NOTE(review): y is assumed to be at least as long as x (not asserted).
template <typename floating_type> inline void Vector<floating_type>::mult(const Vector<floating_type>& x,
const Vector<floating_type>& y) {
this->resize(x.n());
vMul<floating_type>(_n, x._X, y._X, _X);
};
/// normalize the vector to unit l2 norm; near-zero vectors (norm <= EPSILON)
/// are left unchanged to avoid division blow-up
template <typename floating_type> inline void Vector<floating_type>::normalize() {
floating_type norm = nrm2();
if (norm > EPSILON)
scal(1.0 / norm);
};
/// scale down onto the l2 ball of radius thrs; no-op if already inside
template <typename floating_type> inline void Vector<floating_type>::normalize2(const floating_type thrs) {
floating_type norm = nrm2();
if (norm > thrs)
scal(thrs / norm);
};
/// whiten: subtract per-group means, storing them in meanv.
/// pattern mode treats the vector as an n x n image (n = sqrt(_n)) and
/// removes four means, one per 2x2 checkerboard phase (parity of row/col);
/// otherwise the vector is split into meanv.n() equal consecutive patches
/// and each patch is centered on its own mean.
template <typename floating_type> inline void Vector<floating_type>::whiten(
Vector<floating_type>& meanv, const bool pattern) {
if (pattern) {
const INTM n = static_cast<INTM>(sqrt(static_cast<floating_type>(_n)));
INTM count[4];
for (INTM i = 0; i < 4; ++i) count[i] = 0;
INTM offsetx = 0;
// first pass: accumulate the mean of each of the 4 parity classes
for (INTM j = 0; j < n; ++j) {
offsetx = (offsetx + 1) % 2;
INTM offsety = 0;
for (INTM k = 0; k < n; ++k) {
offsety = (offsety + 1) % 2;
meanv[2 * offsetx + offsety] += _X[j * n + k];
count[2 * offsetx + offsety]++;
}
}
for (INTM i = 0; i < 4; ++i)
meanv[i] /= count[i];
offsetx = 0;
// second pass: subtract the class mean from each pixel
for (INTM j = 0; j < n; ++j) {
offsetx = (offsetx + 1) % 2;
INTM offsety = 0;
for (INTM k = 0; k < n; ++k) {
offsety = (offsety + 1) % 2;
_X[j * n + k] -= meanv[2 * offsetx + offsety];
}
}
}
else {
const INTM V = meanv.n();
const INTM sizePatch = _n / V;
for (INTM j = 0; j < V; ++j) {
floating_type mean = 0;
for (INTM k = 0; k < sizePatch; ++k) {
mean += _X[sizePatch * j + k];
}
mean /= sizePatch;
for (INTM k = 0; k < sizePatch; ++k) {
_X[sizePatch * j + k] -= mean;
}
meanv[j] = mean;
}
}
};
/// whiten with a mask: per-patch means are normalized by the mask weight
/// (sum of |mask|) and subtracted only where the mask is non-zero
template <typename floating_type> inline void Vector<floating_type>::whiten(
Vector<floating_type>& meanv, const Vector<floating_type>& mask) {
const INTM V = meanv.n();
const INTM sizePatch = _n / V;
for (INTM j = 0; j < V; ++j) {
floating_type mean = 0;
for (INTM k = 0; k < sizePatch; ++k) {
mean += _X[sizePatch * j + k];
}
mean /= cblas_asum(sizePatch, mask._X + j * sizePatch, 1);
for (INTM k = 0; k < sizePatch; ++k) {
if (mask[sizePatch * j + k])
_X[sizePatch * j + k] -= mean;
}
meanv[j] = mean;
}
};
/// whiten into V patches, discarding the means
template <typename floating_type> inline void Vector<floating_type>::whiten(const INTM V) {
const INTM sizePatch = _n / V;
for (INTM j = 0; j < V; ++j) {
floating_type mean = 0;
for (INTM k = 0; k < sizePatch; ++k) {
mean += _X[sizePatch * j + k];
}
mean /= sizePatch;
for (INTM k = 0; k < sizePatch; ++k) {
_X[sizePatch * j + k] -= mean;
}
}
};
/// generalised Kullback-Leibler divergence KL(*this || Y):
/// sum_i x_i*log(x_i/y_i) - sum(x) + sum(y); the -sum(x) term appears here as
/// the constant -1, so *this is presumably normalised to sum to one -- confirm.
template <typename floating_type> inline floating_type Vector<floating_type>::KL(const Vector<floating_type>& Y) {
    floating_type sum = 0;
    floating_type* prY = Y.rawX();
    for (INTM i = 0; i < _n; ++i) {
        if (_X[i] > 1e-20) {           // skip (numerically) zero entries: 0*log(0) = 0
            if (prY[i] < 1e-60) {
                sum += 1e200;          // y ~ 0 while x > 0: divergence diverges
            }
            else {
                sum += _X[i] * log_alt<floating_type>(_X[i] / prY[i]);
            }
            //sum += _X[i]*log_alt<floating_type>(_X[i]/(prY[i]+1e-100));
        }
    }
    sum += floating_type(-1.0) + Y.sum();  // -sum(x) + sum(y), with sum(x) taken as 1
    return sum;
};
/// unwhiten: add back the means removed by whiten().
/// @param meanv   means previously produced by whiten()
/// @param pattern if true the vector is an n-by-n image (n = sqrt(_n)) and the
///                4 means are re-applied on a 2x2 checkerboard pattern;
///                otherwise one mean per contiguous patch is re-applied.
template <typename floating_type> inline void Vector<floating_type>::unwhiten(
    Vector<floating_type>& meanv, const bool pattern) {
    if (pattern) {
        const INTM n = static_cast<INTM>(sqrt(static_cast<floating_type>(_n)));
        INTM offsetx = 0;
        for (INTM j = 0; j < n; ++j) {
            offsetx = (offsetx + 1) % 2;      // alternate row parity
            INTM offsety = 0;
            for (INTM k = 0; k < n; ++k) {
                offsety = (offsety + 1) % 2;  // alternate column parity
                _X[j * n + k] += meanv[2 * offsetx + offsety];
            }
        }
    }
    else {
        const INTM V = meanv.n();             // number of patches
        const INTM sizePatch = _n / V;
        for (INTM j = 0; j < V; ++j) {
            floating_type mean = meanv[j];
            for (INTM k = 0; k < sizePatch; ++k) {
                _X[sizePatch * j + k] += mean;
            }
        }
    }
};
/// return the arithmetic mean of the entries
template <typename floating_type> inline floating_type Vector<floating_type>::mean() const {
    const floating_type total = this->sum();
    return total / _n;
}
/// return the mean of the absolute values (l1 norm divided by length)
template <typename floating_type> inline floating_type Vector<floating_type>::abs_mean() const {
    const floating_type l1 = this->asum();
    return l1 / _n;
};
/// weighted mean: returns sum_i qi[i] * x[i]
/// (qi presumably sums to one so this is a proper mean -- confirm with callers)
template <typename floating_type> inline floating_type Vector<floating_type>::mean_non_uniform(const Vector<floating_type>& qi) const {
    Vector<floating_type> tmp;
    tmp.copy(*this);
    tmp.mult(qi, tmp);   // tmp <- qi .* x (element-wise product)
    return tmp.sum();
};
/// return the population standard deviation sqrt(sum((x_i - mean)^2)/n)
/// NOTE(review): sqr_alt is applied to the variance, so it presumably computes
/// a square root -- confirm against its definition. Divides by n, not n-1.
template <typename floating_type> inline floating_type Vector<floating_type>::std() {
    floating_type E = this->mean();
    floating_type std = 0;
    for (INTM i = 0; i < _n; ++i) {
        floating_type tmp = _X[i] - E;
        std += tmp * tmp;   // accumulate squared deviations
    }
    std /= _n;              // population normalisation
    return sqr_alt<floating_type>(std);
}
/// scale the vector in place: x <- a*x (delegates to BLAS scal)
template <typename floating_type> inline void Vector<floating_type>::scal(const floating_type a) {
    return cblas_scal<floating_type>(_n, a, _X, 1);
};
/// A <- -A: negate every entry in place
template <typename floating_type> inline void Vector<floating_type>::neg() {
    floating_type* p = _X;
    const floating_type* const end = _X + _n;
    for (; p != end; ++p)
        *p = -(*p);
};
/// replace each value by its exponential, in place (vectorised helper vExp)
template <typename floating_type> inline void Vector<floating_type>::exp() {
    vExp<floating_type>(_n, _X, _X);
};
/// replace each value by its absolute value, in place (vectorised helper vAbs)
template <typename floating_type> inline void Vector<floating_type>::abs_vec() {
    vAbs<floating_type>(_n, _X, _X);
};
/// replace each value by its logarithm, in place
template <typename floating_type> inline void Vector<floating_type>::log() {
    for (floating_type* p = _X; p != _X + _n; ++p)
        *p = alt_log<floating_type>(*p);
};
/// replace each value x by log(1+exp(x)) (softplus), via the logexp2 helper;
/// the commented-out code below is the former clipped implementation.
template <typename floating_type> inline void Vector<floating_type>::logexp() {
    for (INTM i = 0; i < _n; ++i) {
        _X[i] = logexp2(_X[i]);
        /*if (_X[i] < -30) {
        _X[i]=0;
        } else if (_X[i] < 30) {
        _X[i]= alt_log<floating_type>( floating_type(1.0) + exp_alt<floating_type>( _X[i] ) );
        }*/
    }
};
/// return log(sum_i exp(x_i)) computed in a numerically stable way.
/// WARNING: destructive -- on exit the vector holds exp(x - max(x)).
template <typename floating_type> inline floating_type Vector<floating_type>::logsumexp() {
    floating_type mm = this->maxval();
    this->add(-mm);    // shift so the maximum is 0 (prevents overflow in exp)
    this->exp();
    return mm + alt_log<floating_type>(this->asum());  // entries are >= 0, so asum == sum
};
/// return log(sum_i exp(x_i - x_y)) with clipping of extreme values
/// (entry y contributes exp(0) = 1 through the reset on the normal path).
/// WARNING: destructive -- the vector is shifted by -x_y and entry y modified.
template <typename floating_type> inline floating_type Vector<floating_type>::softmax(const int y) {
    this->add(-_X[y]);         // shift so that entry y becomes 0
    _X[y] = -INFINITY;         // temporarily exclude y from the max
    floating_type max = this->maxval();
    if (max > 30) {
        return max;            // exp would overflow: log-sum-exp ~ max
    }
    else if (max < -30) {
        return 0;              // all other terms negligible next to exp(0)=1
    }
    else {
        _X[y] = floating_type(0.0);   // restore entry y's shifted value
        this->exp();
        return alt_log<floating_type>(this->sum());
    }
};
/// computes the sum of the magnitudes of the vector (l1 norm, BLAS asum)
template <typename floating_type> inline floating_type Vector<floating_type>::asum() const {
    return cblas_asum<floating_type>(_n, _X, 1);
};
/// l0 pseudo-norm: number of non-zero entries (returned as floating_type)
template <typename floating_type> inline floating_type Vector<floating_type>::lzero() const {
    INTM nnz = 0;
    for (INTM i = 0; i < _n; ++i)
        nnz += (_X[i] != 0) ? 1 : 0;
    return nnz;
};
/// total-variation ("fused") seminorm: sum_i |x_i - x_{i-1}|
template <typename floating_type> inline floating_type Vector<floating_type>::afused() const {
    floating_type tv = 0;
    for (INTM i = 1; i < _n; ++i)
        tv += abs<floating_type>(_X[i] - _X[i - 1]);
    return tv;
}
/// returns the sum of the vector's entries
template <typename floating_type> inline floating_type Vector<floating_type>::sum() const {
    floating_type total = floating_type();
    const floating_type* p = _X;
    for (INTM left = _n; left > 0; --left)
        total += *p++;
    return total;
};
/// puts in signs the sign of each entry of the vector (zero maps to zero)
template <typename floating_type> inline void Vector<floating_type>::sign(Vector<floating_type>& signs) const {
    floating_type* dst = signs.rawX();
    for (INTM i = 0; i < _n; ++i) {
        const floating_type v = _X[i];
        if (v == 0) {
            dst[i] = 0.0;
        }
        else {
            dst[i] = (v > 0) ? 1.0 : -1.0;
        }
    }
};
/// projects the vector onto the l1 ball of radius thrs, result in out.
/// If simplex is true, projects onto {u >= 0, sum(u) <= thrs} instead
/// (negative entries are clipped first). Uses the expected-linear-time
/// pivot algorithm: find lambda_1 such that sum_i max(|x_i|-lambda_1,0) = thrs,
/// then soft-threshold the input at lambda_1.
template <typename floating_type> inline void Vector<floating_type>::l1project(Vector<floating_type>& out,
    const floating_type thrs, const bool simplex) const {
    out.copy(*this);
    if (simplex) {
        out.thrsPos();     // clip to the positive orthant
    }
    else {
        vAbs<floating_type>(_n, out._X, out._X);   // work on magnitudes
    }
    floating_type norm1 = out.sum();
    if (norm1 <= thrs) {
        if (!simplex)
            out.copy(*this);   // already inside the ball: the input is its own projection
        return;
    }
    floating_type* prU = out._X;
    INTM sizeU = _n;
    floating_type sum = floating_type();   // sum of entries known to exceed the threshold
    INTM sum_card = 0;                     // their count
    while (sizeU > 0) {
        // put the pivot in prU[0]
        std::swap(prU[0], prU[sizeU / 2]);
        floating_type pivot = prU[0];
        INTM sizeG = 1;
        floating_type sumG = pivot;
        // partition: entries >= pivot go to the front, accumulating their sum
        for (INTM i = 1; i < sizeU; ++i) {
            if (prU[i] >= pivot) {
                sumG += prU[i];
                std::swap(prU[sizeG++], prU[i]);
            }
        }
        if (sum + sumG - pivot * (sum_card + sizeG) <= thrs) {
            // threshold lies below the pivot: keep the whole >=pivot group
            sum_card += sizeG;
            sum += sumG;
            prU += sizeG;
            sizeU -= sizeG;
        }
        else {
            // threshold lies above the pivot: recurse on the >pivot group
            ++prU;
            sizeU = sizeG - 1;
        }
    }
    floating_type lambda_1 = (sum - thrs) / sum_card;   // optimal threshold
    out.copy(*this);
    if (simplex) {
        out.thrsPos();
    }
    out.softThrshold(lambda_1);
};
/// projects the vector onto the weighted l1 ball {u : sum_i w_i|u_i| <= thrs}.
/// If residual is true, out receives the residual x - proj(x) instead of the
/// projection itself. Sorts |x_i|/w_i decreasingly to locate the threshold.
template <typename floating_type> inline void Vector<floating_type>::l1project_weighted(Vector<floating_type>& out, const Vector<floating_type>& weights,
    const floating_type thrs, const bool residual) const {
    out.copy(*this);
    if (thrs == 0) {
        out.setZeros();    // radius-0 ball: projection is the zero vector
        return;
    }
    vAbs<floating_type>(_n, out._X, out._X);
    out.div(weights);      // candidate thresholds |x_i|/w_i
    Vector<INTM> keys(_n);
    for (INTM i = 0; i < _n; ++i) keys[i] = i;
    out.sort2(keys, false);    // decreasing order; keys track original indices
    floating_type sum1 = 0;    // sum of w_i^2 * lambda_i over the active set
    floating_type sum2 = 0;    // sum of w_i^2 over the active set
    floating_type lambda_1 = 0;
    for (INTM i = 0; i < _n; ++i) {
        const floating_type lambda_old = lambda_1;
        const floating_type fact = weights[keys[i]] * weights[keys[i]];
        lambda_1 = out[i];
        sum2 += fact;
        sum1 += fact * lambda_1;
        if (sum1 - lambda_1 * sum2 >= thrs) {
            // overshoot: roll back to the previous active set
            sum2 -= fact;
            sum1 -= fact * lambda_1;
            lambda_1 = lambda_old;
            break;
        }
    }
    lambda_1 = MAX(0, (sum1 - thrs) / sum2);   // optimal threshold
    if (residual) {
        // residual form: what soft-thresholding removes from each entry
        for (INTM i = 0; i < _n; ++i) {
            out._X[i] = _X[i] > 0 ? MIN(_X[i], lambda_1 * weights[i]) : MAX(_X[i], -lambda_1 * weights[i]);
        }
    }
    else {
        // projection form: weighted soft-thresholding of the input
        for (INTM i = 0; i < _n; ++i) {
            out._X[i] = _X[i] > 0 ? MAX(0, _X[i] - lambda_1 * weights[i]) : MIN(0, _X[i] + lambda_1 * weights[i]);
        }
    }
};
/// in-place projection of the scores for binary labels y in {+1,-1}
/// (used by project_sft): translates the scores of one class by +/-1,
/// projects the translated vector onto the simplex of radius equal to that
/// class's size via l1project, then undoes the translation. The sign of the
/// current mean selects which class drives the projection.
template <typename floating_type>
inline void Vector<floating_type>::project_sft_binary(const Vector<floating_type>& y) {
    floating_type mean = this->mean();
    Vector<floating_type> ztilde, xtilde;
    ztilde.resize(_n);
    int count = 0;   // size of the selected class
    if (mean > 0) {
        // positive mean: lift positive-label scores by one before projecting
        for (int ii = 0; ii < _n; ++ii)
            if (y[ii] > 0) {
                count++;
                ztilde[ii] = _X[ii] + floating_type(1.0);
            }
            else {
                ztilde[ii] = _X[ii];
            }
        ztilde.l1project(xtilde, floating_type(count));
        for (int ii = 0; ii < _n; ++ii)
            _X[ii] = y[ii] > 0 ? xtilde[ii] - floating_type(1.0) : xtilde[ii];
    }
    else {
        // non-positive mean: symmetric case on the negated scores
        for (int ii = 0; ii < _n; ++ii)
            if (y[ii] > 0) {
                ztilde[ii] = -_X[ii];
            }
            else {
                count++;
                ztilde[ii] = -_X[ii] + floating_type(1.0);
            }
        ztilde.l1project(xtilde, floating_type(count));
        for (int ii = 0; ii < _n; ++ii)
            _X[ii] = y[ii] > 0 ? -xtilde[ii] : -xtilde[ii] + floating_type(1.0);
    }
};
/// multi-class wrapper: builds a +/-1 label vector (+1 where labels == clas)
/// and delegates to project_sft_binary. The commented-out code below is an
/// older iterative thresholding implementation kept for reference.
template <typename floating_type>
inline void Vector<floating_type>::project_sft(const Vector<int>& labels, const int clas) {
    Vector<floating_type> y(_n);
    for (int ii = 0; ii < _n; ++ii) y[ii] = labels[ii] == clas ? floating_type(1.0) : -floating_type(1.0);
    this->project_sft_binary(y);
    /*  floating_type mean = this->mean();
    floating_type thrs=mean;
    while (abs(mean) > EPSILON) {
    INTM n_seuils=0;
    for (INTM i = 0; i< _n; ++i) {
    _X[i] = _X[i]-thrs;
    if (labels[i]==clas) {
    if (_X[i] < -1.0) {
    _X[i]=-1.0;
    ++n_seuils;
    }
    } else {
    if (_X[i] < 0) {
    ++n_seuils;
    _X[i]=0;
    }
    }
    }
    mean = this->mean();
    thrs= mean * _n/(_n-n_seuils);*/
    //}
};
/// dispatcher for several sparse projection problems, selected by mode
/// (see the per-branch comments). out receives the solution; lambda_1,
/// lambda_2 and lambda_3 weight the l1, fused (total-variation) and l2
/// terms respectively; pos restricts the solution to the positive orthant.
template <typename floating_type>
inline void Vector<floating_type>::sparseProject(Vector<floating_type>& out, const floating_type thrs, const int mode, const floating_type lambda_1,
    const floating_type lambda_2, const floating_type lambda_3, const bool pos) {
    if (mode == 1) {
        /// min_u ||b-u||_2^2 / ||u||_1 <= thrs
        this->l1project(out, thrs, pos);
    }
    else if (mode == 2) {
        /// min_u ||b-u||_2^2 / ||u||_2^2 + lambda_1||u||_1 <= thrs
        if (lambda_1 > 1e-10) {
            // rescale so the elastic-net constraint becomes an l1l2project;
            // note *this is scaled and restored around the call
            this->scal(lambda_1);
            this->l1l2project(out, thrs, 2.0 / (lambda_1 * lambda_1), pos);
            this->scal(floating_type(1.0 / lambda_1));
            out.scal(floating_type(1.0 / lambda_1));
        }
        else {
            // negligible l1 term: plain projection onto the l2 ball
            out.copy(*this);
            out.normalize2();
            out.scal(sqrt(thrs));
        }
    }
    else if (mode == 3) {
        /// min_u ||b-u||_2^2 / ||u||_1 + (lambda_1/2) ||u||_2^2 <= thrs
        this->l1l2project(out, thrs, lambda_1, pos);
    }
    else if (mode == 4) {
        /// min_u 0.5||b-u||_2^2 + lambda_1||u||_1 / ||u||_2^2 <= thrs
        out.copy(*this);
        if (pos)
            out.thrsPos();
        out.softThrshold(lambda_1);
        floating_type nrm = out.nrm2sq();
        if (nrm > thrs)
            out.scal(sqr_alt<floating_type>(thrs / nrm));
    }
    else if (mode == 5) {
        /// min_u 0.5||b-u||_2^2 + lambda_1||u||_1 +lambda_2 Fused(u) / ||u||_2^2 <= thrs
        //      this->fusedProject(out,lambda_1,lambda_2,100);
        //      floating_type nrm=out.nrm2sq();
        //      if (nrm > thrs)
        //         out.scal(sqr_alt<floating_type>(thrs/nrm));
        //   } else if (mode == 6) {
        /// min_u 0.5||b-u||_2^2 + lambda_1||u||_1 +lambda_2 Fused(u) +0.5lambda_3 ||u||_2^2
        this->fusedProjectHomotopy(out, lambda_1, lambda_2, lambda_3, true);
    }
    else if (mode == 6) {
        /// min_u ||b-u||_2^2 / lambda_1||u||_1 +lambda_2 Fused(u) + 0.5lambda3||u||_2^2 <= thrs
        this->fusedProjectHomotopy(out, lambda_1 / thrs, lambda_2 / thrs, lambda_3 / thrs, false);
    }
    else {
        /// min_u ||b-u||_2^2 / (1-lambda_1)*||u||_2^2 + lambda_1||u||_1 <= thrs
        if (lambda_1 < 1e-10) {
            // pure l2 constraint
            out.copy(*this);
            if (pos)
                out.thrsPos();
            out.normalize2();
            out.scal(sqrt(thrs));
        }
        else if (lambda_1 > 0.999999) {
            // pure l1 constraint
            this->l1project(out, thrs, pos);
        }
        else {
            // mixed case: reduce to mode 2 with rescaled parameters
            this->sparseProject(out, thrs / (1.0 - lambda_1), 2, lambda_1 / (1 - lambda_1), 0, 0, pos);
        }
    }
};
/// variant dispatcher around l1l2project; mode selects which quantity is
/// constrained (see per-branch comments). gamma weights the secondary term.
template <typename floating_type>
inline void Vector<floating_type>::l1l2projectb(Vector<floating_type>& out, const floating_type thrs, const floating_type gamma, const bool pos,
    const int mode) {
    if (mode == 1) {
        /// min_u ||b-u||_2^2 / ||u||_2^2 + gamma ||u||_1 <= thrs
        // rescale to turn the constraint into the canonical l1l2project form;
        // *this is scaled and restored around the call
        this->scal(gamma);
        this->l1l2project(out, thrs, 2.0 / (gamma * gamma), pos);
        this->scal(floating_type(1.0 / gamma));
        out.scal(floating_type(1.0 / gamma));
    }
    else if (mode == 2) {
        /// min_u ||b-u||_2^2 / ||u||_1 + (gamma/2) ||u||_2^2 <= thrs
        this->l1l2project(out, thrs, gamma, pos);
    }
    else if (mode == 3) {
        /// min_u 0.5||b-u||_2^2 + gamma||u||_1 / ||u||_2^2 <= thrs
        out.copy(*this);
        if (pos)
            out.thrsPos();
        out.softThrshold(gamma);
        floating_type nrm = out.nrm2();
        if (nrm > thrs)
            out.scal(thrs / nrm);   // shrink back onto the l2 ball
    }
}
/// projection onto the elastic-net ball:
/// min_u ||b-u||_2^2  s.t.  ||u||_1 + (gamma/2)||u||_2^2 <= thrs
/// Same pivot strategy as l1project, generalised to the quadratic constraint;
/// the final threshold lambda_1 is the positive root of a quadratic.
template <typename floating_type>
inline void Vector<floating_type>::l1l2project(Vector<floating_type>& out, const floating_type thrs, const floating_type gamma, const bool pos) const {
    if (gamma == 0)
        return this->l1project(out, thrs, pos);   // degenerates to the l1 ball
    out.copy(*this);
    if (pos) {
        out.thrsPos();
    }
    else {
        vAbs<floating_type>(_n, out._X, out._X);
    }
    floating_type norm = out.sum() + gamma * out.nrm2sq();
    if (norm <= thrs) {
        if (!pos)
            out.copy(*this);   // already feasible: return the input
        return;
    }
    /// BEGIN
    floating_type* prU = out._X;
    INTM sizeU = _n;
    floating_type sum = 0;      // accumulated constraint value of the kept entries
    INTM sum_card = 0;          // their count
    while (sizeU > 0) {
        // put the pivot in prU[0]
        std::swap(prU[0], prU[sizeU / 2]);
        floating_type pivot = prU[0];
        INTM sizeG = 1;
        floating_type sumG = pivot + 0.5 * gamma * pivot * pivot;
        // partition: entries >= pivot to the front, accumulating x + gamma/2 x^2
        for (INTM i = 1; i < sizeU; ++i) {
            if (prU[i] >= pivot) {
                sumG += prU[i] + 0.5 * gamma * prU[i] * prU[i];
                std::swap(prU[sizeG++], prU[i]);
            }
        }
        if (sum + sumG - pivot * (1 + 0.5 * gamma * pivot) * (sum_card + sizeG) <
            thrs * (1 + gamma * pivot) * (1 + gamma * pivot)) {
            // threshold below the pivot: keep the whole group
            sum_card += sizeG;
            sum += sumG;
            prU += sizeG;
            sizeU -= sizeG;
        }
        else {
            // threshold above the pivot: recurse on the strict >pivot group
            ++prU;
            sizeU = sizeG - 1;
        }
    }
    // solve a*lambda^2 + b*lambda + c = 0 for the optimal threshold
    floating_type a = gamma * gamma * thrs + 0.5 * gamma * sum_card;
    floating_type b = 2 * gamma * thrs + sum_card;
    floating_type c = thrs - sum;
    floating_type delta = b * b - 4 * a * c;
    floating_type lambda_1 = (-b + sqrt(delta)) / (2 * a);
    out.copy(*this);
    if (pos) {
        out.thrsPos();
    }
    out.fastSoftThrshold(lambda_1);
    out.scal(floating_type(1.0 / (1 + lambda_1 * gamma)));   // elastic-net shrinkage
};
/// helper for fusedProjectHomotopy: combined contribution of the two fused
/// differences around an active jump, given the signs of the three
/// neighbouring jump variables and the two inverse gap lengths c1, c2.
template <typename floating_type>
static inline floating_type fusedHomotopyAux(const bool& sign1,
    const bool& sign2,
    const bool& sign3,
    const floating_type& c1,
    const floating_type& c2) {
    floating_type res = 0;
    // c1 contributes only when the sign flips across the left neighbour
    if (sign1 != sign2)
        res += sign2 ? c1 : -c1;
    // c2 contributes only when the sign flips across the right neighbour
    if (sign2 != sign3)
        res += sign2 ? c2 : -c2;
    return res;
};
/// fused-lasso signal approximation solved by a LARS-like homotopy on the
/// jump variables. If penalty is true, solves
///   min_u 0.5||b-u||_2^2 + lambda_1||u||_1 + lambda_2 Fused(u) + 0.5 lambda_3||u||_2^2
/// otherwise the path is followed until
///   lambda_1||u||_1 + lambda_2 Fused(u) + 0.5 lambda_3||u||_2^2 <= 1.
/// WARNING: destructive -- *this is overwritten with residual correlations
/// (DtR); alpha receives the solution.
template <typename floating_type>
inline void Vector<floating_type>::fusedProjectHomotopy(Vector<floating_type>& alpha,
    const floating_type lambda_1, const floating_type lambda_2, const floating_type lambda_3,
    const bool penalty) {
    floating_type* pr_DtR = _X;
    const INTM K = _n;
    alpha.setZeros();
    Vector<floating_type> u(K); // regularization path for gamma
    Vector<floating_type> Du(K); // regularization path for alpha
    Vector<floating_type> DDu(K); // regularization path for alpha
    Vector<floating_type> gamma(K); // auxiliary variable
    Vector<floating_type> c(K); // auxiliary variables
    Vector<floating_type> scores(K); // auxiliary variables
    gamma.setZeros();
    floating_type* pr_gamma = gamma.rawX();
    floating_type* pr_u = u.rawX();
    floating_type* pr_Du = Du.rawX();
    floating_type* pr_DDu = DDu.rawX();
    floating_type* pr_c = c.rawX();
    floating_type* pr_scores = scores.rawX();
    Vector<INTM> ind(K + 1);    // sorted indices of the active jumps (sentinel K)
    Vector<bool> signs(K);      // signs of the active jump correlations
    ind.set(K);
    INTM* pr_ind = ind.rawX();
    bool* pr_signs = signs.rawX();
    /// Computation of DtR
    floating_type sumBeta = this->sum();
    /// first element is selected, gamma and alpha are updated
    pr_gamma[0] = sumBeta / K;
    /// update alpha
    alpha.set(pr_gamma[0]);
    /// update DtR
    this->sub(alpha);
    // suffix sums turn residuals into jump correlations
    for (INTM j = K - 2; j >= 0; --j)
        pr_DtR[j] += pr_DtR[j + 1];
    pr_DtR[0] = 0;
    pr_ind[0] = 0;
    pr_signs[0] = pr_DtR[0] > 0;
    pr_c[0] = floating_type(1.0) / K;
    INTM currentInd = this->fmax();
    floating_type currentLambda = abs<floating_type>(pr_DtR[currentInd]);
    bool newAtom = true;
    /// Solve the Lasso using simplified LARS
    for (INTM i = 1; i < K; ++i) {
        /// exit if constraints are satisfied
        /// min_u ||b-u||_2^2 + lambda_1||u||_1 +lambda_2 Fused(u) + 0.5lambda3||u||_2^2
        if (penalty && currentLambda <= lambda_2)
            break;
        if (!penalty) {
            /// min_u ||b-u||_2^2 / lambda_1||u||_1 +lambda_2 Fused(u) + 0.5lambda3||u||_2^2 <= 1.0
            scores.copy(alpha);
            scores.softThrshold(lambda_1 * currentLambda / lambda_2);
            scores.scal(floating_type(1.0 / (1.0 + lambda_3 * currentLambda / lambda_2)));
            if (lambda_1 * scores.asum() + lambda_2 * scores.afused() + 0.5 *
                lambda_3 * scores.nrm2sq() >= floating_type(1.0)) break;
        }
        /// Update pr_ind and pr_c
        if (newAtom) {
            // insert currentInd in the sorted active list and refresh the
            // inverse gap lengths on either side of the insertion point
            INTM j;
            for (j = 1; j < i; ++j)
                if (pr_ind[j] > currentInd)
                    break;
            for (INTM k = i; k > j; --k) {
                pr_ind[k] = pr_ind[k - 1];
                pr_c[k] = pr_c[k - 1];
                pr_signs[k] = pr_signs[k - 1];
            }
            pr_ind[j] = currentInd;
            pr_signs[j] = pr_DtR[currentInd] > 0;
            pr_c[j - 1] = floating_type(1.0) / (pr_ind[j] - pr_ind[j - 1]);
            pr_c[j] = floating_type(1.0) / (pr_ind[j + 1] - pr_ind[j]);
        }
        // Compute u
        pr_u[0] = pr_signs[1] ? -pr_c[0] : pr_c[0];
        if (i == 1) {
            pr_u[1] = pr_signs[1] ? pr_c[0] + pr_c[1] : -pr_c[0] - pr_c[1];
        }
        else {
            pr_u[1] = pr_signs[1] ? pr_c[0] + pr_c[1] : -pr_c[0] - pr_c[1];
            pr_u[1] += pr_signs[2] ? -pr_c[1] : pr_c[1];
            for (INTM j = 2; j < i; ++j) {
                pr_u[j] = 2 * fusedHomotopyAux<floating_type>(pr_signs[j - 1],
                    pr_signs[j], pr_signs[j + 1], pr_c[j - 1], pr_c[j]);
            }
            pr_u[i] = pr_signs[i - 1] ? -pr_c[i - 1] : pr_c[i - 1];
            pr_u[i] += pr_signs[i] ? pr_c[i - 1] + pr_c[i] : -pr_c[i - 1] - pr_c[i];
        }
        // Compute Du (piecewise-constant direction in the original domain)
        pr_Du[0] = pr_u[0];
        for (INTM k = 1; k < pr_ind[1]; ++k)
            pr_Du[k] = pr_Du[0];
        for (INTM j = 1; j <= i; ++j) {
            pr_Du[pr_ind[j]] = pr_Du[pr_ind[j] - 1] + pr_u[j];
            for (INTM k = pr_ind[j] + 1; k < pr_ind[j + 1]; ++k)
                pr_Du[k] = pr_Du[pr_ind[j]];
        }
        /// Compute DDu (suffix sums of Du, matching the DtR transform)
        DDu.copy(Du);
        for (INTM j = K - 2; j >= 0; --j)
            pr_DDu[j] += pr_DDu[j + 1];
        /// Check constraints
        floating_type max_step1 = INFINITY;
        if (penalty) {
            max_step1 = currentLambda - lambda_2;
        }
        /// Check changes of sign
        floating_type max_step2 = INFINITY;
        INTM step_out = -1;
        for (INTM j = 1; j <= i; ++j) {
            floating_type ratio = -pr_gamma[pr_ind[j]] / pr_u[j];
            if (ratio > 0 && ratio <= max_step2) {
                max_step2 = ratio;
                step_out = j;
            }
        }
        floating_type max_step3 = INFINITY;
        /// Check new variables entering the active set
        for (INTM j = 1; j < K; ++j) {
            floating_type sc1 = (currentLambda - pr_DtR[j]) / (floating_type(1.0) - pr_DDu[j]);
            floating_type sc2 = (currentLambda + pr_DtR[j]) / (floating_type(1.0) + pr_DDu[j]);
            if (sc1 <= 1e-10)
                sc1 = INFINITY;
            if (sc2 <= 1e-10)
                sc2 = INFINITY;
            pr_scores[j] = MIN(sc1, sc2);
        }
        for (INTM j = 0; j <= i; ++j) {
            pr_scores[pr_ind[j]] = INFINITY;   // active atoms cannot re-enter
        }
        currentInd = scores.fmin();
        max_step3 = pr_scores[currentInd];
        floating_type step = MIN(max_step1, MIN(max_step3, max_step2));
        if (step == 0 || step == INFINITY)
            break;
        /// Update gamma, alpha, DtR, currentLambda
        for (INTM j = 0; j <= i; ++j) {
            pr_gamma[pr_ind[j]] += step * pr_u[j];
        }
        alpha.add(Du, step);
        this->add(DDu, -step);
        currentLambda -= step;
        if (step == max_step2) {
            /// Update signs,pr_ind, pr_c -- an atom leaves the active set
            for (INTM k = step_out; k <= i; ++k)
                pr_ind[k] = pr_ind[k + 1];
            pr_ind[i] = K;
            for (INTM k = step_out; k <= i; ++k)
                pr_signs[k] = pr_signs[k + 1];
            pr_c[step_out - 1] = floating_type(1.0) / (pr_ind[step_out] - pr_ind[step_out - 1]);
            pr_c[step_out] = floating_type(1.0) / (pr_ind[step_out + 1] - pr_ind[step_out]);
            i -= 2;          // compensate for the removal and the loop increment
            newAtom = false;
        }
        else {
            newAtom = true;
        }
    }
    // final soft-thresholding / shrinkage to account for the l1 and l2 terms
    if (penalty) {
        alpha.softThrshold(lambda_1);
        alpha.scal(floating_type(1.0 / (1.0 + lambda_3)));
    }
    else {
        alpha.softThrshold(lambda_1 * currentLambda / lambda_2);
        alpha.scal(floating_type(1.0 / (1.0 + lambda_3 * currentLambda / lambda_2)));
    }
};
/// iterative (coordinate-descent style) solver for the fused-lasso proximal
/// problem on the jump variables, run for itermax sweeps; the l1 term is
/// applied by a final soft-thresholding.
/// WARNING: destructive -- *this (beta) is overwritten with its suffix sums.
template <typename floating_type>
inline void Vector<floating_type>::fusedProject(Vector<floating_type>& alpha, const floating_type lambda_1, const floating_type lambda_2,
    const int itermax) {
    floating_type* pr_alpha = alpha.rawX();
    floating_type* pr_beta = _X;
    const INTM K = alpha.n();
    floating_type total_alpha = alpha.sum();
    /// Modification of beta (suffix sums)
    for (INTM i = K - 2; i >= 0; --i)
        pr_beta[i] += pr_beta[i + 1];
    for (INTM i = 0; i < itermax; ++i) {
        floating_type sum_alpha = 0;
        floating_type sum_diff = 0;
        /// Update first coordinate (no fused penalty on the base level)
        floating_type gamma_old = pr_alpha[0];
        pr_alpha[0] = (K * gamma_old + pr_beta[0] -
            total_alpha) / K;
        floating_type diff = pr_alpha[0] - gamma_old;
        sum_diff += diff;
        sum_alpha += pr_alpha[0];
        total_alpha += K * diff;
        /// Update alpha_j: each jump gamma_j = alpha_j - alpha_{j-1} is
        /// soft-thresholded at lambda_2 (note: loop locals intentionally
        /// shadow the outer gamma_old/diff)
        for (INTM j = 1; j < K; ++j) {
            pr_alpha[j] += sum_diff;
            floating_type gamma_old = pr_alpha[j] - pr_alpha[j - 1];
            floating_type gamma_new = softThrs((K - j) * gamma_old + pr_beta[j] -
                (total_alpha - sum_alpha), lambda_2) / (K - j);
            pr_alpha[j] = pr_alpha[j - 1] + gamma_new;
            floating_type diff = gamma_new - gamma_old;
            sum_diff += diff;
            sum_alpha += pr_alpha[j];
            total_alpha += (K - j) * diff;
        }
    }
    alpha.softThrshold(lambda_1);   // apply the l1 part at the end
};
/// sort the vector in place; mode==true -> increasing, false -> decreasing
/// (delegates to the LAPACK-style lasrt routine with the incr/decr selector)
template <typename floating_type>
inline void Vector<floating_type>::sort(const bool mode) {
    if (mode) {
        lasrt<floating_type>(incr, _n, _X);
    }
    else {
        lasrt<floating_type>(decr, _n, _X);
    }
};
/// non-destructive sort: out receives a sorted copy of *this
/// (mode==true -> increasing, false -> decreasing)
template <typename floating_type>
inline void Vector<floating_type>::sort(Vector<floating_type>& out, const bool mode) const {
    out.copy(*this);
    out.sort(mode);
};
/// in-place sort that also permutes key accordingly (quick_sort keeps keys
/// and values paired); mode==true -> increasing, false -> decreasing
template <typename floating_type>
inline void Vector<floating_type>::sort2(Vector<INTM>& key, const bool mode) {
    quick_sort(key.rawX(), _X, (INTM)0, _n - 1, mode);
};
/// non-destructive variant of sort2: out receives the sorted copy and key the
/// matching permutation
template <typename floating_type>
inline void Vector<floating_type>::sort2(Vector<floating_type>& out, Vector<INTM>& key, const bool mode) const {
    out.copy(*this);
    out.sort2(key, mode);
}
/// make a sparse copy: gather the non-zero values of *this and their indices
/// into vec, and record the number gathered in vec._L
template <typename floating_type> inline void Vector<floating_type>::toSparse(
    SpVector<floating_type>& vec) const {
    floating_type* values = vec._v;
    INTM* rows = vec._r;
    INTM nnz = 0;
    for (INTM i = 0; i < _n; ++i) {
        const floating_type x = _X[i];
        if (x != floating_type()) {
            values[nnz] = x;
            rows[nnz] = i;
            ++nnz;
        }
    }
    vec._L = nnz;
};
/// copy the entries selected by mask into out (in order), then shrink out's
/// logical size to the number of entries kept
template <typename floating_type>
inline void Vector<floating_type>::copyMask(Vector<floating_type>& out, Vector<bool>& mask) const {
    out.resize(_n);
    INTM kept = 0;
    for (INTM i = 0; i < _n; ++i)
        if (mask[i])
            out[kept++] = _X[i];
    out.setn(kept);
};
/// Lazily-updated dense vector: defers repeated global updates of the form
/// applied by add_scal (a multiplicative factor plus a multiple of a fixed
/// direction z) by accumulating, per time step, the running product of
/// factors (_stats1) and the running sum of additive coefficients (_stats2).
/// Individual coordinates are brought up to date on demand via update(ind),
/// using the time stamp stored in _dates.
template<typename floating_type, typename I> class LazyVector {
public:
    LazyVector(Vector<floating_type>& x, const Vector<floating_type>& z, const int n) : _x(x), _z(z), _n(n + 1), _p(x.n()) {
        _current_time = 0;
        _dates.resize(_p);
        _dates.setZeros();
        _stats1.resize(n + 1);
        _stats2.resize(n + 1);
        _stats1[0] = floating_type(1.0);   // empty product
        _stats2[0] = 0;                    // empty sum
    };
    // flush all pending updates and reset the clock
    void inline update() {
        for (int ii = 0; ii < _p; ++ii) {
            update(ii);
        }
        _current_time = 0;
        _dates.setZeros();
    };
    // bring coordinate ind up to date by replaying the aggregate of all
    // updates between its last-seen time and now
    void inline update(const I ind) {
        const int last_time = _dates[ind];
        if (last_time != _current_time) {
            _x[ind] = (_stats1[_current_time] / _stats1[last_time]) * _x[ind] + _stats1[_current_time] * (_stats2[_current_time] - _stats2[last_time]) * _z[ind];
            _dates[ind] = _current_time;
        }
    };
    // bring a subset of coordinates up to date
    void inline update(const Vector<I>& indices) {
        const int p = indices.n();
        for (int ii = 0; ii < p; ++ii) {
            update(indices[ii]);
        }
    };
    void inline add_scal(const floating_type a, const floating_type b) { // performs x <- a(x - b z)
        if (_current_time == _n)
            update();                      // stats buffers full: flush first
        _current_time++;
        _stats2[_current_time] = _stats2[_current_time - 1] + a / _stats1[_current_time - 1];
        _stats1[_current_time] = _stats1[_current_time - 1] * b;
        if (_stats1[_current_time] < 1e-7)
            update(); // to prevent numerical stability problems
    };
private:
    Vector<floating_type>& _x;             // the lazily-maintained vector
    const Vector<floating_type>& _z;       // fixed update direction
    const int _n;                          // stats capacity (+1 for time 0)
    const int _p;                          // dimension of x
    Vector<floating_type> _stats1, _stats2;
    Vector<int> _dates;                    // per-coordinate last-update time
    int _current_time;
};
/// Two-direction variant of LazyVector: defers updates that combine a
/// multiplicative factor (_stats1) with multiples of two fixed directions z1
/// and z2 (coefficients accumulated in _stats2/_stats3). Coordinates are
/// brought up to date on demand via update(ind) using time stamps in _dates.
template<typename floating_type, typename I> class DoubleLazyVector {
public:
    DoubleLazyVector(Vector<floating_type>& x, const Vector<floating_type>& z1, const Vector<floating_type>& z2, const int n) : _x(x), _z1(z1), _z2(z2), _n(n + 1), _p(x.n()) {
        _current_time = 0;
        _dates.resize(_p);
        _dates.setZeros();
        _stats1.resize(n + 1);
        _stats2.resize(n + 1);
        _stats3.resize(n + 1);
        _stats1[0] = floating_type(1.0);   // empty product
        _stats2[0] = 0;                    // empty sums
        _stats3[0] = 0;
    };
    // flush all pending updates and reset the clock
    void inline update() {
        for (int ii = 0; ii < _p; ++ii) {
            update(ii);
        }
        _current_time = 0;
        _dates.setZeros();
    };
    // replay the aggregate of updates since this coordinate was last touched
    void inline update(const I ind) {
        const int last_time = _dates[ind];
        if (last_time != _current_time) {
            _x[ind] = _stats1[_current_time] * (_x[ind] / _stats1[last_time] + (_stats2[_current_time] - _stats2[last_time]) * _z1[ind] + (_stats3[_current_time] - _stats3[last_time]) * _z2[ind]);
            _dates[ind] = _current_time;
        }
    };
    // bring a subset of coordinates up to date
    void inline update(const Vector<I>& indices) {
        const int p = indices.n();
        for (int ii = 0; ii < p; ++ii) {
            update(indices[ii]);
        }
    };
    // record one deferred step: scale by c and add a*z1 + b*z2 (in the
    // rescaled coordinates maintained by _stats1)
    void inline add_scal(const floating_type a, const floating_type b, const floating_type c) {
        if (_current_time == _n)
            update();                      // stats buffers full: flush first
        _current_time++;
        _stats1[_current_time] = _stats1[_current_time - 1] * c;
        _stats2[_current_time] = _stats2[_current_time - 1] + a / _stats1[_current_time];
        _stats3[_current_time] = _stats3[_current_time - 1] + b / _stats1[_current_time];
        if (_stats1[_current_time] < 1e-6)
            update(); // to prevent numerical stability problems
    };
private:
    Vector<floating_type>& _x;             // the lazily-maintained vector
    const Vector<floating_type>& _z1;      // first fixed direction
    const Vector<floating_type>& _z2;      // second fixed direction
    const int _n;                          // stats capacity (+1 for time 0)
    const int _p;                          // dimension of x
    Vector<floating_type> _stats1, _stats2, _stats3;
    Vector<int> _dates;                    // per-coordinate last-update time
    int _current_time;
};
#endif |
mrcore.c | /*
* No matter where you got this code, be aware that MIRACL is NOT
* free software. For commercial use a license is required.
* See www.shamus.ie
*
* MIRACL Core module - contains initialisation code and general purpose
* utilities
* mrcore.c
*
* Space can be saved by removing unneeded functions (mr_and ?)
*
* Copyright (c) 1988-2007 Shamus Software Ltd.
*/
#include "miracl.h"
#include <stdlib.h>
#include <string.h>
#ifdef MR_FP
#include <math.h>
#endif
/*** Multi-Threaded Support ***/
#ifndef MR_GENERIC_MT
#ifdef MR_OPENMP_MT
#include <omp.h>
#define MR_MIP_EXISTS
miracl *mr_mip;
#pragma omp threadprivate(mr_mip)
miracl *get_mip()
{
return mr_mip;
}
void mr_init_threading()
{
}
void mr_end_threading()
{
}
#endif
#ifdef MR_WINDOWS_MT
#include <windows.h>
DWORD mr_key;
miracl *get_mip()
{
return (miracl *)TlsGetValue(mr_key);
}
void mr_init_threading()
{
mr_key=TlsAlloc();
}
void mr_end_threading()
{
TlsFree(mr_key);
}
#endif
#ifdef MR_UNIX_MT
#include <pthread.h>
pthread_key_t mr_key;
miracl *get_mip()
{
return (miracl *)pthread_getspecific(mr_key);
}
void mr_init_threading()
{
pthread_key_create(&mr_key,(void(*)(void *))NULL);
}
void mr_end_threading()
{
pthread_key_delete(mr_key);
}
#endif
#ifndef MR_WINDOWS_MT
#ifndef MR_UNIX_MT
#ifndef MR_OPENMP_MT
#ifdef MR_STATIC
miracl mip;
miracl *mr_mip=&mip;
#else
miracl *mr_mip=NULL; /* MIRACL's one and only global variable */
#endif
#define MR_MIP_EXISTS
miracl *get_mip()
{
return (miracl *)mr_mip;
}
#endif
#endif
#endif
#ifdef MR_MIP_EXISTS
/* install mip as the active MIRACL instance; only compiled when mr_mip is a
   plain (non-thread-local) global, i.e. single-threaded builds */
void set_mip(miracl *mip)
{
    mr_mip=mip;
}
#endif
#endif
/* See Advanced Windows by Jeffrey Richter, Chapter 12 for methods for
creating different instances of this global for each executing thread
when using Windows '95/NT
*/
#ifdef MR_STATIC
#if MIRACL==8
static const int mr_small_primes[]=
{2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,97,101,103,
107,109,113,127,0};
#else
static const int mr_small_primes[]=
{2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,97,101,103,
107,109,113,127,131,137,139,149,151,157,163,167,173,179,181,191,193,197,199,211,
223,227,229,233,239,241,251,257,263,269,271,277,281,283,293,307,311,313,317,331,
337,347,349,353,359,367,373,379,383,389,397,401,409,419,421,431,433,439,443,449,
457,461,463,467,479,487,491,499,503,509,521,523,541,547,557,563,569,571,577,587,
593,599,601,607,613,617,619,631,641,643,647,653,659,661,673,677,683,691,701,709,
719,727,733,739,743,751,757,761,769,773,787,797,809,811,821,823,827,829,839,853,
857,859,863,877,881,883,887,907,911,919,929,937,941,947,953,967,971,977,983,991,
997,0};
#endif
#endif
#ifndef MR_STRIPPED_DOWN
#ifndef MR_NO_STANDARD_IO
/* names of MIRACL routines, indexed by the trace codes recorded in
   mr_mip->trace[] and printed by mr_berror(). There must be exactly 223
   entries (codes 0-222): a missing comma here silently concatenates two
   adjacent string literals and shifts every later name by one. */
static char *names[] =
{"your program","innum","otnum","jack","normalise",
"multiply","divide","incr","decr","premult",
"subdiv","fdsize","egcd","cbase",
"cinnum","cotnum","nroot","power",
"powmod","bigdig","bigrand","nxprime","isprime",
"mirvar","mad","multi_inverse","putdig",
"add","subtract","mirsys","xgcd",
"fpack","dconv","mr_shift","mround","fmul",
"fdiv","fadd","fsub","fcomp","fconv",
"frecip","fpmul","fincr","","ftrunc",
"frand","sftbit","build","logb2","expint",
"fpower","froot","fpi","fexp","flog","fpowf",
"ftan","fatan","fsin","fasin","fcos","facos",
"ftanh","fatanh","fsinh","fasinh","fcosh",
"facosh","flop","gprime","powltr","fft_mult",
"crt_init","crt","otstr","instr","cotstr","cinstr","powmod2",
"prepare_monty","nres","redc","nres_modmult","nres_powmod",
"nres_moddiv","nres_powltr","divisible","remain",
"fmodulo","nres_modadd","nres_modsub","nres_negate",
"ecurve_init","ecurve_add","ecurve_mult",
"epoint_init","epoint_set","epoint_get","nres_powmod2",
"nres_sqroot","sqroot","nres_premult","ecurve_mult2",
"ecurve_sub","trial_division","nxsafeprime","nres_lucas","lucas",
"brick_init","pow_brick","set_user_function",
"nres_powmodn","powmodn","ecurve_multn",
"ebrick_init","mul_brick","epoint_norm","nres_multi_inverse","",
"nres_dotprod","epoint_negate","ecurve_multi_add",
"ecurve2_init","","epoint2_set","epoint2_norm","epoint2_get",
"epoint2_comp","ecurve2_add","epoint2_negate","ecurve2_sub",
"ecurve2_multi_add","ecurve2_mult","ecurve2_multn","ecurve2_mult2",
"ebrick2_init","mul2_brick","prepare_basis","strong_bigrand",
"bytes_to_big","big_to_bytes","set_io_buffer_size",
"epoint_getxyz","epoint_double_add","nres_double_inverse",
"double_inverse","epoint_x","hamming","expb2","bigbits",
"nres_lazy","zzn2_imul","nres_double_modadd","nres_double_modsub",
/*155*/"","zzn2_from_int","zzn2_negate","zzn2_conj","zzn2_add",
"zzn2_sub","zzn2_smul","zzn2_mul","zzn2_inv","zzn2_timesi","zzn2_powl",
"zzn2_from_bigs","zzn2_from_big","zzn2_from_ints",
"zzn2_sadd","zzn2_ssub","zzn2_times_irp","zzn2_div2",
"zzn3_from_int","zzn3_from_ints","zzn3_from_bigs",
"zzn3_from_big","zzn3_negate","zzn3_powq","zzn3_init",
"zzn3_add","zzn3_sadd","zzn3_sub","zzn3_ssub","zzn3_smul",
"zzn3_imul","zzn3_mul","zzn3_inv","zzn3_div2","zzn3_timesi",
"epoint_multi_norm","mr_jsf","epoint2_multi_norm",
"ecn2_compare","ecn2_norm","ecn2_set","zzn2_txx",
"zzn2_txd","nres_div2","nres_div3","zzn2_div3",
"ecn2_setx","ecn2_rhs","zzn2_qr","zzn2_sqrt","ecn2_add","ecn2_mul2_jsf","ecn2_mul",
"nres_div5","zzn2_div5","zzn2_sqr","ecn2_add_sub","ecn2_psi","invmodp",
"zzn2_multi_inverse","ecn2_multi_norm","ecn2_precomp","ecn2_mul4_gls_v",
"ecn2_mul2","ecn2_precomp_gls","ecn2_mul2_gls", /* fixed: missing comma had fused the next two names */
"ecn2_brick_init","ecn2_mul_brick_gls"};
/* 0 - 222 (223 in all) */
#endif
#endif
#ifdef MR_NOASM
/* C only versions of muldiv/muldvd/muldvd2/muldvm */
/* Note that mr_large should be twice the size of mr_small */
/* portable C replacement for the assembler primitive:
   computes q = (a*b+c)/m and stores the remainder in *rp.
   mr_large must hold a full product of two mr_smalls.
   NOTE(review): ldres looks unused here but is presumably referenced by the
   MR_LROUND macro on some builds -- do not remove without checking miracl.h */
mr_small muldiv(mr_small a,mr_small b,mr_small c,mr_small m,mr_small *rp)
{
    mr_small q;
    mr_large ldres,p=(mr_large)a*b+c;
    q=(mr_small)(MR_LROUND(p/m));
    *rp=(mr_small)(p-(mr_large)q*m);
    return q;
}
#ifdef MR_FP_ROUNDING
/* variant of muldiv taking a precomputed reciprocal im (presumably 1/m) so
   the division becomes a multiplication; remainder stored in *rp.
   Only compiled under MR_FP_ROUNDING. */
mr_small imuldiv(mr_small a,mr_small b,mr_small c,mr_small m,mr_large im,mr_small *rp)
{
    mr_small q;
    mr_large ldres,p=(mr_large)a*b+c;
    q=(mr_small)MR_LROUND(p*im);
    *rp=(mr_small)(p-(mr_large)q*m);
    return q;
}
#endif
#ifndef MR_NOFULLWIDTH
/* divide the double-word value (a:c) -- a in the top half, c in the bottom --
   by m; returns the quotient and stores the remainder in *rp */
mr_small muldvm(mr_small a,mr_small c,mr_small m,mr_small *rp)
{
    mr_small q;
    union doubleword dble;
    dble.h[MR_BOT]=c;
    dble.h[MR_TOP]=a;
    q=(mr_small)(dble.d/m);
    *rp=(mr_small)(dble.d-(mr_large)q*m);
    return q;
}
/* full-width multiply-accumulate: computes a*b+c as a double word, returns
   the top half and stores the bottom half in *rp */
mr_small muldvd(mr_small a,mr_small b,mr_small c,mr_small *rp)
{
    union doubleword dble;
    dble.d=(mr_large)a*b+c;
    *rp=dble.h[MR_BOT];
    return dble.h[MR_TOP];
}
/* two-accumulator multiply-accumulate: computes a*b + *c + *rp as a double
   word; the bottom half is returned through *rp and the carry (top half)
   through *c */
void muldvd2(mr_small a,mr_small b,mr_small *c,mr_small *rp)
{
    union doubleword dble;
    dble.d=(mr_large)a*b+*c+*rp;
    *rp=dble.h[MR_BOT];
    *c=dble.h[MR_TOP];
}
#endif
#endif
#ifdef MR_NOFULLWIDTH
/* no FULLWIDTH working, so supply dummies */
/*
mr_small muldvd(mr_small a,mr_small b,mr_small c,mr_small *rp)
{
return (mr_small)0;
}
mr_small muldvm(mr_small a,mr_small c,mr_small m,mr_small *rp)
{
return (mr_small)0;
}
void muldvd2(mr_small a,mr_small b,mr_small *c,mr_small *rp)
{
}
*/
#endif
#ifndef MR_NO_STANDARD_IO
static void mputs(char *s)
{ /* write a NUL-terminated string to stdout, one character at a time */
    while (*s != 0) fputc((int)*s++, stdout);
}
#endif
/* Central MIRACL error handler.  If error trapping is enabled (ERCON)
   the code is simply recorded in mr_mip->ERNUM for the caller to test
   and the routine returns.  Otherwise (stdio available) it prints a
   diagnostic - including the routine-name call trace in non-stripped
   builds - and terminates the process with exit(0). */
void mr_berror(_MIPD_ int nerr)
{ /* Big number error routine */
#ifndef MR_STRIPPED_DOWN
int i;
#endif
#ifdef MR_OS_THREADS
miracl *mr_mip=get_mip();
#endif
if (mr_mip->ERCON)
{
/* trapping enabled: record the error number and let the caller cope */
mr_mip->ERNUM=nerr;
return;
}
#ifndef MR_NO_STANDARD_IO
#ifndef MR_STRIPPED_DOWN
/* print the failing routine and then the chain of callers from the
   trace stack maintained by the MR_IN/MR_OUT macros */
mputs("\nMIRACL error from routine ");
if (mr_mip->depth<MR_MAXDEPTH) mputs(names[mr_mip->trace[mr_mip->depth]]);
else mputs("???");
fputc('\n',stdout);
for (i=mr_mip->depth-1;i>=0;i--)
{
mputs(" called from ");
if (i<MR_MAXDEPTH) mputs(names[mr_mip->trace[i]]);
else mputs("???");
fputc('\n',stdout);
}
/* map the error number to a human-readable message */
switch (nerr)
{
case 1 :
mputs("Number base too big for representation\n");
break;
case 2 :
mputs("Division by zero attempted\n");
break;
case 3 :
mputs("Overflow - Number too big\n");
break;
case 4 :
mputs("Internal result is negative\n");
break;
case 5 :
mputs("Input format error\n");
break;
case 6 :
mputs("Illegal number base\n");
break;
case 7 :
mputs("Illegal parameter usage\n");
break;
case 8 :
mputs("Out of space\n");
break;
case 9 :
mputs("Even root of a negative number\n");
break;
case 10:
mputs("Raising integer to negative power\n");
break;
case 11:
mputs("Attempt to take illegal root\n");
break;
case 12:
mputs("Integer operation attempted on Flash number\n");
break;
case 13:
mputs("Flash overflow\n");
break;
case 14:
mputs("Numbers too big\n");
break;
case 15:
mputs("Log of a non-positive number\n");
break;
case 16:
mputs("Flash to double conversion failure\n");
break;
case 17:
mputs("I/O buffer overflow\n");
break;
case 18:
mputs("MIRACL not initialised - no call to mirsys()\n");
break;
case 19:
mputs("Illegal modulus \n");
break;
case 20:
mputs("No modulus defined\n");
break;
case 21:
mputs("Exponent too big\n");
break;
case 22:
mputs("Unsupported Feature - check mirdef.h\n");
break;
case 23:
mputs("Specified double length type isn't double length\n");
break;
case 24:
mputs("Specified basis is NOT irreducible\n");
break;
case 25:
mputs("Unable to control Floating-point rounding\n");
break;
case 26:
mputs("Base must be binary (MR_ALWAYS_BINARY defined in mirdef.h ?)\n");
break;
case 27:
mputs("No irreducible basis defined\n");
break;
case 28:
mputs("Composite modulus\n");
break;
default:
mputs("Undefined error\n");
break;
}
exit(0);
#else
/* stripped-down build: no trace or message table, just bail out */
mputs("MIRACL error\n");
exit(0);
#endif
#endif
}
#ifndef MR_STRIPPED_DOWN
/* Print one line of execution trace: a dash per call-stack level,
   a '>' marker, then the name of the routine currently on top of the
   MR_IN/MR_OUT trace stack.  No-op when stdio is unavailable. */
void mr_track(_MIPDO_ )
{ /* track course of program execution *
* through the MIRACL routines */
#ifndef MR_NO_STANDARD_IO
int i;
#ifdef MR_OS_THREADS
miracl *mr_mip=get_mip();
#endif
for (i=0;i<mr_mip->depth;i++) fputc('-',stdout);
fputc('>',stdout);
mputs(names[mr_mip->trace[mr_mip->depth]]);
fputc('\n',stdout);
#endif
}
#endif
#ifndef MR_NO_RAND
/* Return the next pseudo-random mr_small from the Marsaglia-Zaman
   subtract-with-borrow generator (state in mr_mip->ira[], borrow flag
   in mr_mip->borrow, cursor in mr_mip->rndptr).  When the word size
   exceeds 32 bits, two consecutive 32-bit state words are spliced
   together to fill the result.  When the buffer of NK values is
   exhausted the whole batch is regenerated in one pass. */
mr_small brand(_MIPDO_ )
{ /* Marsaglia & Zaman random number generator */
int i,k;
mr_unsign32 pdiff,t;
mr_small r;
#ifdef MR_OS_THREADS
miracl *mr_mip=get_mip();
#endif
if (mr_mip->lg2b>32)
{ /* underlying type is > 32 bits. Assume <= 64 bits */
mr_mip->rndptr+=2;
if (mr_mip->rndptr<NK-1)
{
/* glue two 32-bit words: high word shifted up, low word added in */
r=(mr_small)mr_mip->ira[mr_mip->rndptr];
r=mr_shiftbits(r,mr_mip->lg2b-32);
r+=(mr_small)mr_mip->ira[mr_mip->rndptr+1];
return r;
}
}
else
{
mr_mip->rndptr++;
if (mr_mip->rndptr<NK) return (mr_small)mr_mip->ira[mr_mip->rndptr];
}
mr_mip->rndptr=0;
for (i=0,k=NK-NJ;i<NK;i++,k++)
{ /* calculate next NK values */
if (k==NK) k=0;
t=mr_mip->ira[k];
/* subtract-with-borrow step: ira[i] = ira[k] - ira[i] - borrow,
   with the new borrow deduced from unsigned wrap-around */
pdiff=t - mr_mip->ira[i] - mr_mip->borrow;
if (pdiff<t) mr_mip->borrow=0;
if (pdiff>t) mr_mip->borrow=1;
mr_mip->ira[i]=pdiff;
}
if (mr_mip->lg2b>32)
{ /* double up */
r=(mr_small)mr_mip->ira[0];
r=mr_shiftbits(r,mr_mip->lg2b-32);
r+=(mr_small)mr_mip->ira[1];
return r;
}
else return (mr_small)(mr_mip->ira[0]);
}
/* Seed the brand() generator: scatter a Fibonacci-like sequence derived
   from the seed across the state array (using stride NV modulo NK),
   reset the borrow and cursor, then run 1000 throw-away iterations to
   mix the state before first real use. */
void irand(_MIPD_ mr_unsign32 seed)
{ /* initialise random number system */
int i,in;
mr_unsign32 t,m=1L;
#ifdef MR_OS_THREADS
miracl *mr_mip=get_mip();
#endif
mr_mip->borrow=0L;
mr_mip->rndptr=0;
mr_mip->ira[0]=seed;
for (i=1;i<NK;i++)
{ /* fill initialisation vector */
in=(NV*i)%NK;
mr_mip->ira[in]=m;
t=m;
m=seed-m;
seed=t;
}
for (i=0;i<1000;i++) brand(_MIPPO_ ); /* "warm-up" & stir the generator */
}
#endif
/* Shift x left by n bits when n>0, or right by -n bits when n<0;
   n==0 returns x unchanged.  The MR_FP build emulates shifts by
   repeated doubling/halving (dres is presumably used by the MR_DIV
   macro - left in place).
   BUG FIX: the non-FP branch previously executed x>>=n with a negative
   n; shifting by a negative count is undefined behaviour in C.  The
   count is now negated before the right shift, matching the FP branch. */
mr_small mr_shiftbits(mr_small x,int n)
{
#ifdef MR_FP
int i;
mr_small dres;
if (n==0) return x;
if (n>0)
{
for (i=0;i<n;i++) x=x+x;
return x;
}
n=-n;
for (i=0;i<n;i++) x=MR_DIV(x,2.0);
return x;
#else
if (n==0) return x;
if (n>0) x<<=n;
else x>>=(-n); /* was x>>=n - UB for negative shift counts */
return x;
#endif
}
/* Configure the internal number base from the user-requested base nb.
   If nb is a suitable power of 2 the full word width is used (base=0,
   pack digits per word); otherwise as many base-nb digits as fit are
   packed per word and mr_mip->base holds nb^pack.  Returns the inverse
   base under MR_FP_ROUNDING (0 signals failure there), else 0.
   NOTE(review): under MR_OS_THREADS the local mr_mip is declared inside
   the #ifndef MR_NOFULLWIDTH region - an MR_NOFULLWIDTH + MR_OS_THREADS
   build would leave mr_mip undeclared below; confirm that combination
   is not supported. */
mr_small mr_setbase(_MIPD_ mr_small nb)
{ /* set base. Pack as many digits as *
* possible into each computer word */
mr_small temp;
#ifdef MR_FP
mr_small dres;
#endif
#ifndef MR_NOFULLWIDTH
BOOL fits;
int bits;
#ifdef MR_OS_THREADS
miracl *mr_mip=get_mip();
#endif
fits=FALSE;
bits=MIRACL;
/* probe MIRACL/2, MIRACL/4, ... looking for nb == 2^bits */
while (bits>1)
{
bits/=2;
temp=((mr_small)1<<bits);
if (temp==nb)
{
fits=TRUE;
break;
}
if (temp<nb || (bits%2)!=0) break;
}
if (fits)
{ /* power-of-2 base: use full word width, base=0 flags this mode */
mr_mip->apbase=nb;
mr_mip->pack=MIRACL/bits;
mr_mip->base=0;
return 0;
}
#endif
mr_mip->apbase=nb;
mr_mip->pack=1;
mr_mip->base=nb;
#ifdef MR_SIMPLE_BASE
return 0;
#else
if (mr_mip->base==0) return 0;
/* pack as many base-nb digits per word as MAXBASE allows */
temp=MR_DIV(MAXBASE,nb);
while (temp>=nb)
{
temp=MR_DIV(temp,nb);
mr_mip->base*=nb;
mr_mip->pack++;
}
#ifdef MR_FP_ROUNDING
mr_mip->inverse_base=mr_invert(mr_mip->base);
return mr_mip->inverse_base;
#else
return 0;
#endif
#endif
}
#ifdef MR_FLASH
BOOL fit(big x,big y,int f)
{ /* returns TRUE if x/y would fit flash format of length f */
    int num,den;
    num=(int)(x->len&(MR_OBITS));
    den=(int)(y->len&(MR_OBITS));
    if (num==1 && x->w[0]==1) num=0;  /* a numerator of 1 needs no storage */
    if (den==1 && y->w[0]==1) den=0;  /* likewise a denominator of 1 */
    return (num+den<=f) ? TRUE : FALSE;
}
#endif
/* Total length of a big/flash number in words (sign bit masked off).
   In MR_FLASH builds the length word packs the numerator length in the
   low half and the denominator length in the high half; both are
   extracted and summed. */
int mr_lent(flash x)
{ /* return length of big or flash in words */
mr_lentype lx;
lx=(x->len&(MR_OBITS));
#ifdef MR_FLASH
return (int)((lx&(MR_MSK))+((lx>>(MR_BTS))&(MR_MSK)));
#else
return (int)lx;
#endif
}
void zero(flash x)
{ /* clear a big/flash number: zero every word, then mark length 0 */
    int k,words;
    mr_small *w;
    if (x==NULL) return;
#ifdef MR_FLASH
    words=mr_lent(x);
#else
    words=(x->len&MR_OBITS);
#endif
    w=x->w;
    for (k=words-1;k>=0;k--) w[k]=0;
    x->len=0;
}
/* Convert an unsigned int n into big number format in x.  For a
   full-width base (base==0) the words are filled directly - splitting n
   only when an int is wider than a word (MR_IBITS > MIRACL).  Otherwise
   n is decomposed by repeated division by the packed base. */
void uconvert(_MIPD_ unsigned int n ,big x)
{ /* convert unsigned integer n to big number format */
int m;
#ifdef MR_FP
mr_small dres;
#endif
#ifdef MR_OS_THREADS
miracl *mr_mip=get_mip();
#endif
zero(x);
if (n==0) return;
m=0;
#ifndef MR_SIMPLE_BASE
if (mr_mip->base==0)
{
#endif
#ifndef MR_NOFULLWIDTH
#if MR_IBITS > MIRACL
/* an int is wider than one word: peel off MIRACL bits at a time */
while (n>0)
{
x->w[m++]=(mr_small)(n%((mr_small)1<<(MIRACL)));
n/=((mr_small)1<<(MIRACL));
}
#else
x->w[m++]=(mr_small)n;
#endif
#endif
#ifndef MR_SIMPLE_BASE
}
else while (n>0)
{ /* packed base: extract base-digits by division */
x->w[m++]=MR_REMAIN((mr_small)n,mr_mip->base);
n/=mr_mip->base;
}
#endif
x->len=m;
}
void convert(_MIPD_ int n ,big x)
{ /* convert signed integer n to big number format */
    mr_lentype sign;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (n==0)
    {
        zero(x);
        return;
    }
    sign=0;
    if (n<0)
    { /* remember the sign, convert the magnitude */
        sign=MR_MSBIT;
        n=(-n);
    }
    uconvert(_MIPP_ (unsigned int)n,x);
    x->len|=sign;
}
#ifndef MR_STATIC
#ifdef mr_dltype
/* Convert a signed double-length integer n into big number format.
   The sign is stripped into s first; the magnitude is then decomposed
   either MIRACL bits at a time (full-width base) or by repeated
   division by the packed base, and the sign bit is merged back into
   the length word. */
void dlconv(_MIPD_ mr_dltype n,big x)
{ /* convert double length integer to big number format - rarely needed */
int m;
mr_lentype s;
#ifdef MR_FP
mr_small dres;
#endif
#ifdef MR_OS_THREADS
miracl *mr_mip=get_mip();
#endif
zero(x);
if (n==0) return;
s=0;
if (n<0)
{
s=MR_MSBIT;
n=(-n);
}
m=0;
#ifndef MR_SIMPLE_BASE
if (mr_mip->base==0)
{
#endif
#ifndef MR_NOFULLWIDTH
while (n>0)
{ /* full-width base: peel off MIRACL bits per word */
x->w[m++]=(mr_small)(n%((mr_dltype)1<<(MIRACL)));
n/=((mr_dltype)1<<(MIRACL));
}
#endif
#ifndef MR_SIMPLE_BASE
}
else while (n>0)
{ /* packed base: extract base-digits by division */
x->w[m++]=(mr_small)MR_REMAIN(n,mr_mip->base);
n/=mr_mip->base;
}
#endif
x->len=(m|s);
}
#endif
/* Convert an unsigned long n into big number format in x.  Mirrors
   uconvert(), but splits on MR_LBITS > MIRACL for the full-width case. */
void ulgconv(_MIPD_ unsigned long n,big x)
{ /* convert unsigned long integer to big number format - rarely needed */
int m;
#ifdef MR_FP
mr_small dres;
#endif
#ifdef MR_OS_THREADS
miracl *mr_mip=get_mip();
#endif
zero(x);
if (n==0) return;
m=0;
#ifndef MR_SIMPLE_BASE
if (mr_mip->base==0)
{
#endif
#ifndef MR_NOFULLWIDTH
#if MR_LBITS > MIRACL
/* a long is wider than one word: peel off MIRACL bits at a time */
while (n>0)
{
x->w[m++]=(mr_small)(n%(1L<<(MIRACL)));
n/=(1L<<(MIRACL));
}
#else
x->w[m++]=(mr_small)n;
#endif
#endif
#ifndef MR_SIMPLE_BASE
}
else while (n>0)
{ /* packed base: extract base-digits by division */
x->w[m++]=MR_REMAIN(n,mr_mip->base);
n/=mr_mip->base;
}
#endif
x->len=m;
}
void lgconv(_MIPD_ long n,big x)
{ /* convert signed long integer n to big number format - rarely needed */
    mr_lentype sign;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (n==0)
    {
        zero(x);
        return;
    }
    sign=0;
    if (n<0)
    { /* remember the sign, convert the magnitude */
        sign=MR_MSBIT;
        n=(-n);
    }
    ulgconv(_MIPP_ (unsigned long)n,x);
    x->len|=sign;
}
/* Allocate and initialise a big/flash variable of the current system
   precision, optionally preset to the integer iv.  The header struct,
   the word array and its alignment padding are obtained in a single
   mr_alloc call; x->w is then pointed just past the struct, aligned up
   to an mr_small boundary.  Returns NULL on error or if the system is
   not active. */
flash mirvar(_MIPD_ int iv)
{ /* initialize big/flash number */
flash x;
int align;
char *ptr;
#ifdef MR_OS_THREADS
miracl *mr_mip=get_mip();
#endif
if (mr_mip->ERNUM) return NULL;
MR_IN(23);
if (!(mr_mip->active))
{
mr_berror(_MIPP_ MR_ERR_NO_MIRSYS);
MR_OUT
return NULL;
}
/* OK, now I control alignment.... */
/* Allocate space for big, the length, the pointer, and the array */
/* Do it all in one memory allocation - this is quicker */
/* Ensure that the array has correct alignment */
x=(big)mr_alloc(_MIPP_ mr_size(mr_mip->nib-1),1);
if (x==NULL)
{
MR_OUT
return x;
}
/* point x->w past the header, rounded up to an mr_small boundary */
ptr=(char *)&x->w;
align=(unsigned long)(ptr+sizeof(mr_small *))%sizeof(mr_small);
x->w=(mr_small *)(ptr+sizeof(mr_small *)+sizeof(mr_small)-align);
if (iv!=0) convert(_MIPP_ iv,x);
MR_OUT
return x;
}
#endif
/* Carve big/flash number 'index' of word-size sz out of a caller-supplied
   memory block.  The block start is first rounded up to an MR_SL
   boundary; each slot occupies mr_size(sz) bytes.  The word array
   pointer x->w is aligned up to an mr_small boundary just past the
   header, as in mirvar(). */
flash mirvar_mem_variable(char *mem,int index,int sz)
{
flash x;
int align;
char *ptr;
int offset,r;
/* alignment */
offset=0;
r=(unsigned long)mem%MR_SL;
if (r>0) offset=MR_SL-r;
x=(big)&mem[offset+mr_size(sz)*index];
ptr=(char *)&x->w;
align=(unsigned long)(ptr+sizeof(mr_small *))%sizeof(mr_small);
x->w=(mr_small *)(ptr+sizeof(mr_small *)+sizeof(mr_small)-align);
return x;
}
/* Initialise big/flash number 'index' from pre-allocated memory at the
   current system precision (mr_mip->nib-1 words).  Returns NULL if an
   error is already pending. */
flash mirvar_mem(_MIPD_ char *mem,int index)
{ /* initialize big/flash number from pre-allocated memory */
#ifdef MR_OS_THREADS
miracl *mr_mip=get_mip();
#endif
if (mr_mip->ERNUM) return NULL;
return mirvar_mem_variable(mem,index,mr_mip->nib-1);
}
/* Register a user callback in mr_mip->user.  Fails with
   MR_ERR_NO_MIRSYS if the system has not been initialised. */
void set_user_function(_MIPD_ BOOL (*user)(void))
{
#ifdef MR_OS_THREADS
miracl *mr_mip=get_mip();
#endif
if (mr_mip->ERNUM) return;
MR_IN(111)
if (!(mr_mip->active))
{
mr_berror(_MIPP_ MR_ERR_NO_MIRSYS);
MR_OUT
return;
}
mr_mip->user=user;
MR_OUT
}
#ifndef MR_STATIC
#ifndef MR_SIMPLE_IO
/* Resize the I/O buffer: securely wipe and free the old buffer, then
   allocate len+1 bytes.  len<0 is ignored; len==0 frees the buffer
   without replacing it (used during shutdown - note IOBSIZ is not
   reset in that case, and IOBUFF is left pointing at freed memory,
   so the buffer must not be used afterwards). */
void set_io_buffer_size(_MIPD_ int len)
{
int i;
#ifdef MR_OS_THREADS
miracl *mr_mip=get_mip();
#endif
if (len<0) return;
MR_IN(142)
/* wipe before freeing - the buffer may have held sensitive data */
for (i=0;i<mr_mip->IOBSIZ;i++) mr_mip->IOBUFF[i]=0;
mr_free(mr_mip->IOBUFF);
if (len==0)
{
MR_OUT
return;
}
mr_mip->IOBSIZ=len;
mr_mip->IOBUFF=(char *)mr_alloc(_MIPP_ len+1,1);
mr_mip->IOBUFF[0]='\0';
MR_OUT
}
#endif
#endif
/* Initialise a big from ROM given its fixed length */
/* Fill big x with 'len' words read sequentially from rom starting at
   *romptr, advancing *romptr as it goes.  Returns FALSE if the ROM
   would be over-run.  Leading zero words are stripped afterwards.
   On AVR the words are fetched from program memory. */
BOOL init_big_from_rom(big x,int len,const mr_small *rom,int romsize,int *romptr)
{
int i;
zero(x);
x->len=len;
for (i=0;i<len;i++)
{
if (*romptr>=romsize) return FALSE;
#ifdef MR_AVR
x->w[i]=pgm_read_byte_near(&rom[*romptr]);
#else
x->w[i]=rom[*romptr];
#endif
(*romptr)++;
}
mr_lzero(x);
return TRUE;
}
/* Initialise an elliptic curve point from ROM */
BOOL init_point_from_rom(epoint *P,int len,const mr_small *rom,int romsize,int *romptr)
{ /* read P->X then P->Y from ROM; FALSE if either read over-runs */
    if (!init_big_from_rom(P->X,len,rom,romsize,romptr) ||
        !init_big_from_rom(P->Y,len,rom,romsize,romptr))
        return FALSE;
    P->marker=MR_EPOINT_NORMALIZED;
    return TRUE;
}
/* Public entry point: initialise the MIRACL system for nd digits (or
   -nd bytes) in base nb.  Depending on the threading/static build
   options, mr_mip is either passed in by the caller, stored in
   thread-local storage, or allocated here and published via the
   "global" mip pointer; the real work is done by mirsys_basic(). */
miracl *mirsys(_MIPD_ int nd,mr_small nb)
{ /* Initialize MIRACL system to *
* use numbers to base nb, and *
* nd digits or (-nd) bytes long */
/* In these cases mr_mip is passed as the first parameter */
#ifdef MR_GENERIC_MT
return mirsys_basic(mr_mip,nd,nb);
#else
#ifdef MR_STATIC
return mirsys_basic(mr_mip,nd,nb);
#endif
#endif
/* In these cases mr_mip is a "global" pointer and the mip itself is allocated from the heap.
In fact mr_mip (and mip) may be thread specific if some multi-threading scheme is implemented */
#ifndef MR_GENERIC_MT
#ifndef MR_STATIC
#ifdef MR_WINDOWS_MT
miracl *mr_mip=mr_first_alloc();
TlsSetValue(mr_key,mr_mip);
#endif
#ifdef MR_UNIX_MT
miracl *mr_mip=mr_first_alloc();
pthread_setspecific(mr_key,mr_mip);
#endif
#ifdef MR_OPENMP_MT
mr_mip=mr_first_alloc();
#endif
#ifndef MR_WINDOWS_MT
#ifndef MR_UNIX_MT
#ifndef MR_OPENMP_MT
mr_mip=mr_first_alloc();
#endif
#endif
#endif
mr_mip=get_mip();
return mirsys_basic(mr_mip,nd,nb);
#endif
#endif
}
/* Core system initialisation: validates the requested base, configures
   the digit-packing parameters via mr_setbase(), derives lg2b/base2,
   computes the big-number precision (nib), allocates the workspace and
   wires up all the scratch bigs (w0..w15, modulus, etc.).  On any
   failure an error is raised and the partially-initialised mip is
   returned with ERNUM set. */
miracl *mirsys_basic(miracl *mr_mip,int nd,mr_small nb)
{
#ifndef MR_NO_RAND
int i;
#endif
mr_small b,nw;
#ifdef MR_FP
mr_small dres;
#endif
if (mr_mip==NULL) return NULL;
#ifndef MR_STRIPPED_DOWN
/* seed the call-trace stack (29 = mirsys) */
mr_mip->depth=0;
mr_mip->trace[0]=0;
mr_mip->depth++;
mr_mip->trace[mr_mip->depth]=29;
#endif
/* digest hardware configuration */
#ifdef MR_NO_STANDARD_IO
mr_mip->ERCON=TRUE;
#else
mr_mip->ERCON=FALSE;
#endif
#ifndef MR_STATIC
mr_mip->logN=0;
mr_mip->degree=0;
mr_mip->chin.NP=0;
#endif
mr_mip->user=NULL;
mr_mip->same=FALSE;
mr_mip->first_one=FALSE;
mr_mip->debug=FALSE;
mr_mip->AA=0;
#ifndef MR_AFFINE_ONLY
mr_mip->coord=MR_NOTSET;
#endif
#ifdef MR_NOFULLWIDTH
/* base 0 (full word width) is not available in this build */
if (nb==0)
{
mr_berror(_MIPP_ MR_ERR_BAD_BASE);
MR_OUT
return mr_mip;
}
#endif
#ifndef MR_FP
#ifdef mr_dltype
#ifndef MR_NOFULLWIDTH
if (sizeof(mr_dltype)<2*sizeof(mr_utype))
{ /* double length type, isn't */
mr_berror(_MIPP_ MR_ERR_NOT_DOUBLE_LEN);
MR_OUT
return mr_mip;
}
#endif
#endif
#endif
if (nb==1 || nb>MAXBASE)
{
mr_berror(_MIPP_ MR_ERR_BAD_BASE);
MR_OUT
return mr_mip;
}
#ifdef MR_FP_ROUNDING
if (mr_setbase(_MIPP_ nb)==0)
{ /* unable in fact to control FP rounding */
mr_berror(_MIPP_ MR_ERR_NO_ROUNDING);
MR_OUT
return mr_mip;
}
#else
mr_setbase(_MIPP_ nb);
#endif
b=mr_mip->base;
#ifdef MR_SIMPLE_BASE
if (b!=0)
{
mr_berror(_MIPP_ MR_ERR_BAD_BASE);
MR_OUT
return mr_mip;
}
#endif
/* derive lg2b (bits per word) and base2 (2^lg2b, 0 for full width) */
mr_mip->lg2b=0;
mr_mip->base2=1;
#ifndef MR_SIMPLE_BASE
if (b==0)
{
#endif
mr_mip->lg2b=MIRACL;
mr_mip->base2=0;
#ifndef MR_SIMPLE_BASE
}
else while (b>1)
{
b=MR_DIV(b,2);
mr_mip->lg2b++;
mr_mip->base2*=2;
}
#endif
mr_mip->base_mask=mr_mip->base-1;
#ifdef MR_ALWAYS_BINARY
if (mr_mip->base!=mr_mip->base2)
{
mr_berror(_MIPP_ MR_ERR_NOT_BINARY);
MR_OUT
return mr_mip;
}
#endif
/* calculate total space for bigs */
/*
big -> |int len|small *ptr| alignment space | size in words +1| alignment up to multiple of 4 |
*/
if (nd>0) nw=MR_ROUNDUP(nd,mr_mip->pack);
else nw=MR_ROUNDUP(8*(-nd),mr_mip->lg2b);
if (nw<1) nw=1;
mr_mip->nib=(int)(nw+1); /* add one extra word for small overflows */
#ifdef MR_STATIC
if (nw>MR_STATIC)
{
mr_berror(_MIPP_ MR_ERR_TOO_BIG);
MR_OUT
return mr_mip;
}
#endif
/* NOTE(review): nib was already assigned this value above - the repeat
   is redundant but harmless */
mr_mip->nib=(int)(nw+1); /* add one extra word for small overflows */
#ifdef MR_FLASH
mr_mip->workprec=mr_mip->nib;
mr_mip->stprec=mr_mip->nib;
while (mr_mip->stprec>2 && mr_mip->stprec>MR_FLASH/mr_mip->lg2b)
mr_mip->stprec=(mr_mip->stprec+1)/2;
if (mr_mip->stprec<2) mr_mip->stprec=2;
#endif
#ifndef MR_DOUBLE_BIG
mr_mip->check=ON;
#else
mr_mip->check=OFF;
#endif
#ifndef MR_SIMPLE_BASE
#ifndef MR_SIMPLE_IO
mr_mip->IOBASE=10; /* defaults */
#endif
#endif
mr_mip->ERNUM=0;
mr_mip->NTRY=6;
mr_mip->MONTY=ON;
#ifdef MR_FLASH
mr_mip->EXACT=TRUE;
mr_mip->RPOINT=OFF;
#endif
#ifndef MR_STRIPPED_DOWN
mr_mip->TRACER=OFF;
#endif
#ifndef MR_SIMPLE_IO
mr_mip->INPLEN=0;
mr_mip->IOBSIZ=MR_DEFAULT_BUFFER_SIZE;
#endif
#ifdef MR_STATIC
mr_mip->PRIMES=mr_small_primes;
#else
mr_mip->PRIMES=NULL;
#ifndef MR_SIMPLE_IO
mr_mip->IOBUFF=(char *)mr_alloc(_MIPP_ MR_DEFAULT_BUFFER_SIZE+1,1);
#endif
#endif
#ifndef MR_SIMPLE_IO
mr_mip->IOBUFF[0]='\0';
#endif
mr_mip->qnr=0;
mr_mip->cnr=0;
mr_mip->TWIST=FALSE;
mr_mip->pmod8=0;
/* quick start for rng. irand(.) should be called first before serious use.. */
#ifndef MR_NO_RAND
mr_mip->ira[0]=0x55555555;
mr_mip->ira[1]=0x12345678;
for (i=2;i<NK;i++)
mr_mip->ira[i]=mr_mip->ira[i-1]+mr_mip->ira[i-2]+0x1379BDF1;
mr_mip->rndptr=NK;
mr_mip->borrow=0;
#endif
/* temporarily double nib to size the (double-length) workspace bigs,
   checking the doubled length still fits in the length field */
mr_mip->nib=2*mr_mip->nib+1;
#ifdef MR_FLASH
if (mr_mip->nib!=(mr_mip->nib&(MR_MSK)))
#else
if (mr_mip->nib!=(int)(mr_mip->nib&(MR_OBITS)))
#endif
{
mr_berror(_MIPP_ MR_ERR_TOO_BIG);
mr_mip->nib=(mr_mip->nib-1)/2;
MR_OUT
return mr_mip;
}
#ifndef MR_STATIC
mr_mip->workspace=(char *)memalloc(_MIPP_ MR_SPACES); /* grab workspace */
#else
memset(mr_mip->workspace,0,MR_BIG_RESERVE(MR_SPACES));
#endif
mr_mip->M=0;
mr_mip->fin=FALSE;
mr_mip->fout=FALSE;
mr_mip->active=ON;
/* restore the true single-length precision */
mr_mip->nib=(mr_mip->nib-1)/2;
/* allocate memory for workspace variables */
#ifndef MR_DOUBLE_BIG
mr_mip->w0=mirvar_mem(_MIPP_ mr_mip->workspace,0); /* double length */
mr_mip->w1=mirvar_mem(_MIPP_ mr_mip->workspace,2);
mr_mip->w2=mirvar_mem(_MIPP_ mr_mip->workspace,3);
mr_mip->w3=mirvar_mem(_MIPP_ mr_mip->workspace,4);
mr_mip->w4=mirvar_mem(_MIPP_ mr_mip->workspace,5);
mr_mip->w5=mirvar_mem(_MIPP_ mr_mip->workspace,6); /* double length */
mr_mip->w6=mirvar_mem(_MIPP_ mr_mip->workspace,8); /* double length */
mr_mip->w7=mirvar_mem(_MIPP_ mr_mip->workspace,10); /* double length */
mr_mip->w8=mirvar_mem(_MIPP_ mr_mip->workspace,12);
mr_mip->w9=mirvar_mem(_MIPP_ mr_mip->workspace,13);
mr_mip->w10=mirvar_mem(_MIPP_ mr_mip->workspace,14);
mr_mip->w11=mirvar_mem(_MIPP_ mr_mip->workspace,15);
mr_mip->w12=mirvar_mem(_MIPP_ mr_mip->workspace,16);
mr_mip->w13=mirvar_mem(_MIPP_ mr_mip->workspace,17);
mr_mip->w14=mirvar_mem(_MIPP_ mr_mip->workspace,18);
mr_mip->w15=mirvar_mem(_MIPP_ mr_mip->workspace,19);
mr_mip->sru=mirvar_mem(_MIPP_ mr_mip->workspace,20);
mr_mip->modulus=mirvar_mem(_MIPP_ mr_mip->workspace,21);
mr_mip->pR=mirvar_mem(_MIPP_ mr_mip->workspace,22); /* double length */
mr_mip->A=mirvar_mem(_MIPP_ mr_mip->workspace,24);
mr_mip->B=mirvar_mem(_MIPP_ mr_mip->workspace,25);
mr_mip->one=mirvar_mem(_MIPP_ mr_mip->workspace,26);
#ifdef MR_KCM
mr_mip->big_ndash=mirvar_mem(_MIPP_ mr_mip->workspace,27);
mr_mip->ws=mirvar_mem(_MIPP_ mr_mip->workspace,28);
mr_mip->wt=mirvar_mem(_MIPP_ mr_mip->workspace,29); /* double length */
#endif
#ifdef MR_FLASH
#ifdef MR_KCM
mr_mip->pi=mirvar_mem(_MIPP_ mr_mip->workspace,31);
#else
mr_mip->pi=mirvar_mem(_MIPP_ mr_mip->workspace,27);
#endif
#endif
#else
/* w0-w7 are double normal length */
mr_mip->w0=mirvar_mem(_MIPP_ mr_mip->workspace,0); /* quad length */
mr_mip->w1=mirvar_mem(_MIPP_ mr_mip->workspace,4); /* double length */
mr_mip->w2=mirvar_mem(_MIPP_ mr_mip->workspace,6);
mr_mip->w3=mirvar_mem(_MIPP_ mr_mip->workspace,8);
mr_mip->w4=mirvar_mem(_MIPP_ mr_mip->workspace,10);
mr_mip->w5=mirvar_mem(_MIPP_ mr_mip->workspace,12); /* quad length */
mr_mip->w6=mirvar_mem(_MIPP_ mr_mip->workspace,16); /* quad length */
mr_mip->w7=mirvar_mem(_MIPP_ mr_mip->workspace,20); /* quad length */
mr_mip->w8=mirvar_mem(_MIPP_ mr_mip->workspace,24);
mr_mip->w9=mirvar_mem(_MIPP_ mr_mip->workspace,25);
mr_mip->w10=mirvar_mem(_MIPP_ mr_mip->workspace,26);
mr_mip->w11=mirvar_mem(_MIPP_ mr_mip->workspace,27);
mr_mip->w12=mirvar_mem(_MIPP_ mr_mip->workspace,28);
mr_mip->w13=mirvar_mem(_MIPP_ mr_mip->workspace,29);
mr_mip->w14=mirvar_mem(_MIPP_ mr_mip->workspace,30);
mr_mip->w15=mirvar_mem(_MIPP_ mr_mip->workspace,31);
mr_mip->sru=mirvar_mem(_MIPP_ mr_mip->workspace,32);
mr_mip->modulus=mirvar_mem(_MIPP_ mr_mip->workspace,33);
mr_mip->pR=mirvar_mem(_MIPP_ mr_mip->workspace,34); /* double length */
mr_mip->A=mirvar_mem(_MIPP_ mr_mip->workspace,36);
mr_mip->B=mirvar_mem(_MIPP_ mr_mip->workspace,37);
mr_mip->one=mirvar_mem(_MIPP_ mr_mip->workspace,38);
#ifdef MR_KCM
mr_mip->big_ndash=mirvar_mem(_MIPP_ mr_mip->workspace,39);
mr_mip->ws=mirvar_mem(_MIPP_ mr_mip->workspace,40);
mr_mip->wt=mirvar_mem(_MIPP_ mr_mip->workspace,41); /* double length */
#endif
#ifdef MR_FLASH
#ifdef MR_KCM
mr_mip->pi=mirvar_mem(_MIPP_ mr_mip->workspace,43);
#else
mr_mip->pi=mirvar_mem(_MIPP_ mr_mip->workspace,39);
#endif
#endif
#endif
MR_OUT
return mr_mip;
}
#ifndef MR_STATIC
/* allocate space for a number of bigs from the heap */
/* Allocate one zeroed chunk big enough to hold 'num' bigs at the
   current precision (used for the shared workspace). */
void *memalloc(_MIPD_ int num)
{
#ifdef MR_OS_THREADS
miracl *mr_mip=get_mip();
#endif
return mr_alloc(_MIPP_ mr_big_reserve(num,mr_mip->nib-1),1);
}
#endif
/* Securely dispose of a chunk holding 'len' bigs: wipe it to zero
   first (it may contain secrets), then free it in non-static builds. */
void memkill(_MIPD_ char *mem,int len)
{
#ifdef MR_OS_THREADS
miracl *mr_mip=get_mip();
#endif
if (mem==NULL) return;
memset(mem,0,mr_big_reserve(len,mr_mip->nib-1));
#ifndef MR_STATIC
mr_free(mem);
#endif
}
#ifndef MR_STATIC
void mirkill(big x)
{ /* kill a big/flash variable: zeroise its contents, then free it */
    if (x!=NULL)
    {
        zero(x);
        mr_free(x);
    }
}
#endif
/* Shut the MIRACL system down: deactivate it, wipe and release the
   workspace, clear the RNG state, release the I/O buffer and PRIMES
   table (heap builds) or wipe the static buffer, then free the mip
   itself and clear the global/thread-local pointer where applicable. */
void mirexit(_MIPDO_ )
{ /* clean up after miracl */
int i;
#ifdef MR_WINDOWS_MT
miracl *mr_mip=get_mip();
#endif
#ifdef MR_UNIX_MT
miracl *mr_mip=get_mip();
#endif
#ifdef MR_OPENMP_MT
miracl *mr_mip=get_mip();
#endif
mr_mip->ERCON=FALSE;
mr_mip->active=OFF;
memkill(_MIPP_ mr_mip->workspace,MR_SPACES);
#ifndef MR_NO_RAND
/* erase the RNG state */
for (i=0;i<NK;i++) mr_mip->ira[i]=0L;
#endif
#ifndef MR_STATIC
#ifndef MR_SIMPLE_IO
/* len==0 wipes and frees the I/O buffer */
set_io_buffer_size(_MIPP_ 0);
#endif
if (mr_mip->PRIMES!=NULL) mr_free(mr_mip->PRIMES);
#else
#ifndef MR_SIMPLE_IO
for (i=0;i<=MR_DEFAULT_BUFFER_SIZE;i++)
mr_mip->IOBUFF[i]=0;
#endif
#endif
#ifndef MR_STATIC
mr_free(mr_mip);
#ifdef MR_WINDOWS_MT
TlsSetValue(mr_key, NULL); //Thank you Thales
#endif
#endif
#ifndef MR_GENERIC_MT
#ifndef MR_WINDOWS_MT
#ifndef MR_UNIX_MT
#ifndef MR_STATIC
mr_mip=NULL;
#endif
#endif
#endif
#endif
#ifdef MR_OPENMP_MT
mr_mip=NULL;
#endif
}
int exsign(flash x)
{ /* extract sign of big/flash number: PLUS or MINUS */
    return ((x->len&(MR_MSBIT))==0) ? PLUS : MINUS;
}
void insign(int s,flash x)
{ /* impose sign s on big/flash number x; a zero value keeps no sign */
    if (x->len==0) return;
    if (s>=0) x->len&=MR_OBITS;
    else x->len|=MR_MSBIT;
}
void mr_lzero(big x)
{ /* strip leading zero words from big number, preserving its sign */
    mr_lentype sign;
    int n;
    sign=(x->len&(MR_MSBIT));
    n=(int)(x->len&(MR_OBITS));
    while (n>0 && x->w[n-1]==0) n--;
    if (n>0) x->len=(n|sign);
    else x->len=0;
}
#ifndef MR_SIMPLE_IO
/* Extract the i-th packed base-apbase digit from x (digits are
   numbered from 1).  The containing word is located, then the digit
   is shifted down by repeated division by apbase and isolated with a
   final remainder. */
int getdig(_MIPD_ big x,int i)
{ /* extract a packed digit */
int k;
mr_small n;
#ifdef MR_FP
mr_small dres;
#endif
#ifdef MR_OS_THREADS
miracl *mr_mip=get_mip();
#endif
i--;
n=x->w[i/mr_mip->pack];
if (mr_mip->pack==1) return (int)n;
k=i%mr_mip->pack;
/* divide away the k lower digits within the word */
for (i=1;i<=k;i++)
n=MR_DIV(n,mr_mip->apbase);
return (int)MR_REMAIN(n,mr_mip->apbase);
}
int numdig(_MIPD_ big x)
{ /* returns number of digits in x */
    int count;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (x->len==0) return 0;
    /* start from the maximum possible and discard leading zero digits */
    count=(int)(x->len&(MR_OBITS))*mr_mip->pack;
    while (getdig(_MIPP_ x,count)==0) count--;
    return count;
}
/* Overwrite the i-th packed digit of x with n: read the current digit,
   scale both old and new digit up to their position within the word,
   and adjust the word by the difference.  Extends the length (keeping
   the sign) if the digit lies beyond the current length, raising
   MR_ERR_OVERFLOW if it would exceed the precision. */
void putdig(_MIPD_ int n,big x,int i)
{ /* insert a digit into a packed word */
int j,k,lx;
mr_small m,p;
mr_lentype s;
#ifdef MR_OS_THREADS
miracl *mr_mip=get_mip();
#endif
if (mr_mip->ERNUM) return;
MR_IN(26)
s=(x->len&(MR_MSBIT));
lx=(int)(x->len&(MR_OBITS));
m=getdig(_MIPP_ x,i);
p=n;
i--;
j=i/mr_mip->pack;
k=i%mr_mip->pack;
/* scale old digit m and new digit p up to position k in the word */
for (i=1;i<=k;i++)
{
m*=mr_mip->apbase;
p*=mr_mip->apbase;
}
if (j>=mr_mip->nib && (mr_mip->check || j>=2*mr_mip->nib))
{
mr_berror(_MIPP_ MR_ERR_OVERFLOW);
MR_OUT
return;
}
/* replace: subtract the old contribution, add the new one */
x->w[j]=(x->w[j]-m)+p;
if (j>=lx) x->len=((j+1)|s);
mr_lzero(x);
MR_OUT
}
#endif
#ifndef MR_FP
void mr_and(big x,big y,big z)
{ /* z= bitwise logical AND of x and y */
    int i,lx,ly,lz,n;
    if (x==y)
    { /* x AND x is just x */
        copy(x,z);
        return;
    }
#ifdef MR_FLASH
    lx=mr_lent(x);
    ly=mr_lent(y);
    lz=mr_lent(z);
#else
    ly=(y->len&(MR_OBITS));
    lx=(x->len&(MR_OBITS));
    lz=(z->len&(MR_OBITS));
#endif
    n=(ly<lx) ? ly : lx;          /* AND is zero beyond the shorter operand */
    for (i=0;i<n;i++) z->w[i]=x->w[i]&y->w[i];
    for (;i<lz;i++) z->w[i]=0;    /* clear any leftover words in z */
    z->len=n;
}
/*
void mr_or(big x,big y,big z)
{
int i,nx,ny,nz,nr;
if (x==y)
{
copy(x,z);
return;
}
#ifdef MR_FLASH
nx=mr_lent(x);
ny=mr_lent(y);
nz=mr_lent(z);
#else
ny=(y->len&(MR_OBITS));
nx=(x->len&(MR_OBITS));
nz=(z->len&(MR_OBITS));
#endif
if (ny<nx) nr=ny;
else nr=nx;
for (i=0;i<nr;i++)
z->w[i]=x->w[i]|y->w[i];
for (i=nr;i<nz;i++)
z->w[i]=0;
z->len=nr;
}
*/
#endif
void copy(flash x,flash y)
{ /* copy x to y: y=x */
    int i,lx,ly;
    mr_small *wx,*wy;
    if (x==y || y==NULL) return;
    if (x==NULL)
    { /* copying "nothing" clears the destination */
        zero(y);
        return;
    }
#ifdef MR_FLASH
    ly=mr_lent(y);
    lx=mr_lent(x);
#else
    ly=(y->len&(MR_OBITS));
    lx=(x->len&(MR_OBITS));
#endif
    wx=x->w;
    wy=y->w;
    for (i=0;i<lx;i++) wy[i]=wx[i];
    for (;i<ly;i++) wy[i]=0;       /* clear words y had beyond x's length */
    y->len=x->len;
}
void negify(flash x,flash y)
{ /* negate a big/flash variable: y=-x (zero stays unsigned) */
    copy(x,y);
    if (y->len==0) return;
    y->len^=MR_MSBIT;
}
void absol(flash x,flash y)
{ /* y=abs(x): copy then clear the sign bit */
    copy(x,y);
    y->len&=(MR_OBITS);
}
/* TRUE if x carries a flash (rational) value, detected by a non-zero
   denominator-length field in the upper half of the length word.
   Always FALSE when MR_FLASH is not compiled in. */
BOOL mr_notint(flash x)
{ /* returns TRUE if x is Flash */
#ifdef MR_FLASH
if ((((x->len&(MR_OBITS))>>(MR_BTS))&(MR_MSK))!=0) return TRUE;
#endif
return FALSE;
}
/* Set w = x * base^n by shifting whole words: n>0 moves words up
   (multiplication), n<0 moves them down (division).  Raises an error
   on flash input or overflow past the precision; a shift below zero
   length produces zero. */
void mr_shift(_MIPD_ big x,int n,big w)
{ /* set w=x.(mr_base^n) by shifting */
mr_lentype s;
int i,bl;
mr_small *gw=w->w;
#ifdef MR_OS_THREADS
miracl *mr_mip=get_mip();
#endif
if (mr_mip->ERNUM) return;
copy(x,w);
if (w->len==0 || n==0) return;
MR_IN(33)
if (mr_notint(w)) mr_berror(_MIPP_ MR_ERR_INT_OP);
s=(w->len&(MR_MSBIT));
bl=(int)(w->len&(MR_OBITS))+n;
if (bl<=0)
{ /* shifted entirely out of range: result is zero */
zero(w);
MR_OUT
return;
}
if (bl>mr_mip->nib && mr_mip->check) mr_berror(_MIPP_ MR_ERR_OVERFLOW);
if (mr_mip->ERNUM)
{
MR_OUT
return;
}
if (n>0)
{ /* shift words up, zero-fill the bottom */
for (i=bl-1;i>=n;i--)
gw[i]=gw[i-n];
for (i=0;i<n;i++)
gw[i]=0;
}
else
{ /* shift words down, zero-fill the vacated top */
n=(-n);
for (i=0;i<bl;i++)
gw[i]=gw[i+n];
for (i=0;i<n;i++)
gw[bl+i]=0;
}
w->len=(bl|s);
MR_OUT
}
int size(big x)
{ /* get size of big number; convert to *
* integer - if possible */
    int val,words;
    mr_lentype sign;
    if (x==NULL) return 0;
    sign=(x->len&MR_MSBIT);
    words=(int)(x->len&MR_OBITS);
    if (words==0) return 0;
    /* representable only if a single word below MR_TOOBIG */
    val=MR_TOOBIG;
    if (words==1 && x->w[0]<(mr_small)MR_TOOBIG) val=(int)x->w[0];
    return (sign==MR_MSBIT) ? (-val) : val;
}
int mr_compare(big x,big y)
{ /* compare x and y: =1 if x>y =-1 if x<y *
* =0 if x=y */
    int lx,ly,sig;
    mr_lentype sx,sy;
    if (x==y) return 0;
    sx=(x->len&MR_MSBIT);
    sy=(y->len&MR_MSBIT);
    sig=(sx==0) ? PLUS : MINUS;
    if (sx!=sy) return sig;        /* different signs: x's sign decides */
    lx=(int)(x->len&MR_OBITS);
    ly=(int)(y->len&MR_OBITS);
    if (lx!=ly) return (lx>ly) ? sig : -sig;   /* longer magnitude wins */
    for (lx--;lx>=0;lx--)
    { /* equal lengths: compare word by word, most significant first */
        if (x->w[lx]>y->w[lx]) return sig;
        if (x->w[lx]<y->w[lx]) return -sig;
    }
    return 0;
}
#ifdef MR_FLASH
/* Pack numerator n and denominator d into flash number x.  Lengths of
   1-word values equal to 1 are recorded as 0 (implicit).  The result
   sign is the XOR of the operand signs; the denominator words are
   appended after the numerator words and both lengths are packed into
   the length field.  Errors: zero denominator, x aliased to d, flash
   operands, or combined length exceeding the precision. */
void fpack(_MIPD_ big n,big d,flash x)
{ /* create floating-slash number x=n/d from *
* big integer numerator and denominator */
mr_lentype s;
int i,ld,ln;
#ifdef MR_OS_THREADS
miracl *mr_mip=get_mip();
#endif
if (mr_mip->ERNUM) return;
MR_IN(31)
ld=(int)(d->len&MR_OBITS);
if (ld==0) mr_berror(_MIPP_ MR_ERR_FLASH_OVERFLOW);
if (ld==1 && d->w[0]==1) ld=0;
if (x==d) mr_berror(_MIPP_ MR_ERR_BAD_PARAMETERS);
if (mr_notint(n) || mr_notint(d)) mr_berror(_MIPP_ MR_ERR_INT_OP);
s=(n->len&MR_MSBIT);
ln=(int)(n->len&MR_OBITS);
if (ln==1 && n->w[0]==1) ln=0;
if ((ld+ln>mr_mip->nib) && (mr_mip->check || ld+ln>2*mr_mip->nib))
mr_berror(_MIPP_ MR_ERR_FLASH_OVERFLOW);
if (mr_mip->ERNUM)
{
MR_OUT
return;
}
copy(n,x);
if (n->len==0)
{
MR_OUT
return;
}
/* combined sign = sign(n) XOR sign(d) */
s^=(d->len&MR_MSBIT);
if (ld==0)
{ /* denominator is 1: result is just the (signed) numerator */
if (x->len!=0) x->len|=s;
MR_OUT
return;
}
/* append denominator words after the numerator words */
for (i=0;i<ld;i++)
x->w[ln+i]=d->w[i];
x->len=(s|(ln+((mr_lentype)ld<<MR_BTS)));
MR_OUT
}
/* Extract the (signed) numerator of flash x into big y.  An implicit
   numerator of length 0 means magnitude 1, so +/-1 is produced.  When
   x and y alias, the denominator words are cleared in place instead of
   copying. */
void numer(_MIPD_ flash x,big y)
{ /* extract numerator of x */
int i,ln,ld;
mr_lentype s,ly;
#ifdef MR_OS_THREADS
miracl *mr_mip=get_mip();
#endif
if (mr_mip->ERNUM) return;
if (mr_notint(x))
{
s=(x->len&MR_MSBIT);
ly=(x->len&MR_OBITS);
ln=(int)(ly&MR_MSK);
if (ln==0)
{ /* implicit numerator of 1 - give it x's sign */
if(s==MR_MSBIT) convert(_MIPP_ (-1),y);
else convert(_MIPP_ 1,y);
return;
}
ld=(int)((ly>>MR_BTS)&MR_MSK);
if (x!=y)
{
for (i=0;i<ln;i++) y->w[i]=x->w[i];
for (i=ln;i<mr_lent(y);i++) y->w[i]=0;
}
else for (i=0;i<ld;i++) y->w[ln+i]=0;
y->len=(ln|s);
}
else copy(x,y);
}
/* Extract the denominator of flash x into big y.  A plain integer has
   denominator 1.  The denominator words start at offset ln (after the
   numerator); when x and y alias they are slid down in place. */
void denom(_MIPD_ flash x,big y)
{ /* extract denominator of x */
int i,ln,ld;
mr_lentype ly;
#ifdef MR_OS_THREADS
miracl *mr_mip=get_mip();
#endif
if (mr_mip->ERNUM) return;
if (!mr_notint(x))
{ /* not a flash value: denominator is 1 */
convert(_MIPP_ 1,y);
return;
}
ly=(x->len&MR_OBITS);
ln=(int)(ly&MR_MSK);
ld=(int)((ly>>MR_BTS)&MR_MSK);
for (i=0;i<ld;i++)
y->w[i]=x->w[ln+i];
if (x==y) for (i=0;i<ln;i++) y->w[ld+i]=0;
else for (i=ld;i<mr_lent(y);i++) y->w[i]=0;
y->len=ld;
}
#endif
unsigned int igcd(unsigned int x,unsigned int y)
{ /* Euclidean GCD of two unsigned ints; igcd(x,0)==x */
    unsigned int t;
    while (y!=0)
    {
        t=x%y;
        x=y;
        y=t;
    }
    return x;
}
unsigned long lgcd(unsigned long x,unsigned long y)
{ /* Euclidean GCD of two unsigned longs; lgcd(x,0)==x */
    unsigned long t;
    while (y!=0)
    {
        t=x%y;
        x=y;
        y=t;
    }
    return x;
}
unsigned int isqrt(unsigned int num,unsigned int guess)
{ /* integer square root by Newton's iteration starting from guess>0;
     terminates when the iterate repeats or cycles, then corrects any
     one-too-high overshoot */
    unsigned int next,prev;
    if (num==0) return 0;
    if (num<4) return 1;
    prev=guess;
    for (;;)
    {
        next=((num/guess)+guess)/2;
        if (next==guess || next==prev)
        { /* converged (possibly oscillating between two values) */
            if (next*next>num) next--;
            return next;
        }
        prev=guess;
        guess=next;
    }
}
unsigned long lsqrt(unsigned long num,unsigned long guess)
{ /* integer square root of an unsigned long by Newton's iteration,
     starting from guess>0; stops when the iterate repeats or cycles,
     then corrects any one-too-high overshoot */
    unsigned long next,prev;
    if (num==0) return 0;
    if (num<4) return 1;
    prev=guess;
    for (;;)
    {
        next=((num/guess)+guess)/2;
        if (next==guess || next==prev)
        { /* converged (possibly oscillating between two values) */
            if (next*next>num) next--;
            return next;
        }
        prev=guess;
        guess=next;
    }
}
/* Euclidean GCD over mr_small values, using the MR_REMAIN macro so it
   also works in MR_FP builds.  NOTE(review): dres looks unused but is
   presumably referenced by the MR_REMAIN macro under MR_FP - confirm
   before removing. */
mr_small sgcd(mr_small x,mr_small y)
{ /* integer GCD, returns GCD of x and y */
mr_small r;
#ifdef MR_FP
mr_small dres;
#endif
if (y==(mr_small)0) return x;
while ((r=MR_REMAIN(x,y))!=(mr_small)0)
x=y,y=r;
return y;
}
/* routines to support sliding-windows exponentiation *
* in various contexts */
/* Return the n-th bit (0 or 1) of big x.  The word index is n/lg2b and
   the bit index n%lg2b.  The MR_FP build emulates the shift-and-mask
   with division: the bit is odd/even parity of the word divided by 2^bit. */
int mr_testbit(_MIPD_ big x,int n)
{ /* return value of n-th bit of big */
#ifdef MR_OS_THREADS
miracl *mr_mip=get_mip();
#endif
#ifdef MR_FP
mr_small m,a,dres;
m=mr_shiftbits((mr_small)1,n%mr_mip->lg2b);
a=x->w[n/mr_mip->lg2b];
a=MR_DIV(a,m);
if ((MR_DIV(a,2.0)*2.0) != a) return 1;
#else
if ((x->w[n/mr_mip->lg2b] & ((mr_small)1<<(n%mr_mip->lg2b))) >0) return 1;
#endif
return 0;
}
void mr_addbit(_MIPD_ big x,int n)
{ /* add 2^n to positive x - caller guarantees that bit is currently zero */
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    mr_lentype word=n/mr_mip->lg2b;
    x->w[word]+=mr_shiftbits((mr_small)1,n%mr_mip->lg2b);
    if (x->len<word+1) x->len=word+1;   /* extend length if bit lies beyond it */
}
int recode(_MIPD_ big e,int t,int w,int i)
{ /* recode exponent for Comb method: gather w bits of e taken at
     stride t starting from bit i, most significant first */
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    int bit,acc;
    acc=0;
    for (bit=w-1;bit>=0;bit--)
        acc=(acc<<1)|mr_testbit(_MIPP_ e,i+bit*t);
    return acc;
}
/* Sliding-window scan for exponentiation - see the detailed contract in
   the original comment below.  The window is accumulated MSB-first,
   shrinking when two adjacent zero bits appear, and a single trailing
   zero is pushed into *nzs. */
int mr_window(_MIPD_ big x,int i,int *nbs,int * nzs,int window_size)
{ /* returns sliding window value, max. of 5 bits, *
* (Note from version 5.23 this can be changed by *
* setting parameter window_size. This can be *
* a useful space-saver) starting at i-th bit of big x. *
* nbs is number of bits processed, nzs is the number of *
* additional trailing zeros detected. Returns valid bit *
* pattern 1x..x1 with no two adjacent 0's. So 10101 *
* will return 21 with nbs=5, nzs=0. 11001 will return 3,*
* with nbs=2, nzs=2, having stopped after the first 11..*/
#ifdef MR_OS_THREADS
miracl *mr_mip=get_mip();
#endif
int j,r,w;
w=window_size;
/* check for leading 0 bit */
*nbs=1;
*nzs=0;
if (!mr_testbit(_MIPP_ x,i)) return 0;
/* adjust window size if not enough bits left */
if (i-w+1<0) w=i+1;
r=1;
for (j=i-1;j>i-w;j--)
{ /* accumulate bits. Abort if two 0's in a row */
(*nbs)++;
r*=2;
if (mr_testbit(_MIPP_ x,j)) r+=1;
if (r%4==0)
{ /* oops - too many zeros - shorten window */
r/=4;
*nbs-=2;
*nzs=2;
break;
}
}
if (r%2==0)
{ /* remove trailing 0 */
r/=2;
*nzs=1;
(*nbs)--;
}
return r;
}
int mr_window2(_MIPD_ big x,big y,int i,int *nbs,int *nzs)
{ /* two bit window for double exponentiation: combine the bits of x
     and y at columns i and i-1 into a value 1..15; *nbs is the number
     of columns consumed, *nzs flags a trailing all-zero column */
    int r;
    BOOL a,b,c,d;
    *nbs=1;
    *nzs=0;
    a=mr_testbit(_MIPP_ x,i); b=mr_testbit(_MIPP_ y,i);
    if (!a && !b) return 0;        /* both leading bits zero */
    r=2*a+b;                       /* bits are 0/1: encodes 01->1, 10->2, 11->3 */
    if (i<1) return r;             /* no second column available */
    c=mr_testbit(_MIPP_ x,i-1); d=mr_testbit(_MIPP_ y,i-1);
    if (!c && !d)
    { /* second column all zero - record it as a trailing zero */
        *nzs=1;
        return r;
    }
    *nbs=2;
    return 4*r+2*c+d;
}
/* Fractional-window NAF scanner - see the detailed contract in the
   original comment below.  Each signed digit is the difference of the
   corresponding bits of x3 (=3x) and x; accumulation stops once |r|
   exceeds the largest precomputed multiple, backtracking one bit if
   that leaves an even value mid-stream, then trailing zeros are
   stripped into *nzs. */
int mr_naf_window(_MIPD_ big x,big x3,int i,int *nbs,int *nzs,int store)
{ /* returns sliding window value, using fractional windows *
* where "store" precomputed values are precalulated and *
* stored. Scanning starts at the i-th bit of x. nbs is *
* the number of bits processed. nzs is number of *
* additional trailing zeros detected. x and x3 (which is *
* 3*x) are combined to produce the NAF (non-adjacent *
* form). So if x=11011(27) and x3 is 1010001, the LSB is *
* ignored and the value 100T0T (32-4-1=27) processed, *
* where T is -1. Note x.P = (3x-x)/2.P. This value will *
* return +7, with nbs=4 and nzs=1, having stopped after *
* the first 4 bits. If it goes too far, it must backtrack *
* Note in an NAF non-zero elements are never side by side, *
* so 10T10T won't happen. NOTE: return value n zero or *
* odd, -21 <= n <= +21 */
int nb,j,r,biggest;
/* get first bit */
nb=mr_testbit(_MIPP_ x3,i)-mr_testbit(_MIPP_ x,i);
*nbs=1;
*nzs=0;
if (nb==0) return 0;
if (i==0) return nb;
biggest=2*store-1;
if (nb>0) r=1;
else r=(-1);
for (j=i-1;j>0;j--)
{ /* accumulate signed digits until the window value gets too big */
(*nbs)++;
r*=2;
nb=mr_testbit(_MIPP_ x3,j)-mr_testbit(_MIPP_ x,j);
if (nb>0) r+=1;
if (nb<0) r-=1;
if (abs(r)>biggest) break;
}
if (r%2!=0 && j!=0)
{ /* backtrack */
if (nb>0) r=(r-1)/2;
if (nb<0) r=(r+1)/2;
(*nbs)--;
}
while (r%2==0)
{ /* remove trailing zeros */
r/=2;
(*nzs)++;
(*nbs)--;
}
return r;
}
/* Some general purpose elliptic curve stuff */
BOOL point_at_infinity(epoint *p)
{ /* TRUE iff p is a valid point marked as the point at infinity */
    return (p!=NULL && p->marker==MR_EPOINT_INFINITY);
}
#ifndef MR_STATIC
epoint* epoint_init(_MIPDO_ )
{ /* initialise an epoint to the general point at infinity.
     The structure and its coordinate bignums are created with a
     single heap allocation. Returns NULL on error. */
    epoint *p;
    char *ptr;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return NULL;

    MR_IN(96)

    /* Create space for whole structure in one heap access */
    p=(epoint *)mr_alloc(_MIPP_ mr_esize(mr_mip->nib-1),1);
    if (p==NULL)
    { /* BUGFIX: allocation can fail - previously the NULL result was
         dereferenced immediately below. mr_alloc has already flagged
         the error condition. */
        MR_OUT
        return NULL;
    }
    ptr=(char *)p+sizeof(epoint);          /* bignums live after the struct */
    p->X=mirvar_mem(_MIPP_ ptr,0);
    p->Y=mirvar_mem(_MIPP_ ptr,1);
#ifndef MR_AFFINE_ONLY
    p->Z=mirvar_mem(_MIPP_ ptr,2);
#endif
    p->marker=MR_EPOINT_INFINITY;

    MR_OUT
    return p;
}
#endif
epoint* epoint_init_mem_variable(_MIPD_ char *mem,int index,int sz)
{ /* initialise the index-th epoint inside the preallocated block mem,
     with coordinate bignums sized by sz. No heap allocation occurs:
     the epoint struct and its X/Y(/Z) bignums are laid out in mem. */
    epoint *p;
    char *ptr;
    int offset,r;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    /* round mem up to the next MR_SL boundary (alignment of the block) */
    offset=0;
    r=(unsigned long)mem%MR_SL;
    if (r>0) offset=MR_SL-r;

#ifndef MR_AFFINE_ONLY
    if (mr_mip->coord==MR_AFFINE)
        /* affine points carry no Z coordinate, so their slots are smaller */
        p=(epoint *)&mem[offset+index*mr_esize_a(sz)];
    else
#endif
    p=(epoint *)&mem[offset+index*mr_esize(sz)];

    ptr=(char *)p+sizeof(epoint);  /* bignum storage follows the struct */
    p->X=mirvar_mem_variable(ptr,0,sz);
    p->Y=mirvar_mem_variable(ptr,1,sz);
#ifndef MR_AFFINE_ONLY
    if (mr_mip->coord!=MR_AFFINE) p->Z=mirvar_mem_variable(ptr,2,sz);
#endif
    p->marker=MR_EPOINT_INFINITY;

    return p;
}
epoint* epoint_init_mem(_MIPD_ char *mem,int index)
{ /* initialise the index-th epoint in the preallocated block mem,
     using full-size (mr_mip->nib-1) coordinate bignums.
     Returns NULL if an error is already pending. */
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return NULL;

    return epoint_init_mem_variable(_MIPP_ mem,index,mr_mip->nib-1);
}
#ifndef MR_STATIC
/* heap-allocate zeroed, contiguous storage for num epoints */
void *ecp_memalloc(_MIPD_ int num)
{
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
#ifndef MR_AFFINE_ONLY
    /* affine points need less room - no Z coordinate */
    if (mr_mip->coord==MR_AFFINE)
        return mr_alloc(_MIPP_ mr_ecp_reserve_a(num,mr_mip->nib-1),1);
#endif
    return mr_alloc(_MIPP_ mr_ecp_reserve(num,mr_mip->nib-1),1);
}
#endif
void ecp_memkill(_MIPD_ char *mem,int num)
{ /* zeroise a block of num epoints and, when dynamically
     allocated, release it */
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mem!=NULL)
    {
#ifndef MR_AFFINE_ONLY
        if (mr_mip->coord==MR_AFFINE)
            memset(mem,0,mr_ecp_reserve_a(num,mr_mip->nib-1));
        else
#endif
        memset(mem,0,mr_ecp_reserve(num,mr_mip->nib-1));

#ifndef MR_STATIC
        mr_free(mem);
#endif
    }
}
#ifndef MR_STATIC
void epoint_free(epoint *pt)
{ /* wipe the coordinates of a point, then release it */
    if (pt!=NULL)
    {
        zero(pt->X);
        zero(pt->Y);
#ifndef MR_AFFINE_ONLY
        if (pt->marker==MR_EPOINT_GENERAL) zero(pt->Z);
#endif
        mr_free(pt);
    }
}
#endif
|
conv_dw_dilation_kernel_arm.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: haoluo@openailab.com
*/
#ifndef __CONV_DW_DILATION_KERNEL_ARM_H_
#define __CONV_DW_DILATION_KERNEL_ARM_H_
#include "tengine_ir.h"
#include "convolution_param.h"
#include "conv_dw_k5_k7_kernel_arm.h"
/* Depth-wise 3x3 convolution where the dilation and the padding both equal
 * `pad` (stride 1), so each output pixel reads taps at offsets -pad/0/+pad
 * per axis. Channels are processed in parallel; each row is split into a
 * left border, a NEON-vectorised middle (4 pixels per step), a scalar
 * middle remainder and a right border, and the rows into a top border,
 * interior and bottom border, so out-of-range taps are simply dropped.
 *
 * input_buf/output_buf : channel-major tensors [channel][input_h][input_w]
 * weight_buf           : 9 weights per channel (3x3 kernel, row-major)
 * bias                 : optional per-channel bias (may be NULL)
 * activation           : forwarded to elem_activation/vector_activation
 * Returns 0.
 */
int conv_dw_dilation_run(float* input_buf, float* weight_buf, float* bias, float* output_buf, int input_h, int input_w,
                         int channel, int pad, int activation, int num_thread)
{
    int channel_size = input_h * input_w;
    int mid_w = input_w - pad * 2;              /* fully-interior columns */
    int mid_block_end = (mid_w & -4) + pad;     /* end of the 4-wide vector span */
    int mid_end = mid_w + pad;                  /* first column of the right border */

#pragma omp parallel for num_threads(num_thread)
    for (int c = 0; c < channel; c++)
    {
        /* BUGFIX: the column index was previously declared at function
         * scope, i.e. *shared* between the OpenMP worker threads - a data
         * race whenever num_thread > 1. Declared here it is thread-private. */
        int w;
        float* input_buf_c = input_buf + c * channel_size;
        float* output_buf_c = output_buf + c * channel_size;
        float* weight_buf_c = weight_buf + c * 9;
        float bias_c = bias ? bias[c] : 0;

        /* top border rows: taps at (h - pad) fall above the image */
        for (int h = 0; h < pad; h++)
        {
            for (w = 0; w < pad; w++)
            { /* top-left corner: only centre/right/bottom taps remain */
                float tmp = bias_c;
                tmp += weight_buf_c[4] * input_buf_c[h * input_w + w];
                tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad];
                tmp += weight_buf_c[7] * input_buf_c[(h + pad) * input_w + w];
                tmp += weight_buf_c[8] * input_buf_c[(h + pad) * input_w + w + pad];
                output_buf_c[h * input_w + w] = elem_activation(tmp, activation);
            }
            for (; w < mid_block_end; w += 4)
            { /* vectorised middle: kernel rows at h and h+pad */
                float32x4_t tmp_4 = vdupq_n_f32(bias_c);
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[3]), vld1q_f32(input_buf_c + h * input_w + w - pad));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[4]), vld1q_f32(input_buf_c + h * input_w + w));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[5]), vld1q_f32(input_buf_c + h * input_w + w + pad));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[6]),
                                  vld1q_f32(input_buf_c + (h + pad) * input_w + w - pad));
                tmp_4 =
                    vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[7]), vld1q_f32(input_buf_c + (h + pad) * input_w + w));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[8]),
                                  vld1q_f32(input_buf_c + (h + pad) * input_w + w + pad));
                tmp_4 = vector_activation(tmp_4, activation);
                vst1q_f32(output_buf_c + h * input_w + w, tmp_4);
            }
            for (; w < mid_end; w++)
            { /* scalar remainder of the middle span */
                float tmp = bias_c;
                tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad];
                tmp += weight_buf_c[4] * input_buf_c[h * input_w + w];
                tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad];
                tmp += weight_buf_c[6] * input_buf_c[(h + pad) * input_w + w - pad];
                tmp += weight_buf_c[7] * input_buf_c[(h + pad) * input_w + w];
                tmp += weight_buf_c[8] * input_buf_c[(h + pad) * input_w + w + pad];
                output_buf_c[h * input_w + w] = elem_activation(tmp, activation);
            }
            for (; w < input_w; w++)
            { /* top-right corner: right-hand taps dropped */
                float tmp = bias_c;
                tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad];
                tmp += weight_buf_c[4] * input_buf_c[h * input_w + w];
                tmp += weight_buf_c[6] * input_buf_c[(h + pad) * input_w + w - pad];
                tmp += weight_buf_c[7] * input_buf_c[(h + pad) * input_w + w];
                output_buf_c[h * input_w + w] = elem_activation(tmp, activation);
            }
        }
        /* interior rows: all three kernel rows are inside the image */
        for (int h = pad; h < input_h - pad; h++)
        {
            for (w = 0; w < pad; w++)
            { /* left border: left-hand taps dropped */
                float tmp = bias_c;
                tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w];
                tmp += weight_buf_c[2] * input_buf_c[(h - pad) * input_w + w + pad];
                tmp += weight_buf_c[4] * input_buf_c[h * input_w + w];
                tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad];
                tmp += weight_buf_c[7] * input_buf_c[(h + pad) * input_w + w];
                tmp += weight_buf_c[8] * input_buf_c[(h + pad) * input_w + w + pad];
                output_buf_c[h * input_w + w] = elem_activation(tmp, activation);
            }
            for (; w < mid_block_end; w += 4)
            { /* vectorised middle: full 3x3 stencil */
                float32x4_t tmp_4 = vdupq_n_f32(bias_c);
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[0]),
                                  vld1q_f32(input_buf_c + (h - pad) * input_w + w - pad));
                tmp_4 =
                    vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[1]), vld1q_f32(input_buf_c + (h - pad) * input_w + w));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[2]),
                                  vld1q_f32(input_buf_c + (h - pad) * input_w + w + pad));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[3]), vld1q_f32(input_buf_c + h * input_w + w - pad));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[4]), vld1q_f32(input_buf_c + h * input_w + w));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[5]), vld1q_f32(input_buf_c + h * input_w + w + pad));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[6]),
                                  vld1q_f32(input_buf_c + (h + pad) * input_w + w - pad));
                tmp_4 =
                    vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[7]), vld1q_f32(input_buf_c + (h + pad) * input_w + w));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[8]),
                                  vld1q_f32(input_buf_c + (h + pad) * input_w + w + pad));
                tmp_4 = vector_activation(tmp_4, activation);
                vst1q_f32(output_buf_c + h * input_w + w, tmp_4);
            }
            for (; w < mid_end; w++)
            { /* scalar remainder: full 3x3 stencil */
                float tmp = bias_c;
                tmp += weight_buf_c[0] * input_buf_c[(h - pad) * input_w + w - pad];
                tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w];
                tmp += weight_buf_c[2] * input_buf_c[(h - pad) * input_w + w + pad];
                tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad];
                tmp += weight_buf_c[4] * input_buf_c[h * input_w + w];
                tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad];
                tmp += weight_buf_c[6] * input_buf_c[(h + pad) * input_w + w - pad];
                tmp += weight_buf_c[7] * input_buf_c[(h + pad) * input_w + w];
                tmp += weight_buf_c[8] * input_buf_c[(h + pad) * input_w + w + pad];
                output_buf_c[h * input_w + w] = elem_activation(tmp, activation);
            }
            for (; w < input_w; w++)
            { /* right border: right-hand taps dropped */
                float tmp = bias_c;
                tmp += weight_buf_c[0] * input_buf_c[(h - pad) * input_w + w - pad];
                tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w];
                tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad];
                tmp += weight_buf_c[4] * input_buf_c[h * input_w + w];
                tmp += weight_buf_c[6] * input_buf_c[(h + pad) * input_w + w - pad];
                tmp += weight_buf_c[7] * input_buf_c[(h + pad) * input_w + w];
                output_buf_c[h * input_w + w] = elem_activation(tmp, activation);
            }
        }
        /* bottom border rows: taps at (h + pad) fall below the image */
        for (int h = input_h - pad; h < input_h; h++)
        {
            for (w = 0; w < pad; w++)
            { /* bottom-left corner */
                float tmp = bias_c;
                tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w];
                tmp += weight_buf_c[2] * input_buf_c[(h - pad) * input_w + w + pad];
                tmp += weight_buf_c[4] * input_buf_c[h * input_w + w];
                tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad];
                output_buf_c[h * input_w + w] = elem_activation(tmp, activation);
            }
            for (; w < mid_block_end; w += 4)
            { /* vectorised middle: kernel rows at h-pad and h */
                float32x4_t tmp_4 = vdupq_n_f32(bias_c);
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[0]),
                                  vld1q_f32(input_buf_c + (h - pad) * input_w + w - pad));
                tmp_4 =
                    vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[1]), vld1q_f32(input_buf_c + (h - pad) * input_w + w));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[2]),
                                  vld1q_f32(input_buf_c + (h - pad) * input_w + w + pad));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[3]), vld1q_f32(input_buf_c + h * input_w + w - pad));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[4]), vld1q_f32(input_buf_c + h * input_w + w));
                tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[5]), vld1q_f32(input_buf_c + h * input_w + w + pad));
                tmp_4 = vector_activation(tmp_4, activation);
                vst1q_f32(output_buf_c + h * input_w + w, tmp_4);
            }
            for (; w < mid_end; w++)
            { /* scalar remainder */
                float tmp = bias_c;
                tmp += weight_buf_c[0] * input_buf_c[(h - pad) * input_w + w - pad];
                tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w];
                tmp += weight_buf_c[2] * input_buf_c[(h - pad) * input_w + w + pad];
                tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad];
                tmp += weight_buf_c[4] * input_buf_c[h * input_w + w];
                tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad];
                output_buf_c[h * input_w + w] = elem_activation(tmp, activation);
            }
            for (; w < input_w; w++)
            { /* bottom-right corner */
                float tmp = bias_c;
                tmp += weight_buf_c[0] * input_buf_c[(h - pad) * input_w + w - pad];
                tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w];
                tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad];
                tmp += weight_buf_c[4] * input_buf_c[h * input_w + w];
                output_buf_c[h * input_w + w] = elem_activation(tmp, activation);
            }
        }
    }

    return 0;
}
#endif
|
Diffus4th_order_core.c | /*
* This work is part of the Core Imaging Library developed by
* Visual Analytics and Imaging System Group of the Science Technology
* Facilities Council, STFC
*
* Copyright 2017 Daniil Kazantsev
* Copyright 2017 Srikanth Nagella, Edoardo Pasca
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "Diffus4th_order_core.h"
#include "utils.h"
#define EPS 1.0e-7
/* C-OMP implementation of fourth-order diffusion scheme [1] for piecewise-smooth recovery (2D/3D case)
* The minimisation is performed using explicit scheme.
*
* Input Parameters:
* 1. Noisy image/volume
* 2. lambda - regularization parameter
* 3. Edge-preserving parameter (sigma)
* 4. Number of iterations, for explicit scheme >= 150 is recommended
* 5. tau - time-marching step for the explicit scheme
 * 6. epsilon: tolerance constant
*
* Output:
* [1] Regularized image/volume
* [2] Information vector which contains [iteration no., reached tolerance]
*
* This function is based on the paper by
* [1] Hajiaboli, M.R., 2011. An anisotropic fourth-order diffusion filter for image noise removal. International Journal of Computer Vision, 92(2), pp.177-191.
*/
/* Driver for the explicit fourth-order anisotropic diffusion scheme
 * (Hajiaboli 2011). Each iteration computes a weighted Laplacian of the
 * current iterate and then takes one explicit time-marching step; when
 * epsil != 0 the relative change is checked every 5th iteration and the
 * loop stops early once it has stayed below epsil more than 3 times.
 *
 * Input      : noisy image/volume (dimX x dimY x dimZ; dimZ==1 -> 2D)
 * Output     : regularised result (also used as the running iterate)
 * infovector : [0] iterations performed, [1] last relative change
 * Returns 0 (also on allocation failure, with infovector zeroed).
 */
float Diffus4th_CPU_main(float *Input, float *Output, float *infovector, float lambdaPar, float sigmaPar, int iterationsNumb, float tau, float epsil, int dimX, int dimY, int dimZ)
{
    int i,count;
    long j,DimTotal;
    float sigmaPar2, re, re1;
    re = 0.0f; re1 = 0.0f;
    count = 0;
    float *W_Lapl=NULL, *Output_prev=NULL;
    sigmaPar2 = sigmaPar*sigmaPar;
    /* long arithmetic: dimX*dimY*dimZ can overflow int for large volumes */
    DimTotal = (long)dimX*dimY*dimZ;

    W_Lapl = (float*)calloc(DimTotal, sizeof(float));
    if (W_Lapl == NULL) {
        /* BUGFIX: calloc result was previously used unchecked */
        infovector[0] = 0.0f;
        infovector[1] = 0.0f;
        return 0;
    }
    if (epsil != 0.0f) {
        Output_prev = (float*)calloc(DimTotal, sizeof(float));
        if (Output_prev == NULL) {
            free(W_Lapl);
            infovector[0] = 0.0f;
            infovector[1] = 0.0f;
            return 0;
        }
    }

    /* start the iteration from the noisy input */
    copyIm(Input, Output, (long)(dimX), (long)(dimY), (long)(dimZ));

    for(i=0; i < iterationsNumb; i++) {
        /* snapshot every 5th iterate for the tolerance check below */
        if ((epsil != 0.0f)  && (i % 5 == 0)) copyIm(Output, Output_prev, (long)(dimX), (long)(dimY), (long)(dimZ));

        if (dimZ == 1) {
            /* 2D: weighted Laplacian, then one explicit update step */
            Weighted_Laplc2D(W_Lapl, Output, sigmaPar2, dimX, dimY);
            Diffusion_update_step2D(Output, Input, W_Lapl, lambdaPar, sigmaPar2, tau, (long)(dimX), (long)(dimY));
        }
        else {
            /* 3D: same scheme with the extra Z terms */
            Weighted_Laplc3D(W_Lapl, Output, sigmaPar2, dimX, dimY, dimZ);
            Diffusion_update_step3D(Output, Input, W_Lapl, lambdaPar, sigmaPar2, tau, (long)(dimX), (long)(dimY), (long)(dimZ));
        }

        /* early stopping: relative L2 change of the iterate */
        if ((epsil != 0.0f)  && (i % 5 == 0)) {
            re = 0.0f; re1 = 0.0f;
            for(j=0; j<DimTotal; j++)
            {
                /* d*d instead of powf(d,2): same value, no libm call */
                float d = Output[j] - Output_prev[j];
                re += d*d;
                re1 += Output[j]*Output[j];
            }
            re = sqrtf(re)/sqrtf(re1);
            if (re < epsil)  count++;
            if (count > 3) break;
        }
    }

    free(W_Lapl);
    if (epsil != 0.0f) free(Output_prev);

    infovector[0] = (float)(i); /* iterations actually performed */
    infovector[1] = re;         /* last computed relative change */
    return 0;
}
/********************************************************************/
/***************************2D Functions*****************************/
/********************************************************************/
/* Fallback so this unit is self-contained when the file-wide EPS macro
   (defined near the top of the file) is not in scope. */
#ifndef EPS
#define EPS 1.0e-7
#endif

/* Computes, for every pixel of U0 (dimX x dimY, mirrored boundaries), the
 * edge-weighted combination c^2*V_norm + c*V_orth of the second derivatives
 * along and across the gradient direction, where c = 1/(1 + |grad|^2/sigma)
 * is the edge-stopping diffusivity. Result is written into W_Lapl.
 * The float return (*W_Lapl, the first element) is kept for interface
 * compatibility with existing callers. */
float Weighted_Laplc2D(float *W_Lapl, float *U0, float sigma, long dimX, long dimY)
{
    long i,j,i1,i2,j1,j2,index;
    float gradX, gradX_sq, gradY, gradY_sq, gradXX, gradYY, gradXY, xy_2, denom, V_norm, V_orth, c, c_sq;
#pragma omp parallel for shared(W_Lapl) private(i,j,i1,i2,j1,j2,index,gradX, gradX_sq, gradY, gradY_sq, gradXX, gradYY, gradXY, xy_2, denom, V_norm, V_orth, c, c_sq)
    for(i=0; i<dimX; i++) {
        /* symmetric (mirror) boundary conditions */
        i1 = i+1; if (i1 == dimX) i1 = i-1;
        i2 = i-1; if (i2 < 0) i2 = i+1;
        for(j=0; j<dimY; j++) {
            /* symmetric boundary conditions */
            j1 = j+1; if (j1 == dimY) j1 = j-1;
            j2 = j-1; if (j2 < 0) j2 = j+1;
            index = j*dimX+i;

            /* first derivatives (central differences) */
            gradX = 0.5f*(U0[j*dimX+i2] - U0[j*dimX+i1]);
            /* x*x instead of pow(x,2): identical value for squaring, but
               avoids a double-precision libm call per pixel */
            gradX_sq = gradX*gradX;
            gradY = 0.5f*(U0[j2*dimX+i] - U0[j1*dimX+i]);
            gradY_sq = gradY*gradY;

            /* second derivatives */
            gradXX = U0[j*dimX+i2] + U0[j*dimX+i1] - 2*U0[index];
            gradYY = U0[j2*dimX+i] + U0[j1*dimX+i] - 2*U0[index];
            gradXY = 0.25f*(U0[j2*dimX+i2] + U0[j1*dimX+i1] - U0[j1*dimX+i2] - U0[j2*dimX+i1]);
            xy_2 = 2.0f*gradX*gradY*gradXY;
            denom = gradX_sq + gradY_sq;

            /* EPS guards the division when the gradient vanishes */
            if (denom <= EPS) {
                V_norm = (gradXX*gradX_sq + xy_2 + gradYY*gradY_sq)/EPS;
                V_orth = (gradXX*gradY_sq - xy_2 + gradYY*gradX_sq)/EPS;
            }
            else  {
                V_norm = (gradXX*gradX_sq + xy_2 + gradYY*gradY_sq)/denom;
                V_orth = (gradXX*gradY_sq - xy_2 + gradYY*gradX_sq)/denom;
            }
            /* edge-stopping diffusivity (uses the unclamped denom) */
            c = 1.0f/(1.0f + denom/sigma);
            c_sq = c*c;
            W_Lapl[index] = c_sq*V_norm + c*V_orth;
        }
    }
    return *W_Lapl;
}
/* One explicit time-marching step of the 2D fourth-order scheme:
 * Output += tau * ( -lambda * Laplacian(W_Lapl) - (Output - Input) ),
 * with mirrored boundaries. sigmaPar2 is accepted for signature
 * uniformity with the 3D variant but not used here. */
float Diffusion_update_step2D(float *Output, float *Input, float *W_Lapl, float lambdaPar, float sigmaPar2, float tau, long dimX, long dimY)
{
    long x,y,xp,xm,yp,ym,idx;
    float lapX, lapY;
#pragma omp parallel for shared(Output, Input, W_Lapl) private(x,y,xp,xm,yp,ym,idx,lapX,lapY)
    for(x=0; x<dimX; x++) {
        /* mirrored neighbour columns */
        xp = (x+1 == dimX) ? x-1 : x+1;
        xm = (x == 0) ? x+1 : x-1;
        for(y=0; y<dimY; y++) {
            /* mirrored neighbour rows */
            yp = (y+1 == dimY) ? y-1 : y+1;
            ym = (y == 0) ? y+1 : y-1;
            idx = y*dimX+x;
            /* discrete Laplacian of the weighted-Laplacian field */
            lapX = W_Lapl[y*dimX+xm] + W_Lapl[y*dimX+xp] - 2*W_Lapl[idx];
            lapY = W_Lapl[ym*dimX+x] + W_Lapl[yp*dimX+x] - 2*W_Lapl[idx];
            Output[idx] += tau*(-lambdaPar*(lapX + lapY) - (Output[idx] - Input[idx]));
        }
    }
    return *Output;
}
/********************************************************************/
/***************************3D Functions*****************************/
/********************************************************************/
/* Fallback so this unit is self-contained when the file-wide EPS macro
   is not in scope. */
#ifndef EPS
#define EPS 1.0e-7
#endif

/* 3D counterpart of Weighted_Laplc2D: per voxel, combines the second
 * derivatives along and across the gradient (V_norm / V_orth) weighted by
 * the edge-stopping diffusivity c = 1/(1 + |grad|^2/sigma); mirrored
 * boundaries. Result written to W_Lapl; the float return is kept for
 * interface compatibility. */
float Weighted_Laplc3D(float *W_Lapl, float *U0, float sigma, long dimX, long dimY, long dimZ)
{
    long i,j,k,i1,i2,j1,j2,k1,k2,index;
    float gradX, gradX_sq, gradY, gradY_sq, gradXX, gradYY, gradXY, xy_2, denom, V_norm, V_orth, c, c_sq, gradZ, gradZ_sq, gradZZ, gradXZ, gradYZ, xyz_1, xyz_2;
#pragma omp parallel for shared(W_Lapl) private(i,j,k,i1,i2,j1,j2,k1,k2,index,gradX, gradX_sq, gradY, gradY_sq, gradXX, gradYY, gradXY, xy_2, denom, V_norm, V_orth, c, c_sq, gradZ, gradZ_sq, gradZZ, gradXZ, gradYZ, xyz_1, xyz_2)
    for(i=0; i<dimX; i++) {
        /* symmetric (mirror) boundary conditions */
        i1 = i+1; if (i1 == dimX) i1 = i-1;
        i2 = i-1; if (i2 < 0) i2 = i+1;
        for(j=0; j<dimY; j++) {
            /* symmetric boundary conditions */
            j1 = j+1; if (j1 == dimY) j1 = j-1;
            j2 = j-1; if (j2 < 0) j2 = j+1;
            for(k=0; k<dimZ; k++) {
                /* symmetric boundary conditions */
                k1 = k+1; if (k1 == dimZ) k1 = k-1;
                k2 = k-1; if (k2 < 0) k2 = k+1;
                index = (dimX*dimY)*k + j*dimX+i;

                /* first derivatives (central differences);
                   x*x replaces pow(x,2): same value, no libm call */
                gradX = 0.5f*(U0[(dimX*dimY)*k + j*dimX+i2] - U0[(dimX*dimY)*k + j*dimX+i1]);
                gradX_sq = gradX*gradX;
                gradY = 0.5f*(U0[(dimX*dimY)*k + j2*dimX+i] - U0[(dimX*dimY)*k + j1*dimX+i]);
                gradY_sq = gradY*gradY;
                gradZ = 0.5f*(U0[(dimX*dimY)*k2 + j*dimX+i] - U0[(dimX*dimY)*k1 + j*dimX+i]);
                gradZ_sq = gradZ*gradZ;

                /* pure and mixed second derivatives */
                gradXX = U0[(dimX*dimY)*k + j*dimX+i2] + U0[(dimX*dimY)*k + j*dimX+i1] - 2*U0[index];
                gradYY = U0[(dimX*dimY)*k + j2*dimX+i] + U0[(dimX*dimY)*k + j1*dimX+i] - 2*U0[index];
                gradZZ = U0[(dimX*dimY)*k2 + j*dimX+i] + U0[(dimX*dimY)*k1 + j*dimX+i] - 2*U0[index];

                gradXY = 0.25f*(U0[(dimX*dimY)*k + j2*dimX+i2] + U0[(dimX*dimY)*k + j1*dimX+i1] - U0[(dimX*dimY)*k + j1*dimX+i2] - U0[(dimX*dimY)*k + j2*dimX+i1]);
                gradXZ = 0.25f*(U0[(dimX*dimY)*k2 + j*dimX+i2] - U0[(dimX*dimY)*k2+j*dimX+i1] - U0[(dimX*dimY)*k1+j*dimX+i2] + U0[(dimX*dimY)*k1+j*dimX+i1]);
                gradYZ = 0.25f*(U0[(dimX*dimY)*k2 +j2*dimX+i] - U0[(dimX*dimY)*k2+j1*dimX+i] - U0[(dimX*dimY)*k1+j2*dimX+i] + U0[(dimX*dimY)*k1+j1*dimX+i]);

                xy_2  = 2.0f*gradX*gradY*gradXY;
                xyz_1 = 2.0f*gradX*gradZ*gradXZ;
                xyz_2 = 2.0f*gradY*gradZ*gradYZ;

                denom = gradX_sq + gradY_sq + gradZ_sq;

                /* EPS guards the division when the gradient vanishes */
                if (denom <= EPS) {
                    V_norm = (gradXX*gradX_sq + gradYY*gradY_sq + gradZZ*gradZ_sq + xy_2 + xyz_1 + xyz_2)/EPS;
                    V_orth = ((gradY_sq + gradZ_sq)*gradXX + (gradX_sq + gradZ_sq)*gradYY + (gradX_sq + gradY_sq)*gradZZ - xy_2 - xyz_1 - xyz_2)/EPS;
                }
                else  {
                    V_norm = (gradXX*gradX_sq + gradYY*gradY_sq + gradZZ*gradZ_sq + xy_2 + xyz_1 + xyz_2)/denom;
                    V_orth = ((gradY_sq + gradZ_sq)*gradXX + (gradX_sq + gradZ_sq)*gradYY + (gradX_sq + gradY_sq)*gradZZ - xy_2 - xyz_1 - xyz_2)/denom;
                }
                /* edge-stopping diffusivity (uses the unclamped denom) */
                c = 1.0f/(1.0f + denom/sigma);
                c_sq = c*c;
                W_Lapl[index] = c_sq*V_norm + c*V_orth;
            }
        }
    }
    return *W_Lapl;
}
/* One explicit time-marching step of the 3D fourth-order scheme:
 * Output += tau * ( -lambda * Laplacian(W_Lapl) - (Output - Input) ),
 * with mirrored boundaries. sigmaPar2 is accepted for signature
 * uniformity but not used here. */
float Diffusion_update_step3D(float *Output, float *Input, float *W_Lapl, float lambdaPar, float sigmaPar2, float tau, long dimX, long dimY, long dimZ)
{
    long x,y,z,xp,xm,yp,ym,zp,zm,idx,slice;
    float lapX, lapY, lapZ;
#pragma omp parallel for shared(Output, Input, W_Lapl) private(x,y,z,xp,xm,yp,ym,zp,zm,idx,slice,lapX,lapY,lapZ)
    for(x=0; x<dimX; x++) {
        /* mirrored neighbours along X */
        xp = (x+1 == dimX) ? x-1 : x+1;
        xm = (x == 0) ? x+1 : x-1;
        for(y=0; y<dimY; y++) {
            /* mirrored neighbours along Y */
            yp = (y+1 == dimY) ? y-1 : y+1;
            ym = (y == 0) ? y+1 : y-1;
            for(z=0; z<dimZ; z++) {
                /* mirrored neighbours along Z */
                zp = (z+1 == dimZ) ? z-1 : z+1;
                zm = (z == 0) ? z+1 : z-1;
                slice = dimX*dimY;
                idx = slice*z + y*dimX + x;
                /* discrete Laplacian of the weighted-Laplacian field */
                lapX = W_Lapl[slice*z + y*dimX+xm] + W_Lapl[slice*z + y*dimX+xp] - 2*W_Lapl[idx];
                lapY = W_Lapl[slice*z + ym*dimX+x] + W_Lapl[slice*z + yp*dimX+x] - 2*W_Lapl[idx];
                lapZ = W_Lapl[slice*zm + y*dimX+x] + W_Lapl[slice*zp + y*dimX+x] - 2*W_Lapl[idx];
                Output[idx] += tau*(-lambdaPar*(lapX + lapY + lapZ) - (Output[idx] - Input[idx]));
            }
        }
    }
    return *Output;
}
|
convolution_3x3_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Transforms 3x3 convolution kernels into the 8x8 Winograd domain and
// repacks them into the pack-4 layout consumed by the matching conv routine.
//   kernel          : source weights, outch x inch x 3x3, row-major
//   kernel_tm_pack4 : destination, layout pb-pa-inch/pa-64-outch/pb
// NOTE(review): the interleave loops iterate while q+3 < outch and
// p+3 < inch, so any channel remainder (inch or outch not a multiple of 4)
// is silently dropped - confirm callers guarantee multiples of 4.
static void conv3x3s1_winograd64_transform_kernel_pack4_msa(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt)
{
    // winograd63 transform kernel
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);

    // 8x3 kernel-transform matrix: maps one 3-tap kernel row to 8 points
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h : transform each of the three kernel rows into tmp[8][3]
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v : transform along the other axis to get the full 8x8 block
            for (int j = 0; j < 8; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 8; i++)
                {
                    kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 64-inch-outch
    // dst = pb-pa-inch/pa-64-outch/pb
    kernel_tm_pack4.create(inch / 4, 64, outch / 4, (size_t)4u * 4 * 4, 4 * 4);

    for (int q = 0; q + (4 - 1) < outch; q += 4)
    {
        Mat g0 = kernel_tm_pack4.channel(q / 4);

        for (int k = 0; k < 64; k++)
        {
            float* g00 = g0.row<float>(k);

            for (int p = 0; p + (4 - 1) < inch; p += 4)
            {
                // gather a 4x4 (inch x outch) tile for Winograd point k
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 4; j++)
                    {
                        const float* k00 = kernel_tm.channel(q + j).row(p + i);

                        g00[0] = (float)k00[k];

                        g00++;
                    }
                }
            }
        }
    }
}
static void conv3x3s1_winograd64_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm / 8 * h_tm / 8;
// bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
bottom_blob_tm.create(tiles, 64, inch, 4u * elempack, elempack, opt.workspace_allocator);
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[8][8][4];
v4f32 _v5_25 = __msa_fill_w_f32(5.25f);
v4f32 _vm4_25 = __msa_fill_w_f32(-4.25f);
v4f32 _vm1_25 = __msa_fill_w_f32(-1.25f);
v4f32 _v0_25 = __msa_fill_w_f32(0.25f);
v4f32 _vm2_5 = __msa_fill_w_f32(-2.5f);
v4f32 _v0_5 = __msa_fill_w_f32(0.5f);
v4f32 _v2 = __msa_fill_w_f32(2.f);
v4f32 _v4 = __msa_fill_w_f32(4.f);
// tile
for (int i = 0; i < h_tm / 8; i++)
{
for (int j = 0; j < w_tm / 8; j++)
{
const float* r0 = img0.row(i * 6) + (j * 6) * 4;
for (int m = 0; m < 8; m++)
{
v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);
v4f32 _r05 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0);
v4f32 _r06 = (v4f32)__msa_ld_w(r0 + 4 * 6, 0);
v4f32 _r07 = (v4f32)__msa_ld_w(r0 + 4 * 7, 0);
v4f32 _tmp0m = __msa_fmadd_w(__msa_fsub_w(_r00, _r06), _v5_25, __msa_fsub_w(_r04, _r02));
v4f32 _tmp7m = __msa_fmadd_w(__msa_fsub_w(_r07, _r01), _v5_25, __msa_fsub_w(_r03, _r05));
__msa_st_w((v4i32)_tmp0m, tmp[0][m], 0);
__msa_st_w((v4i32)_tmp7m, tmp[7][m], 0);
v4f32 _tmp12a = __msa_fmadd_w(__msa_fadd_w(_r02, _r06), _vm4_25, _r04);
v4f32 _tmp12b = __msa_fmadd_w(__msa_fadd_w(_r01, _r05), _vm4_25, _r03);
v4f32 _tmp1m = __msa_fadd_w(_tmp12a, _tmp12b);
v4f32 _tmp2m = __msa_fsub_w(_tmp12a, _tmp12b);
__msa_st_w((v4i32)_tmp1m, tmp[1][m], 0);
__msa_st_w((v4i32)_tmp2m, tmp[2][m], 0);
v4f32 _tmp34a = __msa_fmadd_w(__msa_fmadd_w(_r06, _v0_25, _r02), _vm1_25, _r04);
v4f32 _tmp34b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_r01, _v0_5), _vm2_5, _r03), _v2, _r05);
v4f32 _tmp3m = __msa_fadd_w(_tmp34a, _tmp34b);
v4f32 _tmp4m = __msa_fsub_w(_tmp34a, _tmp34b);
__msa_st_w((v4i32)_tmp3m, tmp[3][m], 0);
__msa_st_w((v4i32)_tmp4m, tmp[4][m], 0);
v4f32 _tmp56a = __msa_fmadd_w(_r06, _v4, __msa_fmadd_w(_r02, _vm1_25, _r04));
v4f32 _tmp56b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_r01, _v2), _vm2_5, _r03), _v0_5, _r05);
v4f32 _tmp5m = __msa_fadd_w(_tmp56a, _tmp56b);
v4f32 _tmp6m = __msa_fsub_w(_tmp56a, _tmp56b);
__msa_st_w((v4i32)_tmp5m, tmp[5][m], 0);
__msa_st_w((v4i32)_tmp6m, tmp[6][m], 0);
r0 += w * 4;
}
float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 8 + j) * 4;
float* r0_tm_1 = r0_tm_0 + tiles * 4;
float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2;
float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3;
float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4;
float* r0_tm_5 = r0_tm_0 + tiles * 4 * 5;
float* r0_tm_6 = r0_tm_0 + tiles * 4 * 6;
float* r0_tm_7 = r0_tm_0 + tiles * 4 * 7;
for (int m = 0; m < 8; m++)
{
v4f32 _tmp00 = (v4f32)__msa_ld_w(tmp[m][0], 0);
v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0);
v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0);
v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0);
v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0);
v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0);
v4f32 _tmp06 = (v4f32)__msa_ld_w(tmp[m][6], 0);
v4f32 _tmp07 = (v4f32)__msa_ld_w(tmp[m][7], 0);
v4f32 _r0tm0 = __msa_fmadd_w(__msa_fsub_w(_tmp00, _tmp06), _v5_25, __msa_fsub_w(_tmp04, _tmp02));
v4f32 _r0tm7 = __msa_fmadd_w(__msa_fsub_w(_tmp07, _tmp01), _v5_25, __msa_fsub_w(_tmp03, _tmp05));
v4f32 _tmp12a = __msa_fmadd_w(__msa_fadd_w(_tmp02, _tmp06), _vm4_25, _tmp04);
v4f32 _tmp12b = __msa_fmadd_w(__msa_fadd_w(_tmp01, _tmp05), _vm4_25, _tmp03);
v4f32 _r0tm1 = __msa_fadd_w(_tmp12a, _tmp12b);
v4f32 _r0tm2 = __msa_fsub_w(_tmp12a, _tmp12b);
v4f32 _tmp34a = __msa_fmadd_w(__msa_fmadd_w(_tmp06, _v0_25, _tmp02), _vm1_25, _tmp04);
v4f32 _tmp34b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_tmp01, _v0_5), _vm2_5, _tmp03), _v2, _tmp05);
v4f32 _r0tm3 = __msa_fadd_w(_tmp34a, _tmp34b);
v4f32 _r0tm4 = __msa_fsub_w(_tmp34a, _tmp34b);
v4f32 _tmp56a = __msa_fmadd_w(_tmp06, _v4, __msa_fmadd_w(_tmp02, _vm1_25, _tmp04));
v4f32 _tmp56b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_tmp01, _v2), _vm2_5, _tmp03), _v0_5, _tmp05);
v4f32 _r0tm5 = __msa_fadd_w(_tmp56a, _tmp56b);
v4f32 _r0tm6 = __msa_fsub_w(_tmp56a, _tmp56b);
__msa_st_w((v4i32)_r0tm0, r0_tm_0, 0);
__msa_st_w((v4i32)_r0tm1, r0_tm_1, 0);
__msa_st_w((v4i32)_r0tm2, r0_tm_2, 0);
__msa_st_w((v4i32)_r0tm3, r0_tm_3, 0);
__msa_st_w((v4i32)_r0tm4, r0_tm_4, 0);
__msa_st_w((v4i32)_r0tm5, r0_tm_5, 0);
__msa_st_w((v4i32)_r0tm6, r0_tm_6, 0);
__msa_st_w((v4i32)_r0tm7, r0_tm_7, 0);
r0_tm_0 += tiles * 4 * 8;
r0_tm_1 += tiles * 4 * 8;
r0_tm_2 += tiles * 4 * 8;
r0_tm_3 += tiles * 4 * 8;
r0_tm_4 += tiles * 4 * 8;
r0_tm_5 += tiles * 4 * 8;
r0_tm_6 += tiles * 4 * 8;
r0_tm_7 += tiles * 4 * 8;
}
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = h_tm / 8 * w_tm / 8;
// permute
// bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, 4u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 11 < tiles; i += 12)
{
float* tmpptr = tm2.row(i / 12);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x8
v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
v4f32 _r4 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);
v4f32 _r5 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0);
v4f32 _r6 = (v4f32)__msa_ld_w(r0 + 4 * 6, 0);
v4f32 _r7 = (v4f32)__msa_ld_w(r0 + 4 * 7, 0);
v4f32 _r8 = (v4f32)__msa_ld_w(r0 + 4 * 8, 0);
v4f32 _r9 = (v4f32)__msa_ld_w(r0 + 4 * 9, 0);
v4f32 _ra = (v4f32)__msa_ld_w(r0 + 4 * 10, 0);
v4f32 _rb = (v4f32)__msa_ld_w(r0 + 4 * 11, 0);
v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4);
v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4);
v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6);
v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6);
v4i32 _r89r = __msa_ilvr_w((v4i32)_r9, (v4i32)_r8);
v4i32 _r89l = __msa_ilvl_w((v4i32)_r9, (v4i32)_r8);
v4i32 _rabr = __msa_ilvr_w((v4i32)_rb, (v4i32)_ra);
v4i32 _rabl = __msa_ilvl_w((v4i32)_rb, (v4i32)_ra);
v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r);
v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r);
v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l);
v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l);
v2i64 _r89ab_0 = __msa_ilvr_d((v2i64)_rabr, (v2i64)_r89r);
v2i64 _r89ab_1 = __msa_ilvl_d((v2i64)_rabr, (v2i64)_r89r);
v2i64 _r89ab_2 = __msa_ilvr_d((v2i64)_rabl, (v2i64)_r89l);
v2i64 _r89ab_3 = __msa_ilvl_d((v2i64)_rabl, (v2i64)_r89l);
__msa_st_w((v4i32)_r0123_0, tmpptr, 0);
__msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0);
__msa_st_w((v4i32)_r89ab_0, tmpptr + 4 * 2, 0);
__msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 3, 0);
__msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 4, 0);
__msa_st_w((v4i32)_r89ab_1, tmpptr + 4 * 5, 0);
__msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 6, 0);
__msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 7, 0);
__msa_st_w((v4i32)_r89ab_2, tmpptr + 4 * 8, 0);
__msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 9, 0);
__msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 10, 0);
__msa_st_w((v4i32)_r89ab_3, tmpptr + 4 * 11, 0);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 48;
}
}
for (; i + 7 < tiles; i += 8)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x8
v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
v4f32 _r4 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);
v4f32 _r5 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0);
v4f32 _r6 = (v4f32)__msa_ld_w(r0 + 4 * 6, 0);
v4f32 _r7 = (v4f32)__msa_ld_w(r0 + 4 * 7, 0);
v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4);
v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4);
v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6);
v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6);
v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r);
v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r);
v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l);
v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l);
__msa_st_w((v4i32)_r0123_0, tmpptr, 0);
__msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0);
__msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 2, 0);
__msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 3, 0);
__msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 4, 0);
__msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 5, 0);
__msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 6, 0);
__msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 7, 0);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 32;
}
}
for (; i + 3 < tiles; i += 4)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x4
v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
__msa_st_w((v4i32)_r0123_0, tmpptr, 0);
__msa_st_w((v4i32)_r0123_1, tmpptr + 4, 0);
__msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 2, 0);
__msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 3, 0);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 16;
}
}
for (; i + 1 < tiles; i += 2)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x2
v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4i32 _r01_0 = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r01_1 = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
__msa_st_w((v4i32)_r01_0, tmpptr, 0);
__msa_st_w((v4i32)_r01_1, tmpptr + 4, 0);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 8;
}
}
for (; i < tiles; i++)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
v4f32 _val = (v4f32)__msa_ld_w(r0, 0);
__msa_st_w((v4i32)_val, tmpptr, 0);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 4;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 64, outch, 4u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
v4f32 _sum0 = (v4f32)__msa_fill_w(0);
v4f32 _sum1 = (v4f32)__msa_fill_w(0);
v4f32 _sum2 = (v4f32)__msa_fill_w(0);
v4f32 _sum3 = (v4f32)__msa_fill_w(0);
v4f32 _sum4 = (v4f32)__msa_fill_w(0);
v4f32 _sum5 = (v4f32)__msa_fill_w(0);
v4f32 _sum6 = (v4f32)__msa_fill_w(0);
v4f32 _sum7 = (v4f32)__msa_fill_w(0);
v4f32 _sum8 = (v4f32)__msa_fill_w(0);
v4f32 _sum9 = (v4f32)__msa_fill_w(0);
v4f32 _suma = (v4f32)__msa_fill_w(0);
v4f32 _sumb = (v4f32)__msa_fill_w(0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(r0 + 96);
__builtin_prefetch(k0 + 32);
v4i32 _val0123 = __msa_ld_w(r0, 0);
v4i32 _val4567 = __msa_ld_w(r0 + 4, 0);
v4i32 _val89ab = __msa_ld_w(r0 + 8, 0);
v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
_sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0);
_sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0);
_sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0);
_sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0);
_sum4 = __msa_fmadd_w(_sum4, (v4f32)__msa_splati_w(_val4567, 0), _w0);
_sum5 = __msa_fmadd_w(_sum5, (v4f32)__msa_splati_w(_val4567, 1), _w0);
_sum6 = __msa_fmadd_w(_sum6, (v4f32)__msa_splati_w(_val4567, 2), _w0);
_sum7 = __msa_fmadd_w(_sum7, (v4f32)__msa_splati_w(_val4567, 3), _w0);
_sum8 = __msa_fmadd_w(_sum8, (v4f32)__msa_splati_w(_val89ab, 0), _w0);
_sum9 = __msa_fmadd_w(_sum9, (v4f32)__msa_splati_w(_val89ab, 1), _w0);
_suma = __msa_fmadd_w(_suma, (v4f32)__msa_splati_w(_val89ab, 2), _w0);
_sumb = __msa_fmadd_w(_sumb, (v4f32)__msa_splati_w(_val89ab, 3), _w0);
r0 += 12;
k0 += 4;
}
__msa_st_w((v4i32)_sum0, output0_tm, 0);
__msa_st_w((v4i32)_sum1, output0_tm + 4, 0);
__msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0);
__msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0);
__msa_st_w((v4i32)_sum4, output0_tm + 4 * 4, 0);
__msa_st_w((v4i32)_sum5, output0_tm + 4 * 5, 0);
__msa_st_w((v4i32)_sum6, output0_tm + 4 * 6, 0);
__msa_st_w((v4i32)_sum7, output0_tm + 4 * 7, 0);
__msa_st_w((v4i32)_sum8, output0_tm + 4 * 8, 0);
__msa_st_w((v4i32)_sum9, output0_tm + 4 * 9, 0);
__msa_st_w((v4i32)_suma, output0_tm + 4 * 10, 0);
__msa_st_w((v4i32)_sumb, output0_tm + 4 * 11, 0);
output0_tm += 4 * 12;
}
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
v4f32 _sum0 = (v4f32)__msa_fill_w(0);
v4f32 _sum1 = (v4f32)__msa_fill_w(0);
v4f32 _sum2 = (v4f32)__msa_fill_w(0);
v4f32 _sum3 = (v4f32)__msa_fill_w(0);
v4f32 _sum4 = (v4f32)__msa_fill_w(0);
v4f32 _sum5 = (v4f32)__msa_fill_w(0);
v4f32 _sum6 = (v4f32)__msa_fill_w(0);
v4f32 _sum7 = (v4f32)__msa_fill_w(0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(r0 + 64);
__builtin_prefetch(k0 + 32);
v4i32 _val0123 = __msa_ld_w(r0, 0);
v4i32 _val4567 = __msa_ld_w(r0 + 4, 0);
v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
_sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0);
_sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0);
_sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0);
_sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0);
_sum4 = __msa_fmadd_w(_sum4, (v4f32)__msa_splati_w(_val4567, 0), _w0);
_sum5 = __msa_fmadd_w(_sum5, (v4f32)__msa_splati_w(_val4567, 1), _w0);
_sum6 = __msa_fmadd_w(_sum6, (v4f32)__msa_splati_w(_val4567, 2), _w0);
_sum7 = __msa_fmadd_w(_sum7, (v4f32)__msa_splati_w(_val4567, 3), _w0);
r0 += 8;
k0 += 4;
}
__msa_st_w((v4i32)_sum0, output0_tm, 0);
__msa_st_w((v4i32)_sum1, output0_tm + 4, 0);
__msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0);
__msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0);
__msa_st_w((v4i32)_sum4, output0_tm + 4 * 4, 0);
__msa_st_w((v4i32)_sum5, output0_tm + 4 * 5, 0);
__msa_st_w((v4i32)_sum6, output0_tm + 4 * 6, 0);
__msa_st_w((v4i32)_sum7, output0_tm + 4 * 7, 0);
output0_tm += 4 * 8;
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
v4f32 _sum0 = (v4f32)__msa_fill_w(0);
v4f32 _sum1 = (v4f32)__msa_fill_w(0);
v4f32 _sum2 = (v4f32)__msa_fill_w(0);
v4f32 _sum3 = (v4f32)__msa_fill_w(0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(r0 + 32);
__builtin_prefetch(k0 + 32);
v4i32 _val0123 = __msa_ld_w(r0, 0);
v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
_sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0);
_sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0);
_sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0);
_sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0);
r0 += 4;
k0 += 4;
}
__msa_st_w((v4i32)_sum0, output0_tm, 0);
__msa_st_w((v4i32)_sum1, output0_tm + 4, 0);
__msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0);
__msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0);
output0_tm += 4 * 4;
}
for (; i + 1 < tiles; i += 2)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
v4f32 _sum0 = (v4f32)__msa_fill_w(0);
v4f32 _sum1 = (v4f32)__msa_fill_w(0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(r0 + 16);
__builtin_prefetch(k0 + 32);
v4f32 _val0 = __msa_fill_w_f32(*r0++);
v4f32 _val1 = __msa_fill_w_f32(*r0++);
v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
_sum0 = __msa_fmadd_w(_sum0, _val0, _w0);
_sum1 = __msa_fmadd_w(_sum1, _val1, _w0);
k0 += 4;
}
__msa_st_w((v4i32)_sum0, output0_tm, 0);
__msa_st_w((v4i32)_sum1, output0_tm + 4, 0);
output0_tm += 4 * 2;
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
v4f32 _sum = (v4f32)__msa_fill_w(0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(r0 + 8);
__builtin_prefetch(k0 + 32);
v4f32 _val0 = __msa_fill_w_f32(*r0++);
v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
_sum = __msa_fmadd_w(_sum, _val0, _w0);
k0 += 4;
}
__msa_st_w((v4i32)_sum, output0_tm, 0);
output0_tm += 4;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm / 8 * h_tm / 8;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
// const float bias0 = bias ? bias[p] : 0.f;
v4f32 _bias0 = bias ? (v4f32)__msa_ld_w((const float*)bias + p * 4, 0) : (v4f32)__msa_fill_w(0);
float tmp[6][8][4];
v4f32 _v32 = __msa_fill_w_f32(32.f);
v4f32 _v16 = __msa_fill_w_f32(16.f);
v4f32 _v8 = __msa_fill_w_f32(8.f);
v4f32 _v4 = __msa_fill_w_f32(4.f);
v4f32 _v2 = __msa_fill_w_f32(2.f);
// tile
for (int i = 0; i < outh / 6; i++)
{
for (int j = 0; j < outw / 6; j++)
{
// top_blob_tm.create(tiles, 64, outch, elemsize, elempack);
const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 8 + j) * 4;
const float* output0_tm_1 = output0_tm_0 + tiles * 4;
const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2;
const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3;
const float* output0_tm_4 = output0_tm_0 + tiles * 4 * 4;
const float* output0_tm_5 = output0_tm_0 + tiles * 4 * 5;
const float* output0_tm_6 = output0_tm_0 + tiles * 4 * 6;
const float* output0_tm_7 = output0_tm_0 + tiles * 4 * 7;
float* output0 = out0.row<float>(i * 6) + (j * 6) * 4;
// TODO msa optimize
for (int m = 0; m < 8; m++)
{
v4f32 _out0tm0 = (v4f32)__msa_ld_w(output0_tm_0, 0);
v4f32 _out0tm1 = (v4f32)__msa_ld_w(output0_tm_1, 0);
v4f32 _out0tm2 = (v4f32)__msa_ld_w(output0_tm_2, 0);
v4f32 _out0tm3 = (v4f32)__msa_ld_w(output0_tm_3, 0);
v4f32 _out0tm4 = (v4f32)__msa_ld_w(output0_tm_4, 0);
v4f32 _out0tm5 = (v4f32)__msa_ld_w(output0_tm_5, 0);
v4f32 _out0tm6 = (v4f32)__msa_ld_w(output0_tm_6, 0);
v4f32 _out0tm7 = (v4f32)__msa_ld_w(output0_tm_7, 0);
v4f32 _tmp024a = __msa_fadd_w(_out0tm1, _out0tm2);
v4f32 _tmp135a = __msa_fsub_w(_out0tm1, _out0tm2);
v4f32 _tmp024b = __msa_fadd_w(_out0tm3, _out0tm4);
v4f32 _tmp135b = __msa_fsub_w(_out0tm3, _out0tm4);
v4f32 _tmp024c = __msa_fadd_w(_out0tm5, _out0tm6);
v4f32 _tmp135c = __msa_fsub_w(_out0tm5, _out0tm6);
v4f32 _tmp0m = __msa_fadd_w(__msa_fadd_w(_out0tm0, _tmp024a), __msa_fmadd_w(_tmp024b, _v32, _tmp024c));
v4f32 _tmp2m = __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v4, _tmp024b), _v8, _tmp024c);
v4f32 _tmp4m = __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v16, _tmp024b), _v2, _tmp024c);
__msa_st_w((v4i32)_tmp0m, tmp[0][m], 0);
__msa_st_w((v4i32)_tmp2m, tmp[2][m], 0);
__msa_st_w((v4i32)_tmp4m, tmp[4][m], 0);
v4f32 _tmp1m = __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v2, _tmp135b), _v16, _tmp135c);
v4f32 _tmp3m = __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v8, _tmp135b), _v4, _tmp135c);
v4f32 _tmp5m = __msa_fadd_w(__msa_fadd_w(_out0tm7, _tmp135a), __msa_fmadd_w(_tmp135c, _v32, _tmp135b));
__msa_st_w((v4i32)_tmp1m, tmp[1][m], 0);
__msa_st_w((v4i32)_tmp3m, tmp[3][m], 0);
__msa_st_w((v4i32)_tmp5m, tmp[5][m], 0);
output0_tm_0 += tiles * 4 * 8;
output0_tm_1 += tiles * 4 * 8;
output0_tm_2 += tiles * 4 * 8;
output0_tm_3 += tiles * 4 * 8;
output0_tm_4 += tiles * 4 * 8;
output0_tm_5 += tiles * 4 * 8;
output0_tm_6 += tiles * 4 * 8;
output0_tm_7 += tiles * 4 * 8;
}
for (int m = 0; m < 6; m++)
{
v4f32 _tmp00 = (v4f32)__msa_ld_w(tmp[m][0], 0);
v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0);
v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0);
v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0);
v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0);
v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0);
v4f32 _tmp06 = (v4f32)__msa_ld_w(tmp[m][6], 0);
v4f32 _tmp07 = (v4f32)__msa_ld_w(tmp[m][7], 0);
v4f32 _tmp024a = __msa_fadd_w(_tmp01, _tmp02);
v4f32 _tmp135a = __msa_fsub_w(_tmp01, _tmp02);
v4f32 _tmp024b = __msa_fadd_w(_tmp03, _tmp04);
v4f32 _tmp135b = __msa_fsub_w(_tmp03, _tmp04);
v4f32 _tmp024c = __msa_fadd_w(_tmp05, _tmp06);
v4f32 _tmp135c = __msa_fsub_w(_tmp05, _tmp06);
v4f32 _out00 = __msa_fadd_w(_bias0, __msa_fadd_w(__msa_fadd_w(_tmp00, _tmp024a), __msa_fmadd_w(_tmp024b, _v32, _tmp024c)));
v4f32 _out02 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v4, _tmp024b), _v8, _tmp024c));
v4f32 _out04 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v16, _tmp024b), _v2, _tmp024c));
__msa_st_w((v4i32)_out00, output0, 0);
__msa_st_w((v4i32)_out02, output0 + 4 * 2, 0);
__msa_st_w((v4i32)_out04, output0 + 4 * 4, 0);
v4f32 _out01 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v2, _tmp135b), _v16, _tmp135c));
v4f32 _out03 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v8, _tmp135b), _v4, _tmp135c));
v4f32 _out05 = __msa_fadd_w(_bias0, __msa_fadd_w(__msa_fadd_w(_tmp07, _tmp135a), __msa_fmadd_w(_tmp135c, _v32, _tmp135b)));
__msa_st_w((v4i32)_out01, output0 + 4, 0);
__msa_st_w((v4i32)_out03, output0 + 4 * 3, 0);
__msa_st_w((v4i32)_out05, output0 + 4 * 5, 0);
output0 += outw * 4;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s1_winograd42_transform_kernel_pack4_msa(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt)
{
// winograd42 transform kernel
Mat kernel_tm(6 * 6, inch, outch);
const float ktm[6][3] = {
{1.0f / 4, 0.0f, 0.0f},
{-1.0f / 6, -1.0f / 6, -1.0f / 6},
{-1.0f / 6, 1.0f / 6, -1.0f / 6},
{1.0f / 24, 1.0f / 12, 1.0f / 6},
{1.0f / 24, -1.0f / 12, 1.0f / 6},
{0.0f, 0.0f, 1.0f}
};
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[6][3];
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
for (int j = 0; j < 6; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 36-inch-outch
// dst = pb-pa-inch/pa-36-outch/pb
kernel_tm_pack4.create(inch / 4, 36, outch / 4, (size_t)4u * 4 * 4, 4 * 4);
for (int q = 0; q + (4 - 1) < outch; q += 4)
{
Mat g0 = kernel_tm_pack4.channel(q / 4);
for (int k = 0; k < 36; k++)
{
float* g00 = g0.row<float>(k);
for (int p = 0; p + (4 - 1) < inch; p += 4)
{
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 4; j++)
{
const float* k00 = kernel_tm.channel(q + j).row(p + i);
g00[0] = (float)k00[k];
g00++;
}
}
}
}
}
}
static void conv3x3s1_winograd42_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = w_tm / 6 * h_tm / 6;
bottom_blob_tm.create(tiles, 36, inch, 4u * elempack, elempack, opt.workspace_allocator);
// const float itm[4][4] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r04 + r03
// 2 = 4 * (r01 - r02) + r04 - r03
// 3 = -2 * (r01 - r03) + r04 - r02
// 4 = 2 * (r01 - r03) + r04 - r02
// 5 = 4 * r01 - 5 * r03 + r05
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[6][6][4];
v4f32 _vm5 = __msa_fill_w_f32(-5.f);
v4f32 _vm4 = __msa_fill_w_f32(-4.f);
v4f32 _v4 = __msa_fill_w_f32(4.f);
v4f32 _vm2 = __msa_fill_w_f32(-2.f);
v4f32 _v2 = __msa_fill_w_f32(2.f);
// tile
for (int i = 0; i < h_tm / 6; i++)
{
for (int j = 0; j < w_tm / 6; j++)
{
const float* r0 = img0.row(i * 4) + (j * 4) * 4;
for (int m = 0; m < 6; m++)
{
v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);
v4f32 _r05 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0);
v4f32 _tmp0m = __msa_fmadd_w(__msa_fmadd_w(_r04, _v4, _r00), _vm5, _r02);
v4f32 _tmp1m = __msa_fmadd_w(__msa_fadd_w(_r04, _r03), _vm4, __msa_fadd_w(_r01, _r02));
v4f32 _tmp2m = __msa_fmadd_w(__msa_fsub_w(_r04, _r03), _v4, __msa_fsub_w(_r01, _r02));
v4f32 _tmp3m = __msa_fmadd_w(__msa_fsub_w(_r04, _r02), _vm2, __msa_fsub_w(_r01, _r03));
v4f32 _tmp4m = __msa_fmadd_w(__msa_fsub_w(_r04, _r02), _v2, __msa_fsub_w(_r01, _r03));
v4f32 _tmp5m = __msa_fmadd_w(__msa_fmadd_w(_r05, _v4, _r01), _vm5, _r03);
__msa_st_w((v4i32)_tmp0m, tmp[0][m], 0);
__msa_st_w((v4i32)_tmp1m, tmp[1][m], 0);
__msa_st_w((v4i32)_tmp2m, tmp[2][m], 0);
__msa_st_w((v4i32)_tmp3m, tmp[3][m], 0);
__msa_st_w((v4i32)_tmp4m, tmp[4][m], 0);
__msa_st_w((v4i32)_tmp5m, tmp[5][m], 0);
r0 += w * 4;
}
float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 6 + j) * 4;
float* r0_tm_1 = r0_tm_0 + tiles * 4;
float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2;
float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3;
float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4;
float* r0_tm_5 = r0_tm_0 + tiles * 4 * 5;
for (int m = 0; m < 6; m++)
{
v4f32 _tmp00 = (v4f32)__msa_ld_w(tmp[m][0], 0);
v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0);
v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0);
v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0);
v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0);
v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0);
v4f32 _r0tm0 = __msa_fmadd_w(__msa_fmadd_w(_tmp04, _v4, _tmp00), _vm5, _tmp02);
v4f32 _r0tm1 = __msa_fmadd_w(__msa_fadd_w(_tmp04, _tmp03), _vm4, __msa_fadd_w(_tmp01, _tmp02));
v4f32 _r0tm2 = __msa_fmadd_w(__msa_fsub_w(_tmp04, _tmp03), _v4, __msa_fsub_w(_tmp01, _tmp02));
v4f32 _r0tm3 = __msa_fmadd_w(__msa_fsub_w(_tmp04, _tmp02), _vm2, __msa_fsub_w(_tmp01, _tmp03));
v4f32 _r0tm4 = __msa_fmadd_w(__msa_fsub_w(_tmp04, _tmp02), _v2, __msa_fsub_w(_tmp01, _tmp03));
v4f32 _r0tm5 = __msa_fmadd_w(__msa_fmadd_w(_tmp05, _v4, _tmp01), _vm5, _tmp03);
__msa_st_w((v4i32)_r0tm0, r0_tm_0, 0);
__msa_st_w((v4i32)_r0tm1, r0_tm_1, 0);
__msa_st_w((v4i32)_r0tm2, r0_tm_2, 0);
__msa_st_w((v4i32)_r0tm3, r0_tm_3, 0);
__msa_st_w((v4i32)_r0tm4, r0_tm_4, 0);
__msa_st_w((v4i32)_r0tm5, r0_tm_5, 0);
r0_tm_0 += tiles * 4 * 6;
r0_tm_1 += tiles * 4 * 6;
r0_tm_2 += tiles * 4 * 6;
r0_tm_3 += tiles * 4 * 6;
r0_tm_4 += tiles * 4 * 6;
r0_tm_5 += tiles * 4 * 6;
}
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = h_tm / 6 * w_tm / 6;
// permute
// bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 36, 4u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 36; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 11 < tiles; i += 12)
{
float* tmpptr = tm2.row(i / 12);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x8
v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
v4f32 _r4 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);
v4f32 _r5 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0);
v4f32 _r6 = (v4f32)__msa_ld_w(r0 + 4 * 6, 0);
v4f32 _r7 = (v4f32)__msa_ld_w(r0 + 4 * 7, 0);
v4f32 _r8 = (v4f32)__msa_ld_w(r0 + 4 * 8, 0);
v4f32 _r9 = (v4f32)__msa_ld_w(r0 + 4 * 9, 0);
v4f32 _ra = (v4f32)__msa_ld_w(r0 + 4 * 10, 0);
v4f32 _rb = (v4f32)__msa_ld_w(r0 + 4 * 11, 0);
v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4);
v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4);
v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6);
v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6);
v4i32 _r89r = __msa_ilvr_w((v4i32)_r9, (v4i32)_r8);
v4i32 _r89l = __msa_ilvl_w((v4i32)_r9, (v4i32)_r8);
v4i32 _rabr = __msa_ilvr_w((v4i32)_rb, (v4i32)_ra);
v4i32 _rabl = __msa_ilvl_w((v4i32)_rb, (v4i32)_ra);
v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r);
v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r);
v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l);
v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l);
v2i64 _r89ab_0 = __msa_ilvr_d((v2i64)_rabr, (v2i64)_r89r);
v2i64 _r89ab_1 = __msa_ilvl_d((v2i64)_rabr, (v2i64)_r89r);
v2i64 _r89ab_2 = __msa_ilvr_d((v2i64)_rabl, (v2i64)_r89l);
v2i64 _r89ab_3 = __msa_ilvl_d((v2i64)_rabl, (v2i64)_r89l);
__msa_st_w((v4i32)_r0123_0, tmpptr, 0);
__msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0);
__msa_st_w((v4i32)_r89ab_0, tmpptr + 4 * 2, 0);
__msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 3, 0);
__msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 4, 0);
__msa_st_w((v4i32)_r89ab_1, tmpptr + 4 * 5, 0);
__msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 6, 0);
__msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 7, 0);
__msa_st_w((v4i32)_r89ab_2, tmpptr + 4 * 8, 0);
__msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 9, 0);
__msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 10, 0);
__msa_st_w((v4i32)_r89ab_3, tmpptr + 4 * 11, 0);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 48;
}
}
for (; i + 7 < tiles; i += 8)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x8
v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
v4f32 _r4 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);
v4f32 _r5 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0);
v4f32 _r6 = (v4f32)__msa_ld_w(r0 + 4 * 6, 0);
v4f32 _r7 = (v4f32)__msa_ld_w(r0 + 4 * 7, 0);
v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4);
v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4);
v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6);
v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6);
v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r);
v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r);
v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l);
v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l);
__msa_st_w((v4i32)_r0123_0, tmpptr, 0);
__msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0);
__msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 2, 0);
__msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 3, 0);
__msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 4, 0);
__msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 5, 0);
__msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 6, 0);
__msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 7, 0);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 32;
}
}
for (; i + 3 < tiles; i += 4)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x4
v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
__msa_st_w((v4i32)_r0123_0, tmpptr, 0);
__msa_st_w((v4i32)_r0123_1, tmpptr + 4, 0);
__msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 2, 0);
__msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 3, 0);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 16;
}
}
for (; i + 1 < tiles; i += 2)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x2
v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4i32 _r01_0 = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r01_1 = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
__msa_st_w((v4i32)_r01_0, tmpptr, 0);
__msa_st_w((v4i32)_r01_1, tmpptr + 4, 0);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 8;
}
}
for (; i < tiles; i++)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
v4f32 _val = (v4f32)__msa_ld_w(r0, 0);
__msa_st_w((v4i32)_val, tmpptr, 0);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 4;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 36, outch, 4u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int r = 0; r < 36; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
v4f32 _sum0 = (v4f32)__msa_fill_w(0);
v4f32 _sum1 = (v4f32)__msa_fill_w(0);
v4f32 _sum2 = (v4f32)__msa_fill_w(0);
v4f32 _sum3 = (v4f32)__msa_fill_w(0);
v4f32 _sum4 = (v4f32)__msa_fill_w(0);
v4f32 _sum5 = (v4f32)__msa_fill_w(0);
v4f32 _sum6 = (v4f32)__msa_fill_w(0);
v4f32 _sum7 = (v4f32)__msa_fill_w(0);
v4f32 _sum8 = (v4f32)__msa_fill_w(0);
v4f32 _sum9 = (v4f32)__msa_fill_w(0);
v4f32 _suma = (v4f32)__msa_fill_w(0);
v4f32 _sumb = (v4f32)__msa_fill_w(0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(r0 + 96);
__builtin_prefetch(k0 + 32);
v4i32 _val0123 = __msa_ld_w(r0, 0);
v4i32 _val4567 = __msa_ld_w(r0 + 4, 0);
v4i32 _val89ab = __msa_ld_w(r0 + 8, 0);
v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
_sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0);
_sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0);
_sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0);
_sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0);
_sum4 = __msa_fmadd_w(_sum4, (v4f32)__msa_splati_w(_val4567, 0), _w0);
_sum5 = __msa_fmadd_w(_sum5, (v4f32)__msa_splati_w(_val4567, 1), _w0);
_sum6 = __msa_fmadd_w(_sum6, (v4f32)__msa_splati_w(_val4567, 2), _w0);
_sum7 = __msa_fmadd_w(_sum7, (v4f32)__msa_splati_w(_val4567, 3), _w0);
_sum8 = __msa_fmadd_w(_sum8, (v4f32)__msa_splati_w(_val89ab, 0), _w0);
_sum9 = __msa_fmadd_w(_sum9, (v4f32)__msa_splati_w(_val89ab, 1), _w0);
_suma = __msa_fmadd_w(_suma, (v4f32)__msa_splati_w(_val89ab, 2), _w0);
_sumb = __msa_fmadd_w(_sumb, (v4f32)__msa_splati_w(_val89ab, 3), _w0);
r0 += 12;
k0 += 4;
}
__msa_st_w((v4i32)_sum0, output0_tm, 0);
__msa_st_w((v4i32)_sum1, output0_tm + 4, 0);
__msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0);
__msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0);
__msa_st_w((v4i32)_sum4, output0_tm + 4 * 4, 0);
__msa_st_w((v4i32)_sum5, output0_tm + 4 * 5, 0);
__msa_st_w((v4i32)_sum6, output0_tm + 4 * 6, 0);
__msa_st_w((v4i32)_sum7, output0_tm + 4 * 7, 0);
__msa_st_w((v4i32)_sum8, output0_tm + 4 * 8, 0);
__msa_st_w((v4i32)_sum9, output0_tm + 4 * 9, 0);
__msa_st_w((v4i32)_suma, output0_tm + 4 * 10, 0);
__msa_st_w((v4i32)_sumb, output0_tm + 4 * 11, 0);
output0_tm += 4 * 12;
}
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
v4f32 _sum0 = (v4f32)__msa_fill_w(0);
v4f32 _sum1 = (v4f32)__msa_fill_w(0);
v4f32 _sum2 = (v4f32)__msa_fill_w(0);
v4f32 _sum3 = (v4f32)__msa_fill_w(0);
v4f32 _sum4 = (v4f32)__msa_fill_w(0);
v4f32 _sum5 = (v4f32)__msa_fill_w(0);
v4f32 _sum6 = (v4f32)__msa_fill_w(0);
v4f32 _sum7 = (v4f32)__msa_fill_w(0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(r0 + 64);
__builtin_prefetch(k0 + 32);
v4i32 _val0123 = __msa_ld_w(r0, 0);
v4i32 _val4567 = __msa_ld_w(r0 + 4, 0);
v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
_sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0);
_sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0);
_sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0);
_sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0);
_sum4 = __msa_fmadd_w(_sum4, (v4f32)__msa_splati_w(_val4567, 0), _w0);
_sum5 = __msa_fmadd_w(_sum5, (v4f32)__msa_splati_w(_val4567, 1), _w0);
_sum6 = __msa_fmadd_w(_sum6, (v4f32)__msa_splati_w(_val4567, 2), _w0);
_sum7 = __msa_fmadd_w(_sum7, (v4f32)__msa_splati_w(_val4567, 3), _w0);
r0 += 8;
k0 += 4;
}
__msa_st_w((v4i32)_sum0, output0_tm, 0);
__msa_st_w((v4i32)_sum1, output0_tm + 4, 0);
__msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0);
__msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0);
__msa_st_w((v4i32)_sum4, output0_tm + 4 * 4, 0);
__msa_st_w((v4i32)_sum5, output0_tm + 4 * 5, 0);
__msa_st_w((v4i32)_sum6, output0_tm + 4 * 6, 0);
__msa_st_w((v4i32)_sum7, output0_tm + 4 * 7, 0);
output0_tm += 4 * 8;
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
v4f32 _sum0 = (v4f32)__msa_fill_w(0);
v4f32 _sum1 = (v4f32)__msa_fill_w(0);
v4f32 _sum2 = (v4f32)__msa_fill_w(0);
v4f32 _sum3 = (v4f32)__msa_fill_w(0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(r0 + 32);
__builtin_prefetch(k0 + 32);
v4i32 _val0123 = __msa_ld_w(r0, 0);
v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
_sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0);
_sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0);
_sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0);
_sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0);
r0 += 4;
k0 += 4;
}
__msa_st_w((v4i32)_sum0, output0_tm, 0);
__msa_st_w((v4i32)_sum1, output0_tm + 4, 0);
__msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0);
__msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0);
output0_tm += 4 * 4;
}
for (; i + 1 < tiles; i += 2)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
v4f32 _sum0 = (v4f32)__msa_fill_w(0);
v4f32 _sum1 = (v4f32)__msa_fill_w(0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(r0 + 16);
__builtin_prefetch(k0 + 32);
v4f32 _val0 = __msa_fill_w_f32(*r0++);
v4f32 _val1 = __msa_fill_w_f32(*r0++);
v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
_sum0 = __msa_fmadd_w(_sum0, _val0, _w0);
_sum1 = __msa_fmadd_w(_sum1, _val1, _w0);
k0 += 4;
}
__msa_st_w((v4i32)_sum0, output0_tm, 0);
__msa_st_w((v4i32)_sum1, output0_tm + 4, 0);
output0_tm += 4 * 2;
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row<const float>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* k0 = kernel0_tm.row<const float>(r);
int nn = inch * 4; // inch always > 0
v4f32 _sum = (v4f32)__msa_fill_w(0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(r0 + 8);
__builtin_prefetch(k0 + 32);
v4f32 _val0 = __msa_fill_w_f32(*r0++);
v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
_sum = __msa_fmadd_w(_sum, _val0, _w0);
k0 += 4;
}
__msa_st_w((v4i32)_sum, output0_tm, 0);
output0_tm += 4;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
// const float otm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + (r01 + r02) + (r03 + r04)
// 1 = (r01 - r02) + (r03 - r04) * 2
// 2 = (r01 + r02) + (r03 + r04) * 4
// 3 = r05 + (r01 - r02) + (r03 - r04) * 8
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = w_tm / 6 * h_tm / 6;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
// const float bias0 = bias ? bias[p] : 0.f;
v4f32 _bias0 = bias ? (v4f32)__msa_ld_w((const float*)bias + p * 4, 0) : (v4f32)__msa_fill_w(0);
float tmp[4][6][4];
v4f32 _v2 = __msa_fill_w_f32(2.f);
v4f32 _v4 = __msa_fill_w_f32(4.f);
v4f32 _v8 = __msa_fill_w_f32(8.f);
// tile
for (int i = 0; i < outh / 4; i++)
{
for (int j = 0; j < outw / 4; j++)
{
// top_blob_tm.create(tiles, 36, outch, elemsize, elempack);
const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 6 + j) * 4;
const float* output0_tm_1 = output0_tm_0 + tiles * 4;
const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2;
const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3;
const float* output0_tm_4 = output0_tm_0 + tiles * 4 * 4;
const float* output0_tm_5 = output0_tm_0 + tiles * 4 * 5;
float* output0 = out0.row<float>(i * 4) + (j * 4) * 4;
// TODO msa optimize
for (int m = 0; m < 6; m++)
{
v4f32 _out0tm0 = (v4f32)__msa_ld_w(output0_tm_0, 0);
v4f32 _out0tm1 = (v4f32)__msa_ld_w(output0_tm_1, 0);
v4f32 _out0tm2 = (v4f32)__msa_ld_w(output0_tm_2, 0);
v4f32 _out0tm3 = (v4f32)__msa_ld_w(output0_tm_3, 0);
v4f32 _out0tm4 = (v4f32)__msa_ld_w(output0_tm_4, 0);
v4f32 _out0tm5 = (v4f32)__msa_ld_w(output0_tm_5, 0);
v4f32 _tmp02a = __msa_fadd_w(_out0tm1, _out0tm2);
v4f32 _tmp13a = __msa_fsub_w(_out0tm1, _out0tm2);
v4f32 _tmp02b = __msa_fadd_w(_out0tm3, _out0tm4);
v4f32 _tmp13b = __msa_fsub_w(_out0tm3, _out0tm4);
v4f32 _tmp0m = __msa_fadd_w(__msa_fadd_w(_out0tm0, _tmp02a), _tmp02b);
v4f32 _tmp1m = __msa_fmadd_w(_tmp13a, _v2, _tmp13b);
v4f32 _tmp2m = __msa_fmadd_w(_tmp02a, _v4, _tmp02b);
v4f32 _tmp3m = __msa_fmadd_w(__msa_fadd_w(_out0tm5, _tmp13a), _v8, _tmp13b);
__msa_st_w((v4i32)_tmp0m, tmp[0][m], 0);
__msa_st_w((v4i32)_tmp1m, tmp[1][m], 0);
__msa_st_w((v4i32)_tmp2m, tmp[2][m], 0);
__msa_st_w((v4i32)_tmp3m, tmp[3][m], 0);
output0_tm_0 += tiles * 4 * 6;
output0_tm_1 += tiles * 4 * 6;
output0_tm_2 += tiles * 4 * 6;
output0_tm_3 += tiles * 4 * 6;
output0_tm_4 += tiles * 4 * 6;
output0_tm_5 += tiles * 4 * 6;
}
for (int m = 0; m < 4; m++)
{
v4f32 _tmp00 = (v4f32)__msa_ld_w(tmp[m][0], 0);
v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0);
v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0);
v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0);
v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0);
v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0);
v4f32 _tmp02a = __msa_fadd_w(_tmp01, _tmp02);
v4f32 _tmp13a = __msa_fsub_w(_tmp01, _tmp02);
v4f32 _tmp02b = __msa_fadd_w(_tmp03, _tmp04);
v4f32 _tmp13b = __msa_fsub_w(_tmp03, _tmp04);
v4f32 _out00 = __msa_fadd_w(_bias0, __msa_fadd_w(__msa_fadd_w(_tmp00, _tmp02a), _tmp02b));
v4f32 _out01 = __msa_fadd_w(_bias0, __msa_fmadd_w(_tmp13a, _v2, _tmp13b));
v4f32 _out02 = __msa_fadd_w(_bias0, __msa_fmadd_w(_tmp02a, _v4, _tmp02b));
v4f32 _out03 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fadd_w(_tmp05, _tmp13a), _v8, _tmp13b));
__msa_st_w((v4i32)_out00, output0, 0);
__msa_st_w((v4i32)_out01, output0 + 4, 0);
__msa_st_w((v4i32)_out02, output0 + 4 * 2, 0);
__msa_st_w((v4i32)_out03, output0 + 4 * 3, 0);
output0 += outw * 4;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
|
9857.c | // this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c' as parsed by frontend compiler rose
// Tiled heat-3d stencil kernel (Polybench), tiling generated by CHILL.
// Performs 500 alternating Jacobi half-steps: B <- stencil(A), then
// A <- stencil(B). Only the interior [1, n-2]^3 is updated; boundary
// planes are left untouched. NOTE: the tsteps parameter is ignored —
// the time loop is hard-coded to 500 iterations by the code generator.
void kernel_heat_3d(int tsteps, int n, double A[120 + 0][120 + 0][120 + 0], double B[120 + 0][120 + 0][120 + 0]) {
  int step;
  for (step = 1; step <= 500; step += 1) {
    int ii, i, jj, j, kk, k;
    // Half-step 1: B gets the 7-point stencil of A (8x8x16 tiles).
#pragma omp parallel for private(ii, i, jj, j, kk, k)
    for (ii = 1; ii <= n - 2; ii += 8)
      for (i = ii; i <= (ii + 7 < n - 2 ? ii + 7 : n - 2); i += 1)
        for (jj = 1; jj <= n - 2; jj += 8)
          for (j = jj; j <= (n - 2 < jj + 7 ? n - 2 : jj + 7); j += 1)
            for (kk = 1; kk <= n - 2; kk += 16)
              for (k = kk; k <= (n - 2 < kk + 15 ? n - 2 : kk + 15); k += 1)
                B[i][j][k] = 0.125 * (A[i + 1][j][k] - 2 * A[i][j][k] + A[i - 1][j][k]) + 0.125 * (A[i][j + 1][k] - 2 * A[i][j][k] + A[i][j - 1][k]) + 0.125 * (A[i][j][k + 1] - 2 * A[i][j][k] + A[i][j][k - 1]) + A[i][j][k];
    // Half-step 2: A gets the 7-point stencil of B (same tiling).
#pragma omp parallel for private(ii, i, jj, j, kk, k)
    for (ii = 1; ii <= n - 2; ii += 8)
      for (i = ii; i <= (ii + 7 < n - 2 ? ii + 7 : n - 2); i += 1)
        for (jj = 1; jj <= n - 2; jj += 8)
          for (j = jj; j <= (n - 2 < jj + 7 ? n - 2 : jj + 7); j += 1)
            for (kk = 1; kk <= n - 2; kk += 16)
              for (k = kk; k <= (n - 2 < kk + 15 ? n - 2 : kk + 15); k += 1)
                A[i][j][k] = 0.125 * (B[i + 1][j][k] - 2 * B[i][j][k] + B[i - 1][j][k]) + 0.125 * (B[i][j + 1][k] - 2 * B[i][j][k] + B[i][j - 1][k]) + 0.125 * (B[i][j][k + 1] - 2 * B[i][j][k] + B[i][j][k - 1]) + B[i][j][k];
  }
}
|
cell_division_gpu.h | // -----------------------------------------------------------------------------
//
// Copyright (C) The BioDynaMo Project.
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef INTEGRATION_CELL_DIVISION_GPU_H_
#define INTEGRATION_CELL_DIVISION_GPU_H_
#include <array>
#include "biodynamo.h"
#include "math_util.h"
namespace bdm {
// ----------------------------------------------------------------------------
// Starting with 8 cells, we let each cell grow in volume up until a point
// a cell must divide. This tests whether the GPU accelerated mechanical
// interactions properly handle the creation of new cells.
// -----------------------------------------------------------------------------
// Compares two 3-vectors component-wise. Whenever a pair of components
// differs by more than 1e-9, *ret is set to false and the offending
// component is logged. *ret is never reset to true, so failures
// accumulate across repeated calls (the caller initializes it).
inline void ExpectArrayNear(const std::array<double, 3>& actual,
                            const std::array<double, 3>& expected, bool* ret) {
  for (size_t i = 0; i < actual.size(); i++) {
    const double diff = std::fabs(expected[i] - actual[i]);
    if (diff <= 1e-9) {
      continue;  // component within tolerance
    }
    *ret = false;
    std::cout << "Wrong result! Expected " << expected[i]
              << ", but instead got " << actual[i]
              << ", which is a difference of " << diff
              << ", which is larger than 1e-9" << std::endl;
  }
}
// 2. Define compile time parameter pack for the simulation: extends the
// BioDynaMo defaults so that simulation objects (cells) may carry the
// GrowDivide biology module used by RunTest below.
template <typename Backend>
struct CompileTimeParam : public DefaultCompileTimeParam<Backend> {
using BiologyModules = Variant<GrowDivide>;
};
enum ExecutionMode { kCpu, kCuda, kOpenCl };
// Builds a 2x2x2 lattice of growing cells, simulates 10 timesteps (during
// which cell divisions occur), and compares the resulting positions of the
// first 8 cells against golden values; on any mismatch *result is set to
// false. `mode` selects the compute backend via simulation parameters.
inline void RunTest(bool* result, ExecutionMode mode) {
Simulation<> simulation("cell_division_gpu");
auto* rm = simulation.GetResourceManager();
auto* param = simulation.GetParam();
rm->Clear();
auto cells = rm->template Get<Cell>();
switch (mode) {
case kCpu:
break;
case kOpenCl:
param->use_opencl_ = true;
// NOTE(review): no break here — kOpenCl appears to deliberately fall
// through so that use_gpu_ is set as well; confirm intent and annotate
// with a fall-through marker if so.
case kCuda:
param->use_gpu_ = true;
}
// We need to give every test the same seed for the RNG, because in the cell
// division, random numbers are used. Within a single executable these numbers
// vary. Also within the threads this needs to be enforced
#pragma omp parallel
simulation.GetRandom()->SetSeed(1);
size_t cells_per_dim = 2;
// Factory for one cell: fixed diameter/adherence/mass plus a GrowDivide
// biology module (division threshold 30.05, growth rate 5000).
auto construct = [](const std::array<double, 3>& position) {
Cell cell(position);
cell.SetDiameter(30);
cell.SetAdherence(0.4);
cell.SetMass(1.0);
cell.AddBiologyModule(GrowDivide(30.05, 5000, {gAllBmEvents}));
return cell;
};
// Place the 8 initial cells on a lattice with 20-unit spacing.
for (size_t x = 0; x < cells_per_dim; x++) {
double x_pos = x * 20.0;
for (size_t y = 0; y < cells_per_dim; y++) {
double y_pos = y * 20.0;
for (size_t z = 0; z < cells_per_dim; z++) {
auto new_simulation_object = construct({x_pos, y_pos, z * 20.0});
cells->push_back(new_simulation_object);
}
}
}
// Run for 10 timesteps. In step 2 a division should take place. In step 3
// these new cells are instantiated
simulation.GetScheduler()->Simulate(10);
// Golden positions (tolerance 1e-9) for the 8 original cells after 10 steps;
// all backends are expected to reproduce them exactly.
ExpectArrayNear(
(*cells)[0].GetPosition(),
{4.1399071506916413909, -5.9871942139195297727, 2.8344890446256703065},
result);
ExpectArrayNear(
(*cells)[1].GetPosition(),
{-2.4263219149482031511, -1.4202336557809887019, 29.769029317615839147},
result);
ExpectArrayNear(
(*cells)[2].GetPosition(),
{-4.9118212650644856865, 23.156656083480623209, -9.1231684411316447125},
result);
ExpectArrayNear(
(*cells)[3].GetPosition(),
{4.3076765979041251597, 15.615300607043293368, 25.657658447555828474},
result);
ExpectArrayNear(
(*cells)[4].GetPosition(),
{28.139314619772036963, -0.20987998233654170388, 4.6381417441282613012},
result);
ExpectArrayNear(
(*cells)[5].GetPosition(),
{24.417550786690171094, 3.347525366344008102, 28.067824703341415216},
result);
ExpectArrayNear(
(*cells)[6].GetPosition(),
{16.614520566718258721, 15.828015607618416638, -4.8357284569095106974},
result);
ExpectArrayNear(
(*cells)[7].GetPosition(),
{14.446017269290647889, 22.250832446808978204, 20.180438615017894932},
result);
}
inline int Simulate(int argc, const char** argv) {
bool result = true;
// TODO(ahmad): after Trello card ("Fix inconsistency in cell state due to
// direct updates in Biology Modules")
// enable multithreading, and adjust results if necessary
omp_set_num_threads(1);
// Run CPU version
RunTest(&result, kCpu);
// Run GPU (CUDA) version
RunTest(&result, kCuda);
// Run GPU (OpenCL) version
RunTest(&result, kOpenCl);
return !result;
}
} // namespace bdm
#endif // INTEGRATION_CELL_DIVISION_GPU_H_
|
GB_unop__cosh_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__cosh_fc32_fc32)
// op(A') function: GB (_unop_tran__cosh_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = ccoshf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ccoshf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = ccoshf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_COSH || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Applies cij = ccoshf (aij) elementwise over anz entries of Ax into Cx,
// parallelized with nthreads. Auto-generated from Generator/* — keep the
// logic in sync with the template rather than hand-editing it.
GrB_Info GB (_unop_apply__cosh_fc32_fc32)
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// operator disabled at compile time; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// sparse/hyper/full case: every entry 0..anz-1 is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = ccoshf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = ccoshf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Computes C = ccoshf (A'): transpose, typecast, and apply, delegating to
// the shared GB_unop_transpose.c template (which expands GB_CAST_OP above).
// Workspaces and A_slice carry the precomputed parallel partition of A.
GrB_Info GB (_unop_tran__cosh_fc32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// shared transpose kernel; per-entry operation comes from GB_CAST_OP
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
image.h | // ========================================================================== //
// Copyright (c) 2017-2018 The University of Texas at Austin. //
// All rights reserved. //
// //
// Licensed under the Apache License, Version 2.0 (the "License"); //
// you may not use this file except in compliance with the License. //
// A copy of the License is included with this software in the file LICENSE. //
// If your copy does not contain the License, you may obtain a copy of the //
// License at: //
// //
// https://www.apache.org/licenses/LICENSE-2.0 //
// //
// Unless required by applicable law or agreed to in writing, software //
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT //
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //
// See the License for the specific language governing permissions and //
// limitations under the License. //
// //
// ========================================================================== //
#pragma once
#include <cstring>
#include <fstream>
#include <iostream>
#include <sstream>
#include "glm/glm.hpp"
#include "glog/logging.h"
#include "pbrt/memory.h"
#include "render/tile.h"
#include "utils/profiler_util.h"
namespace spray {
// Accumulation buffer for one image tile: a w*h array of RGBA floats.
// Owns buf_ (released via FreeAligned in the destructor), so copying is
// deleted — the implicit copy would alias buf_ and double-free it.
class TileImage {
 public:
  TileImage() : w_(0), h_(0), buf_(nullptr) {}  // init order matches declaration order
  ~TileImage() { FreeAligned(buf_); }

  // Rule of three: owning raw buffer, forbid copies (fixes latent double free).
  TileImage(const TileImage&) = delete;
  TileImage& operator=(const TileImage&) = delete;

  // Reallocates the buffer for a w-by-h tile; contents are uninitialized
  // after the call.
  void resize(int w, int h) {
    FreeAligned(buf_);
    int tile_area = w * h;
    w_ = w;
    h_ = h;
    buf_ = AllocAligned<glm::vec4>(tile_area);
  }

  // Accumulates an RGB sample into pixel pixid; alpha is left untouched.
  // No bounds check: pixid must be < w*h from the last resize().
  void add(int pixid, const float rgb[3]) {
    buf_[pixid].r += rgb[0];
    buf_[pixid].g += rgb[1];
    buf_[pixid].b += rgb[2];
  }

 private:
  int w_, h_;       // tile dimensions in pixels
  glm::vec4* buf_;  // owned aligned pixel buffer (null until resize())
};
// Frame-sized HDR accumulation image: one glm::vec4 (RGB + small alpha tag)
// per pixel, allocated with AllocAligned and released in the destructor.
// Copying is deleted (rule of three): the implicit copy would alias `buf`
// and cause a double free.
struct HdrImage {
  HdrImage() : w(0), h(0), buf(nullptr) {}
  ~HdrImage() { FreeAligned(buf); }

  HdrImage(const HdrImage&) = delete;
  HdrImage& operator=(const HdrImage&) = delete;

  // Reallocates the pixel buffer; contents are uninitialized afterwards.
  void resize(int width, int height) {
    FreeAligned(buf);
    buf = AllocAligned<glm::vec4>(width * height);
    w = width;
    h = height;
  }

  // Size of the pixel buffer in bytes.
  int bytes() const { return w * h * sizeof(glm::vec4); }

  // Zero every pixel / only the first `size` pixels.
  void clear() { memset(buf, 0, w * h * sizeof(glm::vec4)); }
  void clear(std::size_t size) { memset(buf, 0, size * sizeof(glm::vec4)); }

  // Overwrites pixel `pixid` with scale * rgb; alpha is set to a small
  // nonzero tag so composited pixels are distinguishable from empty ones.
  void update(int pixid, const float rgb[3], float scale) {
#ifdef SPRAY_GLOG_CHECK
    CHECK_LT(pixid, w * h);
#endif
    buf[pixid] =
        glm::vec4(scale * rgb[0], scale * rgb[1], scale * rgb[2], 0.000001f);
  }

  // Overwrites pixel `pixid` with rgb (alpha tag as above).
  void update(int pixid, const float rgb[3]) {
    buf[pixid] = glm::vec4(rgb[0], rgb[1], rgb[2], 0.000001f);
  }

  // Accumulates scale * rgb into pixel `pixid`; alpha untouched.
  void add(int pixid, const float rgb[3], float scale) {
#ifdef SPRAY_GLOG_CHECK
    CHECK_LT(pixid, w * h);
#endif
    buf[pixid].r += (scale * rgb[0]);
    buf[pixid].g += (scale * rgb[1]);
    buf[pixid].b += (scale * rgb[2]);
  }

  // Accumulates scale * rgb into pixel `pixid`; double-precision scale
  // (products are narrowed back to float on assignment).
  void add(int pixid, const float rgb[3], double scale) {
#ifdef SPRAY_GLOG_CHECK
    CHECK_LT(pixid, w * h);
#endif
    buf[pixid].r += (scale * rgb[0]);
    buf[pixid].g += (scale * rgb[1]);
    buf[pixid].b += (scale * rgb[2]);
  }

  // Accumulates rgb into pixel `pixid`; alpha untouched.
  void add(int pixid, const float rgb[3]) {
#ifdef SPRAY_GLOG_CHECK
    CHECK_LT(pixid, w * h);
#endif
    buf[pixid].r += rgb[0];
    buf[pixid].g += rgb[1];
    buf[pixid].b += rgb[2];
  }

  // Divides each pixel inside `tile_in` by num_samples (serial version).
  void scale(const Tile& tile_in, int num_samples) {
    Tile tile = tile_in;
    int image_w = w;
    int pixid_offset, pixid;
    float scale = 1.f / num_samples;
    for (int y = tile.y; y < tile.y + tile.h; ++y) {
      pixid_offset = y * image_w;
      for (int x = tile.x; x < tile.x + tile.w; ++x) {
        pixid = pixid_offset + x;
        buf[pixid].r *= scale;
        buf[pixid].g *= scale;
        buf[pixid].b *= scale;
      }
    }
  }

  // Worksharing version of scale(): a bare `omp for`, so it must be called
  // from inside an enclosing `omp parallel` region. Loop invariants are
  // hoisted out of the pixel loop; the two loops stay perfectly nested as
  // collapse(2) requires.
  void parallelScale(const Tile& tile_in, int num_samples) {
    Tile tile = tile_in;
    const int image_w = w;
    const float scale = 1.f / num_samples;
#pragma omp for collapse(2) schedule(static, 1)
    for (int y = tile.y; y < tile.y + tile.h; ++y) {
      for (int x = tile.x; x < tile.x + tile.w; ++x) {
        int pixid = y * image_w + x;
        buf[pixid].r *= scale;
        buf[pixid].g *= scale;
        buf[pixid].b *= scale;
      }
    }
  }

  // Overwrites the RGB of pixel `pixid`, leaving alpha untouched.
  void set(int pixid, const float rgb[3]) {
#ifdef SPRAY_GLOG_CHECK
    CHECK_LT(pixid, w * h);
#endif
    buf[pixid].r = rgb[0];
    buf[pixid].g = rgb[1];
    buf[pixid].b = rgb[2];
  }

  // Overwrites the full RGBA of pixel `pixid`.
  void set(int pixid, const glm::vec4& rgb) { buf[pixid] = rgb; }

  // Sums the image across all MPI ranks into rank 0's buffer
  // (MPI_IN_PLACE on the root; other ranks only send).
  void composite() {
    int count = (w * h) << 2;  // 4 floats per pixel
#ifdef SPRAY_TIMING
    tStartMPI(TIMER_SYNC_IMAGE);
#endif
    if (mpi::rank() == 0) {
      MPI_Reduce(MPI_IN_PLACE, buf, count, MPI_FLOAT, MPI_SUM, 0,
                 MPI_COMM_WORLD);
    } else {
      MPI_Reduce(buf, nullptr, count, MPI_FLOAT, MPI_SUM, 0, MPI_COMM_WORLD);
    }
#ifdef SPRAY_TIMING
    tStop(TIMER_SYNC_IMAGE);
#endif
  }

  // Writes the buffer as an ASCII PPM (P3, max value 1023), iterating rows
  // top-down from y = h-1 so the image is flipped vertically in the file.
  void writePpm(const char* filename) {
    std::stringstream header;
    header << "P3" << std::endl;
    header << w << " " << h << std::endl;
    header << "1023" << std::endl;
    std::ofstream file;
    file.open(filename, std::ios::out | std::ios::trunc);
    file << header.str();
    for (int y = h - 1; y > -1; --y) {
      for (int x = 0; x < w; ++x) {
        int i = y * w + x;
        float r = glm::clamp(buf[i].r * 1023.f, 0.0f, 1023.0f);
        float g = glm::clamp(buf[i].g * 1023.f, 0.0f, 1023.0f);
        float b = glm::clamp(buf[i].b * 1023.f, 0.0f, 1023.0f);
        file << (unsigned)r << " " << (unsigned)g << " " << (unsigned)b
             << std::endl;
      }
    }
    file.close();
  }

  // Same as writePpm() above, but for an external packed RGBA float buffer.
  static void writePpm(int width, int height, const char* filename,
                       void* rgba) {
    std::stringstream header;
    header << "P3" << std::endl;
    header << width << " " << height << std::endl;
    header << "1023" << std::endl;
    std::ofstream file;
    file.open(filename, std::ios::out | std::ios::trunc);
    file << header.str();
    float* rgba_buf = (float*)rgba;
    for (int y = height - 1; y > -1; --y) {
      int offset = width * y;
      for (int x = 0; x < width; ++x) {
        int i = (offset + x) << 2;  // 4 floats per pixel
        float r = glm::clamp(rgba_buf[i] * 1023.f, 0.f, 1023.f);
        float g = glm::clamp(rgba_buf[i + 1] * 1023.f, 0.f, 1023.f);
        float b = glm::clamp(rgba_buf[i + 2] * 1023.f, 0.f, 1023.f);
        file << (unsigned)r << " " << (unsigned)g << " " << (unsigned)b
             << std::endl;
      }
    }
    file.close();
  }

  int w;           // image width in pixels
  int h;           // image height in pixels
  glm::vec4* buf;  // owned aligned pixel buffer (null until resize())
};
} // namespace spray
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.