source
stringlengths
3
92
c
stringlengths
26
2.25M
subopt.c
/* * suboptimal folding - Stefan Wuchty, Walter Fontana & Ivo Hofacker * * Vienna RNA package */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <ctype.h> #include <string.h> #include <math.h> #include "ViennaRNA/fold.h" #include "ViennaRNA/constraints/hard.h" #include "ViennaRNA/constraints/soft.h" #include "ViennaRNA/utils/basic.h" #include "ViennaRNA/utils/strings.h" #include "ViennaRNA/params/default.h" #include "ViennaRNA/fold_vars.h" #include "ViennaRNA/datastructures/lists.h" #include "ViennaRNA/eval.h" #include "ViennaRNA/params/basic.h" #include "ViennaRNA/loops/all.h" #include "ViennaRNA/cofold.h" #include "ViennaRNA/gquad.h" #include "ViennaRNA/alphabet.h" #include "ViennaRNA/subopt.h" /* hack */ #include "ViennaRNA/color_output.inc" #ifdef _OPENMP #include <omp.h> #endif #define true 1 #define false 0 #ifndef ON_SAME_STRAND #define ON_SAME_STRAND(I, J, C) (((I) >= (C)) || ((J) < (C))) #endif /** * @brief Sequence interval stack element used in subopt.c */ typedef struct INTERVAL { int i; int j; int array_flag; } INTERVAL; typedef struct { char *structure; LIST *Intervals; int partial_energy; int is_duplex; /* int best_energy; */ /* best attainable energy */ } STATE; typedef struct { LIST *Intervals; LIST *Stack; int nopush; } subopt_env; struct old_subopt_dat { unsigned long max_sol; unsigned long n_sol; SOLUTION *SolutionList; FILE *fp; int cp; }; /* ################################# # GLOBAL VARIABLES # ################################# */ PUBLIC int subopt_sorted = 0; /* output sorted by energy */ PUBLIC int density_of_states[MAXDOS + 1]; PUBLIC double print_energy = 9999; /* printing threshold for use with logML */ /* ################################# # PRIVATE VARIABLES # ################################# */ /* some backward compatibility stuff */ PRIVATE int backward_compat = 0; PRIVATE vrna_fold_compound_t *backward_compat_compound = NULL; #ifdef _OPENMP #pragma omp 
threadprivate(backward_compat_compound, backward_compat) #endif /* ################################# # PRIVATE FUNCTION DECLARATIONS # ################################# */ #ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY PRIVATE SOLUTION * wrap_subopt(char *seq, char *structure, vrna_param_t *parameters, int delta, int is_constrained, int is_circular, FILE *fp); #endif PRIVATE void make_pair(int i, int j, STATE *state); /* mark a gquadruplex in the resulting dot-bracket structure */ PRIVATE void make_gquad(int i, int L, int l[3], STATE *state); PRIVATE INTERVAL * make_interval(int i, int j, int ml); PRIVATE STATE * make_state(LIST *Intervals, char *structure, int partial_energy, int is_duplex, int length); PRIVATE STATE * copy_state(STATE *state); PRIVATE void print_state(STATE *state); PRIVATE void UNUSED print_stack(LIST *list); PRIVATE LIST * make_list(void); PRIVATE void push(LIST *list, void *data); PRIVATE void *pop(LIST *list); PRIVATE int best_attainable_energy(vrna_fold_compound_t *vc, STATE *state); PRIVATE void scan_interval(vrna_fold_compound_t *vc, int i, int j, int array_flag, int threshold, STATE *state, subopt_env *env); PRIVATE void free_interval_node(INTERVAL *node); PRIVATE void free_state_node(STATE *node); PRIVATE void push_back(LIST *Stack, STATE *state); PRIVATE char * get_structure(STATE *state); PRIVATE int compare(const void *solution1, const void *solution2); PRIVATE int compare_en(const void *solution1, const void *solution2); PRIVATE void make_output(SOLUTION *SL, int cp, FILE *fp); PRIVATE void repeat(vrna_fold_compound_t *vc, int i, int j, STATE *state, int part_energy, int temp_energy, int best_energy, int threshold, subopt_env *env); PRIVATE void repeat_gquad(vrna_fold_compound_t *vc, int i, int j, STATE *state, int part_energy, int temp_energy, int best_energy, int threshold, subopt_env *env); PRIVATE void old_subopt_print(const char *structure, float energy, void *data); PRIVATE void old_subopt_store(const char *structure, float 
energy, void *data); PRIVATE void old_subopt_store_compressed(const char *structure, float energy, void *data); /* ################################# # BEGIN OF FUNCTION DEFINITIONS # ################################# */ /*---------------------------------------------------------------------------*/ /*List routines--------------------------------------------------------------*/ /*---------------------------------------------------------------------------*/ PRIVATE void make_pair(int i, int j, STATE *state) { state->structure[i - 1] = '('; state->structure[j - 1] = ')'; } PRIVATE void make_gquad(int i, int L, int l[3], STATE *state) { int x; for (x = 0; x < L; x++) { state->structure[i - 1 + x] = '+'; state->structure[i - 1 + x + L + l[0]] = '+'; state->structure[i - 1 + x + 2 * L + l[0] + l[1]] = '+'; state->structure[i - 1 + x + 3 * L + l[0] + l[1] + l[2]] = '+'; } } /*---------------------------------------------------------------------------*/ PRIVATE INTERVAL * make_interval(int i, int j, int array_flag) { INTERVAL *interval; interval = lst_newnode(sizeof(INTERVAL)); interval->i = i; interval->j = j; interval->array_flag = array_flag; return interval; } /*---------------------------------------------------------------------------*/ PRIVATE void free_interval_node(INTERVAL *node) { lst_freenode(node); } /*---------------------------------------------------------------------------*/ PRIVATE void free_state_node(STATE *node) { free(node->structure); if (node->Intervals) lst_kill(node->Intervals, lst_freenode); lst_freenode(node); } /*---------------------------------------------------------------------------*/ PRIVATE STATE * make_state(LIST *Intervals, char *structure, int partial_energy, int is_duplex, int length) { STATE *state; state = lst_newnode(sizeof(STATE)); if (Intervals) state->Intervals = Intervals; else state->Intervals = lst_init(); if (structure) { state->structure = structure; } else { int i; state->structure = (char *)vrna_alloc(length + 1); for (i 
= 0; i < length; i++) state->structure[i] = '.'; } state->partial_energy = partial_energy; return state; } /*---------------------------------------------------------------------------*/ PRIVATE STATE * copy_state(STATE *state) { STATE *new_state; void *after; INTERVAL *new_interval, *next; new_state = lst_newnode(sizeof(STATE)); new_state->Intervals = lst_init(); new_state->partial_energy = state->partial_energy; /* new_state->best_energy = state->best_energy; */ if (state->Intervals->count) { after = LST_HEAD(new_state->Intervals); for (next = lst_first(state->Intervals); next; next = lst_next(next)) { new_interval = lst_newnode(sizeof(INTERVAL)); *new_interval = *next; lst_insertafter(new_state->Intervals, new_interval, after); after = new_interval; } } new_state->structure = strdup(state->structure); if (!new_state->structure) vrna_message_error("out of memory"); return new_state; } /*---------------------------------------------------------------------------*/ /*@unused @*/ PRIVATE void print_state(STATE *state) { INTERVAL *next; if (state->Intervals->count) { printf("%d intervals:\n", state->Intervals->count); for (next = lst_first(state->Intervals); next; next = lst_next(next)) printf("[%d,%d],%d ", next->i, next->j, next->array_flag); printf("\n"); } printf("partial structure: %s\n", state->structure); printf("\n"); printf(" partial_energy: %d\n", state->partial_energy); /* printf(" best_energy: %d\n", state->best_energy); */ (void)fflush(stdout); } /*---------------------------------------------------------------------------*/ /*@unused @*/ PRIVATE void print_stack(LIST *list) { void *rec; printf("================\n"); printf("%d states\n", list->count); for (rec = lst_first(list); rec; rec = lst_next(rec)) { printf("state-----------\n"); print_state(rec); } printf("================\n"); } /*---------------------------------------------------------------------------*/ PRIVATE LIST * make_list(void) { return lst_init(); } 
/*---------------------------------------------------------------------------*/ PRIVATE void push(LIST *list, void *data) { lst_insertafter(list, data, LST_HEAD(list)); } /* PRIVATE void */ /* push_stack(STATE *state) { */ /* keep the stack sorted by energy */ /* STATE *after, *next; */ /* nopush = false; */ /* next = after = LST_HEAD(Stack); */ /* while ( next = lst_next(next)) { */ /* if ( next->best_energy >= state->best_energy ) break; */ /* after = next; */ /* } */ /* lst_insertafter(Stack, state, after); */ /* } */ /*---------------------------------------------------------------------------*/ PRIVATE void * pop(LIST *list) { void *data; data = lst_deletenext(list, LST_HEAD(list)); return data; } /*---------------------------------------------------------------------------*/ /*auxiliary routines---------------------------------------------------------*/ /*---------------------------------------------------------------------------*/ PRIVATE int best_attainable_energy(vrna_fold_compound_t *vc, STATE *state) { /* evaluation of best possible energy attainable within remaining intervals */ register int sum; INTERVAL *next; vrna_md_t *md; vrna_mx_mfe_t *matrices; int *indx; md = &(vc->params->model_details); matrices = vc->matrices; indx = vc->jindx; sum = state->partial_energy; /* energy of already found elements */ for (next = lst_first(state->Intervals); next; next = lst_next(next)) { if (next->array_flag == 0) sum += (md->circ) ? 
matrices->Fc : matrices->f5[next->j]; else if (next->array_flag == 1) sum += matrices->fML[indx[next->j] + next->i]; else if (next->array_flag == 2) sum += matrices->c[indx[next->j] + next->i]; else if (next->array_flag == 3) sum += matrices->fM1[indx[next->j] + next->i]; else if (next->array_flag == 4) sum += matrices->fc[next->i]; else if (next->array_flag == 5) sum += matrices->fc[next->j]; else if (next->array_flag == 6) sum += matrices->ggg[indx[next->j] + next->i]; } return sum; } /*---------------------------------------------------------------------------*/ PRIVATE void push_back(LIST *Stack, STATE *state) { push(Stack, copy_state(state)); return; } /*---------------------------------------------------------------------------*/ PRIVATE char * get_structure(STATE *state) { char *structure; structure = strdup(state->structure); return structure; } /*---------------------------------------------------------------------------*/ PRIVATE int compare(const void *solution1, const void *solution2) { if (((SOLUTION *)solution1)->energy > ((SOLUTION *)solution2)->energy) return 1; if (((SOLUTION *)solution1)->energy < ((SOLUTION *)solution2)->energy) return -1; return strcmp(((SOLUTION *)solution1)->structure, ((SOLUTION *)solution2)->structure); } PRIVATE int compare_en(const void *solution1, const void *solution2) { if (((SOLUTION *)solution1)->energy > ((SOLUTION *)solution2)->energy) return 1; if (((SOLUTION *)solution1)->energy < ((SOLUTION *)solution2)->energy) return -1; return 0; } /*---------------------------------------------------------------------------*/ PRIVATE void make_output(SOLUTION *SL, int cp, FILE *fp) /* prints stuff */ { SOLUTION *sol; for (sol = SL; sol->structure != NULL; sol++) { char *e_string = vrna_strdup_printf(" %6.2f", sol->energy); char *ss = vrna_db_unpack(sol->structure); char *s = vrna_cut_point_insert(ss, cp); print_structure(fp, s, e_string); free(s); free(ss); free(e_string); } } PRIVATE STATE * derive_new_state(int i, int j, 
STATE *s, int e, int flag) { STATE *s_new = copy_state(s); INTERVAL *ival = make_interval(i, j, flag); push(s_new->Intervals, ival); s_new->partial_energy += e; return s_new; } PRIVATE void fork_state(int i, int j, STATE *s, int e, int flag, subopt_env *env) { STATE *s_new = derive_new_state(i, j, s, e, flag); push(env->Stack, s_new); env->nopush = false; } PRIVATE void fork_int_state(int i, int j, int p, int q, STATE *s, int e, subopt_env *env) { STATE *s_new = derive_new_state(p, q, s, e, 2); make_pair(i, j, s_new); make_pair(p, q, s_new); push(env->Stack, s_new); env->nopush = false; } PRIVATE void fork_state_pair(int i, int j, STATE *s, int e, subopt_env *env) { STATE *new_state; new_state = copy_state(s); make_pair(i, j, new_state); new_state->partial_energy += e; push(env->Stack, new_state); env->nopush = false; } PRIVATE void fork_two_states_pair(int i, int j, int k, STATE *s, int e, int flag1, int flag2, subopt_env *env) { INTERVAL *interval1, *interval2; STATE *new_state; new_state = copy_state(s); interval1 = make_interval(i + 1, k - 1, flag1); interval2 = make_interval(k, j - 1, flag2); if (k - i < j - k) { /* push larger interval first */ push(new_state->Intervals, interval1); push(new_state->Intervals, interval2); } else { push(new_state->Intervals, interval2); push(new_state->Intervals, interval1); } make_pair(i, j, new_state); new_state->partial_energy += e; push(env->Stack, new_state); env->nopush = false; } PRIVATE void fork_two_states(int i, int j, int p, int q, STATE *s, int e, int flag1, int flag2, subopt_env *env) { INTERVAL *interval1, *interval2; STATE *new_state; new_state = copy_state(s); interval1 = make_interval(i, j, flag1); interval2 = make_interval(p, q, flag2); if ((j - i) < (q - p)) { push(new_state->Intervals, interval1); push(new_state->Intervals, interval2); } else { push(new_state->Intervals, interval2); push(new_state->Intervals, interval1); } new_state->partial_energy += e; push(env->Stack, new_state); env->nopush = false; } 
/*---------------------------------------------------------------------------*/ /* start of subopt backtracking ---------------------------------------------*/ /*---------------------------------------------------------------------------*/ PUBLIC SOLUTION * vrna_subopt(vrna_fold_compound_t *vc, int delta, int sorted, FILE *fp) { struct old_subopt_dat data; vrna_subopt_callback *cb; data.SolutionList = NULL; data.max_sol = 128; data.n_sol = 0; data.fp = fp; data.cp = vc->cutpoint; if (vc) { /* SolutionList stores the suboptimal structures found */ data.SolutionList = (SOLUTION *)vrna_alloc(data.max_sol * sizeof(SOLUTION)); /* end initialize ------------------------------------------------------- */ if (fp) { float min_en; char *SeQ, *energies = NULL; if (vc->strands > 1) min_en = vrna_mfe_dimer(vc, NULL); else min_en = vrna_mfe(vc, NULL); SeQ = vrna_cut_point_insert(vc->sequence, vc->cutpoint); energies = vrna_strdup_printf(" %6.2f %6.2f", min_en, (float)delta / 100.); print_structure(fp, SeQ, energies); free(SeQ); free(energies); vrna_mx_mfe_free(vc); } cb = old_subopt_store; if (fp) cb = (sorted) ? old_subopt_store_compressed : old_subopt_print; /* call subopt() */ vrna_subopt_cb(vc, delta, cb, (void *)&data); if (sorted) { /* sort structures by energy */ if (data.n_sol > 0) { int (*compare_fun)(const void *a, const void *b); switch (sorted) { case VRNA_SORT_BY_ENERGY_ASC: compare_fun = compare_en; break; default: /* a.k.a. 
VRNA_SORT_BY_ENERGY_LEXICOGRAPHIC_ASC */ compare_fun = compare; break; } qsort(data.SolutionList, data.n_sol - 1, sizeof(SOLUTION), compare_fun); } if (fp) make_output(data.SolutionList, vc->cutpoint, fp); } if (fp) { /* we've printed everything -- free solutions */ SOLUTION *sol; for (sol = data.SolutionList; sol->structure != NULL; sol++) free(sol->structure); free(data.SolutionList); data.SolutionList = NULL; } } return data.SolutionList; } PUBLIC void vrna_subopt_cb(vrna_fold_compound_t *vc, int delta, vrna_subopt_callback *cb, void *data) { subopt_env *env; STATE *state; INTERVAL *interval; unsigned int *so, *ss, *se; int maxlevel, count, partial_energy, old_dangles, logML, dangle_model, length, circular, threshold; double structure_energy, min_en, eprint; char *struc, *structure; float correction; vrna_param_t *P; vrna_md_t *md; int minimal_energy; int Fc; int *f5; vrna_fold_compound_prepare(vc, VRNA_OPTION_MFE | VRNA_OPTION_HYBRID); length = vc->length; so = vc->strand_order; ss = vc->strand_start; se = vc->strand_end; P = vc->params; md = &(P->model_details); /* do mfe folding to get fill arrays and get ground state energy */ /* in case dangles is neither 0 or 2, set dangles=2 while folding */ circular = md->circ; logML = md->logML; old_dangles = dangle_model = md->dangles; if (md->uniq_ML != 1) /* failsafe mechanism to enforce valid fM1 array */ md->uniq_ML = 1; /* temporarily set dangles to 2 if necessary */ if ((md->dangles != 0) && (md->dangles != 2)) md->dangles = 2; struc = (char *)vrna_alloc(sizeof(char) * (length + 1)); if (circular) { min_en = vrna_mfe(vc, struc); Fc = vc->matrices->Fc; f5 = vc->matrices->f5; /* restore dangle model */ md->dangles = old_dangles; /* re-evaluate in case we're using logML etc */ min_en = vrna_eval_structure(vc, struc); } else { min_en = vrna_mfe_dimer(vc, struc); f5 = vc->matrices->f5; /* restore dangle model */ md->dangles = old_dangles; /* re-evaluate in case we're using logML etc */ min_en = vrna_eval_structure(vc, 
struc); } free(struc); eprint = print_energy + min_en; correction = (min_en < 0) ? -0.1 : 0.1; /* Initialize ------------------------------------------------------------ */ maxlevel = 0; count = 0; partial_energy = 0; /* Initialize the stack ------------------------------------------------- */ minimal_energy = (circular) ? Fc : f5[length]; threshold = minimal_energy + delta; if (threshold >= INF) { vrna_message_warning("Energy range too high, limiting to reasonable value"); threshold = INF - EMAX; } /* init env data structure */ env = (subopt_env *)vrna_alloc(sizeof(subopt_env)); env->Stack = NULL; env->nopush = true; env->Stack = make_list(); /* anchor */ env->Intervals = make_list(); /* initial state: */ interval = make_interval(1, length, 0); /* interval [1,length,0] */ push(env->Intervals, interval); env->nopush = false; state = make_state(env->Intervals, NULL, partial_energy, 0, length); /* state->best_energy = minimal_energy; */ push(env->Stack, state); env->nopush = false; /* end initialize ------------------------------------------------------- */ while (1) { /* forever, til nothing remains on stack */ maxlevel = (env->Stack->count > maxlevel ? env->Stack->count : maxlevel); if (LST_EMPTY(env->Stack)) { /* we are done! clean up and quit */ /* fprintf(stderr, "maxlevel: %d\n", maxlevel); */ lst_kill(env->Stack, free_state_node); cb(NULL, 0, data); /* NULL (last time to call callback function */ break; } /* pop the last element ---------------------------------------------- */ state = pop(env->Stack); /* current state to work with */ if (LST_EMPTY(state->Intervals)) { int e; /* state has no intervals left: we got a solution */ count++; structure = get_structure(state); structure_energy = state->partial_energy / 100.; #ifdef CHECK_ENERGY structure_energy = vrna_eval_structure(vc, structure); if (!logML) if ((double)(state->partial_energy / 100.) 
!= structure_energy) { vrna_message_error("%s %6.2f %6.2f", structure, state->partial_energy / 100., structure_energy); exit(1); } #endif if (logML || (dangle_model == 1) || (dangle_model == 3)) /* recalc energy */ structure_energy = vrna_eval_structure(vc, structure); e = (int)((structure_energy - min_en) * 10. - correction); /* avoid rounding errors */ if (e > MAXDOS) e = MAXDOS; density_of_states[e]++; if (structure_energy <= eprint) { char *outstruct = vrna_cut_point_insert(structure, (vc->strands > 1) ? ss[so[1]] : -1); cb((const char *)outstruct, structure_energy, data); free(outstruct); } free(structure); } else { /* get (and remove) next interval of state to analyze */ interval = pop(state->Intervals); scan_interval(vc, interval->i, interval->j, interval->array_flag, threshold, state, env); free_interval_node(interval); /* free the current interval */ } free_state_node(state); /* free the current state */ } /* end of while (1) */ /* cleanup memory */ free(env); } PRIVATE void scan_interval(vrna_fold_compound_t *vc, int i, int j, int array_flag, int threshold, STATE *state, subopt_env *env) { /* real backtrack routine */ /* array_flag = 0: trace back in f5-array */ /* array_flag = 1: trace back in fML-array */ /* array_flag = 2: trace back in repeat() */ /* array_flag = 3: trace back in fM1-array */ STATE *new_state, *temp_state; INTERVAL *new_interval; vrna_param_t *P; vrna_md_t *md; register int k, fi, cij, ij; register int type; register int dangle_model; register int noLP; unsigned int *sn, *so, *ss, *se; int element_energy, best_energy; int *fc, *f5, *c, *fML, *fM1, *ggg; int FcH, FcI, FcM, *fM2; int length, *indx, *rtype, circular, with_gquad, turn; char *ptype; short *S1; unsigned char *hard_constraints, hc_decompose; vrna_hc_t *hc; vrna_sc_t *sc; length = vc->length; sn = vc->strand_number; so = vc->strand_order; ss = vc->strand_start; se = vc->strand_end; indx = vc->jindx; ptype = vc->ptype; S1 = vc->sequence_encoding; P = vc->params; md = 
&(P->model_details); rtype = &(md->rtype[0]); dangle_model = md->dangles; noLP = md->noLP; circular = md->circ; with_gquad = md->gquad; turn = md->min_loop_size; fc = vc->matrices->fc; f5 = vc->matrices->f5; c = vc->matrices->c; fML = vc->matrices->fML; fM1 = vc->matrices->fM1; ggg = vc->matrices->ggg; FcH = vc->matrices->FcH; FcI = vc->matrices->FcI; FcM = vc->matrices->FcM; fM2 = vc->matrices->fM2; hc = vc->hc; hard_constraints = hc->mx; sc = vc->sc; best_energy = best_attainable_energy(vc, state); /* .. on remaining intervals */ env->nopush = true; if ((i > 1) && (!array_flag)) vrna_message_error("Error while backtracking!"); if ((j < i + turn + 1) && ((sn[i] == so[1]) || (sn[j] == so[0]))) { /* minimal structure element */ if (array_flag == 0) /* do not forget to add f5[j], since it may contain pseudo energies from soft constraining */ state->partial_energy += f5[j]; if (env->nopush) { push_back(env->Stack, state); env->nopush = false; } return; } ij = indx[j] + i; /* 13131313131313131313131313131313131313131313131313131313131313131313131 */ if (array_flag == 3 || array_flag == 1) { /* array_flag = 3: interval i,j was generated during */ /* a multiloop decomposition using array fM1 in repeat() */ /* or in this block */ /* array_flag = 1: interval i,j was generated from a */ /* stack, bulge, or internal loop in repeat() */ /* or in this block */ if ((hc->up_ml[j]) && (((array_flag == 3) && (fM1[indx[j - 1] + i] != INF)) || (fML[indx[j - 1] + i] != INF))) { if (array_flag == 3) fi = fM1[indx[j - 1] + i] + P->MLbase; else fi = fML[indx[j - 1] + i] + P->MLbase; if (sc) { if (sc->energy_up) fi += sc->energy_up[j][1]; if (sc->f) fi += sc->f(i, j, i, j - 1, VRNA_DECOMP_ML_ML, sc->data); } if ((fi + best_energy <= threshold) && (sn[j - 1] == sn[j])) { /* no basepair, nibbling of 3'-end */ if ((sc) && (sc->energy_up)) fork_state(i, j - 1, state, P->MLbase + sc->energy_up[j][1], array_flag, env); else fork_state(i, j - 1, state, P->MLbase, array_flag, env); } } 
hc_decompose = hard_constraints[length * i + j]; if (hc_decompose & VRNA_CONSTRAINT_CONTEXT_MB_LOOP_ENC) { /* i,j may pair */ cij = c[ij]; if (cij != INF) { type = vrna_get_ptype(ij, ptype); switch (dangle_model) { case 0: element_energy = E_MLstem(type, -1, -1, P); break; default: element_energy = E_MLstem(type, (((i > 1) && (sn[i - 1] == sn[i])) || circular) ? S1[i - 1] : -1, (((j < length) && (sn[j] == sn[j + 1])) || circular) ? S1[j + 1] : -1, P); break; } if (sc) { if (sc->f) element_energy += sc->f(i, j, i, j, VRNA_DECOMP_ML_STEM, sc->data); } cij += element_energy; if (cij + best_energy <= threshold) repeat(vc, i, j, state, element_energy, 0, best_energy, threshold, env); } } else if ((with_gquad) && (ggg[ij] != INF)) { element_energy = E_MLstem(0, -1, -1, P); cij = ggg[ij] + element_energy; if (cij + best_energy <= threshold) repeat_gquad(vc, i, j, state, element_energy, 0, best_energy, threshold, env); } } /* array_flag == 3 || array_flag == 1 */ /* 11111111111111111111111111111111111111111111111111111111111111111111111 */ if (array_flag == 1) { /* array_flag = 1: interval i,j was generated from a */ /* stack, bulge, or internal loop in repeat() */ /* or in this block */ int stopp, k1j; if ((sn[i - 1] == sn[i]) && (sn[j] == sn[j + 1])) { /*backtrack in FML only if multiloop is possible*/ for (k = i + turn + 1; k <= j - 1 - turn; k++) { /* Multiloop decomposition if i,j contains more than 1 stack */ if ((with_gquad) && (sn[k] == sn[k + 1]) && (fML[indx[k] + i] != INF) && (ggg[indx[j] + k + 1] != INF)) { element_energy = E_MLstem(0, -1, -1, P); if (fML[indx[k] + i] + ggg[indx[j] + k + 1] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(i, k, state, 0, array_flag); env->nopush = false; repeat_gquad(vc, k + 1, j, temp_state, element_energy, fML[indx[k] + i], best_energy, threshold, env); free_state_node(temp_state); } } k1j = indx[j] + k + 1; if ((hard_constraints[length * j + k + 1] & VRNA_CONSTRAINT_CONTEXT_MB_LOOP_ENC) && 
(fML[indx[k] + i] != INF) && (c[k1j] != INF)) { short s5, s3; type = vrna_get_ptype(k1j, ptype); switch (dangle_model) { case 0: s5 = s3 = -1; break; default: s5 = (sn[i - 1] == sn[i]) ? S1[k] : -1; s3 = (sn[j] == sn[j + 1]) ? S1[j + 1] : -1; break; } element_energy = E_MLstem(type, s5, s3, P); if (sc) { if (sc->f) element_energy += sc->f(i, j, k, k + 1, VRNA_DECOMP_ML_ML_STEM, sc->data); } if (sn[k] == sn[k + 1]) { if (fML[indx[k] + i] + c[k1j] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(i, k, state, 0, array_flag); env->nopush = false; repeat(vc, k + 1, j, temp_state, element_energy, fML[indx[k] + i], best_energy, threshold, env); free_state_node(temp_state); } } } } } if (vc->strands > 1) { stopp = se[so[0]] - 1; /*if cp -1: k on cut, => no ml*/ stopp = MIN2(stopp, j - 1 - turn); if (i > ss[so[1]]) stopp = j - 1 - turn; else if (i == ss[so[1]]) stopp = 0; /*not a multi loop*/ } else { stopp = j - 1 - turn; } int up = 1; for (k = i; k <= stopp; k++, up++) { if (hc->up_ml[i] >= up) { k1j = indx[j] + k + 1; /* Multiloop decomposition if i,j contains only 1 stack */ if ((with_gquad) && (ggg[k1j] != INF)) { element_energy = E_MLstem(0, -1, -1, P) + P->MLbase * up; if (sc) if (sc->energy_up) element_energy += sc->energy_up[i][up]; if (ggg[k1j] + element_energy + best_energy <= threshold) repeat_gquad(vc, k + 1, j, state, element_energy, 0, best_energy, threshold, env); } if ((hard_constraints[length * j + k + 1] & VRNA_CONSTRAINT_CONTEXT_MB_LOOP_ENC) && (c[k1j] != INF)) { int s5, s3; type = vrna_get_ptype(k1j, ptype); switch (dangle_model) { case 0: s5 = s3 = -1; break; default: s5 = (sn[k - 1] == sn[k]) ? S1[k] : -1; s3 = (sn[j] == sn[j + 1]) ? 
S1[j + 1] : -1; break; } element_energy = E_MLstem(type, s5, s3, P); element_energy += P->MLbase * up; if (sc) { if (sc->energy_up) element_energy += sc->energy_up[i][up]; if (sc->f) element_energy += sc->f(i, j, k + 1, j, VRNA_DECOMP_ML_STEM, sc->data); } if (c[k1j] + element_energy + best_energy <= threshold) repeat(vc, k + 1, j, state, element_energy, 0, best_energy, threshold, env); } } } } /* array_flag == 1 */ /* 22222222222222222222222222222222222222222222222222 */ /* */ /* array_flag = 2: interval i,j was generated from a */ /* stack, bulge, or internal loop in repeat() */ /* */ /* 22222222222222222222222222222222222222222222222222 */ if (array_flag == 2) { repeat(vc, i, j, state, 0, 0, best_energy, threshold, env); if (env->nopush) if (!noLP) vrna_message_warning("%d,%d\nOops, no solution in repeat!", i, j); return; } /* 00000000000000000000000000000000000000000000000000 */ /* */ /* array_flag = 0: interval i,j was found while */ /* tracing back through f5-array and c-array */ /* or within this block */ /* */ /* 00000000000000000000000000000000000000000000000000 */ if ((array_flag == 0) && !circular) { int s5, s3, kj, tmp_en; if ((hc->up_ext[j]) && (f5[j - 1] != INF)) { tmp_en = 0; if (sc) { if (sc->energy_up) tmp_en += sc->energy_up[j][1]; if (sc->f) tmp_en += sc->f(1, j, 1, j - 1, VRNA_DECOMP_EXT_EXT, sc->data); } if (f5[j - 1] + tmp_en + best_energy <= threshold) /* no basepair, nibbling of 3'-end */ fork_state(i, j - 1, state, tmp_en, 0, env); } for (k = j - turn - 1; k > 1; k--) { kj = indx[j] + k; if ((with_gquad) && (sn[k] == sn[j]) && (f5[k - 1] != INF) && (ggg[kj] != INF)) { element_energy = 0; if (f5[k - 1] + ggg[kj] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(1, k - 1, state, 0, 0); env->nopush = false; /* backtrace the quadruplex */ repeat_gquad(vc, k, j, temp_state, element_energy, f5[k - 1], best_energy, threshold, env); free_state_node(temp_state); } } if ((hard_constraints[length * j + k] & 
VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) && (f5[k - 1] != INF) && (c[kj] != INF)) { type = vrna_get_ptype(kj, ptype); /* k and j pair */ switch (dangle_model) { case 0: s5 = s3 = -1; break; default: s5 = (sn[k - 1] == sn[k]) ? S1[k - 1] : -1; s3 = ((j < length) && (sn[j] == sn[j + 1])) ? S1[j + 1] : -1; break; } element_energy = vrna_E_ext_stem(type, s5, s3, P); if (sn[k] != sn[j]) /*&&(state->is_duplex==0))*/ element_energy += P->DuplexInit; /*state->is_duplex=1;*/ if (sc) { if (sc->f) element_energy += sc->f(1, j, k - 1, k, VRNA_DECOMP_EXT_EXT_STEM, sc->data); } if (f5[k - 1] + c[kj] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(1, k - 1, state, 0, 0); env->nopush = false; repeat(vc, k, j, temp_state, element_energy, f5[k - 1], best_energy, threshold, env); free_state_node(temp_state); } } } kj = indx[j] + 1; if ((with_gquad) && (sn[k] == sn[j]) && (ggg[kj] != INF)) { element_energy = 0; if (ggg[kj] + element_energy + best_energy <= threshold) /* backtrace the quadruplex */ repeat_gquad(vc, 1, j, state, element_energy, 0, best_energy, threshold, env); } if ((hard_constraints[length + j] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) && (c[kj] != INF)) { type = vrna_get_ptype(kj, ptype); s5 = -1; switch (dangle_model) { case 0: s3 = -1; break; default: s3 = (j < length) && (sn[j] == sn[j + 1]) ? S1[j + 1] : -1; break; } element_energy = vrna_E_ext_stem(type, s5, s3, P); if (sn[1] != sn[j]) element_energy += P->DuplexInit; if (sc) { if (sc->f) element_energy += sc->f(1, j, 1, j, VRNA_DECOMP_EXT_STEM, sc->data); } if (c[kj] + element_energy + best_energy <= threshold) repeat(vc, 1, j, state, element_energy, 0, best_energy, threshold, env); } } /* end array_flag == 0 && !circular*/ /* or do we subopt circular? 
*/ else if (array_flag == 0) { int k, l, p, q, tmp_en; /* if we've done everything right, we will never reach this case more than once */ /* right after the initilization of the stack with ([1,n], empty, 0) */ /* lets check, if we can have an open chain without breaking the threshold */ /* this is an ugly work-arround cause in case of an open chain we do not have to */ /* backtrack anything further... */ if (hc->up_ext[1] >= length) { tmp_en = 0; if (sc) { if (sc->energy_up) tmp_en += sc->energy_up[1][length]; if (sc->f) tmp_en += sc->f(1, j, 1, j, VRNA_DECOMP_EXT_UP, sc->data); } if (tmp_en <= threshold) { new_state = derive_new_state(1, 2, state, 0, 0); new_state->partial_energy = 0; push(env->Stack, new_state); env->nopush = false; } } /* ok, lets check if we can do an exterior hairpin without breaking the threshold */ /* best energy should be 0 if we are here */ if (FcH + best_energy <= threshold) { /* lets search for all exterior hairpin cases, that fit into our threshold barrier */ /* we use index k,l to avoid confusion with i,j index of our state... */ /* if we reach here, i should be 1 and j should be n respectively */ for (k = i; k < j; k++) { if (hc->up_hp[1] < k) break; for (l = j; l >= k + turn + 1; l--) { int kl, tmpE; kl = indx[l] + k; if (c[kl] != INF) { tmpE = vrna_E_hp_loop(vc, l, k); if (c[kl] + tmpE + best_energy <= threshold) { /* what we really have to do is something like this, isn't it? */ /* we have to create a new state, with interval [k,l], then we */ /* add our loop energy as initial energy of this state and put */ /* the state onto the stack R... for further refinement... 
*/ /* we also denote this new interval to be scanned in C */ fork_state(k, l, state, tmpE, 2, env); } } } } } /* now lets see, if we can do an exterior interior loop without breaking the threshold */ if (FcI + best_energy <= threshold) { /* now we search for our exterior interior loop possibilities */ for (k = i; k < j; k++) { for (l = j; l >= k + turn + 1; l--) { int kl, type, tmpE; kl = indx[l] + k; /* just confusing these indices ;-) */ if ((hard_constraints[length * k + l] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) && (c[kl] != INF)) { type = rtype[vrna_get_ptype(kl, ptype)]; for (p = l + 1; p < j; p++) { int u1, qmin; u1 = p - l - 1; if (u1 + k - 1 > MAXLOOP) break; if (hc->up_int[l + 1] < u1) break; qmin = u1 + k - 1 + j - MAXLOOP; if (qmin < p + turn + 1) qmin = p + turn + 1; for (q = j; q >= qmin; q--) { int u2, type_2; if (hc->up_int[q + 1] < (j - q + k - 1)) break; if ((hard_constraints[length * p + q] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) && (c[indx[q] + p] != INF)) { type_2 = rtype[vrna_get_ptype(indx[q] + p, ptype)]; u2 = k - 1 + j - q; if (u1 + u2 > MAXLOOP) continue; tmpE = E_IntLoop(u1, u2, type, type_2, S1[l + 1], S1[k - 1], S1[p - 1], S1[q + 1], P); if (sc) { if (sc->energy_up) tmpE += sc->energy_up[l + 1][p - l - 1] + sc->energy_up[q + 1][j - q] + sc->energy_up[1][k - 1]; if (sc->energy_stack) { if (u1 + u2 == 0) { tmpE += sc->energy_stack[k] + sc->energy_stack[l] + sc->energy_stack[p] + sc->energy_stack[q]; } } } if (c[kl] + c[indx[q] + p] + tmpE + best_energy <= threshold) { /* ok, similar to the hairpin stuff, we add new states onto the stack R */ /* but in contrast to the hairpin decomposition, we have to add two new */ /* intervals, enclosed by k,l and p,q respectively and we also have to */ /* add the partial energy, that comes from the exterior interior loop */ fork_two_states(k, l, p, q, state, tmpE, 2, 2, env); } } } } } } } } /* and last but not least, we have a look, if we can do an exterior multiloop within the energy threshold */ if (FcM <= 
threshold) { /* this decomposition will be somehow more complicated...so lets see what we do here... */ /* first we want to find out which split inidices we can use without exceeding the threshold */ int tmpE2; for (k = turn + 1; k < j - 2 * turn; k++) { if ((fML[indx[k] + 1] != INF) && (fM2[k + 1] != INF)) { tmpE2 = fML[indx[k] + 1] + fM2[k + 1] + P->MLclosing; if (tmpE2 + best_energy <= threshold) { /* grmpfh, we have found a possible split index k so we have to split fM2 and fML now */ /* lets do it first in fM2 anyway */ for (l = k + turn + 2; l < j - turn - 1; l++) { tmpE2 = fM1[indx[l] + k + 1] + fM1[indx[j] + l + 1]; if (tmpE2 + fML[indx[k] + 1] + P->MLclosing <= threshold) { /* we've (hopefully) found a valid decomposition of fM2 and therefor we have all */ /* three intervals for our new state to be pushed on stack R */ new_state = copy_state(state); /* first interval leads for search in fML array */ new_interval = make_interval(1, k, 1); push(new_state->Intervals, new_interval); env->nopush = false; /* next, we have the first interval that has to be traced in fM1 */ new_interval = make_interval(k + 1, l, 3); push(new_state->Intervals, new_interval); env->nopush = false; /* and the last of our three intervals is also one to be traced within fM1 array... */ new_interval = make_interval(l + 1, j, 3); push(new_state->Intervals, new_interval); env->nopush = false; /* mmh, we add the energy for closing the multiloop now... */ new_state->partial_energy += P->MLclosing; /* next we push our state onto the R stack */ push(env->Stack, new_state); env->nopush = false; } /* else we search further... */ } /* ok, we have to decompose fML now... */ } } } } } /* thats all folks for the circular case... 
*/ /* 44444444444444444444444444444444444444444444444444 */ /* */ /* array_flag = 4: interval i,j was found while */ /* tracing back through fc-array smaller than than cp */ /* or within this block */ /* */ /* 44444444444444444444444444444444444444444444444444 */ if (array_flag == 4) { int ik, s5, s3, tmp_en; if ((hc->up_ext[i]) && (fc[i + 1] != INF)) { tmp_en = 0; if (sc) { if (sc->energy_up) tmp_en += sc->energy_up[i][1]; if (sc->f) tmp_en += sc->f(i, j, i + 1, j, VRNA_DECOMP_EXT_EXT, sc->data); } if (fc[i + 1] + tmp_en + best_energy <= threshold) /* no basepair, nibbling of 5'-end */ fork_state(i + 1, j, state, tmp_en, 4, env); } for (k = i + turn + 1; k < j; k++) { ik = indx[k] + i; if ((with_gquad) && (fc[k + 1] != INF) && (ggg[ik] != INF)) { if (fc[k + 1] + ggg[ik] + best_energy <= threshold) { temp_state = derive_new_state(k + 1, j, state, 0, 4); env->nopush = false; repeat_gquad(vc, i, k, temp_state, 0, fc[k + 1], best_energy, threshold, env); free_state_node(temp_state); } } if ((hard_constraints[length * i + k] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) && (fc[k + 1] != INF) && (c[ik] != INF)) { type = vrna_get_ptype(ik, ptype); switch (dangle_model) { case 0: s5 = s3 = -1; break; default: s5 = (i > 1) ? 
S1[i - 1] : -1; s3 = S1[k + 1]; break; } element_energy = vrna_E_ext_stem(type, s5, s3, P); if (sc) { if (sc->f) element_energy += sc->f(i, j, k, k + 1, VRNA_DECOMP_EXT_STEM_EXT, sc->data); } if (fc[k + 1] + c[ik] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(k + 1, j, state, 0, 4); env->nopush = false; repeat(vc, i, k, temp_state, element_energy, fc[k + 1], best_energy, threshold, env); free_state_node(temp_state); } } } ik = indx[se[so[0]]] + i; /* indx[j] + i; */ if ((with_gquad) && (ggg[ik] != INF)) if (ggg[ik] + best_energy <= threshold) repeat_gquad(vc, i, se[so[0]], state, 0, 0, best_energy, threshold, env); if ((hard_constraints[length * i + se[so[0]]] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) && (c[ik] != INF)) { type = vrna_get_ptype(ik, ptype); s3 = -1; switch (dangle_model) { case 0: s5 = -1; break; default: s5 = (i > 1) ? S1[i - 1] : -1; break; } element_energy = vrna_E_ext_stem(type, s5, s3, P); if (sc) { if (sc->f) element_energy += sc->f(i, se[so[0]], i, se[so[0]], VRNA_DECOMP_EXT_STEM, sc->data); } if (c[ik] + element_energy + best_energy <= threshold) repeat(vc, i, se[so[0]], state, element_energy, 0, best_energy, threshold, env); } } /* array_flag == 4 */ /* 55555555555555555555555555555555555555555555555555 */ /* */ /* array_flag = 5: interval cp=i,j was found while */ /* tracing back through fc-array greater than cp */ /* or within this block */ /* */ /* 55555555555555555555555555555555555555555555555555 */ if (array_flag == 5) { int kj, s5, s3, tmp_en; if ((hc->up_ext[j]) && (fc[j - 1] != INF)) { tmp_en = 0; if (sc) { if (sc->energy_up) tmp_en += sc->energy_up[j][1]; if (sc->f) tmp_en += sc->f(i, j, i, j - 1, VRNA_DECOMP_EXT_EXT, sc->data); } if (fc[j - 1] + tmp_en + best_energy <= threshold) /* no basepair, nibbling of 3'-end */ fork_state(i, j - 1, state, tmp_en, 5, env); } for (k = j - turn - 1; k > i; k--) { kj = indx[j] + k; if ((with_gquad) && (fc[k - 1] != INF) && (ggg[kj] != INF)) { if (fc[k - 1] + ggg[kj] + 
best_energy <= threshold) { temp_state = derive_new_state(i, k - 1, state, 0, 5); env->nopush = false; repeat_gquad(vc, k, j, temp_state, 0, fc[k - 1], best_energy, threshold, env); free_state_node(temp_state); } } if ((hard_constraints[length * j + k] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) && (fc[k - 1] != INF) && (c[kj] != INF)) { type = vrna_get_ptype(kj, ptype); element_energy = 0; switch (dangle_model) { case 0: s3 = s5 = -1; break; default: s5 = S1[k - 1]; s3 = (j < length) ? S1[j + 1] : -1; break; } element_energy = vrna_E_ext_stem(type, s5, s3, P); if (sc) { if (sc->f) element_energy += sc->f(i, j, k - 1, k, VRNA_DECOMP_EXT_EXT_STEM, sc->data); } if (fc[k - 1] + c[kj] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(i, k - 1, state, 0, 5); env->nopush = false; repeat(vc, k, j, temp_state, element_energy, fc[k - 1], best_energy, threshold, env); free_state_node(temp_state); } } } kj = indx[j] + ss[so[1]]; /* indx[j] + i; */ if ((with_gquad) && (ggg[kj] != INF)) if (ggg[kj] + best_energy <= threshold) repeat_gquad(vc, ss[so[1]], j, state, 0, 0, best_energy, threshold, env); if ((hard_constraints[length * ss[so[1]] + j] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) && (c[kj] != INF)) { type = vrna_get_ptype(kj, ptype); s5 = -1; switch (dangle_model) { case 0: s3 = -1; break; default: s3 = (j < length) ? 
S1[j + 1] : -1;
          break;
      }
      element_energy = vrna_E_ext_stem(type, s5, s3, P);
      if (sc) {
        if (sc->f)
          element_energy += sc->f(ss[so[1]], j, ss[so[1]], j, VRNA_DECOMP_EXT_STEM, sc->data);
      }

      if (c[kj] + element_energy + best_energy <= threshold)
        repeat(vc, ss[so[1]], j, state, element_energy, 0, best_energy, threshold, env);
    }
  } /* array_flag == 5 */

  /* array_flag = 6: interval [i,j] is a g-quadruplex */
  if (array_flag == 6) {
    /* we have a gquad */
    repeat_gquad(vc, i, j, state, 0, 0, best_energy, threshold, env);
    if (env->nopush)
      vrna_message_warning("%d,%d\nOops, no solution in gquad-repeat!", i, j);

    return;
  }

  /* if nothing was pushed in this round, the state itself is a finished
   * (sub)solution and goes back onto the stack for output */
  if (env->nopush) {
    push_back(env->Stack, state);
    env->nopush = false;
  }

  return;
}


/*---------------------------------------------------------------------------*/

/* Enumerate all g-quadruplexes delimited by [i,j] whose energy stays within
 * 'threshold', pushing one new state per matching quadruplex layout.
 * part_energy is the energy of the current structural element, temp_energy
 * the energy of an interval not yet pushed; both are added to best_energy
 * for the duration of the search and removed again before returning. */
PRIVATE void
repeat_gquad(vrna_fold_compound_t *vc,
             int                  i,
             int                  j,
             STATE                *state,
             int                  part_energy,
             int                  temp_energy,
             int                  best_energy,
             int                  threshold,
             subopt_env           *env)
{
  unsigned int  *sn;
  int           *ggg, *indx, element_energy;
  short         *S1;
  vrna_param_t  *P;

  indx  = vc->jindx;
  sn    = vc->strand_number;
  ggg   = vc->matrices->ggg;
  S1    = vc->sequence_encoding;
  P     = vc->params;

  /* find all gquads that fit into the energy range and the interval [i,j] */
  STATE *new_state;

  best_energy += part_energy; /* energy of current structural element */
  best_energy += temp_energy; /* energy from unpushed interval */

  /* quadruplexes may not span a strand break */
  if (sn[i] == sn[j]) {
    element_energy = ggg[indx[j] + i];

    if ((element_energy != INF) && (element_energy + best_energy <= threshold)) {
      int cnt;
      int *L;
      int *l;
      /* find out how many gquads we might expect in the interval [i,j] */
      int num_gquads = get_gquad_count(S1, i, j);
      num_gquads++;
      L     = (int *)vrna_alloc(sizeof(int) * num_gquads);
      l     = (int *)vrna_alloc(sizeof(int) * num_gquads * 3);
      L[0]  = -1;

      get_gquad_pattern_exhaustive(S1, i, j, P, L, l, threshold - best_energy);

      for (cnt = 0; L[cnt] != -1; cnt++) {
        new_state = copy_state(state);

        make_gquad(i, L[cnt], &(l[3 * cnt]), new_state);
        new_state->partial_energy += part_energy;
        new_state->partial_energy += element_energy;
        /* new_state->best_energy = hairpin[unpaired] + element_energy + best_energy; */
        push(env->Stack, new_state);
        env->nopush = false;
      }
      free(L);
      free(l);
    }
  }

  best_energy -= part_energy;
  best_energy -= temp_energy;
  return;
}


/* Enumerate, for the closing pair (i,j), all admissible loop decompositions
 * (stack with noLP, interior loops, cofold exterior split, multiloops,
 * hairpins, gquad-in-interior-loop) whose energy stays within 'threshold',
 * pushing a refined state for each. */
PRIVATE void
repeat(vrna_fold_compound_t *vc,
       int                  i,
       int                  j,
       STATE                *state,
       int                  part_energy,
       int                  temp_energy,
       int                  best_energy,
       int                  threshold,
       subopt_env           *env)
{
  /* routine to find stacks, bulges, internal loops and multiloops */
  /* within interval closed by basepair i,j */
  STATE         *new_state;
  vrna_param_t  *P;
  vrna_md_t     *md;

  register int  ij, k, p, q, energy, new;
  register int  mm;
  register int  no_close, type, type_2;

  char          *ptype;
  unsigned int  n, *sn, *so, *ss, *se;
  int           element_energy;
  int           *fc, *c, *fML, *fM1, *ggg;
  int           rt, *indx, *rtype, noGUclosure, noLP, with_gquad, dangle_model, turn;
  short         *S1;
  vrna_hc_t     *hc;
  vrna_sc_t     *sc;

  n             = vc->length;
  S1            = vc->sequence_encoding;
  ptype         = vc->ptype;
  indx          = vc->jindx;
  sn            = vc->strand_number;
  so            = vc->strand_order;
  ss            = vc->strand_start;
  se            = vc->strand_end;
  P             = vc->params;
  md            = &(P->model_details);
  rtype         = &(md->rtype[0]);
  noGUclosure   = md->noGUclosure;
  noLP          = md->noLP;
  with_gquad    = md->gquad;
  dangle_model  = md->dangles;
  turn          = md->min_loop_size;
  fc            = vc->matrices->fc;
  c             = vc->matrices->c;
  fML           = vc->matrices->fML;
  fM1           = vc->matrices->fM1;
  ggg           = vc->matrices->ggg;
  hc            = vc->hc;
  sc            = vc->sc;

  ij    = indx[j] + i;
  type  = vrna_get_ptype(ij, ptype);

  /*
   * if (type==0) fprintf(stderr, "repeat: Warning: %d %d can't pair\n", i,j);
   */

  no_close = (((type == 3) || (type == 4)) && noGUclosure);

  if (hc->mx[n * i + j] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) {
    if (noLP) {
      /* always consider the structure with additional stack */
      if (i + turn + 2 < j) {
        if (hc->mx[n * (i + 1) + j - 1] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP_ENC) {
          type_2  = rtype[vrna_get_ptype(indx[j - 1] + i + 1, ptype)];
          energy  = 0;
          if ((sn[i] == sn[i + 1]) && (sn[j - 1] == sn[j])) {
            energy = E_IntLoop(0, 0, type, type_2, S1[i + 1], S1[j - 1], S1[i + 1], S1[j - 1], P);

            if (sc)
multiloop decomposition */
          if ((eee + fM1[indx[j - 1] + k] + element_energy + best_energy) <= threshold)
            fork_two_states_pair(i, j, k, state, part_energy + element_energy, 1, 3, env);
        }
      }
    }
  }

  if (sn[i] == sn[j]) {
    /* hairpin loop closed by (i,j) */
    if ((hc->mx[n * i + j] & VRNA_CONSTRAINT_CONTEXT_HP_LOOP) && (!no_close)) {
      element_energy = vrna_E_hp_loop(vc, i, j);
      if (element_energy != INF) {
        if (element_energy + best_energy <= threshold)
          /* hairpin structure */
          fork_state_pair(i, j, state, part_energy + element_energy, env);
      }
    }

    if (with_gquad) {
      /* now we have to find all loops where (i,j) encloses a gquad in an interior loops style */
      int cnt, *p, *q, *en, tmp_en;
      p = q = en = NULL;
      en = E_GQuad_IntLoop_exhaustive(i, j, &p, &q, type, S1, ggg, threshold - best_energy, indx, P);
      /* p/q are (-1)-terminated parallel arrays of gquad delimiters */
      for (cnt = 0; p[cnt] != -1; cnt++) {
        if ((hc->up_int[i + 1] >= p[cnt] - i - 1) &&
            (hc->up_int[q[cnt] + 1] >= j - q[cnt] - 1)) {
          tmp_en = en[cnt];

          if (sc) {
            if (sc->energy_bp)
              tmp_en += sc->energy_bp[ij];

            if (sc->energy_up)
              tmp_en += sc->energy_up[i + 1][p[cnt] - i - 1] + sc->energy_up[q[cnt] + 1][j - q[cnt] - 1];
          }

          new_state = derive_new_state(p[cnt], q[cnt], state, tmp_en + part_energy, 6);
          make_pair(i, j, new_state);
          /* new_state->best_energy = new + best_energy; */
          push(env->Stack, new_state);
          env->nopush = false;
        }
      }
      free(en);
      free(p);
      free(q);
    }
  }

  /* restore the globally tracked bound before returning */
  best_energy -= part_energy;
  best_energy -= temp_energy;

  return;
}


/* vrna_subopt() callback: print one solution as "structure energy" to d->fp.
 * A NULL structure (end-of-enumeration signal) is silently ignored. */
PRIVATE void
old_subopt_print(const char *structure,
                 float      energy,
                 void       *data)
{
  struct old_subopt_dat *d = (struct old_subopt_dat *)data;

  if (structure && d->fp) {
    char *e_string = vrna_strdup_printf(" %6.2f", energy);
    print_structure(d->fp, structure, e_string);
    free(e_string);
  }
}


/* vrna_subopt() callback: append one solution to d->SolutionList, doubling
 * the list capacity when only one free slot remains. A NULL structure is
 * stored as a NULL entry (presumably the end-of-list marker for callers). */
PRIVATE void
old_subopt_store(const char *structure,
                 float      energy,
                 void       *data)
{
  struct old_subopt_dat *d = (struct old_subopt_dat *)data;

  /* store solution */
  if (d->n_sol + 1 == d->max_sol) {
    d->max_sol      *= 2;
    d->SolutionList = (SOLUTION *)vrna_realloc(d->SolutionList, d->max_sol * sizeof(SOLUTION));
  }

  if (structure) {
    d->SolutionList[d->n_sol].energy      = energy;
    d->SolutionList[d->n_sol++].structure = strdup(structure);
  } else {
    /* NULL structure: store a NULL entry with zero energy */
    d->SolutionList[d->n_sol].energy      = 0;
    d->SolutionList[d->n_sol++].structure = NULL;
  }
}


/* vrna_subopt() callback: like old_subopt_store(), but stores the structure
 * in packed dot-bracket form (vrna_db_pack()); an existing cut point
 * (d->cp > 0) is removed from the string before packing. */
PRIVATE void
old_subopt_store_compressed(const char  *structure,
                            float       energy,
                            void        *data)
{
  struct old_subopt_dat *d = (struct old_subopt_dat *)data;

  /* store solution */
  if (d->n_sol + 1 == d->max_sol) {
    d->max_sol      *= 2;
    d->SolutionList = (SOLUTION *)vrna_realloc(d->SolutionList, d->max_sol * sizeof(SOLUTION));
  }

  if (structure) {
    d->SolutionList[d->n_sol].energy = energy;
    if (d->cp > 0) {
      /* strip the strand-break marker before compressing */
      int   cp  = d->cp;
      char  *s  = vrna_cut_point_remove(structure, &cp);
      d->SolutionList[d->n_sol++].structure = vrna_db_pack(s);
      free(s);
    } else {
      d->SolutionList[d->n_sol++].structure = vrna_db_pack(structure);
    }
  } else {
    d->SolutionList[d->n_sol].energy      = 0;
    d->SolutionList[d->n_sol++].structure = NULL;
  }
}


/*###########################################*/
/*# deprecated functions below              #*/
/*###########################################*/

#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY

/* Deprecated wrapper: suboptimal folding of a linear sequence;
 * use vrna_subopt() on a vrna_fold_compound_t instead. */
PUBLIC SOLUTION *
subopt(char *seq,
       char *structure,
       int  delta,
       FILE *fp)
{
  return wrap_subopt(seq, structure, NULL, delta, fold_constrained, 0, fp);
}


/* Deprecated wrapper: suboptimal folding of a circular sequence. */
PUBLIC SOLUTION *
subopt_circ(char *seq,
            char *structure,
            int  delta,
            FILE *fp)
{
  return wrap_subopt(seq, structure, NULL, delta, fold_constrained, 1, fp);
}


/* Deprecated wrapper: suboptimal folding with an explicit parameter set. */
PUBLIC SOLUTION *
subopt_par(char         *seq,
           char         *structure,
           vrna_param_t *parameters,
           int          delta,
           int          is_constrained,
           int          is_circular,
           FILE         *fp)
{
  return wrap_subopt(seq, structure, parameters, delta, is_constrained, is_circular, fp);
}


/* Common backend of the deprecated subopt*() wrappers: builds a
 * vrna_fold_compound_t from the arguments plus the old global variables
 * (temperature, cut_point, fold_constrained, ...) and forwards to
 * vrna_subopt(). */
PRIVATE SOLUTION *
wrap_subopt(char          *string,
            char          *structure,
            vrna_param_t  *parameters,
            int           delta,
            int           is_constrained,
            int           is_circular,
            FILE          *fp)
{
  vrna_fold_compound_t  *vc;
  vrna_param_t          *P;
  char                  *seq;

#ifdef _OPENMP
  /* Explicitly turn off dynamic threads */
  omp_set_dynamic(0);
#endif

  /* we need the parameter structure for hard constraints */
  if (parameters) {
    P =
vrna_params_copy(parameters);
  } else {
    vrna_md_t md;
    set_model_details(&md);
    md.temperature = temperature;
    P = vrna_params(&md);
  }

  /* switch on circular folding if requested and force unique multiloop
   * decomposition; the global uniq_ML is updated alongside */
  P->model_details.circ     = is_circular;
  P->model_details.uniq_ML  = uniq_ML = 1;

  /* what about cofold sequences here? Is it safe to call the below cut_point_insert() ? */
  /* dirty hack to reinsert the '&' according to the global variable 'cut_point' */
  seq = vrna_cut_point_insert(string, cut_point);

  vc = vrna_fold_compound(seq, &(P->model_details), ((is_circular == 0) ? VRNA_OPTION_HYBRID : VRNA_OPTION_DEFAULT));

  if (parameters) {
    /* replace params if necessary */
    free(vc->params);
    vc->params = P;
  } else {
    /* the compound made its own copy of the model details */
    free(P);
  }

  /* handle hard constraints in pseudo dot-bracket format if passed via simple interface */
  if (is_constrained && structure) {
    unsigned int constraint_options = 0;
    constraint_options |= VRNA_CONSTRAINT_DB |
                          VRNA_CONSTRAINT_DB_PIPE |
                          VRNA_CONSTRAINT_DB_DOT |
                          VRNA_CONSTRAINT_DB_X |
                          VRNA_CONSTRAINT_DB_ANG_BRACK |
                          VRNA_CONSTRAINT_DB_RND_BRACK |
                          VRNA_CONSTRAINT_DB_INTRAMOL |
                          VRNA_CONSTRAINT_DB_INTERMOL;

    vrna_constraints_add(vc, (const char *)structure, constraint_options);
  }

  /* keep the compound alive for other backward-compatibility entry points */
  if (backward_compat_compound && backward_compat)
    vrna_fold_compound_free(backward_compat_compound);

  backward_compat_compound  = vc;
  backward_compat           = 1;

  /* cleanup */
  free(seq);

  return vrna_subopt(vc, delta, subopt_sorted, fp);
}


#endif

/*---------------------------------------------------------------------------*/
/* Well, that is the end!----------------------------------------------------*/
/*---------------------------------------------------------------------------*/
ellipticBuildContinuous.c
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #include "elliptic.h" // compare on global indices int parallelCompareRowColumn(const void* a, const void* b) { nonZero_t* fa = (nonZero_t*) a; nonZero_t* fb = (nonZero_t*) b; if(fa->row < fb->row) return -1; if(fa->row > fb->row) return +1; if(fa->col < fb->col) return -1; if(fa->col > fb->col) return +1; return 0; } // void ellipticBuildContinuousTri2D (elliptic_t *elliptic, dfloat lambda, nonZero_t **A, dlong *nnz, ogs_t **ogs, hlong *globalStarts); void ellipticBuildContinuousQuad2D(elliptic_t* elliptic, nonZero_t** A, dlong* nnz, ogs_t** ogs, hlong* globalStarts); // void ellipticBuildContinuousQuad3D(elliptic_t *elliptic, dfloat lambda, nonZero_t **A, dlong *nnz, ogs_t **ogs, hlong *globalStarts); // void ellipticBuildContinuousTet3D (elliptic_t *elliptic, dfloat lambda, nonZero_t **A, dlong *nnz, ogs_t **ogs, hlong *globalStarts); void ellipticBuildContinuousHex3D (elliptic_t* elliptic, nonZero_t** A, dlong* nnz, ogs_t** ogs, hlong* globalStarts); void ellipticBuildContinuous(elliptic_t* elliptic, nonZero_t** A, dlong* nnz, ogs_t** ogs, hlong* globalStarts) { switch(elliptic->elementType) { // case TRIANGLES: // ellipticBuildContinuousTri2D(elliptic, lambda, A, nnz, ogs, globalStarts); break; case QUADRILATERALS: { // if(elliptic->dim==2) ellipticBuildContinuousQuad2D(elliptic, A, nnz, ogs, globalStarts); // else // ellipticBuildContinuousQuad3D(elliptic, lambda, A, nnz, ogs, globalStarts); break; } // case TETRAHEDRA: // ellipticBuildContinuousTet3D(elliptic, lambda, A, nnz, ogs, globalStarts); break; case HEXAHEDRA: ellipticBuildContinuousHex3D(elliptic, A, nnz, ogs, globalStarts); break; } } void ellipticBuildContinuousQuad2D(elliptic_t* elliptic, nonZero_t** A, dlong* nnz, ogs_t** ogs, hlong* globalStarts) { mesh_t* mesh = elliptic->mesh; setupAide options = elliptic->options; // currently constant coefficient case only const dfloat lambda = elliptic->lambda[0]; int rank = mesh->rank; //use the masked gs handle to define a global ordering // number of 
degrees of freedom on this rank (after gathering) hlong Ngather = elliptic->ogs->Ngather; dlong Ntotal = mesh->Np * mesh->Nelements; // create a global numbering system hlong* globalIds = (hlong*) calloc(Ngather,sizeof(hlong)); int* owner = (int*) calloc(Ngather,sizeof(int)); // every gathered degree of freedom has its own global id MPI_Allgather(&Ngather, 1, MPI_HLONG, globalStarts + 1, 1, MPI_HLONG, mesh->comm); for(int r = 0; r < mesh->size; ++r) globalStarts[r + 1] = globalStarts[r] + globalStarts[r + 1]; //use the offsets to set a consecutive global numbering for (dlong n = 0; n < elliptic->ogs->Ngather; n++) { globalIds[n] = n + globalStarts[rank]; owner[n] = rank; } //scatter this numbering to the original nodes hlong* globalNumbering = (hlong*) calloc(Ntotal,sizeof(hlong)); int* globalOwners = (int*) calloc(Ntotal,sizeof(int)); for (dlong n = 0; n < Ntotal; n++) globalNumbering[n] = -1; ogsScatter(globalNumbering, globalIds, ogsHlong, ogsAdd, elliptic->ogs); ogsScatter(globalOwners, owner, ogsInt, ogsAdd, elliptic->ogs); free(globalIds); free(owner); // 2. 
Build non-zeros of stiffness matrix (unassembled) dlong nnzLocal = mesh->Np * mesh->Np * mesh->Nelements; nonZero_t* sendNonZeros = (nonZero_t*) calloc(nnzLocal, sizeof(nonZero_t)); int* AsendCounts = (int*) calloc(mesh->size, sizeof(int)); int* ArecvCounts = (int*) calloc(mesh->size, sizeof(int)); int* AsendOffsets = (int*) calloc(mesh->size + 1, sizeof(int)); int* ArecvOffsets = (int*) calloc(mesh->size + 1, sizeof(int)); int* mask = (int*) calloc(mesh->Np * mesh->Nelements,sizeof(int)); for (dlong n = 0; n < elliptic->Nmasked; n++) mask[elliptic->maskIds[n]] = 1; if(mesh->rank == 0) printf("Building full FEM matrix..."); fflush(stdout); //Build unassembed non-zeros dlong cnt = 0; for (dlong e = 0; e < mesh->Nelements; e++) for (int ny = 0; ny < mesh->Nq; ny++) for (int nx = 0; nx < mesh->Nq; nx++) { if (mask[e * mesh->Np + nx + ny * mesh->Nq]) continue; //skip masked nodes for (int my = 0; my < mesh->Nq; my++) for (int mx = 0; mx < mesh->Nq; mx++) { if (mask[e * mesh->Np + mx + my * mesh->Nq]) continue; //skip masked nodes int id; dfloat val = 0.; if (ny == my) { for (int k = 0; k < mesh->Nq; k++) { id = k + ny * mesh->Nq; dfloat Grr = mesh->ggeo[e * mesh->Np * mesh->Nggeo + id + G00ID * mesh->Np]; val += Grr * mesh->D[nx + k * mesh->Nq] * mesh->D[mx + k * mesh->Nq]; } } id = mx + ny * mesh->Nq; dfloat Grs = mesh->ggeo[e * mesh->Np * mesh->Nggeo + id + G01ID * mesh->Np]; val += Grs * mesh->D[nx + mx * mesh->Nq] * mesh->D[my + ny * mesh->Nq]; id = nx + my * mesh->Nq; dfloat Gsr = mesh->ggeo[e * mesh->Np * mesh->Nggeo + id + G01ID * mesh->Np]; val += Gsr * mesh->D[mx + nx * mesh->Nq] * mesh->D[ny + my * mesh->Nq]; if (nx == mx) { for (int k = 0; k < mesh->Nq; k++) { id = nx + k * mesh->Nq; dfloat Gss = mesh->ggeo[e * mesh->Np * mesh->Nggeo + id + G11ID * mesh->Np]; val += Gss * mesh->D[ny + k * mesh->Nq] * mesh->D[my + k * mesh->Nq]; } } if ((nx == mx) && (ny == my)) { id = nx + ny * mesh->Nq; dfloat JW = mesh->ggeo[e * mesh->Np * mesh->Nggeo + id + GWJID * 
mesh->Np]; val += JW * lambda; } dfloat nonZeroThreshold = 1e-7; if (fabs(val) > nonZeroThreshold) { // pack non-zero sendNonZeros[cnt].val = val; sendNonZeros[cnt].row = globalNumbering[e * mesh->Np + nx + ny * mesh->Nq]; sendNonZeros[cnt].col = globalNumbering[e * mesh->Np + mx + my * mesh->Nq]; sendNonZeros[cnt].ownerRank = globalOwners[e * mesh->Np + nx + ny * mesh->Nq]; cnt++; } } } // Make the MPI_NONZERO_T data type MPI_Datatype MPI_NONZERO_T; MPI_Datatype dtype[4] = {MPI_HLONG, MPI_HLONG, MPI_INT, MPI_DFLOAT}; int blength[4] = {1, 1, 1, 1}; MPI_Aint addr[4], displ[4]; MPI_Get_address ( &(sendNonZeros[0] ), addr + 0); MPI_Get_address ( &(sendNonZeros[0].col ), addr + 1); MPI_Get_address ( &(sendNonZeros[0].ownerRank), addr + 2); MPI_Get_address ( &(sendNonZeros[0].val ), addr + 3); displ[0] = 0; displ[1] = addr[1] - addr[0]; displ[2] = addr[2] - addr[0]; displ[3] = addr[3] - addr[0]; MPI_Type_create_struct (4, blength, displ, dtype, &MPI_NONZERO_T); MPI_Type_commit (&MPI_NONZERO_T); // count how many non-zeros to send to each process for(dlong n = 0; n < cnt; ++n) AsendCounts[sendNonZeros[n].ownerRank]++; // sort by row ordering qsort(sendNonZeros, cnt, sizeof(nonZero_t), parallelCompareRowColumn); // find how many nodes to expect (should use sparse version) MPI_Alltoall(AsendCounts, 1, MPI_INT, ArecvCounts, 1, MPI_INT, mesh->comm); // find send and recv offsets for gather *nnz = 0; for(int r = 0; r < mesh->size; ++r) { AsendOffsets[r + 1] = AsendOffsets[r] + AsendCounts[r]; ArecvOffsets[r + 1] = ArecvOffsets[r] + ArecvCounts[r]; *nnz += ArecvCounts[r]; } *A = (nonZero_t*) calloc(*nnz, sizeof(nonZero_t)); // determine number to receive MPI_Alltoallv(sendNonZeros, AsendCounts, AsendOffsets, MPI_NONZERO_T, (*A), ArecvCounts, ArecvOffsets, MPI_NONZERO_T, mesh->comm); // sort received non-zero entries by row block (may need to switch compareRowColumn tests) qsort((*A), *nnz, sizeof(nonZero_t), parallelCompareRowColumn); // compress duplicates cnt = 0; for(dlong 
n = 1; n < *nnz; ++n) { if((*A)[n].row == (*A)[cnt].row && (*A)[n].col == (*A)[cnt].col) { (*A)[cnt].val += (*A)[n].val; }else { ++cnt; (*A)[cnt] = (*A)[n]; } } if (*nnz) cnt++; *nnz = cnt; #if 1 // Write matlab dat for postprocess char fname[BUFSIZ]; sprintf(fname, "Ax.dat"); FILE* fp; fp = fopen(fname, "w"); for(dlong n = 1; n < *nnz; ++n) fprintf(fp, hlongFormat " " hlongFormat " %.8e\n", (*A)[n].row + 1, (*A)[n].col + 1, (*A)[n].val); fclose(fp); #endif if(mesh->rank == 0) printf("done.\n"); MPI_Barrier(mesh->comm); MPI_Type_free(&MPI_NONZERO_T); free(sendNonZeros); free(globalNumbering); free(globalOwners); free(AsendCounts); free(ArecvCounts); free(AsendOffsets); free(ArecvOffsets); } void ellipticBuildContinuousHex3D(elliptic_t* elliptic, nonZero_t** A, dlong* nnz, ogs_t** ogs, hlong* globalStarts) { mesh2D* mesh = elliptic->mesh; setupAide options = elliptic->options; // currently constant coefficient case only const dfloat lambda = elliptic->lambda[0]; int rank = mesh->rank; //use the masked gs handle to define a global ordering // number of degrees of freedom on this rank (after gathering) hlong Ngather = elliptic->ogs->Ngather; dlong Ntotal = mesh->Np * mesh->Nelements; // create a global numbering system hlong* globalIds = (hlong*) calloc(Ngather,sizeof(hlong)); int* owner = (int*) calloc(Ngather,sizeof(int)); // every gathered degree of freedom has its own global id MPI_Allgather(&Ngather, 1, MPI_HLONG, globalStarts + 1, 1, MPI_HLONG, mesh->comm); for(int r = 0; r < mesh->size; ++r) globalStarts[r + 1] = globalStarts[r] + globalStarts[r + 1]; //use the offsets to set a consecutive global numbering for (dlong n = 0; n < elliptic->ogs->Ngather; n++) { globalIds[n] = n + globalStarts[rank]; owner[n] = rank; } //scatter this numbering to the original nodes hlong* globalNumbering = (hlong*) calloc(Ntotal,sizeof(hlong)); int* globalOwners = (int*) calloc(Ntotal,sizeof(int)); for (dlong n = 0; n < Ntotal; n++) globalNumbering[n] = -1; 
  ogsScatter(globalNumbering, globalIds, ogsHlong, ogsAdd, elliptic->ogs);
  ogsScatter(globalOwners, owner, ogsInt, ogsAdd, elliptic->ogs);

  free(globalIds);
  free(owner);

  // 2. Build non-zeros of stiffness matrix (unassembled)
  dlong nnzLocal = mesh->Np * mesh->Np * mesh->Nelements;
  nonZero_t* sendNonZeros = (nonZero_t*) calloc(nnzLocal, sizeof(nonZero_t));
  int* AsendCounts  = (int*) calloc(mesh->size, sizeof(int));
  int* ArecvCounts  = (int*) calloc(mesh->size, sizeof(int));
  int* AsendOffsets = (int*) calloc(mesh->size + 1, sizeof(int));
  int* ArecvOffsets = (int*) calloc(mesh->size + 1, sizeof(int));

  // mark nodes removed by boundary masking; their rows/cols are skipped
  int* mask = (int*) calloc(mesh->Np * mesh->Nelements,sizeof(int));
  for (dlong n = 0; n < elliptic->Nmasked; n++)
    mask[elliptic->maskIds[n]] = 1;

  if(mesh->rank == 0)
    printf("Building full FEM matrix...");
  fflush(stdout);

  // sum-factorized assembly: loop over test node (nx,ny,nz) and trial node
  // (mx,my,mz) of each element, accumulating the (r,s,t) stiffness terms
  dlong cnt = 0;
  for (dlong e = 0; e < mesh->Nelements; e++)
    for (int nz = 0; nz < mesh->Nq; nz++)
      for (int ny = 0; ny < mesh->Nq; ny++)
        for (int nx = 0; nx < mesh->Nq; nx++) {
          int idn = nx + ny * mesh->Nq + nz * mesh->Nq * mesh->Nq;
          if (mask[e * mesh->Np + idn]) continue; //skip masked nodes
          for (int mz = 0; mz < mesh->Nq; mz++)
            for (int my = 0; my < mesh->Nq; my++)
              for (int mx = 0; mx < mesh->Nq; mx++) {
                int idm = mx + my * mesh->Nq + mz * mesh->Nq * mesh->Nq;
                if (mask[e * mesh->Np + idm]) continue; //skip masked nodes
                int id;
                dfloat val = 0.;

                // Grr term (only when the s,t indices coincide)
                if ((ny == my) && (nz == mz)) {
                  for (int k = 0; k < mesh->Nq; k++) {
                    id = k + ny * mesh->Nq + nz * mesh->Nq * mesh->Nq;
                    dfloat Grr = mesh->ggeo[e * mesh->Np * mesh->Nggeo + id + G00ID * mesh->Np];
                    val += Grr * mesh->D[nx + k * mesh->Nq] * mesh->D[mx + k * mesh->Nq];
                  }
                }

                // Grs/Gsr cross terms
                if (nz == mz) {
                  id = mx + ny * mesh->Nq + nz * mesh->Nq * mesh->Nq;
                  dfloat Grs = mesh->ggeo[e * mesh->Np * mesh->Nggeo + id + G01ID * mesh->Np];
                  val += Grs * mesh->D[nx + mx * mesh->Nq] * mesh->D[my + ny * mesh->Nq];

                  id = nx + my * mesh->Nq + nz * mesh->Nq * mesh->Nq;
                  dfloat Gsr = mesh->ggeo[e * mesh->Np * mesh->Nggeo + id + G01ID * mesh->Np];
                  val += Gsr * mesh->D[mx + nx * mesh->Nq] * mesh->D[ny + my * mesh->Nq];
                }

                // Grt/Gtr cross terms
                if (ny == my) {
                  id = mx + ny * mesh->Nq + nz * mesh->Nq * mesh->Nq;
                  dfloat Grt = mesh->ggeo[e * mesh->Np * mesh->Nggeo + id + G02ID * mesh->Np];
                  val += Grt * mesh->D[nx + mx * mesh->Nq] * mesh->D[mz + nz * mesh->Nq];

                  id = nx + ny * mesh->Nq + mz * mesh->Nq * mesh->Nq;
                  dfloat Gst = mesh->ggeo[e * mesh->Np * mesh->Nggeo + id + G02ID * mesh->Np];
                  val += Gst * mesh->D[mx + nx * mesh->Nq] * mesh->D[nz + mz * mesh->Nq];
                }

                // Gss term
                if ((nx == mx) && (nz == mz)) {
                  for (int k = 0; k < mesh->Nq; k++) {
                    id = nx + k * mesh->Nq + nz * mesh->Nq * mesh->Nq;
                    dfloat Gss = mesh->ggeo[e * mesh->Np * mesh->Nggeo + id + G11ID * mesh->Np];
                    val += Gss * mesh->D[ny + k * mesh->Nq] * mesh->D[my + k * mesh->Nq];
                  }
                }

                // Gst/Gts cross terms
                if (nx == mx) {
                  id = nx + my * mesh->Nq + nz * mesh->Nq * mesh->Nq;
                  dfloat Gst = mesh->ggeo[e * mesh->Np * mesh->Nggeo + id + G12ID * mesh->Np];
                  val += Gst * mesh->D[ny + my * mesh->Nq] * mesh->D[mz + nz * mesh->Nq];

                  id = nx + ny * mesh->Nq + mz * mesh->Nq * mesh->Nq;
                  dfloat Gts = mesh->ggeo[e * mesh->Np * mesh->Nggeo + id + G12ID * mesh->Np];
                  val += Gts * mesh->D[my + ny * mesh->Nq] * mesh->D[nz + mz * mesh->Nq];
                }

                // Gtt term
                if ((nx == mx) && (ny == my)) {
                  for (int k = 0; k < mesh->Nq; k++) {
                    id = nx + ny * mesh->Nq + k * mesh->Nq * mesh->Nq;
                    dfloat Gtt = mesh->ggeo[e * mesh->Np * mesh->Nggeo + id + G22ID * mesh->Np];
                    val += Gtt * mesh->D[nz + k * mesh->Nq] * mesh->D[mz + k * mesh->Nq];
                  }
                }

                // diagonal contribution of the lambda-weighted mass matrix
                if ((nx == mx) && (ny == my) && (nz == mz)) {
                  id = nx + ny * mesh->Nq + nz * mesh->Nq * mesh->Nq;
                  dfloat JW = mesh->ggeo[e * mesh->Np * mesh->Nggeo + id + GWJID * mesh->Np];
                  val += JW * lambda;
                }

                // pack non-zero
                dfloat nonZeroThreshold = 1e-7;
                if (fabs(val) >= nonZeroThreshold) {
                  sendNonZeros[cnt].val       = val;
                  sendNonZeros[cnt].row       = globalNumbering[e * mesh->Np + idn];
                  sendNonZeros[cnt].col       = globalNumbering[e * mesh->Np + idm];
                  sendNonZeros[cnt].ownerRank = globalOwners[e * mesh->Np + idn];
                  cnt++;
                }
              }
        }

  // Make the MPI_NONZERO_T data type
  MPI_Datatype MPI_NONZERO_T;
  MPI_Datatype dtype[4] = {MPI_HLONG, MPI_HLONG, MPI_INT, MPI_DFLOAT};
  int blength[4] = {1, 1, 1, 1};
  MPI_Aint addr[4], displ[4];
  MPI_Get_address ( &(sendNonZeros[0]          ), addr + 0);
  MPI_Get_address ( &(sendNonZeros[0].col      ), addr + 1);
  MPI_Get_address ( &(sendNonZeros[0].ownerRank), addr + 2);
  MPI_Get_address ( &(sendNonZeros[0].val      ), addr + 3);
  displ[0] = 0;
  displ[1] = addr[1] - addr[0];
  displ[2] = addr[2] - addr[0];
  displ[3] = addr[3] - addr[0];
  MPI_Type_create_struct (4, blength, displ, dtype, &MPI_NONZERO_T);
  MPI_Type_commit (&MPI_NONZERO_T);

  // count how many non-zeros to send to each process
  for(dlong n = 0; n < cnt; ++n)
    AsendCounts[sendNonZeros[n].ownerRank]++;

  // sort by row ordering
  qsort(sendNonZeros, cnt, sizeof(nonZero_t), parallelCompareRowColumn);

  // find how many nodes to expect (should use sparse version)
  MPI_Alltoall(AsendCounts, 1, MPI_INT, ArecvCounts, 1, MPI_INT, mesh->comm);

  // find send and recv offsets for gather
  *nnz = 0;
  for(int r = 0; r < mesh->size; ++r) {
    AsendOffsets[r + 1] = AsendOffsets[r] + AsendCounts[r];
    ArecvOffsets[r + 1] = ArecvOffsets[r] + ArecvCounts[r];
    *nnz += ArecvCounts[r];
  }

  *A = (nonZero_t*) calloc(*nnz, sizeof(nonZero_t));

  // determine number to receive
  MPI_Alltoallv(sendNonZeros, AsendCounts, AsendOffsets, MPI_NONZERO_T,
                (*A), ArecvCounts, ArecvOffsets, MPI_NONZERO_T,
                mesh->comm);

  // sort received non-zero entries by row block (may need to switch compareRowColumn tests)
  qsort((*A), *nnz, sizeof(nonZero_t), parallelCompareRowColumn);

  // compress duplicates: index 0 is the running accumulator, so scan from 1
  cnt = 0;
  for(dlong n = 1; n < *nnz; ++n) {
    if((*A)[n].row == (*A)[cnt].row && (*A)[n].col == (*A)[cnt].col) {
      (*A)[cnt].val += (*A)[n].val;
    }else {
      ++cnt;
      (*A)[cnt] = (*A)[n];
    }
  }
  if (*nnz) cnt++;
  *nnz = cnt;

  if(mesh->rank == 0)
    printf("done.\n");
  MPI_Barrier(mesh->comm);
  MPI_Type_free(&MPI_NONZERO_T);

  free(sendNonZeros);
  free(globalNumbering);
  free(globalOwners);
  free(AsendCounts);
  free(ArecvCounts);
  free(AsendOffsets);
  free(ArecvOffsets);
}
// void ellipticBuildContinuousTri2D(elliptic_t *elliptic, dfloat lambda, nonZero_t **A, dlong *nnz, ogs_t **ogs, hlong *globalStarts) { // mesh2D *mesh = elliptic->mesh; // setupAide options = elliptic->options; // int rank = mesh->rank; // //use the masked gs handle to define a global ordering // // number of degrees of freedom on this rank (after gathering) // hlong Ngather = elliptic->ogs->Ngather; // dlong Ntotal = mesh->Np*mesh->Nelements; // // create a global numbering system // hlong *globalIds = (hlong *) calloc(Ngather,sizeof(hlong)); // int *owner = (int *) calloc(Ngather,sizeof(int)); // // every gathered degree of freedom has its own global id // MPI_Allgather(&Ngather, 1, MPI_HLONG, globalStarts+1, 1, MPI_HLONG, mesh->comm); // for(int r=0;r<mesh->size;++r) // globalStarts[r+1] = globalStarts[r]+globalStarts[r+1]; // //use the offsets to set a consecutive global numbering // for (dlong n =0;n<elliptic->ogs->Ngather;n++) { // globalIds[n] = n + globalStarts[rank]; // owner[n] = rank; // } // //scatter this numbering to the original nodes // hlong *globalNumbering = (hlong *) calloc(Ntotal,sizeof(hlong)); // int *globalOwners = (int *) calloc(Ntotal,sizeof(int)); // for (dlong n=0;n<Ntotal;n++) globalNumbering[n] = -1; // ogsScatter(globalNumbering, globalIds, ogsHlong, ogsAdd, elliptic->ogs); // ogsScatter(globalOwners, owner, ogsInt, ogsAdd, elliptic->ogs); // free(globalIds); free(owner); // // Build non-zeros of stiffness matrix (unassembled) // dlong nnzLocal = mesh->Np*mesh->Np*mesh->Nelements; // nonZero_t *sendNonZeros = (nonZero_t*) calloc(nnzLocal, sizeof(nonZero_t)); // int *AsendCounts = (int*) calloc(mesh->size, sizeof(int)); // int *ArecvCounts = (int*) calloc(mesh->size, sizeof(int)); // int *AsendOffsets = (int*) calloc(mesh->size+1, sizeof(int)); // int *ArecvOffsets = (int*) calloc(mesh->size+1, sizeof(int)); // dfloat *Srr = (dfloat *) calloc(mesh->Np*mesh->Np,sizeof(dfloat)); // dfloat *Srs = (dfloat *) 
calloc(mesh->Np*mesh->Np,sizeof(dfloat)); // dfloat *Sss = (dfloat *) calloc(mesh->Np*mesh->Np,sizeof(dfloat)); // dfloat *MM = (dfloat *) calloc(mesh->Np*mesh->Np,sizeof(dfloat)); // for (int n=0;n<mesh->Np;n++) { // for (int m=0;m<mesh->Np;m++) { // Srr[m+n*mesh->Np] = mesh->Srr[m+n*mesh->Np]; // Srs[m+n*mesh->Np] = mesh->Srs[m+n*mesh->Np] + mesh->Ssr[m+n*mesh->Np]; // Sss[m+n*mesh->Np] = mesh->Sss[m+n*mesh->Np]; // MM[m+n*mesh->Np] = mesh->MM[m+n*mesh->Np]; // } // } // if(mesh->rank==0) printf("Building full FEM matrix...");fflush(stdout); // //Build unassembed non-zeros // dlong cnt =0; // for (dlong e=0;e<mesh->Nelements;e++) { // dfloat Grr = mesh->ggeo[e*mesh->Nggeo + G00ID]; // dfloat Grs = mesh->ggeo[e*mesh->Nggeo + G01ID]; // dfloat Gss = mesh->ggeo[e*mesh->Nggeo + G11ID]; // dfloat J = mesh->ggeo[e*mesh->Nggeo + GWJID]; // for (int n=0;n<mesh->Np;n++) { // if (globalNumbering[e*mesh->Np + n]<0) continue; //skip masked nodes // for (int m=0;m<mesh->Np;m++) { // if (globalNumbering[e*mesh->Np + m]<0) continue; //skip masked nodes // dfloat val = 0.; // val += Grr*Srr[m+n*mesh->Np]; // val += Grs*Srs[m+n*mesh->Np]; // val += Gss*Sss[m+n*mesh->Np]; // val += J*lambda*MM[m+n*mesh->Np]; // dfloat nonZeroThreshold = 1e-7; // if (fabs(val)>nonZeroThreshold) { // // pack non-zero // sendNonZeros[cnt].val = val; // sendNonZeros[cnt].row = globalNumbering[e*mesh->Np + n]; // sendNonZeros[cnt].col = globalNumbering[e*mesh->Np + m]; // sendNonZeros[cnt].ownerRank = globalOwners[e*mesh->Np + n]; // cnt++; // } // } // } // } // // Make the MPI_NONZERO_T data type // MPI_Datatype MPI_NONZERO_T; // MPI_Datatype dtype[4] = {MPI_HLONG, MPI_HLONG, MPI_INT, MPI_DFLOAT}; // int blength[4] = {1, 1, 1, 1}; // MPI_Aint addr[4], displ[4]; // MPI_Get_address ( &(sendNonZeros[0] ), addr+0); // MPI_Get_address ( &(sendNonZeros[0].col ), addr+1); // MPI_Get_address ( &(sendNonZeros[0].ownerRank), addr+2); // MPI_Get_address ( &(sendNonZeros[0].val ), addr+3); // displ[0] = 0; // 
displ[1] = addr[1] - addr[0]; // displ[2] = addr[2] - addr[0]; // displ[3] = addr[3] - addr[0]; // MPI_Type_create_struct (4, blength, displ, dtype, &MPI_NONZERO_T); // MPI_Type_commit (&MPI_NONZERO_T); // // count how many non-zeros to send to each process // for(dlong n=0;n<cnt;++n) // AsendCounts[sendNonZeros[n].ownerRank]++; // // sort by row ordering // qsort(sendNonZeros, cnt, sizeof(nonZero_t), parallelCompareRowColumn); // // find how many nodes to expect (should use sparse version) // MPI_Alltoall(AsendCounts, 1, MPI_INT, ArecvCounts, 1, MPI_INT, mesh->comm); // // find send and recv offsets for gather // *nnz = 0; // for(int r=0;r<mesh->size;++r){ // AsendOffsets[r+1] = AsendOffsets[r] + AsendCounts[r]; // ArecvOffsets[r+1] = ArecvOffsets[r] + ArecvCounts[r]; // *nnz += ArecvCounts[r]; // } // *A = (nonZero_t*) calloc(*nnz, sizeof(nonZero_t)); // // determine number to receive // MPI_Alltoallv(sendNonZeros, AsendCounts, AsendOffsets, MPI_NONZERO_T, // (*A), ArecvCounts, ArecvOffsets, MPI_NONZERO_T, // mesh->comm); // // sort received non-zero entries by row block (may need to switch compareRowColumn tests) // qsort((*A), *nnz, sizeof(nonZero_t), parallelCompareRowColumn); // // compress duplicates // cnt = 0; // for(dlong n=1;n<*nnz;++n){ // if((*A)[n].row == (*A)[cnt].row && // (*A)[n].col == (*A)[cnt].col){ // (*A)[cnt].val += (*A)[n].val; // } // else{ // ++cnt; // (*A)[cnt] = (*A)[n]; // } // } // if (*nnz) cnt++; // *nnz = cnt; // if(mesh->rank==0) printf("done.\n"); // MPI_Barrier(mesh->comm); // MPI_Type_free(&MPI_NONZERO_T); // free(sendNonZeros); // free(globalNumbering); free(globalOwners); // free(AsendCounts); // free(ArecvCounts); // free(AsendOffsets); // free(ArecvOffsets); // free(Srr); // free(Srs); // free(Sss); // free(MM ); // } // void ellipticBuildContinuousQuad3D(elliptic_t *elliptic, dfloat lambda, nonZero_t **A, dlong *nnz, ogs_t **ogs, hlong *globalStarts) { // mesh2D *mesh = elliptic->mesh; // setupAide options = 
elliptic->options; // int rank = mesh->rank; // //use the masked gs handle to define a global ordering // // number of degrees of freedom on this rank (after gathering) // hlong Ngather = elliptic->ogs->Ngather; // dlong Ntotal = mesh->Np*mesh->Nelements; // // create a global numbering system // hlong *globalIds = (hlong *) calloc(Ngather,sizeof(hlong)); // int *owner = (int *) calloc(Ngather,sizeof(int)); // // every gathered degree of freedom has its own global id // MPI_Allgather(&Ngather, 1, MPI_HLONG, globalStarts+1, 1, MPI_HLONG, mesh->comm); // for(int r=0;r<mesh->size;++r) // globalStarts[r+1] = globalStarts[r]+globalStarts[r+1]; // //use the offsets to set a consecutive global numbering // for (dlong n =0;n<elliptic->ogs->Ngather;n++) { // globalIds[n] = n + globalStarts[rank]; // owner[n] = rank; // } // //scatter this numbering to the original nodes // hlong *globalNumbering = (hlong *) calloc(Ntotal,sizeof(hlong)); // int *globalOwners = (int *) calloc(Ntotal,sizeof(int)); // for (dlong n=0;n<Ntotal;n++) globalNumbering[n] = -1; // ogsScatter(globalNumbering, globalIds, ogsHlong, ogsAdd, elliptic->ogs); // ogsScatter(globalOwners, owner, ogsInt, ogsAdd, elliptic->ogs); // free(globalIds); free(owner); // // 2. 
Build non-zeros of stiffness matrix (unassembled) // dlong nnzLocal = mesh->Np*mesh->Np*mesh->Nelements; // nonZero_t *sendNonZeros = (nonZero_t*) calloc(nnzLocal, sizeof(nonZero_t)); // int *AsendCounts = (int*) calloc(mesh->size, sizeof(int)); // int *ArecvCounts = (int*) calloc(mesh->size, sizeof(int)); // int *AsendOffsets = (int*) calloc(mesh->size+1, sizeof(int)); // int *ArecvOffsets = (int*) calloc(mesh->size+1, sizeof(int)); // int *mask = (int *) calloc(mesh->Np*mesh->Nelements,sizeof(int)); // for (dlong n=0;n<elliptic->Nmasked;n++) mask[elliptic->maskIds[n]] = 1; // if(mesh->rank==0) printf("Building full FEM matrix...");fflush(stdout); // #if 0 // hlong NTf = mesh->Nelements*mesh->Np * mesh->Nelements*mesh->Np ; // dfloat *Af = (dfloat *)calloc(NTf, sizeof(dfloat)); // #endif // //Build unassembed non-zeros // dlong cnt =0; // for (dlong e=0;e<mesh->Nelements;e++) { // for (int ny=0;ny<mesh->Nq;ny++) { // for (int nx=0;nx<mesh->Nq;nx++) { // if (mask[e*mesh->Np + nx+ny*mesh->Nq]) continue; //skip masked nodes // for (int my=0;my<mesh->Nq;my++) { // for (int mx=0;mx<mesh->Nq;mx++) { // if (mask[e*mesh->Np + mx+my*mesh->Nq]) continue; //skip masked nodes // int id; // dfloat val = 0.; // if (ny==my) { // for (int k=0;k<mesh->Nq;k++) { // id = k+ny*mesh->Nq; // dfloat Grr = mesh->ggeo[e*mesh->Np*mesh->Nggeo + id + G00ID*mesh->Np]; // val += Grr*mesh->D[nx+k*mesh->Nq]*mesh->D[mx+k*mesh->Nq]; // } // } // id = mx+ny*mesh->Nq; // dfloat Grs = mesh->ggeo[e*mesh->Np*mesh->Nggeo + id + G01ID*mesh->Np]; // val += Grs*mesh->D[nx+mx*mesh->Nq]*mesh->D[my+ny*mesh->Nq]; // id = nx+my*mesh->Nq; // dfloat Gsr = mesh->ggeo[e*mesh->Np*mesh->Nggeo + id + G01ID*mesh->Np]; // val += Gsr*mesh->D[mx+nx*mesh->Nq]*mesh->D[ny+my*mesh->Nq]; // // id = mx+ny*mesh->Nq; // // dfloat Grt = mesh->ggeo[e*mesh->Np*mesh->Nggeo + id + G02ID*mesh->Np]; // // val += Grt*mesh->D[nx+mx*mesh->Nq]; // // id = nx+my*mesh->Nq; // // dfloat Gtr = mesh->ggeo[e*mesh->Np*mesh->Nggeo + id + 
G02ID*mesh->Np]; // // val += Gtr*mesh->D[mx+nx*mesh->Nq]; // if (nx==mx) { // for (int k=0;k<mesh->Nq;k++) { // id = nx+k*mesh->Nq; // dfloat Gss = mesh->ggeo[e*mesh->Np*mesh->Nggeo + id + G11ID*mesh->Np]; // val += Gss*mesh->D[ny+k*mesh->Nq]*mesh->D[my+k*mesh->Nq]; // } // } // // double check following two: AK // // id = nx+my*mesh->Nq; // // dfloat Gst = mesh->ggeo[e*mesh->Np*mesh->Nggeo + id + G12ID*mesh->Np]; // // val += Gst*mesh->D[ny+my*mesh->Nq]; // // id = mx+ny*mesh->Nq; // // dfloat Gts = mesh->ggeo[e*mesh->Np*mesh->Nggeo + id + G12ID*mesh->Np]; // // val += Gts*mesh->D[my+ny*mesh->Nq]; // if ((nx==mx)&&(ny==my)) { // id = nx + ny*mesh->Nq; // // dfloat Gtt = mesh->ggeo[e*mesh->Np*mesh->Nggeo + id + G22ID*mesh->Np]; // // val += Gtt; // dfloat JW = mesh->ggeo[e*mesh->Np*mesh->Nggeo + id + GWJID*mesh->Np]; // val += JW*lambda; // } // #if 0 // const hlong rowid = e*mesh->Np + nx + ny*mesh->Nq; // const hlong colid = e*mesh->Np + mx + my*mesh->Nq; // Af[rowid*mesh->Nelements*mesh->Np + colid] = val; // #endif // dfloat nonZeroThreshold = 1e-7; // if (fabs(val)>nonZeroThreshold) { // // pack non-zero // sendNonZeros[cnt].val = val; // sendNonZeros[cnt].row = globalNumbering[e*mesh->Np + nx+ny*mesh->Nq]; // sendNonZeros[cnt].col = globalNumbering[e*mesh->Np + mx+my*mesh->Nq]; // sendNonZeros[cnt].ownerRank = globalOwners[e*mesh->Np + nx+ny*mesh->Nq]; // cnt++; // } // } // } // } // } // } // #if 0 // // Write matlab dat for postprocess // char fname[BUFSIZ]; // sprintf(fname, "Ax.dat"); // FILE *fp; // fp = fopen(fname, "w"); // for(hlong row = 0; row<(mesh->Nelements*mesh->Np); row++){ // for(hlong col = 0; col<(mesh->Nelements*mesh->Np); col++){ // dfloat val = Af[row*mesh->Nelements*mesh->Np + col]; // fprintf(fp,"%.8e ", val); // } // fprintf(fp,"\n"); // } // fclose(fp); // #endif // // Make the MPI_NONZERO_T data type // MPI_Datatype MPI_NONZERO_T; // MPI_Datatype dtype[4] = {MPI_HLONG, MPI_HLONG, MPI_INT, MPI_DFLOAT}; // int blength[4] = {1, 1, 1, 
1}; // MPI_Aint addr[4], displ[4]; // MPI_Get_address ( &(sendNonZeros[0] ), addr+0); // MPI_Get_address ( &(sendNonZeros[0].col ), addr+1); // MPI_Get_address ( &(sendNonZeros[0].ownerRank), addr+2); // MPI_Get_address ( &(sendNonZeros[0].val ), addr+3); // displ[0] = 0; // displ[1] = addr[1] - addr[0]; // displ[2] = addr[2] - addr[0]; // displ[3] = addr[3] - addr[0]; // MPI_Type_create_struct (4, blength, displ, dtype, &MPI_NONZERO_T); // MPI_Type_commit (&MPI_NONZERO_T); // // count how many non-zeros to send to each process // for(dlong n=0;n<cnt;++n) // AsendCounts[sendNonZeros[n].ownerRank]++; // // sort by row ordering // qsort(sendNonZeros, cnt, sizeof(nonZero_t), parallelCompareRowColumn); // // find how many nodes to expect (should use sparse version) // MPI_Alltoall(AsendCounts, 1, MPI_INT, ArecvCounts, 1, MPI_INT, mesh->comm); // // find send and recv offsets for gather // *nnz = 0; // for(int r=0;r<mesh->size;++r){ // AsendOffsets[r+1] = AsendOffsets[r] + AsendCounts[r]; // ArecvOffsets[r+1] = ArecvOffsets[r] + ArecvCounts[r]; // *nnz += ArecvCounts[r]; // } // *A = (nonZero_t*) calloc(*nnz, sizeof(nonZero_t)); // // determine number to receive // MPI_Alltoallv(sendNonZeros, AsendCounts, AsendOffsets, MPI_NONZERO_T, // (*A), ArecvCounts, ArecvOffsets, MPI_NONZERO_T, // mesh->comm); // // sort received non-zero entries by row block (may need to switch compareRowColumn tests) // qsort((*A), *nnz, sizeof(nonZero_t), parallelCompareRowColumn); // // compress duplicates // cnt = 0; // for(dlong n=1;n<*nnz;++n){ // if((*A)[n].row == (*A)[cnt].row && // (*A)[n].col == (*A)[cnt].col){ // (*A)[cnt].val += (*A)[n].val; // } // else{ // ++cnt; // (*A)[cnt] = (*A)[n]; // } // } // if (*nnz) cnt++; // *nnz = cnt; // #if 0 // // Write matlab dat for postprocess // char fname[BUFSIZ]; // sprintf(fname, "Ax.dat"); // FILE *fp; // fp = fopen(fname, "w"); // for(dlong n=1;n<*nnz;++n){ // fprintf(fp,"%d %d %.8e\n", (*A)[n].row+1, (*A)[n].col+1, (*A)[n].val); // } // 
fclose(fp); // #endif // if(mesh->rank==0) printf("done.\n"); // MPI_Barrier(mesh->comm); // MPI_Type_free(&MPI_NONZERO_T); // free(sendNonZeros); // free(globalNumbering); free(globalOwners); // free(AsendCounts); // free(ArecvCounts); // free(AsendOffsets); // free(ArecvOffsets); // } // void ellipticBuildContinuousTet3D(elliptic_t *elliptic, dfloat lambda, nonZero_t **A, dlong *nnz, ogs_t **ogs, hlong *globalStarts) { // mesh2D *mesh = elliptic->mesh; // setupAide options = elliptic->options; // int rank = mesh->rank; // //use the masked gs handle to define a global ordering // // number of degrees of freedom on this rank (after gathering) // hlong Ngather = elliptic->ogs->Ngather; // dlong Ntotal = mesh->Np*mesh->Nelements; // // create a global numbering system // hlong *globalIds = (hlong *) calloc(Ngather,sizeof(hlong)); // int *owner = (int *) calloc(Ngather,sizeof(int)); // // every gathered degree of freedom has its own global id // MPI_Allgather(&Ngather, 1, MPI_HLONG, globalStarts+1, 1, MPI_HLONG, mesh->comm); // for(int r=0;r<mesh->size;++r) // globalStarts[r+1] = globalStarts[r]+globalStarts[r+1]; // //use the offsets to set a consecutive global numbering // for (dlong n =0;n<elliptic->ogs->Ngather;n++) { // globalIds[n] = n + globalStarts[rank]; // owner[n] = rank; // } // //scatter this numbering to the original nodes // hlong *globalNumbering = (hlong *) calloc(Ntotal,sizeof(hlong)); // int *globalOwners = (int *) calloc(Ntotal,sizeof(int)); // for (dlong n=0;n<Ntotal;n++) globalNumbering[n] = -1; // ogsScatter(globalNumbering, globalIds, ogsHlong, ogsAdd, elliptic->ogs); // ogsScatter(globalOwners, owner, ogsInt, ogsAdd, elliptic->ogs); // free(globalIds); free(owner); // // Build non-zeros of stiffness matrix (unassembled) // dlong nnzLocal = mesh->Np*mesh->Np*mesh->Nelements; // nonZero_t *sendNonZeros = (nonZero_t*) calloc(nnzLocal, sizeof(nonZero_t)); // int *AsendCounts = (int*) calloc(mesh->size, sizeof(int)); // int *ArecvCounts = (int*) 
calloc(mesh->size, sizeof(int)); // int *AsendOffsets = (int*) calloc(mesh->size+1, sizeof(int)); // int *ArecvOffsets = (int*) calloc(mesh->size+1, sizeof(int)); // int *mask = (int *) calloc(mesh->Np*mesh->Nelements,sizeof(int)); // for (dlong n=0;n<elliptic->Nmasked;n++) mask[elliptic->maskIds[n]] = 1; // //Build unassembed non-zeros // if(mesh->rank==0) printf("Building full FEM matrix...");fflush(stdout); // dlong cnt =0; // #pragma omp parallel for // for (dlong e=0;e<mesh->Nelements;e++) { // dfloat Grr = mesh->ggeo[e*mesh->Nggeo + G00ID]; // dfloat Grs = mesh->ggeo[e*mesh->Nggeo + G01ID]; // dfloat Grt = mesh->ggeo[e*mesh->Nggeo + G02ID]; // dfloat Gss = mesh->ggeo[e*mesh->Nggeo + G11ID]; // dfloat Gst = mesh->ggeo[e*mesh->Nggeo + G12ID]; // dfloat Gtt = mesh->ggeo[e*mesh->Nggeo + G22ID]; // dfloat J = mesh->ggeo[e*mesh->Nggeo + GWJID]; // for (int n=0;n<mesh->Np;n++) { // if (mask[e*mesh->Np + n]) continue; //skip masked nodes // for (int m=0;m<mesh->Np;m++) { // if (mask[e*mesh->Np + m]) continue; //skip masked nodes // dfloat val = 0.; // val += Grr*mesh->Srr[m+n*mesh->Np]; // val += Grs*mesh->Srs[m+n*mesh->Np]; // val += Grt*mesh->Srt[m+n*mesh->Np]; // val += Grs*mesh->Ssr[m+n*mesh->Np]; // val += Gss*mesh->Sss[m+n*mesh->Np]; // val += Gst*mesh->Sst[m+n*mesh->Np]; // val += Grt*mesh->Str[m+n*mesh->Np]; // val += Gst*mesh->Sts[m+n*mesh->Np]; // val += Gtt*mesh->Stt[m+n*mesh->Np]; // val += J*lambda*mesh->MM[m+n*mesh->Np]; // dfloat nonZeroThreshold = 1e-7; // if (fabs(val)>nonZeroThreshold) { // #pragma omp critical // { // // pack non-zero // sendNonZeros[cnt].val = val; // sendNonZeros[cnt].row = globalNumbering[e*mesh->Np + n]; // sendNonZeros[cnt].col = globalNumbering[e*mesh->Np + m]; // sendNonZeros[cnt].ownerRank = globalOwners[e*mesh->Np + n]; // cnt++; // } // } // } // } // } // // Make the MPI_NONZERO_T data type // MPI_Datatype MPI_NONZERO_T; // MPI_Datatype dtype[4] = {MPI_HLONG, MPI_HLONG, MPI_INT, MPI_DFLOAT}; // int blength[4] = {1, 1, 1, 
1}; // MPI_Aint addr[4], displ[4]; // MPI_Get_address ( &(sendNonZeros[0] ), addr+0); // MPI_Get_address ( &(sendNonZeros[0].col ), addr+1); // MPI_Get_address ( &(sendNonZeros[0].ownerRank), addr+2); // MPI_Get_address ( &(sendNonZeros[0].val ), addr+3); // displ[0] = 0; // displ[1] = addr[1] - addr[0]; // displ[2] = addr[2] - addr[0]; // displ[3] = addr[3] - addr[0]; // MPI_Type_create_struct (4, blength, displ, dtype, &MPI_NONZERO_T); // MPI_Type_commit (&MPI_NONZERO_T); // // count how many non-zeros to send to each process // for(dlong n=0;n<cnt;++n) // AsendCounts[sendNonZeros[n].ownerRank] += 1; // // sort by row ordering // qsort(sendNonZeros, cnt, sizeof(nonZero_t), parallelCompareRowColumn); // // find how many nodes to expect (should use sparse version) // MPI_Alltoall(AsendCounts, 1, MPI_INT, ArecvCounts, 1, MPI_INT, mesh->comm); // // find send and recv offsets for gather // *nnz = 0; // for(int r=0;r<mesh->size;++r){ // AsendOffsets[r+1] = AsendOffsets[r] + AsendCounts[r]; // ArecvOffsets[r+1] = ArecvOffsets[r] + ArecvCounts[r]; // *nnz += ArecvCounts[r]; // } // *A = (nonZero_t*) calloc(*nnz, sizeof(nonZero_t)); // // determine number to receive // MPI_Alltoallv(sendNonZeros, AsendCounts, AsendOffsets, MPI_NONZERO_T, // (*A), ArecvCounts, ArecvOffsets, MPI_NONZERO_T, // mesh->comm); // // sort received non-zero entries by row block (may need to switch compareRowColumn tests) // qsort((*A), *nnz, sizeof(nonZero_t), parallelCompareRowColumn); // // compress duplicates // cnt = 0; // for(dlong n=1;n<*nnz;++n){ // if((*A)[n].row == (*A)[cnt].row && // (*A)[n].col == (*A)[cnt].col){ // (*A)[cnt].val += (*A)[n].val; // } // else{ // ++cnt; // (*A)[cnt] = (*A)[n]; // } // } // if (*nnz) cnt++; // *nnz = cnt; // if(mesh->rank==0) printf("done.\n"); // MPI_Barrier(mesh->comm); // MPI_Type_free(&MPI_NONZERO_T); // free(sendNonZeros); // free(globalNumbering); free(globalOwners); // free(AsendCounts); // free(ArecvCounts); // free(AsendOffsets); // 
free(ArecvOffsets); // free(mask); // }
avx.c
#include "q_incs.h"
#ifdef AVX
#include <immintrin.h> // for AVX
#include <smmintrin.h> // for AVX
#endif
#include "avx.h"

#if defined(__GNUC__)
#define PORTABLE_ALIGN16 __attribute__((aligned(16)))
#else
#define PORTABLE_ALIGN16 __declspec(align(16))
#endif

#define REG_WIDTH_IN_BITS 256
#define BITS_PER_BYTE 8

/* Computes D[i] = A[i] * sB + C[i] for i in [0, nI).
 * Returns 0 on success, a negative status (via go_BYE) on bad arguments.
 * A, C and D are NOT required to be 32-byte aligned: unaligned AVX
 * loads/stores are used, which are penalty-free on aligned data anyway. */
int
va_times_sb_plus_vc(
    float *A,
    float sB,
    float *C,
    float *D,
    int32_t nI
    )
{
  int status = 0;
  if ( A == NULL ) { go_BYE(-1); }
  if ( C == NULL ) { go_BYE(-1); }
  if ( D == NULL ) { go_BYE(-1); }
  if ( nI <= 0 ) { go_BYE(-1); }
#ifdef AVX
  int stride = REG_WIDTH_IN_BITS / (BITS_PER_BYTE * sizeof(float)); /* 8 floats/register */
  int nI_rem = ( nI % stride );
  __m256 b = _mm256_set1_ps(sB); /* broadcast the scalar to all 8 lanes */
  /* vectorized main loop with fused multiply-add */
  for ( int i = 0; i < (nI - nI_rem); i += stride ) {
    /* BUGFIX: unaligned load/store — _mm256_load_ps/_mm256_store_ps fault
     * on pointers that are not 32-byte aligned, and this API makes no
     * alignment guarantee for its arguments. */
    __m256 a = _mm256_loadu_ps(A + i);
    __m256 c = _mm256_loadu_ps(C + i);
    __m256 d = _mm256_fmadd_ps(a, b, c);
    _mm256_storeu_ps(D + i, d);
#ifdef COUNT
    num_f_flops += 2 * stride;
#endif
  }
  /* scalar tail for the last nI_rem elements */
  for ( int i = (nI - nI_rem); i < nI; i++ ) {
    D[i] = C[i] + ( A[i] * sB );
#ifdef COUNT
    num_f_flops += 2;
#endif
  }
#else
#pragma omp simd
  for ( int i = 0; i < nI; i++ ) { // for batch size
    D[i] = C[i] + ( A[i] * sB );
#ifdef COUNT
    num_f_flops += 2;
#endif
  }
#endif
BYE:
  return status;
}
//===========================================================================
/* Computes *C = sum over i in [0, nI) of A[i] * B[i] (float dot product).
 * Returns 0 on success, a negative status (via go_BYE) on bad arguments. */
int
va_dot_vb(
    float *A,
    float *B,
    float *C,
    int32_t nI
    )
{
  int status = 0;
  if ( A == NULL ) { go_BYE(-1); }
  if ( B == NULL ) { go_BYE(-1); }
  if ( C == NULL ) { go_BYE(-1); }
  if ( nI <= 0 ) { go_BYE(-1); }
  float sum = 0;
#ifdef AVX
  int stride = REG_WIDTH_IN_BITS / (BITS_PER_BYTE * sizeof(float)); /* 8 floats/register */
  int nI_rem = ( nI % stride );
  /* BUGFIX: the original AVX loop referenced undeclared i/n/a/b (did not
   * compile), stored with an aligned store into a buffer that was only
   * 16-byte aligned (UB for 256-bit stores), and only assigned *C in the
   * non-AVX branch, leaving the AVX result unwritten. Rewritten to
   * accumulate in a vector register with one horizontal sum at the end. */
  __m256 vsum = _mm256_setzero_ps();
  for ( int i = 0; i < (nI - nI_rem); i += stride ) {
    __m256 va = _mm256_loadu_ps(A + i);
    __m256 vb = _mm256_loadu_ps(B + i);
    vsum = _mm256_add_ps(vsum, _mm256_mul_ps(va, vb));
#ifdef COUNT
    num_b_flops += 2 * stride;
#endif
  }
  float tmpres[8]; /* stride is always 8 for 256-bit registers */
  _mm256_storeu_ps(tmpres, vsum); /* unaligned store: no alignment demands */
  sum = tmpres[0] + tmpres[1] + tmpres[2] + tmpres[3]
      + tmpres[4] + tmpres[5] + tmpres[6] + tmpres[7];
  /* scalar tail for the last nI_rem elements */
  for ( int i = (nI - nI_rem); i < nI; i++ ) {
    sum += A[i] * B[i];
#ifdef COUNT
    num_b_flops += 2;
#endif
  }
#else
#pragma omp simd reduction(+:sum)
  for ( int i = 0; i < nI; i++ ) {
    sum += A[i] * B[i];
#ifdef COUNT
    num_b_flops += 2;
#endif
  }
#endif
  *C = sum; /* BUGFIX: moved out of the #else branch so every build writes *C */
BYE:
  return status;
}
_Atomic-3.c
/* PR c/65467 */
/* { dg-do compile } */
/* { dg-additional-options "-std=c11" } */

/* Compiler testsuite input: verifies that '_Atomic'-qualified variables and
   expressions are rejected in the OpenMP constructs below.  Each dg-error
   comment names the diagnostic the test harness expects on that line; do not
   move a statement away from its directive.  */

void f1 (void)
{
  _Atomic int i = 0, k[4];
  int j = 0;
  k[0] = 0;
  k[1] = 0;
  k[2] = 0;
  k[3] = 0;
  #pragma omp parallel reduction (+:i) /* { dg-error "'_Atomic' 'i' in 'reduction' clause" } */
  i++;
  #pragma omp declare reduction (foo: _Atomic int: omp_out += omp_in) initializer (omp_priv = omp_orig * 0) /* { dg-error "'_Atomic' qualified type in '#pragma omp declare reduction'" } */
  #pragma omp declare reduction (bar: int: omp_out += omp_in) initializer (omp_priv = omp_orig * 0)
  #pragma omp parallel reduction (bar:j)
  j++;
  #pragma omp parallel reduction (bar:i) /* { dg-error "'_Atomic' 'i' in 'reduction' clause" } */
  i++;
  #pragma omp parallel reduction (+:k) /* { dg-error "'_Atomic' 'k' in 'reduction' clause" } */
  k[1]++;
  #pragma omp parallel reduction (+:k[1:2]) /* { dg-error "'_Atomic' \[^\n\r]* in 'reduction' clause" } */
  k[1]++;
}

void f2 (int *_Atomic p)
{
  #pragma omp simd aligned (p : 16) /* { dg-error "'_Atomic' 'p' in 'aligned' clause" } */
  for (int i = 0; i < 16; i++)
    p[i]++;
}

_Atomic int x;

void f3 (_Atomic int *p)
{
  int i;
  #pragma omp atomic write
  x = 6; /* { dg-error "'_Atomic' expression in '#pragma omp atomic'" } */
  #pragma omp atomic read
  i = x; /* { dg-error "'_Atomic' expression in '#pragma omp atomic'" } */
  #pragma omp atomic update
  x += 6; /* { dg-error "'_Atomic' expression in '#pragma omp atomic'" } */
  #pragma omp atomic capture
  i = x *= 2; /* { dg-error "'_Atomic' expression in '#pragma omp atomic'" } */
  #pragma omp atomic write
  p[2] = 6; /* { dg-error "'_Atomic' expression in '#pragma omp atomic'" } */
  #pragma omp atomic read
  i = p[2]; /* { dg-error "'_Atomic' expression in '#pragma omp atomic'" } */
  #pragma omp atomic update
  p[2] += 6; /* { dg-error "'_Atomic' expression in '#pragma omp atomic'" } */
  #pragma omp atomic capture
  i = p[2] *= 2; /* { dg-error "'_Atomic' expression in '#pragma omp atomic'" } */
}

#pragma omp declare simd linear(x:1) /* { dg-error "'_Atomic' 'x' in 'linear' clause" } */
int f4 (_Atomic int x, int y)
{
  return x + y;
}
pyField.h
#pragma once #include <memory> #include "Grid.h" #include "FieldValue.h" #include "Mapping.h" #include "Fdtd.h" #include "Psatd.h" #include "Pstd.h" #include "Mapping.h" #include "pybind11/pybind11.h" namespace py = pybind11; using namespace pybind11::literals; namespace pfc { template <class TGrid, class TFieldSolver> class pyFieldEntity : public TGrid, public TFieldSolver { public: pyFieldEntity(const Int3 & numInternalCells, const FP3 & minCoords, const FP3 & steps, FP dt) : TGrid(Int3(numInternalCells), minCoords, steps, numInternalCells), TFieldSolver(static_cast<TGrid*>(this), dt) {} void refresh() { this->globalTime = 0.0; } }; template <class TGrid, class TFieldSolver, class TDerived, bool ifStraggered> class pyStraggeredFieldIntarface {}; // spatial straggered grids template <class TGrid, class TFieldSolver, class TDerived> class pyStraggeredFieldIntarface<TGrid, TFieldSolver, TDerived, true> { public: template <class FieldConfigurationType> void setFieldConfiguration(const FieldConfigurationType* fieldConf) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); const int chunkSize = 32; const int nChunks = fieldEntity->numCells.z / chunkSize; const int chunkRem = fieldEntity->numCells.z % chunkSize; const int nx = fieldEntity->numCells.x, ny = fieldEntity->numCells.y; #pragma omp parallel for collapse(2) for (int i = 0; i < nx; i++) for (int j = 0; j < ny; j++) for (int chunk = 0; chunk < nChunks + 1; chunk++) { FP3 cEx[chunkSize], cEy[chunkSize], cEz[chunkSize]; FP3 cBx[chunkSize], cBy[chunkSize], cBz[chunkSize]; int kLast = chunk == nChunks ? 
chunkRem : chunkSize; #pragma ivdep for (int k = 0; k < kLast; k++) { cEx[k] = derived->convertCoords(fieldEntity->ExPosition(i, j, chunk * chunkSize), fieldEntity->timeShiftE); cEy[k] = derived->convertCoords(fieldEntity->EyPosition(i, j, chunk * chunkSize), fieldEntity->timeShiftE); cEz[k] = derived->convertCoords(fieldEntity->EzPosition(i, j, chunk * chunkSize), fieldEntity->timeShiftE); cBx[k] = derived->convertCoords(fieldEntity->BxPosition(i, j, chunk * chunkSize), fieldEntity->timeShiftB); cBy[k] = derived->convertCoords(fieldEntity->ByPosition(i, j, chunk * chunkSize), fieldEntity->timeShiftB); cBz[k] = derived->convertCoords(fieldEntity->BzPosition(i, j, chunk * chunkSize), fieldEntity->timeShiftB); } #pragma ivdep #pragma omp simd for (int k = 0; k < kLast; k++) { fieldEntity->Ex(i, j, k) = fieldConf->getE(cEx[k].x, cEx[k].y, cEx[k].z).x; fieldEntity->Ey(i, j, k) = fieldConf->getE(cEy[k].x, cEy[k].y, cEy[k].z).y; fieldEntity->Ez(i, j, k) = fieldConf->getE(cEz[k].x, cEz[k].y, cEz[k].z).z; fieldEntity->Bx(i, j, k) = fieldConf->getB(cBx[k].x, cBx[k].y, cBx[k].z).x; fieldEntity->By(i, j, k) = fieldConf->getB(cBy[k].x, cBy[k].y, cBy[k].z).y; fieldEntity->Bz(i, j, k) = fieldConf->getB(cBz[k].x, cBz[k].y, cBz[k].z).z; } } } }; // collocated grids template <class TGrid, class TFieldSolver, class TDerived> class pyStraggeredFieldIntarface<TGrid, TFieldSolver, TDerived, false> { public: template <class FieldConfigurationType> void setFieldConfiguration(const FieldConfigurationType* fieldConf) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); const int chunkSize = 32; const int nChunks = fieldEntity->numCells.z / chunkSize; const int chunkRem = fieldEntity->numCells.z % chunkSize; const int nx = fieldEntity->numCells.x, ny = fieldEntity->numCells.y; #pragma omp parallel for collapse(2) for (int i = 0; i < nx; i++) for (int j = 0; j < ny; j++) for (int chunk = 0; chunk < nChunks + 1; 
chunk++) { FP3 coords[chunkSize]; int kLast = chunk == nChunks ? chunkRem : chunkSize; FP3 startPosition = fieldEntity->ExPosition(i, j, chunk * chunkSize); #pragma ivdep for (int k = 0; k < kLast; k++) { FP3 position(startPosition.x, startPosition.y, startPosition.z + k * fieldEntity->steps.z); coords[k] = derived->convertCoords(position); } #pragma ivdep #pragma omp simd for (int k = 0; k < kLast; k++) { FP3 E, B; fieldConf->getEB(coords[k].x, coords[k].y, coords[k].z, &E, &B); fieldEntity->Ex(i, j, k + chunk * chunkSize) = E.x; fieldEntity->Ey(i, j, k + chunk * chunkSize) = E.y; fieldEntity->Ez(i, j, k + chunk * chunkSize) = E.z; fieldEntity->Bx(i, j, k + chunk * chunkSize) = B.x; fieldEntity->By(i, j, k + chunk * chunkSize) = B.y; fieldEntity->Bz(i, j, k + chunk * chunkSize) = B.z; } } } void pySetEMField(py::function fValueField) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 coords = derived->convertCoords(fieldEntity->ExPosition(i, j, k)); ValueField field = fValueField("x"_a = coords.x, "y"_a = coords.y, "z"_a = coords.z). 
template cast<ValueField>(); fieldEntity->Ex(i, j, k) = field.E.x; fieldEntity->Ey(i, j, k) = field.E.y; fieldEntity->Ez(i, j, k) = field.E.z; fieldEntity->Bx(i, j, k) = field.B.x; fieldEntity->By(i, j, k) = field.B.y; fieldEntity->Bz(i, j, k) = field.B.z; } } void setEMField(int64_t _fValueField) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); void(*fValueField)(FP, FP, FP, FP*) = (void(*)(FP, FP, FP, FP*))_fValueField; const int chunkSize = 32; const int nChunks = fieldEntity->numCells.z / chunkSize; const int chunkRem = fieldEntity->numCells.z % chunkSize; const int nx = fieldEntity->numCells.x, ny = fieldEntity->numCells.y; #pragma omp parallel for collapse(2) for (int i = 0; i < nx; i++) for (int j = 0; j < ny; j++) for (int chunk = 0; chunk < nChunks + 1; chunk++) { FP3 coords[chunkSize]; int kLast = chunk == nChunks ? chunkRem : chunkSize; FP3 startPosition = fieldEntity->ExPosition(i, j, chunk * chunkSize); #pragma ivdep for (int k = 0; k < kLast; k++) { FP3 position(startPosition.x, startPosition.y, startPosition.z + k * fieldEntity->steps.z); coords[k] = derived->convertCoords(position); } #pragma ivdep #pragma omp simd for (int k = 0; k < kLast; k++) { ValueField field(0.0, 0.0, 0.0, 0.0, 0.0, 0.0); fValueField(coords[k].x, coords[k].y, coords[k].z, &(field.E.x)); fieldEntity->Ex(i, j, k + chunk * chunkSize) = field.E.x; fieldEntity->Ey(i, j, k + chunk * chunkSize) = field.E.y; fieldEntity->Ez(i, j, k + chunk * chunkSize) = field.E.z; fieldEntity->Bx(i, j, k + chunk * chunkSize) = field.B.x; fieldEntity->By(i, j, k + chunk * chunkSize) = field.B.y; fieldEntity->Bz(i, j, k + chunk * chunkSize) = field.B.z; } } } }; template<class TGrid, class TFieldSolver, class TDerived> class pyFieldGridInterface : public pyStraggeredFieldIntarface<TGrid, TFieldSolver, TDerived, TGrid::ifFieldsSpatialStraggered && TGrid::ifFieldsTimeStraggered> { public: pyFieldGridInterface() { fEt[0] = 0; 
fEt[1] = 0; fEt[2] = 0; fBt[0] = 0; fBt[1] = 0; fBt[2] = 0; isAnalytical = false; } void setAnalytical(int64_t _fEx, int64_t _fEy, int64_t _fEz, int64_t _fBx, int64_t _fBy, int64_t _fBz) { fEt[0] = _fEx; fEt[1] = _fEy; fEt[2] = _fEz; fBt[0] = _fBx; fBt[1] = _fBy; fBt[2] = _fBz; isAnalytical = true; } void analyticalUpdateFields(FP t) { if (isAnalytical) { setExyzt(fEt[0], fEt[1], fEt[2], t); setBxyzt(fBt[0], fBt[1], fBt[2], t); } } void pySetExyz(py::function fEx, py::function fEy, py::function fEz) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cEx, cEy, cEz; cEx = derived->convertCoords(fieldEntity->ExPosition(i, j, k), fieldEntity->timeShiftE); cEy = derived->convertCoords(fieldEntity->EyPosition(i, j, k), fieldEntity->timeShiftE); cEz = derived->convertCoords(fieldEntity->EzPosition(i, j, k), fieldEntity->timeShiftE); fieldEntity->Ex(i, j, k) = fEx("x"_a = cEx.x, "y"_a = cEx.y, "z"_a = cEx.z).template cast<FP>(); fieldEntity->Ey(i, j, k) = fEy("x"_a = cEy.x, "y"_a = cEy.y, "z"_a = cEy.z).template cast<FP>(); fieldEntity->Ez(i, j, k) = fEz("x"_a = cEz.x, "y"_a = cEz.y, "z"_a = cEz.z).template cast<FP>(); } } void pySetE(py::function fE) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cEx, cEy, cEz; cEx = derived->convertCoords(fieldEntity->ExPosition(i, j, k), fieldEntity->timeShiftE); cEy = derived->convertCoords(fieldEntity->EyPosition(i, j, k), fieldEntity->timeShiftE); cEz = derived->convertCoords(fieldEntity->EzPosition(i, j, k), fieldEntity->timeShiftE); fieldEntity->Ex(i, j, k) = fE("x"_a 
= cEx.x, "y"_a = cEx.y, "z"_a = cEx.z).template cast<FP3>().x; fieldEntity->Ey(i, j, k) = fE("x"_a = cEy.x, "y"_a = cEy.y, "z"_a = cEy.z).template cast<FP3>().y; fieldEntity->Ez(i, j, k) = fE("x"_a = cEz.x, "y"_a = cEz.y, "z"_a = cEz.z).template cast<FP3>().z; } } void setExyz(int64_t _fEx, int64_t _fEy, int64_t _fEz) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); FP(*fEx)(FP, FP, FP) = (FP(*)(FP, FP, FP))_fEx; FP(*fEy)(FP, FP, FP) = (FP(*)(FP, FP, FP))_fEy; FP(*fEz)(FP, FP, FP) = (FP(*)(FP, FP, FP))_fEz; #pragma omp parallel for for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cEx, cEy, cEz; cEx = derived->convertCoords(fieldEntity->ExPosition(i, j, k), fieldEntity->timeShiftE); cEy = derived->convertCoords(fieldEntity->EyPosition(i, j, k), fieldEntity->timeShiftE); cEz = derived->convertCoords(fieldEntity->EzPosition(i, j, k), fieldEntity->timeShiftE); fieldEntity->Ex(i, j, k) = fEx(cEx.x, cEx.y, cEx.z); fieldEntity->Ey(i, j, k) = fEy(cEy.x, cEy.y, cEy.z); fieldEntity->Ez(i, j, k) = fEz(cEz.x, cEz.y, cEz.z); } } void setExyzt(int64_t _fEx, int64_t _fEy, int64_t _fEz, FP t) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); FP(*fEx)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))_fEx; FP(*fEy)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))_fEy; FP(*fEz)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))_fEz; #pragma omp parallel for for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cEx, cEy, cEz; cEx = derived->convertCoords(fieldEntity->ExPosition(i, j, k), fieldEntity->timeShiftE); cEy = derived->convertCoords(fieldEntity->EyPosition(i, j, k), fieldEntity->timeShiftE); cEz = 
derived->convertCoords(fieldEntity->EzPosition(i, j, k), fieldEntity->timeShiftE); fieldEntity->Ex(i, j, k) = fEx(cEx.x, cEx.y, cEx.z, t + fieldEntity->timeShiftE); fieldEntity->Ey(i, j, k) = fEy(cEy.x, cEy.y, cEy.z, t + fieldEntity->timeShiftE); fieldEntity->Ez(i, j, k) = fEz(cEz.x, cEz.y, cEz.z, t + fieldEntity->timeShiftE); } } void setE(int64_t _fE) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); FP3(*fE)(FP, FP, FP) = (FP3(*)(FP, FP, FP))_fE; #pragma omp parallel for for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cEx, cEy, cEz; cEx = derived->convertCoords(fieldEntity->ExPosition(i, j, k), fieldEntity->timeShiftE); cEy = derived->convertCoords(fieldEntity->EyPosition(i, j, k), fieldEntity->timeShiftE); cEz = derived->convertCoords(fieldEntity->EzPosition(i, j, k), fieldEntity->timeShiftE); fieldEntity->Ex(i, j, k) = fE(cEx.x, cEx.y, cEx.z).x; fieldEntity->Ey(i, j, k) = fE(cEy.x, cEy.y, cEy.z).y; fieldEntity->Ez(i, j, k) = fE(cEz.x, cEz.y, cEz.z).z; } } void pySetBxyz(py::function fBx, py::function fBy, py::function fBz) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cBx, cBy, cBz; cBx = derived->convertCoords(fieldEntity->BxPosition(i, j, k), fieldEntity->timeShiftB); cBy = derived->convertCoords(fieldEntity->ByPosition(i, j, k), fieldEntity->timeShiftB); cBz = derived->convertCoords(fieldEntity->BzPosition(i, j, k), fieldEntity->timeShiftB); fieldEntity->Bx(i, j, k) = fBx("x"_a = cBx.x, "y"_a = cBx.y, "z"_a = cBx.z).template cast<FP>(); fieldEntity->By(i, j, k) = fBy("x"_a = cBy.x, "y"_a = cBy.y, "z"_a = cBy.z).template cast<FP>(); 
fieldEntity->Bz(i, j, k) = fBz("x"_a = cBz.x, "y"_a = cBz.y, "z"_a = cBz.z).template cast<FP>(); } } void pySetB(py::function fB) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cBx, cBy, cBz; cBx = derived->convertCoords(fieldEntity->BxPosition(i, j, k), fieldEntity->timeShiftB); cBy = derived->convertCoords(fieldEntity->ByPosition(i, j, k), fieldEntity->timeShiftB); cBz = derived->convertCoords(fieldEntity->BzPosition(i, j, k), fieldEntity->timeShiftB); fieldEntity->Bx(i, j, k) = fB("x"_a = cBx.x, "y"_a = cBx.y, "z"_a = cBx.z).template cast<FP3>().x; fieldEntity->By(i, j, k) = fB("x"_a = cBy.x, "y"_a = cBy.y, "z"_a = cBy.z).template cast<FP3>().y; fieldEntity->Bz(i, j, k) = fB("x"_a = cBz.x, "y"_a = cBz.y, "z"_a = cBz.z).template cast<FP3>().z; } } void setBxyz(int64_t _fBx, int64_t _fBy, int64_t _fBz) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); FP(*fBx)(FP, FP, FP) = (FP(*)(FP, FP, FP))_fBx; FP(*fBy)(FP, FP, FP) = (FP(*)(FP, FP, FP))_fBy; FP(*fBz)(FP, FP, FP) = (FP(*)(FP, FP, FP))_fBz; #pragma omp parallel for for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cBx, cBy, cBz; cBx = derived->convertCoords(fieldEntity->BxPosition(i, j, k), fieldEntity->timeShiftB); cBy = derived->convertCoords(fieldEntity->ByPosition(i, j, k), fieldEntity->timeShiftB); cBz = derived->convertCoords(fieldEntity->BzPosition(i, j, k), fieldEntity->timeShiftB); fieldEntity->Bx(i, j, k) = fBx(cBx.x, cBx.y, cBx.z); fieldEntity->By(i, j, k) = fBy(cBy.x, cBy.y, cBy.z); fieldEntity->Bz(i, j, k) = fBz(cBz.x, cBz.y, cBz.z); } } void setBxyzt(int64_t _fBx, int64_t _fBy, 
int64_t _fBz, FP t) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); FP(*fBx)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))_fBx; FP(*fBy)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))_fBy; FP(*fBz)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))_fBz; #pragma omp parallel for for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cBx, cBy, cBz; cBx = derived->convertCoords(fieldEntity->BxPosition(i, j, k), fieldEntity->timeShiftB); cBy = derived->convertCoords(fieldEntity->ByPosition(i, j, k), fieldEntity->timeShiftB); cBz = derived->convertCoords(fieldEntity->BzPosition(i, j, k), fieldEntity->timeShiftB); fieldEntity->Bx(i, j, k) = fBx(cBx.x, cBx.y, cBx.z, t + fieldEntity->timeShiftB); fieldEntity->By(i, j, k) = fBy(cBy.x, cBy.y, cBy.z, t + fieldEntity->timeShiftB); fieldEntity->Bz(i, j, k) = fBz(cBz.x, cBz.y, cBz.z, t + fieldEntity->timeShiftB); } } void setB(int64_t _fB) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); FP3(*fB)(FP, FP, FP) = (FP3(*)(FP, FP, FP))_fB; #pragma omp parallel for for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cBx, cBy, cBz; cBx = derived->convertCoords(fieldEntity->BxPosition(i, j, k), fieldEntity->timeShiftB); cBy = derived->convertCoords(fieldEntity->ByPosition(i, j, k), fieldEntity->timeShiftB); cBz = derived->convertCoords(fieldEntity->BzPosition(i, j, k), fieldEntity->timeShiftB); fieldEntity->Bx(i, j, k) = fB(cBx.x, cBx.y, cBx.z).x; fieldEntity->By(i, j, k) = fB(cBy.x, cBy.y, cBy.z).y; fieldEntity->Bz(i, j, k) = fB(cBz.x, cBz.y, cBz.z).z; } } void pySetJxyz(py::function fJx, py::function fJy, py::function fJz) { TDerived* derived = static_cast<TDerived*>(this); 
pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cJx, cJy, cJz; cJx = derived->convertCoords(fieldEntity->JxPosition(i, j, k), fieldEntity->timeShiftJ); cJy = derived->convertCoords(fieldEntity->JyPosition(i, j, k), fieldEntity->timeShiftJ); cJz = derived->convertCoords(fieldEntity->JzPosition(i, j, k), fieldEntity->timeShiftJ); fieldEntity->Jx(i, j, k) = fJx("x"_a = cJx.x, "y"_a = cJx.y, "z"_a = cJx.z).template cast<FP>(); fieldEntity->Jy(i, j, k) = fJy("x"_a = cJy.x, "y"_a = cJy.y, "z"_a = cJy.z).template cast<FP>(); fieldEntity->Jz(i, j, k) = fJz("x"_a = cJz.x, "y"_a = cJz.y, "z"_a = cJz.z).template cast<FP>(); } } void pySetJ(py::function fJ) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cJx, cJy, cJz; cJx = derived->convertCoords(fieldEntity->JxPosition(i, j, k), fieldEntity->timeShiftJ); cJy = derived->convertCoords(fieldEntity->JyPosition(i, j, k), fieldEntity->timeShiftJ); cJz = derived->convertCoords(fieldEntity->JzPosition(i, j, k), fieldEntity->timeShiftJ); fieldEntity->Jx(i, j, k) = fJ("x"_a = cJx.x, "y"_a = cJx.y, "z"_a = cJx.z).template cast<FP3>().x; fieldEntity->Jy(i, j, k) = fJ("x"_a = cJy.x, "y"_a = cJy.y, "z"_a = cJy.z).template cast<FP3>().y; fieldEntity->Jz(i, j, k) = fJ("x"_a = cJz.x, "y"_a = cJz.y, "z"_a = cJz.z).template cast<FP3>().z; } } void setJxyz(int64_t _fJx, int64_t _fJy, int64_t _fJz) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); FP(*fJx)(FP, FP, FP) = (FP(*)(FP, FP, FP))_fJx; FP(*fJy)(FP, FP, FP) = (FP(*)(FP, FP, FP))_fJy; FP(*fJz)(FP, 
FP, FP) = (FP(*)(FP, FP, FP))_fJz; #pragma omp parallel for for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cJx, cJy, cJz; cJx = derived->convertCoords(fieldEntity->JxPosition(i, j, k), fieldEntity->timeShiftJ); cJy = derived->convertCoords(fieldEntity->JyPosition(i, j, k), fieldEntity->timeShiftJ); cJz = derived->convertCoords(fieldEntity->JzPosition(i, j, k), fieldEntity->timeShiftJ); fieldEntity->Jx(i, j, k) = fJx(cJx.x, cJx.y, cJx.z); fieldEntity->Jy(i, j, k) = fJy(cJy.x, cJy.y, cJy.z); fieldEntity->Jz(i, j, k) = fJz(cJz.x, cJz.y, cJz.z); } } void setJxyzt(int64_t _fJx, int64_t _fJy, int64_t _fJz, FP t) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); FP(*fJx)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))_fJx; FP(*fJy)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))_fJy; FP(*fJz)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))_fJz; #pragma omp parallel for for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cJx, cJy, cJz; cJx = derived->convertCoords(fieldEntity->JxPosition(i, j, k), fieldEntity->timeShiftJ); cJy = derived->convertCoords(fieldEntity->JyPosition(i, j, k), fieldEntity->timeShiftJ); cJz = derived->convertCoords(fieldEntity->JzPosition(i, j, k), fieldEntity->timeShiftJ); fieldEntity->Jx(i, j, k) = fJx(cJx.x, cJx.y, cJx.z, t + fieldEntity->timeShiftJ); fieldEntity->Jy(i, j, k) = fJy(cJy.x, cJy.y, cJy.z, t + fieldEntity->timeShiftJ); fieldEntity->Jz(i, j, k) = fJz(cJz.x, cJz.y, cJz.z, t + fieldEntity->timeShiftJ); } } void setJ(int64_t _fJ) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); FP3(*fJ)(FP, FP, FP) = (FP3(*)(FP, FP, FP))_fJ; #pragma omp parallel for for (int i = 0; i < 
fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cJx, cJy, cJz; cJx = derived->convertCoords(fieldEntity->JxPosition(i, j, k), fieldEntity->timeShiftJ); cJy = derived->convertCoords(fieldEntity->JyPosition(i, j, k), fieldEntity->timeShiftJ); cJz = derived->convertCoords(fieldEntity->JzPosition(i, j, k), fieldEntity->timeShiftJ); fieldEntity->Jx(i, j, k) = fJ(cJx.x, cJx.y, cJx.z).x; fieldEntity->Jy(i, j, k) = fJ(cJy.x, cJy.y, cJy.z).y; fieldEntity->Jz(i, j, k) = fJ(cJz.x, cJz.y, cJz.z).z; } } FP3 getE(const FP3& coords) const { pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = static_cast<const TDerived*>(this)->getFieldEntity(); FP3 result; if (isAnalytical) { FP(*fx)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))fEt[0]; FP(*fy)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))fEt[1]; FP(*fz)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))fEt[2]; FP time = fieldEntity->globalTime + fieldEntity->timeShiftE; result[0] = fx(coords.x, coords.y, coords.z, time); result[1] = fy(coords.x, coords.y, coords.z, time); result[2] = fz(coords.x, coords.y, coords.z, time); } else { result = fieldEntity->getE(coords); } return result; } FP3 getB(const FP3& coords) const { pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = static_cast<const TDerived*>(this)->getFieldEntity(); FP3 result; if (isAnalytical) { FP(*fx)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))fBt[0]; FP(*fy)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))fBt[1]; FP(*fz)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))fBt[2]; FP time = fieldEntity->globalTime + fieldEntity->timeShiftB; result[0] = fx(coords.x, coords.y, coords.z, time); result[1] = fy(coords.x, coords.y, coords.z, time); result[2] = fz(coords.x, coords.y, coords.z, time); } else { result = fieldEntity->getB(coords); } return result; } FP3 getJ(const FP3& coords) const { return static_cast<const TDerived*>(this)->getFieldEntity()->getJ(coords); } void getFields(const FP3& coords, FP3& e, FP3& b) 
const {
        static_cast<const TDerived*>(this)->getFieldEntity()->getFields(coords, e, b);
    }

private:
    // Raw function-pointer addresses (passed from Python as integers) for the
    // analytical E(t)/B(t) component functions; only valid when isAnalytical.
    int64_t fEt[3], fBt[3];
    bool isAnalytical;
};

// Poisson-equation hook: enabled only for spectral solvers that support it.
// The primary template (bool flag in the last parameter) is intentionally empty.
template <class TGrid, class TFieldSolver, class TDerived, bool>
class pyPoissonFieldSolverInterface {};

// Enabled specialization: forwards to the field entity's implementation.
template <class TGrid, class TFieldSolver, class TDerived>
class pyPoissonFieldSolverInterface<TGrid, TFieldSolver, TDerived, true> {
public:
    void convertFieldsPoissonEquation() {
        static_cast<TDerived*>(this)->getFieldEntity()->convertFieldsPoissonEquation();
    }
};

// Disabled specialization: keeps the Python API uniform but only warns.
// NOTE(review): warning goes to stdout, not stderr — confirm this is intended.
template <class TGrid, class TFieldSolver, class TDerived>
class pyPoissonFieldSolverInterface<TGrid, TFieldSolver, TDerived, false> {
public:
    void convertFieldsPoissonEquation() {
        std::cout << "WARNING: the used field does not include the 'convertFieldsPoissonEquation' method" << std::endl;
    }
};

// Field-generator hook: enabled only for solvers that support an injector
// (selected below for FDTD). Same empty-primary/specialization pattern.
template <class TGrid, class TFieldSolver, class TDerived, bool>
class pyFieldGeneratorSolverInterface {};

template <class TGrid, class TFieldSolver, class TDerived>
class pyFieldGeneratorSolverInterface<TGrid, TFieldSolver, TDerived, true> {
public:
    void setFieldGenerator(FieldGenerator<TGrid::gridType>* generator) {
        static_cast<TDerived*>(this)->getFieldEntity()->setFieldGenerator(generator);
    }
};

template <class TGrid, class TFieldSolver, class TDerived>
class pyFieldGeneratorSolverInterface<TGrid, TFieldSolver, TDerived, false> {
public:
    void setFieldGenerator(FieldGenerator<TGrid::gridType>* generator) {
        std::cout << "WARNING: the used field does not include the 'setFieldGenerator' method" << std::endl;
    }
};

// Solver-side Python API (CRTP base for pyField): time control, PML setup and
// the field-update step. The two bases select, at compile time, whether the
// Poisson conversion (PSATD family) and the field generator (FDTD) are real
// implementations or warning stubs.
template<class TGrid, class TFieldSolver, class TDerived>
class pyFieldSolverInterface :
    public pyPoissonFieldSolverInterface<TGrid, TFieldSolver, TDerived,
        std::is_same<TFieldSolver, PSATD>::value ||
        std::is_same<TFieldSolver, PSATDPoisson>::value ||
        std::is_same<TFieldSolver, PSATDTimeStraggered>::value ||
        std::is_same<TFieldSolver, PSATDTimeStraggeredPoisson>::value>,
    public pyFieldGeneratorSolverInterface<TGrid, TFieldSolver, TDerived,
        std::is_same<TFieldSolver, FDTD>::value> {
public:
    // Get/set the simulation's global time on the underlying field entity.
    void setTime(FP time) { static_cast<TDerived*>(this)->getFieldEntity()->globalTime = time; }
    FP getTime() { return static_cast<TDerived*>(this)->getFieldEntity()->globalTime; }

    // Configure perfectly-matched-layer thickness (in cells) per axis.
    void setPML(int sizePMLx, int sizePMLy, int sizePMLz) { static_cast<TDerived*>(this)->getFieldEntity()->setPML(sizePMLx, sizePMLy, sizePMLz); }

    void changeTimeStep(double dt) { static_cast<TDerived*>(this)->getFieldEntity()->setTimeStep(dt); }

    // Advance the fields by the entity's configured time step.
    void updateFields() { static_cast<TDerived*>(this)->getFieldEntity()->updateFields(); }

    // Advance by an arbitrary dt: temporarily swaps the time step in, runs one
    // update, then restores the previous step so later updates are unaffected.
    void advance(FP dt) {
        pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = static_cast<TDerived*>(this)->getFieldEntity();
        FP oldDt = fieldEntity->dt;
        fieldEntity->setTimeStep(dt);
        fieldEntity->updateFields();
        fieldEntity->setTimeStep(oldDt);
    }
};

// Combined grid + solver Python interface; the field entity object serves as
// both the grid and the solver, hence the two static_cast accessors below.
template<class TGrid, class TFieldSolver, class TDerived>
class pyFieldInterface:
    public pyFieldGridInterface<TGrid, TFieldSolver, TDerived>,
    public pyFieldSolverInterface<TGrid, TFieldSolver, TDerived> {
public:
    using BaseGridInterface = pyFieldGridInterface<TGrid, TFieldSolver, TDerived>;
    using BaseSolverInterface = pyFieldSolverInterface<TGrid, TFieldSolver, TDerived>;

    TGrid* getGrid() const {
        return static_cast<TGrid*>(static_cast<const TDerived*>(this)->getFieldEntity());
    }

    TFieldSolver* getFieldSolver() const {
        return static_cast<TFieldSolver*>(static_cast<const TDerived*>(this)->getFieldEntity());
    }

    void refresh() { static_cast<TDerived*>(this)->getFieldEntity()->refresh(); }
};

// Type-erased base for all Python-visible fields (concrete grids, sums,
// scaled fields). applyMapping returns a NEW field wrapping `self` with the
// given coordinate mapping; `self` must be the shared_ptr owning this object.
class pyFieldBase {
public:
    virtual FP3 getE(const FP3& coords) const = 0;
    virtual FP3 getB(const FP3& coords) const = 0;
    virtual FP3 getJ(const FP3& coords) const = 0;

    // Convenience: fetch E and B at one point via the virtual accessors.
    void getFields(const FP3& coords, FP3& e, FP3& b) const { e = getE(coords); b = getB(coords); }

    virtual void updateFields() = 0;
    virtual void advance(FP dt) = 0;

    virtual std::shared_ptr<pyFieldBase> applyMapping(
        const std::shared_ptr<pyFieldBase>& self,
        const std::shared_ptr<Mapping>& mapping) const = 0;
};

template<class
TGrid, class TFieldSolver> class pyField : public pyFieldInterface<TGrid, TFieldSolver, pyField<TGrid, TFieldSolver>>, public pyFieldBase { using BaseInterface = pyFieldInterface<TGrid, TFieldSolver, pyField<TGrid, TFieldSolver>>; public: pyField(const Int3 & numInternalCells, const FP3 & minCoords, const FP3 & steps, FP dt) : fieldEntity(new pyFieldEntity<TGrid, TFieldSolver>(numInternalCells, minCoords, steps, dt)) {} pyField(const std::shared_ptr<pyField<TGrid, TFieldSolver>>& other, const std::shared_ptr<Mapping>& mapping) : pyWrappedField(other), mapping(mapping) {} inline pyFieldEntity<TGrid, TFieldSolver>* getFieldEntity() const { if (fieldEntity) return fieldEntity.get(); return pyWrappedField->getFieldEntity(); } inline FP3 convertCoords(const FP3& coords, FP timeShift = 0.0) const { bool status = true; return getDirectCoords(coords, getFieldEntity()->globalTime + timeShift, &status); } std::shared_ptr<pyFieldBase> applyMapping( const std::shared_ptr<pyFieldBase>& self, const std::shared_ptr<Mapping>& mapping) const override { return std::static_pointer_cast<pyFieldBase>( std::make_shared<pyField<TGrid, TFieldSolver>>( std::static_pointer_cast<pyField<TGrid, TFieldSolver>>(self), mapping ) ); } inline FP3 getE(const FP3& coords) const override { bool status = true; FP time = getFieldEntity()->globalTime + getFieldEntity()->timeShiftE; FP3 inverseCoords = getInverseCoords(coords, time, &status); if (!status) return FP3(0, 0, 0); return BaseInterface::getE(inverseCoords); } inline FP3 getB(const FP3& coords) const override { bool status = true; FP time = getFieldEntity()->globalTime + getFieldEntity()->timeShiftB; FP3 inverseCoords = getInverseCoords(coords, time, &status); if (!status) return FP3(0, 0, 0); return BaseInterface::getB(inverseCoords); } FP3 getJ(const FP3& coords) const override { bool status = true; FP time = getFieldEntity()->globalTime + getFieldEntity()->timeShiftJ; FP3 inverseCoords = getInverseCoords(coords, time, &status); if (!status) 
return FP3(0, 0, 0); return BaseInterface::getJ(inverseCoords); } void updateFields() override { return BaseInterface::updateFields(); } void advance(FP dt) override { return BaseInterface::advance(dt); } protected: inline FP3 getDirectCoords(const FP3& coords, FP time, bool* status) const { FP3 coords_ = coords; *status = true; if (pyWrappedField) coords_ = pyWrappedField->getDirectCoords(coords_, time, status); bool status2 = true; if (mapping) coords_ = mapping->getDirectCoords(coords_, time, &status2); *status = *status && status2; return coords_; } inline FP3 getInverseCoords(const FP3& coords, FP time, bool* status) const { FP3 coords_ = coords; *status = true; if (pyWrappedField) coords_ = pyWrappedField->getInverseCoords(coords_, time, status); bool status2 = true; if (mapping) coords_ = mapping->getInverseCoords(coords_, time, &status2); *status = *status && status2; return coords_; } private: // the simple grid state // if fieldEntity!=0 then pyField is a memory owner std::unique_ptr<pyFieldEntity<TGrid, TFieldSolver>> fieldEntity; // the mapping grid state // if pyWrappedField!=0 then pyField is a wrapper std::shared_ptr<pyField<TGrid, TFieldSolver>> pyWrappedField; std::shared_ptr<Mapping> mapping; }; typedef pyField<YeeGrid, FDTD> pyYeeField; typedef pyField<PSTDGrid, PSTD> pyPSTDField; typedef pyField<PSATDGrid, PSATD> pyPSATDField; typedef pyField<PSATDGrid, PSATDPoisson> pyPSATDPoissonField; typedef pyField<PSATDTimeStraggeredGrid, PSATDTimeStraggered> pyPSATDTimeStraggeredField; typedef pyField<PSATDTimeStraggeredGrid, PSATDTimeStraggeredPoisson> pyPSATDTimeStraggeredPoissonField; class pySumField : public pyFieldBase { public: pySumField(const std::shared_ptr<pyFieldBase>& pyWrappedField1, const std::shared_ptr<pyFieldBase>& pyWrappedField2) : pyWrappedField1(pyWrappedField1), pyWrappedField2(pyWrappedField2) {} pySumField(const std::shared_ptr<pySumField>& other, const std::shared_ptr<Mapping>& mapping) : 
pyWrappedField1(other->pyWrappedField1->applyMapping(other->pyWrappedField1, mapping)), pyWrappedField2(other->pyWrappedField2->applyMapping(other->pyWrappedField2, mapping)) {} std::shared_ptr<pyFieldBase> applyMapping( const std::shared_ptr<pyFieldBase>& self, const std::shared_ptr<Mapping>& mapping) const override { return std::static_pointer_cast<pyFieldBase>( std::make_shared<pySumField>( std::static_pointer_cast<pySumField>(self), mapping ) ); } FP3 getE(const FP3& coords) const override { return pyWrappedField1->getE(coords) + pyWrappedField2->getE(coords); } FP3 getB(const FP3& coords) const override { return pyWrappedField1->getB(coords) + pyWrappedField2->getB(coords); } FP3 getJ(const FP3& coords) const override { return pyWrappedField1->getJ(coords) + pyWrappedField2->getJ(coords); } void updateFields() override { pyWrappedField1->updateFields(); pyWrappedField2->updateFields(); } void advance(FP dt) override { pyWrappedField1->advance(dt); pyWrappedField2->advance(dt); } private: std::shared_ptr<pyFieldBase> pyWrappedField1; std::shared_ptr<pyFieldBase> pyWrappedField2; }; class pyMulField : public pyFieldBase { public: pyMulField(const std::shared_ptr<pyFieldBase>& pyWrappedField, FP factor) : pyWrappedField(pyWrappedField), factor(factor) {} pyMulField(const std::shared_ptr<pyMulField>& other, const std::shared_ptr<Mapping>& mapping) : pyWrappedField(other->pyWrappedField->applyMapping(other->pyWrappedField, mapping)) {} std::shared_ptr<pyFieldBase> applyMapping( const std::shared_ptr<pyFieldBase>& self, const std::shared_ptr<Mapping>& mapping) const override { return std::static_pointer_cast<pyFieldBase>( std::make_shared<pyMulField>( std::static_pointer_cast<pyMulField>(self), mapping ) ); } FP3 getE(const FP3& coords) const override { return pyWrappedField->getE(coords) * factor; } FP3 getB(const FP3& coords) const override { return pyWrappedField->getB(coords) * factor; } FP3 getJ(const FP3& coords) const override { return 
pyWrappedField->getJ(coords) * factor; } void updateFields() override { pyWrappedField->updateFields(); } void advance(FP dt) override { pyWrappedField->advance(dt); } private: FP factor = 1.0; std::shared_ptr<pyFieldBase> pyWrappedField; }; }
compare.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP AAA RRRR EEEEE % % C O O MM MM P P A A R R E % % C O O M M M PPPP AAAAA RRRR EEE % % C O O M M P A A R R E % % CCCC OOO M M P A A R R EEEEE % % % % % % MagickCore Image Comparison Methods % % % % Software Design % % John Cristy % % December 2003 % % % % % % Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/cache-view.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/compare.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/statistic.h" #include "magick/transform.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p a r e I m a g e C h a n n e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompareImageChannels() compares one or more image channels of an image % to a reconstructed image and returns the difference image. % % The format of the CompareImageChannels method is: % % Image *CompareImageChannels(const Image *image, % const Image *reconstruct_image,const ChannelType channel, % const MetricType metric,double *distortion,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o channel: the channel. % % o metric: the metric. % % o distortion: the computed distortion between the images. % % o exception: return any errors or warnings in this structure. 
%
*/

/*
  CompareImages() is the all-channels convenience wrapper: it compares every
  channel (AllChannels) of the two images and returns the annotated
  difference image, or NULL on failure.
*/
MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  Image
    *highlight_image;

  highlight_image=CompareImageChannels(image,reconstruct_image,AllChannels,
    metric,distortion,exception);
  return(highlight_image);
}

MagickExport Image *CompareImageChannels(Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *highlight_view,
    *image_view,
    *reconstruct_view;

  const char
    *artifact;

  Image
    *difference_image,
    *highlight_image;

  ssize_t
    y;

  MagickBooleanType
    status;

  MagickPixelPacket
    highlight,
    lowlight,
    zero;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((reconstruct_image->columns != image->columns) ||
      (reconstruct_image->rows != image->rows))
    ThrowImageException(ImageError,"ImageSizeDiffers");
  /*
    Compute the distortion first; bail out before any image allocation if
    that fails.
  */
  status=GetImageChannelDistortion(image,reconstruct_image,channel,metric,
    distortion,exception);
  if (status == MagickFalse)
    return((Image *) NULL);
  difference_image=CloneImage(image,0,0,MagickTrue,exception);
  if (difference_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel);
  highlight_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (highlight_image == (Image *) NULL)
    {
      difference_image=DestroyImage(difference_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(highlight_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&highlight_image->exception);
      difference_image=DestroyImage(difference_image);
      highlight_image=DestroyImage(highlight_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel);
  /*
    Default highlight/lowlight colors; both can be overridden with the
    "highlight-color" and "lowlight-color" image artifacts.
  */
  (void) QueryMagickColor("#f1001ecc",&highlight,exception);
  artifact=GetImageArtifact(image,"highlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryMagickColor(artifact,&highlight,exception);
  (void) QueryMagickColor("#ffffffcc",&lowlight,exception);
  artifact=GetImageArtifact(image,"lowlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryMagickColor(artifact,&lowlight,exception);
  if (highlight_image->colorspace == CMYKColorspace)
    {
      ConvertRGBToCMYK(&highlight);
      ConvertRGBToCMYK(&lowlight);
    }
  /*
    Generate difference image.
  */
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireCacheView(image);
  reconstruct_view=AcquireCacheView(reconstruct_image);
  highlight_view=AcquireCacheView(highlight_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    MagickPixelPacket
      pixel,
      reconstruct_pixel;

    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register IndexPacket
      *restrict highlight_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict r;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,
      reconstruct_image->columns,1,exception);
    r=QueueCacheViewAuthenticPixels(highlight_view,0,y,
      highlight_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) ||
        (q == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    highlight_indexes=GetCacheViewAuthenticIndexQueue(highlight_view);
    pixel=zero;
    reconstruct_pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickStatusType
        difference;

      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      SetMagickPixelPacket(reconstruct_image,q,reconstruct_indexes+x,
        &reconstruct_pixel);
      difference=MagickFalse;
      if (channel == AllChannels)
        {
          /* all-channels mode: fuzz-aware color comparison */
          if (IsMagickColorSimilar(&pixel,&reconstruct_pixel) == MagickFalse)
            difference=MagickTrue;
        }
      else
        {
          /* channel-subset mode: exact per-component comparison */
          if (((channel & RedChannel) != 0) && (p->red != q->red))
            difference=MagickTrue;
          if (((channel & GreenChannel) != 0) && (p->green != q->green))
            difference=MagickTrue;
          if (((channel & BlueChannel) != 0) && (p->blue != q->blue))
            difference=MagickTrue;
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse) && (p->opacity != q->opacity))
            difference=MagickTrue;
          if ((((channel & IndexChannel) != 0) &&
               (image->colorspace == CMYKColorspace) &&
               (reconstruct_image->colorspace == CMYKColorspace)) &&
              (indexes[x] != reconstruct_indexes[x]))
            difference=MagickTrue;
        }
      if (difference != MagickFalse)
        SetPixelPacket(highlight_image,&highlight,r,highlight_indexes+x);
      else
        SetPixelPacket(highlight_image,&lowlight,r,highlight_indexes+x);
      p++;
      q++;
      r++;
    }
    sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  highlight_view=DestroyCacheView(highlight_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  (void) CompositeImage(difference_image,image->compose,highlight_image,0,0);
  highlight_image=DestroyImage(highlight_image);
  if (status == MagickFalse)
    difference_image=DestroyImage(difference_image);
  return(difference_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e C h a n n e l D i s t o r t i o n                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageChannelDistortion() compares one or more
image channels of an image
%  to a reconstructed image and returns the specified distortion metric.
%
%  The format of the CompareImageChannels method is:
%
%      MagickBooleanType GetImageChannelDistortion(const Image *image,
%        const Image *reconstruct_image,const ChannelType channel,
%        const MetricType metric,double *distortion,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o reconstruct_image: the reconstruct image.
%
%    o channel: the channel.
%
%    o metric: the metric.
%
%    o distortion: the computed distortion between the images.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  GetImageDistortion() is the all-channels convenience wrapper around
  GetImageChannelDistortion().
*/
MagickExport MagickBooleanType GetImageDistortion(Image *image,
  const Image *reconstruct_image,const MetricType metric,double *distortion,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=GetImageChannelDistortion(image,reconstruct_image,AllChannels,
    metric,distortion,exception);
  return(status);
}

/*
  GetAbsoluteDistortion(): per channel, count the pixels whose colors differ
  (fuzz-aware via IsMagickColorSimilar()).  Counts accumulate into
  distortion[], indexed by channel constant; distortion[AllChannels] is the
  total differing-pixel count.
*/
static MagickBooleanType GetAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,double *distortion,
  ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  /*
    Compute the absolute difference in pixels between two images.
  */
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireCacheView(image);
  reconstruct_view=AcquireCacheView(reconstruct_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      channel_distortion[AllChannels+1];

    MagickPixelPacket
      pixel,
      reconstruct_pixel;

    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,
      reconstruct_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    pixel=zero;
    reconstruct_pixel=pixel;
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      SetMagickPixelPacket(reconstruct_image,q,reconstruct_indexes+x,
        &reconstruct_pixel);
      if (IsMagickColorSimilar(&pixel,&reconstruct_pixel) == MagickFalse)
        {
          if ((channel & RedChannel) != 0)
            channel_distortion[RedChannel]++;
          if ((channel & GreenChannel) != 0)
            channel_distortion[GreenChannel]++;
          if ((channel & BlueChannel) != 0)
            channel_distortion[BlueChannel]++;
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            channel_distortion[OpacityChannel]++;
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            channel_distortion[BlackChannel]++;
          channel_distortion[AllChannels]++;
        }
      p++;
      q++;
    }
    /* merge this row's per-thread tallies into the shared result */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_GetAbsoluteError)
#endif
    for (i=0; i <= (ssize_t) AllChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
  GetNumberChannels(): number of channels selected by the channel mask that
  are meaningful for this image (opacity requires matte, index requires
  CMYK).
*/
static size_t GetNumberChannels(const Image *image,
  const ChannelType channel)
{
  size_t
    channels;

  channels=0;
  if ((channel & RedChannel) != 0)
    channels++;
  if ((channel & GreenChannel) != 0)
    channels++;
  if ((channel & BlueChannel) != 0)
    channels++;
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    channels++;
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    channels++;
  return(channels);
}

/*
  GetFuzzDistortion(): root-mean-squared distance, per channel, between the
  two images, computed on QuantumScale-normalized component differences.
*/
static MagickBooleanType GetFuzzDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  status=MagickTrue;
  image_view=AcquireCacheView(image);
  reconstruct_view=AcquireCacheView(reconstruct_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      channel_distortion[AllChannels+1];

    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,
      reconstruct_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        distance;

      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*(p->red-(MagickRealType) q->red);
          channel_distortion[RedChannel]+=distance*distance;
          channel_distortion[AllChannels]+=distance*distance;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=QuantumScale*(p->green-(MagickRealType) q->green);
          channel_distortion[GreenChannel]+=distance*distance;
          channel_distortion[AllChannels]+=distance*distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*(p->blue-(MagickRealType) q->blue);
          channel_distortion[BlueChannel]+=distance*distance;
          channel_distortion[AllChannels]+=distance*distance;
        }
      /* opacity participates if either image has a matte channel; a missing
         matte is treated as fully opaque */
      if (((channel & OpacityChannel) != 0) &&
          ((image->matte != MagickFalse) ||
           (reconstruct_image->matte != MagickFalse)))
        {
          distance=QuantumScale*((image->matte != MagickFalse ? p->opacity :
            OpaqueOpacity)-(reconstruct_image->matte != MagickFalse ?
            q->opacity : OpaqueOpacity));
          channel_distortion[OpacityChannel]+=distance*distance;
          channel_distortion[AllChannels]+=distance*distance;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*(indexes[x]-(MagickRealType)
            reconstruct_indexes[x]);
          channel_distortion[BlackChannel]+=distance*distance;
          channel_distortion[AllChannels]+=distance*distance;
        }
      p++;
      q++;
    }
    /* NOTE(review): critical-section name is shared with the MSE metric;
       presumably a copy-paste — harmless, but serializes with it */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_GetMeanSquaredError)
#endif
    for (i=0; i <= (ssize_t) AllChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  for (i=0; i <= (ssize_t) AllChannels; i++)
    distortion[i]/=((double) image->columns*image->rows);
  /* when opacity contributed, exclude it from the combined-channel mean
     denominator */
  if (((channel & OpacityChannel) != 0) &&
      ((image->matte != MagickFalse) ||
       (reconstruct_image->matte != MagickFalse)))
    distortion[AllChannels]/=(double) (GetNumberChannels(image,channel)-1);
  else
    distortion[AllChannels]/=(double) GetNumberChannels(image,channel);
  distortion[AllChannels]=sqrt(distortion[AllChannels]);
  return(status);
}

/*
  GetMeanAbsoluteDistortion(): mean absolute difference, per channel, of the
  QuantumScale-normalized components.
*/
static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  status=MagickTrue;
  image_view=AcquireCacheView(image);
  reconstruct_view=AcquireCacheView(reconstruct_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      channel_distortion[AllChannels+1];

    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,
      reconstruct_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        distance;

      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*fabs(p->red-(double) q->red);
          channel_distortion[RedChannel]+=distance;
          channel_distortion[AllChannels]+=distance;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=QuantumScale*fabs(p->green-(double) q->green);
          channel_distortion[GreenChannel]+=distance;
          channel_distortion[AllChannels]+=distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*fabs(p->blue-(double) q->blue);
          channel_distortion[BlueChannel]+=distance;
          channel_distortion[AllChannels]+=distance;
        }
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          distance=QuantumScale*fabs(p->opacity-(double) q->opacity);
          channel_distortion[OpacityChannel]+=distance;
          channel_distortion[AllChannels]+=distance;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*fabs(indexes[x]-(double)
            reconstruct_indexes[x]);
          channel_distortion[BlackChannel]+=distance;
          channel_distortion[AllChannels]+=distance;
        }
      p++;
      q++;
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_GetMeanAbsoluteError)
#endif
    for (i=0; i <= (ssize_t) AllChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  for (i=0; i <= (ssize_t) AllChannels; i++)
    distortion[i]/=((double) image->columns*image->rows);
  distortion[AllChannels]/=(double) GetNumberChannels(image,channel);
  return(status);
}

/*
  GetMeanErrorPerPixel(): alpha-weighted absolute error; also stores the
  mean/normalized-mean/normalized-maximum error into image->error as a side
  effect.  Runs serially (no OpenMP) because it accumulates directly into
  distortion[] and the error statistics.
*/
static MagickBooleanType GetMeanErrorPerPixel(Image *image,
  const Image *reconstruct_image,const ChannelType channel,double *distortion,
  ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  MagickRealType
    alpha,
    area,
    beta,
    maximum_error,
    mean_error;

  ssize_t
    y;

  status=MagickTrue;
  alpha=1.0;
  beta=1.0;
  area=0.0;
  maximum_error=0.0;
  mean_error=0.0;
  image_view=AcquireCacheView(image);
  reconstruct_view=AcquireCacheView(reconstruct_image);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,
      reconstruct_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        break;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        distance;

      if ((channel & OpacityChannel) != 0)
        {
          if (image->matte != MagickFalse)
            alpha=(MagickRealType) (QuantumScale*(GetAlphaPixelComponent(p)));
          if (reconstruct_image->matte != MagickFalse)
            beta=(MagickRealType) (QuantumScale*GetAlphaPixelComponent(q));
        }
      if ((channel & RedChannel) != 0)
        {
          distance=fabs(alpha*p->red-beta*q->red);
          distortion[RedChannel]+=distance;
          distortion[AllChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=fabs(alpha*p->green-beta*q->green);
          distortion[GreenChannel]+=distance;
          distortion[AllChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=fabs(alpha*p->blue-beta*q->blue);
          distortion[BlueChannel]+=distance;
          distortion[AllChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          distance=fabs((double) p->opacity-q->opacity);
          distortion[OpacityChannel]+=distance;
          distortion[AllChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=fabs(alpha*indexes[x]-beta*reconstruct_indexes[x]);
          distortion[BlackChannel]+=distance;
          distortion[AllChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      p++;
      q++;
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* NOTE(review): if no selected channel applies (area stays 0.0) these
     divisions produce inf/NaN — confirm whether callers guarantee at least
     one applicable channel */
  image->error.mean_error_per_pixel=distortion[AllChannels]/area;
  image->error.normalized_mean_error=QuantumScale*QuantumScale*mean_error/
    area;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  return(status);
}

/*
  GetMeanSquaredDistortion(): mean of the squared QuantumScale-normalized
  component differences, per channel.
*/
static MagickBooleanType GetMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  status=MagickTrue;
  image_view=AcquireCacheView(image);
  reconstruct_view=AcquireCacheView(reconstruct_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      channel_distortion[AllChannels+1];

    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,
      reconstruct_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        distance;

      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*(p->red-(MagickRealType) q->red);
          channel_distortion[RedChannel]+=distance*distance;
          channel_distortion[AllChannels]+=distance*distance;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=QuantumScale*(p->green-(MagickRealType) q->green);
          channel_distortion[GreenChannel]+=distance*distance;
          channel_distortion[AllChannels]+=distance*distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*(p->blue-(MagickRealType) q->blue);
          channel_distortion[BlueChannel]+=distance*distance;
          channel_distortion[AllChannels]+=distance*distance;
        }
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          distance=QuantumScale*(p->opacity-(MagickRealType) q->opacity);
          channel_distortion[OpacityChannel]+=distance*distance;
          channel_distortion[AllChannels]+=distance*distance;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*(indexes[x]-(MagickRealType)
            reconstruct_indexes[x]);
          channel_distortion[BlackChannel]+=distance*distance;
          channel_distortion[AllChannels]+=distance*distance;
        }
      p++;
      q++;
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_GetMeanSquaredError)
#endif
    for (i=0; i <= (ssize_t) AllChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  for (i=0; i <= (ssize_t) AllChannels; i++)
    distortion[i]/=((double) image->columns*image->rows);
  distortion[AllChannels]/=(double) GetNumberChannels(image,channel);
  return(status);
}

/*
  GetNormalizedCrossCorrelationDistortion(): per-channel normalized
  cross-correlation against per-channel mean/standard-deviation statistics
  from GetImageChannelStatistics().
*/
static MagickBooleanType GetNormalizedCrossCorrelationDistortion(
  const Image *image,const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *image_view,
    *reconstruct_view;

  ChannelStatistics
    *image_statistics,
    *reconstruct_statistics;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    area;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Normalize to account for variation due to lighting and exposure condition.
*/ image_statistics=GetImageChannelStatistics(image,exception); reconstruct_statistics=GetImageChannelStatistics(reconstruct_image,exception); status=MagickTrue; progress=0; for (i=0; i <= (ssize_t) AllChannels; i++) distortion[i]=0.0; area=1.0/((MagickRealType) image->columns*image->rows); image_view=AcquireCacheView(image); reconstruct_view=AcquireCacheView(reconstruct_image); for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes, *restrict reconstruct_indexes; register const PixelPacket *restrict p, *restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) distortion[RedChannel]+=area*QuantumScale*(p->red- image_statistics[RedChannel].mean)*(q->red- reconstruct_statistics[RedChannel].mean); if ((channel & GreenChannel) != 0) distortion[GreenChannel]+=area*QuantumScale*(p->green- image_statistics[GreenChannel].mean)*(q->green- reconstruct_statistics[GreenChannel].mean); if ((channel & BlueChannel) != 0) distortion[BlueChannel]+=area*QuantumScale*(p->blue- image_statistics[BlueChannel].mean)*(q->blue- reconstruct_statistics[BlueChannel].mean); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) distortion[OpacityChannel]+=area*QuantumScale*(p->opacity- image_statistics[OpacityChannel].mean)*(q->opacity- reconstruct_statistics[OpacityChannel].mean); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (reconstruct_image->colorspace == CMYKColorspace)) distortion[BlackChannel]+=area*QuantumScale*(indexes[x]- 
image_statistics[OpacityChannel].mean)*(reconstruct_indexes[x]- reconstruct_statistics[OpacityChannel].mean); p++; q++; } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,SimilarityImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); /* Divide by the standard deviation. */ for (i=0; i < (ssize_t) AllChannels; i++) { MagickRealType gamma; gamma=image_statistics[i].standard_deviation* reconstruct_statistics[i].standard_deviation; gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma); distortion[i]=QuantumRange*gamma*distortion[i]; } distortion[AllChannels]=0.0; if ((channel & RedChannel) != 0) distortion[AllChannels]+=distortion[RedChannel]*distortion[RedChannel]; if ((channel & GreenChannel) != 0) distortion[AllChannels]+=distortion[GreenChannel]*distortion[GreenChannel]; if ((channel & BlueChannel) != 0) distortion[AllChannels]+=distortion[BlueChannel]*distortion[BlueChannel]; if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) distortion[AllChannels]+=distortion[OpacityChannel]* distortion[OpacityChannel]; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) distortion[AllChannels]+=distortion[BlackChannel]*distortion[BlackChannel]; distortion[AllChannels]=sqrt(distortion[AllChannels]/GetNumberChannels(image, channel)); /* Free resources. 
*/ reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory( reconstruct_statistics); image_statistics=(ChannelStatistics *) RelinquishMagickMemory( image_statistics); return(status); } static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image, const Image *reconstruct_image,const ChannelType channel, double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; ssize_t y; status=MagickTrue; image_view=AcquireCacheView(image); reconstruct_view=AcquireCacheView(reconstruct_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { double channel_distortion[AllChannels+1]; register const IndexPacket *restrict indexes, *restrict reconstruct_indexes; register const PixelPacket *restrict p, *restrict q; register ssize_t i, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y, reconstruct_image->columns,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view); (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType distance; if ((channel & RedChannel) != 0) { distance=QuantumScale*fabs(p->red-(double) q->red); if (distance > channel_distortion[RedChannel]) channel_distortion[RedChannel]=distance; if (distance > channel_distortion[AllChannels]) channel_distortion[AllChannels]=distance; } if ((channel & GreenChannel) != 0) { distance=QuantumScale*fabs(p->green-(double) q->green); if (distance > channel_distortion[GreenChannel]) channel_distortion[GreenChannel]=distance; if (distance > channel_distortion[AllChannels]) 
            channel_distortion[AllChannels]=distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*fabs(p->blue-(double) q->blue);
          if (distance > channel_distortion[BlueChannel])
            channel_distortion[BlueChannel]=distance;
          if (distance > channel_distortion[AllChannels])
            channel_distortion[AllChannels]=distance;
        }
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          distance=QuantumScale*fabs(p->opacity-(double) q->opacity);
          if (distance > channel_distortion[OpacityChannel])
            channel_distortion[OpacityChannel]=distance;
          if (distance > channel_distortion[AllChannels])
            channel_distortion[AllChannels]=distance;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*fabs(indexes[x]-(double)
            reconstruct_indexes[x]);
          if (distance > channel_distortion[BlackChannel])
            channel_distortion[BlackChannel]=distance;
          if (distance > channel_distortion[AllChannels])
            channel_distortion[AllChannels]=distance;
        }
      p++;
      q++;
    }
    /* merge this row's maxima into the shared result */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_GetPeakAbsoluteError)
#endif
    for (i=0; i <= (ssize_t) AllChannels; i++)
      if (channel_distortion[i] > distortion[i])
        distortion[i]=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
  GetPeakSignalToNoiseRatio(): PSNR in dB, derived from the mean-squared
  distortion.  NOTE(review): for identical images the MSE is 0 and the
  result is IEEE +inf — confirm callers tolerate that.
*/
static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=GetMeanSquaredDistortion(image,reconstruct_image,channel,distortion,
    exception);
  if ((channel & RedChannel) != 0)
    distortion[RedChannel]=20.0*log10((double) 1.0/sqrt(
      distortion[RedChannel]));
  if ((channel & GreenChannel) != 0)
    distortion[GreenChannel]=20.0*log10((double) 1.0/sqrt(
      distortion[GreenChannel]));
  if ((channel & BlueChannel) != 0)
    distortion[BlueChannel]=20.0*log10((double) 1.0/sqrt(
      distortion[BlueChannel]));
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    distortion[OpacityChannel]=20.0*log10((double) 1.0/sqrt(
      distortion[OpacityChannel]));
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    distortion[BlackChannel]=20.0*log10((double) 1.0/sqrt(
      distortion[BlackChannel]));
  distortion[AllChannels]=20.0*log10((double) 1.0/sqrt(
    distortion[AllChannels]));
  return(status);
}

/*
  GetRootMeanSquaredDistortion(): square root of the mean-squared
  distortion, per channel.
*/
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=GetMeanSquaredDistortion(image,reconstruct_image,channel,distortion,
    exception);
  if ((channel & RedChannel) != 0)
    distortion[RedChannel]=sqrt(distortion[RedChannel]);
  if ((channel & GreenChannel) != 0)
    distortion[GreenChannel]=sqrt(distortion[GreenChannel]);
  if ((channel & BlueChannel) != 0)
    distortion[BlueChannel]=sqrt(distortion[BlueChannel]);
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    distortion[OpacityChannel]=sqrt(distortion[OpacityChannel]);
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    distortion[BlackChannel]=sqrt(distortion[BlackChannel]);
  distortion[AllChannels]=sqrt(distortion[AllChannels]);
  return(status);
}

/*
  GetImageChannelDistortion(): public entry point; dispatches to the
  metric-specific helper and returns the combined (AllChannels) distortion
  through *distortion.
*/
MagickExport MagickBooleanType GetImageChannelDistortion(Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((reconstruct_image->columns != image->columns) ||
      (reconstruct_image->rows != image->rows))
    ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename);
  /*
    Get image distortion.
  */
  length=AllChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_distortion,0,length*
    sizeof(*channel_distortion));
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel,channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
  }
  *distortion=channel_distortion[AllChannels];
  channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e C h a n n e l D i s t o r t i o n s                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageChannelDistortions() compares the image channels of an image to a
%  reconstructed image and returns the specified distortion metric for each
%  channel.
%
%  The format of the GetImageChannelDistortions method is:
%
%      double *GetImageChannelDistortions(const Image *image,
%        const Image *reconstruct_image,const MetricType metric,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o reconstruct_image: the reconstruct image.
%
%    o metric: the metric.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Returns a newly allocated array of per-channel distortions (indexed by
  channel constant, AllChannels last) or NULL on failure; the caller owns
  the array and must free it with RelinquishMagickMemory().
*/
MagickExport double *GetImageChannelDistortions(Image *image,
  const Image *reconstruct_image,const MetricType metric,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((reconstruct_image->columns != image->columns) ||
      (reconstruct_image->rows != image->rows))
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ImageError,"ImageSizeDiffers","`%s'",image->filename);
      return((double *) NULL);
    }
  /*
    Get image distortion.
  */
  length=AllChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,AllChannels,
        channel_distortion,exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,AllChannels,
        channel_distortion,exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,AllChannels,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,AllChannels,
        channel_distortion,exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,AllChannels,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        AllChannels,channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,AllChannels,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,AllChannels,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,AllChannels,
        channel_distortion,exception);
      break;
    }
  }
  if (status == MagickFalse)
    {
      channel_distortion=(double *) RelinquishMagickMemory(
        channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e s E q u a l                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImagesEqual() measures the difference between colors at each pixel
%  location of two images.  A value other than 0 means the colors match
%  exactly.  Otherwise an error measure is computed by summing over all
%  pixels in an image the distance squared in RGB space between each image
%  pixel and its corresponding pixel in the reconstruct image.  The error
%  measure is assigned to these image members:
%
%    o mean_error_per_pixel:  The mean error for any single pixel in
%      the image.
%
%    o normalized_mean_error:  The normalized mean quantization error for
%      any single pixel in the image.  This distance measure is normalized to
%      a range between 0 and 1.  It is independent of the range of red, green,
%      and blue values in the image.
%
%    o normalized_maximum_error:  The normalized maximum quantization
%      error for any single pixel in the image.  This distance measure is
%      normalized to a range between 0 and 1.  It is independent of the range
%      of red, green, and blue values in your image.
%
%  A small normalized mean square error, accessed as
%  image->normalized_mean_error, suggests the images are very similar in
%  spatial layout and color.  (The function's own return value is MagickTrue
%  only when the mean error per pixel is exactly zero.)
%
%  The format of the IsImagesEqual method is:
%
%      MagickBooleanType IsImagesEqual(Image *image,
%        const Image *reconstruct_image)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o reconstruct_image: the reconstruct image.
%
*/
MagickExport MagickBooleanType IsImagesEqual(Image *image,
  const Image *reconstruct_image)
{
  CacheView
    *image_view,
    *reconstruct_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickRealType
    area,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickSignature);
  if ((reconstruct_image->columns != image->columns) ||
      (reconstruct_image->rows != image->rows))
    ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename);
  /*
    Accumulate per-channel absolute differences; `area' counts one unit per
    channel sample (not per pixel), so the means below are per-sample.
  */
  area=0.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
  reconstruct_view=AcquireCacheView(reconstruct_image);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns,
      1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      break;  /* pixel fetch failed: stop accumulating, keep partial sums */
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        distance;

      distance=fabs(p->red-(double) q->red);
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      area++;
      distance=fabs(p->green-(double) q->green);
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      area++;
      distance=fabs(p->blue-(double) q->blue);
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      area++;
      if (image->matte != MagickFalse)
        {
          distance=fabs(p->opacity-(double) q->opacity);
          mean_error_per_pixel+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      if ((image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          /* CMYK: the black (K) channel lives in the index queue */
          distance=fabs(indexes[x]-(double) reconstruct_indexes[x]);
          mean_error_per_pixel+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      p++;
      q++;
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    NOTE(review): if no pixels were processed (zero rows/columns or a fetch
    failure on the first row), `area' is 0 and the divisions below divide by
    zero — confirm callers guarantee non-empty images.
  */
  image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area);
  image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale*
    mean_error/area);
  image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error);
  status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S i m i l a r i t y I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SimilarityImage() compares the reference image of the image and returns the
%  best match offset.  In addition, it returns a similarity image such that an
%  exact match location is completely white and if none of the pixels match,
%  black, otherwise some gray level in-between.
%
%  The format of the SimilarityImageImage method is:
%
%      Image *SimilarityImage(const Image *image,const Image *reference,
%        RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o reference: find an area of the image that closely resembles this image.
%
%    o offset: the best match offset of the reference image within the image.
%
%    o similarity: the computed similarity between the images.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  GetNCCDistortion() returns 1.0 minus the normalized cross-correlation of
  `image' against `reconstruct_image' (0.0 == perfect correlation), averaged
  over the active channels.  `reconstruct_statistics' are precomputed by the
  caller so repeated calls over many crop windows only recompute statistics
  for `image'.
*/
static double GetNCCDistortion(const Image *image,
  const Image *reconstruct_image,
  const ChannelStatistics *reconstruct_statistics,ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *image_view,
    *reconstruct_view;

  ChannelStatistics
    *image_statistics;

  double
    distortion;

  MagickBooleanType
    status;

  MagickRealType
    area,
    gamma;

  ssize_t
    y;

  unsigned long
    number_channels;

  /*
    Normalize to account for variation due to lighting and exposure condition.
  */
  image_statistics=GetImageChannelStatistics(image,exception);
  status=MagickTrue;
  distortion=0.0;
  area=1.0/((MagickRealType) image->columns*image->rows);
  image_view=AcquireCacheView(image);
  reconstruct_view=AcquireCacheView(reconstruct_image);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns,
      1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        /*
          NOTE(review): status is set but never propagated to the caller;
          a failed pixel fetch silently yields a partial sum — confirm.
        */
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Accumulate the mean-centered cross products, channel by channel. */
      distortion+=area*QuantumScale*(p->red-
        image_statistics[RedChannel].mean)*(q->red-
        reconstruct_statistics[RedChannel].mean);
      distortion+=area*QuantumScale*(p->green-
        image_statistics[GreenChannel].mean)*(q->green-
        reconstruct_statistics[GreenChannel].mean);
      distortion+=area*QuantumScale*(p->blue-
        image_statistics[BlueChannel].mean)*(q->blue-
        reconstruct_statistics[BlueChannel].mean);
      if (image->matte != MagickFalse)
        distortion+=area*QuantumScale*(p->opacity-
          image_statistics[OpacityChannel].mean)*(q->opacity-
          reconstruct_statistics[OpacityChannel].mean);
      if ((image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        /*
          NOTE(review): the CMYK index (black) samples are centered with the
          OpacityChannel statistics — looks like BlackChannel/IndexChannel
          statistics were intended; confirm against upstream compare.c.
        */
        distortion+=area*QuantumScale*(indexes[x]-
          image_statistics[OpacityChannel].mean)*(reconstruct_indexes[x]-
          reconstruct_statistics[OpacityChannel].mean);
      p++;
      q++;
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Divide by the standard deviation.
  */
  gamma=image_statistics[AllChannels].standard_deviation*
    reconstruct_statistics[AllChannels].standard_deviation;
  gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
  distortion=QuantumRange*gamma*distortion;
  number_channels=3;
  if (image->matte != MagickFalse)
    number_channels++;
  if (image->colorspace == CMYKColorspace)
    number_channels++;
  distortion=sqrt(distortion/number_channels);
  /*
    Free resources.
  */
  image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    image_statistics);
  return(1.0-distortion);
}

/*
  GetSimilarityMetric() crops the window of `image' at (x_offset,y_offset)
  with the geometry of `reference' and returns its NCC distortion against
  `reference'.  Returns 0.0 (i.e. "perfect match") if the crop fails, since
  the crop error is already recorded in `exception'.
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const ChannelStatistics *reference_statistics,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
  double
    distortion;

  Image
    *similarity_image;

  RectangleInfo
    geometry;

  SetGeometry(reference,&geometry);
  geometry.x=x_offset;
  geometry.y=y_offset;
  similarity_image=CropImage(image,&geometry,exception);
  if (similarity_image == (Image *) NULL)
    return(0.0);
  distortion=GetNCCDistortion(reference,similarity_image,reference_statistics,
    exception);
  similarity_image=DestroyImage(similarity_image);
  return(distortion);
}

MagickExport Image *SimilarityImage(Image *image,const Image *reference,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
/* NOTE(review): SimilarityImageTag is (identically) redefined here — the
   earlier definition inside GetNCCDistortion appears unused; confirm. */
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *similarity_view;

  ChannelStatistics
    *reference_statistics;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  assert(offset != (RectangleInfo *) NULL);
  SetGeometry(reference,offset);
  *similarity_metric=1.0;  /* worst case; lowered as better matches are found */
  if ((reference->columns > image->columns) || (reference->rows > image->rows))
    ThrowImageException(ImageError,"ImageSizeDiffers");
  /*
    The similarity map has one pixel per candidate offset:
    (columns-ref_columns+1) x (rows-ref_rows+1).
  */
  similarity_image=CloneImage(image,image->columns-reference->columns+1,
    image->rows-reference->rows+1,MagickTrue,exception);
  if (similarity_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(similarity_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&similarity_image->exception);
      similarity_image=DestroyImage(similarity_image);
      return((Image *) NULL);
    }
  /*
    Measure similarity of reference image against image.
  */
  status=MagickTrue;
  progress=0;
  reference_statistics=GetImageChannelStatistics(reference,exception);
  similarity_view=AcquireCacheView(similarity_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
  {
    double
      similarity;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
      1,exception);
    if (q == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
    {
      similarity=GetSimilarityMetric(image,reference,reference_statistics,x,y,
        exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SimilarityImage)
#endif
      /* Track the global best (lowest-distortion) offset under the lock. */
      if (similarity < *similarity_metric)
        {
          *similarity_metric=similarity;
          offset->x=x;
          offset->y=y;
        }
      /* Map: white == exact match, black == no correlation. */
      q->red=ClampToQuantum(QuantumRange-QuantumRange*similarity);
      q->green=q->red;
      q->blue=q->red;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SimilarityImage)
#endif
        proceed=SetImageProgress(image,SimilarityImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  similarity_view=DestroyCacheView(similarity_view);
  reference_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    reference_statistics);
  return(similarity_image);
}
Example_target_struct_map.2.c
/* * @@name: target_struct_map.2c * @@type: C * @@compilable: yes * @@linkable: yes * @@expect: success * @@version: omp_5.0 */ #include <stdio.h> #include <stdlib.h> #define N 100 #pragma omp declare target int a; #pragma omp end declare target int main(){ int i; int *p; #pragma omp target data map(p) { p = (int *)malloc(sizeof(int)*N); for (i=0; i<N; i++) p[i] = i; a = 1; #pragma omp target map(p[:N]) // var a -- effective map(alloc:a) { // here and is persistent a=2; for (i=0; i<N; i++) p[i] *= a; a=3; } printf("a=%d, p[1],p[N-1]: %d %d\n", a, p[1], p[N-1]); // 1 2 198 #pragma omp target map(p[:N]) // a is persistent for (i=0; i<N; i++) p[i] += a; printf("a=%d, p[1],p[N-1]: %d %d\n", a, p[1], p[N-1]); // 1 5 201 } return 0; }
idaFoodWeb_kry_omp.c
/* * ----------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds and Ting Yan @ SMU * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2021, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * Example program for IDA: Food web problem, OpenMP, GMRES, * user-supplied preconditioner * * This example program uses SUNLinSol_SPGMR as the linear * solver, and IDACalcIC for initial condition calculation. * * The mathematical problem solved in this example is a DAE system * that arises from a system of partial differential equations after * spatial discretization. The PDE system is a food web population * model, with predator-prey interaction and diffusion on the unit * square in two dimensions. The dependent variable vector is: * * 1 2 ns * c = (c , c , ..., c ) , ns = 2 * np * * and the PDE's are as follows: * * i i i * dc /dt = d(i)*(c + c ) + R (x,y,c) (i = 1,...,np) * xx yy i * * i i * 0 = d(i)*(c + c ) + R (x,y,c) (i = np+1,...,ns) * xx yy i * * where the reaction terms R are: * * i ns j * R (x,y,c) = c * (b(i) + sum a(i,j)*c ) * i j=1 * * The number of species is ns = 2 * np, with the first np being * prey and the last np being predators. The coefficients a(i,j), * b(i), d(i) are: * * a(i,i) = -AA (all i) * a(i,j) = -GG (i <= np , j > np) * a(i,j) = EE (i > np, j <= np) * all other a(i,j) = 0 * b(i) = BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i <= np) * b(i) =-BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i > np) * d(i) = DPREY (i <= np) * d(i) = DPRED (i > np) * * The various scalar parameters required are set using '#define' * statements or directly in routine InitUserData. 
In this program, * np = 1, ns = 2. The boundary conditions are homogeneous Neumann: * normal derivative = 0. * * A polynomial in x and y is used to set the initial values of the * first np variables (the prey variables) at each x,y location, * while initial values for the remaining (predator) variables are * set to a flat value, which is corrected by IDACalcIC. * * The PDEs are discretized by central differencing on a MX by MY * mesh. * * The DAE system is solved by IDA using the SUNLinSol_SPGMR linear solver. * Output is printed at t = 0, .001, .01, .1, .4, .7, 1. * * Optionally, we can set the number of threads from environment * variable or command line. To check the current value for number * of threads from environment: * % echo $OMP_NUM_THREADS * * Execution: * * To use the default value for the number of threads from * the OMP_NUM_THREADS environment value: * % ./idaFoodWeb_kry_omp * To specify the number of threads at the command line, use * % ./idaFoodWeb_kry_omp num_threads * where num_threads is the desired number of threads. * * ----------------------------------------------------------------- * References: * [1] Peter N. Brown and Alan C. Hindmarsh, * Reduced Storage Matrix Methods in Stiff ODE systems, Journal * of Applied Mathematics and Computation, Vol. 31 (May 1989), * pp. 40-91. * * [2] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold, * Using Krylov Methods in the Solution of Large-Scale * Differential-Algebraic Systems, SIAM J. Sci. Comput., 15 * (1994), pp. 1467-1488. * * [3] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold, * Consistent Initial Condition Calculation for Differential- * Algebraic Systems, SIAM J. Sci. Comput., 19 (1998), * pp. 1495-1512. 
* ----------------------------------------------------------------- */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <ida/ida.h> #include <sunlinsol/sunlinsol_spgmr.h> #include <nvector/nvector_openmp.h> #include <sundials/sundials_dense.h> #include <sundials/sundials_types.h> #ifdef _OPENMP #include <omp.h> #endif /* helpful macros */ #ifndef MAX #define MAX(A, B) ((A) > (B) ? (A) : (B)) #endif /* Problem Constants. */ #define NPREY 1 /* No. of prey (= no. of predators). */ #define NUM_SPECIES 2*NPREY #define PI RCONST(3.1415926535898) #define FOURPI (RCONST(4.0)*PI) #define MX 20 /* MX = number of x mesh points */ #define MY 20 /* MY = number of y mesh points */ #define NSMX (NUM_SPECIES * MX) #define NEQ (NUM_SPECIES*MX*MY) #define AA RCONST(1.0) /* Coefficient in above eqns. for a */ #define EE RCONST(10000.) /* Coefficient in above eqns. for a */ #define GG RCONST(0.5e-6) /* Coefficient in above eqns. for a */ #define BB RCONST(1.0) /* Coefficient in above eqns. for b */ #define DPREY RCONST(1.0) /* Coefficient in above eqns. for d */ #define DPRED RCONST(0.05) /* Coefficient in above eqns. for d */ #define ALPHA RCONST(50.) /* Coefficient alpha in above eqns. */ #define BETA RCONST(1000.) /* Coefficient beta in above eqns. */ #define AX RCONST(1.0) /* Total range of x variable */ #define AY RCONST(1.0) /* Total range of y variable */ #define RTOL RCONST(1.e-5) /* Relative tolerance */ #define ATOL RCONST(1.e-5) /* Absolute tolerance */ #define NOUT 6 /* Number of output times */ #define TMULT RCONST(10.0) /* Multiplier for tout values */ #define TADD RCONST(0.3) /* Increment for tout values */ #define ZERO RCONST(0.) #define ONE RCONST(1.0) /* * User-defined vector and accessor macro: IJ_Vptr. * IJ_Vptr is defined in order to express the underlying 3-D structure of * the dependent variable vector from its underlying 1-D storage (an N_Vector). 
* IJ_Vptr(vv,i,j) returns a pointer to the location in vv corresponding to
 * species index is = 0, x-index ix = i, and y-index jy = j.
 */

#define IJ_Vptr(vv,i,j) (&NV_Ith_OMP(vv, (i)*NUM_SPECIES + (j)*NSMX))

/* Type: UserData.  Contains problem constants, etc. */

typedef struct {
  sunindextype Neq, ns, np, mx, my;   /* system size, species, prey, mesh dims */
  realtype dx, dy, **acoef;           /* mesh spacings; interaction matrix a   */
  realtype cox[NUM_SPECIES], coy[NUM_SPECIES], bcoef[NUM_SPECIES];
  realtype **PP[MX][MY];              /* block-diagonal preconditioner blocks  */
  sunindextype *pivot[MX][MY];        /* LU pivots, one array per grid point   */
  N_Vector rates;                     /* reaction rates at current solution    */
  N_Vector ewt;                       /* scratch for IDA error weights         */
  void *ida_mem;                      /* back-pointer to the IDA solver        */
  int nthreads;                       /* OpenMP threads used in loops below    */
} *UserData;

/* Prototypes for functions called by the IDA Solver. */

static int resweb(realtype time, N_Vector cc, N_Vector cp,
                  N_Vector resval, void *user_data);

static int Precond(realtype tt, N_Vector cc, N_Vector cp,
                   N_Vector rr, realtype cj, void *user_data);

static int PSolve(realtype tt, N_Vector cc, N_Vector cp,
                  N_Vector rr, N_Vector rvec, N_Vector zvec,
                  realtype cj, realtype delta, void *user_data);

/* Prototypes for private Helper Functions. */

static void InitUserData(UserData webdata);
static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id,
                               UserData webdata);
static void PrintHeader(int maxl, realtype rtol, realtype atol);
static void PrintOutput(void *ida_mem, N_Vector c, realtype t);
static void PrintFinalStats(void *ida_mem);
static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate,
                 UserData webdata);
static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy,
                     UserData webdata);
static realtype dotprod(sunindextype size, realtype *x1, realtype *x2);
static int check_retval(void *returnvalue, char *funcname, int opt);

/*
 *--------------------------------------------------------------------
 * MAIN PROGRAM
 *--------------------------------------------------------------------
 */

int main(int argc, char *argv[])
{
  void *ida_mem;
  SUNLinearSolver LS;
  UserData webdata;
  N_Vector cc, cp, id;
  int iout, jx, jy, retval;
  int maxl;
  realtype rtol, atol, t0, tout, tret;
  int num_threads;
  SUNContext ctx;

  ida_mem = NULL;
  LS = NULL;
  webdata = NULL;
  cc = cp = id = NULL;

  /* Set the number of threads to use */
  num_threads = 1;       /* default value */
#ifdef _OPENMP
  num_threads = omp_get_max_threads();  /* overwrite with OMP_NUM_THREADS */
#endif
  if (argc > 1)      /* overwrite with command line value, if supplied */
    num_threads = (int) strtol(argv[1], NULL, 0);

  /* Create the SUNDIALS context object for this simulation */
  retval = SUNContext_Create(NULL, &ctx);
  if (check_retval(&retval, "SUNContext_Create", 1)) return 1;

  /* Allocate and initialize user data block webdata. */
  webdata = (UserData) malloc(sizeof *webdata);
  webdata->rates = N_VNew_OpenMP(NEQ, num_threads, ctx);
  webdata->acoef = SUNDlsMat_newDenseMat(NUM_SPECIES, NUM_SPECIES);
  webdata->ewt = N_VNew_OpenMP(NEQ, num_threads, ctx);
  for (jx = 0; jx < MX; jx++) {
    for (jy = 0; jy < MY; jy++) {
      (webdata->pivot)[jx][jy] = SUNDlsMat_newIndexArray(NUM_SPECIES);
      (webdata->PP)[jx][jy] = SUNDlsMat_newDenseMat(NUM_SPECIES, NUM_SPECIES);
    }
  }
  webdata->nthreads = num_threads;

  InitUserData(webdata);

  /* Allocate N-vectors and initialize cc, cp, and id. */
  cc = N_VNew_OpenMP(NEQ, num_threads, ctx);
  if(check_retval((void *)cc, "N_VNew_OpenMP", 0)) return(1);

  cp = N_VNew_OpenMP(NEQ, num_threads, ctx);
  if(check_retval((void *)cp, "N_VNew_OpenMP", 0)) return(1);

  id = N_VNew_OpenMP(NEQ, num_threads, ctx);
  if(check_retval((void *)id, "N_VNew_OpenMP", 0)) return(1);

  SetInitialProfiles(cc, cp, id, webdata);

  /* Set remaining inputs to IDAMalloc. */
  t0 = ZERO;
  rtol = RTOL;
  atol = ATOL;

  /* Call IDACreate and IDAMalloc to initialize IDA. */
  ida_mem = IDACreate(ctx);
  if(check_retval((void *)ida_mem, "IDACreate", 0)) return(1);

  retval = IDASetUserData(ida_mem, webdata);
  if(check_retval(&retval, "IDASetUserData", 1)) return(1);

  retval = IDASetId(ida_mem, id);
  if(check_retval(&retval, "IDASetId", 1)) return(1);

  retval = IDAInit(ida_mem, resweb, t0, cc, cp);
  if(check_retval(&retval, "IDAInit", 1)) return(1);

  retval = IDASStolerances(ida_mem, rtol, atol);
  if(check_retval(&retval, "IDASStolerances", 1)) return(1);

  webdata->ida_mem = ida_mem;

  /* Create SUNLinSol_SPGMR linear solver, attach to IDA, and set
     preconditioning routines. */
  maxl = 16;                               /* max dimension of the Krylov subspace */
  LS = SUNLinSol_SPGMR(cc, SUN_PREC_LEFT, maxl, ctx);  /* IDA only allows left preconditioning */
  if(check_retval((void *)LS, "SUNLinSol_SPGMR", 0)) return(1);

  retval = IDASetLinearSolver(ida_mem, LS, NULL);
  if(check_retval(&retval, "IDASetLinearSolver", 1)) return(1);

  retval = IDASetPreconditioner(ida_mem, Precond, PSolve);
  if(check_retval(&retval, "IDASetPreconditioner", 1)) return(1);

  /* Call IDACalcIC (with default options) to correct the initial values. */
  tout = RCONST(0.001);
  retval = IDACalcIC(ida_mem, IDA_YA_YDP_INIT, tout);
  if(check_retval(&retval, "IDACalcIC", 1)) return(1);

  /* Print heading, basic parameters, and initial values. */
  PrintHeader(maxl, rtol, atol);
  PrintOutput(ida_mem, cc, ZERO);

  /* Loop over iout, call IDASolve (normal mode), print selected output. */
  for (iout = 1; iout <= NOUT; iout++) {

    retval = IDASolve(ida_mem, tout, &tret, cc, cp, IDA_NORMAL);
    if(check_retval(&retval, "IDASolve", 1)) return(retval);

    PrintOutput(ida_mem, cc, tret);

    /* tout = .001, .01, .1 then advances by fixed increments of TADD */
    if (iout < 3) tout *= TMULT;
    else          tout += TADD;

  }

  /* Print final statistics and free memory. */
  PrintFinalStats(ida_mem);
  printf("num_threads = %i\n\n", num_threads);

  /* Free memory */
  IDAFree(&ida_mem);
  SUNLinSolFree(LS);

  N_VDestroy(cc);
  N_VDestroy(cp);
  N_VDestroy(id);

  SUNDlsMat_destroyMat(webdata->acoef);
  N_VDestroy(webdata->rates);
  N_VDestroy(webdata->ewt);
  for (jx = 0; jx < MX; jx++) {
    for (jy = 0; jy < MY; jy ++) {
      SUNDlsMat_destroyArray((webdata->pivot)[jx][jy]);
      SUNDlsMat_destroyMat((webdata->PP)[jx][jy]);
    }
  }
  free(webdata);

  SUNContext_Free(&ctx);

  return(0);
}

/* Define lines for readability in later routines */

#define acoef  (webdata->acoef)
#define bcoef  (webdata->bcoef)
#define cox    (webdata->cox)
#define coy    (webdata->coy)

/*
 *--------------------------------------------------------------------
 * FUNCTIONS CALLED BY IDA
 *--------------------------------------------------------------------
 */

/*
 * resweb: System residual function for predator-prey system.
 * This routine calls Fweb to get all the right-hand sides of the
 * equations, then loads the residual vector accordingly,
 * using cp in the case of prey species.
 */

static int resweb(realtype tt, N_Vector cc, N_Vector cp,
                  N_Vector res, void *user_data)
{
  sunindextype jx, jy, is, yloc, loc, np;
  realtype *resv, *cpv;
  UserData webdata;

  jx = jy = is = 0;

  webdata = (UserData)user_data;

  cpv = NV_DATA_OMP(cp);
  resv = NV_DATA_OMP(res);
  np = webdata->np;

  /* Call Fweb to set res to vector of right-hand sides. */
  Fweb(tt, cc, res, webdata);

  /* Loop over all grid points, setting residual values appropriately
     for differential or algebraic components.
     Prey (is < np): differential, F = c' - f(c); predators: algebraic,
     F = -f(c).  Grid rows are independent, hence the parallel loop. */
#pragma omp parallel for default(shared) private(jy, jx, is, yloc, loc) schedule(static) num_threads(webdata->nthreads)
  for (jy = 0; jy < MY; jy++) {
    yloc = NSMX * jy;
    for (jx = 0; jx < MX; jx++) {
      loc = yloc + NUM_SPECIES * jx;
      for (is = 0; is < NUM_SPECIES; is++) {
        if (is < np)
          resv[loc+is] = cpv[loc+is] - resv[loc+is];
        else
          resv[loc+is] = -resv[loc+is];
      }
    }
  }

  return(0);
}

/*
 * Precond: set up the block-diagonal preconditioner.  Each NUM_SPECIES x
 * NUM_SPECIES block is a difference-quotient Jacobian of the reaction terms
 * at one grid point, with cj added on the differential (prey) diagonal,
 * then LU-factored in place for use by PSolve.
 */
static int Precond(realtype tt, N_Vector cc, N_Vector cp,
                   N_Vector rr, realtype cj, void *user_data)
{
  int retval;
  sunindextype ret;
  realtype uround, xx, yy, del_x, del_y;
  realtype **Pxy, *ratesxy, *Pxycol, *cxy, *cpxy, *ewtxy, cctmp;
  realtype inc, fac, sqru, perturb_rates[NUM_SPECIES];
  int is, js, jx, jy;
  void *ida_mem;
  N_Vector ewt;
  realtype hh;
  UserData webdata;

  webdata = (UserData) user_data;
  del_x = webdata->dx;
  del_y = webdata->dy;

  uround = UNIT_ROUNDOFF;
  sqru = sqrt(uround);

  ida_mem = webdata->ida_mem;
  ewt = webdata->ewt;
  retval = IDAGetErrWeights(ida_mem, ewt);
  if(check_retval(&retval, "IDAGetErrWeights", 1)) return(1);
  retval = IDAGetCurrentStep(ida_mem, &hh);
  if(check_retval(&retval, "IDAGetCurrentStep", 1)) return(1);

  for (jy = 0; jy < MY; jy++) {
    yy = jy * del_y;

    for (jx = 0; jx < MX; jx++) {
      xx = jx * del_x;
      Pxy = (webdata->PP)[jx][jy];
      cxy = IJ_Vptr(cc, jx, jy);
      cpxy = IJ_Vptr(cp, jx, jy);
      ewtxy = IJ_Vptr(ewt, jx, jy);
      ratesxy = IJ_Vptr((webdata->rates), jx, jy);

      for (js = 0; js < NUM_SPECIES; js++) {
        /* Perturbation size scaled by solution, derivative and error weight. */
        inc = sqru*(MAX(fabs(cxy[js]), MAX(hh*fabs(cpxy[js]), ONE/ewtxy[js])));
        cctmp = cxy[js];
        cxy[js] += inc;   /* perturb the js-th species at this grid point */
        fac = -ONE/inc;

        WebRates(xx, yy, cxy, perturb_rates, webdata);

        Pxycol = Pxy[js];

        for (is = 0; is < NUM_SPECIES; is++)
          Pxycol[is] = (perturb_rates[is] - ratesxy[is])*fac;

        /* Add cj on the diagonal for differential (prey) components only.
           NOTE(review): hard-codes the prey count (np == NPREY == 1);
           presumably this should read `js < np` — confirm. */
        if (js < 1) Pxycol[js] += cj;

        cxy[js] = cctmp;  /* restore the perturbed value */
      }

      /* LU-factor the block; pivots are stored per grid point. */
      ret = SUNDlsMat_denseGETRF(Pxy, NUM_SPECIES, NUM_SPECIES,
                                 (webdata->pivot)[jx][jy]);
      if (ret != 0) return(1);
    }
  }

  return(0);
}

/*
 * PSolve: solve the preconditioner system P z = r using the LU factors
 * computed by Precond, independently at every grid point.
 * (The parameter name `dalta' is a typo for `delta'; it is unused.)
 */
static int PSolve(realtype tt, N_Vector cc, N_Vector cp,
                  N_Vector rr, N_Vector rvec, N_Vector zvec,
                  realtype cj, realtype dalta, void *user_data)
{
  realtype **Pxy, *zxy;
  sunindextype *pivot;
  sunindextype jx, jy;
  UserData webdata;

  jx = jy = 0;

  webdata = (UserData) user_data;

  N_VScale(ONE, rvec, zvec);  /* z := r, then overwrite in place below */

#pragma omp parallel for collapse(2) default(shared) private(jx, jy, zxy, Pxy, pivot) schedule(static) num_threads(webdata->nthreads)
  for (jx = 0; jx < MX; jx++) {
    for (jy = 0; jy <MY; jy++) {

      zxy = IJ_Vptr(zvec, jx, jy);
      Pxy = (webdata->PP)[jx][jy];
      pivot = (webdata->pivot)[jx][jy];
      SUNDlsMat_denseGETRS(Pxy, NUM_SPECIES, pivot, zxy);

    }
  }

  return(0);
}

/*
 *--------------------------------------------------------------------
 * PRIVATE FUNCTIONS
 *--------------------------------------------------------------------
 */

/*
 * InitUserData: Load problem constants in webdata (of type UserData).
 */

static void InitUserData(UserData webdata)
{
  sunindextype i, j, np;
  realtype *a1,*a2, *a3, *a4, dx2, dy2;

  webdata->mx = MX;
  webdata->my = MY;
  webdata->ns = NUM_SPECIES;
  webdata->np = NPREY;
  webdata->dx = AX/(MX-1);
  webdata->dy = AY/(MY-1);
  webdata->Neq= NEQ;

  /* Set up the coefficients a and b, and others found in the equations. */
  np = webdata->np;
  dx2 = (webdata->dx)*(webdata->dx);
  dy2 = (webdata->dy)*(webdata->dy);

  for (i = 0; i < np; i++) {
    a1 = &(acoef[i][np]);
    a2 = &(acoef[i+np][0]);
    a3 = &(acoef[i][0]);
    a4 = &(acoef[i+np][np]);
    /* Fill in the portion of acoef in the four quadrants, row by row. */
    for (j = 0; j < np; j++) {
      *a1++ = -GG;   /* prey-predator quadrant     */
      *a2++ = EE;    /* predator-prey quadrant     */
      *a3++ = ZERO;  /* prey-prey off-diagonal     */
      *a4++ = ZERO;  /* predator-predator off-diag */
    }

    /* Reset the diagonal elements of acoef to -AA. */
    acoef[i][i] = -AA;
    acoef[i+np][i+np] = -AA;

    /* Set coefficients for b and diffusion terms. */
    bcoef[i] = BB;
    bcoef[i+np] = -BB;
    cox[i] = DPREY/dx2;
    cox[i+np] = DPRED/dx2;
    coy[i] = DPREY/dy2;
    coy[i+np] = DPRED/dy2;
  }
}

/*
 * SetInitialProfiles: Set initial conditions in cc, cp, and id.
 * A polynomial profile is used for the prey cc values, and a constant
 * (1.0e5) is loaded as the initial guess for the predator cc values.
 * The id values are set to 1 for the prey and 0 for the predators.
* The prey cp values are set according to the given system, and * the predator cp values are set to zero. */ static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id, UserData webdata) { sunindextype loc, yloc, is, jx, jy, np; realtype xx, yy, xyfactor; realtype *ccv, *cpv, *idv; ccv = NV_DATA_OMP(cc); cpv = NV_DATA_OMP(cp); idv = NV_DATA_OMP(id); np = webdata->np; /* Loop over grid, load cc values and id values. */ for (jy = 0; jy < MY; jy++) { yy = jy * webdata->dy; yloc = NSMX * jy; for (jx = 0; jx < MX; jx++) { xx = jx * webdata->dx; xyfactor = RCONST(16.0)*xx*(ONE-xx)*yy*(ONE-yy); xyfactor *= xyfactor; loc = yloc + NUM_SPECIES*jx; for (is = 0; is < NUM_SPECIES; is++) { if (is < np) { ccv[loc+is] = RCONST(10.0) + (realtype)(is+1) * xyfactor; idv[loc+is] = ONE; } else { ccv[loc+is] = RCONST(1.0e5); idv[loc+is] = ZERO; } } } } /* Set c' for the prey by calling the function Fweb. */ Fweb(ZERO, cc, cp, webdata); /* Set c' for predators to 0. */ for (jy = 0; jy < MY; jy++) { yloc = NSMX * jy; for (jx = 0; jx < MX; jx++) { loc = yloc + NUM_SPECIES * jx; for (is = np; is < NUM_SPECIES; is++) { cpv[loc+is] = ZERO; } } } } /* * Print first lines of output (problem description) */ static void PrintHeader(int maxl, realtype rtol, realtype atol) { printf("\nidaFoodWeb_kry_omp: Predator-prey DAE OpenMP example problem using Krylov solver for IDA \n\n"); printf("Number of species ns: %d", NUM_SPECIES); printf(" Mesh dimensions: %d x %d", MX, MY); printf(" System size: %d\n", NEQ); #if defined(SUNDIALS_EXTENDED_PRECISION) printf("Tolerance parameters: rtol = %Lg atol = %Lg\n", rtol, atol); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol); #else printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol); #endif printf("Linear solver: SUNLinSol_SPGMR, maxl = %d\n",maxl); printf("CalcIC called to correct initial predator concentrations.\n\n"); 
  printf("-----------------------------------------------------------\n");
  printf(" t bottom-left top-right");
  printf(" | nst k h\n");
  printf("-----------------------------------------------------------\n\n");
}

/*
 * PrintOutput: Print output values at output time t = tt.
 * Selected run statistics are printed. Then values of the concentrations
 * are printed for the bottom left and top right grid points only.
 */

static void PrintOutput(void *ida_mem, N_Vector c, realtype t)
{
  int i, kused, retval;
  long int nst;
  realtype *c_bl, *c_tr, hused;

  /* Query step statistics; check_retval only reports, it does not abort. */
  retval = IDAGetLastOrder(ida_mem, &kused);
  check_retval(&retval, "IDAGetLastOrder", 1);
  retval = IDAGetNumSteps(ida_mem, &nst);
  check_retval(&retval, "IDAGetNumSteps", 1);
  retval = IDAGetLastStep(ida_mem, &hused);
  check_retval(&retval, "IDAGetLastStep", 1);

  /* Sample concentrations at the two opposite corners of the grid. */
  c_bl = IJ_Vptr(c,0,0);
  c_tr = IJ_Vptr(c,MX-1,MY-1);

#if defined(SUNDIALS_EXTENDED_PRECISION)
  printf("%8.2Le %12.4Le %12.4Le | %3ld %1d %12.4Le\n",
         t, c_bl[0], c_tr[0], nst, kused, hused);
  for (i=1;i<NUM_SPECIES;i++)
    printf(" %12.4Le %12.4Le |\n",c_bl[i],c_tr[i]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
  printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n",
         t, c_bl[0], c_tr[0], nst, kused, hused);
  for (i=1;i<NUM_SPECIES;i++)
    printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]);
#else
  printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n",
         t, c_bl[0], c_tr[0], nst, kused, hused);
  for (i=1;i<NUM_SPECIES;i++)
    printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]);
#endif

  printf("\n");
}

/*
 * PrintFinalStats: Print final run data contained in iopt.
 */

static void PrintFinalStats(void *ida_mem)
{
  long int nst, nre, sli, netf, nps, npevals, nrevalsLS;
  int retval;

  retval = IDAGetNumSteps(ida_mem, &nst);
  check_retval(&retval, "IDAGetNumSteps", 1);
  retval = IDAGetNumLinIters(ida_mem, &sli);
  check_retval(&retval, "IDAGetNumLinIters", 1);
  retval = IDAGetNumResEvals(ida_mem, &nre);
  check_retval(&retval, "IDAGetNumResEvals", 1);
  retval = IDAGetNumErrTestFails(ida_mem, &netf);
  check_retval(&retval, "IDAGetNumErrTestFails", 1);
  retval = IDAGetNumPrecSolves(ida_mem, &nps);
  check_retval(&retval, "IDAGetNumPrecSolves", 1);
  retval = IDAGetNumPrecEvals(ida_mem, &npevals);
  check_retval(&retval, "IDAGetNumPrecEvals", 1);
  /* NOTE(review): nrevalsLS is collected here but never printed below. */
  retval = IDAGetNumLinResEvals(ida_mem, &nrevalsLS);
  check_retval(&retval, "IDAGetNumLinResEvals", 1);

  printf("-----------------------------------------------------------\n");
  printf("Final run statistics: \n\n");
  printf("Number of steps = %ld\n", nst);
  printf("Number of residual evaluations = %ld\n", nre);
  printf("Number of Preconditioner evaluations = %ld\n", npevals);
  printf("Number of linear iterations = %ld\n", sli);
  printf("Number of error test failures = %ld\n", netf);
  printf("Number of precond solve fun called = %ld\n", nps);
}

/*
 * Fweb: Rate function for the food-web problem.
 * This routine computes the right-hand sides of the system equations,
 * consisting of the diffusion term and interaction term.
 * The interaction term is computed by the function WebRates.
 */

static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate,
                 UserData webdata)
{
  sunindextype jx, jy, is, idyu, idyl, idxu, idxl;
  realtype xx, yy, *cxy, *ratesxy, *cratexy, dcyli, dcyui, dcxli, dcxui;

  /* Loop over grid points, evaluate interaction vector (length ns),
     form diffusion difference terms, and load crate. */
  jx = jy = is = 0;
  for (jy = 0; jy < MY; jy++) {
    yy = (webdata->dy) * jy ;
    /* Offsets to the y-neighbors; the sign flips at the boundary rows so
       the reference point stays inside the mesh (presumably implementing
       the reflective/zero-flux boundary of this example -- confirm against
       the full problem description). */
    idyu = (jy!=MY-1) ? NSMX : -NSMX;
    idyl = (jy!= 0 ) ? NSMX : -NSMX;

    for (jx = 0; jx < MX; jx++) {
      xx = (webdata->dx) * jx;
      idxu = (jx!= MX-1) ?
             NUM_SPECIES : -NUM_SPECIES;
      idxl = (jx!= 0 ) ? NUM_SPECIES : -NUM_SPECIES;
      cxy = IJ_Vptr(cc,jx,jy);
      ratesxy = IJ_Vptr(webdata->rates,jx,jy);
      cratexy = IJ_Vptr(crate,jx,jy);

      /* Get interaction vector at this grid point. */
      WebRates(xx, yy, cxy, ratesxy, webdata);

      /* Loop over species, do differencing, load crate segment. */
#pragma omp parallel for default(shared) private(is, dcyli, dcyui, dcxli, dcxui) schedule(static) num_threads(webdata->nthreads)
      for (is = 0; is < NUM_SPECIES; is++) {

        /* Differencing in y. */
        dcyli = *(cxy+is) - *(cxy - idyl + is) ;
        dcyui = *(cxy + idyu + is) - *(cxy+is);

        /* Differencing in x. */
        dcxli = *(cxy+is) - *(cxy - idxl + is);
        dcxui = *(cxy + idxu +is) - *(cxy+is);

        /* Compute the crate values at (xx,yy): diffusion + reaction. */
        cratexy[is] = coy[is] * (dcyui - dcyli) +
                      cox[is] * (dcxui - dcxli) + ratesxy[is];

      } /* End is loop */
    } /* End of jx loop */
  } /* End of jy loop */
}

/*
 * WebRates: Evaluate reaction rates at a given spatial point.
 * At a given (x,y), evaluate the array of ns reaction terms R.
 */

static void WebRates(realtype xx, realtype yy, realtype *cxy,
                     realtype *ratesxy, UserData webdata)
{
  int is;
  realtype fac;

  /* First pass: ratesxy[is] = (acoef row is) . cxy  (interaction sums). */
  for (is = 0; is < NUM_SPECIES; is++)
    ratesxy[is] = dotprod(NUM_SPECIES, cxy, acoef[is]);

  /* Spatially varying growth factor. */
  fac = ONE + ALPHA*xx*yy + BETA*sin(FOURPI*xx)*sin(FOURPI*yy);

  /* Second pass: scale by the concentration itself. */
  for (is = 0; is < NUM_SPECIES; is++)
    ratesxy[is] = cxy[is]*( bcoef[is]*fac + ratesxy[is] );
}

/*
 * dotprod: dot product routine for realtype arrays, for use by WebRates.
 */

static realtype dotprod(sunindextype size, realtype *x1, realtype *x2)
{
  sunindextype i;
  realtype *xx1, *xx2, temp = ZERO;

  xx1 = x1;
  xx2 = x2;
  for (i = 0; i < size; i++)
    temp += (*xx1++) * (*xx2++);
  return(temp);
}

/*
 * Check function return value...
/*
 * check_retval: uniform status check for SUNDIALS calls.
 *   opt == 0 : the call is a SUNDIALS allocator -- fail on a NULL pointer.
 *   opt == 1 : the call returned an integer status -- fail when *retval < 0.
 *   opt == 2 : the call is a user-level allocator -- fail on a NULL pointer.
 * On failure an error message is printed to stderr and 1 is returned;
 * otherwise 0 is returned.
 */

static int check_retval(void *returnvalue, char *funcname, int opt)
{
  int *errvalue;

  switch (opt) {

  case 0: /* SUNDIALS allocation function: NULL means out of memory */
    if (returnvalue != NULL)
      return(0);
    fprintf(stderr,
            "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
            funcname);
    return(1);

  case 1: /* SUNDIALS status code: negative values signal failure */
    errvalue = (int *) returnvalue;
    if (*errvalue >= 0)
      return(0);
    fprintf(stderr,
            "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n",
            funcname, *errvalue);
    return(1);

  case 2: /* user allocation function: NULL means out of memory */
    if (returnvalue != NULL)
      return(0);
    fprintf(stderr,
            "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
            funcname);
    return(1);

  default: /* unknown opt values are treated as success, as before */
    return(0);
  }
}
lloyds_par8.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <stdbool.h> #include <omp.h> #include "csvparser.h" void vector_init(double *a, int length) { for (int i = 0; i < length; i++) { a[i] = 0; } } void vector_copy(double *dst, double *src, int length) { for (int i = 0; i < length; i++) { dst[i] = src[i]; } } void vector_add(double *dst, double *a, double *b, int length) { for (int i = 0; i < length; i++) { dst[i] = a[i] + b[i]; } } void vector_elementwise_avg(double *dst, double *a, int denominator, int length) { for (int i = 0; i < length; i++) { dst[i] = a[i] / denominator; } } // Program should take K, a data set (.csv), a delimiter, // a binary flag data_contains_header, and a binary flag to drop labels int main(int argc, char *argv[]){ // Seed for consistent cluster center selection // In a working implementation, seeding would be variable (e.g. time(NULL)) srand(111); CsvParser *reader; CsvRow *row; int i,j; if(argc < 6){ printf("Incorrect number of args. Should be 5, received %d\n", argc - 1); exit(1); } int K = atoi(argv[1]); char *data_fp = argv[2]; char *delimiter = argv[3]; int has_header_row = atoi(argv[4]); int drop_labels = atoi(argv[5]); // Take in data set reader = CsvParser_new(data_fp, delimiter, has_header_row); // Get number of columns row = CsvParser_getRow(reader); int num_cols = CsvParser_getNumFields(row); CsvParser_destroy_row(row); if (drop_labels){ num_cols--; } // Get number of rows like lazy people int num_rows = 1; while ((row = CsvParser_getRow(reader))){ num_rows++; CsvParser_destroy_row(row); } // Torch the CsvParser and start again so we can read data in. 
CsvParser_destroy(reader); reader = CsvParser_new(data_fp, delimiter, has_header_row); double **data_matrix = malloc(num_rows * sizeof(double *)); for (int i = 0; i < num_rows; i++) { data_matrix[i] = malloc(num_cols * sizeof(double)); } int row_index = 0; while ((row = CsvParser_getRow(reader))){ const char **row_fields = CsvParser_getFields(row); for (int col_index = 0; col_index < num_cols; col_index++) { data_matrix[row_index][col_index] = atof(row_fields[col_index]); } CsvParser_destroy_row(row); row_index++; } CsvParser_destroy(reader); // Initialize some cluster centers from random rows in our data // Given the fact that we will usually have way more rows than centers, we can // probably just roll a number and reroll if we already rolled it. Collisions // should be relatively infrequent bool collided; double centers[K][num_cols]; if (argc == 7) { int center_indices[3] = {12, 67, 106}; for (i = 0; i < K; i ++) { vector_copy(centers[i], data_matrix[center_indices[i]], num_cols); } } else { for (i = 0; i < K; i++) { int center_indices[K]; collided = true; while (collided) { center_indices[i] = rand() % num_rows; collided = false; for (j = 0; j < i; j++) { if (center_indices[j] == center_indices[i]) { collided = true; break; } } vector_copy(centers[i], data_matrix[center_indices[i]], num_cols); } } } printf("Initial cluster centers:\n"); for (int i = 0; i < K; i++) { for (int j = 0; j < num_cols; j++) { printf("%f ", centers[i][j]); } printf("\n"); } printf("\n"); int num_iterations = 0; int *clusterings = calloc(num_rows, sizeof(int)); bool changes; double tstart = omp_get_wtime(); while (1) { // Assign points to cluster centers changes = false; omp_set_num_threads(8); int center, observation, new_center, col; double idx_diff, current_diff, best_diff; #pragma omp parallel for \ private(center, observation, idx_diff, current_diff, best_diff, new_center, col) \ shared(num_rows, K, data_matrix, centers) for (observation = 0; observation < num_rows; observation++) 
{ best_diff = INFINITY; for (center = 0; center < K; center++) { current_diff = 0; for (col = 0; col < num_cols; col++) { idx_diff = data_matrix[observation][col] - centers[center][col]; current_diff += idx_diff * idx_diff; } if (current_diff < best_diff) { best_diff = current_diff; new_center = center; } } if (clusterings[observation] != new_center) { // NOTE: There is an acceptable data race on changes. Threads only ever // set it to true; lost updates are inconsequential. No need to slow // things down for safety. changes = true; clusterings[observation] = new_center; } } // If we didn't change any cluster assignments, we're at convergence if (!changes) { break; } num_iterations++; // Find cluster means and reassign centers int cluster_index, element, elements_in_cluster; double cluster_means[num_cols]; #pragma omp parallel for \ private(cluster_index, element, elements_in_cluster, cluster_means) \ shared(num_rows, clusterings, data_matrix, K) for (cluster_index = 0; cluster_index < K; cluster_index++) { elements_in_cluster = 0; vector_init(cluster_means, num_cols); // Aggregate in-cluster values we can use to take the clusterings mean for (element = 0; element < num_rows; element++) { if (clusterings[element] == cluster_index) { vector_add(cluster_means, cluster_means, data_matrix[element], num_cols); elements_in_cluster++; } } // Finish calculating cluster mean, and overwrite centers with the new value vector_elementwise_avg(cluster_means, cluster_means, elements_in_cluster, num_cols); vector_copy(centers[cluster_index], cluster_means, num_cols); } } double tend = omp_get_wtime(); printf("\nFinal cluster centers:\n"); for (int i = 0; i < K; i++) { for (int j = 0; j < num_cols; j++) { printf("%f ", centers[i][j]); } printf("\n"); } printf("\nNum iterations: %d\n", num_iterations); printf("Time taken for %d clusters: %f seconds\n", K, tend - tstart); for (int i = 0; i < num_rows; i++) { free(data_matrix[i]); } free(data_matrix); free(clusterings); exit(0); }
morphology.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y % % MM MM O O R R P P H H O O L O O G Y Y % % M M M O O RRRR PPPP HHHHH O O L O O G GGG Y % % M M O O R R P H H O O L O O G G Y % % M M OOO R R P H H OOO LLLLL OOO GGG Y % % % % % % MagickCore Morphology Methods % % % % Software Design % % Anthony Thyssen % % January 2010 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Morphology is the application of various kernels, of any size or shape, to an % image in various ways (typically binary, but not always). % % Convolution (weighted sum or average) is just one specific type of % morphology. Just one that is very common for image bluring and sharpening % effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring. % % This module provides not only a general morphology function, and the ability % to apply more advanced or iterative morphologies, but also functions for the % generation of many different types of kernel arrays from user supplied % arguments. Prehaps even the generation of a kernel from a small image. */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/linked-list.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor-private.h" #include "MagickCore/morphology.h" #include "MagickCore/morphology-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/prepress.h" #include "MagickCore/quantize.h" #include "MagickCore/resource_.h" #include "MagickCore/registry.h" #include "MagickCore/semaphore.h" #include "MagickCore/splay-tree.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" /* Other global definitions used by module. 
 */

/* Keep a running min/max in `assign` as values are scanned. */
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)

/* Integer Factorial Function - for a Binomial kernel */
#if 1
/* Iterative n!; note the product wraps around (unsigned overflow) for
   large n -- acceptable for the small binomial kernels built here. */
static inline size_t fact(size_t n)
{
  size_t f,l;
  for(f=1, l=2; l <= n; f=f*l, l++);
  return(f);
}
#elif 1 /* glibc floating point alternatives */
#define fact(n) ((size_t)tgamma((double)n+1))
#else
#define fact(n) ((size_t)lgamma((double)n+1))
#endif

/* Currently these are only internal to this module */
static void
  CalcKernelMetaData(KernelInfo *),
  ExpandMirrorKernelInfo(KernelInfo *),
  ExpandRotateKernelInfo(KernelInfo *, const double),
  RotateKernelInfo(KernelInfo *, double);

/* Quick function to find last kernel in a kernel list.
   O(n) walk of the singly linked 'next' chain; kernel must be non-NULL. */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
  while (kernel->next != (KernelInfo *) NULL)
    kernel=kernel->next;
  return(kernel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A c q u i r e   K e r n e l   I n f o                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireKernelInfo() takes the given string (generally supplied by the
%  user) and converts it into a Morphology/Convolution Kernel.  This allows
%  users to specify a kernel from a number of pre-defined kernels, or to fully
%  specify their own kernel for a specific Convolution or Morphology
%  Operation.
%
%  The kernel so generated can be any rectangular array of floating point
%  values (doubles) with the 'control point' or 'pixel being affected'
%  anywhere within that array of values.
%
%  Previously IM was restricted to a square of odd size using the exact
%  center as origin, this is no longer the case, and any rectangular kernel
%  with any value being declared the origin. This in turn allows the use of
%  highly asymmetrical kernels.
% % The floating point values in the kernel can also include a special value % known as 'nan' or 'not a number' to indicate that this value is not part % of the kernel array. This allows you to shaped the kernel within its % rectangular area. That is 'nan' values provide a 'mask' for the kernel % shape. However at least one non-nan value must be provided for correct % working of a kernel. % % The returned kernel should be freed using the DestroyKernelInfo() when you % are finished with it. Do not free this memory yourself. % % Input kernel defintion strings can consist of any of three types. % % "name:args[[@><]" % Select from one of the built in kernels, using the name and % geometry arguments supplied. See AcquireKernelBuiltIn() % % "WxH[+X+Y][@><]:num, num, num ..." % a kernel of size W by H, with W*H floating point numbers following. % the 'center' can be optionally be defined at +X+Y (such that +0+0 % is top left corner). If not defined the pixel in the center, for % odd sizes, or to the immediate top or left of center for even sizes % is automatically selected. % % "num, num, num, num, ..." % list of floating point numbers defining an 'old style' odd sized % square kernel. At least 9 values should be provided for a 3x3 % square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc. % Values can be space or comma separated. This is not recommended. % % You can define a 'list of kernels' which can be used by some morphology % operators A list is defined as a semi-colon separated list kernels. % % " kernel ; kernel ; kernel ; " % % Any extra ';' characters, at start, end or between kernel defintions are % simply ignored. % % The special flags will expand a single kernel, into a list of rotated % kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree % cyclic rotations, while a '>' will generate a list of 90-degree rotations. 
% The '<' also expands using 90-degree rotates, but giving a 180-degree
% reflected kernel before the +/- 90-degree rotations, which can be important
% for Thinning operations.
%
% Note that 'name' kernels will start with an alphabetic character while the
% new kernel specification has a ':' character in its specification string.
% If neither is the case, it is assumed an old style of a simple list of
% numbers generating a odd-sized square kernel has been given.
%
% The format of the AcquireKernelInfo method is:
%
%      KernelInfo *AcquireKernelInfo(const char *kernel_string)
%
% A description of each parameter follows:
%
%    o kernel_string: the Morphology/Convolution kernel wanted.
%
*/

/* This was separated so that it could be used as a separate
** array input handling function, such as for -color-matrix.
** Parses one "WxH[+X+Y]:v,v,..." or bare "v,v,v,..." kernel definition
** (up to the next ';') into a freshly allocated KernelInfo.  Returns NULL
** on any parse or allocation failure; caller frees via DestroyKernelInfo().
*/
static KernelInfo *ParseKernelArray(const char *kernel_string)
{
  KernelInfo
    *kernel;

  char
    token[MagickPathExtent];

  const char
    *p,
    *end;

  register ssize_t
    i;

  double
    nan = sqrt((double)-1.0);  /* Special Value : Not A Number */

  MagickStatusType
    flags;

  GeometryInfo
    args;

  /* Allocate and zero the kernel structure. */
  kernel=(KernelInfo *) AcquireQuantumMemory(1,sizeof(*kernel));
  if (kernel == (KernelInfo *) NULL)
    return(kernel);
  (void) ResetMagickMemory(kernel,0,sizeof(*kernel));
  kernel->minimum = kernel->maximum = kernel->angle = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  kernel->type = UserDefinedKernel;
  kernel->next = (KernelInfo *) NULL;
  kernel->signature=MagickCoreSignature;
  if (kernel_string == (const char *) NULL)
    return(kernel);

  /* find end of this specific kernel definition string */
  end = strchr(kernel_string, ';');
  if ( end == (char *) NULL )
    end = strchr(kernel_string, '\0');

  /* clear flags - for Expanding kernel lists through rotations */
  flags = NoValue;

  /* Has a ':' in argument - New user kernel specification
     FUTURE: this split on ':' could be done by StringToken()
   */
  p = strchr(kernel_string, ':');
  if ( p != (char *) NULL && p < end)
    {
      /* ParseGeometry() needs the geometry separated!  -- Arrgghh */
      /* NOTE(review): p-kernel_string is copied into the MagickPathExtent
         sized token buffer with no length check; a geometry prefix longer
         than MagickPathExtent would overflow it -- confirm inputs are
         length-limited upstream. */
      memcpy(token, kernel_string, (size_t) (p-kernel_string));
      token[p-kernel_string] = '\0';
      SetGeometryInfo(&args);
      flags = ParseGeometry(token, &args);

      /* Size handling and checks of geometry settings */
      if ( (flags & WidthValue) == 0 ) /* if no width then */
        args.rho = args.sigma;         /* then  width = height */
      if ( args.rho < 1.0 )            /* if width too small */
        args.rho = 1.0;                /* then  width = 1 */
      if ( args.sigma < 1.0 )          /* if height too small */
        args.sigma = args.rho;         /* then  height = width */
      kernel->width = (size_t)args.rho;
      kernel->height = (size_t)args.sigma;

      /* Offset Handling and Checks */
      if ( args.xi < 0.0 || args.psi < 0.0 )
        return(DestroyKernelInfo(kernel));
      kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi
                                        : (ssize_t) (kernel->width-1)/2;
      kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi
                                        : (ssize_t) (kernel->height-1)/2;
      if ( kernel->x >= (ssize_t) kernel->width ||
           kernel->y >= (ssize_t) kernel->height )
        return(DestroyKernelInfo(kernel));

      p++; /* advance beyond the ':' */
    }
  else
    { /* ELSE - Old old specification, forming odd-square kernel */
      /* count up number of values given */
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
      for (i=0; p < end; i++)
      {
        GetNextToken(p,&p,MagickPathExtent,token);
        if (*token == ',')
          GetNextToken(p,&p,MagickPathExtent,token);
      }
      /* set the size of the kernel - old sized square */
      kernel->width = kernel->height= (size_t) sqrt((double) i+1.0);
      kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
      /* rewind to the start so the values can be re-read below */
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
    }

  /* Read in the kernel values from rest of input string argument */
  kernel->values=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel->width,kernel->height*sizeof(*kernel->values)));
  if (kernel->values == (MagickRealType *) NULL)
    return(DestroyKernelInfo(kernel));
  kernel->minimum=MagickMaximumValue;
  kernel->maximum=(-MagickMaximumValue);
  kernel->negative_range = kernel->positive_range = 0.0;

  for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++)
  {
    GetNextToken(p,&p,MagickPathExtent,token);
    if (*token == ',')
      GetNextToken(p,&p,MagickPathExtent,token);
    if ( LocaleCompare("nan",token) == 0
        || LocaleCompare("-",token) == 0 )
      {
        kernel->values[i] = nan; /* this value is not part of neighbourhood */
      }
    else
      {
        /* track the min/max and the separate positive/negative sums,
           used later for kernel normalization */
        kernel->values[i] = StringToDouble(token,(char **) NULL);
        ( kernel->values[i] < 0)
            ?  ( kernel->negative_range += kernel->values[i] )
            :  ( kernel->positive_range += kernel->values[i] );
        Minimize(kernel->minimum, kernel->values[i]);
        Maximize(kernel->maximum, kernel->values[i]);
      }
  }

  /* sanity check -- no more values in kernel definition */
  GetNextToken(p,&p,MagickPathExtent,token);
  if ( *token != '\0' && *token != ';' && *token != '\'' )
    return(DestroyKernelInfo(kernel));

#if 0
  /* this was the old method of handling a incomplete kernel */
  if ( i < (ssize_t) (kernel->width*kernel->height) ) {
    Minimize(kernel->minimum, kernel->values[i]);
    Maximize(kernel->maximum, kernel->values[i]);
    for ( ; i < (ssize_t) (kernel->width*kernel->height); i++)
      kernel->values[i]=0.0;
  }
#else
  /* Number of values for kernel was not enough - Report Error */
  if ( i < (ssize_t) (kernel->width*kernel->height) )
    return(DestroyKernelInfo(kernel));
#endif

  /* check that we received at least one real (non-nan) value!
   */
  if (kernel->minimum == MagickMaximumValue)
    return(DestroyKernelInfo(kernel));

  if ( (flags & AreaValue) != 0 )         /* '@' symbol in kernel size */
    ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */
  else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
    ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */
  else if ( (flags & LessValue) != 0 )    /* '<' symbol in kernel args */
    ExpandMirrorKernelInfo(kernel);       /* 90 degree mirror rotate */

  return(kernel);
}

/* Parse a 'named' kernel specification ("name:args"), look up the name in
   MagickKernelOptions, fill in per-type argument defaults, and build the
   kernel via AcquireKernelBuiltIn().  Returns NULL when the name is not a
   valid built-in kernel. */
static KernelInfo *ParseKernelName(const char *kernel_string,
  ExceptionInfo *exception)
{
  char
    token[MagickPathExtent];

  const char
    *p,
    *end;

  GeometryInfo
    args;

  KernelInfo
    *kernel;

  MagickStatusType
    flags;

  ssize_t
    type;

  /* Parse special 'named' kernel */
  GetNextToken(kernel_string,&p,MagickPathExtent,token);
  type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
  if ( type < 0 || type == UserDefinedKernel )
    return((KernelInfo *) NULL);  /* not a valid named kernel */

  /* skip the separators between the name and its arguments */
  while (((isspace((int) ((unsigned char) *p)) != 0) ||
          (*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';'))
    p++;

  end = strchr(p, ';'); /* end of this kernel definition */
  if ( end == (char *) NULL )
    end = strchr(p, '\0');

  /* ParseGeometry() needs the geometry separated!  -- Arrgghh */
  /* NOTE(review): end-p is copied into the MagickPathExtent sized token
     buffer without a bound check -- confirm inputs are length-limited
     upstream. */
  memcpy(token, p, (size_t) (end-p));
  token[end-p] = '\0';
  SetGeometryInfo(&args);
  flags = ParseGeometry(token, &args);

#if 0
  /* For Debugging Geometry Input */
  (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
    flags, args.rho, args.sigma, args.xi, args.psi );
#endif

  /* special handling of missing values in input string */
  switch( type ) {
    /* Shape Kernel Defaults */
    case UnityKernel:
      if ( (flags & WidthValue) == 0 )
        args.rho = 1.0;    /* Default scale = 1.0, zero is valid */
      break;
    case SquareKernel:
    case DiamondKernel:
    case OctagonKernel:
    case DiskKernel:
    case PlusKernel:
    case CrossKernel:
      if ( (flags & HeightValue) == 0 )
        args.sigma = 1.0;    /* Default scale = 1.0, zero is valid */
      break;
    case RingKernel:
      if ( (flags & XValue) == 0 )
        args.xi = 1.0;       /* Default scale = 1.0, zero is valid */
      break;
    case RectangleKernel:    /* Rectangle - set size defaults */
      if ( (flags & WidthValue) == 0 ) /* if no width then */
        args.rho = args.sigma;         /* then width = height */
      if ( args.rho < 1.0 )            /* if width too small */
        args.rho = 3;                  /* then width = 3 */
      if ( args.sigma < 1.0 )          /* if height too small */
        args.sigma = args.rho;         /* then height = width */
      if ( (flags & XValue) == 0 )     /* center offset if not defined */
        args.xi = (double)(((ssize_t)args.rho-1)/2);
      if ( (flags & YValue) == 0 )
        args.psi = (double)(((ssize_t)args.sigma-1)/2);
      break;
    /* Distance Kernel Defaults */
    case ChebyshevKernel:
    case ManhattanKernel:
    case OctagonalKernel:
    case EuclideanKernel:
      if ( (flags & HeightValue) == 0 )           /* no distance scale */
        args.sigma = 100.0;                       /* default distance scaling */
      else if ( (flags & AspectValue ) != 0 )     /* '!' flag */
        args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */
      else if ( (flags & PercentValue ) != 0 )    /* '%' flag */
        args.sigma *= QuantumRange/100.0;         /* percentage of color range */
      break;
    default:
      break;
  }

  kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args, exception);
  if ( kernel == (KernelInfo *) NULL )
    return(kernel);

  /* global expand to rotated kernel list - only for single kernels */
  if ( kernel->next == (KernelInfo *) NULL ) {
    if ( (flags & AreaValue) != 0 )         /* '@' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 45.0);
    else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 90.0);
    else if ( (flags & LessValue) != 0 )    /* '<' symbol in kernel args */
      ExpandMirrorKernelInfo(kernel);
  }

  return(kernel);
}

MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string,
  ExceptionInfo *exception)
{
  KernelInfo
    *kernel,
    *new_kernel;

  char
    *kernel_cache,
    token[MagickPathExtent];

  const char
    *p;

  /* NOTE(review): a NULL kernel_string is forwarded to ParseKernelArray(),
     which then returns an empty UserDefinedKernel -- presumably
     intentional; confirm. */
  if (kernel_string == (const char *) NULL)
    return(ParseKernelArray(kernel_string));
  p=kernel_string;
  kernel_cache=(char *) NULL;
  if (*kernel_string == '@')
    {
      /* '@file' syntax: load the kernel list from the named file. */
      kernel_cache=FileToString(kernel_string+1,~0UL,exception);
      if (kernel_cache == (char *) NULL)
        return((KernelInfo *) NULL);
      p=(const char *) kernel_cache;
    }
  kernel=NULL;
  while (GetNextToken(p,(const char **) NULL,MagickPathExtent,token),
         *token != '\0')
  {
    /* ignore extra or multiple ';' kernel separators */
    if (*token != ';')
      {
        /* tokens starting with alpha is a Named kernel */
        if (isalpha((int) ((unsigned char) *token)) != 0)
          new_kernel=ParseKernelName(p,exception);
        else /* otherwise a user defined kernel array */
          new_kernel=ParseKernelArray(p);

        /* Error handling -- this is not proper error handling!
         */
        if (new_kernel == (KernelInfo *) NULL)
          {
            /* NOTE(review): kernel_cache (allocated on the '@file' path) is
               leaked on this error return -- it is only destroyed on the
               success path below. */
            if (kernel != (KernelInfo *) NULL)
              kernel=DestroyKernelInfo(kernel);
            return((KernelInfo *) NULL);
          }

        /* initialise or append the kernel list */
        if (kernel == (KernelInfo *) NULL)
          kernel=new_kernel;
        else
          LastKernelInfo(kernel)->next=new_kernel;
      }

    /* look for the next kernel in list */
    p=strchr(p,';');
    if (p == (char *) NULL)
      break;
    p++;
  }
  if (kernel_cache != (char *) NULL)
    kernel_cache=DestroyString(kernel_cache);
  return(kernel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A c q u i r e   K e r n e l   B u i l t I n                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireKernelBuiltIn() returned one of the 'named' built-in types of
%  kernels used for special purposes such as gaussian blurring, skeleton
%  pruning, and edge distance determination.
%
%  They take a KernelType, and a set of geometry style arguments, which were
%  typically decoded from a user supplied string, or from a more complex
%  Morphology Method that was requested.
%
%  The format of the AcquireKernelBuiltIn method is:
%
%      KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
%           const GeometryInfo args)
%
%  A description of each parameter follows:
%
%    o type: the pre-defined type of kernel wanted
%
%    o args: arguments defining or modifying the kernel
%
%  Convolution Kernels
%
%    Unity
%       The a No-Op or Scaling single element kernel.
%
%    Gaussian:{radius},{sigma}
%       Generate a two-dimensional gaussian kernel, as used by -gaussian.
%       The sigma for the curve is required.  The resulting kernel is
%       normalized,
%
%       If 'sigma' is zero, you get a single pixel on a field of zeros.
%
%       NOTE: that the 'radius' is optional, but if provided can limit (clip)
%       the final size of the resulting kernel to a square 2*radius+1 in size.
%       The radius should be at least 2 times that of the sigma value, or
%       sever clipping and aliasing may result.
If not given or set to 0 the % radius will be determined so as to produce the best minimal error % result, which is usally much larger than is normally needed. % % LoG:{radius},{sigma} % "Laplacian of a Gaussian" or "Mexician Hat" Kernel. % The supposed ideal edge detection, zero-summing kernel. % % An alturnative to this kernel is to use a "DoG" with a sigma ratio of % approx 1.6 (according to wikipedia). % % DoG:{radius},{sigma1},{sigma2} % "Difference of Gaussians" Kernel. % As "Gaussian" but with a gaussian produced by 'sigma2' subtracted % from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1. % The result is a zero-summing kernel. % % Blur:{radius},{sigma}[,{angle}] % Generates a 1 dimensional or linear gaussian blur, at the angle given % (current restricted to orthogonal angles). If a 'radius' is given the % kernel is clipped to a width of 2*radius+1. Kernel can be rotated % by a 90 degree angle. % % If 'sigma' is zero, you get a single pixel on a field of zeros. % % Note that two convolutions with two "Blur" kernels perpendicular to % each other, is equivalent to a far larger "Gaussian" kernel with the % same sigma value, However it is much faster to apply. This is how the % "-blur" operator actually works. % % Comet:{width},{sigma},{angle} % Blur in one direction only, much like how a bright object leaves % a comet like trail. The Kernel is actually half a gaussian curve, % Adding two such blurs in opposite directions produces a Blur Kernel. % Angle can be rotated in multiples of 90 degrees. % % Note that the first argument is the width of the kernel and not the % radius of the kernel. % % Binomial:[{radius}] % Generate a discrete kernel using a 2 dimentional Pascel's Triangle % of values. Used for special forma of image filters. % % # Still to be implemented... % # % # Filter2D % # Filter1D % # Set kernel values using a resize filter, and given scale (sigma) % # Cylindrical or Linear. Is this possible with an image? 
% # % % Named Constant Convolution Kernels % % All these are unscaled, zero-summing kernels by default. As such for % non-HDRI version of ImageMagick some form of normalization, user scaling, % and biasing the results is recommended, to prevent the resulting image % being 'clipped'. % % The 3x3 kernels (most of these) can be circularly rotated in multiples of % 45 degrees to generate the 8 angled varients of each of the kernels. % % Laplacian:{type} % Discrete Lapacian Kernels, (without normalization) % Type 0 : 3x3 with center:8 surounded by -1 (8 neighbourhood) % Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood) % Type 2 : 3x3 with center:4 edge:1 corner:-2 % Type 3 : 3x3 with center:4 edge:-2 corner:1 % Type 5 : 5x5 laplacian % Type 7 : 7x7 laplacian % Type 15 : 5x5 LoG (sigma approx 1.4) % Type 19 : 9x9 LoG (sigma approx 1.4) % % Sobel:{angle} % Sobel 'Edge' convolution kernel (3x3) % | -1, 0, 1 | % | -2, 0,-2 | % | -1, 0, 1 | % % Roberts:{angle} % Roberts convolution kernel (3x3) % | 0, 0, 0 | % | -1, 1, 0 | % | 0, 0, 0 | % % Prewitt:{angle} % Prewitt Edge convolution kernel (3x3) % | -1, 0, 1 | % | -1, 0, 1 | % | -1, 0, 1 | % % Compass:{angle} % Prewitt's "Compass" convolution kernel (3x3) % | -1, 1, 1 | % | -1,-2, 1 | % | -1, 1, 1 | % % Kirsch:{angle} % Kirsch's "Compass" convolution kernel (3x3) % | -3,-3, 5 | % | -3, 0, 5 | % | -3,-3, 5 | % % FreiChen:{angle} % Frei-Chen Edge Detector is based on a kernel that is similar to % the Sobel Kernel, but is designed to be isotropic. That is it takes % into account the distance of the diagonal in the kernel. % % | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | % | 1, 0, -1 | % % FreiChen:{type},{angle} % % Frei-Chen Pre-weighted kernels... % % Type 0: default un-nomalized version shown above. % % Type 1: Orthogonal Kernel (same as type 11 below) % | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 1, 0, -1 | % % Type 2: Diagonal form of Kernel... 
% | 1, sqrt(2), 0 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 0, -sqrt(2) -1 | % % However this kernel is als at the heart of the FreiChen Edge Detection % Process which uses a set of 9 specially weighted kernel. These 9 % kernels not be normalized, but directly applied to the image. The % results is then added together, to produce the intensity of an edge in % a specific direction. The square root of the pixel value can then be % taken as the cosine of the edge, and at least 2 such runs at 90 degrees % from each other, both the direction and the strength of the edge can be % determined. % % Type 10: All 9 of the following pre-weighted kernels... % % Type 11: | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 1, 0, -1 | % % Type 12: | 1, sqrt(2), 1 | % | 0, 0, 0 | / 2*sqrt(2) % | 1, sqrt(2), 1 | % % Type 13: | sqrt(2), -1, 0 | % | -1, 0, 1 | / 2*sqrt(2) % | 0, 1, -sqrt(2) | % % Type 14: | 0, 1, -sqrt(2) | % | -1, 0, 1 | / 2*sqrt(2) % | sqrt(2), -1, 0 | % % Type 15: | 0, -1, 0 | % | 1, 0, 1 | / 2 % | 0, -1, 0 | % % Type 16: | 1, 0, -1 | % | 0, 0, 0 | / 2 % | -1, 0, 1 | % % Type 17: | 1, -2, 1 | % | -2, 4, -2 | / 6 % | -1, -2, 1 | % % Type 18: | -2, 1, -2 | % | 1, 4, 1 | / 6 % | -2, 1, -2 | % % Type 19: | 1, 1, 1 | % | 1, 1, 1 | / 3 % | 1, 1, 1 | % % The first 4 are for edge detection, the next 4 are for line detection % and the last is to add a average component to the results. % % Using a special type of '-1' will return all 9 pre-weighted kernels % as a multi-kernel list, so that you can use them directly (without % normalization) with the special "-set option:morphology:compose Plus" % setting to apply the full FreiChen Edge Detection Technique. % % If 'type' is large it will be taken to be an actual rotation angle for % the default FreiChen (type 0) kernel. As such FreiChen:45 will look % like a Sobel:45 but with 'sqrt(2)' instead of '2' values. 
% % WARNING: The above was laid out as per % http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf % But rotated 90 degrees so direction is from left rather than the top. % I have yet to find any secondary confirmation of the above. The only % other source found was actual source code at % http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf % Neither paper defines the kernels in a way that looks logical or % correct when taken as a whole. % % Boolean Kernels % % Diamond:[{radius}[,{scale}]] % Generate a diamond shaped kernel with given radius to the points. % Kernel size will again be radius*2+1 square and defaults to radius 1, % generating a 3x3 kernel that is slightly larger than a square. % % Square:[{radius}[,{scale}]] % Generate a square shaped kernel of size radius*2+1, and defaulting % to a 3x3 (radius 1). % % Octagon:[{radius}[,{scale}]] % Generate octagonal shaped kernel of given radius and constant scale. % Default radius is 3 producing a 7x7 kernel. A radius of 1 will result % in "Diamond" kernel. % % Disk:[{radius}[,{scale}]] % Generate a binary disk, thresholded at the radius given, the radius % may be a floating-point value. Final Kernel size is floor(radius)*2+1 % square. A radius of 5.3 is the default. % % NOTE: That low radii Disk kernels produce the same results as % many of the previously defined kernels, but differ greatly at larger % radii. Here is a table of equivalences... % "Disk:1" => "Diamond", "Octagon:1", or "Cross:1" % "Disk:1.5" => "Square" % "Disk:2" => "Diamond:2" % "Disk:2.5" => "Octagon" % "Disk:2.9" => "Square:2" % "Disk:3.5" => "Octagon:3" % "Disk:4.5" => "Octagon:4" % "Disk:5.4" => "Octagon:5" % "Disk:6.4" => "Octagon:6" % All other Disk shapes are unique to this kernel, but because a "Disk" % is more circular when using a larger radius, using a larger radius is % preferred over iterating the morphological operation. % % Rectangle:{geometry} % Simply generate a rectangle of 1's with the size given. 
You can also % specify the location of the 'control point', otherwise the closest % pixel to the center of the rectangle is selected. % % Properly centered and odd sized rectangles work the best. % % Symbol Dilation Kernels % % These kernel is not a good general morphological kernel, but is used % more for highlighting and marking any single pixels in an image using, % a "Dilate" method as appropriate. % % For the same reasons iterating these kernels does not produce the % same result as using a larger radius for the symbol. % % Plus:[{radius}[,{scale}]] % Cross:[{radius}[,{scale}]] % Generate a kernel in the shape of a 'plus' or a 'cross' with % a each arm the length of the given radius (default 2). % % NOTE: "plus:1" is equivalent to a "Diamond" kernel. % % Ring:{radius1},{radius2}[,{scale}] % A ring of the values given that falls between the two radii. % Defaults to a ring of approximataly 3 radius in a 7x7 kernel. % This is the 'edge' pixels of the default "Disk" kernel, % More specifically, "Ring" -> "Ring:2.5,3.5,1.0" % % Hit and Miss Kernels % % Peak:radius1,radius2 % Find any peak larger than the pixels the fall between the two radii. % The default ring of pixels is as per "Ring". 
% Edges % Find flat orthogonal edges of a binary shape % Corners % Find 90 degree corners of a binary shape % Diagonals:type % A special kernel to thin the 'outside' of diagonals % LineEnds:type % Find end points of lines (for pruning a skeleton) % Two types of line ends (default to both) can be searched for % Type 0: All line ends % Type 1: single kernel for 4-connected line ends % Type 2: single kernel for simple line ends % LineJunctions % Find three line junctions (within a skeleton) % Type 0: all line junctions % Type 1: Y Junction kernel % Type 2: Diagonal T Junction kernel % Type 3: Orthogonal T Junction kernel % Type 4: Diagonal X Junction kernel % Type 5: Orthogonal + Junction kernel % Ridges:type % Find single pixel ridges or thin lines % Type 1: Find single pixel thick lines and ridges % Type 2: Find two pixel thick lines and ridges % ConvexHull % Octagonal Thickening Kernel, to generate convex hulls of 45 degrees % Skeleton:type % Traditional skeleton generating kernels. % Type 1: Traditional Skeleton kernel (4 connected skeleton) % Type 2: HIPR2 Skeleton kernel (8 connected skeleton) % Type 3: Thinning skeleton based on a research paper by % Dan S. Bloomberg (Default Type) % ThinSE:type % A huge variety of Thinning Kernels designed to preserve connectivity. % many other kernel sets use these kernels as source definitions. % Type numbers are 41-49, 81-89, 481, and 482 which are based on % the super and sub notations used in the source research paper. % % Distance Measuring Kernels % % Different types of distance measuring methods, which are used with % the 'Distance' morphology method for generating a gradient based on % distance from an edge of a binary shape, though there is a technique % for handling an anti-aliased shape. % % See the 'Distance' Morphological Method, for information of how it is % applied. 
% % Chebyshev:[{radius}][x{scale}[%!]] % Chebyshev Distance (also known as Tchebychev or Chessboard distance) % is a value of one to any neighbour, orthogonal or diagonal. One way % of thinking of it is the number of squares a 'King' or 'Queen' in % chess needs to traverse to reach any other position on a chess board. % It results in a 'square' like distance function, but one where % diagonals are given a value that is closer than expected. % % Manhattan:[{radius}][x{scale}[%!]] % Manhattan Distance (also known as Rectilinear, City Block, or the Taxi % Cab distance metric), it is the distance needed when you can only % travel in horizontal or vertical directions only. It is the % distance a 'Rook' in chess would have to travel, and results in a % diamond like distances, where diagonals are further than expected. % % Octagonal:[{radius}][x{scale}[%!]] % An interleaving of Manhattan and Chebyshev metrics producing an % increasing octagonally shaped distance. Distances match those of % the "Octagon" shaped kernel of the same radius. The minimum radius % and default is 2, producing a 5x5 kernel. % % Euclidean:[{radius}][x{scale}[%!]] % Euclidean distance is the 'direct' or 'as the crow flies' distance. % However by default the kernel size only has a radius of 1, which % limits the distance to 'Knight' like moves, with only orthogonal and % diagonal measurements being correct. As such for the default kernel % you will get octagonal like distance function. % % However using a larger radius such as "Euclidean:4" you will get a % much smoother distance gradient from the edge of the shape. Especially % if the image is pre-processed to include any anti-aliasing pixels. % Of course a larger kernel is slower to use, and not always needed. % % The first three Distance Measuring Kernels will only generate distances % of exact multiples of {scale} in binary images. As such you can use a % scale of 1 without losing any information. 
However you also need some % scaling when handling non-binary anti-aliased shapes. % % The "Euclidean" Distance Kernel however does generate a non-integer % fractional results, and as such scaling is vital even for binary shapes. % */ MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type, const GeometryInfo *args,ExceptionInfo *exception) { KernelInfo *kernel; register ssize_t i; register ssize_t u, v; double nan = sqrt((double)-1.0); /* Special Value : Not A Number */ /* Generate a new empty kernel if needed */ kernel=(KernelInfo *) NULL; switch(type) { case UndefinedKernel: /* These should not call this function */ case UserDefinedKernel: assert("Should not call this function" != (char *) NULL); break; case LaplacianKernel: /* Named Descrete Convolution Kernels */ case SobelKernel: /* these are defined using other kernels */ case RobertsKernel: case PrewittKernel: case CompassKernel: case KirschKernel: case FreiChenKernel: case EdgesKernel: /* Hit and Miss kernels */ case CornersKernel: case DiagonalsKernel: case LineEndsKernel: case LineJunctionsKernel: case RidgesKernel: case ConvexHullKernel: case SkeletonKernel: case ThinSEKernel: break; /* A pre-generated kernel is not needed */ #if 0 /* set to 1 to do a compile-time check that we haven't missed anything */ case UnityKernel: case GaussianKernel: case DoGKernel: case LoGKernel: case BlurKernel: case CometKernel: case BinomialKernel: case DiamondKernel: case SquareKernel: case RectangleKernel: case OctagonKernel: case DiskKernel: case PlusKernel: case CrossKernel: case RingKernel: case PeaksKernel: case ChebyshevKernel: case ManhattanKernel: case OctangonalKernel: case EuclideanKernel: #else default: #endif /* Generate the base Kernel Structure */ kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel)); if (kernel == (KernelInfo *) NULL) return(kernel); (void) ResetMagickMemory(kernel,0,sizeof(*kernel)); kernel->minimum = kernel->maximum = kernel->angle = 0.0; kernel->negative_range = 
kernel->positive_range = 0.0; kernel->type = type; kernel->next = (KernelInfo *) NULL; kernel->signature=MagickCoreSignature; break; } switch(type) { /* Convolution Kernels */ case UnityKernel: { kernel->height = kernel->width = (size_t) 1; kernel->x = kernel->y = (ssize_t) 0; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(1,sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); kernel->maximum = kernel->values[0] = args->rho; break; } break; case GaussianKernel: case DoGKernel: case LoGKernel: { double sigma = fabs(args->sigma), sigma2 = fabs(args->xi), A, B, R; if ( args->rho >= 1.0 ) kernel->width = (size_t)args->rho*2+1; else if ( (type != DoGKernel) || (sigma >= sigma2) ) kernel->width = GetOptimalKernelWidth2D(args->rho,sigma); else kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2); kernel->height = kernel->width; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* WARNING: The following generates a 'sampled gaussian' kernel. * What we really want is a 'discrete gaussian' kernel. 
* * How to do this is I don't know, but appears to be basied on the * Error Function 'erf()' (intergral of a gaussian) */ if ( type == GaussianKernel || type == DoGKernel ) { /* Calculate a Gaussian, OR positive half of a DoG */ if ( sigma > MagickEpsilon ) { A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ B = (double) (1.0/(Magick2PI*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B; } else /* limiting case - a unity (normalized Dirac) kernel */ { (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } } if ( type == DoGKernel ) { /* Subtract a Negative Gaussian for "Difference of Gaussian" */ if ( sigma2 > MagickEpsilon ) { sigma = sigma2; /* simplify loop expressions */ A = 1.0/(2.0*sigma*sigma); B = (double) (1.0/(Magick2PI*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B; } else /* limiting case - a unity (normalized Dirac) kernel */ kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0; } if ( type == LoGKernel ) { /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */ if ( sigma > MagickEpsilon ) { A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { R = ((double)(u*u+v*v))*A; kernel->values[i] = (1-R)*exp(-R)*B; } } else /* special case - generate a unity kernel */ { (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } } /* Note the above kernels may have been 'clipped' by a user defined ** radius, 
producing a smaller (darker) kernel. Also for very small ** sigma's (> 0.1) the central value becomes larger than one, and thus ** producing a very bright kernel. ** ** Normalization will still be needed. */ /* Normalize the 2D Gaussian Kernel ** ** NB: a CorrelateNormalize performs a normal Normalize if ** there are no negative values. */ CalcKernelMetaData(kernel); /* the other kernel meta-data */ ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue); break; } case BlurKernel: { double sigma = fabs(args->sigma), alpha, beta; if ( args->rho >= 1.0 ) kernel->width = (size_t)args->rho*2+1; else kernel->width = GetOptimalKernelWidth1D(args->rho,sigma); kernel->height = 1; kernel->x = (ssize_t) (kernel->width-1)/2; kernel->y = 0; kernel->negative_range = kernel->positive_range = 0.0; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); #if 1 #define KernelRank 3 /* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix). ** It generates a gaussian 3 times the width, and compresses it into ** the expected range. This produces a closer normalization of the ** resulting kernel, especially for very low sigma values. ** As such while wierd it is prefered. ** ** I am told this method originally came from Photoshop. ** ** A properly normalized curve is generated (apart from edge clipping) ** even though we later normalize the result (for edge clipping) ** to allow the correct generation of a "Difference of Blurs". 
*/ /* initialize */ v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */ (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); /* Calculate a Positive 1D Gaussian */ if ( sigma > MagickEpsilon ) { sigma *= KernelRank; /* simplify loop expressions */ alpha = 1.0/(2.0*sigma*sigma); beta= (double) (1.0/(MagickSQ2PI*sigma )); for ( u=-v; u <= v; u++) { kernel->values[(u+v)/KernelRank] += exp(-((double)(u*u))*alpha)*beta; } } else /* special case - generate a unity kernel */ kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; #else /* Direct calculation without curve averaging This is equivelent to a KernelRank of 1 */ /* Calculate a Positive Gaussian */ if ( sigma > MagickEpsilon ) { alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ beta = 1.0/(MagickSQ2PI*sigma); for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = exp(-((double)(u*u))*alpha)*beta; } else /* special case - generate a unity kernel */ { (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } #endif /* Note the above kernel may have been 'clipped' by a user defined ** radius, producing a smaller (darker) kernel. Also for very small ** sigma's (> 0.1) the central value becomes larger than one, as a ** result of not generating a actual 'discrete' kernel, and thus ** producing a very bright 'impulse'. ** ** Becuase of these two factors Normalization is required! */ /* Normalize the 1D Gaussian Kernel ** ** NB: a CorrelateNormalize performs a normal Normalize if ** there are no negative values. 
*/ CalcKernelMetaData(kernel); /* the other kernel meta-data */ ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue); /* rotate the 1D kernel by given angle */ RotateKernelInfo(kernel, args->xi ); break; } case CometKernel: { double sigma = fabs(args->sigma), A; if ( args->rho < 1.0 ) kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1; else kernel->width = (size_t)args->rho; kernel->x = kernel->y = 0; kernel->height = 1; kernel->negative_range = kernel->positive_range = 0.0; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* A comet blur is half a 1D gaussian curve, so that the object is ** blurred in one direction only. This may not be quite the right ** curve to use so may change in the future. The function must be ** normalised after generation, which also resolves any clipping. ** ** As we are normalizing and not subtracting gaussians, ** there is no need for a divisor in the gaussian formula ** ** It is less comples */ if ( sigma > MagickEpsilon ) { #if 1 #define KernelRank 3 v = (ssize_t) kernel->width*KernelRank; /* start/end points */ (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*sizeof(*kernel->values)); sigma *= KernelRank; /* simplify the loop expression */ A = 1.0/(2.0*sigma*sigma); /* B = 1.0/(MagickSQ2PI*sigma); */ for ( u=0; u < v; u++) { kernel->values[u/KernelRank] += exp(-((double)(u*u))*A); /* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */ } for (i=0; i < (ssize_t) kernel->width; i++) kernel->positive_range += kernel->values[i]; #else A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */ /* B = 1.0/(MagickSQ2PI*sigma); */ for ( i=0; i < (ssize_t) kernel->width; i++) kernel->positive_range += kernel->values[i] = exp(-((double)(i*i))*A); /* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */ #endif } else /* special 
case - generate a unity kernel */ { (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; kernel->positive_range = 1.0; } kernel->minimum = 0.0; kernel->maximum = kernel->values[0]; kernel->negative_range = 0.0; ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */ RotateKernelInfo(kernel, args->xi); /* Rotate by angle */ break; } case BinomialKernel: { size_t order_f; if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; order_f = fact(kernel->width-1); kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values within diamond area to scale given */ for ( i=0, v=0; v < (ssize_t)kernel->height; v++) { size_t alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) ); for ( u=0; u < (ssize_t)kernel->width; u++, i++) kernel->positive_range += kernel->values[i] = (double) (alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) )); } kernel->minimum = 1.0; kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width]; kernel->negative_range = 0.0; break; } /* Convolution Kernels - Well Known Named Constant Kernels */ case LaplacianKernel: { switch ( (int) args->rho ) { case 0: default: /* laplacian square filter -- default */ kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1"); break; case 1: /* laplacian diamond filter */ kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0"); break; case 2: kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2"); break; case 3: kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1"); break; case 5: /* a 5x5 laplacian */ kernel=ParseKernelArray( "5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 
-1,2,3,2,-1 -4,-1,0,-1,-4"); break; case 7: /* a 7x7 laplacian */ kernel=ParseKernelArray( "7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" ); break; case 15: /* a 5x5 LoG (sigma approx 1.4) */ kernel=ParseKernelArray( "5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0"); break; case 19: /* a 9x9 LoG (sigma approx 1.4) */ /* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */ kernel=ParseKernelArray( "9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; break; } case SobelKernel: { /* Simple Sobel Kernel */ kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case RobertsKernel: { kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case PrewittKernel: { kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case CompassKernel: { kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case KirschKernel: { kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case FreiChenKernel: /* Direction is set to be left to right positive */ /* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? 
*/ /* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */ { switch ( (int) args->rho ) { default: case 0: kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[3] = +(MagickRealType) MagickSQ2; kernel->values[5] = -(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ break; case 2: kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[1] = kernel->values[3]= +(MagickRealType) MagickSQ2; kernel->values[5] = kernel->values[7]= -(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 10: { kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19",exception); if (kernel == (KernelInfo *) NULL) return(kernel); break; } case 1: case 11: kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[3] = +(MagickRealType) MagickSQ2; kernel->values[5] = -(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 12: kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[1] = +(MagickRealType) MagickSQ2; kernel->values[7] = +(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 13: kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[0] = +(MagickRealType) MagickSQ2; kernel->values[8] = -(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, 
(double) (1.0/2.0*MagickSQ2), NoValue); break; case 14: kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[2] = -(MagickRealType) MagickSQ2; kernel->values[6] = +(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 15: kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/2.0, NoValue); break; case 16: kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/2.0, NoValue); break; case 17: kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/6.0, NoValue); break; case 18: kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/6.0, NoValue); break; case 19: kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/3.0, NoValue); break; } if ( fabs(args->sigma) >= MagickEpsilon ) /* Rotate by correctly supplied 'angle' */ RotateKernelInfo(kernel, args->sigma); else if ( args->rho > 30.0 || args->rho < -30.0 ) /* Rotate by out of bounds 'type' */ RotateKernelInfo(kernel, args->rho); break; } /* Boolean or Shaped Kernels */ case DiamondKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) 
return(DestroyKernelInfo(kernel)); /* set all kernel values within diamond area to scale given */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x) kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case SquareKernel: case RectangleKernel: { double scale; if ( type == SquareKernel ) { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = (size_t) (2*args->rho+1); kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; scale = args->sigma; } else { /* NOTE: user defaults set in "AcquireKernelInfo()" */ if ( args->rho < 1.0 || args->sigma < 1.0 ) return(DestroyKernelInfo(kernel)); /* invalid args given */ kernel->width = (size_t)args->rho; kernel->height = (size_t)args->sigma; if ( args->xi < 0.0 || args->xi > (double)kernel->width || args->psi < 0.0 || args->psi > (double)kernel->height ) return(DestroyKernelInfo(kernel)); /* invalid args given */ kernel->x = (ssize_t) args->xi; kernel->y = (ssize_t) args->psi; scale = 1.0; } kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values to scale given */ u=(ssize_t) (kernel->width*kernel->height); for ( i=0; i < u; i++) kernel->values[i] = scale; kernel->minimum = kernel->maximum = scale; /* a flat shape */ kernel->positive_range = scale*u; break; } case OctagonKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius = 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( 
AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ( (labs((long) u)+labs((long) v)) <= ((long)kernel->x + (long)(kernel->x/2)) ) kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case DiskKernel: { ssize_t limit = (ssize_t)(args->rho*args->rho); if (args->rho < 0.4) /* default radius approx 4.3 */ kernel->width = kernel->height = 9L, limit = 18L; else kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ((u*u+v*v) <= limit) kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case PlusKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values along axises to given scale */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = (u == 0 || v == 0) ? 
args->sigma : nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0); break; } case CrossKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values along axises to given scale */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = (u == v || u == -v) ? args->sigma : nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0); break; } /* HitAndMiss Kernels */ case RingKernel: case PeaksKernel: { ssize_t limit1, limit2, scale; if (args->rho < args->sigma) { kernel->width = ((size_t)args->sigma)*2+1; limit1 = (ssize_t)(args->rho*args->rho); limit2 = (ssize_t)(args->sigma*args->sigma); } else { kernel->width = ((size_t)args->rho)*2+1; limit1 = (ssize_t)(args->sigma*args->sigma); limit2 = (ssize_t)(args->rho*args->rho); } if ( limit2 <= 0 ) kernel->width = 7L, limit1 = 7L, limit2 = 11L; kernel->height = kernel->width; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */ scale = (ssize_t) (( type == PeaksKernel) ? 
0.0 : args->xi); for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { ssize_t radius=u*u+v*v; if (limit1 < radius && radius <= limit2) kernel->positive_range += kernel->values[i] = (double) scale; else kernel->values[i] = nan; } kernel->minimum = kernel->maximum = (double) scale; if ( type == PeaksKernel ) { /* set the central point in the middle */ kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; kernel->positive_range = 1.0; kernel->maximum = 1.0; } break; } case EdgesKernel: { kernel=AcquireKernelInfo("ThinSE:482",exception); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */ break; } case CornersKernel: { kernel=AcquireKernelInfo("ThinSE:87",exception); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */ break; } case DiagonalsKernel: { switch ( (int) args->rho ) { case 0: default: { KernelInfo *new_kernel; kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; ExpandMirrorKernelInfo(kernel); return(kernel); } case 1: kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-"); break; case 2: kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case LineEndsKernel: { /* Kernels for finding the end of thin lines */ switch ( (int) args->rho ) { case 0: default: /* set of kernels to find all end of lines */ return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>",exception)); case 1: /* kernel for 4-connected line ends - no rotation */ 
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-"); break; case 2: /* kernel to add for 8-connected lines - no rotation */ kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1"); break; case 3: /* kernel to add for orthogonal line ends - does not find corners */ kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0"); break; case 4: /* traditional line end - fails on last T end */ kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case LineJunctionsKernel: { /* kernels for finding the junctions of multiple lines */ switch ( (int) args->rho ) { case 0: default: /* set of kernels to find all line junctions */ return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>",exception)); case 1: /* Y Junction */ kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-"); break; case 2: /* Diagonal T Junctions */ kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1"); break; case 3: /* Orthogonal T Junctions */ kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-"); break; case 4: /* Diagonal X Junctions */ kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1"); break; case 5: /* Orthogonal X Junctions - minimal diamond kernel */ kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case RidgesKernel: { /* Ridges - Ridge finding kernels */ KernelInfo *new_kernel; switch ( (int) args->rho ) { case 1: default: kernel=ParseKernelArray("3x1:0,1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */ break; case 2: kernel=ParseKernelArray("4x1:0,1,1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */ /* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */ /* Unfortunatally we can not yet 
rotate a non-square kernel */ /* But then we can't flip a non-symetrical kernel either */ new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; break; } break; } case ConvexHullKernel: { KernelInfo *new_kernel; /* first set of 8 kernels */ kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 
append the mirror versions too - no flip function yet */ new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; ExpandRotateKernelInfo(new_kernel, 90.0); LastKernelInfo(kernel)->next = new_kernel; break; } case SkeletonKernel: { switch ( (int) args->rho ) { case 1: default: /* Traditional Skeleton... ** A cyclically rotated single kernel */ kernel=AcquireKernelInfo("ThinSE:482",exception); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */ break; case 2: /* HIPR Variation of the cyclic skeleton ** Corners of the traditional method made more forgiving, ** but the retain the same cyclic order. */ kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;",exception); if (kernel == (KernelInfo *) NULL) return(kernel); if (kernel->next == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); kernel->type = type; kernel->next->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */ break; case 3: /* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's ** "Connectivity-Preserving Morphological Image Thransformations" ** by Dan S. Bloomberg, available on Leptonica, Selected Papers, ** http://www.leptonica.com/papers/conn.pdf */ kernel=AcquireKernelInfo("ThinSE:41; ThinSE:42; ThinSE:43", exception); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->next->type = type; kernel->next->next->type = type; ExpandMirrorKernelInfo(kernel); /* 12 kernels total */ break; } break; } case ThinSEKernel: { /* Special kernels for general thinning, while preserving connections ** "Connectivity-Preserving Morphological Image Thransformations" ** by Dan S. 
Bloomberg, available on Leptonica, Selected Papers, ** http://www.leptonica.com/papers/conn.pdf ** And ** http://tpgit.github.com/Leptonica/ccthin_8c_source.html ** ** Note kernels do not specify the origin pixel, allowing them ** to be used for both thickening and thinning operations. */ switch ( (int) args->rho ) { /* SE for 4-connected thinning */ case 41: /* SE_4_1 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1"); break; case 42: /* SE_4_2 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-"); break; case 43: /* SE_4_3 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1"); break; case 44: /* SE_4_4 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-"); break; case 45: /* SE_4_5 */ kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-"); break; case 46: /* SE_4_6 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1"); break; case 47: /* SE_4_7 */ kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-"); break; case 48: /* SE_4_8 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1"); break; case 49: /* SE_4_9 */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1"); break; /* SE for 8-connected thinning - negatives of the above */ case 81: /* SE_8_0 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-"); break; case 82: /* SE_8_2 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-"); break; case 83: /* SE_8_3 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-"); break; case 84: /* SE_8_4 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-"); break; case 85: /* SE_8_5 */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-"); break; case 86: /* SE_8_6 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1"); break; case 87: /* SE_8_7 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-"); break; case 88: /* SE_8_8 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-"); break; case 89: /* SE_8_9 */ kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-"); break; /* Special combined SE kernels */ case 423: /* SE_4_2 , SE_4_3 Combined Kernel */ kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-"); break; case 823: /* SE_8_2 , SE_8_3 Combined Kernel 
*/ kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-"); break; case 481: /* SE_48_1 - General Connected Corner Kernel */ kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-"); break; default: case 482: /* SE_48_2 - General Edge Kernel */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } /* Distance Measuring Kernels */ case ChebyshevKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*MagickMax(fabs((double)u),fabs((double)v)) ); kernel->maximum = kernel->values[0]; break; } case ManhattanKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*(labs((long) u)+labs((long) v)) ); kernel->maximum = kernel->values[0]; break; } case OctagonalKernel: { if (args->rho < 2.0) kernel->width = kernel->height = 5; /* default/minimum radius = 2 */ else kernel->width = kernel->height = 
((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { double r1 = MagickMax(fabs((double)u),fabs((double)v)), r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5); kernel->positive_range += kernel->values[i] = args->sigma*MagickMax(r1,r2); } kernel->maximum = kernel->values[0]; break; } case EuclideanKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*sqrt((double)(u*u+v*v)) ); kernel->maximum = kernel->values[0]; break; } default: { /* No-Op Kernel - Basically just a single pixel on its own */ kernel=ParseKernelArray("1:1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = UndefinedKernel; break; } break; } return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneKernelInfo() creates a new clone of the given Kernel List so that its % can be modified without effecting the original. The cloned kernel should % be destroyed using DestoryKernelInfo() when no longer needed. 
%
% The format of the CloneKernelInfo method is:
%
%     KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
  KernelInfo
    *clone;

  register ssize_t
    j;

  assert(kernel != (KernelInfo *) NULL);
  /* Shallow-copy the whole structure first, then deep-copy the owned data. */
  clone=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (clone == (KernelInfo *) NULL)
    return(clone);
  *clone=(*kernel);
  /* Give the clone its own private copy of the kernel value array. */
  clone->values=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel->width,kernel->height*sizeof(*kernel->values)));
  if (clone->values == (MagickRealType *) NULL)
    return(DestroyKernelInfo(clone));
  for (j=0; j < (ssize_t) (kernel->width*kernel->height); j++)
    clone->values[j]=kernel->values[j];
  /* Recursively clone the rest of the kernel list, if any; on failure the
     partially built clone list is released before returning NULL. */
  if (kernel->next != (KernelInfo *) NULL)
    {
      clone->next=CloneKernelInfo(kernel->next);
      if (clone->next == (KernelInfo *) NULL)
        return(DestroyKernelInfo(clone));
    }
  return(clone);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s t r o y K e r n e l I n f o                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyKernelInfo() frees the memory used by a Convolution/Morphology
%  kernel.
% % The format of the DestroyKernelInfo method is: % % KernelInfo *DestroyKernelInfo(KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel to be destroyed % */ MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel) { assert(kernel != (KernelInfo *) NULL); if (kernel->next != (KernelInfo *) NULL) kernel->next=DestroyKernelInfo(kernel->next); kernel->values=(MagickRealType *) RelinquishAlignedMemory(kernel->values); kernel=(KernelInfo *) RelinquishMagickMemory(kernel); return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + E x p a n d M i r r o r K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExpandMirrorKernelInfo() takes a single kernel, and expands it into a % sequence of 90-degree rotated kernels but providing a reflected 180 % rotatation, before the -/+ 90-degree rotations. % % This special rotation order produces a better, more symetrical thinning of % objects. % % The format of the ExpandMirrorKernelInfo method is: % % void ExpandMirrorKernelInfo(KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % This function is only internel to this module, as it is not finalized, % especially with regard to non-orthogonal angles, and rotation of larger % 2D kernels. */ #if 0 static void FlopKernelInfo(KernelInfo *kernel) { /* Do a Flop by reversing each row. 
*/ size_t y; register ssize_t x,r; register double *k,t; for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width) for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--) t=k[x], k[x]=k[r], k[r]=t; kernel->x = kernel->width - kernel->x - 1; angle = fmod(angle+180.0, 360.0); } #endif static void ExpandMirrorKernelInfo(KernelInfo *kernel) { KernelInfo *clone, *last; last = kernel; clone = CloneKernelInfo(last); RotateKernelInfo(clone, 180); /* flip */ LastKernelInfo(last)->next = clone; last = clone; clone = CloneKernelInfo(last); RotateKernelInfo(clone, 90); /* transpose */ LastKernelInfo(last)->next = clone; last = clone; clone = CloneKernelInfo(last); RotateKernelInfo(clone, 180); /* flop */ LastKernelInfo(last)->next = clone; return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + E x p a n d R o t a t e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating % incrementally by the angle given, until the kernel repeats. % % WARNING: 45 degree rotations only works for 3x3 kernels. % While 90 degree roatations only works for linear and square kernels % % The format of the ExpandRotateKernelInfo method is: % % void ExpandRotateKernelInfo(KernelInfo *kernel, double angle) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % o angle: angle to rotate in degrees % % This function is only internel to this module, as it is not finalized, % especially with regard to non-orthogonal angles, and rotation of larger % 2D kernels. 
*/

/* Internal Routine - Return true if two kernels are the same */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
  const KernelInfo *kernel2)
{
  register size_t
    i;

  /* check size and origin location */
  if (    kernel1->width != kernel2->width
       || kernel1->height != kernel2->height
       || kernel1->x != kernel2->x
       || kernel1->y != kernel2->y )
    return MagickFalse;

  /* check actual kernel values */
  for (i=0; i < (kernel1->width*kernel1->height); i++) {
    /* Test for Nan equivalence -- a NaN in only one of the two kernels
       means the kernels differ */
    if ( IsNaN(kernel1->values[i]) && !IsNaN(kernel2->values[i]) )
      return MagickFalse;
    if ( IsNaN(kernel2->values[i]) && !IsNaN(kernel1->values[i]) )
      return MagickFalse;
    /* Test actual values are equivalent.  When both values are NaN the
       fabs() result is NaN and the >= comparison is false, so a NaN pair
       falls through as "same", which is the intended behavior. */
    if ( fabs(kernel1->values[i] - kernel2->values[i]) >= MagickEpsilon )
      return MagickFalse;
  }
  return MagickTrue;
}

/* Append rotated copies of the last kernel in the list, rotating by 'angle'
   each step, until a rotation reproduces the starting kernel. */
static void ExpandRotateKernelInfo(KernelInfo *kernel, const double angle)
{
  KernelInfo
    *clone,
    *last;

  last = kernel;
  /* DisableMSCWarning quiets MSVC C4127 (constant conditional) raised by
     the deliberate while(1); the macro placement inside the loop header is
     intentional -- do not reorder. */
  DisableMSCWarning(4127)
  while(1) {
  RestoreMSCWarning
    /* NOTE(review): CloneKernelInfo() may return NULL on allocation failure;
       RotateKernelInfo() would then dereference NULL -- confirm whether an
       abort-on-OOM allocator makes this unreachable. */
    clone = CloneKernelInfo(last);
    RotateKernelInfo(clone, angle);
    if ( SameKernelInfo(kernel, clone) != MagickFalse )
      break;
    LastKernelInfo(last)->next = clone;
    last = clone;
  }
  clone = DestroyKernelInfo(clone); /* kernel has repeated - junk the clone */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     C a l c M e t a K e r n e l I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CalcKernelMetaData() recalculate the KernelInfo meta-data of this kernel
%  only, using the kernel values.  This should only be used if it is not
%  possible to calculate that meta-data in some easier way.
%
%  It is important that the meta-data is correct before ScaleKernelInfo() is
%  used to perform kernel normalization.
%
%  The format of the CalcKernelMetaData method is:
%
%      void CalcKernelMetaData(KernelInfo *kernel, const double scale )
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to modify
%
%  WARNING: Minimum and Maximum values are assumed to include zero, even if
%  zero is not part of the kernel (as in Gaussian Derived kernels).  This
%  however is not true for flat-shaped morphological kernels.
%
%  WARNING: Only the specific kernel pointed to is modified, not a list of
%  multiple kernels.
%
%  This is an internal function and not expected to be useful outside this
%  module.  This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
  register size_t
    n;

  /* Reset the meta-data before re-accumulating it from the raw values. */
  kernel->minimum = kernel->maximum = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  for (n=0; n < (kernel->width*kernel->height); n++)
    {
      double
        value;

      /* Flush near-zero values to an exact zero before accumulating. */
      if ( fabs(kernel->values[n]) < MagickEpsilon )
        kernel->values[n] = 0.0;
      value = kernel->values[n];
      if ( value < 0 )
        kernel->negative_range += value;
      else
        kernel->positive_range += value;
      Minimize(kernel->minimum, value);
      Maximize(kernel->maximum, value);
    }
  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o r p h o l o g y A p p l y                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MorphologyApply() applies a morphological method, multiple times using
%  a list of multiple kernels.  This is the method that should be called by
%  other 'operators' that internally use morphology operations as part of
%  their processing.
%
%  It is basically equivalent to MorphologyImage() (see below) but without
%  any user controls.  This allows internal programs to use this method to
%  perform a specific task without possible interference by any API user
%  supplied settings.
%
%  It is MorphologyImage() task to extract any such user controls, and
%  pass them to this function for processing.
%
%  More specifically all given kernels should already be scaled, normalised,
%  and blended appropriately before being passed to this routine.  The
%  appropriate bias, and compose (typically 'UndefinedComposeOp') given.
%
%  The format of the MorphologyApply method is:
%
%      Image *MorphologyApply(const Image *image,MorphologyMethod method,
%        const ssize_t iterations,const KernelInfo *kernel,
%        const CompositeMethod compose,const double bias,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the source image
%
%    o method: the morphology method to be applied.
%
%    o iterations: apply the operation this many times (or until no change).
%                  A value of -1 means loop until no change found.
%                  How this is applied may depend on the morphology method.
%                  Typically this is a value of 1.
%
%    o channel: the channel type.
%
%    o kernel: An array of double representing the morphology kernel.
%
%    o compose: How to handle or merge multi-kernel results.
%          If 'UndefinedCompositeOp' use default for the Morphology method.
%          If 'NoCompositeOp' force image to be re-iterated by each kernel.
%          Otherwise merge the results using the compose method given.
%
%    o bias: Convolution Output Bias.
%
%    o exception: return any errors or warnings in this structure.
% */ static ssize_t MorphologyPrimitive(const Image *image,Image *morphology_image, const MorphologyMethod method,const KernelInfo *kernel,const double bias, ExceptionInfo *exception) { #define MorphologyTag "Morphology/Image" CacheView *image_view, *morphology_view; OffsetInfo offset; register ssize_t j, y; size_t *changes, changed, width; MagickBooleanType status; MagickOffsetType progress; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(morphology_image != (Image *) NULL); assert(morphology_image->signature == MagickCoreSignature); assert(kernel != (KernelInfo *) NULL); assert(kernel->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); morphology_view=AcquireAuthenticCacheView(morphology_image,exception); width=image->columns+kernel->width-1; offset.x=0; offset.y=0; switch (method) { case ConvolveMorphology: case DilateMorphology: case DilateIntensityMorphology: case IterativeDistanceMorphology: { /* Kernel needs to used with reflection about origin. */ offset.x=(ssize_t) kernel->width-kernel->x-1; offset.y=(ssize_t) kernel->height-kernel->y-1; break; } case ErodeMorphology: case ErodeIntensityMorphology: case HitAndMissMorphology: case ThinningMorphology: case ThickenMorphology: { offset.x=kernel->x; offset.y=kernel->y; break; } default: { assert("Not a Primitive Morphology Method" != (char *) NULL); break; } } changed=0; changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(), sizeof(*changes)); if (changes == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++) changes[j]=0; if ((method == ConvolveMorphology) && (kernel->width == 1)) { register ssize_t x; /* Special handling (for speed) of vertical (blur) kernels. 
This performs its handling in columns rather than in rows. This is only done for convolve as it is the only method that generates very large 1-D vertical kernels (such as a 'BlurKernel') */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,morphology_image,image->columns,1) #endif for (x=0; x < (ssize_t) image->columns; x++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t r; ssize_t center; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,x,-offset.y,1,image->rows+ kernel->height-1,exception); q=GetCacheViewAuthenticPixels(morphology_view,x,0,1, morphology_image->rows,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) GetPixelChannels(image)*offset.y; for (r=0; r < (ssize_t) image->rows; r++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait morphology_traits, traits; register const MagickRealType *magick_restrict k; register const Quantum *magick_restrict pixels; register ssize_t v; size_t count; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); morphology_traits=GetPixelChannelTraits(morphology_image,channel); if ((traits == UndefinedPixelTrait) || (morphology_traits == UndefinedPixelTrait)) continue; if (((traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p+center) == 0)) { SetPixelChannel(morphology_image,channel,p[center+i],q); continue; } k=(&kernel->values[kernel->height-1]); pixels=p; pixel=bias; gamma=0.0; count=0; if ((morphology_traits & BlendPixelTrait) == 0) for (v=0; v < (ssize_t) kernel->height; v++) { if (!IsNaN(*k)) { pixel+=(*k)*pixels[i]; gamma+=(*k); count++; } k--; pixels+=GetPixelChannels(image); } else for (v=0; v < (ssize_t) kernel->height; v++) { 
if (!IsNaN(*k)) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=alpha*(*k)*pixels[i]; gamma+=alpha*(*k); count++; } k--; pixels+=GetPixelChannels(image); } if (fabs(pixel-p[center+i]) > MagickEpsilon) changes[id]++; gamma=PerceptibleReciprocal(gamma); if (count != 0) gamma*=(double) kernel->height/count; SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma* pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(morphology_image); } if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_MorphologyPrimitive) #endif proceed=SetImageProgress(image,MorphologyTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } morphology_image->type=image->type; morphology_view=DestroyCacheView(morphology_view); image_view=DestroyCacheView(image_view); for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++) changed+=changes[j]; changes=(size_t *) RelinquishMagickMemory(changes); return(status ? (ssize_t) changed : 0); } /* Normal handling of horizontal or rectangular kernels (row by row). 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,morphology_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; ssize_t center; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width, kernel->height,exception); q=GetCacheViewAuthenticPixels(morphology_view,0,y,morphology_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) (GetPixelChannels(image)*width*offset.y+ GetPixelChannels(image)*offset.x); for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, intensity, maximum, minimum, pixel; PixelChannel channel; PixelTrait morphology_traits, traits; register const MagickRealType *magick_restrict k; register const Quantum *magick_restrict pixels; register ssize_t u; size_t count; ssize_t v; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); morphology_traits=GetPixelChannelTraits(morphology_image,channel); if ((traits == UndefinedPixelTrait) || (morphology_traits == UndefinedPixelTrait)) continue; if (((traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p+center) == 0)) { SetPixelChannel(morphology_image,channel,p[center+i],q); continue; } pixels=p; maximum=0.0; minimum=(double) QuantumRange; count=kernel->width*kernel->height; switch (method) { case ConvolveMorphology: pixel=bias; break; case HitAndMissMorphology: pixel=(double) QuantumRange; break; case ThinningMorphology: pixel=(double) QuantumRange; break; case ThickenMorphology: pixel=(double) QuantumRange; break; case ErodeMorphology: pixel=(double) QuantumRange; break; case DilateMorphology: pixel=0.0; break; 
case ErodeIntensityMorphology: case DilateIntensityMorphology: case IterativeDistanceMorphology: { pixel=(double) p[center+i]; break; } default: pixel=0; break; } gamma=1.0; switch (method) { case ConvolveMorphology: { /* Weighted Average of pixels using reflected kernel For correct working of this operation for asymetrical kernels, the kernel needs to be applied in its reflected form. That is its values needs to be reversed. Correlation is actually the same as this but without reflecting the kernel, and thus 'lower-level' that Convolution. However as Convolution is the more common method used, and it does not really cost us much in terms of processing to use a reflected kernel, so it is Convolution that is implemented. Correlation will have its kernel reflected before calling this function to do a Convolve. For more details of Correlation vs Convolution see http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf */ k=(&kernel->values[kernel->width*kernel->height-1]); count=0; if ((morphology_traits & BlendPixelTrait) == 0) { /* No alpha blending. */ for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { pixel+=(*k)*pixels[i]; count++; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } /* Alpha blending. */ gamma=0.0; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=alpha*(*k)*pixels[i]; gamma+=alpha*(*k); count++; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case ErodeMorphology: { /* Minimum value within kernel neighbourhood. The kernel is not reflected for this operation. In normal Greyscale Morphology, the kernel value should be added to the real value, this is currently not done, due to the nature of the boolean kernels being used. 
*/ k=kernel->values; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k) && (*k >= 0.5)) { if ((double) pixels[i] < pixel) pixel=(double) pixels[i]; } k++; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case DilateMorphology: { /* Maximum value within kernel neighbourhood. For correct working of this operation for asymetrical kernels, the kernel needs to be applied in its reflected form. That is its values needs to be reversed. In normal Greyscale Morphology, the kernel value should be added to the real value, this is currently not done, due to the nature of the boolean kernels being used. */ count=0; k=(&kernel->values[kernel->width*kernel->height-1]); for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k) && (*k > 0.5)) { if ((double) pixels[i] > pixel) pixel=(double) pixels[i]; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case HitAndMissMorphology: case ThinningMorphology: case ThickenMorphology: { /* Minimum of foreground pixel minus maxumum of background pixels. The kernel is not reflected for this operation, and consists of both foreground and background pixel neighbourhoods, 0.0 for background, and 1.0 for foreground with either Nan or 0.5 values for don't care. This never produces a meaningless negative result. Such results cause Thinning/Thicken to not work correctly when used against a greyscale image. 
*/ count=0; k=kernel->values; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { if (*k > 0.7) { if ((double) pixels[i] < pixel) pixel=(double) pixels[i]; } else if (*k < 0.3) { if ((double) pixels[i] > maximum) maximum=(double) pixels[i]; } count++; } k++; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } pixel-=maximum; if (pixel < 0.0) pixel=0.0; if (method == ThinningMorphology) pixel=(double) p[center+i]-pixel; else if (method == ThickenMorphology) pixel+=(double) p[center+i]+pixel; break; } case ErodeIntensityMorphology: { /* Select pixel with minimum intensity within kernel neighbourhood. The kernel is not reflected for this operation. */ count=0; k=kernel->values; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k) && (*k >= 0.5)) { intensity=(double) GetPixelIntensity(image,pixels); if (intensity < minimum) { pixel=(double) pixels[i]; minimum=intensity; } count++; } k++; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case DilateIntensityMorphology: { /* Select pixel with maximum intensity within kernel neighbourhood. The kernel is not reflected for this operation. */ count=0; k=(&kernel->values[kernel->width*kernel->height-1]); for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k) && (*k >= 0.5)) { intensity=(double) GetPixelIntensity(image,pixels); if (intensity > maximum) { pixel=(double) pixels[i]; maximum=intensity; } count++; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case IterativeDistanceMorphology: { /* Compute th iterative distance from black edge of a white image shape. Essentually white values are decreased to the smallest 'distance from edge' it can find. 
It works by adding kernel values to the neighbourhood, and and select the minimum value found. The kernel is rotated before use, so kernel distances match resulting distances, when a user provided asymmetric kernel is applied. This code is nearly identical to True GrayScale Morphology but not quite. GreyDilate Kernel values added, maximum value found Kernel is rotated before use. GrayErode: Kernel values subtracted and minimum value found No kernel rotation used. Note the the Iterative Distance method is essentially a GrayErode, but with negative kernel values, and kernel rotation applied. */ count=0; k=(&kernel->values[kernel->width*kernel->height-1]); for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); count++; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case UndefinedMorphology: default: break; } if (fabs(pixel-p[center+i]) > MagickEpsilon) changes[id]++; gamma=PerceptibleReciprocal(gamma); if (count != 0) gamma*=(double) kernel->height*kernel->width/count; SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(morphology_image); } if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_MorphologyPrimitive) #endif proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } morphology_view=DestroyCacheView(morphology_view); image_view=DestroyCacheView(image_view); for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++) changed+=changes[j]; changes=(size_t *) RelinquishMagickMemory(changes); return(status ? 
(ssize_t) changed : -1);
}

/*
  This is almost identical to the MorphologyPrimitive() function above, but
  applies the primitive directly to the actual image using two passes, once
  in each direction, with the results of the previous (and current) row
  being re-used.

  That is after each row is 'Sync'ed' into the image, the next row makes
  use of those values as part of the calculation of the next row.  It
  repeats, but going in the opposite (bottom-up) direction.

  Because of this 're-use of results' this function can not make use of
  multi-threaded, parallel processing.

  Returns the number of pixels changed, or -1 on error.
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
  const MorphologyMethod method,const KernelInfo *kernel,
  ExceptionInfo *exception)
{
  CacheView
    *morphology_view,
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  OffsetInfo
    offset;   /* kernel origin, reflected for Distance/Voronoi */

  size_t
    width,    /* virtual-pixel row width: columns plus kernel overhang */
    changed;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=MagickTrue;
  changed=0;
  progress=0;
  switch(method)
  {
    case DistanceMorphology:
    case VoronoiMorphology:
    {
      /*
        Kernel reflected about origin.
      */
      offset.x=(ssize_t) kernel->width-kernel->x-1;
      offset.y=(ssize_t) kernel->height-kernel->y-1;
      break;
    }
    default:
    {
      offset.x=kernel->x;
      offset.y=kernel->y;
      break;
    }
  }
  /*
    Two views into same image, do not thread.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(image,exception);
  width=image->columns+kernel->width-1;
  /* Forward (top-down) pass: only rows at or above the current row feed in. */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      center;

    /*
      Read virtual pixels, and authentic pixels, from the same image!  We
      read using virtual to get virtual pixel handling, but write back into
      the same image.

      Only top half of kernel is processed as we do a single pass downward
      through the image iterating the distance function as we go.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,(size_t)
      offset.y+1,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* offset (in quantum units) of the pixel under the kernel origin */
    center=(ssize_t) (GetPixelChannels(image)*width*offset.y+
      GetPixelChannels(image)*offset.x);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelTrait
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        traits=GetPixelChannelTraits(image,(PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        if (((traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p+center) == 0))
          continue;
        pixels=p;
        pixel=(double) QuantumRange;  /* running minimum distance */
        switch (method)
        {
          case DistanceMorphology:
          {
            /* scan kernel rows above the target (reflected kernel, k runs
               backwards) ... */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v <= offset.y; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* ... then the already-updated pixels to the left on the
               current row, read straight from the authentic q buffer */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q-offset.x*GetPixelChannels(image);
            for (u=0; u < offset.x; u++)
            {
              if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
              pixels+=GetPixelChannels(image);
            }
            break;
          }
          case VoronoiMorphology:
          {
            /* NOTE(review): this loop uses "v < offset.y" where the
               Distance case above uses "v <= offset.y" (i.e. it skips the
               kernel row containing the origin) -- confirm intended. */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < offset.y; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q-offset.x*GetPixelChannels(image);
            for (u=0; u < offset.x; u++)
            {
              if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
              pixels+=GetPixelChannels(image);
            }
            break;
          }
          default:
            break;
        }
        if (fabs(pixel-q[i]) > MagickEpsilon)
          changed++;
        q[i]=ClampToQuantum(pixel);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* 2*rows: this is pass one of two */
        proceed=SetImageProgress(image,MorphologyTag,progress++,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  /*
    Do the reverse pass through the image.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(image,exception);
  for (y=(ssize_t) image->rows-1; y >= 0; y--)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      center;

    /*
      Read virtual pixels, and authentic pixels, from the same image.  We
      read using virtual to get virtual pixel handling, but write back
      into the same image.

      Only the bottom half of the kernel is processed as we go up the image.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y,width,(size_t)
      kernel->y+1,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* walk this row right-to-left, so start both pointers at the last column */
    p+=(image->columns-1)*GetPixelChannels(image);
    q+=(image->columns-1)*GetPixelChannels(image);
    center=(ssize_t) (offset.x*GetPixelChannels(image));
    for (x=(ssize_t) image->columns-1; x >= 0; x--)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelTrait
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        traits=GetPixelChannelTraits(image,(PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        if (((traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p+center) == 0))
          continue;
        pixels=p;
        pixel=(double) QuantumRange;
        switch (method)
        {
          case DistanceMorphology:
          {
            /* kernel rows at and below the origin ... */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            for (v=offset.y; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* ... then already-updated pixels to the right on this row */
            k=(&kernel->values[kernel->width*kernel->y+kernel->x-1]);
            pixels=q;
            for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
            {
              pixels+=GetPixelChannels(image);
              if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
            }
            break;
          }
          case VoronoiMorphology:
          {
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            for (v=offset.y; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* NOTE(review): the row-scan kernel index here differs from the
               Distance case above (width*(y+1)-1 vs width*y+x-1) -- confirm
               intended for non-centered kernels. */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q;
            for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
            {
              pixels+=GetPixelChannels(image);
              if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
            }
            break;
          }
          default:
            break;
        }
        if (fabs(pixel-q[i]) > MagickEpsilon)
          changed++;
        q[i]=ClampToQuantum(pixel);
      }
      p-=GetPixelChannels(image);
      q-=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,MorphologyTag,progress++,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  return(status ? (ssize_t) changed : -1);
}

/*
  Apply a Morphology by calling one of the above low level primitive
  application functions.

  This function handles any iteration loops, composition or re-iteration of
  results, and compound morphology methods that is based on multiple
  low-level (staged) morphology methods.

  Basically this provides the complex glue between the requested morphology
  method and raw low-level implementation (above).
*/
MagickPrivate Image *MorphologyApply(const Image *image,
  const MorphologyMethod method, const ssize_t iterations,
  const KernelInfo *kernel, const CompositeOperator compose,const double bias,
  ExceptionInfo *exception)
{
  CompositeOperator
    curr_compose;

  Image
    *curr_image,    /* Image we are working with or iterating */
    *work_image,    /* secondary image for primitive iteration */
    *save_image,    /* saved image - for 'edge' method only */
    *rslt_image;    /* resultant image - after multi-kernel handling */

  KernelInfo
    *reflected_kernel, /* A reflected copy of the kernel (if needed) */
    *norm_kernel,      /* the current normal un-reflected kernel */
    *rflt_kernel,      /* the current reflected kernel (if needed) */
    *this_kernel;      /* the kernel being applied */

  MorphologyMethod
    primitive;      /* the current morphology primitive being applied */

  CompositeOperator
    rslt_compose;   /* multi-kernel compose method for results to use */

  MagickBooleanType
    special,        /* do we use a direct modify function? */
    verbose;        /* verbose output of results */

  size_t
    method_loop,    /* Loop 1: number of compound method iterations (norm 1) */
    method_limit,   /*         maximum number of compound method iterations */
    kernel_number,  /* Loop 2: the kernel number being applied */
    stage_loop,     /* Loop 3: primitive loop for compound morphology */
    stage_limit,    /*         how many primitives are in this compound */
    kernel_loop,    /* Loop 4: iterate the kernel over image */
    kernel_limit,   /*         number of times to iterate kernel */
    count,          /* total count of primitive steps applied */
    kernel_changed, /* total count of changed using iterated kernel */
    method_changed; /* total count of changed over method iteration */

  ssize_t
    changed;        /* number pixels changed by last primitive operation */

  char
    v_info[MagickPathExtent];

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  count = 0;      /* number of low-level morphology primitives performed */
  if ( iterations == 0 )
    return((Image *) NULL);   /* null operation - nothing to do! */

  kernel_limit = (size_t) iterations;
  if ( iterations < 0 )  /* negative iterations = infinite (well, almost) */
    kernel_limit = image->columns>image->rows ? image->columns : image->rows;

  verbose = IsStringTrue(GetImageArtifact(image,"debug"));

  /* initialise for cleanup */
  curr_image = (Image *) image;
  curr_compose = image->compose;
  (void) curr_compose;  /* suppress unused-variable warning */
  work_image = save_image = rslt_image = (Image *) NULL;
  reflected_kernel = (KernelInfo *) NULL;

  /* Initialize specific methods
   * + which loop should use the given iterations
   * + how many primitives make up the compound morphology
   * + multi-kernel compose method to use (by default)
   */
  method_limit = 1;       /* just do method once, unless otherwise set */
  stage_limit = 1;        /* assume method is not a compound */
  special = MagickFalse;  /* assume it is NOT a direct modify primitive */
  rslt_compose = compose; /* and we are composing multi-kernels as given */
  switch( method ) {
    case SmoothMorphology:  /* 4 primitive compound morphology */
      stage_limit = 4;
      break;
    case OpenMorphology:    /* 2 primitive compound morphology */
    case OpenIntensityMorphology:
    case TopHatMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case EdgeMorphology:
      stage_limit = 2;
      break;
    case HitAndMissMorphology:
      rslt_compose = LightenCompositeOp;  /* Union of multi-kernel results */
      /* FALLTHROUGH */
    case ThinningMorphology:
    case ThickenMorphology:
      method_limit = kernel_limit;  /* iterate the whole method */
      kernel_limit = 1;             /* do not do kernel iteration  */
      break;
    case DistanceMorphology:
    case VoronoiMorphology:
      special = MagickTrue;         /* use special direct primitive */
      break;
    default:
      break;
  }

  /* Apply special methods with special requirements
  ** For example, single run only, or post-processing requirements
  */
  if ( special != MagickFalse )
    {
      rslt_image=CloneImage(image,0,0,MagickTrue,exception);
      if (rslt_image == (Image *) NULL)
        goto error_cleanup;
      if (SetImageStorageClass(rslt_image,DirectClass,exception) == MagickFalse)
        goto error_cleanup;
      changed=MorphologyPrimitiveDirect(rslt_image,method,kernel,exception);

      if (verbose != MagickFalse)
        /* FIX: was a duplicated "(void) (void)" cast (typo) */
        (void) FormatLocaleFile(stderr,
          "%s:%.20g.%.20g #%.20g => Changed %.20g\n",
          CommandOptionToMnemonic(MagickMorphologyOptions, method),
          1.0,0.0,1.0, (double) changed);

      if ( changed < 0 )
        goto error_cleanup;

      if ( method == VoronoiMorphology ) {
        /* Preserve the alpha channel of input image - but turned it off */
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
        (void) CompositeImage(rslt_image,image,CopyAlphaCompositeOp,
          MagickTrue,0,0,exception);
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
      }
      goto exit_cleanup;
    }

  /* Handle user (caller) specified multi-kernel composition method */
  if ( compose != UndefinedCompositeOp )
    rslt_compose = compose;  /* override default composition for method */
  if ( rslt_compose == UndefinedCompositeOp )
    rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */

  /* Some methods require a reflected kernel to use with primitives.
   * Create the reflected kernel for those methods. */
  switch ( method ) {
    case CorrelateMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case SmoothMorphology:
      reflected_kernel = CloneKernelInfo(kernel);
      if (reflected_kernel == (KernelInfo *) NULL)
        goto error_cleanup;
      RotateKernelInfo(reflected_kernel,180);
      break;
    default:
      break;
  }

  /* Loops around more primitive morphology methods
  **  erode, dilate, open, close, smooth, edge, etc...
  */
  /* Loop 1:  iterate the compound method */
  method_loop = 0;
  method_changed = 1;
  while ( method_loop < method_limit &&
          method_changed > 0 ) {
    method_loop++;
    method_changed = 0;

    /* Loop 2:  iterate over each kernel in a multi-kernel list */
    norm_kernel = (KernelInfo *) kernel;
    this_kernel = (KernelInfo *) kernel;
    rflt_kernel = reflected_kernel;

    kernel_number = 0;
    while ( norm_kernel != NULL ) {

      /* Loop 3: Compound Morphology Staging - Select Primitive to apply */
      stage_loop = 0;          /* the compound morphology stage number */
      while ( stage_loop < stage_limit ) {
        stage_loop++;   /* The stage of the compound morphology */

        /* Select primitive morphology for this stage of compound method */
        this_kernel = norm_kernel; /* default use unreflected kernel */
        primitive = method;        /* Assume method is a primitive */
        switch( method ) {
          case ErodeMorphology:      /* just erode */
          case EdgeInMorphology:     /* erode and image difference */
            primitive = ErodeMorphology;
            break;
          case DilateMorphology:     /* just dilate */
          case EdgeOutMorphology:    /* dilate and image difference */
            primitive = DilateMorphology;
            break;
          case OpenMorphology:       /* erode then dilate */
          case TopHatMorphology:     /* open and image difference */
            primitive = ErodeMorphology;
            if ( stage_loop == 2 )
              primitive = DilateMorphology;
            break;
          case OpenIntensityMorphology:
            primitive = ErodeIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = DilateIntensityMorphology;
            break;
          case CloseMorphology:      /* dilate, then erode */
          case BottomHatMorphology:  /* close and image difference */
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = DilateMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeMorphology;
            break;
          case CloseIntensityMorphology:
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = DilateIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeIntensityMorphology;
            break;
          case SmoothMorphology:         /* open, close */
            switch ( stage_loop ) {
              case 1: /* start an open method, which starts with Erode */
                primitive = ErodeMorphology;
                break;
              case 2:  /* now Dilate the Erode */
                primitive = DilateMorphology;
                break;
              case 3:  /* Reflect kernel a close */
                this_kernel = rflt_kernel; /* use the reflected kernel */
                primitive = DilateMorphology;
                break;
              case 4:  /* Finish the Close */
                this_kernel = rflt_kernel; /* use the reflected kernel */
                primitive = ErodeMorphology;
                break;
            }
            break;
          case EdgeMorphology:        /* dilate and erode difference */
            primitive = DilateMorphology;
            if ( stage_loop == 2 ) {
              save_image = curr_image;      /* save the image difference */
              curr_image = (Image *) image;
              primitive = ErodeMorphology;
            }
            break;
          case CorrelateMorphology:
            /* A Correlation is a Convolution with a reflected kernel.
            ** However a Convolution is a weighted sum using a reflected
            ** kernel.  It may seem strange to convert a Correlation into a
            ** Convolution as the Correlation is the simpler method, but
            ** Convolution is much more commonly used, and it makes sense to
            ** implement it directly so as to avoid the need to duplicate the
            ** kernel when it is not required (which is typically the
            ** default).
            */
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = ConvolveMorphology;
            break;
          default:
            break;
        }
        assert( this_kernel != (KernelInfo *) NULL );

        /* Extra information for debugging compound operations */
        if (verbose != MagickFalse) {
          if ( stage_limit > 1 )
            (void) FormatLocaleString(v_info,MagickPathExtent,"%s:%.20g.%.20g -> ",
             CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
             method_loop,(double) stage_loop);
          else if ( primitive != method )
            (void) FormatLocaleString(v_info, MagickPathExtent, "%s:%.20g -> ",
              CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
              method_loop);
          else
            v_info[0] = '\0';
        }

        /* Loop 4: Iterate the kernel with primitive */
        kernel_loop = 0;
        kernel_changed = 0;
        changed = 1;
        while ( kernel_loop < kernel_limit && changed > 0 ) {
          kernel_loop++;     /* the iteration of this kernel */

          /* Create a clone as the destination image, if not yet defined */
          if ( work_image == (Image *) NULL )
            {
              work_image=CloneImage(image,0,0,MagickTrue,exception);
              if (work_image == (Image *) NULL)
                goto error_cleanup;
              if (SetImageStorageClass(work_image,DirectClass,exception) == MagickFalse)
                goto error_cleanup;
            }

          /* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
          count++;
          changed = MorphologyPrimitive(curr_image, work_image, primitive,
            this_kernel, bias, exception);
          if (verbose != MagickFalse) {
            if ( kernel_loop > 1 )
              (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */
            /* FIX: was a duplicated "(void) (void)" cast (typo) */
            (void) FormatLocaleFile(stderr,
              "%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
              v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
              primitive),(this_kernel == rflt_kernel ) ? "*" : "",
              (double) (method_loop+kernel_loop-1),(double) kernel_number,
              (double) count,(double) changed);
          }
          if ( changed < 0 )
            goto error_cleanup;
          kernel_changed += changed;
          method_changed += changed;

          /* prepare next loop */
          { Image *tmp = work_image;   /* swap images for iteration */
            work_image = curr_image;
            curr_image = tmp;
          }
          if ( work_image == image )
            work_image = (Image *) NULL; /* replace input 'image' */

        } /* End Loop 4: Iterate the kernel with primitive */

        if (verbose != MagickFalse && kernel_changed != (size_t)changed)
          (void) FormatLocaleFile(stderr, " Total %.20g",(double) kernel_changed);
        if (verbose != MagickFalse && stage_loop < stage_limit)
          (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */

#if 0
    (void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
    (void) FormatLocaleFile(stderr, " curr =0x%lx\n", (unsigned long)curr_image);
    (void) FormatLocaleFile(stderr, " work =0x%lx\n", (unsigned long)work_image);
    (void) FormatLocaleFile(stderr, " save =0x%lx\n", (unsigned long)save_image);
    (void) FormatLocaleFile(stderr, " union=0x%lx\n", (unsigned long)rslt_image);
#endif

      } /* End Loop 3: Primitive (staging) Loop for Compound Methods */

      /*  Final Post-processing for some Compound Methods
      **
      **  The removal of any 'Sync' channel flag in the Image Composition
      **  below ensures the mathematical compose method is applied in a
      **  purely mathematical way, and only to the selected channels.
      **  Turn off SVG composition 'alpha blending'.
      */
      switch( method ) {
        case EdgeOutMorphology:
        case EdgeInMorphology:
        case TopHatMorphology:
        case BottomHatMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference with original image",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          break;
        case EdgeMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference of Dilate and Erode",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,save_image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          save_image = DestroyImage(save_image); /* finished with save image */
          break;
        default:
          break;
      }

      /* multi-kernel handling:  re-iterate, or compose results */
      if ( kernel->next == (KernelInfo *) NULL )
        rslt_image = curr_image;   /* just return the resulting image */
      else if ( rslt_compose == NoCompositeOp )
        { if (verbose != MagickFalse) {
            if ( this_kernel->next != (KernelInfo *) NULL )
              (void) FormatLocaleFile(stderr, " (re-iterate)");
            else
              (void) FormatLocaleFile(stderr, " (done)");
          }
          rslt_image = curr_image; /* return result, and re-iterate */
        }
      else if ( rslt_image == (Image *) NULL)
        { if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (save for compose)");
          rslt_image = curr_image;
          curr_image = (Image *) image;  /* continue with original image */
        }
      else
        { /* Add the new 'current' result to the composition
          **
          ** The removal of any 'Sync' channel flag in the Image Composition
          ** below ensures the mathematical compose method is applied in a
          ** purely mathematical way, and only to the selected channels.
          ** IE: Turn off SVG composition 'alpha blending'.
          */
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (compose \"%s\")",
              CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
          (void) CompositeImage(rslt_image,curr_image,rslt_compose,MagickTrue,
            0,0,exception);
          curr_image = DestroyImage(curr_image);
          curr_image = (Image *) image;  /* continue with original image */
        }
      if (verbose != MagickFalse)
        (void) FormatLocaleFile(stderr, "\n");

      /* loop to the next kernel in a multi-kernel list */
      norm_kernel = norm_kernel->next;
      if ( rflt_kernel != (KernelInfo *) NULL )
        rflt_kernel = rflt_kernel->next;
      kernel_number++;
    } /* End Loop 2: Loop over each kernel */

  } /* End Loop 1: compound method iteration */

  goto exit_cleanup;

  /* Yes goto's are bad, but it makes cleanup lot more efficient */
error_cleanup:
  if ( curr_image == rslt_image )
    curr_image = (Image *) NULL;
  if ( rslt_image != (Image *) NULL )
    rslt_image = DestroyImage(rslt_image);
exit_cleanup:
  if ( curr_image == rslt_image || curr_image == image )
    curr_image = (Image *) NULL;
  if ( curr_image != (Image *) NULL )
    curr_image = DestroyImage(curr_image);
  if ( work_image != (Image *) NULL )
    work_image = DestroyImage(work_image);
  if ( save_image != (Image *) NULL )
    save_image = DestroyImage(save_image);
  if ( reflected_kernel != (KernelInfo *) NULL )
    reflected_kernel = DestroyKernelInfo(reflected_kernel);
  return(rslt_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o r p h o l o g y I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MorphologyImage() applies a user supplied kernel to the image according to
%  the given morphology method.
%
%  This function applies any and all user defined settings before calling
%  the above internal function MorphologyApply().
%
%  User defined settings include...
% * Output Bias for Convolution and correlation ("-define convolve:bias=??")
% * Kernel Scale/normalize settings ("-define convolve:scale=??")
%      This can also include the addition of a scaled unity kernel.
% * Show Kernel being applied ("-define morphology:showkernel=1")
%
% Other operators that do not want user supplied options interfering,
% especially "convolve:bias" and "morphology:showkernel" should use
% MorphologyApply() directly.
%
% The format of the MorphologyImage method is:
%
%     Image *MorphologyImage(const Image *image,MorphologyMethod method,
%       const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o method: the morphology method to be applied.
%
%   o iterations: apply the operation this many times (or no change).
%       A value of -1 means loop until no change found.
%       How this is applied may depend on the morphology method.
%       Typically this is a value of 1.
%
%   o kernel: An array of double representing the morphology kernel.
%       Warning: kernel may be normalized for the Convolve method.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphologyImage(const Image *image,
  const MorphologyMethod method,const ssize_t iterations,
  const KernelInfo *kernel,ExceptionInfo *exception)
{
  const char
    *artifact;

  CompositeOperator
    compose;

  double
    bias;

  Image
    *morphology_image;

  KernelInfo
    *curr_kernel;

  /* Work on the caller's kernel directly; it is only cloned below if a
     user-requested scaling would otherwise modify the const input. */
  curr_kernel = (KernelInfo *) kernel;
  bias=0.0;
  compose = UndefinedCompositeOp;  /* use default for method */

  /* Apply Convolve/Correlate Normalization and Scaling Factors.
   * This is done BEFORE the ShowKernelInfo() function is called so that
   * users can see the results of the 'option:convolve:scale' option.
   */
  if ( method == ConvolveMorphology || method == CorrelateMorphology )
    {
      /* Get the bias value as it will be needed */
      artifact = GetImageArtifact(image,"convolve:bias");
      if ( artifact != (const char *) NULL) {
        if (IsGeometry(artifact) == MagickFalse)
          (void) ThrowMagickException(exception,GetMagickModule(),
               OptionWarning,"InvalidSetting","'%s' '%s'",
               "convolve:bias",artifact);
        else
          bias=StringToDoubleInterval(artifact,(double) QuantumRange+1.0);
      }

      /* Scale kernel according to user wishes */
      artifact = GetImageArtifact(image,"convolve:scale");
      if ( artifact != (const char *) NULL ) {
        if (IsGeometry(artifact) == MagickFalse)
          (void) ThrowMagickException(exception,GetMagickModule(),
               OptionWarning,"InvalidSetting","'%s' '%s'",
               "convolve:scale",artifact);
        else {
          /* clone before scaling so the const caller kernel is untouched */
          if ( curr_kernel == kernel )
            curr_kernel = CloneKernelInfo(kernel);
          if (curr_kernel == (KernelInfo *) NULL)
            return((Image *) NULL);
          ScaleGeometryKernelInfo(curr_kernel, artifact);
        }
      }
    }

  /* display the (normalized) kernel via stderr */
  artifact=GetImageArtifact(image,"morphology:showkernel");
  if (IsStringTrue(artifact) != MagickFalse)
    ShowKernelInfo(curr_kernel);

  /* Override the default handling of multi-kernel morphology results
   * If 'Undefined' use the default method
   * If 'None' (default for 'Convolve') re-iterate previous result
   * Otherwise merge resulting images using compose method given.
   * Default for 'HitAndMiss' is 'Lighten'.
   */
  { ssize_t
      parse;

    artifact = GetImageArtifact(image,"morphology:compose");
    if ( artifact != (const char *) NULL) {
      parse=ParseCommandOption(MagickComposeOptions,
        MagickFalse,artifact);
      if ( parse < 0 )
        (void) ThrowMagickException(exception,GetMagickModule(),
             OptionWarning,"UnrecognizedComposeOperator","'%s' '%s'",
             "morphology:compose",artifact);
      else
        compose=(CompositeOperator)parse;
    }
  }
  /* Apply the Morphology */
  morphology_image = MorphologyApply(image,method,iterations,
    curr_kernel,compose,bias,exception);

  /* Cleanup and Exit */
  if ( curr_kernel != kernel )
    curr_kernel=DestroyKernelInfo(curr_kernel);
  return(morphology_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     R o t a t e K e r n e l I n f o                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RotateKernelInfo() rotates the kernel by the angle given.
%
%  Currently it is restricted to 90 degree angles, of either 1D kernels
%  or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels.
%  It will ignore useless rotations for specific 'named' built-in kernels.
%
%  The format of the RotateKernelInfo method is:
%
%       void RotateKernelInfo(KernelInfo *kernel, double angle)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
%    o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
  /* angle (rotate) the lower kernels in the multi-kernel list first */
  if ( kernel->next != (KernelInfo *) NULL)
    RotateKernelInfo(kernel->next, angle);

  /* WARNING: Currently assumes the kernel (rightly) is horizontally
  ** symmetrical
  **
  ** TODO: expand beyond simple 90 degree rotates, flips and flops
  */

  /* Modulus the angle into the range [0, 360) */
  angle = fmod(angle, 360.0);
  if ( angle < 0 )
    angle += 360.0;

  if ( 337.5 < angle || angle <= 22.5 )
    return;   /* Near zero angle - no change! - At least not at this time */

  /* Handle special cases */
  switch (kernel->type) {
    /* These built-in kernels are cylindrical kernels, rotating is useless */
    case GaussianKernel:
    case DoGKernel:
    case LoGKernel:
    case DiskKernel:
    case PeaksKernel:
    case LaplacianKernel:
    case ChebyshevKernel:
    case ManhattanKernel:
    case EuclideanKernel:
      return;

    /* These may be rotatable at non-90 angles in the future */
    /* but simply rotating them in multiples of 90 degrees is useless */
    case SquareKernel:
    case DiamondKernel:
    case PlusKernel:
    case CrossKernel:
      return;

    /* These only allows a +/-90 degree rotation (by transpose) */
    /* A 180 degree rotation is useless */
    case BlurKernel:
      if ( 135.0 < angle && angle <= 225.0 )
        return;
      if ( 225.0 < angle && angle <= 315.0 )
        angle -= 180;
      break;

    default:
      break;
  }
  /* Attempt rotations by 45 degrees  -- 3x3 kernels only */
  if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
    {
      if ( kernel->width == 3 && kernel->height == 3 )
        { /* Rotate a 3x3 square by 45 degree angle: cycle the 8 outer
             elements one step around the centre element. */
          double t  = kernel->values[0];
          kernel->values[0] = kernel->values[3];
          kernel->values[3] = kernel->values[6];
          kernel->values[6] = kernel->values[7];
          kernel->values[7] = kernel->values[8];
          kernel->values[8] = kernel->values[5];
          kernel->values[5] = kernel->values[2];
          kernel->values[2] = kernel->values[1];
          kernel->values[1] = t;
          /* rotate non-centered origin */
          if ( kernel->x != 1 || kernel->y != 1 ) {
            ssize_t x,y;
            x = (ssize_t) kernel->x-1;
            y = (ssize_t) kernel->y-1;
                 if ( x == y  ) x = 0;
            else if ( x == 0  ) x = -y;
            else if ( x == -y ) y = 0;
            else if ( y == 0  ) y = x;
            kernel->x = (ssize_t) x+1;
            kernel->y = (ssize_t) y+1;
          }
          angle = fmod(angle+315.0, 360.0);  /* angle reduced 45 degrees */
          kernel->angle = fmod(kernel->angle+45.0, 360.0);
        }
      else
        perror("Unable to rotate non-3x3 kernel by 45 degrees");
    }
  if ( 45.0 < fmod(angle, 180.0)  && fmod(angle,180.0) <= 135.0 )
    {
      if ( kernel->width == 1 || kernel->height == 1 )
        { /* Do a transpose of a 1 dimensional kernel,
          ** which results in a fast 90 degree rotation of some type.
          */
          ssize_t
            t;
          t = (ssize_t) kernel->width;
          kernel->width = kernel->height;
          kernel->height = (size_t) t;
          t = kernel->x;
          kernel->x = kernel->y;
          kernel->y = t;
          if ( kernel->width == 1 ) {
            angle = fmod(angle+270.0, 360.0);    /* angle reduced 90 degrees */
            kernel->angle = fmod(kernel->angle+90.0, 360.0);
          } else {
            angle = fmod(angle+90.0, 360.0);   /* angle increased 90 degrees */
            kernel->angle = fmod(kernel->angle+270.0, 360.0);
          }
        }
      else if ( kernel->width == kernel->height )
        { /* Rotate a square array of values by 90 degrees: 4-way cyclic
             exchange of mirrored positions, working inward ring by ring. */
          { register ssize_t
              i,j,x,y;

            register MagickRealType
              *k,t;

            k=kernel->values;
            for( i=0, x=(ssize_t) kernel->width-1;  i<=x;   i++, x--)
              for( j=0, y=(ssize_t) kernel->height-1;  j<y;   j++, y--)
                { t                    = k[i+j*kernel->width];
                  k[i+j*kernel->width] = k[j+x*kernel->width];
                  k[j+x*kernel->width] = k[x+y*kernel->width];
                  k[x+y*kernel->width] = k[y+i*kernel->width];
                  k[y+i*kernel->width] = t;
                }
          }
          /* rotate the origin - relative to center of array */
          { register ssize_t x,y;
            x = (ssize_t) (kernel->x*2-kernel->width+1);
            y = (ssize_t) (kernel->y*2-kernel->height+1);
            kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
            kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
          }
          angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
          kernel->angle = fmod(kernel->angle+90.0, 360.0);
        }
      else
        perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
    }
  if ( 135.0 < angle && angle <= 225.0 )
    {
      /* For a 180 degree rotation - also known as a reflection
       * This is actually a very very common operation!
       * Basically all that is needed is a reversal of the kernel data!
       * And a reflection of the origin
       */
      MagickRealType
        t;

      register MagickRealType
        *k;

      ssize_t
        i, j;

      k=kernel->values;
      j=(ssize_t) (kernel->width*kernel->height-1);
      for (i=0;  i < j;  i++, j--)
        t=k[i],  k[i]=k[j],  k[j]=t;

      kernel->x = (ssize_t) kernel->width  - kernel->x - 1;
      kernel->y = (ssize_t) kernel->height - kernel->y - 1;
      angle = fmod(angle-180.0, 360.0);   /* angle+180 degrees */
      kernel->angle = fmod(kernel->angle+180.0, 360.0);
    }
  /* At this point angle should be at least between -45 (315) and +45 degrees
   * In the future some form of non-orthogonal angled rotates could be
   * performed here, possibly with a linear kernel restriction.
   */

  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S c a l e G e o m e t r y K e r n e l I n f o                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleGeometryKernelInfo() takes a geometry argument string, typically
%  provided as a  "-set option:convolve:scale {geometry}" user setting,
%  and modifies the kernel according to the parsed arguments of that setting.
%
%  The first argument (and any normalization flags) are passed to
%  ScaleKernelInfo() to scale/normalize the kernel.  The second argument
%  is then passed to UnityAddKernelInfo() to add a scaled unity kernel
%  into the scaled/normalized kernel.
%
%  The format of the ScaleGeometryKernelInfo method is:
%
%      void ScaleGeometryKernelInfo(KernelInfo *kernel,
%        const double scaling_factor,const MagickStatusType normalize_flags)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to modify
%
%    o geometry:
%        The geometry string to parse, typically from the user provided
%        "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
     const char *geometry)
{
  MagickStatusType
    flags;

  GeometryInfo
    args;

  SetGeometryInfo(&args);
  flags = ParseGeometry(geometry, &args);

#if 0
  /* For Debugging Geometry Input */
  (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
       flags, args.rho, args.sigma, args.xi, args.psi );
#endif

  if ( (flags & PercentValue) != 0 )      /* Handle Percentage flag */
    args.rho *= 0.01,  args.sigma *= 0.01;

  if ( (flags & RhoValue) == 0 )          /* Set Defaults for missing args */
    args.rho = 1.0;
  if ( (flags & SigmaValue) == 0 )
    args.sigma = 0.0;

  /* Scale/Normalize the input kernel (flags select the normalize method) */
  ScaleKernelInfo(kernel, args.rho, (GeometryFlags) flags);

  /* Add Unity Kernel, for blending with original */
  if ( (flags & SigmaValue) != 0 )
    UnityAddKernelInfo(kernel, args.sigma);

  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S c a l e K e r n e l I n f o                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleKernelInfo() scales the given kernel list by the given amount, with or
%  without normalization of the sum of the kernel values (as per given flags).
%
%  By default (no flags given) the values within the kernel is scaled
%  directly using given scaling factor without change.
%
%  If either of the two 'normalize_flags' are given the kernel will first be
%  normalized and then further scaled by the scaling factor value given.
%
%  Kernel normalization ('normalize_flags' given) is designed to ensure that
%  any use of the kernel scaling factor with 'Convolve' or 'Correlate'
%  morphology methods will fall into -1.0 to +1.0 range.  Note that for
%  non-HDRI versions of IM this may cause images to have any negative results
%  clipped, unless some 'bias' is used.
%
%  More specifically.  Kernels which only contain positive values (such as a
%  'Gaussian' kernel) will be scaled so that those values sum to +1.0,
%  ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
%  For Kernels that contain some negative values, (such as 'Sharpen' kernels)
%  the kernel will be scaled by the absolute of the sum of kernel values, so
%  that it will generally fall within the +/- 1.0 range.
%
%  For kernels whose values sum to zero, (such as 'Laplacian' kernels) kernel
%  will be scaled by just the sum of the positive values, so that its output
%  range will again fall into the  +/- 1.0 range.
%
%  For special kernels designed for locating shapes using 'Correlate', (often
%  only containing +1 and -1 values, representing foreground/background
%  matching) a special normalization method is provided to scale the positive
%  values separately to those of the negative values, so the kernel will be
%  forced to become a zero-sum kernel better suited to such searches.
%
%  WARNING: Correct normalization of the kernel assumes that the '*_range'
%  attributes within the kernel structure have been correctly set during the
%  kernels creation.
%
%  NOTE: The values used for 'normalize_flags' have been selected specifically
%  to match the use of geometry options, so that '!' means NormalizeValue, '^'
%  means CorrelateNormalizeValue.  All other GeometryFlags values are ignored.
%
%  The format of the ScaleKernelInfo method is:
%
%      void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
%               const MagickStatusType normalize_flags )
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
%    o scaling_factor:
%             multiply all values (after normalization) by this factor if not
%             zero.  If the kernel is normalized regardless of any flags.
%
%    o normalize_flags:
%             GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue, % and/or PercentValue % */ MagickExport void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,const GeometryFlags normalize_flags) { register double pos_scale, neg_scale; register ssize_t i; /* do the other kernels in a multi-kernel list first */ if ( kernel->next != (KernelInfo *) NULL) ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags); /* Normalization of Kernel */ pos_scale = 1.0; if ( (normalize_flags&NormalizeValue) != 0 ) { if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon ) /* non-zero-summing kernel (generally positive) */ pos_scale = fabs(kernel->positive_range + kernel->negative_range); else /* zero-summing kernel */ pos_scale = kernel->positive_range; } /* Force kernel into a normalized zero-summing kernel */ if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) { pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon ) ? kernel->positive_range : 1.0; neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon ) ? -kernel->negative_range : 1.0; } else neg_scale = pos_scale; /* finialize scaling_factor for positive and negative components */ pos_scale = scaling_factor/pos_scale; neg_scale = scaling_factor/neg_scale; for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++) if (!IsNaN(kernel->values[i])) kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale; /* convolution output range */ kernel->positive_range *= pos_scale; kernel->negative_range *= neg_scale; /* maximum and minimum values in kernel */ kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale; kernel->minimum *= (kernel->minimum >= 0.0) ? 
pos_scale : neg_scale; /* swap kernel settings if user's scaling factor is negative */ if ( scaling_factor < MagickEpsilon ) { double t; t = kernel->positive_range; kernel->positive_range = kernel->negative_range; kernel->negative_range = t; t = kernel->maximum; kernel->maximum = kernel->minimum; kernel->minimum = 1; } return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h o w K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShowKernelInfo() outputs the details of the given kernel defination to % standard error, generally due to a users 'morphology:showkernel' option % request. % % The format of the ShowKernel method is: % % void ShowKernelInfo(const KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % */ MagickPrivate void ShowKernelInfo(const KernelInfo *kernel) { const KernelInfo *k; size_t c, i, u, v; for (c=0, k=kernel; k != (KernelInfo *) NULL; c++, k=k->next ) { (void) FormatLocaleFile(stderr, "Kernel"); if ( kernel->next != (KernelInfo *) NULL ) (void) FormatLocaleFile(stderr, " #%lu", (unsigned long) c ); (void) FormatLocaleFile(stderr, " \"%s", CommandOptionToMnemonic(MagickKernelOptions, k->type) ); if ( fabs(k->angle) >= MagickEpsilon ) (void) FormatLocaleFile(stderr, "@%lg", k->angle); (void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long) k->width,(unsigned long) k->height,(long) k->x,(long) k->y); (void) FormatLocaleFile(stderr, " with values from %.*lg to %.*lg\n", GetMagickPrecision(), k->minimum, GetMagickPrecision(), k->maximum); (void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg", GetMagickPrecision(), k->negative_range, GetMagickPrecision(), k->positive_range); if ( fabs(k->positive_range+k->negative_range) < MagickEpsilon ) (void) FormatLocaleFile(stderr, " (Zero-Summing)\n"); else if ( 
fabs(k->positive_range+k->negative_range-1.0) < MagickEpsilon ) (void) FormatLocaleFile(stderr, " (Normalized)\n"); else (void) FormatLocaleFile(stderr, " (Sum %.*lg)\n", GetMagickPrecision(), k->positive_range+k->negative_range); for (i=v=0; v < k->height; v++) { (void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) v ); for (u=0; u < k->width; u++, i++) if (IsNaN(k->values[i])) (void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan"); else (void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3, GetMagickPrecision(), (double) k->values[i]); (void) FormatLocaleFile(stderr,"\n"); } } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n i t y A d d K e r n a l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel % to the given pre-scaled and normalized Kernel. This in effect adds that % amount of the original image into the resulting convolution kernel. This % value is usually provided by the user as a percentage value in the % 'convolve:scale' setting. % % The resulting effect is to convert the defined kernels into blended % soft-blurs, unsharp kernels or into sharpening kernels. % % The format of the UnityAdditionKernelInfo method is: % % void UnityAdditionKernelInfo(KernelInfo *kernel, const double scale ) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % o scale: % scaling factor for the unity kernel to be added to % the given kernel. 
%
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
  const double scale)
{
  /* Recurse down the multi-kernel list so every kernel in the chain
     receives the same unity addition. */
  if ( kernel->next != (KernelInfo *) NULL)
    UnityAddKernelInfo(kernel->next, scale);

  /* Blending in a scaled unity kernel is just a bump of the origin
     element; afterwards the cached ranges must be recomputed. */
  kernel->values[kernel->x+kernel->y*kernel->width] += scale;
  CalcKernelMetaData(kernel);

  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     Z e r o K e r n e l N a n s                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroKernelNans() replaces any special 'nan' value that may be present in
%  the kernel with a zero value.  This is typically done when the kernel will
%  be used in special hardware (GPU) convolution processors, to simplify
%  matters.
%
%  The format of the ZeroKernelNans method is:
%
%      void ZeroKernelNans (KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
*/
MagickPrivate void ZeroKernelNans(KernelInfo *kernel)
{
  register size_t
    n;

  /* Recurse down the multi-kernel list first. */
  if (kernel->next != (KernelInfo *) NULL)
    ZeroKernelNans(kernel->next);

  /* Replace every 'nan' cell with an explicit zero. */
  for (n=0; n < (kernel->width*kernel->height); n++)
    if (IsNaN(kernel->values[n]))
      kernel->values[n]=0.0;

  return;
}
kvstore_dist_server.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * Copyright (c) 2015 by Contributors
 * \file mxnet_node.h
 * \brief implement mxnet nodes
 */
#ifndef MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#define MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#include <mxnet/c_api.h>
#include <mxnet/kvstore.h>
#include <ps/ps.h>
#include <queue>
#include <string>
#include <mutex>
#include <condition_variable>
#include <memory>
#include <functional>
#include <future>
#include <vector>
#include "../profiler/profiler.h"
#include "../operator/tensor/elemwise_binary_op-inl.h"
#include "../operator/tensor/init_op.h"

namespace mxnet {
namespace kvstore {

// Control commands a worker/frontend can send to this server process.
// maintain same order in frontend.
enum class CommandType {
  kController, kSetMultiPrecision, kStopServer, kSyncMode,
  kSetGradientCompression, kSetProfilerParams
};

// Kinds of push/pull data requests a worker can issue.
enum class RequestType {
  kDefaultPushPull, kRowSparsePushPull, kCompressedPushPull
};

// Decoded form of a data-handle command: the request kind plus the dtype of
// the values being transferred (packed together via Cantor pairing below).
struct DataHandleType {
  RequestType requestType;
  int dtype;
};

/*!
 * Uses Cantor pairing function to generate a unique number given two numbers.
 * This number can also be inverted to find the unique pair whose Cantor value is this number.
 * Ref: https://en.wikipedia.org/wiki/Pairing_function#Cantor_pairing_function
 * \param requestType RequestType
 * \param d dtype integer
 * \return Cantor value of arguments
 */
static int GetCommandType(RequestType requestType, int d) {
  int m = static_cast<int>(requestType);
  // Cantor pairing of (m, d): bijective, so the pair can be recovered later
  // by DepairDataHandleType().
  return (((m + d) * (m + d + 1)) / 2) + d;
}

/*!
 * Unpairs Cantor value and finds the two integers used to pair.
 * Then returns DataHandleType object with those numbers.
 * \param cmd DataHandleCommand generated by GetCommandType function
 * \return DataHandleType
 */
static DataHandleType DepairDataHandleType(int cmd) {
  // Invert the Cantor pairing: w indexes the diagonal, t is its base value.
  int w = std::floor((std::sqrt(8 * cmd + 1) - 1)/2);
  int t = ((w * w) + w) / 2;
  int y = cmd - t;
  int x = w - y;
  CHECK_GE(x, 0);
  CHECK_GE(y, 0);
  DataHandleType type;
  type.requestType = static_cast<RequestType>(x);
  type.dtype = y;
  return type;
}

/**
 * \brief executor runs a function using the thread called \ref Start
 */
class Executor {
 public:
  /**
   * \brief start the executor
   */
  void Start() {
    std::unique_lock<std::mutex> lk(mu_);
    while (true) {
      cond_.wait(lk, [this]{return !queue_.empty();});
      Block blk = std::move(queue_.front());
      queue_.pop();
      // Run the function without holding the lock so Exec() callers
      // can enqueue concurrently.
      lk.unlock();
      if (blk.f) {
        blk.f(); blk.p->set_value();
      } else {
        // An empty function is the sentinel pushed by Stop():
        // acknowledge it, then exit the loop.
        blk.p->set_value(); break;
      }
      lk.lock();
    }
  }

  /**
   * \brief function
   */
  typedef std::function<void()> Func;

  /**
   * \brief let the thread called \ref Start to exec a function.
   * threadsafe
   */
  void Exec(const Func& func) {
    Block blk(func);
    auto fut = blk.p->get_future();
    {
      std::lock_guard<std::mutex> lk(mu_);
      queue_.push(std::move(blk));
      cond_.notify_one();
    }
    // Block the caller until the Start() thread has run the function.
    fut.wait();
  }

  /**
   * \brief stop the thread, threadsafe
   */
  void Stop() {
    Exec(Func());
  }

 private:
  struct Block {
    explicit Block(const Func& func) : f(func), p(std::make_shared<std::promise<void>>()) { }
    Func f;
    std::shared_ptr<std::promise<void>> p;
  };
  std::queue<Block> queue_;
  std::mutex mu_;
  std::condition_variable cond_;
};

class KVStoreDistServer {
 public:
  KVStoreDistServer() {
    using namespace std::placeholders;
    ps_server_ = new ps::KVServer<char>(0);
    // Control messages (commands) and data messages (push/pull) arrive
    // through two different ps-lite handlers.
    static_cast<ps::SimpleApp*>(ps_server_)->set_request_handle(
        std::bind(&KVStoreDistServer::CommandHandle, this, _1, _2));
    ps_server_->set_request_handle(
        std::bind(&KVStoreDistServer::DataHandleEx, this, _1, _2, _3));
    sync_mode_ = false;
    gradient_compression_ = std::make_shared<GradientCompression>();
    log_verbose_ = dmlc::GetEnv("MXNET_KVSTORE_DIST_ROW_SPARSE_VERBOSE", false);
  }

  ~KVStoreDistServer() {
    profiler::Profiler::Get()->SetState(profiler::Profiler::ProfilerState(0));
    delete ps_server_;
  }

  void set_controller(const KVStore::Controller& controller) {
    CHECK(controller);
    controller_ = controller;
  }

  void set_updater(const KVStore::Updater& updater)  {
    CHECK(updater);
    updater_ = updater;
  }

  /**
   * \brief blocked until received the command \a kSyncMode
   */
  void Run() {
    exec_.Start();
  }

 private:
  struct UpdateBuf {
    // pending worker requests to be answered once the update is applied
    std::vector<ps::KVMeta> request;
    NDArray merged;
    // temp_array is used to cast received values as float32 for computation if required
    NDArray temp_array;
  };

  void CommandHandle(const ps::SimpleData& recved, ps::SimpleApp* app) {
    CommandType recved_type = static_cast<CommandType>(recved.head);
    switch (recved_type) {
      case CommandType::kStopServer:
        exec_.Stop();
        break;
      case CommandType::kSyncMode:
        sync_mode_ = true;
        break;
      case CommandType::kSetGradientCompression:
        gradient_compression_->DecodeParams(recved.body);
        break;
      case
CommandType::kSetProfilerParams:
        // last char is the type of profiler command
        ProcessServerProfilerCommands(static_cast<KVStoreServerProfilerCommand>
                                                  (recved.body.back() - '0'),
                                      recved.body);
        break;
      case CommandType::kSetMultiPrecision:
        // uses value 1 for message id from frontend
        if (!multi_precision_) {
          multi_precision_ = true;
          CreateMultiPrecisionCopies();
        }
        break;
      case CommandType::kController:
        // this uses value 0 for message id from frontend
        // let the main thread to execute ctrl, which is necessary for python
        exec_.Exec([this, recved]() {
            CHECK(controller_);
            controller_(recved.head, recved.body);
          });
        break;
    }
    app->Response(recved);
  }

  /*
   * For keys already initialized, if necessary create stored_realt.
   * This will only be used if by some wrong usage of kvstore,
   * some keys are initialized before optimizer is set.
   */
  void CreateMultiPrecisionCopies() {
    for (auto const &stored_entry : store_) {
      const int key = stored_entry.first;
      const NDArray &stored = stored_entry.second;
      if (stored.dtype() != mshadow::kFloat32) {
        // Mirror the non-fp32 value as a float32 copy in store_realt_,
        // preserving its storage type (dense vs row-sparse).
        auto &stored_realt = store_realt_[key];
        if (stored.storage_type() == kRowSparseStorage) {
          stored_realt = NDArray(kRowSparseStorage, stored.shape(), stored.ctx(),
                                 true, mshadow::kFloat32);
        } else {
          stored_realt = NDArray(stored.shape(), stored.ctx(), false, mshadow::kFloat32);
        }

        auto &update = update_buf_[key];
        if (!update.merged.is_none()) {
          if (update.merged.storage_type() == kRowSparseStorage) {
            update.merged = NDArray(kRowSparseStorage, update.merged.shape(),
                                    update.merged.ctx(), true, mshadow::kFloat32);
          } else {
            update.merged = NDArray(update.merged.shape(), update.merged.ctx(),
                                    false, mshadow::kFloat32);
          }
        }
        CHECK(update.request.size() == 0)
          << ps::MyRank() << "Multiprecision mode can not be set while pushes are underway."
          << "Please set optimizer before pushing keys." << key << " " << update.request.size();
        CopyFromTo(stored, stored_realt);
      }
    }
    // Make sure all the copies above have materialized before returning.
    for (auto const &stored_realt_entry : store_realt_) {
      stored_realt_entry.second.WaitToRead();
    }
  }

  void ProcessServerProfilerCommands(KVStoreServerProfilerCommand type, const std::string& body) {
    switch (type) {
      case KVStoreServerProfilerCommand::kSetConfig:
        // strip the trailing command-type character before parsing
        SetProfilerConfig(body.substr(0, body.size() - 1));
        break;
      case KVStoreServerProfilerCommand::kState:
        MXSetProfilerState(static_cast<int>(body.front() - '0'));
        break;
      case KVStoreServerProfilerCommand::kPause:
        MXProfilePause(static_cast<int>(body.front() - '0'));
        break;
      case KVStoreServerProfilerCommand::kDump:
        MXDumpProfile(static_cast<int>(body.front() - '0'));
        break;
    }
  }

  // Parse a "key:value,key:value,..." profiler config string from the worker
  // and forward it to MXSetProfilerConfig as C string arrays.
  void SetProfilerConfig(std::string params_str) {
    std::vector<std::string> elems;
    mxnet::kvstore::split(params_str, ',', std::back_inserter(elems));
    std::vector<const char*> ckeys;
    std::vector<const char*> cvals;
    ckeys.reserve(elems.size());
    cvals.reserve(elems.size());
    for (size_t i=0; i < elems.size(); i++) {
      std::vector<std::string> parts;
      mxnet::kvstore::split(elems[i], ':', std::back_inserter(parts));
      CHECK_EQ(parts.size(), 2) << "Improper profiler config passed from worker";
      CHECK(!parts[0].empty()) << "ProfilerConfig parameter is empty";
      CHECK(!parts[1].empty()) << "ProfilerConfig value is empty for parameter "<< parts[0];
      if (parts[0] == "filename") {
        // prefix the dump filename with this server's rank so ranks don't clobber each other
        parts[1] = "rank" + std::to_string(ps::MyRank()) + "_" + parts[1];
      }
      char* ckey = new char[parts[0].length() + 1];
      std::snprintf(ckey, parts[0].length() + 1, "%s", parts[0].c_str());
      ckeys.push_back(ckey);
      char* cval = new char[parts[1].length() + 1];
      std::snprintf(cval, parts[1].length() + 1, "%s", parts[1].c_str());
      cvals.push_back(cval);
    }
    MXSetProfilerConfig(elems.size(), &ckeys[0], &cvals[0]);
    for (size_t i=0; i < ckeys.size(); i++) {
      delete[] ckeys[i];
      delete[] cvals[i];
    }
  }

  // Dispatch an incoming data request to the handler for its request type.
  void DataHandleEx(const ps::KVMeta& req_meta,
                    const ps::KVPairs<char>& req_data,
                    ps::KVServer<char>* server) {
    DataHandleType type =
DepairDataHandleType(req_meta.cmd);
    switch (type.requestType) {
      case RequestType::kRowSparsePushPull:
        DataHandleRowSparse(type, req_meta, req_data, server);
        break;
      case RequestType::kCompressedPushPull:
        DataHandleCompressed(type, req_meta, req_data, server);
        break;
      case RequestType::kDefaultPushPull:
        DataHandleDefault(type, req_meta, req_data, server);
        break;
    }
  }

  // True when values of this dtype are mirrored as float32 in store_realt_.
  inline bool has_multi_precision_copy(const DataHandleType type) {
    return multi_precision_ && type.dtype != mshadow::kFloat32;
  }

  // Apply the accumulated update for 'key' once all workers have pushed
  // (sync mode) or immediately (async mode), then answer pending requests.
  inline void ApplyUpdates(const DataHandleType type, const int key,
                           UpdateBuf *update_buf, ps::KVServer<char>* server) {
    if (!sync_mode_ || update_buf->request.size() == (size_t) ps::NumWorkers()) {
      // let the main thread to execute updater_, which is necessary for python
      auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key];
      auto& update = sync_mode_ ? update_buf->merged : update_buf->temp_array;
      if (updater_ && key < KVStore::GetMaxAllowedKeyForUpdate()) {
        exec_.Exec([this, key, &update, &stored](){
            CHECK(updater_);
            updater_(key, update, &stored);
          });
      } else if(sync_mode_ && updater_ && key >= KVStore::GetMaxAllowedKeyForUpdate()){
        // push divide operator to average the value
        // TODO is it possible to do in place???
        // TODO should we average over number of GPUs
        stored = update_buf->merged / ((size_t) ps::NumWorkers());
      } else {
        CHECK(sync_mode_) << "Updater needs to be set for async mode";
        // if no updater, just copy
        CopyFromTo(update_buf->merged, &stored);
      }
      if (log_verbose_)  {
        LOG(INFO) << "sent response to " << update_buf->request.size() << " workers";
      }
      for (const auto& req : update_buf->request) {
        server->Response(req);
      }
      update_buf->request.clear();
      // keep the original-dtype copy in sync with the float32 master copy
      if (has_multi_precision_copy(type)) CopyFromTo(stored, store_[key]);
      stored.WaitToRead();
    } else {
      update_buf->merged.WaitToRead();
    }
  }

  // Translate the per-row ps keys of a row-sparse push/pull into row indices
  // relative to the master key.
  void DecodeRowIds(const ps::SArray<ps::Key> &keys, int64_t *indices,
                    const int64_t master_key, const int64_t num_rows) {
    indices[0] = 0;
    for (int64_t i = 1; i <= num_rows; i++) {
      int key = DecodeKey(keys[i]);
      auto row_id = key - master_key;
      indices[i - 1] = row_id;
    }
  }

  void AccumulateRowSparseGrads(const DataHandleType type,
                                const NDArray& recved,
                                UpdateBuf* updateBuf) {
    NDArray out(kRowSparseStorage, updateBuf->merged.shape(), Context(), true,
                has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
    if (has_multi_precision_copy(type)) CopyFromTo(recved, updateBuf->temp_array);
    const NDArray& to_merge = has_multi_precision_copy(type) ? updateBuf->temp_array : recved;
    // accumulate row_sparse gradients
    using namespace mshadow;
    Engine::Get()->PushAsync(
    [to_merge, updateBuf, out](RunContext ctx, Engine::CallbackOnComplete on_complete) {
      op::ElemwiseBinaryOp::ComputeEx<cpu, op::mshadow_op::plus>(
        {}, {}, {to_merge, updateBuf->merged}, {kWriteTo}, {out});
      on_complete();
    }, to_merge.ctx(), {to_merge.var(), updateBuf->merged.var()}, {out.var()},
    FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
    CopyFromTo(out, &(updateBuf->merged), 0);
    updateBuf->merged.WaitToRead();
  }

  // Answer a row-sparse pull: copy each requested row of the stored value
  // into a contiguous response buffer.
  void RowSparsePullResponse(const DataHandleType type,
                             const int master_key,
                             const size_t num_rows,
                             const ps::KVMeta& req_meta,
                             const ps::KVPairs<char>& req_data,
                             ps::KVServer<char>* server) {
    if (log_verbose_) LOG(INFO) << "pull: " << master_key;
    ps::KVPairs<char> response;
    if (num_rows == 0) {
      // nothing to pull: reply with zero-length values for every key
      std::vector<int> lens(req_data.keys.size(), 0);
      response.keys = req_data.keys;
      response.lens.CopyFrom(lens.begin(), lens.end());
      server->Response(req_meta, response);
      return;
    }
    const NDArray& stored = store_[master_key];
    if (has_multi_precision_copy(type)) stored.WaitToRead();
    CHECK(!stored.is_none()) << "init " << master_key << " first";
    auto shape = stored.shape();
    auto unit_len = shape.ProdShape(1, shape.ndim());
    const int num_bytes = mshadow::mshadow_sizeof(type.dtype);
    const int unit_size = unit_len * num_bytes;
    const char* data = static_cast<char *> (stored.data().dptr_);
    auto len = num_rows * unit_size;
    // concat values
    response.vals.resize(len);
    #pragma omp parallel for
    for (size_t i = 1; i <= num_rows; i++) {
      // keys[0] is the master key; per-row keys start at index 1
      int key = DecodeKey(req_data.keys[i]);
      int64_t row_id = key - master_key;
      const auto src = data + row_id * unit_size;
      auto begin = (i - 1) * unit_size;
      auto end = i * unit_size;
      response.vals.segment(begin, end).CopyFrom(src, unit_size);
    }
    // setup response
    response.keys = req_data.keys;
    std::vector<int> lens(req_data.keys.size(), unit_len);
    lens[0] = 0;
    response.lens.CopyFrom(lens.begin(), lens.end());
    server->Response(req_meta,
response); } void InitRowSparseStored(const DataHandleType type, const int master_key, const size_t num_rows, const ps::KVMeta& req_meta, const ps::KVPairs<char>& req_data, ps::KVServer<char>* server) { auto& stored = has_multi_precision_copy(type) ? store_realt_[master_key] : store_[master_key]; int dtype = type.dtype; int num_bytes = mshadow::mshadow_sizeof(dtype); auto unit_len = req_data.lens[1] / num_bytes; CHECK_GT(unit_len, 0); size_t ds[] = {num_rows, (size_t) unit_len}; TShape dshape(ds, ds + 2); CHECK_EQ(req_data.vals.size(), num_rows * unit_len * num_bytes); TBlob recv_blob; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask); }) NDArray recved = NDArray(recv_blob, 0); stored = NDArray(kRowSparseStorage, dshape, Context(), true, has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype); if (has_multi_precision_copy(type)) { store_[master_key] = NDArray(kRowSparseStorage, dshape, Context(), true, type.dtype); } Engine::Get()->PushAsync( [this, recved, stored, type](RunContext ctx, Engine::CallbackOnComplete on_complete) { NDArray rsp = stored; stored.CheckAndAlloc({mshadow::Shape1(recved.shape()[0])}); mshadow::Stream<cpu> *s = ctx.get_stream<cpu>(); using namespace mxnet::op; nnvm::dim_t nnr = rsp.shape()[0]; MSHADOW_IDX_TYPE_SWITCH(rsp.aux_type(rowsparse::kIdx), IType, { IType* idx = rsp.aux_data(rowsparse::kIdx).dptr<IType>(); mxnet_op::Kernel<PopulateFullIdxRspKernel, cpu>::Launch(s, nnr, idx); }); TBlob rsp_data = rsp.data(); // copies or casts as appropriate ndarray::Copy<cpu, cpu>(recved.data(), &rsp_data, Context(), Context(), RunContext()); on_complete(); }, recved.ctx(), {recved.var()}, {stored.var()}, FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME); if (has_multi_precision_copy(type)) { CopyFromTo(stored, store_[master_key]); store_[master_key].WaitToRead(); } stored.WaitToRead(); server->Response(req_meta); } void DataHandleRowSparse(const DataHandleType 
type, const ps::KVMeta& req_meta, const ps::KVPairs<char>& req_data, ps::KVServer<char>* server) { int master_key = DecodeKey(req_data.keys[0]); auto num_rows = req_data.keys.size() - 1; auto& stored = store_[master_key]; if (req_meta.push) { CHECK_GT(req_data.lens.size(), 0) << "req_data.lens cannot be empty"; CHECK_EQ(req_data.lens[0], 0); if (stored.is_none()) { if (log_verbose_) LOG(INFO) << "initial push: " << master_key; // initialization CHECK_GT(num_rows, 0) << "init with empty data is not supported"; InitRowSparseStored(type, master_key, num_rows, req_meta, req_data, server); return; } else { if (log_verbose_) LOG(INFO) << "push: " << master_key << " " << req_data.keys; auto& updates = update_buf_[master_key]; if (sync_mode_ && updates.merged.is_none()) { updates.merged = NDArray(kRowSparseStorage, stored.shape(), Context(), true, has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype); } if (has_multi_precision_copy(type) && updates.temp_array.is_none()) { updates.temp_array = NDArray(kRowSparseStorage, stored.shape(), Context(), false, mshadow::kFloat32); } if (num_rows == 0) { if (sync_mode_) { if (updates.request.empty()) { // reset to zeros int merged_dtype = has_multi_precision_copy(type) ? 
mshadow::kFloat32 : type.dtype; updates.merged = NDArray(kRowSparseStorage, stored.shape(), Context(), true, merged_dtype); } // else nothing to aggregate updates.request.push_back(req_meta); ApplyUpdates(type, master_key, &updates, server); } else { server->Response(req_meta); } } else { auto unit_len = req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype); CHECK_GT(unit_len, 0); // indices std::vector<int64_t> indices(num_rows); DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows); // data TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask); size_t ds[] = {(size_t) num_rows, (size_t) unit_len}; TShape dshape(ds, ds + 2); TBlob recv_blob; MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, { recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask); }) // row_sparse NDArray NDArray recved(kRowSparseStorage, stored.shape(), recv_blob, {idx_blob}, 0); if (updates.request.empty()) { if (sync_mode_) { CopyFromTo(recved, updates.merged); } else { if (has_multi_precision_copy(type)) { CopyFromTo(recved, updates.temp_array); } else { updates.temp_array = recved; } } } else { CHECK(sync_mode_); AccumulateRowSparseGrads(type, recved, &updates); } updates.request.push_back(req_meta); ApplyUpdates(type, master_key, &updates, server); } } } else { // pull RowSparsePullResponse(type, master_key, num_rows, req_meta, req_data, server); } } void DefaultStorageResponse(const DataHandleType type, const int key, const ps::KVMeta& req_meta, const ps::KVPairs<char> &req_data, ps::KVServer<char>* server) { ps::KVPairs<char> response; const NDArray& stored = store_[key]; CHECK(!stored.is_none()) << "init " << key << " first"; // as server returns when store_realt is ready in this case if (has_multi_precision_copy(type)) stored.WaitToRead(); auto len = stored.shape().Size() * mshadow::mshadow_sizeof(stored.dtype()); response.keys = req_data.keys; response.lens = {len}; // TODO(mli) try to remove this CopyFrom 
response.vals.CopyFrom(static_cast<const char*>(stored.data().dptr_), len); server->Response(req_meta, response); } void DataHandleCompressed(const DataHandleType type, const ps::KVMeta& req_meta, const ps::KVPairs<char> &req_data, ps::KVServer<char>* server) { CHECK_EQ(type.dtype, mshadow::kFloat32) << "Gradient compression is currently supported for fp32 only"; if (req_meta.push) { // there used several WaitToRead, this is because \a recved's memory // could be deallocated when this function returns. so we need to make sure // the operators with \a NDArray are actually finished // first for dummy key which represents original size of array, whose len is 0 CHECK_EQ(req_data.keys.size(), (size_t)2); CHECK_EQ(req_data.lens.size(), (size_t)2); CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[1]); int original_size = DecodeKey(req_data.keys[0]); int key = DecodeKey(req_data.keys[1]); auto& stored = store_[key]; size_t ds[] = {(size_t)req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype)}; TShape dshape(ds, ds + 1); TBlob recv_blob(reinterpret_cast<real_t*>(req_data.vals.data()), dshape, cpu::kDevMask); NDArray recved = NDArray(recv_blob, 0); NDArray decomp_buf = decomp_buf_[key]; dshape = TShape{(int64_t) original_size}; if (decomp_buf.is_none()) { decomp_buf = NDArray(dshape, Context()); } if (stored.is_none()) { stored = NDArray(dshape, Context()); gradient_compression_->Dequantize(recved, &stored, 0); server->Response(req_meta); stored.WaitToRead(); } else if (sync_mode_) { // synced push auto& merged = update_buf_[key]; if (merged.merged.is_none()) { merged.merged = NDArray(dshape, Context()); } if (merged.request.size() == 0) { gradient_compression_->Dequantize(recved, &merged.merged, 0); } else { gradient_compression_->Dequantize(recved, &decomp_buf, 0); merged.merged += decomp_buf; } merged.request.push_back(req_meta); ApplyUpdates(type, key, &merged, server); } else { // async push gradient_compression_->Dequantize(recved, &decomp_buf, 0); 
exec_.Exec([this, key, &decomp_buf, &stored]() { CHECK(updater_); updater_(key, decomp_buf, &stored); }); server->Response(req_meta); stored.WaitToRead(); } } else { // pull CHECK_EQ(req_data.keys.size(), (size_t)1); CHECK_EQ(req_data.lens.size(), (size_t)0); int key = DecodeKey(req_data.keys[0]); DefaultStorageResponse(type, key, req_meta, req_data, server); } } void DataHandleDefault(const DataHandleType type, const ps::KVMeta& req_meta, const ps::KVPairs<char> &req_data, ps::KVServer<char>* server) { // do some check CHECK_EQ(req_data.keys.size(), (size_t)1); if (req_meta.push) { CHECK_EQ(req_data.lens.size(), (size_t)1); CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[0]); } int key = DecodeKey(req_data.keys[0]); auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key]; // there used several WaitToRead, this is because \a recved's memory // could be deallocated when this function returns. so we need to make sure // the operators with \a NDArray are actually finished if (req_meta.push) { size_t ds[] = {(size_t) req_data.lens[0] / mshadow::mshadow_sizeof(type.dtype)}; TShape dshape(ds, ds + 1); TBlob recv_blob; MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, { recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask); }) NDArray recved = NDArray(recv_blob, 0); if (stored.is_none()) { // initialization stored = NDArray(dshape, Context(), false, has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype); CopyFromTo(recved, &stored, 0); server->Response(req_meta); if (has_multi_precision_copy(type)) { auto& stored_dtype = store_[key]; stored_dtype = NDArray(dshape, Context(), false, type.dtype); CopyFromTo(stored, stored_dtype); stored_dtype.WaitToRead(); } stored.WaitToRead(); } else { auto &updates = update_buf_[key]; if (sync_mode_ && updates.merged.is_none()) { updates.merged = NDArray(dshape, Context(), false, has_multi_precision_copy(type) ? 
mshadow::kFloat32 : type.dtype); } if (has_multi_precision_copy(type) && updates.temp_array.is_none()) { updates.temp_array = NDArray(dshape, Context(), false, mshadow::kFloat32); } if (updates.request.empty()) { if (sync_mode_) { CopyFromTo(recved, updates.merged); } else { if (has_multi_precision_copy(type)) { CopyFromTo(recved, updates.temp_array); } else { updates.temp_array = recved; } } } else { CHECK(sync_mode_); if (has_multi_precision_copy(type)) { CopyFromTo(recved, updates.temp_array); updates.merged += updates.temp_array; } else { updates.merged += recved; } } updates.request.push_back(req_meta); ApplyUpdates(type, key, &updates, server); } } else { DefaultStorageResponse(type, key, req_meta, req_data, server); } } int DecodeKey(ps::Key key) { auto kr = ps::Postoffice::Get()->GetServerKeyRanges()[ps::MyRank()]; return key - kr.begin(); } /** * \brief user defined mode for push */ bool sync_mode_; KVStore::Controller controller_; KVStore::Updater updater_; /** * \brief store_ contains the value at kvstore for each key */ std::unordered_map<int, NDArray> store_; std::unordered_map<int, NDArray> store_realt_; /** * \brief merge_buf_ is a buffer used if sync_mode is true. It represents * values from different workers being merged. The store will be updated * to this value when values from all workers are pushed into this buffer. */ std::unordered_map<int, UpdateBuf> update_buf_; /** * \brief decomp_buf_ is a buffer into which compressed values are * decompressed before merging to the store. used when compress_!='none' */ std::unordered_map<int, NDArray> decomp_buf_; Executor exec_; ps::KVServer<char>* ps_server_; // whether to LOG verbose information bool log_verbose_; /* * \brief whether to use multi precision mode. * in multi precision mode, all weights are stored as float32. * any gradient received will be cast to float32 before accumulation and updating of weights. */ bool multi_precision_; /** * \brief gradient compression object. 
* starts with none, used after SetGradientCompression sets the type * currently there is no support for unsetting gradient compression */ std::shared_ptr<kvstore::GradientCompression> gradient_compression_; }; } // namespace kvstore } // namespace mxnet #endif // MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
main.c
#include <stdio.h>
#include <omp.h>
#include <time.h>
#include <stdlib.h>

void mergesort(int arr[], int p, int f);
void mergeparallel(int arr[], int p, int f, int stop);
void merge(int vet[], int p, int m, int f);

const int max_threads = 256;
const int stopmax = 32;

/*
 * Idea: every recursion level spawns 2 OpenMP sections, one per half of the
 * array.  The "stop" budget is halved at each level; once it reaches 1 the
 * number of spawned recursion levels equals the thread budget and the
 * recursion continues sequentially from there on.
 */

/* Swap elements i and j of array a. */
void swap(int *a, int i, int j)
{
    int t = a[i];
    a[i] = a[j];
    a[j] = t;
}

/*
 * Partition a[left..right] around the element at index pivo.
 * Returns the pivot's final position: everything left of it is smaller,
 * everything right of it is >= the pivot.
 */
int partition(int *a, int left, int right, int pivo)
{
    int pos, i;

    swap(a, pivo, right);           /* park the pivot at the right end */
    pos = left;

    for (i = left; i < right; i++) {
        if (a[i] < a[right]) {
            swap(a, i, pos);
            pos++;
        }
    }

    swap(a, right, pos);            /* move the pivot into its final slot */
    return pos;
}

/*
 * Parallel quicksort: while stop > 1 each half is handed to an OpenMP
 * section (stop halves per level); once the budget is exhausted the call
 * tree already matches the thread count and recursion runs sequentially.
 */
void quickParallel(int *a, int left, int right, int stop)
{
    if (left < right) {
        int pivo = (left + right) / 2;
        int pos = partition(a, left, right, pivo);

        if (stop > 1) {
            /* still inside the parallel budget: one section per half */
#pragma omp parallel sections
            {
#pragma omp section
                quickParallel(a, left, pos - 1, stop / 2);
#pragma omp section
                quickParallel(a, pos + 1, right, stop / 2);
            }
        } else {
            /* budget exhausted: plain sequential recursion from here on */
            quickParallel(a, left, pos - 1, stop);
            quickParallel(a, pos + 1, right, stop);
        }
    }
}

/* Sequential quicksort used as the timing baseline. */
void quickSequential(int *a, int left, int right)
{
    if (left < right) {
        int pivo = (left + right) / 2;
        int pos = partition(a, left, right, pivo);

        quickSequential(a, left, pos - 1);
        quickSequential(a, pos + 1, right);
    }
}

/* Parallel mergesort with the same "stop" budget scheme as quickParallel. */
void mergeparallel(int arr[], int p, int f, int stop)
{
    if (p < f) {
        int m = (f + p) / 2;

        if (stop > 1) {
#pragma omp parallel sections
            {
#pragma omp section
                mergeparallel(arr, p, m, stop / 2);
#pragma omp section
                mergeparallel(arr, m + 1, f, stop / 2);
            }
        } else {
            mergeparallel(arr, p, m, stop);
            mergeparallel(arr, m + 1, f, stop);
        }

        merge(arr, p, m, f);
    }
}

/* Sequential mergesort used as the timing baseline. */
void mergesort(int arr[], int p, int f)
{
    if (p < f) {
        int m = (f + p) / 2;

        mergesort(arr, p, m);
        mergesort(arr, m + 1, f);
        merge(arr, p, m, f);
    }
}

/* Merge the sorted halves vet[p..m] and vet[m+1..f] back into vet. */
void merge(int vet[], int p, int m, int f)
{
    int com1 = p, com2 = m + 1, comAux = 0, tam = f - p + 1;
    int *vetAux = malloc(tam * sizeof *vetAux);

    /* fixed: vetAux was dereferenced without checking the allocation */
    if (vetAux == NULL) {
        fprintf(stderr, "merge: out of memory\n");
        exit(EXIT_FAILURE);
    }

    while (com1 <= m && com2 <= f) {
        if (vet[com1] < vet[com2])
            vetAux[comAux++] = vet[com1++];
        else
            vetAux[comAux++] = vet[com2++];
    }
    while (com1 <= m)
        vetAux[comAux++] = vet[com1++];
    while (com2 <= f)
        vetAux[comAux++] = vet[com2++];

    for (comAux = p; comAux <= f; comAux++)
        vet[comAux] = vetAux[comAux - p];

    free(vetAux);
}
///Mergesort////////////////////////////////////////////////////////

/* Return 1 if array[0..size-1] is in non-decreasing order, 0 otherwise. */
int IsSort(int *array, int size)
{
    int i;
    for (i = 1; i < size; i++)
        if (array[i - 1] > array[i])
            return 0;
    return 1;
}

/* Refill arr with size pseudo-random values in [0, size). */
void shuff(int arr[], int size)
{
    /* fixed: seed the PRNG once instead of on every call -- repeated
     * srand(time(NULL)) within the same second regenerated the exact same
     * "random" array, so consecutive benchmark runs sorted identical data. */
    static int seeded = 0;

    if (!seeded) {
        srand((unsigned) time(NULL));
        seeded = 1;
    }

    for (int i = 0; i < size; i++)
        arr[i] = rand() % size;
}

/*
 * Benchmark driver: times the parallel and sequential variants of quicksort
 * and mergesort for every combination of thread count (powers of two up to
 * max_threads), nested parallelism on/off, and "stop" budget.
 */
int main(int argc, char** argv)
{
    int size = 1000000, *array;
    double start, end;
    int MAX_THREADS = max_threads;  // Caution!

    array = malloc(size * sizeof *array);
    if (array == NULL) {            /* fixed: allocation was unchecked */
        fprintf(stderr, "main: out of memory\n");
        return EXIT_FAILURE;
    }

    for (int threads = 1; threads <= MAX_THREADS; threads *= 2) {
        omp_set_num_threads(threads);   // threads equal to 1 should be sequential...
        for (int nested = 0; nested <= 1; nested++) {   // false or true
            omp_set_nested(nested);
            for (int stop = 2; stop <= stopmax; stop *= 2) {
                shuff(array, size);
                start = omp_get_wtime();
                quickParallel(array, 0, size - 1, stop);
                end = omp_get_wtime();
                printf("Quicksort\n");
                printf("Tempo: %.3f threads: %d nested: %d stop: %d\n", end - start, threads, nested, stop);
                if (IsSort(array, size) == 1)
                    printf("Result: Sorted\n");
                else
                    printf("Result: Not Sorted\n");

                shuff(array, size);
                start = omp_get_wtime();
                quickSequential(array, 0, size - 1);
                end = omp_get_wtime();
                printf("Quicksort sequencial\n");
                printf("Tempo: %.3f threads: %d nested: %d stop: %d\n", end - start, threads, nested, stop);
                if (IsSort(array, size) == 1)
                    printf("Result: Sorted\n");
                else
                    printf("Result: Not Sorted\n");
            }
        }
    }

    printf("===========================================================\n");
    printf("===========================================================\n");
    printf("===========================================================\n");

    MAX_THREADS = max_threads;

    for (int threads = 1; threads <= MAX_THREADS; threads *= 2) {
        omp_set_num_threads(threads);
        for (int nested = 0; nested <= 1; nested++) {
            omp_set_nested(nested);
            for (int stop = 2; stop <= stopmax; stop *= 2) {
                shuff(array, size);
                start = omp_get_wtime();
                mergeparallel(array, 0, size - 1, stop);
                end = omp_get_wtime();
                printf("Mergesort\n");
                printf("Tempo: %.3f threads: %d nested: %d stop: %d\n", end - start, threads, nested, stop);
                if (IsSort(array, size) == 1)
                    printf("Result: Sorted\n");
                else
                    printf("Result: Not Sorted\n");

                shuff(array, size);
                start = omp_get_wtime();
                mergesort(array, 0, size - 1);
                end = omp_get_wtime();
                printf("Mergesort sequencial\n");
                printf("Tempo: %.3f threads: %d nested: %d stop: %d\n", end - start, threads, nested, stop);
                if (IsSort(array, size) == 1)
                    printf("Result: Sorted\n");
                else
                    printf("Result: Not Sorted\n");
            }
        }
    }

    free(array);                    /* fixed: array was never released */
    return 0;
}

/* Answers area (translated from Portuguese)
 *
 * Redirect the output with >> to build the results table.
 * Array size: 1,000,000 elements.
 * Results: the best times for both algorithms were between 0.040 and 0.050
 * seconds with 2 threads; the "nested" attribute did not influence the
 * result significantly.
 */
SmoothPointValuesFilter.h
/*
 * MIT License
 *
 * Copyright (c) 2018-2019 Benjamin Köhler
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#pragma once

#ifndef BK_SMOOTHPOINTVALUESFILTER_H
#define BK_SMOOTHPOINTVALUESFILTER_H

#include <vector>

#ifdef BK_EMIT_PROGRESS

    #include <bk/Progress>
    #include <bk/Localization>

#endif

#include <bkDataset/lib/bkDataset_export.h>

namespace bk
{
  /**
   * Iteratively smooths per-point values of a data object: each pass moves
   * every value towards the average of its topological neighbors, scaled by
   * _lambda on even passes and by _mu on odd passes.  The alternating
   * lambda/mu scheme matches Taubin-style smoothing (typically lambda > 0,
   * mu < 0 to counter shrinkage) -- NOTE(review): the actual default values
   * are set in the .cpp; confirm the intended lambda/mu signs there.
   */
  class BKDATASET_EXPORT SmoothPointValuesFilter
  {
      //====================================================================================================
      //===== DEFINITIONS
      //====================================================================================================
      using self_type = SmoothPointValuesFilter;

      //====================================================================================================
      //===== MEMBERS
      //====================================================================================================
      unsigned int _num_iterations;   // number of smoothing passes performed by apply()
      double _lambda;                 // step factor used on even passes
      double _mu;                     // step factor used on odd passes
      //====================================================================================================
      //===== CONSTRUCTORS & DESTRUCTOR
      //====================================================================================================
    public:
      /// @{ -------------------------------------------------- CTOR
      SmoothPointValuesFilter();
      SmoothPointValuesFilter(const self_type& other);
      SmoothPointValuesFilter(self_type&& other) noexcept;
      /// @}

      /// @{ -------------------------------------------------- DTOR
      ~SmoothPointValuesFilter();
      /// @}

      //====================================================================================================
      //===== GETTER
      //====================================================================================================
      /// @{ -------------------------------------------------- GET NUM ITERATIONS
      [[nodiscard]] unsigned int num_iterations() const;
      /// @}

      /// @{ -------------------------------------------------- GET LAMBDA
      [[nodiscard]] double lambda() const;
      /// @}

      /// @{ -------------------------------------------------- GET MU
      [[nodiscard]] double mu() const;
      /// @}

      //====================================================================================================
      //===== SETTER
      //====================================================================================================
      /// @{ -------------------------------------------------- OPERATOR =
      [[maybe_unused]] auto operator=(const self_type& other) -> self_type&;
      [[maybe_unused]] auto operator=(self_type&& other) noexcept -> self_type&;
      /// @}

      /// @{ -------------------------------------------------- SET NUM ITERATIONS
      void set_num_iterations(unsigned int numIterations);
      /// @}

      /// @{ -------------------------------------------------- SET LAMBDA
      void set_lambda(double lambda);
      /// @}

      /// @{ -------------------------------------------------- SET MU
      void set_mu(double mu);
      /// @}

      //====================================================================================================
      //===== FUNCTIONS
      //====================================================================================================
      /// @{ -------------------------------------------------- APPLY
      /*!
       * Runs _num_iterations smoothing passes over the given per-point values.
       *
       * \param d                object providing d.geometry().num_points() and
       *                         d.topology().neighbors_of_point(pointId)
       * \param data_vector_copy per-point values, consumed by move; must hold at
       *                         least num_points() entries (every point id is
       *                         read from it each pass)
       * \param zero_val         additive identity of T used to start the
       *                         neighbor-average accumulation (e.g. zero vector)
       * \return the smoothed values; an EMPTY vector if _num_iterations == 0 or
       *         the object has fewer than 3 points (the moved-in data is
       *         discarded in those cases)
       *
       * T must support +=, -, /= unsigned, and scaling by double
       * (via `double * T`), as used below.
       */
      template<typename TDataObject, typename T>
      [[nodiscard]] std::vector <T> apply(TDataObject& d, std::vector <T>&& data_vector_copy, const T& zero_val = T()) const
      {
          if (_num_iterations == 0)
          { return std::vector<T>(); }

          const unsigned int numPoints = d.geometry().num_points();

          if (numPoints < 3)
          { return std::vector<T>(); }

          #ifdef BK_EMIT_PROGRESS
          Progress& prog = bk_progress.emplace_task(1 + _num_iterations, ___("Smoothing point values"));
          #endif

          // determine neighbor ids of each point once up front (reused every pass)
          std::vector <std::vector<unsigned int>> points_neighbor_ids(numPoints);

          for (unsigned int pointId = 0; pointId < numPoints; ++pointId)
          { points_neighbor_ids[pointId] = d.topology().neighbors_of_point(pointId); }

          // alternating temp point vectors: each pass reads one and writes the other
          std::vector <T> v0(std::move(data_vector_copy));
          std::vector <T> v1(v0.size());

          // run
          for (unsigned int iteration_cnt = 0; iteration_cnt < _num_iterations; ++iteration_cnt)
          {
              // even passes read v0/write v1; odd passes read v1/write v0
              std::vector <T>& read = iteration_cnt % 2 == 0 ? v0 : v1;
              std::vector <T>& write = iteration_cnt % 2 == 0 ? v1 : v0;

              // safe to parallelize: each thread writes only write[id] and
              // reads only from the (immutable this pass) "read" buffer
              #pragma omp parallel for
              for (unsigned int id = 0; id < numPoints; ++id)
              {
                  const T& val = read[id];
                  const std::vector<unsigned int>& neighbor_ids = points_neighbor_ids[id];
                  const unsigned int num_neighbors = neighbor_ids.size();

                  // isolated points (no neighbors) keep their value unchanged
                  write[id] = val;

                  if (num_neighbors != 0)
                  {
                      // average of the neighbors' values
                      T center = zero_val;

                      for (unsigned int n = 0; n < num_neighbors; ++n)
                      { center += read[neighbor_ids[n]]; }

                      center /= num_neighbors;

                      const T dirToCenter = center - val;

                      // lambda on even passes, mu on odd passes
                      write[id] += (iteration_cnt % 2 == 0 ? _lambda : _mu) * dirToCenter;
                  } // if (num_neighbors != 0)
              } // for id : numPoints

              #ifdef BK_EMIT_PROGRESS
              prog.increment(1);
              #endif
          } // for iterations

          /*
           * write result: the last pass wrote v1 when _num_iterations is odd
           * and v0 when it is even (pass k reads v_{k%2}, writes the other)
           */
          std::vector <T> res = std::move(_num_iterations % 2 == 0 ? v0 : v1);

          #ifdef BK_EMIT_PROGRESS
          prog.set_finished();
          #endif

          return res;
      }
      /// @}
  }; // class SmoothPointValuesFilter
} // namespace bk

#endif //BK_SMOOTHPOINTVALUESFILTER_H
5879852542.c
#define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include <stdio.h>
#include "omp.h"

/* NOTE(review): classic double-evaluation hazard if called with side-effecting
 * arguments; fine here because all call sites pass plain variables/expressions. */
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))

/* Generic array descriptor: raw data plus per-dimension size/padding/halo
 * metadata (layout matches Devito's generated "dataobj" struct). */
struct dataobj
{
  void *restrict data;
  int *size;
  int *npsize;
  int *dsize;
  int *hsize;
  int *hofs;
  int *oofs;
};

/* Per-section wall-clock accumulators. */
struct profiler
{
  double section0;
};

/*
 * Auto-generated (Devito-style) 3-D acoustic wave propagation kernel with
 * time-blocked ("wavefront"/skewed) loop tiling and sparse source injection.
 *
 * - usol holds three time levels (indices t0/t1/t2 cycle mod 3).
 * - The spatial update is an order-8 central finite-difference Laplacian
 *   (coefficients -2.84722222, 1.6, -2.0e-1, 2.53968254e-2, -1.78571429e-3
 *   over offsets 0..4 per axis) with damping term `damp` and velocity `vp`.
 * - Loops over x/y are skewed by `time` (indices use x - time, y - time),
 *   which is what makes the time-tiling legal.
 * - After each stencil sweep, point sources selected via the sparse
 *   nnz_sp_source_mask / sp_source_mask / source_id / source_mask arrays are
 *   injected into usol at time level t0.
 *
 * Returns 0 always; wall time of the main section is accumulated into
 * timers->section0.
 */
int Kernel(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, const float dt, const float h_x, const float h_y, const float h_z, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict usol_vec, struct dataobj *restrict vp_vec, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads)
{
  /* Cast the flat data pointers to variably-modified array types so the body
   * can use natural multi-dimensional indexing. */
  int (*restrict block_sizes) __attribute__ ((aligned (64))) = (int (*)) block_sizes_vec->data;
  float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data;
  int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
  float(*restrict save_src)[save_src_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_vec->size[1]])save_src_vec->data;
  int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
  float(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (float(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
  int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
  float(*restrict usol)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]] __attribute__((aligned(64))) = (float(*)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]])usol_vec->data;
  float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;

  /* Flush denormal numbers to zero in hardware */
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);

  /* Tile/block extents supplied by the caller via block_sizes. */
  int xb_size = block_sizes[0];
  int y0_blk0_size = block_sizes[3];
  int x0_blk0_size = block_sizes[2];
  int yb_size = block_sizes[1];

  /* NOTE(review): debug trace of tile configuration left in by the generator. */
  printf(" Tiles: %d, %d ::: Blocks %d, %d \n", x0_blk0_size, y0_blk0_size, xb_size, yb_size);

  int sf = 4; // half the space order
  /* NOTE(review): with this t_blk_size the outer t_blk loop runs exactly once. */
  int t_blk_size = 2 * sf * (time_M - time_m);

  struct timeval start_section0, end_section0;
  gettimeofday(&start_section0, NULL);

  for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block
  {
    /* Spatial tile loops are extended by sf*(time_M-time_m) to cover the
     * time-skewed iteration space. */
    for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size + 1)
    {
      for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size + 1)
      {
        /* Cycle the three time levels t0/t1/t2 (mod 3) as `time` advances by sf. */
        for (int time = t_blk, t0 = (time + 1) % (3), t1 = (time) % (3), t2 = (time + 2) % (3); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3), t1 = (((time / sf) % (time_M - time_m + 1))) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3))
        {
          int tw = ((time / sf) % (time_M - time_m + 1)); /* NOTE(review): unused */
          /* Begin section0 */
#pragma omp parallel num_threads(nthreads)
          {
#pragma omp for collapse(2) schedule(dynamic, 1)
            for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
            {
              for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
              {
                for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size)), (x0_blk0 + x0_blk0_size - 1)); x++)
                {
                  for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size)), (y0_blk0 + y0_blk0_size - 1)); y++)
                  {
                    /* Order-8 FD update along z/y/x; +8 offsets skip the halo. */
#pragma omp simd aligned(damp, usol, vp : 64)
                    for (int z = z_m; z <= z_M; z += 1)
                    {
                      float r14 = -2.84722222F * usol[t1][x - time + 8][y - time + 8][z + 8];
                      float r13 = 1.0 / dt;
                      float r12 = 1.0 / (dt * dt);
                      float r11 = 1.0 / (vp[x - time + 8][y - time + 8][z + 8] * vp[x - time + 8][y - time + 8][z + 8]);
                      usol[t0][x - time + 8][y - time + 8][z + 8] = (r11 * (-r12 * (-2.0F * usol[t1][x - time + 8][y - time + 8][z + 8] + usol[t2][x - time + 8][y - time + 8][z + 8])) + r13 * (damp[x - time + 1][y - time + 1][z + 1] * usol[t1][x - time + 8][y - time + 8][z + 8]) + (r14 - 1.78571429e-3F * (usol[t1][x - time + 8][y - time + 8][z + 4] + usol[t1][x - time + 8][y - time + 8][z + 12]) + 2.53968254e-2F * (usol[t1][x - time + 8][y - time + 8][z + 5] + usol[t1][x - time + 8][y - time + 8][z + 11]) - 2.0e-1F * (usol[t1][x - time + 8][y - time + 8][z + 6] + usol[t1][x - time + 8][y - time + 8][z + 10]) + 1.6F * (usol[t1][x - time + 8][y - time + 8][z + 7] + usol[t1][x - time + 8][y - time + 8][z + 9])) / ((h_z * h_z)) + (r14 - 1.78571429e-3F * (usol[t1][x - time + 8][y - time + 4][z + 8] + usol[t1][x - time + 8][y - time + 12][z + 8]) + 2.53968254e-2F * (usol[t1][x - time + 8][y - time + 5][z + 8] + usol[t1][x - time + 8][y - time + 11][z + 8]) - 2.0e-1F * (usol[t1][x - time + 8][y - time + 6][z + 8] + usol[t1][x - time + 8][y - time + 10][z + 8]) + 1.6F * (usol[t1][x - time + 8][y - time + 7][z + 8] + usol[t1][x - time + 8][y - time + 9][z + 8])) / ((h_y * h_y)) + (r14 - 1.78571429e-3F * (usol[t1][x - time + 4][y - time + 8][z + 8] + usol[t1][x - time + 12][y - time + 8][z + 8]) + 2.53968254e-2F * (usol[t1][x - time + 5][y - time + 8][z + 8] + usol[t1][x - time + 11][y - time + 8][z + 8]) - 2.0e-1F * (usol[t1][x - time + 6][y - time + 8][z + 8] + usol[t1][x - time + 10][y - time + 8][z + 8]) + 1.6F * (usol[t1][x - time + 7][y - time + 8][z + 8] + usol[t1][x - time + 9][y - time + 8][z + 8])) / ((h_x * h_x))) / (r11 * r12 + r13 * damp[x - time + 1][y - time + 1][z + 1]);
                    }
                    /* Sparse source injection: only the nonzero entries listed
                     * in sp_source_mask for this (x,y) column are touched. */
#pragma omp simd aligned(damp, usol, vp : 64)
                    for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1)
                    {
                      int zind = sp_source_mask[x - time][y - time][sp_zi];
                      float r0 = save_src[((time / sf) % (time_M - time_m + 1))][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
                      usol[t0][x - time + 8][y - time + 8][zind + 8] += r0;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
  }
  /* End section0 */
  gettimeofday(&end_section0, NULL);
  timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000;

  return 0;
}
Example_affinity.3.c
/*
 * @@name: affinity.3c
 * @@type: C
 * @@compilable: yes
 * @@linkable: yes
 * @@expect: success
 * @@version: omp_4.0
 */

/* Worker routine; supplied by the example harness in a separate file. */
void work();

/*
 * Demonstrates the OpenMP 4.0 proc_bind(close) policy: a team of four
 * threads is created and consecutive threads are bound to places close
 * to the place of the parent (master) thread.
 */
int main()
{
#pragma omp parallel proc_bind(close) num_threads(4)
  {
    work();
  }

  return 0;
}
GB_binop__bxor_uint64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
//
// This translation unit instantiates every kernel variant of the BXOR
// operator on uint64_t: the macros below configure the shared template
// files (#include'd into each function body) for this type/op pair.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__bxor_uint64
// A.*B function (eWiseMult):       GB_AemultB__bxor_uint64
// A*D function (colscale):         GB_AxD__bxor_uint64
// D*A function (rowscale):         GB_DxB__bxor_uint64
// C+=B function (dense accum):     GB_Cdense_accumB__bxor_uint64
// C+=b function (dense accum):     GB_Cdense_accumb__bxor_uint64
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__bxor_uint64
// C=scalar+B                       GB_bind1st__bxor_uint64
// C=scalar+B'                      GB_bind1st_tran__bxor_uint64
// C=A+scalar                       GB_bind2nd__bxor_uint64
// C=A'+scalar                      GB_bind2nd_tran__bxor_uint64

// C type:   uint64_t
// A type:   uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij) ^ (bij)

#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint64_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = (x) ^ (y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BXOR || GxB_NO_UINT64 || GxB_NO_BXOR_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (BXOR is not in that list, so this variant is compiled out.)

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__bxor_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__bxor_uint64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__bxor_uint64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return emitted by the generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__bxor_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__bxor_uint64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__bxor_uint64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__bxor_uint64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__bxor_uint64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t   x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint64_t bij = Bx [p] ;
        Cx [p] = (x) ^ (bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__bxor_uint64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t   y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint64_t aij = Ax [p] ;
        Cx [p] = (aij) ^ (y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint64_t aij = Ax [pA] ;        \
    Cx [pC] = (x) ^ (aij) ;         \
}

GrB_Info GB_bind1st_tran__bxor_uint64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template expansion
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint64_t aij = Ax [pA] ;        \
    Cx [pC] = (aij) ^ (y) ;         \
}

GrB_Info GB_bind2nd_tran__bxor_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
conv.h
#ifndef CONV_H #define CONV_H namespace TSnap { /// Sequentially converts the table into a graph with links from nodes in \c SrcCol to those in \c DstCol. template<class PGraph> PGraph ToGraph(PTable Table, const TStr& SrcCol, const TStr& DstCol, TAttrAggr AggrPolicy) { PGraph Graph = PGraph::TObj::New(); const TAttrType NodeType = Table->GetColType(SrcCol); Assert(NodeType == Table->GetColType(DstCol)); const TInt SrcColIdx = Table->GetColIdx(SrcCol); const TInt DstColIdx = Table->GetColIdx(DstCol); // make single pass over all rows in the table if (NodeType == atInt) { for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) { if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; } // add src and dst nodes to graph if they are not seen earlier TInt SVal = (Table->IntCols)[SrcColIdx][CurrRowIdx]; TInt DVal = (Table->IntCols)[DstColIdx][CurrRowIdx]; //Using AddNodeUnchecked ensures that no error is thrown when the same node is seen twice Graph->AddNodeUnchecked(SVal); Graph->AddNodeUnchecked(DVal); Graph->AddEdgeUnchecked(SVal, DVal); } } else if (NodeType == atFlt) { // node values - i.e. the unique values of src/dst col //THashSet<TInt> IntNodeVals; // for both int and string node attr types. 
THash<TFlt, TInt> FltNodeVals; for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) { if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; } // add src and dst nodes to graph if they are not seen earlier TInt SVal, DVal; TFlt FSVal = (Table->FltCols)[SrcColIdx][CurrRowIdx]; SVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FSVal); TFlt FDVal = (Table->FltCols)[SrcColIdx][CurrRowIdx]; DVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FDVal); Graph->AddEdge(SVal, DVal); } } else { for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) { if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; } // add src and dst nodes to graph if they are not seen earlier TInt SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx]; // if (strlen(Table->GetContextKey(SVal)) == 0) { continue; } //illegal value TInt DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx]; // if (strlen(Table->GetContextKey(DVal)) == 0) { continue; } //illegal value //Using AddNodeUnchecked ensures that no error is thrown when the same node is seen twice Graph->AddNodeUnchecked(SVal); Graph->AddNodeUnchecked(DVal); Graph->AddEdgeUnchecked(SVal, DVal); } } Graph->SortNodeAdjV(); return Graph; } /// Converts the Table into a graph with edges from \c SrcCol to \c DstCol, and attribute vector /// defined by the arguments. template<class PGraph> PGraph ToNetwork(PTable Table, const TStr& SrcCol, const TStr& DstCol, TStrV& SrcAttrV, TStrV& DstAttrV, TStrV& EdgeAttrV, TAttrAggr AggrPolicy) { PGraph Graph = PGraph::TObj::New(); const TAttrType NodeType = Table->GetColType(SrcCol); Assert(NodeType == Table->GetColType(DstCol)); const TInt SrcColIdx = Table->GetColIdx(SrcCol); const TInt DstColIdx = Table->GetColIdx(DstCol); //Table->AddGraphAttributeV(SrcAttrV, false, true, false); //Table->AddGraphAttributeV(DstAttrV, false, false, true); //Table->AddGraphAttributeV(EdgeAttrV, true, false, true); // node values - i.e. 
the unique values of src/dst col //THashSet<TInt> IntNodeVals; // for both int and string node attr types. THash<TFlt, TInt> FltNodeVals; // node attributes THash<TInt, TStrIntVH> NodeIntAttrs; THash<TInt, TStrFltVH> NodeFltAttrs; THash<TInt, TStrStrVH> NodeStrAttrs; // make single pass over all rows in the table for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) { if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; } // add src and dst nodes to graph if they are not seen earlier TInt SVal, DVal; if (NodeType == atFlt) { TFlt FSVal = (Table->FltCols)[SrcColIdx][CurrRowIdx]; SVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FSVal); TFlt FDVal = (Table->FltCols)[SrcColIdx][CurrRowIdx]; DVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FDVal); } else if (NodeType == atInt || NodeType == atStr) { if (NodeType == atInt) { SVal = (Table->IntCols)[SrcColIdx][CurrRowIdx]; DVal = (Table->IntCols)[DstColIdx][CurrRowIdx]; } else { SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx]; if (strlen(Table->GetContextKey(SVal)) == 0) { continue; } //illegal value DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx]; if (strlen(Table->GetContextKey(DVal)) == 0) { continue; } //illegal value } if (!Graph->IsNode(SVal)) {Graph->AddNode(SVal); } if (!Graph->IsNode(DVal)) {Graph->AddNode(DVal); } //CheckAndAddIntNode(Graph, IntNodeVals, SVal); //CheckAndAddIntNode(Graph, IntNodeVals, DVal); } // add edge and edge attributes Graph->AddEdge(SVal, DVal, CurrRowIdx); // Aggregate edge attributes and add to graph for (TInt i = 0; i < EdgeAttrV.Len(); i++) { TStr ColName = EdgeAttrV[i]; TAttrType T = Table->GetColType(ColName); TInt Index = Table->GetColIdx(ColName); switch (T) { case atInt: Graph->AddIntAttrDatE(CurrRowIdx, Table->IntCols[Index][CurrRowIdx], ColName); break; case atFlt: Graph->AddFltAttrDatE(CurrRowIdx, Table->FltCols[Index][CurrRowIdx], ColName); break; case atStr: Graph->AddStrAttrDatE(CurrRowIdx, Table->GetStrVal(Index, CurrRowIdx), 
ColName); break; } } // get src and dst node attributes into hashmaps if ((Table->SrcNodeAttrV).Len() > 0) { Table->AddNodeAttributes(SVal, Table->SrcNodeAttrV, CurrRowIdx, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs); } if ((Table->DstNodeAttrV).Len() > 0) { Table->AddNodeAttributes(DVal, Table->DstNodeAttrV, CurrRowIdx, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs); } } // aggregate node attributes and add to graph if ((Table->SrcNodeAttrV).Len() > 0 || (Table->DstNodeAttrV).Len() > 0) { for (TNEANet::TNodeI NodeI = Graph->BegNI(); NodeI < Graph->EndNI(); NodeI++) { TInt NId = NodeI.GetId(); if (NodeIntAttrs.IsKey(NId)) { TStrIntVH IntAttrVals = NodeIntAttrs.GetDat(NId); for (TStrIntVH::TIter it = IntAttrVals.BegI(); it < IntAttrVals.EndI(); it++) { TInt AttrVal = Table->AggregateVector<TInt>(it.GetDat(), AggrPolicy); Graph->AddIntAttrDatN(NId, AttrVal, it.GetKey()); } } if (NodeFltAttrs.IsKey(NId)) { TStrFltVH FltAttrVals = NodeFltAttrs.GetDat(NId); for (TStrFltVH::TIter it = FltAttrVals.BegI(); it < FltAttrVals.EndI(); it++) { TFlt AttrVal = Table->AggregateVector<TFlt>(it.GetDat(), AggrPolicy); Graph->AddFltAttrDatN(NId, AttrVal, it.GetKey()); } } if (NodeStrAttrs.IsKey(NId)) { TStrStrVH StrAttrVals = NodeStrAttrs.GetDat(NId); for (TStrStrVH::TIter it = StrAttrVals.BegI(); it < StrAttrVals.EndI(); it++) { TStr AttrVal = Table->AggregateVector<TStr>(it.GetDat(), AggrPolicy); Graph->AddStrAttrDatN(NId, AttrVal, it.GetKey()); } } } } return Graph; } /// Calls ToNetwork with an empty attribute vector. Convenience wrapper. template<class PGraph> PGraph ToNetwork(PTable Table, const TStr& SrcCol, const TStr& DstCol, TAttrAggr AggrPolicy) { TStrV V; return ToNetwork<PGraph>(Table, SrcCol, DstCol, V, AggrPolicy); } #ifdef GCC_ATOMIC /// Performs table to graph conversion in parallel using the sort-first algorithm. This is the recommended method to use. 
/// Parallel sort-first table-to-graph conversion.
/// Pipeline: (1) copy src/dst columns into four flat vectors, (2) sort two
/// (key,payload) pairs concurrently — by src and by dst — (3) find per-thread
/// partition offsets that do not split runs of equal keys, (4) count and
/// enumerate distinct node ids per partition, (5) merge the sorted src/dst
/// id lists into one node table, (6) reserve and fill per-node in/out
/// adjacency vectors and build the graph.
template<class PGraphMP>
PGraphMP ToGraphMP(PTable Table, const TStr& SrcCol, const TStr& DstCol) {
  const TInt SrcColIdx = Table->GetColIdx(SrcCol);
  const TInt DstColIdx = Table->GetColIdx(DstCol);
  const TAttrType NodeType = Table->GetColType(SrcCol);
  Assert(NodeType == Table->GetColType(DstCol));
  const TInt NumRows = Table->NumValidRows;

  // Two copies of each endpoint column: one pair sorted by src, one by dst.
  TIntV SrcCol1, DstCol1, SrcCol2, DstCol2;

  #pragma omp parallel sections num_threads(4)
  {
    #pragma omp section
    { SrcCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { SrcCol2.Reserve(NumRows, NumRows); }
    #pragma omp section
    { DstCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { DstCol2.Reserve(NumRows, NumRows); }
  }

  TIntPrV Partitions;
  Table->GetPartitionRanges(Partitions, omp_get_max_threads());
  TInt PartitionSize = Partitions[0].GetVal2()-Partitions[0].GetVal1()+1; // NOTE(review): unused

  omp_set_num_threads(omp_get_max_threads());
  // Stage 1: flatten the endpoint columns (row id indexes all four vectors).
  if (NodeType == atInt) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetIntAttr(SrcColIdx);
        SrcCol2[RowId] = RowI.GetIntAttr(SrcColIdx);
        DstCol1[RowId] = RowI.GetIntAttr(DstColIdx);
        DstCol2[RowId] = RowI.GetIntAttr(DstColIdx);
        RowI++;
      }
    }
  }
  else if (NodeType == atStr) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetStrMapById(SrcColIdx);
        SrcCol2[RowId] = RowI.GetStrMapById(SrcColIdx);
        DstCol1[RowId] = RowI.GetStrMapById(DstColIdx);
        DstCol2[RowId] = RowI.GetStrMapById(DstColIdx);
        RowI++;
      }
    }
  }

  // Stage 2: sort (SrcCol1,DstCol1) by src and (DstCol2,SrcCol2) by dst,
  // as two untied tasks running concurrently.
  omp_set_num_threads(omp_get_max_threads());
  #pragma omp parallel
  {
    #pragma omp single nowait
    {
      #pragma omp task untied shared(SrcCol1, DstCol1)
      { TTable::QSortKeyVal(SrcCol1, DstCol1, 0, NumRows-1); }
    }
    #pragma omp single nowait
    {
      #pragma omp task untied shared(SrcCol2, DstCol2)
      { TTable::QSortKeyVal(DstCol2, SrcCol2, 0, NumRows-1); }
    }
    #pragma omp taskwait
  }

  // Stage 3: choose partition boundaries that never split a run of equal keys.
  TInt NumThreads = omp_get_max_threads();
  TInt PartSize = (NumRows/NumThreads);

  TIntV SrcOffsets, DstOffsets;
  SrcOffsets.Add(0);
  for (TInt i = 1; i < NumThreads; i++) {
    TInt CurrOffset = i * PartSize;
    while (CurrOffset < (i+1) * PartSize &&
        SrcCol1[CurrOffset-1] == SrcCol1[CurrOffset]) {
      CurrOffset++;
    }
    if (CurrOffset < (i+1) * PartSize) { SrcOffsets.Add(CurrOffset); }
  }
  SrcOffsets.Add(NumRows);

  DstOffsets.Add(0);
  for (TInt i = 1; i < NumThreads; i++) {
    TInt CurrOffset = i * PartSize;
    while (CurrOffset < (i+1) * PartSize &&
        DstCol2[CurrOffset-1] == DstCol2[CurrOffset]) {
      CurrOffset++;
    }
    if (CurrOffset < (i+1) * PartSize) { DstOffsets.Add(CurrOffset); }
  }
  DstOffsets.Add(NumRows);

  TInt SrcPartCnt = SrcOffsets.Len()-1;
  TInt DstPartCnt = DstOffsets.Len()-1;

  // Stage 4a: count distinct node ids in each partition.
  TIntV SrcNodeCounts, DstNodeCounts;
  SrcNodeCounts.Reserve(SrcPartCnt, SrcPartCnt);
  DstNodeCounts.Reserve(DstPartCnt, DstPartCnt);

  #pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
    if (t < SrcPartCnt) {
      TInt i = t;
      if (SrcOffsets[i] != SrcOffsets[i+1]) {
        SrcNodeCounts[i] = 1;
        TInt CurrNode = SrcCol1[SrcOffsets[i]];
        for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
          while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
          if (j < SrcOffsets[i+1]) {
            SrcNodeCounts[i]++;
            CurrNode = SrcCol1[j];
          }
        }
      }
    } else {
      TInt i = t - SrcPartCnt;
      if (DstOffsets[i] != DstOffsets[i+1]) {
        DstNodeCounts[i] = 1;
        TInt CurrNode = DstCol2[DstOffsets[i]];
        for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
          while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
          if (j < DstOffsets[i+1]) {
            DstNodeCounts[i]++;
            CurrNode = DstCol2[j];
          }
        }
      }
    }
  }

  // Prefix sums: where each partition writes its distinct ids.
  TInt TotalSrcNodes = 0;
  TIntV SrcIdOffsets;
  for (int i = 0; i < SrcPartCnt; i++) {
    SrcIdOffsets.Add(TotalSrcNodes);
    TotalSrcNodes += SrcNodeCounts[i];
  }

  TInt TotalDstNodes = 0;
  TIntV DstIdOffsets;
  for (int i = 0; i < DstPartCnt; i++) {
    DstIdOffsets.Add(TotalDstNodes);
    TotalDstNodes += DstNodeCounts[i];
  }

  // Stage 4b: record (node id, first row offset) for every distinct id.
  TIntPrV SrcNodeIds, DstNodeIds;
  #pragma omp parallel sections
  {
    #pragma omp section
    { SrcNodeIds.Reserve(TotalSrcNodes, TotalSrcNodes); }
    #pragma omp section
    { DstNodeIds.Reserve(TotalDstNodes, TotalDstNodes); }
  }

  #pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
    if (t < SrcPartCnt) {
      TInt i = t;
      if (SrcOffsets[i] != SrcOffsets[i+1]) {
        TInt CurrNode = SrcCol1[SrcOffsets[i]];
        TInt ThreadOffset = SrcIdOffsets[i];
        SrcNodeIds[ThreadOffset] = TIntPr(CurrNode, SrcOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
          while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
          if (j < SrcOffsets[i+1]) {
            CurrNode = SrcCol1[j];
            SrcNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    } else {
      TInt i = t - SrcPartCnt;
      if (DstOffsets[i] != DstOffsets[i+1]) {
        TInt CurrNode = DstCol2[DstOffsets[i]];
        TInt ThreadOffset = DstIdOffsets[i];
        DstNodeIds[ThreadOffset] = TIntPr(CurrNode, DstOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
          while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
          if (j < DstOffsets[i+1]) {
            CurrNode = DstCol2[j];
            DstNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    }
  }

  // Stage 5: merge the two sorted distinct-id lists; each entry records the
  // node id and its index into SrcNodeIds / DstNodeIds (-1 when absent).
  TIntTrV Nodes;
  Nodes.Reserve(TotalSrcNodes+TotalDstNodes);

  TInt i = 0, j = 0;
  while (i < TotalSrcNodes && j < TotalDstNodes) {
    if (SrcNodeIds[i].Val1 == DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, j));
      i++;
      j++;
    } else if (SrcNodeIds[i].Val1 < DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1));
      i++;
    } else {
      Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j));
      j++;
    }
  }
  for (; i < TotalSrcNodes; i++) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); }
  for (; j < TotalDstNodes; j++) { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); }

  TInt NumNodes = Nodes.Len();
  PGraphMP Graph = TNGraphMP::New(NumNodes, NumRows);

  // Stage 6a: reserve adjacency vectors (run single-threaded: NumThreads = 1).
  NumThreads = 1;
  int Delta = (NumNodes+NumThreads-1)/NumThreads;

  TVec<TIntV> InVV(NumNodes);
  TVec<TIntV> OutVV(NumNodes);

  omp_set_num_threads(NumThreads);
  #pragma omp parallel for schedule(static,Delta)
  for (int m = 0; m < NumNodes; m++) {
    TInt n, i, j;
    Nodes[m].GetVal(n, i, j);
    if (i >= 0) {
      TInt Offset = SrcNodeIds[i].GetVal2();
      TInt Sz = DstCol1.Len()-Offset;
      if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; }
      OutVV[m].Reserve(Sz);
    }
    if (j >= 0) {
      TInt Offset = DstNodeIds[j].GetVal2();
      TInt Sz = SrcCol2.Len()-Offset;
      if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; }
      InVV[m].Reserve(Sz);
    }
  }

  // Stage 6b: copy each node's (deduplicated) neighbor run and add it.
  NumThreads = omp_get_max_threads();
  Delta = (NumNodes+NumThreads-1)/(10*NumThreads);
  omp_set_num_threads(NumThreads);
  #pragma omp parallel for schedule(dynamic)
  for (int m = 0; m < NumNodes; m++) {
    TInt n, i, j;
    Nodes[m].GetVal(n, i, j);
    if (i >= 0) {
      TInt Offset = SrcNodeIds[i].GetVal2();
      TInt Sz = DstCol1.Len()-Offset;
      if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; }
      OutVV[m].CopyUniqueFrom(DstCol1, Offset, Sz);
    }
    if (j >= 0) {
      TInt Offset = DstNodeIds[j].GetVal2();
      TInt Sz = SrcCol2.Len()-Offset;
      if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; }
      InVV[m].CopyUniqueFrom(SrcCol2, Offset, Sz);
    }
    Graph->AddNodeWithEdges(n, InVV[m], OutVV[m]);
  }
  Graph->SetNodes(NumNodes);

  return Graph;
}

/// Performs table to graph conversion in parallel. Uses the hash-first method, which is less optimal, use ToGraphMP instead.
#ifdef GCC_ATOMIC
template<class PGraphMP>
PGraphMP ToGraphMP3(PTable Table, const TStr& SrcCol, const TStr& DstCol) {
  // Hash-first conversion: node ids are hashed into a pre-sized table whose
  // capacity is estimated probabilistically; on estimate failure the whole
  // build is retried with a doubled capacity.
  // NOTE(review): the local is PNGraphMP and the graph is created via
  // TNGraphMP::New, so this only works when PGraphMP is PNGraphMP — confirm.
  PNGraphMP Graph;
  int MaxThreads = omp_get_max_threads();
  int Length, Threads, Delta, Nodes, Last;
  uint64_t NumNodesEst;
  TInt SrcColIdx, DstColIdx;
  TIntV InVec, OutVec;      // per-bucket in/out edge counts, updated atomically

  SrcColIdx = Table->GetColIdx(SrcCol);
  DstColIdx = Table->GetColIdx(DstCol);
  const TAttrType NodeType = Table->GetColType(SrcCol);
  Assert(NodeType == Table->GetColType(DstCol));

  /* Estimate number of nodes in the graph */
  // Linear-counting estimator: mark sz buckets with the dst-column ids, count
  // the empty buckets, and estimate distinct ids as sz * ln(sz / empty).
  // NOTE(review): only the destination column feeds the estimate, so sources
  // that never appear as destinations are not counted — confirm intent.
  // NOTE(review): sz == 0 (NumRows < 10) divides by zero in 'vert % sz', and
  // cnt == 0 (no empty bucket) makes log() diverge — no guard here.
  int NumRows = Table->Next.Len();
  double Load = 10;
  int sz = NumRows / Load;
  int *buckets = (int *)malloc(sz * sizeof(int));

  #pragma omp parallel for
  for (int i = 0; i < sz; i++) buckets[i] = 0;

  if (NodeType == atInt) {
    #pragma omp parallel for
    for (int i = 0; i < NumRows; i++) {
      int vert = Table->IntCols[DstColIdx][i];
      buckets[vert % sz] = 1;
    }
  } else if (NodeType == atStr ) {
    #pragma omp parallel for
    for (int i = 0; i < NumRows; i++) {
      int vert = (Table->StrColMaps)[DstColIdx][i];
      buckets[vert % sz] = 1;
    }
  }
  int cnt = 0;                           // number of empty buckets
  #pragma omp parallel for reduction(+:cnt)
  for (int i = 0; i < sz; i++) {
    if (buckets[i] == 0) cnt += 1;
  }
  NumNodesEst = sz * log ((double)sz / cnt);
  free (buckets);

  /* Until we correctly estimate the number of nodes */
  while (1)
  {
    Graph = TNGraphMP::New(NumNodesEst, 100);
    Length = Graph->Reserved();
    Threads = MaxThreads/2;
    Delta = (Length + Threads - 1) / Threads;

    OutVec.Gen(Length);
    InVec.Gen(Length);

    /* build the node hash table, count the size of edge lists */
    Last = NumRows;
    Nodes = 0;
    omp_set_num_threads(Threads);
    #pragma omp parallel for schedule(static, Delta)
    for (int CurrRowIdx = 0; CurrRowIdx < Last; CurrRowIdx++) {
      // Bail out (skip remaining rows) once the hash table is nearly full;
      // the outer while-loop will retry with a doubled estimate.
      // NOTE(review): Nodes is incremented under 'omp critical' but read here
      // without synchronization — benign for the retry heuristic, but racy.
      if ((uint64_t) Nodes + 1000 >= NumNodesEst) {
        /* need bigger hash table */
        continue;
      }

      TInt SVal, DVal;
      if (NodeType == atInt) {
        SVal = Table->IntCols[SrcColIdx][CurrRowIdx];
        DVal = Table->IntCols[DstColIdx][CurrRowIdx];
      }
      else if (NodeType == atStr ) {
        SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx];
        DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx];
      }

      // AddOutEdge1/AddInEdge1 return false when the id was newly inserted
      // into the hash slot, in which case the node counter grows.
      int SrcIdx = abs((SVal.GetPrimHashCd()) % Length);
      if (!Graph->AddOutEdge1(SrcIdx, SVal, DVal)) {
        #pragma omp critical
        {
          Nodes++;
        }
      }
      __sync_fetch_and_add(&OutVec[SrcIdx].Val, 1);

      int DstIdx = abs((DVal.GetPrimHashCd()) % Length);
      if (!Graph->AddInEdge1(DstIdx, SVal, DVal)) {
        #pragma omp critical
        {
          Nodes++;
        }
      }
      __sync_fetch_and_add(&InVec[DstIdx].Val, 1);
    }
    if ((uint64_t) Nodes + 1000 >= NumNodesEst) {
      /* We need to double our num nodes estimate */
      Graph.Clr();
      InVec.Clr();
      OutVec.Clr();
      NumNodesEst *= 2;
    }
    else {
      break;
    }
  }

  Graph->SetNodes(Nodes);

  // NOTE(review): Edges is computed but never used afterwards.
  uint Edges = 0;
  for (int i = 0; i < Length; i++) {
    Edges += OutVec[i] + InVec[i];
  }

  // Pre-size per-slot adjacency storage from the counted degrees.
  for (int Idx = 0; Idx < Length; Idx++) {
    if (OutVec[Idx] > 0 || InVec[Idx] > 0) {
      Graph->ReserveNodeDegs(Idx, InVec[Idx], OutVec[Idx]);
    }
  }

  /* assign edges */
  Length = Graph->Reserved();
  Threads = MaxThreads;
  Delta = (Length + Threads - 1) / Threads;

  omp_set_num_threads(Threads);
  #pragma omp parallel for schedule(static,Delta)
  for (int CurrRowIdx = 0; CurrRowIdx < Last; CurrRowIdx++) {
    TInt SVal, DVal;
    if (NodeType == atInt) {
      SVal = Table->IntCols[SrcColIdx][CurrRowIdx];
      DVal = Table->IntCols[DstColIdx][CurrRowIdx];
    }
    else if (NodeType == atStr) {
      SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx];
      DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx];
    }

    Graph->AddOutEdge2(SVal, DVal);
    Graph->AddInEdge2(SVal, DVal);
  }

  /* sort edges */
  Length = Graph->Reserved();
  Threads = MaxThreads*2;
  Delta = (Length + Threads - 1) / Threads;

  omp_set_num_threads(Threads);
  #pragma omp parallel for schedule(dynamic)
  for (int Idx = 0; Idx < Length; Idx++) {
    if (OutVec[Idx] > 0 || InVec[Idx] > 0) {
      Graph->SortEdges(Idx, InVec[Idx], OutVec[Idx]);
    }
  }

  return Graph;
}

/// Does Table to Network conversion in parallel using the sort-first algorithm. This is the recommended method to use.
template<class PGraphMP> inline PGraphMP ToNetworkMP(PTable Table, const TStr& SrcCol, const TStr& DstCol, TStrV& SrcAttrV, TStrV& DstAttrV, TStrV& EdgeAttrV, TAttrAggr AggrPolicy) { TStopwatch* Sw = TStopwatch::GetInstance(); Sw->Start(TStopwatch::AllocateColumnCopies); const TInt SrcColIdx = Table->GetColIdx(SrcCol); const TInt DstColIdx = Table->GetColIdx(DstCol); const TInt NumRows = Table->GetNumValidRows(); const TAttrType NodeType = Table->GetColType(SrcCol); Assert(NodeType == Table->GetColType(DstCol)); TIntV SrcCol1, EdgeCol1, EdgeCol2, DstCol2; THash<TInt, TStrIntVH> NodeIntAttrs; THash<TInt, TStrFltVH> NodeFltAttrs; THash<TInt, TStrStrVH> NodeStrAttrs; #pragma omp parallel sections num_threads(4) { #pragma omp section { SrcCol1.Reserve(NumRows, NumRows); } #pragma omp section { EdgeCol1.Reserve(NumRows, NumRows); } #pragma omp section { DstCol2.Reserve(NumRows, NumRows); } #pragma omp section { EdgeCol2.Reserve(NumRows, NumRows); } } Sw->Stop(TStopwatch::AllocateColumnCopies); Sw->Start(TStopwatch::CopyColumns); TIntPrV Partitions; Table->GetPartitionRanges(Partitions, omp_get_max_threads()); TInt PartitionSize = Partitions[0].GetVal2()-Partitions[0].GetVal1()+1; // double endPartition = omp_get_wtime(); // printf("Partition time = %f\n", endPartition-endResize); omp_set_num_threads(omp_get_max_threads()); if (NodeType == atInt) { #pragma omp parallel for schedule(static) for (int i = 0; i < Partitions.Len(); i++) { TRowIterator RowI(Partitions[i].GetVal1(), Table()); TRowIterator EndI(Partitions[i].GetVal2(), Table()); while (RowI < EndI) { TInt RowId = RowI.GetRowIdx(); SrcCol1[RowId] = RowI.GetIntAttr(SrcColIdx); EdgeCol1[RowId] = RowId; DstCol2[RowId] = RowI.GetIntAttr(DstColIdx); EdgeCol2[RowId] = RowId; RowI++; } } } else if (NodeType == atStr) { #pragma omp parallel for schedule(static) for (int i = 0; i < Partitions.Len(); i++) { TRowIterator RowI(Partitions[i].GetVal1(), Table()); TRowIterator EndI(Partitions[i].GetVal2(), Table()); while (RowI 
< EndI) { TInt RowId = RowI.GetRowIdx(); SrcCol1[RowId] = RowI.GetStrMapById(SrcColIdx); EdgeCol1[RowId] = RowId; DstCol2[RowId] = RowI.GetStrMapById(DstColIdx); EdgeCol2[RowId] = RowId; RowI++; } } } Sw->Stop(TStopwatch::CopyColumns); Sw->Start(TStopwatch::Sort); omp_set_num_threads(omp_get_max_threads()); #pragma omp parallel { #pragma omp single nowait { #ifndef GLib_WIN32 #pragma omp task untied shared(SrcCol1, EdgeCol1) #endif { TTable::QSortKeyVal(SrcCol1, EdgeCol1, 0, NumRows-1); } } #pragma omp single nowait { #ifndef GLib_WIN32 #pragma omp task untied shared(EdgeCol2, DstCol2) #endif { TTable::QSortKeyVal(DstCol2, EdgeCol2, 0, NumRows-1); } } #ifndef GLib_WIN32 #pragma omp taskwait #endif } Sw->Stop(TStopwatch::Sort); Sw->Start(TStopwatch::Group); TInt NumThreads = omp_get_max_threads(); TInt PartSize = (NumRows/NumThreads); // Find the offset of all partitions, each of which contains a list of rows. // Nodes from same sources or destinations are ensured to be kept within same partition. 
TIntV SrcOffsets, DstOffsets; SrcOffsets.Add(0); for (TInt i = 1; i < NumThreads; i++) { TInt CurrOffset = i * PartSize; while (CurrOffset < (i+1) * PartSize && SrcCol1[CurrOffset-1] == SrcCol1[CurrOffset]) { // ensure that rows from the same sources are grouped together CurrOffset++; } if (CurrOffset < (i+1) * PartSize) { SrcOffsets.Add(CurrOffset); } } SrcOffsets.Add(NumRows); DstOffsets.Add(0); for (TInt i = 1; i < NumThreads; i++) { TInt CurrOffset = i * PartSize; while (CurrOffset < (i+1) * PartSize && DstCol2[CurrOffset-1] == DstCol2[CurrOffset]) { // ensure that rows to the same destinations are grouped together CurrOffset++; } if (CurrOffset < (i+1) * PartSize) { DstOffsets.Add(CurrOffset); } } DstOffsets.Add(NumRows); TInt SrcPartCnt = SrcOffsets.Len()-1; // number of partitions TInt DstPartCnt = DstOffsets.Len()-1; // number of partitions // count the number of source nodes and destination nodes in each partition TIntV SrcNodeCounts, DstNodeCounts; SrcNodeCounts.Reserve(SrcPartCnt, SrcPartCnt); DstNodeCounts.Reserve(DstPartCnt, DstPartCnt); #pragma omp parallel for schedule(dynamic) for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) { if (t < SrcPartCnt) { TInt i = t; if (SrcOffsets[i] != SrcOffsets[i+1]) { SrcNodeCounts[i] = 1; TInt CurrNode = SrcCol1[SrcOffsets[i]]; for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) { while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; } if (j < SrcOffsets[i+1]) { SrcNodeCounts[i]++; CurrNode = SrcCol1[j]; } } } } else { TInt i = t - SrcPartCnt; if (DstOffsets[i] != DstOffsets[i+1]) { DstNodeCounts[i] = 1; TInt CurrNode = DstCol2[DstOffsets[i]]; for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) { while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; } if (j < DstOffsets[i+1]) { DstNodeCounts[i]++; CurrNode = DstCol2[j]; } } } } } TInt TotalSrcNodes = 0; TIntV SrcIdOffsets; for (int i = 0; i < SrcPartCnt; i++) { SrcIdOffsets.Add(TotalSrcNodes); TotalSrcNodes += SrcNodeCounts[i]; } TInt 
TotalDstNodes = 0; TIntV DstIdOffsets; for (int i = 0; i < DstPartCnt; i++) { DstIdOffsets.Add(TotalDstNodes); TotalDstNodes += DstNodeCounts[i]; } // printf("Total Src = %d, Total Dst = %d\n", TotalSrcNodes.Val, TotalDstNodes.Val); // find vector of (node_id, start_offset) where start_offset is the index of the first row with node_id TIntPrV SrcNodeIds, DstNodeIds; #pragma omp parallel sections { #pragma omp section { SrcNodeIds.Reserve(TotalSrcNodes, TotalSrcNodes); } #pragma omp section { DstNodeIds.Reserve(TotalDstNodes, TotalDstNodes); } } // Find the starting offset of each node (in both src and dst) #pragma omp parallel for schedule(dynamic) for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) { if (t < SrcPartCnt) { TInt i = t; if (SrcOffsets[i] != SrcOffsets[i+1]) { TInt CurrNode = SrcCol1[SrcOffsets[i]]; TInt ThreadOffset = SrcIdOffsets[i]; SrcNodeIds[ThreadOffset] = TIntPr(CurrNode, SrcOffsets[i]); TInt CurrCount = 1; for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) { while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; } if (j < SrcOffsets[i+1]) { CurrNode = SrcCol1[j]; SrcNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j); CurrCount++; } } } } else { TInt i = t - SrcPartCnt; if (DstOffsets[i] != DstOffsets[i+1]) { TInt CurrNode = DstCol2[DstOffsets[i]]; TInt ThreadOffset = DstIdOffsets[i]; DstNodeIds[ThreadOffset] = TIntPr(CurrNode, DstOffsets[i]); TInt CurrCount = 1; for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) { while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; } if (j < DstOffsets[i+1]) { CurrNode = DstCol2[j]; DstNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j); CurrCount++; } } } } } Sw->Stop(TStopwatch::Group); Sw->Start(TStopwatch::MergeNeighborhoods); // Find the combined neighborhood (both out-neighbors and in-neighbors) of each node TIntTrV Nodes; Nodes.Reserve(TotalSrcNodes+TotalDstNodes); TInt i = 0, j = 0; while (i < TotalSrcNodes && j < TotalDstNodes) { if (SrcNodeIds[i].Val1 == 
DstNodeIds[j].Val1) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, j)); i++; j++; } else if (SrcNodeIds[i].Val1 < DstNodeIds[j].Val1) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); i++; } else { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); j++; } } for (; i < TotalSrcNodes; i++) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); } for (; j < TotalDstNodes; j++) { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); } Sw->Stop(TStopwatch::MergeNeighborhoods); Sw->Start(TStopwatch::AddNeighborhoods); TInt NumNodes = Nodes.Len(); PGraphMP Graph = PGraphMP::TObj::New(NumNodes, NumRows); // NumThreads = omp_get_max_threads(); // int Delta = (NumNodes+NumThreads-1)/NumThreads; TVec<TIntV> InVV(NumNodes); TVec<TIntV> OutVV(NumNodes); // omp_set_num_threads(NumThreads); #pragma omp parallel for schedule(static,100) for (int m = 0; m < NumNodes; m++) { //double startTr = omp_get_wtime(); //TIntV OutV, InV; TInt n, i, j; Nodes[m].GetVal(n, i, j); if (i >= 0) { TInt Offset = SrcNodeIds[i].GetVal2(); TInt Sz = EdgeCol1.Len()-Offset; if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; } OutVV[m].Reserve(Sz); OutVV[m].CopyUniqueFrom(EdgeCol1, Offset, Sz); } if (j >= 0) { TInt Offset = DstNodeIds[j].GetVal2(); TInt Sz = EdgeCol2.Len()-Offset; if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; } InVV[m].Reserve(Sz); InVV[m].CopyUniqueFrom(EdgeCol2, Offset, Sz); } Graph->AddNodeWithEdges(n, InVV[m], OutVV[m]); } Graph->SetNodes(NumNodes); Sw->Stop(TStopwatch::AddNeighborhoods); Sw->Start(TStopwatch::AddEdges); omp_set_num_threads(omp_get_max_threads()); if (NodeType == atInt) { #pragma omp parallel for schedule(static) for (int i = 0; i < Partitions.Len(); i++) { TRowIterator RowI(Partitions[i].GetVal1(), Table()); TRowIterator EndI(Partitions[i].GetVal2(), Table()); while (RowI < EndI) { TInt RowId = RowI.GetRowIdx(); // EdgeId TInt SrcId = RowI.GetIntAttr(SrcColIdx); TInt DstId = RowI.GetIntAttr(DstColIdx); Graph->AddEdgeUnchecked(RowId, SrcId, DstId); 
RowI++; for (TInt ea_i = 0; ea_i < EdgeAttrV.Len(); ea_i++) { TStr ColName = EdgeAttrV[ea_i]; TAttrType T = Table->GetColType(ColName); TInt Index = Table->GetColIdx(ColName); switch (T) { case atInt: Graph->AddIntAttrDatE(RowId, Table->IntCols[Index][RowId], ColName); break; case atFlt: Graph->AddFltAttrDatE(RowId, Table->FltCols[Index][RowId], ColName); break; case atStr: Graph->AddStrAttrDatE(RowId, Table->GetStrVal(Index, RowId), ColName); break; } } if ((Table->SrcNodeAttrV).Len() > 0) { Table->AddNodeAttributes(SrcId, Table->SrcNodeAttrV, RowId, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs); } if ((Table->DstNodeAttrV).Len() > 0) { Table->AddNodeAttributes(SrcId, Table->DstNodeAttrV, RowId, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs); } } } } else if (NodeType == atStr) { #pragma omp parallel for schedule(static) for (int i = 0; i < Partitions.Len(); i++) { TRowIterator RowI(Partitions[i].GetVal1(), Table()); TRowIterator EndI(Partitions[i].GetVal2(), Table()); while (RowI < EndI) { TInt RowId = RowI.GetRowIdx(); // EdgeId TInt SrcId = RowI.GetStrMapById(SrcColIdx); TInt DstId = RowI.GetStrMapById(DstColIdx); Graph->AddEdgeUnchecked(RowId, SrcId, DstId); RowI++; for (TInt ea_i = 0; ea_i < EdgeAttrV.Len(); ea_i++) { TStr ColName = EdgeAttrV[ea_i]; TAttrType T = Table->GetColType(ColName); TInt Index = Table->GetColIdx(ColName); switch (T) { case atInt: Graph->AddIntAttrDatE(RowId, Table->IntCols[Index][RowId], ColName); break; case atFlt: Graph->AddFltAttrDatE(RowId, Table->FltCols[Index][RowId], ColName); break; case atStr: Graph->AddStrAttrDatE(RowId, Table->GetStrVal(Index, RowId), ColName); break; } } if ((Table->SrcNodeAttrV).Len() > 0) { Table->AddNodeAttributes(SrcId, Table->SrcNodeAttrV, RowId, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs); } if ((Table->DstNodeAttrV).Len() > 0) { Table->AddNodeAttributes(SrcId, Table->DstNodeAttrV, RowId, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs); } } } } // aggregate node attributes and add to graph if 
((Table->SrcNodeAttrV).Len() > 0 || (Table->DstNodeAttrV).Len() > 0) { for (typename PGraphMP::TObj::TNodeI NodeI = Graph->BegNI(); NodeI < Graph->EndNI(); NodeI++) { TInt NId = NodeI.GetId(); if (NodeIntAttrs.IsKey(NId)) { TStrIntVH IntAttrVals = NodeIntAttrs.GetDat(NId); for (TStrIntVH::TIter it = IntAttrVals.BegI(); it < IntAttrVals.EndI(); it++) { TInt AttrVal = Table->AggregateVector<TInt>(it.GetDat(), AggrPolicy); Graph->AddIntAttrDatN(NId, AttrVal, it.GetKey()); } } if (NodeFltAttrs.IsKey(NId)) { TStrFltVH FltAttrVals = NodeFltAttrs.GetDat(NId); for (TStrFltVH::TIter it = FltAttrVals.BegI(); it < FltAttrVals.EndI(); it++) { TFlt AttrVal = Table->AggregateVector<TFlt>(it.GetDat(), AggrPolicy); Graph->AddFltAttrDatN(NId, AttrVal, it.GetKey()); } } if (NodeStrAttrs.IsKey(NId)) { TStrStrVH StrAttrVals = NodeStrAttrs.GetDat(NId); for (TStrStrVH::TIter it = StrAttrVals.BegI(); it < StrAttrVals.EndI(); it++) { TStr AttrVal = Table->AggregateVector<TStr>(it.GetDat(), AggrPolicy); Graph->AddStrAttrDatN(NId, AttrVal, it.GetKey()); } } } } Graph->SetEdges(NumRows); Sw->Stop(TStopwatch::AddEdges); // double endAdd = omp_get_wtime(); // printf("Add time = %f\n", endAdd-endAlloc); return Graph; } /// Calls ToNetworkMP with empty attribute vector. Convenience wrapper. template<class PGraphMP> PGraphMP ToNetworkMP(PTable Table, const TStr& SrcCol, const TStr& DstCol, TAttrAggr AggrPolicy) { TStrV V; return ToNetworkMP<PGraphMP>(Table, SrcCol, DstCol, V,AggrPolicy); } ///Implements table to network conversion in parallel. Not the recommended algorithm, using ToNetworkMP instead. 
template<class PGraphMP>
inline PGraphMP ToNetworkMP2(PTable Table,
  const TStr& SrcCol, const TStr& DstCol,
  TStrV& SrcAttrV, TStrV& DstAttrV, TStrV& EdgeAttrV, TAttrAggr AggrPolicy) {
  // Bucket/collector variant of the parallel table-to-network conversion:
  // columns are split into NThreads partitions, sorted locally, redistributed
  // into NumCollectors contiguous id ranges, sorted per collector, and then
  // nodes (with neighbor lists) and edges are added to the graph.
  // NOTE(review): SrcAttrV/DstAttrV/EdgeAttrV/AggrPolicy are unused here — no
  // node or edge attributes are copied into the resulting network.
  TStopwatch* Sw = TStopwatch::GetInstance();
  Sw->Start(TStopwatch::AllocateColumnCopies);
  const TInt SrcColIdx = Table->GetColIdx(SrcCol);
  const TInt DstColIdx = Table->GetColIdx(DstCol);
  const TInt NumRows = Table->NumValidRows;
  const TAttrType NodeType = Table->GetColType(SrcCol);
  Assert(NodeType == Table->GetColType(DstCol));
  // (SrcCol1, EdgeCol1) sorted by source id; (DstCol2, EdgeCol2) by dst id.
  TIntV SrcCol1, EdgeCol1, EdgeCol2, DstCol2;
  #pragma omp parallel sections num_threads(4)
  {
    #pragma omp section
    { SrcCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { DstCol2.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol2.Reserve(NumRows, NumRows); }
  }
  Sw->Stop(TStopwatch::AllocateColumnCopies);
  Sw->Start(TStopwatch::CopyColumns);
  TIntPrV Partitions;
  // int NThreads = omp_get_max_threads();
  const int NThreads = 40;  // hard-coded partition count, independent of omp_get_max_threads()
  Table->GetPartitionRanges(Partitions, NThreads);
  TInt PartitionSize = Partitions[0].GetVal2()-Partitions[0].GetVal1()+1;
  // Copy the node columns; each row id doubles as the edge id.
  if (NodeType == atInt) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetIntAttr(SrcColIdx);
        EdgeCol1[RowId] = RowId;
        DstCol2[RowId] = RowI.GetIntAttr(DstColIdx);
        EdgeCol2[RowId] = RowId;
        RowI++;
      }
    }
  } else if (NodeType == atStr) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetStrMapById(SrcColIdx);
        EdgeCol1[RowId] = RowId;
        DstCol2[RowId] = RowI.GetStrMapById(DstColIdx);
        EdgeCol2[RowId] = RowId;
        RowI++;
      }
    }
  }
  // Row-range boundaries for the NThreads local sorts.
  int Parts[NThreads+1];
  for (int i = 0; i < NThreads; i++) { Parts[i] = NumRows.Val / NThreads * i; }
  Parts[NThreads] = NumRows;
  Sw->Stop(TStopwatch::CopyColumns);
  Sw->Start(TStopwatch::Sort);
  // ExtremePoints[0]/[2]: first/last source id of each sorted partition;
  // ExtremePoints[1]/[3]: same for destination ids. Used to find MinId/MaxId.
  TInt ExtremePoints[4][NThreads];
  omp_set_num_threads(omp_get_max_threads());
  #pragma omp parallel
  {
    #pragma omp for schedule(static) nowait
    for (int i = 0; i < NThreads; i++) {
      TInt StartPos = Parts[i];
      TInt EndPos = Parts[i+1]-1;
      // TODO: Handle empty partition
      TTable::QSortKeyVal(SrcCol1, EdgeCol1, StartPos, EndPos);
      ExtremePoints[0][i] = SrcCol1[StartPos];
      ExtremePoints[2][i] = SrcCol1[EndPos];
    }
    #pragma omp for schedule(static) nowait
    for (int i = 0; i < NThreads; i++) {
      TInt StartPos = Parts[i];
      TInt EndPos = Parts[i+1]-1;
      // TODO: Handle empty partition
      TTable::QSortKeyVal(DstCol2, EdgeCol2, StartPos, EndPos);
      ExtremePoints[1][i] = DstCol2[StartPos];
      ExtremePoints[3][i] = DstCol2[EndPos];
    }
  }
  // find min points
  TInt MinId(INT_MAX);
  for (int j = 0; j < 2; j++) {
    for (int i = 0; i < NThreads; i++) {
      if (MinId > ExtremePoints[j][i]) { MinId = ExtremePoints[j][i]; }
    }
  }
  TInt MaxId(-1);
  for (int j = 2; j < 4; j++) {
    for (int i = 0; i < NThreads; i++) {
      if (MaxId < ExtremePoints[j][i]) { MaxId = ExtremePoints[j][i]; }
    }
  }
  Sw->Stop(TStopwatch::Sort);
  Sw->Start(TStopwatch::Group);
  // const int NumCollectors = omp_get_max_threads();
  const int NumCollectors = 20;  // hard-coded number of id-range collectors
  int Range = MaxId.Val - MinId.Val;
  // IdRanges[k] is the smallest node id belonging to collector k.
  TIntV IdRanges(NumCollectors+1);
  for (int j = 0; j < NumCollectors; j++) { IdRanges[j] = MinId + Range/NumCollectors*j; }
  IdRanges[NumCollectors] = MaxId+1;
  // SrcOffsets[i][k]: index within partition i where collector k's ids begin.
  int SrcOffsets[NThreads][NumCollectors+1];
  #pragma omp parallel for schedule(static)
  for (int i = 0; i < NThreads; i++) {
    int CollectorId = 0;
    for (int j = Parts[i]; j < Parts[i+1]; j++) {
      while (SrcCol1[j] >= IdRanges[CollectorId]) { SrcOffsets[i][CollectorId++] = j; }
    }
    while (CollectorId <= NumCollectors) { SrcOffsets[i][CollectorId++] = Parts[i+1]; }
  }
  int DstOffsets[NThreads][NumCollectors+1];
  #pragma omp parallel for schedule(static)
  for (int i = 0; i < NThreads; i++) {
    int CollectorId = 0;
    for (int j = Parts[i]; j < Parts[i+1]; j++) {
      while (DstCol2[j] >= IdRanges[CollectorId]) { DstOffsets[i][CollectorId++] = j; }
    }
    while (CollectorId <= NumCollectors) { DstOffsets[i][CollectorId++] = Parts[i+1]; }
  }
  // Prefix sums: total number of rows routed to each collector.
  TIntV SrcCollectorOffsets(NumCollectors+1);
  SrcCollectorOffsets[0] = 0;
  for (int k = 0; k < NumCollectors; k++) {
    int SumOffset = 0;
    for (int i = 0; i < NThreads; i++) { SumOffset += SrcOffsets[i][k+1] - SrcOffsets[i][k]; }
    SrcCollectorOffsets[k+1] = SrcCollectorOffsets[k] + SumOffset;
  }
  TIntV DstCollectorOffsets(NumCollectors+1);
  DstCollectorOffsets[0] = 0;
  for (int k = 0; k < NumCollectors; k++) {
    int SumOffset = 0;
    for (int i = 0; i < NThreads; i++) { SumOffset += DstOffsets[i][k+1] - DstOffsets[i][k]; }
    DstCollectorOffsets[k+1] = DstCollectorOffsets[k] + SumOffset;
  }
  // Redistributed copies: (SrcCol3, EdgeCol3) and (DstCol4, EdgeCol4) hold the
  // rows regrouped by collector id range, then sorted per collector.
  TIntV SrcCol3, EdgeCol3, EdgeCol4, DstCol4;
  #pragma omp parallel sections num_threads(4)
  {
    #pragma omp section
    { SrcCol3.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol3.Reserve(NumRows, NumRows); }
    #pragma omp section
    { DstCol4.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol4.Reserve(NumRows, NumRows); }
  }
  // Gather each collector's rows, sort them, and count distinct node ids.
  TIntV SrcNodeCounts(NumCollectors), DstNodeCounts(NumCollectors);
  #pragma omp parallel for schedule(static)
  for (int k = 0; k < NumCollectors; k++) {
    int ind = SrcCollectorOffsets[k];
    for (int i = 0; i < NThreads; i++) {
      for (int j = SrcOffsets[i][k]; j < SrcOffsets[i][k+1]; j++) {
        SrcCol3[ind] = SrcCol1[j];
        EdgeCol3[ind] = EdgeCol1[j];
        ind++;
      }
    }
    TTable::QSortKeyVal(SrcCol3, EdgeCol3, SrcCollectorOffsets[k], SrcCollectorOffsets[k+1]-1);
    int SrcCount = 0;
    if (SrcCollectorOffsets[k+1] > SrcCollectorOffsets[k]) {
      SrcCount = 1;
      for (int j = SrcCollectorOffsets[k]+1; j < SrcCollectorOffsets[k+1]; j++) {
        if (SrcCol3[j] != SrcCol3[j-1]) { SrcCount++; }
      }
    }
    SrcNodeCounts[k] = SrcCount;
    ind = DstCollectorOffsets[k];
    for (int i = 0; i < NThreads; i++) {
      for (int j = DstOffsets[i][k]; j < DstOffsets[i][k+1]; j++) {
        DstCol4[ind] = DstCol2[j];
        EdgeCol4[ind] = EdgeCol2[j];
        ind++;
      }
    }
    TTable::QSortKeyVal(DstCol4, EdgeCol4, DstCollectorOffsets[k], DstCollectorOffsets[k+1]-1);
    int DstCount = 0;
    if (DstCollectorOffsets[k+1] > DstCollectorOffsets[k]) {
      DstCount = 1;
      for (int j = DstCollectorOffsets[k]+1; j < DstCollectorOffsets[k+1]; j++) {
        if (DstCol4[j] != DstCol4[j-1]) { DstCount++; }
      }
    }
    DstNodeCounts[k] = DstCount;
  }
  // Prefix sums: starting index of each collector's slice of the node-id list.
  TInt TotalSrcNodes = 0;
  TIntV SrcIdOffsets;
  for (int i = 0; i < NumCollectors; i++) {
    SrcIdOffsets.Add(TotalSrcNodes);
    TotalSrcNodes += SrcNodeCounts[i];
  }
  TInt TotalDstNodes = 0;
  TIntV DstIdOffsets;
  for (int i = 0; i < NumCollectors; i++) {
    DstIdOffsets.Add(TotalDstNodes);
    TotalDstNodes += DstNodeCounts[i];
  }
  // find vector of (node_id, start_offset) where start_offset is the index of the first row with node_id
  TIntPrV SrcNodeIds, DstNodeIds;
  #pragma omp parallel sections
  {
    #pragma omp section
    { SrcNodeIds.Reserve(TotalSrcNodes, TotalSrcNodes); }
    #pragma omp section
    { DstNodeIds.Reserve(TotalDstNodes, TotalDstNodes); }
  }
  // Find the starting offset of each node (in both src and dst)
  #pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < 2*NumCollectors; t++) {
    if (t < NumCollectors) {
      TInt i = t;
      if (SrcCollectorOffsets[i] < SrcCollectorOffsets[i+1]) {
        TInt CurrNode = SrcCol3[SrcCollectorOffsets[i]];
        TInt ThreadOffset = SrcIdOffsets[i];
        SrcNodeIds[ThreadOffset] = TIntPr(CurrNode, SrcCollectorOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = SrcCollectorOffsets[i]+1; j < SrcCollectorOffsets[i+1]; j++) {
          while (j < SrcCollectorOffsets[i+1] && SrcCol3[j] == CurrNode) { j++; }
          if (j < SrcCollectorOffsets[i+1]) {
            CurrNode = SrcCol3[j];
            SrcNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    } else {
      TInt i = t - NumCollectors;
      if (DstCollectorOffsets[i] < DstCollectorOffsets[i+1]) {
        TInt CurrNode = DstCol4[DstCollectorOffsets[i]];
        TInt ThreadOffset = DstIdOffsets[i];
        DstNodeIds[ThreadOffset] = TIntPr(CurrNode, DstCollectorOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = DstCollectorOffsets[i]+1; j < DstCollectorOffsets[i+1]; j++) {
          while (j < DstCollectorOffsets[i+1] && DstCol4[j] == CurrNode) { j++; }
          if (j < DstCollectorOffsets[i+1]) {
            CurrNode = DstCol4[j];
            DstNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    }
  }
  Sw->Stop(TStopwatch::Group);
  Sw->Start(TStopwatch::MergeNeighborhoods);
  // Find the combined neighborhood (both out-neighbors and in-neighbors) of each node
  TIntTrV Nodes;
  Nodes.Reserve(TotalSrcNodes+TotalDstNodes);
  TInt i = 0, j = 0;
  while (i < TotalSrcNodes && j < TotalDstNodes) {
    if (SrcNodeIds[i].Val1 == DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, j));
      i++;
      j++;
    } else if (SrcNodeIds[i].Val1 < DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1));
      i++;
    } else {
      Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j));
      j++;
    }
  }
  for (; i < TotalSrcNodes; i++) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); }
  for (; j < TotalDstNodes; j++) { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); }
  Sw->Stop(TStopwatch::MergeNeighborhoods);
  Sw->Start(TStopwatch::AddNeighborhoods);
  TInt NumNodes = Nodes.Len();
  PGraphMP Graph = PGraphMP::TObj::New(NumNodes, NumRows);
  TVec<TIntV> InVV(NumNodes);
  TVec<TIntV> OutVV(NumNodes);
  // Build each node's unique out/in edge-id lists and add the node.
  #pragma omp parallel for schedule(static,100)
  for (int m = 0; m < NumNodes; m++) {
    TInt n, i, j;
    Nodes[m].GetVal(n, i, j);
    if (i >= 0) {
      TInt Offset = SrcNodeIds[i].GetVal2();
      TInt Sz = EdgeCol3.Len()-Offset;
      if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; }
      OutVV[m].Reserve(Sz);
      OutVV[m].CopyUniqueFrom(EdgeCol3, Offset, Sz);
    }
    if (j >= 0) {
      TInt Offset = DstNodeIds[j].GetVal2();
      TInt Sz = EdgeCol4.Len()-Offset;
      if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; }
      InVV[m].Reserve(Sz);
      InVV[m].CopyUniqueFrom(EdgeCol4, Offset, Sz);
    }
    Graph->AddNodeWithEdges(n, InVV[m], OutVV[m]);
  }
  Graph->SetNodes(NumNodes);
  Sw->Stop(TStopwatch::AddNeighborhoods);
  Sw->Start(TStopwatch::AddEdges);
  omp_set_num_threads(omp_get_max_threads());
  if (NodeType == atInt) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx(); // EdgeId
        TInt SrcId = RowI.GetIntAttr(SrcColIdx);
        TInt DstId = RowI.GetIntAttr(DstColIdx);
        Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
        RowI++;
      }
    }
  } else if (NodeType == atStr) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx(); // EdgeId
        TInt SrcId = RowI.GetStrMapById(SrcColIdx);
        TInt DstId = RowI.GetStrMapById(DstColIdx);
        Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
        RowI++;
      }
    }
  }
  Graph->SetEdges(NumRows);
  Sw->Stop(TStopwatch::AddEdges);
  return Graph;
}

/// Calls ToNetworkMP2 with an empty attribute vector. Convenience wrapper.
template<class PGraphMP>
PGraphMP ToNetworkMP2(PTable Table, const TStr& SrcCol, const TStr& DstCol, TAttrAggr AggrPolicy) {
  TStrV V;
  return ToNetworkMP2<PGraphMP>(Table, SrcCol, DstCol, V, V, V, AggrPolicy);
}
#endif // GCC_ATOMIC

/// Loads a mode, with name Name, into the PMMNet from the TTable. NCol specifies the node id column and NodeAttrV the node attributes.
int LoadModeNetToNet(PMMNet Graph, const TStr& Name, PTable Table, const TStr& NCol,
  TStrV& NodeAttrV);
/// Loads the nodes specified in column NCol from the TTable with the attributes specified in NodeAttrV.
int LoadMode(TModeNet& Graph, PTable Table, const TStr& NCol, TStrV& NodeAttrV);
/// Loads a crossnet from Mode1 to Mode2, with name CrossName, from the provided TTable. EdgeAttrV specifies edge attributes.
int LoadCrossNetToNet(PMMNet Graph, const TStr& Mode1, const TStr& Mode2, const TStr& CrossName, PTable Table, const TStr& SrcCol, const TStr& DstCol, TStrV& EdgeAttrV); /// Loads the edges from the TTable and EdgeAttrV specifies columns containing edge attributes. int LoadCrossNet(TCrossNet& Graph, PTable Table, const TStr& SrcCol, const TStr& DstCol, TStrV& EdgeAttrV); /// Converts table to a network sequentially. Use if network has only edge attributes. template<class PGraph> PGraph ToNetwork(PTable Table, const TStr& SrcCol, const TStr& DstCol, TStrV& EdgeAttrV, TAttrAggr AggrPolicy) { PGraph Graph = PGraph::TObj::New(); const TAttrType NodeType = Table->GetColType(SrcCol); Assert(NodeType == Table->GetColType(DstCol)); const TInt SrcColIdx = Table->GetColIdx(SrcCol); const TInt DstColIdx = Table->GetColIdx(DstCol); //Table->AddGraphAttributeV(SrcAttrV, false, true, false); //Table->AddGraphAttributeV(DstAttrV, false, false, true); //Table->AddGraphAttributeV(EdgeAttrV, true, false, true); // node values - i.e. the unique values of src/dst col //THashSet<TInt> IntNodeVals; // for both int and string node attr types. 
THash<TFlt, TInt> FltNodeVals; // make single pass over all rows in the table for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) { if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; } // add src and dst nodes to graph if they are not seen earlier TInt SVal, DVal; if (NodeType == atFlt) { TFlt FSVal = (Table->FltCols)[SrcColIdx][CurrRowIdx]; SVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FSVal); TFlt FDVal = (Table->FltCols)[SrcColIdx][CurrRowIdx]; DVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FDVal); } else if (NodeType == atInt || NodeType == atStr) { if (NodeType == atInt) { SVal = (Table->IntCols)[SrcColIdx][CurrRowIdx]; DVal = (Table->IntCols)[DstColIdx][CurrRowIdx]; } else { SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx]; // if (strlen(Table->GetContextKey(SVal)) == 0) { continue; } //illegal value DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx]; // if (strlen(Table->GetContextKey(DVal)) == 0) { continue; } //illegal value } if (!Graph->IsNode(SVal)) {Graph->AddNode(SVal); } if (!Graph->IsNode(DVal)) {Graph->AddNode(DVal); } //CheckAndAddIntNode(Graph, IntNodeVals, SVal); //CheckAndAddIntNode(Graph, IntNodeVals, DVal); } // add edge and edge attributes Graph->AddEdge(SVal, DVal, CurrRowIdx); // Aggregate edge attributes and add to graph for (TInt i = 0; i < EdgeAttrV.Len(); i++) { TStr ColName = EdgeAttrV[i]; TAttrType T = Table->GetColType(ColName); TInt Index = Table->GetColIdx(ColName); switch (T) { case atInt: Graph->AddIntAttrDatE(CurrRowIdx, Table->IntCols[Index][CurrRowIdx], ColName); break; case atFlt: Graph->AddFltAttrDatE(CurrRowIdx, Table->FltCols[Index][CurrRowIdx], ColName); break; case atStr: Graph->AddStrAttrDatE(CurrRowIdx, Table->GetStrVal(Index, CurrRowIdx), ColName); break; } } } return Graph; } /// Converts table to network in parallel. Use if network has only edge attributes. 
template<class PGraphMP> inline PGraphMP ToNetworkMP(PTable Table, const TStr& SrcCol, const TStr& DstCol, TStrV& EdgeAttrV, TAttrAggr AggrPolicy) { TStopwatch* Sw = TStopwatch::GetInstance(); Sw->Start(TStopwatch::AllocateColumnCopies); const TInt SrcColIdx = Table->GetColIdx(SrcCol); const TInt DstColIdx = Table->GetColIdx(DstCol); const TInt NumRows = Table->GetNumValidRows(); const TAttrType NodeType = Table->GetColType(SrcCol); Assert(NodeType == Table->GetColType(DstCol)); TIntV SrcCol1, EdgeCol1, EdgeCol2, DstCol2; THash<TInt, TStrIntVH> NodeIntAttrs; THash<TInt, TStrFltVH> NodeFltAttrs; THash<TInt, TStrStrVH> NodeStrAttrs; #pragma omp parallel sections num_threads(4) { #pragma omp section { SrcCol1.Reserve(NumRows, NumRows); } #pragma omp section { EdgeCol1.Reserve(NumRows, NumRows); } #pragma omp section { DstCol2.Reserve(NumRows, NumRows); } #pragma omp section { EdgeCol2.Reserve(NumRows, NumRows); } } Sw->Stop(TStopwatch::AllocateColumnCopies); Sw->Start(TStopwatch::CopyColumns); TIntPrV Partitions; Table->GetPartitionRanges(Partitions, omp_get_max_threads()); TInt PartitionSize = Partitions[0].GetVal2()-Partitions[0].GetVal1()+1; // double endPartition = omp_get_wtime(); // printf("Partition time = %f\n", endPartition-endResize); omp_set_num_threads(omp_get_max_threads()); if (NodeType == atInt) { #pragma omp parallel for schedule(static) for (int i = 0; i < Partitions.Len(); i++) { TRowIterator RowI(Partitions[i].GetVal1(), Table()); TRowIterator EndI(Partitions[i].GetVal2(), Table()); while (RowI < EndI) { TInt RowId = RowI.GetRowIdx(); SrcCol1[RowId] = RowI.GetIntAttr(SrcColIdx); EdgeCol1[RowId] = RowId; DstCol2[RowId] = RowI.GetIntAttr(DstColIdx); EdgeCol2[RowId] = RowId; RowI++; } } } else if (NodeType == atStr) { #pragma omp parallel for schedule(static) for (int i = 0; i < Partitions.Len(); i++) { TRowIterator RowI(Partitions[i].GetVal1(), Table()); TRowIterator EndI(Partitions[i].GetVal2(), Table()); while (RowI < EndI) { TInt RowId = 
RowI.GetRowIdx(); SrcCol1[RowId] = RowI.GetStrMapById(SrcColIdx); EdgeCol1[RowId] = RowId; DstCol2[RowId] = RowI.GetStrMapById(DstColIdx); EdgeCol2[RowId] = RowId; RowI++; } } } Sw->Stop(TStopwatch::CopyColumns); Sw->Start(TStopwatch::Sort); omp_set_num_threads(omp_get_max_threads()); #pragma omp parallel { #pragma omp single nowait { #ifndef GLib_WIN32 #pragma omp task untied shared(SrcCol1, EdgeCol1) #endif { TTable::QSortKeyVal(SrcCol1, EdgeCol1, 0, NumRows-1); } } #pragma omp single nowait { #ifndef GLib_WIN32 #pragma omp task untied shared(EdgeCol2, DstCol2) #endif { TTable::QSortKeyVal(DstCol2, EdgeCol2, 0, NumRows-1); } } #ifndef GLib_WIN32 #pragma omp taskwait #endif } Sw->Stop(TStopwatch::Sort); Sw->Start(TStopwatch::Group); TInt NumThreads = omp_get_max_threads(); TInt PartSize = (NumRows/NumThreads); // Find the offset of all partitions, each of which contains a list of rows. // Nodes from same sources or destinations are ensured to be kept within same partition. TIntV SrcOffsets, DstOffsets; SrcOffsets.Add(0); for (TInt i = 1; i < NumThreads; i++) { TInt CurrOffset = i * PartSize; while (CurrOffset < (i+1) * PartSize && SrcCol1[CurrOffset-1] == SrcCol1[CurrOffset]) { // ensure that rows from the same sources are grouped together CurrOffset++; } if (CurrOffset < (i+1) * PartSize) { SrcOffsets.Add(CurrOffset); } } SrcOffsets.Add(NumRows); DstOffsets.Add(0); for (TInt i = 1; i < NumThreads; i++) { TInt CurrOffset = i * PartSize; while (CurrOffset < (i+1) * PartSize && DstCol2[CurrOffset-1] == DstCol2[CurrOffset]) { // ensure that rows to the same destinations are grouped together CurrOffset++; } if (CurrOffset < (i+1) * PartSize) { DstOffsets.Add(CurrOffset); } } DstOffsets.Add(NumRows); TInt SrcPartCnt = SrcOffsets.Len()-1; // number of partitions TInt DstPartCnt = DstOffsets.Len()-1; // number of partitions // count the number of source nodes and destination nodes in each partition TIntV SrcNodeCounts, DstNodeCounts; SrcNodeCounts.Reserve(SrcPartCnt, 
SrcPartCnt); DstNodeCounts.Reserve(DstPartCnt, DstPartCnt); #pragma omp parallel for schedule(dynamic) for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) { if (t < SrcPartCnt) { TInt i = t; if (SrcOffsets[i] != SrcOffsets[i+1]) { SrcNodeCounts[i] = 1; TInt CurrNode = SrcCol1[SrcOffsets[i]]; for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) { while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; } if (j < SrcOffsets[i+1]) { SrcNodeCounts[i]++; CurrNode = SrcCol1[j]; } } } } else { TInt i = t - SrcPartCnt; if (DstOffsets[i] != DstOffsets[i+1]) { DstNodeCounts[i] = 1; TInt CurrNode = DstCol2[DstOffsets[i]]; for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) { while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; } if (j < DstOffsets[i+1]) { DstNodeCounts[i]++; CurrNode = DstCol2[j]; } } } } } TInt TotalSrcNodes = 0; TIntV SrcIdOffsets; for (int i = 0; i < SrcPartCnt; i++) { SrcIdOffsets.Add(TotalSrcNodes); TotalSrcNodes += SrcNodeCounts[i]; } TInt TotalDstNodes = 0; TIntV DstIdOffsets; for (int i = 0; i < DstPartCnt; i++) { DstIdOffsets.Add(TotalDstNodes); TotalDstNodes += DstNodeCounts[i]; } // printf("Total Src = %d, Total Dst = %d\n", TotalSrcNodes.Val, TotalDstNodes.Val); // find vector of (node_id, start_offset) where start_offset is the index of the first row with node_id TIntPrV SrcNodeIds, DstNodeIds; #pragma omp parallel sections { #pragma omp section { SrcNodeIds.Reserve(TotalSrcNodes, TotalSrcNodes); } #pragma omp section { DstNodeIds.Reserve(TotalDstNodes, TotalDstNodes); } } // Find the starting offset of each node (in both src and dst) #pragma omp parallel for schedule(dynamic) for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) { if (t < SrcPartCnt) { TInt i = t; if (SrcOffsets[i] != SrcOffsets[i+1]) { TInt CurrNode = SrcCol1[SrcOffsets[i]]; TInt ThreadOffset = SrcIdOffsets[i]; SrcNodeIds[ThreadOffset] = TIntPr(CurrNode, SrcOffsets[i]); TInt CurrCount = 1; for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) { while (j < 
SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; } if (j < SrcOffsets[i+1]) { CurrNode = SrcCol1[j]; SrcNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j); CurrCount++; } } } } else { TInt i = t - SrcPartCnt; if (DstOffsets[i] != DstOffsets[i+1]) { TInt CurrNode = DstCol2[DstOffsets[i]]; TInt ThreadOffset = DstIdOffsets[i]; DstNodeIds[ThreadOffset] = TIntPr(CurrNode, DstOffsets[i]); TInt CurrCount = 1; for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) { while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; } if (j < DstOffsets[i+1]) { CurrNode = DstCol2[j]; DstNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j); CurrCount++; } } } } } Sw->Stop(TStopwatch::Group); Sw->Start(TStopwatch::MergeNeighborhoods); // Find the combined neighborhood (both out-neighbors and in-neighbors) of each node TIntTrV Nodes; Nodes.Reserve(TotalSrcNodes+TotalDstNodes); TInt i = 0, j = 0; while (i < TotalSrcNodes && j < TotalDstNodes) { if (SrcNodeIds[i].Val1 == DstNodeIds[j].Val1) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, j)); i++; j++; } else if (SrcNodeIds[i].Val1 < DstNodeIds[j].Val1) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); i++; } else { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); j++; } } for (; i < TotalSrcNodes; i++) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); } for (; j < TotalDstNodes; j++) { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); } Sw->Stop(TStopwatch::MergeNeighborhoods); Sw->Start(TStopwatch::AddNeighborhoods); TInt NumNodes = Nodes.Len(); PGraphMP Graph = PGraphMP::TObj::New(NumNodes, NumRows); // NumThreads = omp_get_max_threads(); // int Delta = (NumNodes+NumThreads-1)/NumThreads; TVec<TIntV> InVV(NumNodes); TVec<TIntV> OutVV(NumNodes); // omp_set_num_threads(NumThreads); #pragma omp parallel for schedule(static,100) for (int m = 0; m < NumNodes; m++) { //double startTr = omp_get_wtime(); //TIntV OutV, InV; TInt n, i, j; Nodes[m].GetVal(n, i, j); if (i >= 0) { TInt Offset = SrcNodeIds[i].GetVal2(); TInt Sz = EdgeCol1.Len()-Offset; if 
(i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; } OutVV[m].Reserve(Sz); OutVV[m].CopyUniqueFrom(EdgeCol1, Offset, Sz); } if (j >= 0) { TInt Offset = DstNodeIds[j].GetVal2(); TInt Sz = EdgeCol2.Len()-Offset; if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; } InVV[m].Reserve(Sz); InVV[m].CopyUniqueFrom(EdgeCol2, Offset, Sz); } Graph->AddNodeWithEdges(n, InVV[m], OutVV[m]); } Graph->SetNodes(NumNodes); Sw->Stop(TStopwatch::AddNeighborhoods); Sw->Start(TStopwatch::AddEdges); omp_set_num_threads(omp_get_max_threads()); if (NodeType == atInt) { #pragma omp parallel for schedule(static) for (int i = 0; i < Partitions.Len(); i++) { TRowIterator RowI(Partitions[i].GetVal1(), Table()); TRowIterator EndI(Partitions[i].GetVal2(), Table()); while (RowI < EndI) { TInt RowId = RowI.GetRowIdx(); // EdgeId TInt SrcId = RowI.GetIntAttr(SrcColIdx); TInt DstId = RowI.GetIntAttr(DstColIdx); Graph->AddEdgeUnchecked(RowId, SrcId, DstId); RowI++; } } } else if (NodeType == atStr) { #pragma omp parallel for schedule(static) for (int i = 0; i < Partitions.Len(); i++) { TRowIterator RowI(Partitions[i].GetVal1(), Table()); TRowIterator EndI(Partitions[i].GetVal2(), Table()); while (RowI < EndI) { TInt RowId = RowI.GetRowIdx(); // EdgeId TInt SrcId = RowI.GetStrMapById(SrcColIdx); TInt DstId = RowI.GetStrMapById(DstColIdx); Graph->AddEdgeUnchecked(RowId, SrcId, DstId); RowI++; } } } Graph->SetEdges(NumRows); Graph->SetMxEId(NumRows); Sw->Stop(TStopwatch::AddEdges); // make single pass over all rows in the table to add attributes for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) { if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; } for (TInt ea_i = 0; ea_i < EdgeAttrV.Len(); ea_i++) { TStr ColName = EdgeAttrV[ea_i]; TAttrType T = Table->GetColType(ColName); TInt Index = Table->GetColIdx(ColName); switch (T) { case atInt: Graph->AddIntAttrDatE(CurrRowIdx, Table->IntCols[Index][CurrRowIdx], ColName); break; case atFlt: 
Graph->AddFltAttrDatE(CurrRowIdx, Table->FltCols[Index][CurrRowIdx], ColName); break; case atStr: Graph->AddStrAttrDatE(CurrRowIdx, Table->GetStrVal(Index, CurrRowIdx), ColName); break; } } } // double endAdd = omp_get_wtime(); // printf("Add time = %f\n", endAdd-endAlloc); return Graph; } /// Converts table to network sequentially. Takes edges from \c Table and nodes explicitly from \c NodeCol in \c NodeTable, with attribute vectors passed as columns in corresponding tables. template<class PGraph> PGraph ToNetwork(PTable Table, const TStr& SrcCol, const TStr& DstCol, TStrV& EdgeAttrV, PTable NodeTable, const TStr& NodeCol, TStrV& NodeAttrV, TAttrAggr AggrPolicy) { PGraph Graph = PGraph::TObj::New(); const TAttrType NodeType = Table->GetColType(SrcCol); Assert(NodeType == Table->GetColType(DstCol)); const TInt SrcColIdx = Table->GetColIdx(SrcCol); const TInt DstColIdx = Table->GetColIdx(DstCol); const TAttrType NodeTypeN = NodeTable->GetColType(NodeCol); const TInt NodeColIdx = NodeTable->GetColIdx(NodeCol); THash<TInt, TStrIntVH> NodeIntAttrs; THash<TInt, TStrFltVH> NodeFltAttrs; THash<TInt, TStrStrVH> NodeStrAttrs; //Table->AddGraphAttributeV(SrcAttrV, false, true, false); //Table->AddGraphAttributeV(DstAttrV, false, false, true); //Table->AddGraphAttributeV(EdgeAttrV, true, false, true); // node values - i.e. the unique values of src/dst col //THashSet<TInt> IntNodeVals; // for both int and string node attr types. 
THash<TFlt, TInt> FltNodeVals; // make single pass over all rows in the table for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) { if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; } // add src and dst nodes to graph if they are not seen earlier TInt SVal, DVal; if (NodeType == atFlt) { TFlt FSVal = (Table->FltCols)[SrcColIdx][CurrRowIdx]; SVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FSVal); TFlt FDVal = (Table->FltCols)[SrcColIdx][CurrRowIdx]; DVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FDVal); } else if (NodeType == atInt || NodeType == atStr) { if (NodeType == atInt) { SVal = (Table->IntCols)[SrcColIdx][CurrRowIdx]; DVal = (Table->IntCols)[DstColIdx][CurrRowIdx]; } else { SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx]; // if (strlen(Table->GetContextKey(SVal)) == 0) { continue; } //illegal value DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx]; // if (strlen(Table->GetContextKey(DVal)) == 0) { continue; } //illegal value } if (!Graph->IsNode(SVal)) {Graph->AddNode(SVal); } if (!Graph->IsNode(DVal)) {Graph->AddNode(DVal); } //CheckAndAddIntNode(Graph, IntNodeVals, SVal); //CheckAndAddIntNode(Graph, IntNodeVals, DVal); } // add edge and edge attributes Graph->AddEdge(SVal, DVal, CurrRowIdx); // Aggregate edge attributes and add to graph for (TInt i = 0; i < EdgeAttrV.Len(); i++) { TStr ColName = EdgeAttrV[i]; TAttrType T = Table->GetColType(ColName); TInt Index = Table->GetColIdx(ColName); switch (T) { case atInt: Graph->AddIntAttrDatE(CurrRowIdx, Table->IntCols[Index][CurrRowIdx], ColName); break; case atFlt: Graph->AddFltAttrDatE(CurrRowIdx, Table->FltCols[Index][CurrRowIdx], ColName); break; case atStr: Graph->AddStrAttrDatE(CurrRowIdx, Table->GetStrVal(Index, CurrRowIdx), ColName); break; } } } //Add node attribtes if (NodeAttrV.Len() > 0) { for (int CurrRowIdx = 0; CurrRowIdx < (NodeTable->Next).Len(); CurrRowIdx++) { if ((NodeTable->Next)[CurrRowIdx] == NodeTable->Invalid) { continue; } TInt NId; if 
(NodeTypeN == atInt) { NId = (NodeTable->IntCols)[NodeColIdx][CurrRowIdx]; } else if (NodeTypeN == atStr){ NId = (NodeTable->StrColMaps)[NodeColIdx][CurrRowIdx]; } for (TInt i = 0; i < NodeAttrV.Len(); i++) { TStr ColName = NodeAttrV[i]; TAttrType T = NodeTable->GetColType(ColName); TInt Index = NodeTable->GetColIdx(ColName); switch (T) { case atInt: Graph->AddIntAttrDatN(NId, NodeTable->IntCols[Index][CurrRowIdx], ColName); break; case atFlt: Graph->AddFltAttrDatN(NId, NodeTable->FltCols[Index][CurrRowIdx], ColName); break; case atStr: Graph->AddStrAttrDatN(NId, NodeTable->GetStrVal(Index, CurrRowIdx), ColName); break; } } } } return Graph; } /// Converts table to network in parallel. Takes edges from \c Table and nodes explicitly from \c NodeCol in \c NodeTable, with attribute vectors passed as columns in corresponding tables. template<class PGraphMP> inline PGraphMP ToNetworkMP(PTable Table, const TStr& SrcCol, const TStr& DstCol, TStrV& EdgeAttrV, PTable NodeTable, const TStr& NodeCol, TStrV& NodeAttrV, TAttrAggr AggrPolicy) { TStopwatch* Sw = TStopwatch::GetInstance(); Sw->Start(TStopwatch::AllocateColumnCopies); const TInt SrcColIdx = Table->GetColIdx(SrcCol); const TInt DstColIdx = Table->GetColIdx(DstCol); const TInt NumRows = Table->GetNumValidRows(); const TAttrType NodeType = Table->GetColType(SrcCol); Assert(NodeType == Table->GetColType(DstCol)); TIntV SrcCol1, EdgeCol1, EdgeCol2, DstCol2; const TAttrType NodeTypeN = NodeTable->GetColType(NodeCol); const TInt NodeColIdx = NodeTable->GetColIdx(NodeCol); THash<TInt, TStrIntVH> NodeIntAttrs; THash<TInt, TStrFltVH> NodeFltAttrs; THash<TInt, TStrStrVH> NodeStrAttrs; #pragma omp parallel sections num_threads(4) { #pragma omp section { SrcCol1.Reserve(NumRows, NumRows); } #pragma omp section { EdgeCol1.Reserve(NumRows, NumRows); } #pragma omp section { DstCol2.Reserve(NumRows, NumRows); } #pragma omp section { EdgeCol2.Reserve(NumRows, NumRows); } } Sw->Stop(TStopwatch::AllocateColumnCopies); 
Sw->Start(TStopwatch::CopyColumns); TIntPrV Partitions; Table->GetPartitionRanges(Partitions, omp_get_max_threads()); TInt PartitionSize = Partitions[0].GetVal2()-Partitions[0].GetVal1()+1; // double endPartition = omp_get_wtime(); // printf("Partition time = %f\n", endPartition-endResize); omp_set_num_threads(omp_get_max_threads()); if (NodeType == atInt) { #pragma omp parallel for schedule(static) for (int i = 0; i < Partitions.Len(); i++) { TRowIterator RowI(Partitions[i].GetVal1(), Table()); TRowIterator EndI(Partitions[i].GetVal2(), Table()); while (RowI < EndI) { TInt RowId = RowI.GetRowIdx(); SrcCol1[RowId] = RowI.GetIntAttr(SrcColIdx); EdgeCol1[RowId] = RowId; DstCol2[RowId] = RowI.GetIntAttr(DstColIdx); EdgeCol2[RowId] = RowId; RowI++; } } } else if (NodeType == atStr) { #pragma omp parallel for schedule(static) for (int i = 0; i < Partitions.Len(); i++) { TRowIterator RowI(Partitions[i].GetVal1(), Table()); TRowIterator EndI(Partitions[i].GetVal2(), Table()); while (RowI < EndI) { TInt RowId = RowI.GetRowIdx(); SrcCol1[RowId] = RowI.GetStrMapById(SrcColIdx); EdgeCol1[RowId] = RowId; DstCol2[RowId] = RowI.GetStrMapById(DstColIdx); EdgeCol2[RowId] = RowId; RowI++; } } } Sw->Stop(TStopwatch::CopyColumns); Sw->Start(TStopwatch::Sort); omp_set_num_threads(omp_get_max_threads()); #pragma omp parallel { #pragma omp single nowait { #ifndef GLib_WIN32 #pragma omp task untied shared(SrcCol1, EdgeCol1) #endif { TTable::QSortKeyVal(SrcCol1, EdgeCol1, 0, NumRows-1); } } #pragma omp single nowait { #ifndef GLib_WIN32 #pragma omp task untied shared(EdgeCol2, DstCol2) #endif { TTable::QSortKeyVal(DstCol2, EdgeCol2, 0, NumRows-1); } } #ifndef GLib_WIN32 #pragma omp taskwait #endif } Sw->Stop(TStopwatch::Sort); Sw->Start(TStopwatch::Group); TInt NumThreads = omp_get_max_threads(); TInt PartSize = (NumRows/NumThreads); // Find the offset of all partitions, each of which contains a list of rows. 
// Nodes from same sources or destinations are ensured to be kept within same partition. TIntV SrcOffsets, DstOffsets; SrcOffsets.Add(0); for (TInt i = 1; i < NumThreads; i++) { TInt CurrOffset = i * PartSize; while (CurrOffset < (i+1) * PartSize && SrcCol1[CurrOffset-1] == SrcCol1[CurrOffset]) { // ensure that rows from the same sources are grouped together CurrOffset++; } if (CurrOffset < (i+1) * PartSize) { SrcOffsets.Add(CurrOffset); } } SrcOffsets.Add(NumRows); DstOffsets.Add(0); for (TInt i = 1; i < NumThreads; i++) { TInt CurrOffset = i * PartSize; while (CurrOffset < (i+1) * PartSize && DstCol2[CurrOffset-1] == DstCol2[CurrOffset]) { // ensure that rows to the same destinations are grouped together CurrOffset++; } if (CurrOffset < (i+1) * PartSize) { DstOffsets.Add(CurrOffset); } } DstOffsets.Add(NumRows); TInt SrcPartCnt = SrcOffsets.Len()-1; // number of partitions TInt DstPartCnt = DstOffsets.Len()-1; // number of partitions // count the number of source nodes and destination nodes in each partition TIntV SrcNodeCounts, DstNodeCounts; SrcNodeCounts.Reserve(SrcPartCnt, SrcPartCnt); DstNodeCounts.Reserve(DstPartCnt, DstPartCnt); #pragma omp parallel for schedule(dynamic) for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) { if (t < SrcPartCnt) { TInt i = t; if (SrcOffsets[i] != SrcOffsets[i+1]) { SrcNodeCounts[i] = 1; TInt CurrNode = SrcCol1[SrcOffsets[i]]; for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) { while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; } if (j < SrcOffsets[i+1]) { SrcNodeCounts[i]++; CurrNode = SrcCol1[j]; } } } } else { TInt i = t - SrcPartCnt; if (DstOffsets[i] != DstOffsets[i+1]) { DstNodeCounts[i] = 1; TInt CurrNode = DstCol2[DstOffsets[i]]; for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) { while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; } if (j < DstOffsets[i+1]) { DstNodeCounts[i]++; CurrNode = DstCol2[j]; } } } } } TInt TotalSrcNodes = 0; TIntV SrcIdOffsets; for (int i = 0; i < SrcPartCnt; 
i++) { SrcIdOffsets.Add(TotalSrcNodes); TotalSrcNodes += SrcNodeCounts[i]; } TInt TotalDstNodes = 0; TIntV DstIdOffsets; for (int i = 0; i < DstPartCnt; i++) { DstIdOffsets.Add(TotalDstNodes); TotalDstNodes += DstNodeCounts[i]; } // printf("Total Src = %d, Total Dst = %d\n", TotalSrcNodes.Val, TotalDstNodes.Val); // find vector of (node_id, start_offset) where start_offset is the index of the first row with node_id TIntPrV SrcNodeIds, DstNodeIds; #pragma omp parallel sections { #pragma omp section { SrcNodeIds.Reserve(TotalSrcNodes, TotalSrcNodes); } #pragma omp section { DstNodeIds.Reserve(TotalDstNodes, TotalDstNodes); } } // Find the starting offset of each node (in both src and dst) #pragma omp parallel for schedule(dynamic) for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) { if (t < SrcPartCnt) { TInt i = t; if (SrcOffsets[i] != SrcOffsets[i+1]) { TInt CurrNode = SrcCol1[SrcOffsets[i]]; TInt ThreadOffset = SrcIdOffsets[i]; SrcNodeIds[ThreadOffset] = TIntPr(CurrNode, SrcOffsets[i]); TInt CurrCount = 1; for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) { while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; } if (j < SrcOffsets[i+1]) { CurrNode = SrcCol1[j]; SrcNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j); CurrCount++; } } } } else { TInt i = t - SrcPartCnt; if (DstOffsets[i] != DstOffsets[i+1]) { TInt CurrNode = DstCol2[DstOffsets[i]]; TInt ThreadOffset = DstIdOffsets[i]; DstNodeIds[ThreadOffset] = TIntPr(CurrNode, DstOffsets[i]); TInt CurrCount = 1; for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) { while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; } if (j < DstOffsets[i+1]) { CurrNode = DstCol2[j]; DstNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j); CurrCount++; } } } } } Sw->Stop(TStopwatch::Group); Sw->Start(TStopwatch::MergeNeighborhoods); // Find the combined neighborhood (both out-neighbors and in-neighbors) of each node TIntTrV Nodes; Nodes.Reserve(TotalSrcNodes+TotalDstNodes); TInt i = 0, j = 0; while (i < 
TotalSrcNodes && j < TotalDstNodes) { if (SrcNodeIds[i].Val1 == DstNodeIds[j].Val1) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, j)); i++; j++; } else if (SrcNodeIds[i].Val1 < DstNodeIds[j].Val1) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); i++; } else { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); j++; } } for (; i < TotalSrcNodes; i++) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); } for (; j < TotalDstNodes; j++) { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); } Sw->Stop(TStopwatch::MergeNeighborhoods); Sw->Start(TStopwatch::AddNeighborhoods); TInt NumNodes = Nodes.Len(); PGraphMP Graph = PGraphMP::TObj::New(NumNodes, NumRows); // NumThreads = omp_get_max_threads(); // int Delta = (NumNodes+NumThreads-1)/NumThreads; TVec<TIntV> InVV(NumNodes); TVec<TIntV> OutVV(NumNodes); // omp_set_num_threads(NumThreads); #pragma omp parallel for schedule(static,100) for (int m = 0; m < NumNodes; m++) { //double startTr = omp_get_wtime(); //TIntV OutV, InV; TInt n, i, j; Nodes[m].GetVal(n, i, j); if (i >= 0) { TInt Offset = SrcNodeIds[i].GetVal2(); TInt Sz = EdgeCol1.Len()-Offset; if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; } OutVV[m].Reserve(Sz); OutVV[m].CopyUniqueFrom(EdgeCol1, Offset, Sz); } if (j >= 0) { TInt Offset = DstNodeIds[j].GetVal2(); TInt Sz = EdgeCol2.Len()-Offset; if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; } InVV[m].Reserve(Sz); InVV[m].CopyUniqueFrom(EdgeCol2, Offset, Sz); } Graph->AddNodeWithEdges(n, InVV[m], OutVV[m]); } Graph->SetNodes(NumNodes); Sw->Stop(TStopwatch::AddNeighborhoods); Sw->Start(TStopwatch::AddEdges); omp_set_num_threads(omp_get_max_threads()); if (NodeType == atInt) { #pragma omp parallel for schedule(static) for (int i = 0; i < Partitions.Len(); i++) { TRowIterator RowI(Partitions[i].GetVal1(), Table()); TRowIterator EndI(Partitions[i].GetVal2(), Table()); while (RowI < EndI) { TInt RowId = RowI.GetRowIdx(); // EdgeId TInt SrcId = RowI.GetIntAttr(SrcColIdx); TInt DstId = 
RowI.GetIntAttr(DstColIdx); Graph->AddEdgeUnchecked(RowId, SrcId, DstId); RowI++; } } } else if (NodeType == atStr) { #pragma omp parallel for schedule(static) for (int i = 0; i < Partitions.Len(); i++) { TRowIterator RowI(Partitions[i].GetVal1(), Table()); TRowIterator EndI(Partitions[i].GetVal2(), Table()); while (RowI < EndI) { TInt RowId = RowI.GetRowIdx(); // EdgeId TInt SrcId = RowI.GetStrMapById(SrcColIdx); TInt DstId = RowI.GetStrMapById(DstColIdx); Graph->AddEdgeUnchecked(RowId, SrcId, DstId); RowI++; } } } Graph->SetEdges(NumRows); Graph->SetMxEId(NumRows); Sw->Stop(TStopwatch::AddEdges); // make single pass over all rows in the table to add attributes for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) { if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; } for (TInt ea_i = 0; ea_i < EdgeAttrV.Len(); ea_i++) { TStr ColName = EdgeAttrV[ea_i]; TAttrType T = Table->GetColType(ColName); TInt Index = Table->GetColIdx(ColName); switch (T) { case atInt: Graph->AddIntAttrDatE(CurrRowIdx, Table->IntCols[Index][CurrRowIdx], ColName); break; case atFlt: Graph->AddFltAttrDatE(CurrRowIdx, Table->FltCols[Index][CurrRowIdx], ColName); break; case atStr: Graph->AddStrAttrDatE(CurrRowIdx, Table->GetStrVal(Index, CurrRowIdx), ColName); break; } } } //Add node attribtes if (NodeAttrV.Len() > 0) { for (int CurrRowIdx = 0; CurrRowIdx < (NodeTable->Next).Len(); CurrRowIdx++) { if ((NodeTable->Next)[CurrRowIdx] == NodeTable->Invalid) { continue; } TInt NId; if (NodeTypeN == atInt) { NId = (NodeTable->IntCols)[NodeColIdx][CurrRowIdx]; } else if (NodeTypeN == atStr){ NId = (NodeTable->StrColMaps)[NodeColIdx][CurrRowIdx]; } for (TInt i = 0; i < NodeAttrV.Len(); i++) { TStr ColName = NodeAttrV[i]; TAttrType T = NodeTable->GetColType(ColName); TInt Index = NodeTable->GetColIdx(ColName); switch (T) { case atInt: Graph->AddIntAttrDatN(NId, NodeTable->IntCols[Index][CurrRowIdx], ColName); break; case atFlt: Graph->AddFltAttrDatN(NId, 
NodeTable->FltCols[Index][CurrRowIdx], ColName); break; case atStr: Graph->AddStrAttrDatN(NId, NodeTable->GetStrVal(Index, CurrRowIdx), ColName); break; } } } } // double endAdd = omp_get_wtime(); // printf("Add time = %f\n", endAdd-endAlloc); return Graph; } #endif // GCC_ATOMIC }; // TSnap namespace // TODO tidy up GCC_ATOMIC directives #endif // CONV_H
parallelfor2.c
// A tricky case
#include <omp.h>

int main(void) {
  int values[1000];

  /* The parallel region is unconditionally enabled via if(1); the
   * work-sharing loop then splits the 1000 iterations across the team.
   * The loop variable is private to each thread by the OpenMP rules. */
  #pragma omp parallel if(1)
  #pragma omp for
  for (int idx = 0; idx < 1000; idx++) {
    values[idx] = idx * 2;
  }

  return 0;
}
xy2sig.c
// this uses the coefficient cube optimiser from the paper: // // Wenzel Jakob and Johannes Hanika. A low-dimensional function space for // efficient spectral upsampling. Computer Graphics Forum (Proceedings of // Eurographics), 38(2), March 2019. // // run like // make && ./xy2sig 512 lut.pfm XYZ && eu lut.pfm -w 1400 -h 1400 // for every pixel in the xy chromaticity graph, do: // - match c0 c1 c2 or equivalently c0 y lambda // - explicitly instantiate resulting spectrum and numerically gauss blur it. // - compute xy position and store velocity field from source to the gauss blurred instance // TODO: // as a second step, using this 2D (c0 y lambda vx vy) map // - create another 2D (s, lambda) map as say 1024x1024 s=0..1 lambda=360..830 // for phi in circle around white point: // - create spectrum for xy // - walk velocity field both directions towards white (s=0) and spectral (s=1) // - store result in largeish array // - normalise range to resolution of 2D map, resample into row of texture // - row will have: s=-1..0..1 and is filled in two parts (c0 > 0 and c0 < 0) #include <math.h> #include <string.h> #include <stdio.h> #include <stdlib.h> #include "details/lu.h" #include "details/matrices.h" #include "mom.h" #include "clip.h" #include "../o-pfm/half.h" #define BAD_CMF #ifdef BAD_CMF // okay let's also hack the cie functions to our taste (or the gpu approximations we'll do) #define CIE_SAMPLES 30 #define CIE_FINE_SAMPLES 30 #define CIE_LAMBDA_MIN 400.0 #define CIE_LAMBDA_MAX 700.0 #else /// Discretization of quadrature scheme #define CIE_SAMPLES 95 #define CIE_LAMBDA_MIN 360.0 #define CIE_LAMBDA_MAX 830.0 #define CIE_FINE_SAMPLES ((CIE_SAMPLES - 1) * 3 + 1) #endif #define RGB2SPEC_EPSILON 1e-4 #define MOM_EPS 1e-3 #include "details/cie1931.h" /// Precomputed tables for fast spectral -> RGB conversion double lambda_tbl[CIE_FINE_SAMPLES], phase_tbl[CIE_FINE_SAMPLES], rgb_tbl[3][CIE_FINE_SAMPLES], rgb_to_xyz[3][3], xyz_to_rgb[3][3], xyz_whitepoint[3]; /// 
Currently supported gamuts typedef enum Gamut { SRGB, ProPhotoRGB, ACES2065_1, ACES_AP1, REC2020, ERGB, XYZ, } Gamut; double sigmoid(double x) { return 0.5 * x / sqrt(1.0 + x * x) + 0.5; } #if 0 // gauss blur a spectrum explicitly: static inline void gauss_blur( const double sigma_nm, // in nanometers const double *spectrum, double *spectrum_blur, const int cnt, const int u_shape) // for u-shapes, 1-blur(1-spec) { const double sigma = sigma_nm * cnt / (double)CIE_FINE_SAMPLES; // in bin widths const int r = 3*sigma; double max = 0.0; for(int i=0;i<cnt;i++) spectrum_blur[i] = 0.0; for(int i=0;i<cnt;i++) { double w = 0.0; for(int j=-r;j<=r;j++) { if(i+j < 0 || i+j >= cnt) continue; double wg = exp(-j*j / (2.0*sigma*sigma)); if(u_shape) spectrum_blur[i] += (1.0 - spectrum[i+j]) * wg; else spectrum_blur[i] += spectrum[i+j] * wg; w += wg; } spectrum_blur[i] /= w; max = fmax(max, spectrum_blur[i]); } // end gauss blur the spectrum loop if(u_shape) for(int i=0;i<cnt;i++) spectrum_blur[i] = 1.0 - spectrum_blur[i] / max; else for(int i=0;i<cnt;i++) spectrum_blur[i] /= max; } #endif void lookup2d(float *map, int w, int h, int stride, double *xy, float *res) { double x[] = {xy[0] * w, (1.0-xy[1]) * h}; x[0] = fmax(0.0, fmin(x[0], w-2)); x[1] = fmax(0.0, fmin(x[1], h-2)); #if 1 // bilin double u[2] = {x[0] - (int)x[0], x[1] - (int)x[1]}; for(int i=0;i<stride;i++) res[i] = (1.0-u[0]) * (1.0-u[1]) * map[stride * (w* (int)x[1] + (int)x[0] ) + i] + ( u[0]) * (1.0-u[1]) * map[stride * (w* (int)x[1] + (int)x[0] + 1) + i] + ( u[0]) * ( u[1]) * map[stride * (w*((int)x[1]+1) + (int)x[0] + 1) + i] + (1.0-u[0]) * ( u[1]) * map[stride * (w*((int)x[1]+1) + (int)x[0] ) + i]; #else // box for(int i=0;i<stride;i++) res[i] = map[stride * (w*(int)x[1] + (int)x[0]) +i]; #endif } void lookup1d(float *map, int w, int stride, double x, float *res) { x = x * w; x = fmax(0.0, fmin(x, w-2)); double u = x - (int)x; for(int i=0;i<stride;i++) res[i] = (1.0-u) * map[stride * (int)x + i] + u * map[stride * 
((int)x+1) + i]; } double sqrd(double x) { return x * x; } void cvt_c0yl_c012(const double *c0yl, double *coeffs) { coeffs[0] = c0yl[0]; coeffs[1] = c0yl[2] * -2.0 * c0yl[0]; coeffs[2] = c0yl[1] + c0yl[0] * c0yl[2] * c0yl[2]; } void cvt_c012_c0yl(const double *coeffs, double *c0yl) { // account for normalising lambda: double c0 = CIE_LAMBDA_MIN, c1 = 1.0 / (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN); double A = coeffs[0], B = coeffs[1], C = coeffs[2]; double A2 = (float)(A*(sqrd(c1))); double B2 = (float)(B*c1 - 2*A*c0*(sqrd(c1))); double C2 = (float)(C - B*c0*c1 + A*(sqrd(c0*c1))); if(fabs(A2) < 1e-12) { c0yl[0] = c0yl[1] = c0yl[2] = 0.0; return; } // convert to c0 y dom-lambda: c0yl[0] = A2; // square slope stays c0yl[2] = B2 / (-2.0*A2); // dominant wavelength c0yl[1] = C2 - B2*B2 / (4.0 * A2); // y #if 0 double tmp[3]; tmp[0] = c0yl[0]; tmp[1] = c0yl[2] * -2.0 * c0yl[0]; tmp[2] = c0yl[1] + c0yl[0] * c0yl[2] * c0yl[2]; fprintf(stdout, "%g %g %g -- %g %g %g\n", A2, B2, C2, tmp[0], tmp[1], tmp[2]); #endif } void cvt_c0yl_lwd(const double *c0yl, double *lwd) { // const double A = c0yl[0], B = c0yl[1], C = c0yl[2]; // const double c0 = A; // square slope stays // const double ldom = B / (-2.0*A); // dominant wavelength // const double y = C - B*B / (4.0 * A); // y const double c0 = c0yl[0]; const double y = c0yl[1]; const double ldom = c0yl[2]; const double y0 = c0 > 0.0 ? 
1.0 : -2.0; const double w = 2.0 * sqrt((y0 - y)/c0); const double d = copysign(sqrt(c0*(y0-y)) * pow(y0*y0+1, -3./2.), c0); lwd[0] = ldom; lwd[1] = w; lwd[2] = d; } void quantise_coeffs(double coeffs[3], float out[3]) { #if 1 // def SIGMOID // account for normalising lambda: double c0 = CIE_LAMBDA_MIN, c1 = 1.0 / (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN); double A = coeffs[0], B = coeffs[1], C = coeffs[2]; const double A2 = (A*(sqrd(c1))); const double B2 = (B*c1 - 2*A*c0*(sqrd(c1))); const double C2 = (C - B*c0*c1 + A*(sqrd(c0*c1))); out[0] = (float)A2; out[1] = (float)B2; out[2] = (float)C2; #if 0 { const double c0 = A2; // square slope stays const double ldom = B2 / (-2.0*A2); // dominant wavelength const double y = C2 - B2*B2 / (4.0 * A2); // y const double y0 = c0 > 0.0 ? 1.0 : -2.0; const double w = 2.0 * sqrt((y0 - y)/c0); const double d = copysign(sqrt(c0*(y0-y)) * pow(y0*y0+1, -3./2.), c0); out[0] = ldom; out[1] = w; out[2] = d;//fabs(d); /// XXX DEBUG abs to see the output } #endif #if 0 // convert to c0 y dom-lambda: A = out[0]; B = out[1]; C = out[2]; out[0] = A; // square slope stays out[2] = B / (-2.0*A); // dominant wavelength // out[1] = C - A * out[2] * out[2]; // y out[1] = C - B*B / (4.0 * A); // y // // these are good bounds: // if(out[0] > 0.0 && out[1] > 0.85) fprintf(stdout, "!!!\n"); // // else fprintf(stdout, "yay, good!\n"); // if(out[0] < 0.0 && out[1] < -1.85) fprintf(stdout, "!!!??? %g %g %g\n", out[0], out[1], out[2]); // // else fprintf(stdout, "yay, good!\n"); // XXX visualise abs: // out[0] = fabsf(out[0]); // goes from 1.0/256.0 (spectral locus) .. 
0 (purple ridge through white) // out[1] = fabsf(out[1]); // somewhat useful from 0..large purple ridge..spectral locus, but high-low-high for purple tones // out[1] = -out[1]; #endif #if 0 // convert to shift width slope: A = out[0]; B = out[1]; C = out[2]; // TODO: if 4ac - b^2 > 0: int firstcase = 4*A*C - B*B > 0.0; if(firstcase) { out[0] = - sqrt(4*A*C - B*B) / 2.0; // FIXME something with the signs i don't get out[1] = - sqrt(4*A*C - B*B) / (2.0*A); out[2] = - B / (2.0 * A); // dominant wavelength } else { out[0] = - sqrt(B*B - 4*A*C) / 2.0; out[1] = - sqrt(B*B - 4*A*C) / (2.0*A); out[2] = - B / (2.0 * A); } { const double slope = out[0], width = out[1], dom_lambda = out[2]; // TODO: if first case double c0, c1, c2; c0 = slope/width; c1 = -2.0*c0*dom_lambda; c2 = c0 * (dom_lambda*dom_lambda - width*width); if(4*c0*c2 > c1*c1) c2 = slope * width + c0 * dom_lambda*dom_lambda; // if(A != 0 || B != 0 || C != 0) fprintf(stderr, "input: %g %g %g\n", slope, width, dom_lambda); if(A != 0 || B != 0 || C != 0) fprintf(stderr, "roundtrip: %g %g %g -- %g %g %g \n", A, B, C, c0, c1, c2); } // slope = +/- sqrt(4 a c - b^2) / 2 // width = +/- sqrt(4 a c - b^2) / (2a) // dlamb = - b / (2a) // DEBUG: // out[2] = (out[2] - CIE_LAMBDA_MIN) / (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN); /* Scale lambda to 0..1 range */ #endif #else out[0] = coeffs[0]; out[1] = coeffs[1]; out[2] = coeffs[2]; #endif } void init_coeffs(double coeffs[3]) { #if 1//def SIGMOID #ifdef SIG_SWZ coeffs[0] = 0.1; coeffs[1] = 80.0; coeffs[2] = 550.0; #else coeffs[0] = 0.0; coeffs[1] = 0.0; coeffs[2] = 0.0; #endif #else coeffs[0] = 0.5; coeffs[1] = 0.0; coeffs[2] = 0.0; #endif } void clamp_coeffs(double coeffs[3]) { #ifdef SIG_SWZ if(coeffs[2] < 200) coeffs[2] = 200; if(coeffs[2] > 1000) coeffs[2] = 1000; if(coeffs[1] < 1e-5) coeffs[1] = 1e-5; if(coeffs[1] > 400.0) coeffs[1] = 400.0; if(coeffs[0] < -100.0) coeffs[0] = -100.0; if(coeffs[0] > 100.0) coeffs[0] = 100.0; #else double max = fmax(fmax(fabs(coeffs[0]), 
fabs(coeffs[1])), fabs(coeffs[2])); if (max > 1000) { for (int j = 0; j < 3; ++j) coeffs[j] *= 1000 / max; } #endif } int check_gamut(double rgb[3]) { double xyz[3] = {0.0}; for(int j=0;j<3;j++) for(int i=0;i<3;i++) xyz[i] += rgb_to_xyz[i][j] * rgb[j]; double x = xyz[0] / (xyz[0] + xyz[1] + xyz[2]); double y = xyz[1] / (xyz[0] + xyz[1] + xyz[2]); return spectrum_outside(x, y); } // Journal of Computer Graphics Techniques, Simple Analytic Approximations to // the CIE XYZ Color Matching Functions Vol. 2, No. 2, 2013 http://jcgt.org //Inputs: Wavelength in nanometers double xFit_1931( double wave ) { double t1 = (wave-442.0)*((wave<442.0)?0.0624:0.0374); double t2 = (wave-599.8)*((wave<599.8)?0.0264:0.0323); double t3 = (wave-501.1)*((wave<501.1)?0.0490:0.0382); return 0.362*exp(-0.5*t1*t1) + 1.056*exp(-0.5*t2*t2)- 0.065*exp(-0.5*t3*t3); } double yFit_1931( double wave ) { double t1 = (wave-568.8)*((wave<568.8)?0.0213:0.0247); double t2 = (wave-530.9)*((wave<530.9)?0.0613:0.0322); return 0.821*exp(-0.5*t1*t1) + 0.286*exp(-0.5*t2*t2); } double zFit_1931( double wave ) { double t1 = (wave-437.0)*((wave<437.0)?0.0845:0.0278); double t2 = (wave-459.0)*((wave<459.0)?0.0385:0.0725); return 1.217*exp(-0.5*t1*t1) + 0.681*exp(-0.5*t2*t2); } /** * This function precomputes tables used to convert arbitrary spectra * to RGB (either sRGB or ProPhoto RGB) * * A composite quadrature rule integrates the CIE curves, reflectance, and * illuminant spectrum over each 5nm segment in the 360..830nm range using * Simpson's 3/8 rule (4th-order accurate), which evaluates the integrand at * four positions per segment. While the CIE curves and illuminant spectrum are * linear over the segment, the reflectance could have arbitrary behavior, * hence the extra precations. 
*/ void init_tables(Gamut gamut) { memset(rgb_tbl, 0, sizeof(rgb_tbl)); memset(xyz_whitepoint, 0, sizeof(xyz_whitepoint)); const double *illuminant = 0; switch (gamut) { case SRGB: illuminant = cie_d65; memcpy(xyz_to_rgb, xyz_to_srgb, sizeof(double) * 9); memcpy(rgb_to_xyz, srgb_to_xyz, sizeof(double) * 9); break; case ERGB: illuminant = cie_e; memcpy(xyz_to_rgb, xyz_to_ergb, sizeof(double) * 9); memcpy(rgb_to_xyz, ergb_to_xyz, sizeof(double) * 9); break; case XYZ: illuminant = cie_e; memcpy(xyz_to_rgb, xyz_to_xyz, sizeof(double) * 9); memcpy(rgb_to_xyz, xyz_to_xyz, sizeof(double) * 9); break; case ProPhotoRGB: illuminant = cie_d50; memcpy(xyz_to_rgb, xyz_to_prophoto_rgb, sizeof(double) * 9); memcpy(rgb_to_xyz, prophoto_rgb_to_xyz, sizeof(double) * 9); break; case ACES2065_1: illuminant = cie_d60; memcpy(xyz_to_rgb, xyz_to_aces2065_1, sizeof(double) * 9); memcpy(rgb_to_xyz, aces2065_1_to_xyz, sizeof(double) * 9); break; case ACES_AP1: illuminant = cie_d60; memcpy(xyz_to_rgb, xyz_to_aces_ap1, sizeof(double) * 9); memcpy(rgb_to_xyz, aces_ap1_to_xyz, sizeof(double) * 9); break; case REC2020: illuminant = cie_d65; memcpy(xyz_to_rgb, xyz_to_rec2020, sizeof(double) * 9); memcpy(rgb_to_xyz, rec2020_to_xyz, sizeof(double) * 9); break; } double norm = 0.0, n2[3] = {0.0}; for (int i = 0; i < CIE_FINE_SAMPLES; ++i) { #ifndef BAD_CMF double h = (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN) / (CIE_FINE_SAMPLES - 1.0); double lambda = CIE_LAMBDA_MIN + i * h; double xyz[3] = { cie_interp(cie_x, lambda), cie_interp(cie_y, lambda), cie_interp(cie_z, lambda) }, I = cie_interp(illuminant, lambda); #else double h = (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN) / (double)CIE_FINE_SAMPLES; double lambda = CIE_LAMBDA_MIN + (i+0.5) * h; // double lambda = CIE_LAMBDA_MIN + i * h; double xyz[3] = { cie_interp(cie_x, lambda), cie_interp(cie_y, lambda), cie_interp(cie_z, lambda) }; // double xyz[3] = { // xFit_1931(lambda), // yFit_1931(lambda), // zFit_1931(lambda), }, const double Iw = cie_interp(illuminant, 
lambda); // I = blackbody_radiation(lambda, 6504.0); const double cw[3] = { -9.16167e-06, 0.00870653, -2.35259 // d65 / 3 // 0.00014479, -0.189595, 62.5251 // d65 / 1.1 // 0, 0, 10000 // illum E // -14.1899, 13622.4, -3.26377e+06 // -8.2609e-05, 0.0823704, -25.0921 // 0.00395809, -4.02143, 1021.5 // -9.12318e-05, 0.0924729, -27.9558 // 0.0691431, -74.2713, 19943.2 // says rec2020 // 0.0871685, -94.3229, 25511.3 // says xyz }; const double Is = 1.0/106.8 * sigmoid(cw[2] + lambda*(cw[1] + cw[0]*lambda)); // fprintf(stderr, "%g %g %g\n", Is, Iw, lambda); const double I = Iw; #endif norm += I; #ifndef BAD_CMF double weight = 3.0 / 8.0 * h; if (i == 0 || i == CIE_FINE_SAMPLES - 1) ; else if ((i - 1) % 3 == 2) weight *= 2.f; else weight *= 3.f; #else double weight = h; #endif #if 0 // output table for shader code double out[3] = {0.0}; for (int k = 0; k < 3; ++k) for (int j = 0; j < 3; ++j) out[k] += xyz_to_rgb[k][j] * xyz[j]; fprintf(stderr, "vec3(%g, %g, %g), // %g nm\n", out[0], out[1], out[2], lambda); #endif lambda_tbl[i] = lambda; phase_tbl[i] = mom_warp_lambda(lambda); for (int k = 0; k < 3; ++k) for (int j = 0; j < 3; ++j) rgb_tbl[k][i] += xyz_to_rgb[k][j] * xyz[j] * I * weight; for (int k = 0; k < 3; ++k) xyz_whitepoint[k] += xyz[k] * I * weight; for (int k = 0; k < 3; ++k) n2[k] += xyz[k] * weight; } } void eval_residual(const double *coeff, const double *rgb, double *residual) { double out[3] = { 0.0, 0.0, 0.0 }; for (int i = 0; i < CIE_FINE_SAMPLES; ++i) { // the optimiser doesn't like nanometers. // we'll do the normalised lambda thing and later convert when we write out. 
#ifndef SIG_SWZ #ifdef BAD_CMF double lambda = (i+.5)/(double)CIE_FINE_SAMPLES;//(lambda_tbl[i] - CIE_LAMBDA_MIN) / (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN); /* Scale lambda to 0..1 range */ #else double lambda = i/(double)CIE_FINE_SAMPLES;//(lambda_tbl[i] - CIE_LAMBDA_MIN) / (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN); /* Scale lambda to 0..1 range */ #endif double cf[3] = {coeff[0], coeff[1], coeff[2]}; #else double lambda = lambda_tbl[i]; // float x = rgb2spec_fma(rgb2spec_fma(coeff[0], lambda, coeff[1]), lambda, coeff[2]), // y = 1.0f / sqrtf(rgb2spec_fma(x, x, 1.0f)); // float s = rgb2spec_fma(.5f * x, y, .5f); double slope = coeff[0]; double width = coeff[1]; double dom_lambda = coeff[2]; #if 0 // from alisa's jupyter notebook: double s = slope; // coeff[0] double w = width; // coeff[1] double z = dom_lambda; // coeff[2] const double t = (fabs(s) * w + sqrt(s*s*w*w + 1.0/9.0) ) / (2.0*fabs(s)*w); const double sqrt_t = sqrt(t); const double c0 = s * sqrt_t*sqrt_t*sqrt_t / w; const double c1 = -2.0 * c0 * z; const double c2 = c0 * z*z + s*w*sqrt_t*(5.0*t - 6.0); #endif #if 1 // my simpler version (but forgot what the values mean) const double c0 = slope/width; const double c1 = -2.0*c0*dom_lambda; const double c2 = slope * width + c0 * dom_lambda*dom_lambda; // this has the advantage that mathematica can invert it: // slope = +/- sqrt(4 a c - b^2) / 2 // width = +/- sqrt(4 a c - b^2) / (2a) // dlamb = - b / (2a) #endif double cf[3] = {c0, c1, c2}; #endif { // scope /* Polynomial */ double x = 0.0; for (int i = 0; i < 3; ++i) x = x * lambda + cf[i]; /* Sigmoid */ double s = sigmoid(x); /* Integrate against precomputed curves */ for (int j = 0; j < 3; ++j) out[j] += rgb_tbl[j][i] * s; } } // cie_lab(out); memcpy(residual, rgb, sizeof(double) * 3); // cie_lab(residual); for (int j = 0; j < 3; ++j) residual[j] -= out[j]; } void eval_jacobian(const double *coeffs, const double *rgb, double **jac) { double r0[3], r1[3], tmp[3]; for (int i = 0; i < 3; ++i) { memcpy(tmp, coeffs, 
sizeof(double) * 3); tmp[i] -= RGB2SPEC_EPSILON; eval_residual(tmp, rgb, r0); memcpy(tmp, coeffs, sizeof(double) * 3); tmp[i] += RGB2SPEC_EPSILON; eval_residual(tmp, rgb, r1); for(int j=0;j<3;j++) assert(r1[j] == r1[j]); for(int j=0;j<3;j++) assert(r0[j] == r0[j]); for (int j = 0; j < 3; ++j) jac[j][i] = (r1[j] - r0[j]) * 1.0 / (2 * RGB2SPEC_EPSILON); } } double gauss_newton(const double rgb[3], double coeffs[3]) { int it = 40;//15; double r = 0; for (int i = 0; i < it; ++i) { double J0[3], J1[3], J2[3], *J[3] = { J0, J1, J2 }; double residual[3]; clamp_coeffs(coeffs); eval_residual(coeffs, rgb, residual); eval_jacobian(coeffs, rgb, J); #if 0 // fix boundary issues when the coefficients do not change any more (some colours may be outside the representable range) const double eps = 1e-6; for(int j=0;j<3;j++) { if(fabs(J0[j]) < eps) J0[j] = ((drand48() > 0.5) ? 1.0 : -1.0)*eps*(0.5 + drand48()); if(fabs(J1[j]) < eps) J1[j] = ((drand48() > 0.5) ? 1.0 : -1.0)*eps*(0.5 + drand48()); if(fabs(J2[j]) < eps) J2[j] = ((drand48() > 0.5) ? 
1.0 : -1.0)*eps*(0.5 + drand48()); } #endif int P[4]; int rv = LUPDecompose(J, 3, 1e-15, P); if (rv != 1) { fprintf(stdout, "RGB %g %g %g -> %g %g %g\n", rgb[0], rgb[1], rgb[2], coeffs[0], coeffs[1], coeffs[2]); fprintf(stdout, "J0 %g %g %g\n", J0[0], J0[1], J0[2]); fprintf(stdout, "J1 %g %g %g\n", J1[0], J1[1], J1[2]); fprintf(stdout, "J2 %g %g %g\n", J2[0], J2[1], J2[2]); return 666.0; } double x[3]; LUPSolve(J, P, residual, 3, x); r = 0.0; for (int j = 0; j < 3; ++j) { coeffs[j] -= x[j]; r += residual[j] * residual[j]; } if (r < 1e-6) break; } return sqrt(r); } static Gamut parse_gamut(const char *str) { if(!strcasecmp(str, "sRGB")) return SRGB; if(!strcasecmp(str, "eRGB")) return ERGB; if(!strcasecmp(str, "XYZ")) return XYZ; if(!strcasecmp(str, "ProPhotoRGB")) return ProPhotoRGB; if(!strcasecmp(str, "ACES2065_1")) return ACES2065_1; if(!strcasecmp(str, "ACES_AP1")) return ACES_AP1; if(!strcasecmp(str, "REC2020")) return REC2020; return SRGB; } int main(int argc, char **argv) { if (argc < 3) { printf("Syntax: xy2sig <resolution> <output> [<gamut>]\n" "where <gamut> is one of sRGB,eRGB,XYZ,ProPhotoRGB,ACES2065_1,ACES_AP1,REC2020\n"); exit(-1); } Gamut gamut = SRGB; if(argc > 3) gamut = parse_gamut(argv[3]); init_tables(gamut); const int res = atoi(argv[1]); // resolution of 2d lut printf("Optimizing "); { // determine white coefficients so we can replace D65 by something faster to evaluate than the array data: double coeffs[3] = {0, 0, 1000};//{0.0691431, -74.2713, 19943.2}; init_coeffs(coeffs); // double rgb[3] = {0.95047, 1.0, 1.08883}; // xyz d65 double rgb[3] = {1, 1, 1}; // illum E double b = rgb[0] + rgb[1] + rgb[2]; rgb[0] /= b; rgb[1] /= b; rgb[2] /= b; double resid = gauss_newton(rgb, coeffs); float out[3]; quantise_coeffs(coeffs, out); // fprintf(stderr, "white: %g, %g, %g resid %g\n", out[0], out[1], out[2], resid); } // read grey map from macadam: int max_w, max_h; float *max_b = 0; { // convert macad.pfm -fx 'r' -colorspace Gray -blur 15x15 
brightness.pfm FILE *f = fopen("brightness.pfm", "rb"); if(!f) { fprintf(stderr, "could not read macadam.pfm!!\n"); exit(2); } fscanf(f, "Pf\n%d %d\n%*[^\n]", &max_w, &max_h); max_b = calloc(sizeof(float), max_w*max_h); fgetc(f); // \n fread(max_b, sizeof(float), max_w*max_h, f); fclose(f); } int lsres = res/4; // allocate enough for mip maps too float *lsbuf = calloc(sizeof(float), 2* 5*lsres*lsres); size_t bufsize = 5*res*res; float *out = calloc(sizeof(float), bufsize); #if defined(_OPENMP) #pragma omp parallel for schedule(dynamic) shared(stdout,out,max_b,max_w,max_h) #endif for (int j = 0; j < res; ++j) { // const double y = (res - 1 - (j+0.5)) / (double)res; const double y = (res - 1 - (j)) / (double)res; printf("."); fflush(stdout); for (int i = 0; i < res; ++i) { // const double x = (i+0.5) / (double)res; const double x = (i) / (double)res; double rgb[3]; // range of fourier moments is [0,1]x[-1/pi,+1/pi]^2 double coeffs[3]; init_coeffs(coeffs); // normalise to max(rgb)=1 rgb[0] = x; rgb[1] = y; rgb[2] = 1.0-x-y; if(check_gamut(rgb)) continue; int ii = (int)fmin(max_w - 1, fmax(0, i * (max_w / (double)res))); int jj = max_h - 1 - (int)fmin(max_h - 1, fmax(0, j * (max_h / (double)res))); double m = fmax(0.001, 0.5*max_b[ii + max_w * jj]); double rgbm[3] = {rgb[0] * m, rgb[1] * m, rgb[2] * m}; double resid = gauss_newton(rgbm, coeffs); (void)resid; double c0yl[3], lwd[3]; cvt_c012_c0yl(coeffs, c0yl); // cvt_c0yl_lwd(c0yl, lwd); // fprintf(stderr, "%g %g %g %g %g\n", lwd[0], lwd[1], lwd[2], x, y); double velx = 0.0, vely = 0.0; #if 0 // TODO: now that we have a good spectrum: // explicitly instantiate it // explicitly gauss blur it // convert back to xy // store pointer to this other pixel const int cnt = CIE_FINE_SAMPLES; double spectrum[cnt]; double spectrum_blur[cnt]; for (int l = 0; l < cnt; l++) { double lambda = l/(cnt-1.0); // double lambda = (lambda_tbl[l] - CIE_LAMBDA_MIN) / (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN); /* Scale lambda to 0..1 range */ double x 
= 0.0; for (int i = 0; i < 3; ++i) x = x * lambda + coeffs[i]; spectrum[l] = sigmoid(x); } const double sigma = 9.0; // FIXME: < 9 results in banding, > 12 results in second attractor gauss_blur(sigma, spectrum, spectrum_blur, cnt, 0);//coeffs[0] > 0.0); double col[3] = {0.0}; #if 1 // cnt = CIE_FINE_SAMPLES for (int l = 0; l < cnt; l++) for (int j = 0; j < 3; ++j) col[j] += rgb_tbl[j][l] * spectrum_blur[l]; #else // otherwise for (int l = 0; l < cnt; l++) { double lambda = CIE_LAMBDA_MIN + l/(cnt-1.0) * (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN); double xyz[3] = { cie_interp(cie_x, lambda), cie_interp(cie_y, lambda), cie_interp(cie_z, lambda) }; col[0] += xyz[0] * spectrum_blur[l]; col[1] += xyz[1] * spectrum_blur[l]; col[2] += xyz[2] * spectrum_blur[l]; } #endif // col is in XYZ, we want the chromaticity coordinates: double b = col[0]+col[1]+col[2]; // velocity vector: velx = col[0] / b - x; vely = col[1] / b - y; double speed = sqrt(velx*velx + vely*vely); velx /= speed; // normalise vely /= speed; #endif int idx = j*res + i; out[5*idx + 0] = coeffs[0]; out[5*idx + 1] = coeffs[1]; out[5*idx + 2] = coeffs[2]; out[5*idx + 3] = c0yl[2];//velx;//m; float xy[2] = {x, y}, white[2] = {1.0f/3.0f, 1.0f/3.0f}; // illum E //{.3127266, .32902313}; // D65 float sat = spectrum_saturation(xy, white); out[5*idx + 4] = sat; // bin into lambda/saturation buffer float satc = lsres * sat; float lamc = (c0yl[2] - CIE_LAMBDA_MIN)/(CIE_LAMBDA_MAX-CIE_LAMBDA_MIN) * lsres / 2; int lami = fmaxf(0, fminf(lsres/2-1, lamc)); int sati = satc; if(c0yl[0] > 0) lami += lsres/2; lami = fmaxf(0, fminf(lsres-1, lami)); sati = fmaxf(0, fminf(lsres-1, sati)); float olamc = lsbuf[5*(lami*lsres + sati)+3]; float osatc = lsbuf[5*(lami*lsres + sati)+4]; float odist = (olamc - lami - 0.5f)*(olamc - lami - 0.5f)+ (osatc - sati - 0.5f)*(osatc - sati - 0.5f); float dist = ( lamc - lami - 0.5f)*( lamc - lami - 0.5f)+ ( satc - sati - 0.5f)*( satc - sati - 0.5f); if(dist < odist) { lsbuf[5*(lami*lsres + sati)+0] = x; 
lsbuf[5*(lami*lsres + sati)+1] = y; lsbuf[5*(lami*lsres + sati)+2] = 1.0-x-y; lsbuf[5*(lami*lsres + sati)+3] = lamc; lsbuf[5*(lami*lsres + sati)+4] = satc; } out[5*idx + 3] = (lami+0.5f) / (float)lsres; out[5*idx + 4] = (sati+0.5f) / (float)lsres; } } #if 0 { // TODO: another sanity check: plot all points of some lambda with analytic curves of what we think // would be good values for c0 and y! // FIXME: for red lambda and n, large s turn around towards white! // FIXME: pretty much all turn around for u // FIXME: gaps can only be filled on the way back! const int lambda_cnt = 32;//512; const int sat_cnt = 256; // for(int un=0;un<2;un++) const int un = 1; { for(int l=0;l<lambda_cnt;l++) { for(int s=0;s<sat_cnt;s++) { double lambda = CIE_LAMBDA_MIN + l/(lambda_cnt-1.0) * (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN); // create spectrum for c0 = +/- 0.01? y=? lambda double c0, y; if(un) c0 = -pow(s/(sat_cnt-1.0), 3.) * 1./100.; // |c0| in 0..1/256 // FIXME: want softer progression of c0 to get less extreme blue-white-red ridges else c0 = 0.0000 + 0.0015 * pow(s/(sat_cnt-1.0), 0.8); // c < 0.0015 or 0.002 to go to border // else c0 = 0.0000 + 0.0015 * pow(s/(sat_cnt-1.0), 1./2.); // c < 0.0015 or 0.002 to go to border if(un) y = s/(sat_cnt-1.0); else y = -0 - 20.0 * s/(sat_cnt-1.0); // want to reach -20 at the border double c0yl[3] = {c0, y, lambda}; double col[3] = {0}; for (int ll = 0; ll < CIE_FINE_SAMPLES; ll++) { double l2 = CIE_LAMBDA_MIN + ll/(double)CIE_FINE_SAMPLES * (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN); double x = c0yl[0] * (l2 - c0yl[2])*(l2 - c0yl[2]) + c0yl[1]; double p = sigmoid(x); for(int j=0;j<3;j++) col[j] += rgb_tbl[j][ll] * p; } double xy[] = { col[0] / (col[0]+col[1]+col[2]), col[1] / (col[0]+col[1]+col[2])}; fprintf(stderr, "%g %g %d\n", xy[0], xy[1], s); } } } } #endif #if 0 // circular version { // now we have a c0 c1 c2 vx vy map. 
// as a second step, using this 2D (c0 y lambda vx vy) map // create another 2D (s, lambda) map as say 512x1024: const int lambda_cnt = 40;//1024;//512; const int sat_cnt = 512; float *map = calloc(sizeof(float)*3, lambda_cnt*2*sat_cnt); const int stripe_size = 256;//2048; float *stripe = calloc(sizeof(float)*3*lambda_cnt, stripe_size); int *right = calloc(sizeof(int), lambda_cnt); int *left = calloc(sizeof(int), lambda_cnt); for(int l=0;l<lambda_cnt;l++) for(int i=0;i<2*sat_cnt;i++) { map[3*(2*sat_cnt * l + i) + 0] = 1.0; map[3*(2*sat_cnt * l + i) + 1] = 0.0; map[3*(2*sat_cnt * l + i) + 2] = 0.0; } // for(int d=-1;d<=1;d+=2) const int d = 1; // this isn't in fact a wavelength but an angle for(int l=0;l<lambda_cnt;l++) { // weird offset hand tuned such that the separation n-shape/u-shape goes through y=0 coordinate in the output map: double phi = 2.0*M_PI*l/(lambda_cnt-1.0) + 0.48; const double radius = 0.10; double xy[2] = {1.0/3.0 + radius * cos(phi), 1.0/3.0 + radius * sin(phi)}; double c0yl[3]; float px[5]; lookup2d(out, res, res, 5, xy, px); double coeffs[3] = {px[0], px[1], px[2]}; // cvt_c0yl_c012(c0yl, coeffs); cvt_c012_c0yl(coeffs, c0yl); const int it_cnt = stripe_size/2; for(int it=0;it<it_cnt;it++) { double lwd[3]; cvt_c0yl_lwd(c0yl, lwd); fprintf(stderr, "%g %g %g %d %d %g %g\n", xy[0], xy[1], c0yl[2], l, it, lwd[1], lwd[2]); { double col[3] = {0}; for (int ll = 0; ll < CIE_FINE_SAMPLES; ll++) { double l2 = CIE_LAMBDA_MIN + ll/(double)CIE_FINE_SAMPLES * (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN); double x = c0yl[0] * (l2 - c0yl[2])*(l2 - c0yl[2]) + c0yl[1]; double s = sigmoid(x); for(int j=0;j<3;j++) col[j] += rgb_tbl[j][ll] * s; } xy[0] = col[0] / (col[0]+col[1]+col[2]); xy[1] = col[1] / (col[0]+col[1]+col[2]); } // read velocity field at xy and walk a single pixel step: lookup2d(out, res, res, 5, xy, px); double dir = -1.0;//c0yl[0] > 0.0 ? 
1.0 : - 1.0; xy[0] += d*dir * px[3] * .5/it_cnt; xy[1] += d*dir * px[4] * .5/it_cnt; lookup2d(out, res, res, 5, xy, px); double new_c[] = {px[0], px[1], px[2]}; double new_c0yl[3]; cvt_c012_c0yl(new_c, new_c0yl); stripe[3*(stripe_size*l + stripe_size/2 + (d> 0 ? it : -it-1)) + 0] = c0yl[0]; stripe[3*(stripe_size*l + stripe_size/2 + (d> 0 ? it : -it-1)) + 1] = c0yl[1]; stripe[3*(stripe_size*l + stripe_size/2 + (d> 0 ? it : -it-1)) + 2] = c0yl[2]; if(d > 0) right[l] = it; // expand right boundary else left[l] = it; // expand left boundary (doesn't really work) if(c0yl[0] * new_c0yl[0] <= 0.0) break; // n vs u mismatch, streamline borken :( // if(fabs(xy[0] - 1.0/3.0) < 1e-4 && fabs(xy[1] - 1.0/3.0) < 1e-4) break; if(spectrum_outside(xy[0], xy[1])) break; // outside spectral locus // ?? c0yl[0] = new_c0yl[0]; c0yl[1] = new_c0yl[1]; // c0yl[2] = new_c0yl[2]; // keep lambda // } // for(int i=0;i<it_cnt;i++) // { // map[3*(2*sat_cnt * l + sat_cnt + (d>0 ? it : -it-1)) + 0] = fabs(c0yl[0]); // map[3*(2*sat_cnt * l + sat_cnt + (d>0 ? it : -it-1)) + 1] = fabs(c0yl[1]); // map[3*(2*sat_cnt * l + sat_cnt + (d>0 ? 
it : -it-1)) + 2] = (c0yl[2] - CIE_LAMBDA_MIN)/(CIE_LAMBDA_MAX - CIE_LAMBDA_MIN); } } // smooth right boundary: for(int i=1;i<lambda_cnt-1;i++) { int crop = (right[i-1] + right[i+1])/2; if(right[i] > crop) right[i] = crop; } for(int i=1;i<lambda_cnt-1;i++) { int crop = (left[i-1] + left[i+1])/2; if(left[i] > crop) left[i] = crop; } // resample lines from stripe_size [ss/2..right] to sat_cnt for(int l=0;l<lambda_cnt;l++) { for(int i=0;i<sat_cnt;i++) { float res[3]; float f = i/(float)sat_cnt * right[l]/(float)(stripe_size); lookup1d(stripe + 3*(stripe_size*l + stripe_size/2), stripe_size, 3, f, res); map[3*(2*sat_cnt * l + sat_cnt + i) + 0] = fabsf(res[0]); map[3*(2*sat_cnt * l + sat_cnt + i) + 1] = fabsf(res[1]); map[3*(2*sat_cnt * l + sat_cnt + i) + 2] = (res[2] - CIE_LAMBDA_MIN)/(CIE_LAMBDA_MAX - CIE_LAMBDA_MIN); } for(int i=0;i<sat_cnt;i++) { float res[3]; float f = 0.5 - i/(float)sat_cnt * left[l]/(float)(stripe_size); lookup1d(stripe + 3*(stripe_size*l), stripe_size, 3, f, res); map[3*(2*sat_cnt * l + sat_cnt - i-1) + 0] = fabsf(res[0]); map[3*(2*sat_cnt * l + sat_cnt - i-1) + 1] = fabsf(res[1]); map[3*(2*sat_cnt * l + sat_cnt - i-1) + 2] = (res[2] - CIE_LAMBDA_MIN)/(CIE_LAMBDA_MAX - CIE_LAMBDA_MIN); } } #if 0 // invalidate samples outside for(int l=0;l<lambda_cnt;l++) { for(int i=right[l];i<sat_cnt;i++) { map[3*(2*sat_cnt * l + sat_cnt + i) + 0] = 1.0; map[3*(2*sat_cnt * l + sat_cnt + i) + 1] = 0.0; map[3*(2*sat_cnt * l + sat_cnt + i) + 2] = 0.0; } } #endif FILE *f = fopen("map.pfm", "wb"); if(f) { fprintf(f, "PF\n%d %d\n-1.0\n", 2*sat_cnt, lambda_cnt); for(int k=0;k<sat_cnt*2*lambda_cnt;k++) { float coeffs[3] = {map[3*k+0], map[3*k+1], map[3*k+2]}; fwrite(coeffs, sizeof(float), 3, f); } fclose(f); } } // end scope #endif #if 0 { // now we have a c0 c1 c2 vx vy map. 
// as a second step, using this 2D (c0 y lambda vx vy) map // create another 2D (s, lambda) map as say 512x1024: const int lambda_cnt = 128;//512; const int sat_cnt = 64; float *map = calloc(sizeof(float)*3, lambda_cnt*2*sat_cnt); const int stripe_size = 2048; float *stripe = calloc(sizeof(float)*3, stripe_size); // for n and u spectra and lambda in [360, 830], do: // for(int un=0;un<2;un++) const int un = 0; { for(int l=0;l<lambda_cnt;l++) // const int l = lambda_cnt / 2; { int dir_lower = stripe_size/2, dir_upper = stripe_size/2; memset(stripe, 0, sizeof(float)*3*stripe_size); // walk velocity field both directions towards white (s=0) and spectral (s=1) for(int dir=-1;dir<=1;dir+=2) { // TODO: could start from a rasterised 2d circle in xy around 1/3 1/3, radius < 0.1 instead! double lambda = CIE_LAMBDA_MIN + l/(lambda_cnt-1.0) * (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN); // create spectrum for c0 = +/- 0.01? y=? lambda double c0 = un ? -0.0005 : 0.0001; // n case is negative double y = un ? 3 : -2; double c0yl[3] = {c0, y, lambda}; double xy[2] = {0.4, 0.4}; float px[5]; // lookup2d(out, res, res, 5, xy, px); // double coeffs[3] = {px[0], px[1], px[2]}; // cvt_c0yl_c012(c0yl, coeffs); // cvt_c012_c0yl(coeffs, c0yl); const int it_cnt = 150;// TODO: put sane maximum number of steps for(int it=0;it<it_cnt;it++) // for(int it=0;it<5;it++) // TODO: put sane maximum number of steps { // determine xy // XXX DEBUG if(it==0) // only step xy + vel, don't convert to spectrum in between { double col[3] = {0}; for (int ll = 0; ll < CIE_FINE_SAMPLES; ll++) { #if 0 double l2 = CIE_LAMBDA_MIN + ll/(double)CIE_FINE_SAMPLES * (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN); double c0 = 360.0, c1 = 1.0 / (830.0 - 360.0); double A = coeffs[0], B = coeffs[1], C = coeffs[2]; double A2 = (float)(A*(sqrd(c1))); double B2 = (float)(B*c1 - 2*A*c0*(sqrd(c1))); double C2 = (float)(C - B*c0*c1 + A*(sqrd(c0*c1))); double x = A2*l2*l2 + B2*l2 + C2; #endif #if 0 double l3 = ll/(double)CIE_FINE_SAMPLES; double x = 
0.0; for (int i = 0; i < 3; ++i) x = x * l3 + coeffs[i]; #endif #if 1 double l2 = CIE_LAMBDA_MIN + ll/(double)CIE_FINE_SAMPLES * (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN); double x = c0yl[0] * (l2 - c0yl[2])*(l2 - c0yl[2]) + c0yl[1]; #endif double s = sigmoid(x); // fprintf(stdout, "%g %g\n", l2, s); for(int j=0;j<3;j++) col[j] += rgb_tbl[j][ll] * s; } // XXX FIXME: col seems to stay the same while xy moves along, something is broken here // if(it) // fprintf(stderr, "%g %g -- %g %g %d\n", xy[0], xy[1], // col[0] / (col[0]+col[1]+col[2]), // col[1] / (col[0]+col[1]+col[2]), // it); xy[0] = col[0] / (col[0]+col[1]+col[2]); xy[1] = col[1] / (col[0]+col[1]+col[2]); } // fprintf(stderr, "%g %g %d\n", xy[0], xy[1], it); // read velocity field at xy and walk a single pixel step: float px[5]; lookup2d(out, res, res, 5, xy, px); xy[0] += (c0yl[0] > 0.0 ? 1.0 : -1.0) * dir * px[3] * .5/it_cnt; xy[1] += (c0yl[0] > 0.0 ? 1.0 : -1.0) * dir * px[4] * .5/it_cnt; // double cf[3] = {px[0], px[1], px[2]}; // fprintf(stderr, "c0yl %g %g %g -- ", c0yl[0], c0yl[1], c0yl[2]); // cvt_c012_c0yl(cf, c0yl); // fprintf(stderr, "cf %g %g %g ", cf[0], cf[1], cf[2]); // fprintf(stderr, "%g %g %g\n", c0yl[0], c0yl[1], c0yl[2]); // store result in largeish array const int si = dir < 0 ? dir_lower-- : dir_upper++; if(dir_lower < 0 || dir_upper >= stripe_size) { fprintf(stdout, "array full\n"); break; } // fprintf(stdout, "filling %d %g\n", si, c0yl[0]); stripe[3*si + 0] = fabs(c0yl[0]); stripe[3*si + 1] = fabs(c0yl[1]); stripe[3*si + 2] = c0yl[2]; // stripe[3*si + 2] = CIE_LAMBDA_MIN + l/(double)CIE_FINE_SAMPLES * (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN); // terminate if xy close to white if(fabs(xy[0] - 1.0/3.0) < 1e-3 && fabs(xy[1] - 1.0/3.0) < 1e-3) { // fprintf(stdout, "exit white\n"); break; } // terminate if xy out of clip range if(spectrum_outside(xy[0], xy[1])) { // fprintf(stdout, "exit border\n"); break; } // FIXME: walking only xy + vel is different to round tripping through spectrum!! 
// read c0 c1 c2 and convert to c0 y lambda. update c0 and y, keep lambda. lookup2d(out, res, res, 5, xy, px); // coeffs[0] = px[0]; coeffs[1] = px[1]; coeffs[2] = px[2]; double new_c[] = {px[0], px[1], px[2]}; double new_c0yl[3]; cvt_c012_c0yl(new_c, new_c0yl); // TODO: avoid sign change in c0! // TODO: n-shaped should not have y < 0 (or so?) // TODO: u-shaped should not have y > 1 // fprintf(stdout, "c0 %g %g y: %g %g\n", c0yl[0], new_c0yl[0], c0yl[1], new_c0yl[1]); c0yl[0] = new_c0yl[0]; c0yl[1] = new_c0yl[1]; // c0yl[2] = new_c0yl[2]; // keep lambda // cvt_c0yl_c012(c0yl, coeffs); } // end iterations along direction } // end direction forward/back // normalise range of stripe (dir_lower, dir_upper) to resolution of 2D map, resample into row of texture // row will have: s=-1..0..1 and is filled in two parts (c0 > 0 and c0 < 0) // fprintf(stdout, "%d begin end %d %d\n", l, dir_lower, dir_upper); // dir_upper = stripe_size/2; // XXX DEBUG // dir_lower = 0; // all scanlines for n seem to agree on this quite well for(int i=0;i<sat_cnt;i++) { // convert to index in stripe double f = i/(double)sat_cnt; float c0yl[3]; lookup1d(stripe + 3*(dir_lower+1), dir_upper-dir_lower-1, 3, f, c0yl); // if(fabsf(c0yl[0]) > 0.0f) // fprintf(stdout, "val %d %d [%g]= %g\n", l, i, f * (dir_upper-dir_lower-1), c0yl[0]); if(un) { // n shapes (spectral colours) map[3*(2*sat_cnt * l + sat_cnt + i) + 0] = c0yl[0]; map[3*(2*sat_cnt * l + sat_cnt + i) + 1] = c0yl[1]; map[3*(2*sat_cnt * l + sat_cnt + i) + 2] = 0;//c0yl[2]; } else { // u shapes (purple line) map[3*(2*sat_cnt * l + sat_cnt - i - 1) + 0] = c0yl[0]; map[3*(2*sat_cnt * l + sat_cnt - i - 1) + 1] = c0yl[1]; map[3*(2*sat_cnt * l + sat_cnt - i - 1) + 2] = 0;//c0yl[2]; } } // end saturation row for const l } // end lambda l } // end u-shape n-shape un FILE *f = fopen("map.pfm", "wb"); if(f) { fprintf(f, "PF\n%d %d\n-1.0\n", 2*sat_cnt, lambda_cnt); for(int k=0;k<sat_cnt*2*lambda_cnt;k++) { float coeffs[3] = {map[3*k+0], map[3*k+1], 
map[3*k+2]}; fwrite(coeffs, sizeof(float), 3, f); } fclose(f); } } // end scope #endif #if 1 { // scope write lsbuf #if 0 // superbasic push/pull hole filling. better use gmic's morphological hole filling. // gmic lsbuf.pfm --mul 256 --select_color 0,0,0,0 -inpaint_morpho[0] [1] -rm[1] -o lsbuf2.pfm (only that this doesn't work :( ) // allocate mipmap memory: int num_mips = 0; for(int r=lsres;r;r>>=1) num_mips++; // push down inited avg to mipmaps: int r = lsres; float *b0 = lsbuf; for(int l=1;l<num_mips;l++) { int r0 = r; float *b1 = b0 + r0 * r0 * 5; r >>= 1; for(int j=0;j<r;j++) for(int i=0;i<r;i++) { if(b1[5*(j*r+i)+0] == 0.0f) { // average finer res, if inited int cnt = 0; float avg[5] = {0.0f}; #define PIX(II,JJ) \ if(b0[5*((2*j+JJ)*r0 + 2*i+II)+0] != 0.0f) { \ cnt ++;\ for(int k=0;k<5;k++) avg[k] += b0[5*((2*j+JJ)*r0 + 2*i+II)+k];\ } PIX(0,0); PIX(0,1); PIX(1,0); PIX(1,1); #undef PIX if(cnt) for(int k=0;k<5;k++) b1[5*(j*r+i)+k] = avg[k] / cnt; } } b0 = b1; } // pull up to uninited hi res for(int j=0;j<lsres;j++) for(int i=0;i<lsres;i++) { if(lsbuf[5*(j*lsres+i)] == 0.0f) { int ii = i, jj = j, r = lsres; float *b1 = lsbuf; for(int l=0;l<num_mips;l++) { b1 += 5*r*r; r >>= 1; ii >>= 1; jj >>= 1; if(b1[5*(jj*r+ii)] != 0.0f) { for(int k=0;k<5;k++) lsbuf[5*(j*lsres+i)+k] = b1[5*(jj*r+ii)+k]; break; } } } } #endif #if 1 // interpolate 0. 
0.08 linearly from white to first meaningful values: #endif FILE *f = fopen("lsbuf.pfm", "wb"); if(f) { fprintf(f, "PF\n%d %d\n-1.0\n", lsres, lsres); for(int j=0;j<lsres;j++) for(int i=0;i<lsres;i++) fwrite(lsbuf + j*5*lsres + i*5, sizeof(float), 3, f); fclose(f); } #if 0 // DEBUG plot a couple of grid points for(int j=0;j<lsres;j+=1) for(int i=0;i<lsres;i+=10) fprintf(stderr, "%g %g\n", lsbuf[3*(j*lsres+i)+0], lsbuf[3*(j*lsres+i)+1]); #endif } #endif #if 1 // write four channel half lut { // convert to half uint32_t size = 4*sizeof(uint16_t)*res*res; uint16_t *b16 = malloc(size); for(int k=0;k<res*res;k++) { double coeffs[3] = {out[5*k+0], out[5*k+1], out[5*k+2]}; double c0yl[3]; cvt_c012_c0yl(coeffs, c0yl); float q[4] = {c0yl[0], c0yl[1], c0yl[2], out[5*k+4]}; b16[4*k+0] = float_to_half(1e5f*q[0]); b16[4*k+1] = float_to_half(q[1]); b16[4*k+2] = float_to_half(q[2]); b16[4*k+3] = float_to_half(q[3]); } typedef struct header_t { uint32_t magic; uint16_t version; uint16_t channels; uint32_t wd; uint32_t ht; } header_t; header_t head = (header_t) { .magic = 1234, .version = 1, .channels = 4, .wd = res, .ht = res, }; FILE *f = fopen("sig.lut", "wb"); if(f) { fwrite(&head, sizeof(head), 1, f); fwrite(b16, size, 1, f); } fclose(f); } #endif FILE *f = fopen(argv[2], "wb"); if(f) { fprintf(f, "PF\n%d %d\n-1.0\n", res, res); for(int k=0;k<res*res;k++) { double coeffs[3] = {out[5*k+0], out[5*k+1], out[5*k+2]}; float q[3]; quantise_coeffs(coeffs, q); // fprintf(stdout, "%g %g %g\n", q[0], q[1], q[2]); q[2] = q[0]; q[0] = out[5*k+3]; // DEBUG lambda tc q[1] = out[5*k+4]; // DEBUG saturation tc #if 1 // coeff data fwrite(q, sizeof(float), 3, f); #else // velocity field float vel[3] = { out[5*k+3], out[5*k+4], // 1.0}; (1.0- out[5*k+3]*out[5*k+3]- out[5*k+4]*out[5*k+4])}; // vel[0] = 0.5 + 0.5*vel[0]; // vel[1] = 0.5 + 0.5*vel[1]; // vel[2] = 0.5 + 0.5*vel[2]; fwrite(vel, sizeof(float), 3, f); // fwrite(out+5*k+0, sizeof(float), 1, f); // fwrite(out+5*k+3, sizeof(float), 2, f); 
// fwrite(&one, sizeof(float), 1, f); #endif } fclose(f); } free(out); printf("\n"); }
/* ===== Begin concatenated file: libgomp.h ===== */
/* Copyright (C) 2005-2017 Free Software Foundation, Inc. Contributed by Richard Henderson <rth@redhat.com>. This file is part of the GNU Offloading and Multi Processing Library (libgomp). Libgomp is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ /* This file contains data types and function declarations that are not part of the official OpenACC or OpenMP user interfaces. There are declarations in here that are part of the GNU Offloading and Multi Processing ABI, in that the compiler is required to know about them and use them. The convention is that the all caps prefix "GOMP" is used group items that are part of the external ABI, and the lower case prefix "gomp" is used group items that are completely private to the library. */ #ifndef LIBGOMP_H #define LIBGOMP_H 1 #ifndef _LIBGOMP_CHECKING_ /* Define to 1 to perform internal sanity checks. */ #define _LIBGOMP_CHECKING_ 0 #endif #include "config.h" #include "gstdint.h" #include "libgomp-plugin.h" #ifdef HAVE_PTHREAD_H #include <pthread.h> #endif #include <stdbool.h> #include <stdlib.h> #include <stdio.h> #include <stdarg.h> /* Needed for memset in priority_queue.c. 
*/ #if _LIBGOMP_CHECKING_ # ifdef STRING_WITH_STRINGS # include <string.h> # include <strings.h> # else # ifdef HAVE_STRING_H # include <string.h> # else # ifdef HAVE_STRINGS_H # include <strings.h> # endif # endif # endif #endif #ifdef HAVE_ATTRIBUTE_VISIBILITY # pragma GCC visibility push(hidden) #endif /* If we were a C++ library, we'd get this from <std/atomic>. */ enum memmodel { MEMMODEL_RELAXED = 0, MEMMODEL_CONSUME = 1, MEMMODEL_ACQUIRE = 2, MEMMODEL_RELEASE = 3, MEMMODEL_ACQ_REL = 4, MEMMODEL_SEQ_CST = 5 }; /* alloc.c */ extern void *gomp_malloc (size_t) __attribute__((malloc)); extern void *gomp_malloc_cleared (size_t) __attribute__((malloc)); extern void *gomp_realloc (void *, size_t); /* Avoid conflicting prototypes of alloca() in system headers by using GCC's builtin alloca(). */ #define gomp_alloca(x) __builtin_alloca(x) /* error.c */ extern void gomp_vdebug (int, const char *, va_list); extern void gomp_debug (int, const char *, ...) __attribute__ ((format (printf, 2, 3))); #define gomp_vdebug(KIND, FMT, VALIST) \ do { \ if (__builtin_expect (gomp_debug_var, 0)) \ (gomp_vdebug) ((KIND), (FMT), (VALIST)); \ } while (0) #define gomp_debug(KIND, ...) \ do { \ if (__builtin_expect (gomp_debug_var, 0)) \ (gomp_debug) ((KIND), __VA_ARGS__); \ } while (0) extern void gomp_verror (const char *, va_list); extern void gomp_error (const char *, ...) __attribute__ ((format (printf, 1, 2))); extern void gomp_vfatal (const char *, va_list) __attribute__ ((noreturn)); extern void gomp_fatal (const char *, ...) __attribute__ ((noreturn, format (printf, 1, 2))); struct gomp_task; struct gomp_taskgroup; struct htab; #include "priority_queue.h" #include "sem.h" #include "mutex.h" #include "bar.h" #include "simple-bar.h" #include "ptrlock.h" /* This structure contains the data to control one work-sharing construct, either a LOOP (FOR/DO) or a SECTIONS. 
*/ enum gomp_schedule_type { GFS_RUNTIME, GFS_STATIC, GFS_DYNAMIC, GFS_GUIDED, GFS_AUTO }; struct gomp_doacross_work_share { union { /* chunk_size copy, as ws->chunk_size is multiplied by incr for GFS_DYNAMIC. */ long chunk_size; /* Likewise, but for ull implementation. */ unsigned long long chunk_size_ull; /* For schedule(static,0) this is the number of iterations assigned to the last thread, i.e. number of iterations / number of threads. */ long q; /* Likewise, but for ull implementation. */ unsigned long long q_ull; }; /* Size of each array entry (padded to cache line size). */ unsigned long elt_sz; /* Number of dimensions in sink vectors. */ unsigned int ncounts; /* True if the iterations can be flattened. */ bool flattened; /* Actual array (of elt_sz sized units), aligned to cache line size. This is indexed by team_id for GFS_STATIC and outermost iteration / chunk_size for other schedules. */ unsigned char *array; /* These two are only used for schedule(static,0). */ /* This one is number of iterations % number of threads. */ long t; union { /* And this one is cached t * (q + 1). */ long boundary; /* Likewise, but for the ull implementation. */ unsigned long long boundary_ull; }; /* Array of shift counts for each dimension if they can be flattened. */ unsigned int shift_counts[]; }; struct gomp_work_share { /* This member records the SCHEDULE clause to be used for this construct. The user specification of "runtime" will already have been resolved. If this is a SECTIONS construct, this value will always be DYNAMIC. */ enum gomp_schedule_type sched; int mode; union { struct { /* This is the chunk_size argument to the SCHEDULE clause. */ long chunk_size; /* This is the iteration end point. If this is a SECTIONS construct, this is the number of contained sections. */ long end; /* This is the iteration step. If this is a SECTIONS construct, this is always 1. */ long incr; }; struct { /* The same as above, but for the unsigned long long loop variants. 
*/ unsigned long long chunk_size_ull; unsigned long long end_ull; unsigned long long incr_ull; }; }; union { /* This is a circular queue that details which threads will be allowed into the ordered region and in which order. When a thread allocates iterations on which it is going to work, it also registers itself at the end of the array. When a thread reaches the ordered region, it checks to see if it is the one at the head of the queue. If not, it blocks on its RELEASE semaphore. */ unsigned *ordered_team_ids; /* This is a pointer to DOACROSS work share data. */ struct gomp_doacross_work_share *doacross; }; /* This is the number of threads that have registered themselves in the circular queue ordered_team_ids. */ unsigned ordered_num_used; /* This is the team_id of the currently acknowledged owner of the ordered section, or -1u if the ordered section has not been acknowledged by any thread. This is distinguished from the thread that is *allowed* to take the section next. */ unsigned ordered_owner; /* This is the index into the circular queue ordered_team_ids of the current thread that's allowed into the ordered reason. */ unsigned ordered_cur; /* This is a chain of allocated gomp_work_share blocks, valid only in the first gomp_work_share struct in the block. */ struct gomp_work_share *next_alloc; /* The above fields are written once during workshare initialization, or related to ordered worksharing. Make sure the following fields are in a different cache line. */ /* This lock protects the update of the following members. */ gomp_mutex_t lock __attribute__((aligned (64))); /* This is the count of the number of threads that have exited the work share construct. If the construct was marked nowait, they have moved on to other work; otherwise they're blocked on a barrier. The last member of the team to exit the work share construct must deallocate it. */ unsigned threads_completed; union { /* This is the next iteration value to be allocated. 
In the case of GFS_STATIC loops, this the iteration start point and never changes. */ long next; /* The same, but with unsigned long long type. */ unsigned long long next_ull; /* This is the returned data structure for SINGLE COPYPRIVATE. */ void *copyprivate; }; union { /* Link to gomp_work_share struct for next work sharing construct encountered after this one. */ gomp_ptrlock_t next_ws; /* gomp_work_share structs are chained in the free work share cache through this. */ struct gomp_work_share *next_free; }; /* If only few threads are in the team, ordered_team_ids can point to this array which fills the padding at the end of this struct. */ unsigned inline_ordered_team_ids[0]; }; /* This structure contains all of the thread-local data associated with a thread team. This is the data that must be saved when a thread encounters a nested PARALLEL construct. */ struct gomp_team_state { /* This is the team of which the thread is currently a member. */ struct gomp_team *team; /* This is the work share construct which this thread is currently processing. Recall that with NOWAIT, not all threads may be processing the same construct. */ struct gomp_work_share *work_share; /* This is the previous work share construct or NULL if there wasn't any. When all threads are done with the current work sharing construct, the previous one can be freed. The current one can't, as its next_ws field is used. */ struct gomp_work_share *last_work_share; /* This is the ID of this thread within the team. This value is guaranteed to be between 0 and N-1, where N is the number of threads in the team. */ unsigned team_id; /* Nesting level. */ unsigned level; /* Active nesting level. Only active parallel regions are counted. */ unsigned active_level; /* Place-partition-var, offset and length into gomp_places_list array. */ unsigned place_partition_off; unsigned place_partition_len; #ifdef HAVE_SYNC_BUILTINS /* Number of single stmts encountered. 
*/ unsigned long single_count; #endif /* For GFS_RUNTIME loops that resolved to GFS_STATIC, this is the trip number through the loop. So first time a particular loop is encountered this number is 0, the second time through the loop is 1, etc. This is unused when the compiler knows in advance that the loop is statically scheduled. */ unsigned long static_trip; }; struct target_mem_desc; /* These are the OpenMP 4.0 Internal Control Variables described in section 2.3.1. Those described as having one copy per task are stored within the structure; those described as having one copy for the whole program are (naturally) global variables. */ struct gomp_task_icv { unsigned long nthreads_var; enum gomp_schedule_type run_sched_var; int run_sched_chunk_size; int default_device_var; unsigned int thread_limit_var; bool dyn_var; bool nest_var; char bind_var; /* Internal ICV. */ struct target_mem_desc *target_data; }; extern struct gomp_task_icv gomp_global_icv; #ifndef HAVE_SYNC_BUILTINS extern gomp_mutex_t gomp_managed_threads_lock; extern gomp_mutex_t popcorn_tid_lock; #endif extern unsigned long gomp_max_active_levels_var; extern bool gomp_cancel_var; extern int gomp_max_task_priority_var; extern unsigned long long gomp_spin_count_var, gomp_throttled_spin_count_var; extern unsigned long gomp_available_cpus, gomp_managed_threads; extern unsigned long *gomp_nthreads_var_list, gomp_nthreads_var_list_len; extern char *gomp_bind_var_list; extern unsigned long gomp_bind_var_list_len; extern void **gomp_places_list; extern unsigned long gomp_places_list_len; extern unsigned int gomp_num_teams_var; extern int gomp_debug_var; extern int goacc_device_num; extern char *goacc_device_type; /* Popcorn profiling machinery. */ extern bool popcorn_profiling; extern const char *popcorn_prof_fn; extern FILE *popcorn_prof_fp; enum gomp_task_kind { /* Implicit task. */ GOMP_TASK_IMPLICIT, /* Undeferred task. */ GOMP_TASK_UNDEFERRED, /* Task created by GOMP_task and waiting to be run. 
*/ GOMP_TASK_WAITING, /* Task currently executing or scheduled and about to execute. */ GOMP_TASK_TIED, /* Used for target tasks that have vars mapped and async run started, but not yet completed. Once that completes, they will be readded into the queues as GOMP_TASK_WAITING in order to perform the var unmapping. */ GOMP_TASK_ASYNC_RUNNING }; struct gomp_task_depend_entry { /* Address of dependency. */ void *addr; struct gomp_task_depend_entry *next; struct gomp_task_depend_entry *prev; /* Task that provides the dependency in ADDR. */ struct gomp_task *task; /* Depend entry is of type "IN". */ bool is_in; bool redundant; bool redundant_out; }; struct gomp_dependers_vec { size_t n_elem; size_t allocated; struct gomp_task *elem[]; }; /* Used when in GOMP_taskwait or in gomp_task_maybe_wait_for_dependencies. */ struct gomp_taskwait { bool in_taskwait; bool in_depend_wait; /* Number of tasks we are waiting for. */ size_t n_depend; gomp_sem_t taskwait_sem; }; /* This structure describes a "task" to be run by a thread. */ struct gomp_task { /* Parent of this task. */ struct gomp_task *parent; /* Children of this task. */ struct priority_queue children_queue; /* Taskgroup this task belongs in. */ struct gomp_taskgroup *taskgroup; /* Tasks that depend on this task. */ struct gomp_dependers_vec *dependers; struct htab *depend_hash; struct gomp_taskwait *taskwait; /* Number of items in DEPEND. */ size_t depend_count; /* Number of tasks this task depends on. Once this counter reaches 0, we have no unsatisfied dependencies, and this task can be put into the various queues to be scheduled. */ size_t num_dependees; /* Priority of this task. */ int priority; /* The priority node for this task in each of the different queues. We put this here to avoid allocating space for each priority node. Then we play offsetof() games to convert between pnode[] entries and the gomp_task in which they reside. 
*/ struct priority_node pnode[3]; struct gomp_task_icv icv; void (*fn) (void *); void *fn_data; enum gomp_task_kind kind; bool in_tied_task; bool final_task; bool copy_ctors_done; /* Set for undeferred tasks with unsatisfied dependencies which block further execution of their parent until the dependencies are satisfied. */ bool parent_depends_on; /* Dependencies provided and/or needed for this task. DEPEND_COUNT is the number of items available. */ struct gomp_task_depend_entry depend[]; }; /* This structure describes a single #pragma omp taskgroup. */ struct gomp_taskgroup { struct gomp_taskgroup *prev; /* Queue of tasks that belong in this taskgroup. */ struct priority_queue taskgroup_queue; bool in_taskgroup_wait; bool cancelled; gomp_sem_t taskgroup_sem; size_t num_children; }; /* Various state of OpenMP async offloading tasks. */ enum gomp_target_task_state { GOMP_TARGET_TASK_DATA, GOMP_TARGET_TASK_BEFORE_MAP, GOMP_TARGET_TASK_FALLBACK, GOMP_TARGET_TASK_READY_TO_RUN, GOMP_TARGET_TASK_RUNNING, GOMP_TARGET_TASK_FINISHED }; /* This structure describes a target task. */ struct gomp_target_task { struct gomp_device_descr *devicep; void (*fn) (void *); size_t mapnum; size_t *sizes; unsigned short *kinds; unsigned int flags; enum gomp_target_task_state state; struct target_mem_desc *tgt; struct gomp_task *task; struct gomp_team *team; /* Device-specific target arguments. */ void **args; void *hostaddrs[]; }; /* This structure describes a "team" of threads. These are the threads that are spawned by a PARALLEL constructs, as well as the work sharing constructs that the team encounters. */ struct gomp_team { /* This is the number of threads in the current team. */ unsigned nthreads; /* This is number of gomp_work_share structs that have been allocated as a block last time. */ unsigned work_share_chunk; /* This is the saved team state that applied to a master thread before the current thread was created. 
*/ struct gomp_team_state prev_ts; /* This semaphore should be used by the master thread instead of its "native" semaphore in the thread structure. Required for nested parallels, as the master is a member of two teams. */ gomp_sem_t master_release; /* This points to an array with pointers to the release semaphore of the threads in the team. */ gomp_sem_t **ordered_release; /* List of work shares on which gomp_fini_work_share hasn't been called yet. If the team hasn't been cancelled, this should be equal to each thr->ts.work_share, but otherwise it can be a possibly long list of workshares. */ struct gomp_work_share *work_shares_to_free; /* List of gomp_work_share structs chained through next_free fields. This is populated and taken off only by the first thread in the team encountering a new work sharing construct, in a critical section. */ struct gomp_work_share *work_share_list_alloc; /* List of gomp_work_share structs freed by free_work_share. New entries are atomically added to the start of the list, and alloc_work_share can safely only move all but the first entry to work_share_list alloc, as free_work_share can happen concurrently with alloc_work_share. */ struct gomp_work_share *work_share_list_free; #ifdef HAVE_SYNC_BUILTINS /* Number of simple single regions encountered by threads in this team. */ unsigned long single_count; #else /* Mutex protecting addition of workshares to work_share_list_free. */ gomp_mutex_t work_share_list_free_lock; #endif /* This barrier is used for most synchronization of the team. */ gomp_barrier_t barrier; /* Initial work shares, to avoid allocating any gomp_work_share structs in the common case. */ struct gomp_work_share work_shares[8]; gomp_mutex_t task_lock; /* Scheduled tasks. */ struct priority_queue task_queue; /* Number of all GOMP_TASK_{WAITING,TIED} tasks in the team. */ unsigned int task_count; /* Number of GOMP_TASK_WAITING tasks currently waiting to be scheduled. 
*/ unsigned int task_queued_count; /* Number of GOMP_TASK_{WAITING,TIED} tasks currently running directly in gomp_barrier_handle_tasks; tasks spawned from e.g. GOMP_taskwait or GOMP_taskgroup_end don't count, even when that is called from a task run from gomp_barrier_handle_tasks. task_running_count should be always <= team->nthreads, and if current task isn't in_tied_task, then it will be even < team->nthreads. */ unsigned int task_running_count; int work_share_cancelled; int team_cancelled; /* This array contains structures for implicit tasks. */ struct gomp_task implicit_task[]; }; /* This structure contains all data that is private to libgomp and is allocated per thread. */ struct gomp_thread { /* This is the function that the thread should run upon launch. */ void (*fn) (void *data); void *data; /* This is the current team state for this thread. The ts.team member is NULL only if the thread is idle. */ struct gomp_team_state ts; /* This is the task that the thread is currently executing. */ struct gomp_task *task; /* This semaphore is used for ordered loops. */ gomp_sem_t release; /* Place this thread is bound to plus one, or zero if not bound to any place. */ unsigned int place; /* User pthread thread pool */ struct gomp_thread_pool *thread_pool; /* Popcorn's TID, basically this thread's number out of the total number of threads created by the runtime over the lifetime of the application. */ int popcorn_created_tid; /* Node ID on which this thread is executing in Popcorn. */ int popcorn_nid; /* Reduction method for variables currently being reduced. */ int reduction_method; }; struct gomp_thread_pool { /* This array manages threads spawned from the top level, which will return to the idle loop once the current PARALLEL construct ends. 
*/ struct gomp_thread **threads; unsigned threads_size; unsigned threads_used; /* The last team is used for non-nested teams to delay their destruction to make sure all the threads in the team move on to the pool's barrier before the team's barrier is destroyed. */ struct gomp_team *last_team; /* Number of threads running in this contention group. */ unsigned long threads_busy; /* This barrier holds and releases threads waiting in thread pools. */ gomp_simple_barrier_t threads_dock; }; enum gomp_cancel_kind { GOMP_CANCEL_PARALLEL = 1, GOMP_CANCEL_LOOP = 2, GOMP_CANCEL_FOR = GOMP_CANCEL_LOOP, GOMP_CANCEL_DO = GOMP_CANCEL_LOOP, GOMP_CANCEL_SECTIONS = 4, GOMP_CANCEL_TASKGROUP = 8 }; /* ... and here is that TLS data. */ #if defined __nvptx__ extern struct gomp_thread *nvptx_thrs __attribute__((shared)); static inline struct gomp_thread *gomp_thread (void) { int tid; asm ("mov.u32 %0, %%tid.y;" : "=r" (tid)); return nvptx_thrs + tid; } #elif defined HAVE_TLS || defined USE_EMUTLS extern __thread struct gomp_thread gomp_tls_data; static inline struct gomp_thread *gomp_thread (void) { return &gomp_tls_data; } #else extern pthread_key_t gomp_tls_key; static inline struct gomp_thread *gomp_thread (void) { return pthread_getspecific (gomp_tls_key); } #endif extern struct gomp_task_icv *gomp_new_icv (void); /* Here's how to access the current copy of the ICVs. */ static inline struct gomp_task_icv *gomp_icv (bool write) { struct gomp_task *task = gomp_thread ()->task; if (task) return &task->icv; else if (write) return gomp_new_icv (); else return &gomp_global_icv; } #ifdef LIBGOMP_USE_PTHREADS /* The attributes to be used during thread creation. */ extern pthread_attr_t gomp_thread_attr; extern pthread_key_t gomp_thread_destructor; #endif /* Function prototypes. 
*/ /* affinity.c */ extern void gomp_init_affinity (void); #ifdef LIBGOMP_USE_PTHREADS extern void gomp_init_thread_affinity (pthread_attr_t *, unsigned int); #endif extern void **gomp_affinity_alloc (unsigned long, bool); extern void gomp_affinity_init_place (void *); extern bool gomp_affinity_add_cpus (void *, unsigned long, unsigned long, long, bool); extern bool gomp_affinity_remove_cpu (void *, unsigned long); extern bool gomp_affinity_copy_place (void *, void *, long); extern bool gomp_affinity_same_place (void *, void *); extern bool gomp_affinity_finalize_place_list (bool); extern bool gomp_affinity_init_level (int, unsigned long, bool); extern void gomp_affinity_print_place (void *); extern void gomp_get_place_proc_ids_8 (int, int64_t *); extern bool popcorn_affinity_init_nodes (unsigned long *, unsigned long, bool); extern bool popcorn_affinity_init_nodes_uniform (unsigned long, bool); extern bool popcorn_affinity_init_node_ratings (unsigned long *, unsigned long, bool); /* iter.c */ extern bool gomp_iter_is_last (long); extern bool gomp_iter_is_last_ull (unsigned long long); extern int gomp_iter_static_next (long *, long *); extern bool gomp_iter_dynamic_next_locked (long *, long *); extern bool gomp_iter_guided_next_locked (long *, long *); #ifdef HAVE_SYNC_BUILTINS extern bool gomp_iter_dynamic_next (long *, long *); extern bool gomp_iter_guided_next (long *, long *); #endif /* iter_ull.c */ extern int gomp_iter_ull_static_next (unsigned long long *, unsigned long long *); extern bool gomp_iter_ull_dynamic_next_locked (unsigned long long *, unsigned long long *); extern bool gomp_iter_ull_guided_next_locked (unsigned long long *, unsigned long long *); #if defined HAVE_SYNC_BUILTINS && defined __LP64__ extern bool gomp_iter_ull_dynamic_next (unsigned long long *, unsigned long long *); extern bool gomp_iter_ull_guided_next (unsigned long long *, unsigned long long *); #endif /* ordered.c */ extern void gomp_ordered_first (void); extern void 
gomp_ordered_last (void); extern void gomp_ordered_next (void); extern void gomp_ordered_static_init (void); extern void gomp_ordered_static_next (void); extern void gomp_ordered_sync (void); extern void gomp_doacross_init (unsigned, long *, long); extern void gomp_doacross_ull_init (unsigned, unsigned long long *, unsigned long long); /* parallel.c */ extern unsigned gomp_resolve_num_threads (unsigned, unsigned); /* proc.c (in config/) */ extern void gomp_init_num_threads (void); extern unsigned gomp_dynamic_max_threads (void); /* task.c */ extern void gomp_init_task (struct gomp_task *, struct gomp_task *, struct gomp_task_icv *); extern void gomp_end_task (void); extern void gomp_barrier_handle_tasks (gomp_barrier_state_t); extern void gomp_task_maybe_wait_for_dependencies (void **); extern bool gomp_create_target_task (struct gomp_device_descr *, void (*) (void *), size_t, void **, size_t *, unsigned short *, unsigned int, void **, void **, enum gomp_target_task_state); static void inline gomp_finish_task (struct gomp_task *task) { if (__builtin_expect (task->depend_hash != NULL, 0)) free (task->depend_hash); } /* team.c */ extern struct gomp_team *gomp_new_team (unsigned); extern void gomp_team_start (void (*) (void *), void *, unsigned, unsigned, struct gomp_team *); extern void gomp_team_end (void); extern void gomp_free_thread (void *); /* target.c */ extern void gomp_init_targets_once (void); extern int gomp_get_num_devices (void); extern bool gomp_target_task_fn (void *); /* Splay tree definitions. */ typedef struct splay_tree_node_s *splay_tree_node; typedef struct splay_tree_s *splay_tree; typedef struct splay_tree_key_s *splay_tree_key; struct target_var_desc { /* Splay key. */ splay_tree_key key; /* True if data should be copied from device to host at the end. */ bool copy_from; /* True if data always should be copied from device to host at the end. */ bool always_copy_from; /* Relative offset against key host_start. 
*/ uintptr_t offset; /* Actual length. */ uintptr_t length; }; struct target_mem_desc { /* Reference count. */ uintptr_t refcount; /* All the splay nodes allocated together. */ splay_tree_node array; /* Start of the target region. */ uintptr_t tgt_start; /* End of the targer region. */ uintptr_t tgt_end; /* Handle to free. */ void *to_free; /* Previous target_mem_desc. */ struct target_mem_desc *prev; /* Number of items in following list. */ size_t list_count; /* Corresponding target device descriptor. */ struct gomp_device_descr *device_descr; /* List of target items to remove (or decrease refcount) at the end of region. */ struct target_var_desc list[]; }; /* Special value for refcount - infinity. */ #define REFCOUNT_INFINITY (~(uintptr_t) 0) /* Special value for refcount - tgt_offset contains target address of the artificial pointer to "omp declare target link" object. */ #define REFCOUNT_LINK (~(uintptr_t) 1) struct splay_tree_key_s { /* Address of the host object. */ uintptr_t host_start; /* Address immediately after the host object. */ uintptr_t host_end; /* Descriptor of the target memory. */ struct target_mem_desc *tgt; /* Offset from tgt->tgt_start to the start of the target object. */ uintptr_t tgt_offset; /* Reference count. */ uintptr_t refcount; /* Pointer to the original mapping of "omp declare target link" object. */ splay_tree_key link_key; }; /* The comparison function. */ static inline int splay_compare (splay_tree_key x, splay_tree_key y) { if (x->host_start == x->host_end && y->host_start == y->host_end) return 0; if (x->host_end <= y->host_start) return -1; if (x->host_start >= y->host_end) return 1; return 0; } #include "splay-tree.h" typedef struct acc_dispatch_t { /* This is a linked list of data mapped using the acc_map_data/acc_unmap_data or "acc enter data"/"acc exit data" pragmas. Unlike mapped_data in the goacc_thread struct, unmapping can happen out-of-order with respect to mapping. 
*/ /* This is guarded by the lock in the "outer" struct gomp_device_descr. */ struct target_mem_desc *data_environ; /* Execute. */ __typeof (GOMP_OFFLOAD_openacc_exec) *exec_func; /* Async cleanup callback registration. */ __typeof (GOMP_OFFLOAD_openacc_register_async_cleanup) *register_async_cleanup_func; /* Asynchronous routines. */ __typeof (GOMP_OFFLOAD_openacc_async_test) *async_test_func; __typeof (GOMP_OFFLOAD_openacc_async_test_all) *async_test_all_func; __typeof (GOMP_OFFLOAD_openacc_async_wait) *async_wait_func; __typeof (GOMP_OFFLOAD_openacc_async_wait_async) *async_wait_async_func; __typeof (GOMP_OFFLOAD_openacc_async_wait_all) *async_wait_all_func; __typeof (GOMP_OFFLOAD_openacc_async_wait_all_async) *async_wait_all_async_func; __typeof (GOMP_OFFLOAD_openacc_async_set_async) *async_set_async_func; /* Create/destroy TLS data. */ __typeof (GOMP_OFFLOAD_openacc_create_thread_data) *create_thread_data_func; __typeof (GOMP_OFFLOAD_openacc_destroy_thread_data) *destroy_thread_data_func; /* NVIDIA target specific routines. */ struct { __typeof (GOMP_OFFLOAD_openacc_cuda_get_current_device) *get_current_device_func; __typeof (GOMP_OFFLOAD_openacc_cuda_get_current_context) *get_current_context_func; __typeof (GOMP_OFFLOAD_openacc_cuda_get_stream) *get_stream_func; __typeof (GOMP_OFFLOAD_openacc_cuda_set_stream) *set_stream_func; } cuda; } acc_dispatch_t; /* Various state of the accelerator device. */ enum gomp_device_state { GOMP_DEVICE_UNINITIALIZED, GOMP_DEVICE_INITIALIZED, GOMP_DEVICE_FINALIZED }; /* This structure describes accelerator device. It contains name of the corresponding libgomp plugin, function handlers for interaction with the device, ID-number of the device, and information about mapped memory. */ struct gomp_device_descr { /* Immutable data, which is only set during initialization, and which is not guarded by the lock. */ /* The name of the device. */ const char *name; /* Capabilities of device (supports OpenACC, OpenMP). 
*/ unsigned int capabilities; /* This is the ID number of device among devices of the same type. */ int target_id; /* This is the TYPE of device. */ enum offload_target_type type; /* Function handlers. */ __typeof (GOMP_OFFLOAD_get_name) *get_name_func; __typeof (GOMP_OFFLOAD_get_caps) *get_caps_func; __typeof (GOMP_OFFLOAD_get_type) *get_type_func; __typeof (GOMP_OFFLOAD_get_num_devices) *get_num_devices_func; __typeof (GOMP_OFFLOAD_init_device) *init_device_func; __typeof (GOMP_OFFLOAD_fini_device) *fini_device_func; __typeof (GOMP_OFFLOAD_version) *version_func; __typeof (GOMP_OFFLOAD_load_image) *load_image_func; __typeof (GOMP_OFFLOAD_unload_image) *unload_image_func; __typeof (GOMP_OFFLOAD_alloc) *alloc_func; __typeof (GOMP_OFFLOAD_free) *free_func; __typeof (GOMP_OFFLOAD_dev2host) *dev2host_func; __typeof (GOMP_OFFLOAD_host2dev) *host2dev_func; __typeof (GOMP_OFFLOAD_dev2dev) *dev2dev_func; __typeof (GOMP_OFFLOAD_can_run) *can_run_func; __typeof (GOMP_OFFLOAD_run) *run_func; __typeof (GOMP_OFFLOAD_async_run) *async_run_func; /* Splay tree containing information about mapped memory regions. */ struct splay_tree_s mem_map; /* Mutex for the mutable data. */ gomp_mutex_t lock; /* Current state of the device. OpenACC allows to move from INITIALIZED state back to UNINITIALIZED state. OpenMP allows only to move from INITIALIZED to FINALIZED state (at program shutdown). */ enum gomp_device_state state; /* OpenACC-specific data and functions. */ /* This is mutable because of its mutable data_environ and target_data members. */ acc_dispatch_t openacc; }; /* Kind of the pragma, for which gomp_map_vars () is called. 
*/ enum gomp_map_vars_kind { GOMP_MAP_VARS_OPENACC, GOMP_MAP_VARS_TARGET, GOMP_MAP_VARS_DATA, GOMP_MAP_VARS_ENTER_DATA }; extern void gomp_acc_insert_pointer (size_t, void **, size_t *, void *); extern void gomp_acc_remove_pointer (void *, bool, int, int); extern struct target_mem_desc *gomp_map_vars (struct gomp_device_descr *, size_t, void **, void **, size_t *, void *, bool, enum gomp_map_vars_kind); extern void gomp_unmap_vars (struct target_mem_desc *, bool); extern void gomp_init_device (struct gomp_device_descr *); extern void gomp_free_memmap (struct splay_tree_s *); extern void gomp_unload_device (struct gomp_device_descr *); /* work.c */ extern void gomp_init_work_share (struct gomp_work_share *, bool, unsigned); extern void gomp_fini_work_share (struct gomp_work_share *); extern bool gomp_work_share_start (bool); extern void gomp_work_share_end (void); extern bool gomp_work_share_end_cancel (void); extern void gomp_work_share_end_nowait (void); static inline void gomp_work_share_init_done (void) { struct gomp_thread *thr = gomp_thread (); if (__builtin_expect (thr->ts.last_work_share != NULL, 1)) gomp_ptrlock_set (&thr->ts.last_work_share->next_ws, thr->ts.work_share); } #ifdef HAVE_ATTRIBUTE_VISIBILITY # pragma GCC visibility pop #endif /* Now that we're back to default visibility, include the globals. */ #include "libgomp_g.h" /* Include omp.h by parts. 
*/
#include "omp-lock.h"
#define _LIBGOMP_OMP_LOCK_DEFINED 1
#include "omp.h.in"

/* Symbol versioning of the lock API needs all of these features;
   otherwise fall back to unversioned entry points below.  */
#if !defined (HAVE_ATTRIBUTE_VISIBILITY) \
    || !defined (HAVE_ATTRIBUTE_ALIAS) \
    || !defined (HAVE_AS_SYMVER_DIRECTIVE) \
    || !defined (PIC) \
    || !defined (HAVE_SYMVER_SYMBOL_RENAMING_RUNTIME_SUPPORT)
# undef LIBGOMP_GNU_SYMBOL_VERSIONING
#endif

#ifdef LIBGOMP_GNU_SYMBOL_VERSIONING
/* OMP 3.0 (current) lock entry points.  */
extern void gomp_init_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_destroy_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_set_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_unset_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern int gomp_test_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_init_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_destroy_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_set_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_unset_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern int gomp_test_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;

/* OMP 1.0/2.5 compatibility lock entry points.  */
extern void gomp_init_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_destroy_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_set_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_unset_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern int gomp_test_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_init_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_destroy_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_set_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_unset_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern int gomp_test_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;

# define strong_alias(fn, al) \
  extern __typeof (fn) al __attribute__ ((alias (#fn)));
# define omp_lock_symver(fn) \
  __asm (".symver g" #fn "_30, " #fn "@@OMP_3.0"); \
  __asm (".symver g" #fn "_25, " #fn "@OMP_1.0");
#else
/* No symbol versioning: map the _30 names directly onto the public API.  */
# define gomp_init_lock_30 omp_init_lock
# define gomp_destroy_lock_30 omp_destroy_lock
# define gomp_set_lock_30 omp_set_lock
# define gomp_unset_lock_30 omp_unset_lock
# define gomp_test_lock_30 omp_test_lock
# define gomp_init_nest_lock_30 omp_init_nest_lock
# define gomp_destroy_nest_lock_30 omp_destroy_nest_lock
# define gomp_set_nest_lock_30 omp_set_nest_lock
# define gomp_unset_nest_lock_30 omp_unset_nest_lock
# define gomp_test_nest_lock_30 omp_test_nest_lock
#endif

#ifdef HAVE_ATTRIBUTE_VISIBILITY
# define attribute_hidden __attribute__ ((visibility ("hidden")))
#else
# define attribute_hidden
#endif

#ifdef HAVE_ATTRIBUTE_ALIAS
# define ialias_ulp	ialias_str1(__USER_LABEL_PREFIX__)
# define ialias_str1(x)	ialias_str2(x)
# define ialias_str2(x)	#x
# define ialias(fn) \
  extern __typeof (fn) gomp_ialias_##fn \
    __attribute__ ((alias (#fn))) attribute_hidden;
# define ialias_redirect(fn) \
  extern __typeof (fn) fn __asm__ (ialias_ulp "gomp_ialias_" #fn) attribute_hidden;
# define ialias_call(fn) gomp_ialias_ ## fn
#else
# define ialias(fn)
# define ialias_redirect(fn)
# define ialias_call(fn) fn
#endif

/* Helper function for priority_node_to_task() and task_to_priority_node().

   Return the offset from a task to its priority_node entry.  The
   priority_node entry has a type of TYPE.  */
static inline size_t
priority_queue_offset (enum priority_queue_type type)
{
  return offsetof (struct gomp_task, pnode[(int) type]);
}

/* Return the task associated with a priority NODE of type TYPE.
   Inverse of task_to_priority_node: subtracts the pnode offset.  */
static inline struct gomp_task *
priority_node_to_task (enum priority_queue_type type,
		       struct priority_node *node)
{
  return (struct gomp_task *) ((char *) node - priority_queue_offset (type));
}

/* Return the priority node of type TYPE for a given TASK.  */
static inline struct priority_node *
task_to_priority_node (enum priority_queue_type type,
		       struct gomp_task *task)
{
  return (struct priority_node *) ((char *) task
				   + priority_queue_offset (type));
}
#endif /* LIBGOMP_H */
DRB041-3mm-parallel-no.c
/**
 * 3mm.c: This file is part of the PolyBench/C 3.2 test suite.
 * three steps of matrix multiplication to multiply four matrices.
 *
 * Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
 * Web address: http://polybench.sourceforge.net
 * License: /LICENSE.OSU.txt
 */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include "polybench/polybench.h"
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "polybench/3mm.h"
/* Array initialization. */
/* Fills A, B, C, D with deterministic values derived from the indices:
   A[i][j] = i*j/ni, B[i][j] = i*(j+1)/nj, C[i][j] = i*(j+3)/nl,
   D[i][j] = i*(j+2)/nk.  The body is polyhedral-compiler output: the
   original rectangular loops have been split into many guarded loop nests
   (one per combination of the ni/nj/nk/nl/nm bound orderings) so each
   nest writes only the arrays whose index ranges cover it.  Each c1 nest
   is parallelized over rows with c2 private, so there is no data race
   (each iteration writes distinct [c1][c2] cells).  */
static void init_array(int ni,int nj,int nk,int nl,int nm,double A[128 + 0][128 + 0],double B[128 + 0][128 + 0],double C[128 + 0][128 + 0],double D[128 + 0][128 + 0])
{
  //int i;
  //int j;
{
  int c2;
  int c1;
  /* Case nl >= 1: rows covered by all of ni/nj/nk/nm. */
  if (nl >= 1) {
#pragma omp parallel for private(c2)
    for (c1 = 0; c1 <= ((((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)) < nm + -1?((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)) : nm + -1)); c1++) {
      for (c2 = 0; c2 <= ((((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nl + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nl + -1)) < nm + -1?((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nl + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nl + -1)) : nm + -1)); c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
      for (c2 = nl; c2 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nm + -1)); c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
      }
      for (c2 = nm; c2 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nl + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nl + -1)); c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
      for (c2 = (nl > nm?nl : nm); c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
      }
      for (c2 = nj; c2 <= ((((nk + -1 < nl + -1?nk + -1 : nl + -1)) < nm + -1?((nk + -1 < nl + -1?nk + -1 : nl + -1)) : nm + -1)); c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
      for (c2 = (nj > nl?nj : nl); c2 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
      }
      for (c2 = (nj > nm?nj : nm); c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
      for (c2 = (((nj > nl?nj : nl)) > nm?((nj > nl?nj : nl)) : nm); c2 <= nk + -1; c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
      }
      for (c2 = nk; c2 <= ((((nj + -1 < nl + -1?nj + -1 : nl + -1)) < nm + -1?((nj + -1 < nl + -1?nj + -1 : nl + -1)) : nm + -1)); c2++) {
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
      for (c2 = (nk > nl?nk : nl); c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
      }
      for (c2 = (nk > nm?nk : nm); c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
      for (c2 = (((nk > nl?nk : nl)) > nm?((nk > nl?nk : nl)) : nm); c2 <= nj + -1; c2++) {
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
      }
      for (c2 = (nj > nk?nj : nk); c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
      for (c2 = (((nj > nk?nj : nk)) > nl?((nj > nk?nj : nk)) : nl); c2 <= nm + -1; c2++) {
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
      }
      for (c2 = (((nj > nk?nj : nk)) > nm?((nj > nk?nj : nk)) : nm); c2 <= nl + -1; c2++) {
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
    }
  }
  /* Case nl <= 0: no C/D columns from nl; only A/B/C combinations remain. */
  if (nl <= 0) {
#pragma omp parallel for private(c2)
    for (c1 = 0; c1 <= ((((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)) < nm + -1?((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)) : nm + -1)); c1++) {
      for (c2 = 0; c2 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nm + -1)); c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
      }
      for (c2 = nm; c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
      }
      for (c2 = nj; c2 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
      }
      for (c2 = (nj > nm?nj : nm); c2 <= nk + -1; c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
      }
      for (c2 = nk; c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
      }
      for (c2 = (nk > nm?nk : nm); c2 <= nj + -1; c2++) {
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
      }
      for (c2 = (nj > nk?nj : nk); c2 <= nm + -1; c2++) {
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
      }
    }
  }
  /* Remaining nests: rows/columns outside one of the bounds above. */
  if (nm >= 1) {
#pragma omp parallel for private(c2)
    for (c1 = nm; c1 <= ((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)); c1++) {
      for (c2 = 0; c2 <= nm + -1; c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
      }
      for (c2 = nm; c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
      }
      for (c2 = nj; c2 <= nk + -1; c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
      }
      for (c2 = nk; c2 <= nj + -1; c2++) {
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
      }
    }
  }
  if (nm <= 0) {
#pragma omp parallel for private(c2)
    for (c1 = 0; c1 <= ((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nk + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nk + -1)); c1++) {
      for (c2 = 0; c2 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
      }
      for (c2 = nj; c2 <= nk + -1; c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
      }
      for (c2 = nk; c2 <= nj + -1; c2++) {
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
      }
    }
  }
  if (nj >= 1 && nl >= 1) {
#pragma omp parallel for private(c2)
    for (c1 = nj; c1 <= ((((ni + -1 < nk + -1?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {
      for (c2 = 0; c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
      for (c2 = nl; c2 <= nj + -1; c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
      }
      for (c2 = nj; c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
      for (c2 = (nj > nl?nj : nl); c2 <= nk + -1; c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
      }
      for (c2 = nk; c2 <= nl + -1; c2++) {
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
    }
  }
  if (nj >= 1 && nl <= 0) {
#pragma omp parallel for private(c2)
    for (c1 = nj; c1 <= ((((ni + -1 < nk + -1?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {
      for (c2 = 0; c2 <= nj + -1; c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
      }
      for (c2 = nj; c2 <= nk + -1; c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
      }
    }
  }
  if (nj >= 1) {
#pragma omp parallel for private(c2)
    for (c1 = (nj > nm?nj : nm); c1 <= ((ni + -1 < nk + -1?ni + -1 : nk + -1)); c1++) {
      for (c2 = 0; c2 <= nj + -1; c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
      }
      for (c2 = nj; c2 <= nk + -1; c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
      }
    }
  }
  if (nj <= 0 && nl >= 1) {
#pragma omp parallel for private(c2)
    for (c1 = 0; c1 <= ((((ni + -1 < nk + -1?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {
      for (c2 = 0; c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
      for (c2 = nl; c2 <= nk + -1; c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
      }
      for (c2 = nk; c2 <= nl + -1; c2++) {
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
    }
  }
  if (nj <= 0 && nl <= 0) {
#pragma omp parallel for private(c2)
    for (c1 = 0; c1 <= ((((ni + -1 < nk + -1?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {
      for (c2 = 0; c2 <= nk + -1; c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
      }
    }
  }
  if (nj <= 0) {
#pragma omp parallel for private(c2)
    for (c1 = (0 > nm?0 : nm); c1 <= ((ni + -1 < nk + -1?ni + -1 : nk + -1)); c1++) {
      for (c2 = 0; c2 <= nk + -1; c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
      }
    }
  }
  if (nk >= 1 && nl >= 1) {
#pragma omp parallel for private(c2)
    for (c1 = nk; c1 <= ((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nm + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nm + -1)); c1++) {
      for (c2 = 0; c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
      for (c2 = nl; c2 <= nk + -1; c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
      }
      for (c2 = nk; c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
      for (c2 = (nk > nl?nk : nl); c2 <= nm + -1; c2++) {
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
      }
      for (c2 = nm; c2 <= nl + -1; c2++) {
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
    }
  }
  if (nk >= 1 && nl <= 0) {
#pragma omp parallel for private(c2)
    for (c1 = nk; c1 <= ((((ni + -1 < nj + -1?ni + -1 : nj + -1)) < nm + -1?((ni + -1 < nj + -1?ni + -1 : nj + -1)) : nm + -1)); c1++) {
      for (c2 = 0; c2 <= nk + -1; c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
      }
      for (c2 = nk; c2 <= nm + -1; c2++) {
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
      }
    }
  }
  if (nk >= 1 && nm >= 1) {
#pragma omp parallel for private(c2)
    for (c1 = (nk > nm?nk : nm); c1 <= ((ni + -1 < nj + -1?ni + -1 : nj + -1)); c1++) {
      for (c2 = 0; c2 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
      }
      for (c2 = nm; c2 <= nk + -1; c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
      }
      for (c2 = nk; c2 <= nm + -1; c2++) {
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
      }
    }
  }
  if (nk >= 1 && nm <= 0) {
#pragma omp parallel for private(c2)
    for (c1 = nk; c1 <= ((ni + -1 < nj + -1?ni + -1 : nj + -1)); c1++) {
      for (c2 = 0; c2 <= nk + -1; c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
      }
    }
  }
  if (nk >= 1 && nl >= 1) {
#pragma omp parallel for private(c2)
    for (c1 = (nj > nk?nj : nk); c1 <= ((ni + -1 < nm + -1?ni + -1 : nm + -1)); c1++) {
      for (c2 = 0; c2 <= ((nk + -1 < nl + -1?nk + -1 : nl + -1)); c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
      for (c2 = nl; c2 <= nk + -1; c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
      }
      for (c2 = nk; c2 <= nl + -1; c2++) {
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
    }
  }
  if (nk >= 1 && nl <= 0) {
#pragma omp parallel for private(c2)
    for (c1 = (nj > nk?nj : nk); c1 <= ((ni + -1 < nm + -1?ni + -1 : nm + -1)); c1++) {
      for (c2 = 0; c2 <= nk + -1; c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
      }
    }
  }
  if (nk >= 1) {
#pragma omp parallel for private(c2)
    for (c1 = (((nj > nk?nj : nk)) > nm?((nj > nk?nj : nk)) : nm); c1 <= ni + -1; c1++) {
      for (c2 = 0; c2 <= nk + -1; c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
      }
    }
  }
  if (nl >= 1) {
#pragma omp parallel for private(c2)
    for (c1 = (0 > ni?0 : ni); c1 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nm + -1)); c1++) {
      for (c2 = 0; c2 <= ((((nj + -1 < nl + -1?nj + -1 : nl + -1)) < nm + -1?((nj + -1 < nl + -1?nj + -1 : nl + -1)) : nm + -1)); c2++) {
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
      for (c2 = nl; c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
      }
      for (c2 = nm; c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
      for (c2 = (nl > nm?nl : nm); c2 <= nj + -1; c2++) {
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
      }
      for (c2 = nj; c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
      for (c2 = (nj > nl?nj : nl); c2 <= nm + -1; c2++) {
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
      }
      for (c2 = (nj > nm?nj : nm); c2 <= nl + -1; c2++) {
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
    }
  }
  if (nl <= 0) {
#pragma omp parallel for private(c2)
    for (c1 = (0 > ni?0 : ni); c1 <= ((((nj + -1 < nk + -1?nj + -1 : nk + -1)) < nm + -1?((nj + -1 < nk + -1?nj + -1 : nk + -1)) : nm + -1)); c1++) {
      for (c2 = 0; c2 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c2++) {
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
      }
      for (c2 = nm; c2 <= nj + -1; c2++) {
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
      }
      for (c2 = nj; c2 <= nm + -1; c2++) {
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
      }
    }
  }
  if (nm >= 1) {
#pragma omp parallel for private(c2)
    for (c1 = (ni > nm?ni : nm); c1 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c1++) {
      for (c2 = 0; c2 <= nm + -1; c2++) {
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
      }
      for (c2 = nm; c2 <= nj + -1; c2++) {
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
      }
    }
  }
  if (nm <= 0) {
#pragma omp parallel for private(c2)
    for (c1 = (0 > ni?0 : ni); c1 <= ((nj + -1 < nk + -1?nj + -1 : nk + -1)); c1++) {
      for (c2 = 0; c2 <= nj + -1; c2++) {
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
      }
    }
  }
  if (nj >= 1 && nl >= 1) {
#pragma omp parallel for private(c2)
    for (c1 = (ni > nj?ni : nj); c1 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c1++) {
      for (c2 = 0; c2 <= ((nj + -1 < nl + -1?nj + -1 : nl + -1)); c2++) {
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
      for (c2 = nl; c2 <= nj + -1; c2++) {
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
      }
      for (c2 = nj; c2 <= nl + -1; c2++) {
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
    }
  }
  if (nj >= 1 && nl <= 0) {
#pragma omp parallel for private(c2)
    for (c1 = (ni > nj?ni : nj); c1 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c1++) {
      for (c2 = 0; c2 <= nj + -1; c2++) {
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
      }
    }
  }
  if (nj >= 1) {
#pragma omp parallel for private(c2)
    for (c1 = (((ni > nj?ni : nj)) > nm?((ni > nj?ni : nj)) : nm); c1 <= nk + -1; c1++) {
      for (c2 = 0; c2 <= nj + -1; c2++) {
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
      }
    }
  }
  if (nk >= 1 && nl >= 1) {
#pragma omp parallel for private(c2)
    for (c1 = (ni > nk?ni : nk); c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {
      for (c2 = 0; c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
      for (c2 = nl; c2 <= nm + -1; c2++) {
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
      }
      for (c2 = nm; c2 <= nl + -1; c2++) {
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
    }
  }
  if (nk >= 1 && nl <= 0) {
#pragma omp parallel for private(c2)
    for (c1 = (ni > nk?ni : nk); c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {
      for (c2 = 0; c2 <= nm + -1; c2++) {
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
      }
    }
  }
  if (nk >= 1 && nm >= 1) {
#pragma omp parallel for private(c2)
    for (c1 = (((ni > nk?ni : nk)) > nm?((ni > nk?ni : nk)) : nm); c1 <= nj + -1; c1++) {
      for (c2 = 0; c2 <= nm + -1; c2++) {
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
      }
    }
  }
  if (nk <= 0 && nl >= 1) {
#pragma omp parallel for private(c2)
    for (c1 = 0; c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {
      for (c2 = 0; c2 <= ((nl + -1 < nm + -1?nl + -1 : nm + -1)); c2++) {
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
      for (c2 = nl; c2 <= nm + -1; c2++) {
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
      }
      for (c2 = nm; c2 <= nl + -1; c2++) {
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
    }
  }
  if (nk <= 0 && nl <= 0) {
#pragma omp parallel for private(c2)
    for (c1 = 0; c1 <= ((nj + -1 < nm + -1?nj + -1 : nm + -1)); c1++) {
      for (c2 = 0; c2 <= nm + -1; c2++) {
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
      }
    }
  }
  if (nk <= 0 && nm >= 1) {
#pragma omp parallel for private(c2)
    for (c1 = nm; c1 <= nj + -1; c1++) {
      for (c2 = 0; c2 <= nm + -1; c2++) {
        C[c1][c2] = ((double )c1) * (c2 + 3) / nl;
      }
    }
  }
  if (nj <= 0 && nl >= 1) {
#pragma omp parallel for private(c2)
    for (c1 = (0 > ni?0 : ni); c1 <= ((nk + -1 < nm + -1?nk + -1 : nm + -1)); c1++) {
      for (c2 = 0; c2 <= nl + -1; c2++) {
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
    }
  }
  if (nk >= 1 && nl >= 1) {
#pragma omp parallel for private(c2)
    for (c1 = (((ni > nj?ni : nj)) > nk?((ni > nj?ni : nj)) : nk); c1 <= nm + -1; c1++) {
      for (c2 = 0; c2 <= nl + -1; c2++) {
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
    }
  }
  if (nk <= 0 && nl >= 1) {
#pragma omp parallel for private(c2)
    for (c1 = (0 > nj?0 : nj); c1 <= nm + -1; c1++) {
      for (c2 = 0; c2 <= nl + -1; c2++) {
        D[c1][c2] = ((double )c1) * (c2 + 2) / nk;
      }
    }
  }
}
}
/* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output.
*/
/* Dump G to stderr so the compiler cannot dead-code-eliminate the kernel. */
static void print_array(int ni,int nl,double G[128 + 0][128 + 0])
{
  int i;
  int j;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nl; j++) {
      fprintf(stderr,"%0.2lf ",G[i][j]);
      if ((i * ni + j) % 20 == 0)
        fprintf(stderr,"\n");
    }
  fprintf(stderr,"\n");
}
/* Main computational kernel. The whole function will be timed, including the call and return. */
/* Computes G = (A*B)*(C*D) over fixed 128x128 tiles.  Each loop nest is
   parallelized over the row index c1; in the fused final nest the G[c1][c5]
   accumulations across c2 iterations stay within a single c1 iteration
   (hence within one thread), so the nest is race-free. */
static void kernel_3mm(int ni,int nj,int nk,int nl,int nm,double E[128 + 0][128 + 0],double A[128 + 0][128 + 0],double B[128 + 0][128 + 0],double F[128 + 0][128 + 0],double C[128 + 0][128 + 0],double D[128 + 0][128 + 0],double G[128 + 0][128 + 0])
{
  //int i;
  //int j;
  //int k;
//#pragma scop
{
  int c1;
  int c2;
  int c5;
  /* Zero the accumulators G and F. */
#pragma omp parallel for private(c2)
  for (c1 = 0; c1 <= 127; c1++) {
    for (c2 = 0; c2 <= 127; c2++) {
      G[c1][c2] = 0;
      F[c1][c2] = 0;
    }
  }
  /* F := C * D */
#pragma omp parallel for private(c5, c2)
  for (c1 = 0; c1 <= 127; c1++) {
    for (c2 = 0; c2 <= 127; c2++) {
      for (c5 = 0; c5 <= 127; c5++) {
        F[c1][c2] += C[c1][c5] * D[c5][c2];
      }
    }
  }
  /* Zero E. */
#pragma omp parallel for private(c2)
  for (c1 = 0; c1 <= 127; c1++) {
    for (c2 = 0; c2 <= 127; c2++) {
      E[c1][c2] = 0;
    }
  }
  /* Fused: E := A * B, then fold each finished E[c1][c2] into G := E * F. */
#pragma omp parallel for private(c5, c2)
  for (c1 = 0; c1 <= 127; c1++) {
    for (c2 = 0; c2 <= 127; c2++) {
      for (c5 = 0; c5 <= 127; c5++) {
        E[c1][c2] += A[c1][c5] * B[c5][c2];
      }
      for (c5 = 0; c5 <= 127; c5++) {
        G[c1][c5] += E[c1][c2] * F[c2][c5];
      }
    }
  }
}
//#pragma endscop
}
int main(int argc,char **argv)
{
/* Retrieve problem size. */
  int ni = 128;
  int nj = 128;
  int nk = 128;
  int nl = 128;
  int nm = 128;
/* Variable declaration/allocation. */
  double (*E)[128 + 0][128 + 0];
  E = ((double (*)[128 + 0][128 + 0])(polybench_alloc_data(((128 + 0) * (128 + 0)),(sizeof(double )))));
  ;
  double (*A)[128 + 0][128 + 0];
  A = ((double (*)[128 + 0][128 + 0])(polybench_alloc_data(((128 + 0) * (128 + 0)),(sizeof(double )))));
  ;
  double (*B)[128 + 0][128 + 0];
  B = ((double (*)[128 + 0][128 + 0])(polybench_alloc_data(((128 + 0) * (128 + 0)),(sizeof(double )))));
  ;
  double (*F)[128 + 0][128 + 0];
  F = ((double (*)[128 + 0][128 + 0])(polybench_alloc_data(((128 + 0) * (128 + 0)),(sizeof(double )))));
  ;
  double (*C)[128 + 0][128 + 0];
  C = ((double (*)[128 + 0][128 + 0])(polybench_alloc_data(((128 + 0) * (128 + 0)),(sizeof(double )))));
  ;
  double (*D)[128 + 0][128 + 0];
  D = ((double (*)[128 + 0][128 + 0])(polybench_alloc_data(((128 + 0) * (128 + 0)),(sizeof(double )))));
  ;
  double (*G)[128 + 0][128 + 0];
  G = ((double (*)[128 + 0][128 + 0])(polybench_alloc_data(((128 + 0) * (128 + 0)),(sizeof(double )))));
  ;
/* Initialize array(s). */
  init_array(ni,nj,nk,nl,nm, *A, *B, *C, *D);
/* Start timer. */
  polybench_timer_start();
  ;
/* Run kernel. */
  kernel_3mm(ni,nj,nk,nl,nm, *E, *A, *B, *F, *C, *D, *G);
/* Stop and print timer. */
  polybench_timer_stop();
  ;
  polybench_timer_print();
  ;
/* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */
/* The argc > 42 guard is the standard PolyBench idiom: normally false, but
   opaque to the compiler, so G stays live. */
  if (argc > 42 && !strcmp(argv[0],""))
    print_array(ni,nl, *G);
/* Be clean. */
  free(((void *)E));
  ;
  free(((void *)A));
  ;
  free(((void *)B));
  ;
  free(((void *)F));
  ;
  free(((void *)C));
  ;
  free(((void *)D));
  ;
  free(((void *)G));
  ;
  return 0;
}
10.c
/*
 * Task (translated from the original Russian comment): declare an array of
 * integers initialized with a random-number generator; using the
 * "omp parallel for" and "omp atomic" constructs, count the elements whose
 * values are divisible by 9, choosing the number of threads explicitly, and
 * print the result on screen.
 *
 * NOTE(review): the task statement asks for a[30]; this program uses 10
 * elements — confirm the intended size before changing it.
 */
#include <stdio.h>
#include <stdlib.h> /* rand, srand — previously used without a declaration */
#include <time.h>
#include <omp.h>

int main(int argc, char *argv[])
{
    /* Seed the PRNG so each run sees different data. */
    srand(time(NULL));

    int a[10];
    for (int i = 0; i < 10; i++)
        a[i] = rand() % 50;

    int count = 0;
    /* Four threads share "count"; the increment is made race-free with
       "omp atomic", as required by the exercise. */
    #pragma omp parallel for shared(count) num_threads(4)
    for (int i = 0; i < 10; i++) {
        if (a[i] % 9 == 0) {
            printf("%d\n", a[i]);
            #pragma omp atomic
            count++;
        }
    }

    printf("Количество элементов, кратных 9: %d", count);
    return 0;
}
cher2k.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zher2k.c, normal z -> c, Fri Sep 28 17:38:06 2018
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_her2k
 *
 * Performs one of the Hermitian rank 2k operations
 *
 *    \f[ C = \alpha A \times B^H + conjg( \alpha ) B \times A^H + \beta C, \f]
 *    or
 *    \f[ C = \alpha A^H \times B + conjg( \alpha ) B^H \times A + \beta C, \f]
 *
 * where alpha is a complex scalar, beta is a real scalar,
 * C is an n-by-n Hermitian matrix, and A and B are n-by-k matrices
 * in the first case and k-by-n matrices in the second case.
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of C is stored;
 *          - PlasmaLower: Lower triangle of C is stored.
 *
 * @param[in] trans
 *          - PlasmaNoTrans:
 *            \f[ C = \alpha A \times B^H
 *                  + conjg( \alpha ) B \times A^H + \beta C; \f]
 *          - PlasmaConjTrans:
 *            \f[ C = \alpha A^H \times B
 *                  + conjg( \alpha ) B^H \times A + \beta C. \f]
 *
 * @param[in] n
 *          The order of the matrix C. n >= zero.
 *
 * @param[in] k
 *          If trans = PlasmaNoTrans, number of columns of the A and B matrices;
 *          if trans = PlasmaConjTrans, number of rows of the A and B matrices.
 *
 * @param[in] alpha
 *          The scalar alpha.
 *
 * @param[in] pA
 *          An lda-by-ka matrix.
 *          If trans = PlasmaNoTrans, ka = k;
 *          if trans = PlasmaConjTrans, ka = n.
 *
 * @param[in] lda
 *          The leading dimension of the array A.
 *          If trans = PlasmaNoTrans, lda >= max(1, n);
 *          if trans = PlasmaConjTrans, lda >= max(1, k).
 *
 * @param[in] pB
 *          An ldb-by-kb matrix.
 *          If trans = PlasmaNoTrans, kb = k;
 *          if trans = PlasmaConjTrans, kb = n.
 *
 * @param[in] ldb
 *          The leading dimension of the array B.
 *          If trans = PlasmaNoTrans, ldb >= max(1, n);
 *          if trans = PlasmaConjTrans, ldb >= max(1, k).
 *
 * @param[in] beta
 *          The scalar beta.
 *
 * @param[in,out] pC
 *          An ldc-by-n matrix.
 *          On exit, the uplo part of the matrix is overwritten
 *          by the uplo part of the updated matrix.
 *
 * @param[in] ldc
 *          The leading dimension of the array C. ldc >= max(1, n).
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 *
 *******************************************************************************
 *
 * @sa plasma_omp_cher2k
 * @sa plasma_cher2k
 *
 ******************************************************************************/
int plasma_cher2k(plasma_enum_t uplo, plasma_enum_t trans,
                  int n, int k,
                  plasma_complex32_t alpha, plasma_complex32_t *pA, int lda,
                                            plasma_complex32_t *pB, int ldb,
                  float beta,               plasma_complex32_t *pC, int ldc)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if ((trans != PlasmaNoTrans) &&
        (trans != PlasmaConjTrans)) {
        plasma_error("illegal value of trans");
        return -2;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -3;
    }
    if (k < 0) {
        plasma_error("illegal value of k");
        return -4;
    }

    // A and B share the same shape: n-by-k or k-by-n depending on trans.
    int am, an;
    int bm, bn;
    if (trans == PlasmaNoTrans) {
        am = n;
        an = k;
        bm = n;
        bn = k;
    }
    else {
        am = k;
        an = n;
        bm = k;
        bn = n;
    }
    if (lda < imax(1, am)) {
        plasma_error("illegal value of lda");
        return -7;
    }
    if (ldb < imax(1, bm)) {
        plasma_error("illegal value of ldb");
        return -9;
    }
    if (ldc < imax(1, n)) {
        plasma_error("illegal value of ldc");
        return -12;
    }

    // quick return
    // Fixed: k is an int, so compare against the integer 0 (the original
    // compared "k == 0.0"), matching the test in plasma_omp_cher2k().
    if (n == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0))
        return PlasmaSuccess;

    // Tune parameters.
    // (her2k shares the syr2k tuning tables — presumably intentional.)
    if (plasma->tuning)
        plasma_tune_syr2k(plasma, PlasmaComplexFloat, n, k);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    plasma_desc_t C;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        am, an, 0, 0, am, an, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        bm, bn, 0, 0, bm, bn, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        n, n, 0, 0, n, n, &C);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_cge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_cge2desc(pB, ldb, B, &sequence, &request);
        plasma_omp_cge2desc(pC, ldc, C, &sequence, &request);

        // Call the tile async function.
        plasma_omp_cher2k(uplo, trans,
                          alpha, A,
                                 B,
                          beta,  C,
                          &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_cdesc2ge(C, pC, ldc, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);
    plasma_desc_destroy(&C);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_her2k
 *
 * Performs rank 2k update.
 * Non-blocking tile version of plasma_cher2k().
 * May return before the computation is finished.
 * Operates on matrices stored by tiles.
 * All matrices are passed through descriptors.
 * All dimensions are taken from the descriptors.
 * Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of C is stored;
 *          - PlasmaLower: Lower triangle of C is stored.
 *
 * @param[in] trans
 *          - PlasmaNoTrans:
 *            \f[ C = \alpha A \times B^H
 *                  + conjg( \alpha ) B \times A^H + \beta C; \f]
 *          - PlasmaConjTrans:
 *            \f[ C = \alpha A^H \times B
 *                  + conjg( \alpha ) B^H \times A + \beta C. \f]
 *
 * @param[in] alpha
 *          The scalar alpha.
 *
 * @param[in] A
 *          Descriptor of matrix A.
 *
 * @param[in] B
 *          Descriptor of matrix B.
 *
 * @param[in] beta
 *          The scalar beta.
 *
 * @param[in,out] C
 *          Descriptor of matrix C.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes).  Check
 *          the sequence->status for errors.
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values.
The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_cher2k
 * @sa plasma_omp_cher2k
 * @sa plasma_omp_cher2k
 *
 ******************************************************************************/
void plasma_omp_cher2k(plasma_enum_t uplo, plasma_enum_t trans,
                       plasma_complex32_t alpha, plasma_desc_t A,
                                                 plasma_desc_t B,
                       float beta,               plasma_desc_t C,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((trans != PlasmaNoTrans) &&
        (trans != PlasmaConjTrans)) {
        plasma_error("illegal value of trans");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        plasma_error("invalid A");
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        plasma_error("invalid B");
        return;
    }
    if (plasma_desc_check(C) != PlasmaSuccess) {
        plasma_error("invalid C");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // NOTE(review): when sequence or request is NULL, the error paths below
    // still pass the NULL pointer to plasma_request_fail() — verify that
    // plasma_request_fail tolerates NULL arguments.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    // k is the shared dimension of A and B, taken from the descriptor.
    int k = trans == PlasmaNoTrans ? A.n : A.m;
    if (C.m == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0))
        return;

    // Call the parallel function.
    plasma_pcher2k(uplo, trans,
                   alpha, A,
                          B,
                   beta,  C,
                   sequence, request);
}
ejercicio9b.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>

//#define PRINTF_ALL

/*
 * Multiplies an f x c matrix (every entry 2.0) by a vector of length c
 * (every entry 2.0) with OpenMP, timing the product.
 *
 * Usage: ./ejercicio9b <filas> <columnas>
 *
 * Fixes over the original:
 *  - v1 multiplies the columns, so it must have c elements (it was sized f
 *    but written up to index c-1: out-of-bounds when c > f).
 *  - the row-allocation, v2-initialization and row-free loops iterated to c
 *    instead of f (out-of-bounds / leaks when f != c).
 *  - the final printf indexed v1[f-1] (should be v1[c-1]), used '+' in a
 *    message describing a multiplication, and %u for an int.
 *  - the shared accumulator guarded by "omp atomic" is replaced by a
 *    per-row reduction, which computes the same sums.
 */
int main(int argc, char **argv)
{
    if (argc < 3) {
        fprintf(stderr, "Falta fila y columna\n");
        exit(-1);
    }

    double ncgt, cgt1, cgt2;   /* tiempo de ejecución */
    int i, k;
    int f = atoi(argv[1]);     /* number of rows */
    int c = atoi(argv[2]);     /* number of columns */
    if (f <= 0 || c <= 0) {
        fprintf(stderr, "Fila y columna deben ser positivas\n");
        exit(-1);
    }

    /* v1: input vector (length c); v2: result vector (length f). */
    double *v1 = malloc(c * sizeof *v1);
    double *v2 = malloc(f * sizeof *v2);
    double **m = malloc(f * sizeof *m);
    if (v1 == NULL || v2 == NULL || m == NULL) {
        fprintf(stderr, "Sin memoria\n");
        exit(-1);
    }

    /* Allocate the f rows of c columns each. */
    for (i = 0; i < f; ++i) {
        m[i] = malloc(c * sizeof *m[i]);
        if (m[i] == NULL) {
            fprintf(stderr, "Sin memoria\n");
            exit(-1);
        }
    }

    /* Initialize v1 and the matrix in parallel. */
#pragma omp parallel for
    for (i = 0; i < c; ++i)
        v1[i] = 2;

#pragma omp parallel for private(k)
    for (i = 0; i < f; ++i)
        for (k = 0; k < c; ++k)
            m[i][k] = 2;

    /* Matrix-vector product, timed: each row's dot product is computed
     * with a reduction instead of atomic updates to a shared variable. */
    cgt1 = omp_get_wtime();
    for (i = 0; i < f; ++i) {
        double suma = 0.0;
#pragma omp parallel for reduction(+:suma)
        for (k = 0; k < c; ++k)
            suma += m[i][k] * v1[k];
        v2[i] = suma;
    }
    cgt2 = omp_get_wtime();
    ncgt = cgt2 - cgt1;

    /* Print every element, or just the two corner products. */
#ifdef PRINTF_ALL
    printf("Tiempo(seg.):%11.9f\t / Tamaño Vectores:%d\n", ncgt, f);
    for (i = 0; i < f; i++)
        for (k = 0; k < c; ++k)
            printf("/ m[%d][%d]*V1[%d]=v2[%d] (%8.6f*%8.6f=%8.6f) /\n",
                   i, k, k, i, m[i][k], v1[k], v2[i]);
#else
    printf("Tiempo(seg.):%11.9f\t / Tamaño Vectores:%d\t"
           "/ m[0][0]*V1[0]=V2[0](%8.6f*%8.6f=%8.6f) "
           "// m[%d][%d]*V1[%d]=V2[%d](%8.6f*%8.6f=%8.6f) /\n",
           ncgt, f, m[0][0], v1[0], v2[0],
           f - 1, c - 1, c - 1, f - 1, m[f - 1][c - 1], v1[c - 1], v2[f - 1]);
#endif

    free(v1);   /* libera el espacio reservado para v1 */
    free(v2);   /* libera el espacio reservado para v2 */
    for (i = 0; i < f; ++i)
        free(m[i]);
    free(m);    /* libera el espacio reservado para m */
    return 0;
}
TimeCluster.h
/****************************************************************************** ** Copyright (c) 2015, Intel Corporation ** ** All rights reserved. ** ** ** ** Redistribution and use in source and binary forms, with or without ** ** modification, are permitted provided that the following conditions ** ** are met: ** ** 1. Redistributions of source code must retain the above copyright ** ** notice, this list of conditions and the following disclaimer. ** ** 2. Redistributions in binary form must reproduce the above copyright ** ** notice, this list of conditions and the following disclaimer in the ** ** documentation and/or other materials provided with the distribution. ** ** 3. Neither the name of the copyright holder nor the names of its ** ** contributors may be used to endorse or promote products derived ** ** from this software without specific prior written permission. ** ** ** ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ** ******************************************************************************/ /* Alexander Heinecke (Intel Corp.) ******************************************************************************/ /** * @file * This file is part of SeisSol. 
* * @author Alex Breuer (breuer AT mytum.de, http://www5.in.tum.de/wiki/index.php/Dipl.-Math._Alexander_Breuer) * * @section LICENSE * Copyright (c) 2013-2015, SeisSol Group * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * @section DESCRIPTION * LTS cluster in SeisSol. 
**/

#ifndef TIMECLUSTER_H_
#define TIMECLUSTER_H_

#ifdef USE_MPI
#include <mpi.h>
#include <list>
#endif

#include <Initializer/typedefs.hpp>
#include <SourceTerm/typedefs.hpp>
#include <utils/logger.h>
#include <Initializer/LTS.h>
#include <Initializer/tree/LTSTree.hpp>
#include <Kernels/Time.h>
#include <Kernels/Local.h>
#include <Kernels/Neighbor.h>
#include <Kernels/DynamicRupture.h>
#include <Kernels/Plasticity.h>
#include <Solver/FreeSurfaceIntegrator.h>
#include <Monitoring/LoopStatistics.h>
#include <Kernels/TimeCommon.h>

#ifdef ACL_DEVICE
#include <device.h>
#include <Solver/Pipeline/DrPipeline.h>
#endif

// Forward declarations so dependents can hold pointers without the full headers.
namespace seissol {
  namespace time_stepping {
    class TimeCluster;
  }
  namespace kernels {
    class ReceiverCluster;
  }
}

/**
 * Time cluster, which represents a collection of elements having the same time step width.
 **/
class seissol::time_stepping::TimeCluster {
public:
    //! cluster id on this rank
    const unsigned int m_clusterId;

    //! global cluster id
    const unsigned int m_globalClusterId;

private:
    //! true if plasticity is evaluated in the neighboring integration of this cluster
    bool usePlasticity;

    //! number of time steps
    unsigned long m_numberOfTimeSteps;

    /*
     * integrators
     */
    //! time kernel
    kernels::Time m_timeKernel;

    //! local kernel
    kernels::Local m_localKernel;

    //! neighbor kernel
    kernels::Neighbor m_neighborKernel;

    //! dynamic rupture kernel
    kernels::DynamicRupture m_dynamicRuptureKernel;

    /*
     * mesh structure
     */
    struct MeshStructure *m_meshStructure;

    /*
     * global data
     */
    //! global data structures
    GlobalData *m_globalDataOnHost{nullptr};
    GlobalData *m_globalDataOnDevice{nullptr};

#ifdef ACL_DEVICE
    //! handle to the (process-wide) device instance
    device::DeviceInstance& device = device::DeviceInstance::getInstance();
    //! pipeline for dynamic rupture computations on the device
    dr::pipeline::DrPipeline drPipeline;
#endif

    /*
     * element data and mpi queues
     */
#ifdef USE_MPI
    //! pending copy region sends
    std::list< MPI_Request* > m_sendQueue;

    //! pending ghost region receives
    std::list< MPI_Request* > m_receiveQueue;
#endif

    //! LTS tree layer data of this cluster (cells, buffers, derivatives)
    seissol::initializers::TimeCluster* m_clusterData;
    //! LTS tree layer data of this cluster's dynamic rupture faces
    seissol::initializers::TimeCluster* m_dynRupClusterData;
    //! variable handles into the LTS tree
    seissol::initializers::LTS* m_lts;
    //! variable handles into the dynamic rupture tree
    seissol::initializers::DynamicRupture* m_dynRup;

    //! time step width of the performed time step.
    double m_timeStepWidth;

    //! Mapping of cells to point sources
    sourceterm::CellToPointSourcesMapping const* m_cellToPointSources;

    //! Number of mapping of cells to point sources
    unsigned m_numberOfCellToPointSourcesMappings;

    //! Point sources
    sourceterm::PointSources const* m_pointSources;

    //! true if dynamic rupture faces are present
    bool m_dynamicRuptureFaces;

    //! Indices into the per-part flop counters below.
    enum ComputePart {
      LocalInterior = 0,
      NeighborInterior,
      DRNeighborInterior,
#ifdef USE_MPI
      LocalCopy,
      NeighborCopy,
      DRNeighborCopy,
#endif
      DRFrictionLawCopy,
      DRFrictionLawInterior,
      PlasticityCheck,
      PlasticityYield,
      NUM_COMPUTE_PARTS
    };

    //! non-zero and hardware flop counts per compute part
    long long m_flops_nonZero[NUM_COMPUTE_PARTS];
    long long m_flops_hardware[NUM_COMPUTE_PARTS];

    //! Tv parameter for plasticity
    double m_tv;

    //! Relax time for plasticity
    double m_oneMinusIntegratingFactor;

    //! Stopwatch of TimeManager
    LoopStatistics* m_loopStatistics;
    unsigned m_regionComputeLocalIntegration;
    unsigned m_regionComputeNeighboringIntegration;
    unsigned m_regionComputeDynamicRupture;

    //! receivers assigned to this cluster (may be null)
    kernels::ReceiverCluster* m_receiverCluster;

#ifdef USE_MPI
    /**
     * Receives the copy layer data from relevant neighboring MPI clusters.
     **/
    void receiveGhostLayer();

    /**
     * Sends the associated regions of the copy layer to relevant neighboring MPI clusters
     **/
    void sendCopyLayer();

#if defined(_OPENMP) && defined(USE_COMM_THREAD)
    /**
     * Inits Receives the copy layer data from relevant neighboring MPI clusters, active when using communication thread
     **/
    void initReceiveGhostLayer();

    /**
     * Inits Sends the associated regions of the copy layer to relevant neighboring MPI clusters, active when using communication thread
     **/
    void initSendCopyLayer();

    /**
     * Waits until the initialization of the communication is finished.
     **/
    void waitForInits();
#endif

    /**
     * Tests for pending ghost layer communication.
     **/
    bool testForGhostLayerReceives();

    /**
     * Tests for pending copy layer communication.
     **/
    bool testForCopyLayerSends();
#endif

    /**
     * Writes the receiver output if applicable (receivers present, receivers have to be written).
     **/
    void writeReceivers();

    /**
     * Computes the source terms if applicable.
     **/
    void computeSources();

    /**
     * Computes dynamic rupture.
     **/
    void computeDynamicRupture( seissol::initializers::Layer&  layerData );

    /**
     * Computes all cell local integration.
     *
     * This are:
     *  * time integration
     *  * volume integration
     *  * local boundary integration
     *
     * Remark: After this step the DOFs are only updated half with the boundary contribution
     *         of the neighborings cells missing.
     *
     * @param i_numberOfCells number of cells.
     * @param i_cellInformation cell local information.
     * @param i_cellData cell data.
     * @param io_buffers time integration buffers.
     * @param io_derivatives time derivatives.
     * @param io_dofs degrees of freedom.
     **/
    void computeLocalIntegration( seissol::initializers::Layer&  i_layerData );

    /**
     * Computes the contribution of the neighboring cells to the boundary integral.
     *
     * Remark: After this step (in combination with the local integration) the DOFs are at the next time step.
     * TODO: This excludes dynamic rupture contribution.
     *
     * @param i_numberOfCells number of cells.
     * @param i_cellInformation cell local information.
     * @param i_cellData cell data.
     * @param i_faceNeighbors pointers to neighboring time buffers or derivatives.
     * @param io_dofs degrees of freedom.
     **/
    void computeNeighboringIntegration( seissol::initializers::Layer&  i_layerData );

#ifndef ACL_DEVICE
    /**
     * Host implementation of the neighboring integration; templated on plasticity so
     * the plasticity branch is compiled out when unused.
     * Returns the pair (non-zero flops, hardware flops) spent on plasticity.
     **/
    template<bool usePlasticity>
    std::pair<long, long> computeNeighboringIntegrationImplementation(seissol::initializers::Layer& i_layerData) {
      SCOREP_USER_REGION( "computeNeighboringIntegration", SCOREP_USER_REGION_TYPE_FUNCTION )

      m_loopStatistics->begin(m_regionComputeNeighboringIntegration);

      real* (*faceNeighbors)[4] = i_layerData.var(m_lts->faceNeighbors);
      CellDRMapping (*drMapping)[4] = i_layerData.var(m_lts->drMapping);
      CellLocalInformation* cellInformation = i_layerData.var(m_lts->cellInformation);
      PlasticityData* plasticity = i_layerData.var(m_lts->plasticity);
      real (*pstrain)[7 * NUMBER_OF_ALIGNED_BASIS_FUNCTIONS] = i_layerData.var(m_lts->pstrain);
      unsigned numberOTetsWithPlasticYielding = 0;

      kernels::NeighborData::Loader loader;
      loader.load(*m_lts, i_layerData);

      real *l_timeIntegrated[4];
      real *l_faceNeighbors_prefetch[4];

#ifdef _OPENMP
#pragma omp parallel for schedule(static) default(none) private(l_timeIntegrated, l_faceNeighbors_prefetch) shared(cellInformation, loader, faceNeighbors, pstrain, i_layerData, plasticity, drMapping) reduction(+:numberOTetsWithPlasticYielding)
#endif
      for( unsigned int l_cell = 0; l_cell < i_layerData.getNumberOfCells(); l_cell++ ) {
        auto data = loader.entry(l_cell);
        // Evaluate/gather the time-integrated DOFs of the four face neighbors.
        seissol::kernels::TimeCommon::computeIntegrals(m_timeKernel,
                                                       data.cellInformation.ltsSetup,
                                                       data.cellInformation.faceTypes,
                                                       m_subTimeStart,
                                                       m_timeStepWidth,
                                                       faceNeighbors[l_cell],
#ifdef _OPENMP
                                                       // per-thread scratch slice of the shared LTS integration buffer
                                                       *reinterpret_cast<real (*)[4][tensor::I::size()]>(&(m_globalDataOnHost->integrationBufferLTS[omp_get_thread_num()*4*tensor::I::size()])),
#else
                                                       // NOTE(review): this branch references m_globalData, but the member
                                                       // is named m_globalDataOnHost -- verify the non-OpenMP build compiles.
                                                       *reinterpret_cast<real (*)[4][tensor::I::size()]>(m_globalData->integrationBufferLTS),
#endif
                                                       l_timeIntegrated);

#ifdef ENABLE_MATRIX_PREFETCH
        // Prefetch the next face's data: regular faces read the neighbor's time
        // buffer, dynamic-rupture faces read the godunov state instead.
        l_faceNeighbors_prefetch[0] = (cellInformation[l_cell].faceTypes[1] != FaceType::dynamicRupture) ? faceNeighbors[l_cell][1] : drMapping[l_cell][1].godunov;
        l_faceNeighbors_prefetch[1] = (cellInformation[l_cell].faceTypes[2] != FaceType::dynamicRupture) ? faceNeighbors[l_cell][2] : drMapping[l_cell][2].godunov;
        l_faceNeighbors_prefetch[2] = (cellInformation[l_cell].faceTypes[3] != FaceType::dynamicRupture) ? faceNeighbors[l_cell][3] : drMapping[l_cell][3].godunov;

        // fourth face's prefetches
        if (l_cell < (i_layerData.getNumberOfCells()-1) ) {
          l_faceNeighbors_prefetch[3] = (cellInformation[l_cell+1].faceTypes[0] != FaceType::dynamicRupture) ? faceNeighbors[l_cell+1][0] : drMapping[l_cell+1][0].godunov;
        } else {
          l_faceNeighbors_prefetch[3] = faceNeighbors[l_cell][3];
        }
#endif

        m_neighborKernel.computeNeighborsIntegral( data,
                                                   drMapping[l_cell],
#ifdef ENABLE_MATRIX_PREFETCH
                                                   l_timeIntegrated, l_faceNeighbors_prefetch
#else
                                                   l_timeIntegrated
#endif
                                                   );

        if constexpr (usePlasticity) {
          numberOTetsWithPlasticYielding += seissol::kernels::Plasticity::computePlasticity( m_oneMinusIntegratingFactor,
                                                                                            m_timeStepWidth,
                                                                                            m_tv,
                                                                                            m_globalDataOnHost,
                                                                                            &plasticity[l_cell],
                                                                                            data.dofs,
                                                                                            pstrain[l_cell] );
        }
#ifdef INTEGRATE_QUANTITIES
        seissol::SeisSol::main.postProcessor().integrateQuantities( m_timeStepWidth,
                                                                    i_layerData,
                                                                    l_cell,
                                                                    dofs[l_cell] );
#endif // INTEGRATE_QUANTITIES
      }

      // Every cell pays the plasticity check; only yielding cells pay the yield flops.
      const long long nonZeroFlopsPlasticity = i_layerData.getNumberOfCells() * m_flops_nonZero[PlasticityCheck] + numberOTetsWithPlasticYielding * m_flops_nonZero[PlasticityYield];
      const long long hardwareFlopsPlasticity = i_layerData.getNumberOfCells() * m_flops_hardware[PlasticityCheck] + numberOTetsWithPlasticYielding * m_flops_hardware[PlasticityYield];

      m_loopStatistics->end(m_regionComputeNeighboringIntegration, i_layerData.getNumberOfCells());

      return {nonZeroFlopsPlasticity, hardwareFlopsPlasticity};
    }
#endif // ACL_DEVICE

    //! Accumulates the flop counts of the local (cell-wise) integration.
    void computeLocalIntegrationFlops(unsigned numberOfCells,
                                      CellLocalInformation const* cellInformation,
                                      long long& nonZeroFlops,
                                      long long& hardwareFlops);

    //! Accumulates the flop counts of the neighboring integration, incl. dynamic rupture faces.
    void computeNeighborIntegrationFlops( unsigned numberOfCells,
                                          CellLocalInformation const* cellInformation,
                                          CellDRMapping const (*drMapping)[4],
                                          long long& nonZeroFlops,
                                          long long& hardwareFlops,
                                          long long& drNonZeroFlops,
                                          long long& drHardwareFlops );

    //! Accumulates the flop counts of the dynamic rupture computation of a layer.
    void computeDynamicRuptureFlops( seissol::initializers::Layer& layerData,
                                     long long& nonZeroFlops,
                                     long long& hardwareFlops );

    //! Fills the m_flops_* tables for all compute parts.
    void computeFlops();

    //! Update relax time for plasticity
    void updateRelaxTime() {
      m_oneMinusIntegratingFactor = (m_tv > 0.0) ? 1.0 - exp(-m_timeStepWidth / m_tv) : 1.0;
    }

public:
    //! flags identifying if the respective part is allowed to be updated
    struct {
      bool localCopy;
      bool neighboringCopy;
      bool localInterior;
      bool neighboringInterior;
    } m_updatable;

#ifdef USE_MPI
    //! send true LTS buffers
    volatile bool m_sendLtsBuffers;
#endif

    //! reset lts buffers before performing time predictions
    volatile bool m_resetLtsBuffers;

    /* Sub start time of width respect to the next cluster; use 0 if not relevant, for example in GTS.
     * LTS requires to evaluate a partial time integration of the derivatives. The point zero in time refers to the derivation of the surrounding time derivatives, which
     * coincides with the last completed time step of the next cluster. The start/end of the time step is the start/end of this clusters time step relative to the zero point.
     *   Example:
     * <verb>
     *                                              5 dt
     *   |-----------------------------------------------------------------------------------------| <<< Time stepping of the next cluster (Cn) (5x larger than the current).
     *   |                 |                 |                 |                 |                 |
     *   |*****************|*****************|+++++++++++++++++|                 |                 | <<< Status of the current cluster.
     *   |                 |                 |                 |                 |                 |
     *   |-----------------|-----------------|-----------------|-----------------|-----------------| <<< Time stepping of the current cluster (Cc).
     *   0                 dt               2dt               3dt               4dt               5dt
     * </verb>
     *
     * In the example above two clusters are illustrated: Cc and Cn. Cc is the current cluster under consideration and Cn the next cluster with respect to LTS terminology.
     * Cn is currently at time 0 and provided Cc with derivatives valid until 5dt. Cc updated already twice and did its last full update to reach 2dt (== subTimeStart). Next
     * computeNeighboringCopy is called to accomplish the next full update to reach 3dt (+++). Besides working on the buffers of own buffers and those of previous clusters,
     * Cc needs to evaluate the time prediction of Cn in the interval [2dt, 3dt].
     */
    double m_subTimeStart;

    //! number of full updates the cluster has performed since the last synchronization
    unsigned int m_numberOfFullUpdates;

    //! simulation time of the last full update (this is a complete volume and boundary integration)
    double m_fullUpdateTime;

    //! final time of the prediction (derivatives and time integrated DOFs).
    double m_predictionTime;

    //! time of the next receiver output
    double m_receiverTime;

    /**
     * Constructs a new LTS cluster.
     *
     * @param i_clusterId id of this cluster with respect to the current rank.
     * @param i_globalClusterId global id of this cluster.
     * @param usePlasticity true if using plasticity
     * @param i_timeKernel time integration kernel.
     * @param i_volumeKernel volume integration kernel.
     * @param i_boundaryKernel boundary integration kernel.
     * @param i_meshStructure mesh structure of this cluster.
     * @param i_copyCellInformation cell information in the copy layer.
     * @param i_interiorCellInformation cell information in the interior.
     * @param i_globalData global data.
     * @param i_copyCellData cell data in the copy layer.
     * @param i_interiorCellData cell data in the interior.
     * @param i_cells degrees of freedom, time buffers, time derivatives.
     **/
    TimeCluster(unsigned int i_clusterId, unsigned int i_globalClusterId, bool usePlasticity,
                MeshStructure *i_meshStructure,
                CompoundGlobalData i_globalData,
                seissol::initializers::TimeCluster* i_clusterData,
                seissol::initializers::TimeCluster* i_dynRupClusterData,
                seissol::initializers::LTS* i_lts,
                seissol::initializers::DynamicRupture* i_dynRup,
                LoopStatistics* i_loopStatistics);

    /**
     * Destructor of a LTS cluster.
     * TODO: Currently prints only statistics in debug mode.
     **/
    ~TimeCluster();

    //! Returns the current time step width.
    double timeStepWidth() const {
      return m_timeStepWidth;
    }

    //! Sets the time step width and propagates it to the relax time and the dynamic rupture kernel.
    void setTimeStepWidth(double timestep) {
      m_timeStepWidth = timestep;
      updateRelaxTime();
      m_dynamicRuptureKernel.setTimeStepWidth(timestep);
    }

    /**
     * Adds a source to the cluster.
     *
     * @param i_meshId mesh id of the point of interest.
     **/
    void addSource( unsigned int i_meshId );

    /**
     * Sets the pointer to the cluster's point sources
     *
     * @param i_cellToPointSources Contains mappings of 1 cell offset to m point sources
     * @param i_numberOfCellToPointSourcesMappings Size of i_cellToPointSources
     * @param i_pointSources pointer to all point sources used on this cluster
     */
    void setPointSources( sourceterm::CellToPointSourcesMapping const* i_cellToPointSources,
                          unsigned i_numberOfCellToPointSourcesMappings,
                          sourceterm::PointSources const* i_pointSources );

    //! Assigns the receiver cluster whose output this cluster writes.
    void setReceiverCluster( kernels::ReceiverCluster* receiverCluster) {
      m_receiverCluster = receiverCluster;
    }

    /**
     * Set Tv constant for plasticity.
     */
    void setTv(double tv) {
      m_tv = tv;
      updateRelaxTime();
    }

#ifdef USE_MPI
    /**
     * Computes cell local integration of all cells in the copy layer and initiates the corresponding communication.
     * LTS buffers (updated more than once in general) are reset to zero up on request; GTS-Buffers are reset independently of the request.
     *
     * Cell local integration is:
     *  * time integration
     *  * volume integration
     *  * local boundary integration
     *
     * @return true if the update (incl. communication requests), false if the update failed due to unfinished sends of copy data to MPI neighbors.
     **/
    bool computeLocalCopy();
#endif

    /**
     * Computes cell local integration of all cells in the interior.
     * LTS buffers (updated more than once in general) are reset to zero up on request; GTS-Buffers are reset independently of the request.
     *
     * Cell local integration is:
     *  * time integration
     *  * volume integration
     *  * local boundary integration
     **/
    void computeLocalInterior();

#ifdef USE_MPI
    /**
     * Computes the neighboring contribution to the boundary integral for all cells in the copy layer.
     *
     * @return true if the update (incl. communication requests), false if the update failed due to missing data from neighboring ranks.
     **/
    bool computeNeighboringCopy();
#endif

    /**
     * Computes the neighboring contribution to the boundary integral for all cells in the interior.
     **/
    void computeNeighboringInterior();

    /**
     * Returns number of cells managed by this cluster.
     * @return Number of cells
     */
    long getNumberOfCells() const;

#if defined(_OPENMP) && defined(USE_MPI) && defined(USE_COMM_THREAD)
    /**
     * Tests for pending ghost layer communication, active when using communication thread
     **/
    void pollForGhostLayerReceives();

    /**
     * Polls for pending copy layer communication, active when using communication thread
     **/
    void pollForCopyLayerSends();

    /**
     * Start Receives the copy layer data from relevant neighboring MPI clusters, active when using communication thread
     **/
    void startReceiveGhostLayer();

    /**
     * start Sends the associated regions of the copy layer to relevant neighboring MPI clusters, active when using communication thread
     **/
    void startSendCopyLayer();
#endif
};

#endif
Parser.h
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/OpenMPClause.h" #include "clang/AST/Availability.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; struct LoopHint; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class OMPClause; class ObjCTypeParamList; class ObjCTypeParameter; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. 
/// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class ParsingOpenMPDirectiveRAII; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; /// Tracks an expected type for the current token when parsing an expression. /// Used by code completion for ranking. PreferredTypeBuilder PreferredType; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. 
IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and /// "bool" fast comparison. Only present if AltiVec or ZVector are enabled. IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. IdentifierInfo *Ident_instancetype; /// Identifier for "introduced". IdentifierInfo *Ident_introduced; /// Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// Identifier for "message". IdentifierInfo *Ident_message; /// Identifier for "strict". IdentifierInfo *Ident_strict; /// Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++11 contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++2a contextual keywords. mutable IdentifierInfo *Ident_import; mutable IdentifierInfo *Ident_module; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. 
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> PCSectionHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> MSOptimize; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> STDCFENVHandler; std::unique_ptr<PragmaHandler> STDCCXLIMITHandler; std::unique_ptr<PragmaHandler> STDCUnknownHandler; std::unique_ptr<PragmaHandler> AttributePragmaHandler; std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. 
This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// Parsing OpenMP directive mode. bool OpenMPDirectiveParsing = false; /// When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. bool InMessageExpression; /// Gets set to true after calling ProduceSignatureHelp, it is for a /// workaround to make sure ProduceSignatureHelp is only called at the deepest /// function call. bool CalledSignatureHelp = false; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// RAII class that manages the template parameter depth. class TemplateParameterDepthRAII { unsigned &Depth; unsigned AddedLevels; public: explicit TemplateParameterDepthRAII(unsigned &Depth) : Depth(Depth), AddedLevels(0) {} ~TemplateParameterDepthRAII() { Depth -= AddedLevels; } void operator++() { ++Depth; ++AddedLevels; } void addDepth(unsigned D) { Depth += D; AddedLevels += D; } void setAddedDepth(unsigned D) { Depth = Depth - AddedLevels + D; AddedLevels = D; } unsigned getDepth() const { return Depth; } unsigned getOriginalDepth() const { return Depth - AddedLevels; } }; /// Factory object for creating ParsedAttr objects. AttributeFactory AttrFactory; /// Gathers and cleans up TemplateIdAnnotations when parsing of a /// top-level declaration is finished. SmallVector<TemplateIdAnnotation *, 16> TemplateIds; /// Identifiers which have been declared within a tentative parse. 
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers; /// Tracker for '<' tokens that might have been intended to be treated as an /// angle bracket instead of a less-than comparison. /// /// This happens when the user intends to form a template-id, but typoes the /// template-name or forgets a 'template' keyword for a dependent template /// name. /// /// We track these locations from the point where we see a '<' with a /// name-like expression on its left until we see a '>' or '>>' that might /// match it. struct AngleBracketTracker { /// Flags used to rank candidate template names when there is more than one /// '<' in a scope. enum Priority : unsigned short { /// A non-dependent name that is a potential typo for a template name. PotentialTypo = 0x0, /// A dependent name that might instantiate to a template-name. DependentName = 0x2, /// A space appears before the '<' token. SpaceBeforeLess = 0x0, /// No space before the '<' token NoSpaceBeforeLess = 0x1, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName) }; struct Loc { Expr *TemplateName; SourceLocation LessLoc; AngleBracketTracker::Priority Priority; unsigned short ParenCount, BracketCount, BraceCount; bool isActive(Parser &P) const { return P.ParenCount == ParenCount && P.BracketCount == BracketCount && P.BraceCount == BraceCount; } bool isActiveOrNested(Parser &P) const { return isActive(P) || P.ParenCount > ParenCount || P.BracketCount > BracketCount || P.BraceCount > BraceCount; } }; SmallVector<Loc, 8> Locs; /// Add an expression that might have been intended to be a template name. /// In the case of ambiguity, we arbitrarily select the innermost such /// expression, for example in 'foo < bar < baz', 'bar' is the current /// candidate. No attempt is made to track that 'foo' is also a candidate /// for the case where we see a second suspicious '>' token. 
    void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
             Priority Prio) {
      if (!Locs.empty() && Locs.back().isActive(P)) {
        if (Locs.back().Priority <= Prio) {
          Locs.back().TemplateName = TemplateName;
          Locs.back().LessLoc = LessLoc;
          Locs.back().Priority = Prio;
        }
      } else {
        Locs.push_back({TemplateName, LessLoc, Prio, P.ParenCount,
                        P.BracketCount, P.BraceCount});
      }
    }

    /// Mark the current potential missing template location as having been
    /// handled (this happens if we pass a "corresponding" '>' or '>>' token
    /// or leave a bracket scope).
    void clear(Parser &P) {
      while (!Locs.empty() && Locs.back().isActiveOrNested(P))
        Locs.pop_back();
    }

    /// Get the current enclosing expression that might have been intended to
    /// be a template name.
    Loc *getCurrent(Parser &P) {
      if (!Locs.empty() && Locs.back().isActive(P))
        return &Locs.back();
      return nullptr;
    }
  };

  AngleBracketTracker AngleBrackets;

  IdentifierInfo *getSEHExceptKeyword();

  /// True if we are within an Objective-C container while parsing C-like decls.
  ///
  /// This is necessary because Sema thinks we have left the container
  /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
  /// be NULL.
  bool ParsingInObjCContainer;

  /// Whether to skip parsing of function bodies.
  ///
  /// This option can be used, for example, to speed up searches for
  /// declarations/definitions when indexing.
  bool SkipFunctionBodies;

  /// The location of the expression statement that is being parsed right now.
  /// Used to determine if an expression that is being parsed is a statement or
  /// just a regular sub-expression.
  SourceLocation ExprStatementTokLoc;

  /// Flags describing a context in which we're parsing a statement.
  enum class ParsedStmtContext {
    /// This context permits declarations in language modes where declarations
    /// are not statements.
    AllowDeclarationsInC = 0x1,
    /// This context permits standalone OpenMP directives.
AllowStandaloneOpenMPDirectives = 0x2, /// This context is at the top level of a GNU statement expression. InStmtExpr = 0x4, /// The context of a regular substatement. SubStmt = 0, /// The context of a compound-statement. Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives, LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr) }; /// Act on an expression statement that might be the last statement in a /// GNU statement expression. Checks whether we are actually at the end of /// a statement expression and builds a suitable expression statement. StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx); public: Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies); ~Parser() override; const LangOptions &getLangOpts() const { return PP.getLangOpts(); } const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); } Preprocessor &getPreprocessor() const { return PP; } Sema &getActions() const { return Actions; } AttributeFactory &getAttrFactory() { return AttrFactory; } const Token &getCurToken() const { return Tok; } Scope *getCurScope() const { return Actions.getCurScope(); } void incrementMSManglingNumber() const { return Actions.incrementMSManglingNumber(); } Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); } // Type forwarding. All of these are statically 'void*', but they may all be // different actual classes based on the actions in place. typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists; typedef Sema::FullExprArg FullExprArg; // Parsing methods. /// Initialize - Warm up the parser. /// void Initialize(); /// Parse the first top-level declaration in a translation unit. bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result); /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if /// the EOF was encountered. 
bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false); bool ParseTopLevelDecl() { DeclGroupPtrTy Result; return ParseTopLevelDecl(Result); } /// ConsumeToken - Consume the current 'peek token' and lex the next one. /// This does not work with special tokens: string literals, code completion, /// annotation tokens and balanced tokens must be handled using the specific /// consume methods. /// Returns the location of the consumed token. SourceLocation ConsumeToken() { assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } bool TryConsumeToken(tok::TokenKind Expected) { if (Tok.isNot(Expected)) return false; assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return true; } bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) { if (!TryConsumeToken(Expected)) return false; Loc = PrevTokLocation; return true; } /// ConsumeAnyToken - Dispatch to the right Consume* method based on the /// current token type. This should only be used in cases where the type of /// the token really isn't known, e.g. in error recovery. SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) { if (isTokenParen()) return ConsumeParen(); if (isTokenBracket()) return ConsumeBracket(); if (isTokenBrace()) return ConsumeBrace(); if (isTokenStringLiteral()) return ConsumeStringToken(); if (Tok.is(tok::code_completion)) return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken() : handleUnexpectedCodeCompletionToken(); if (Tok.isAnnotation()) return ConsumeAnnotationToken(); return ConsumeToken(); } SourceLocation getEndOfPreviousToken() { return PP.getLocForEndOfToken(PrevTokLocation); } /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds /// to the given nullability kind. 
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) { return Actions.getNullabilityKeyword(nullability); } private: //===--------------------------------------------------------------------===// // Low-Level token peeking and consumption methods. // /// isTokenParen - Return true if the cur token is '(' or ')'. bool isTokenParen() const { return Tok.isOneOf(tok::l_paren, tok::r_paren); } /// isTokenBracket - Return true if the cur token is '[' or ']'. bool isTokenBracket() const { return Tok.isOneOf(tok::l_square, tok::r_square); } /// isTokenBrace - Return true if the cur token is '{' or '}'. bool isTokenBrace() const { return Tok.isOneOf(tok::l_brace, tok::r_brace); } /// isTokenStringLiteral - True if this token is a string-literal. bool isTokenStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); } /// isTokenSpecial - True if this token requires special consumption methods. bool isTokenSpecial() const { return isTokenStringLiteral() || isTokenParen() || isTokenBracket() || isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation(); } /// Returns true if the current token is '=' or is a type of '='. /// For typos, give a fixit to '=' bool isTokenEqualOrEqualTypo(); /// Return the current token to the token stream and make the given /// token the current token. void UnconsumeToken(Token &Consumed) { Token Next = Tok; PP.EnterToken(Consumed, /*IsReinject*/true); PP.Lex(Tok); PP.EnterToken(Next, /*IsReinject*/true); } SourceLocation ConsumeAnnotationToken() { assert(Tok.isAnnotation() && "wrong consume method"); SourceLocation Loc = Tok.getLocation(); PrevTokLocation = Tok.getAnnotationEndLoc(); PP.Lex(Tok); return Loc; } /// ConsumeParen - This consume method keeps the paren count up-to-date. 
/// SourceLocation ConsumeParen() { assert(isTokenParen() && "wrong consume method"); if (Tok.getKind() == tok::l_paren) ++ParenCount; else if (ParenCount) { AngleBrackets.clear(*this); --ParenCount; // Don't let unbalanced )'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBracket - This consume method keeps the bracket count up-to-date. /// SourceLocation ConsumeBracket() { assert(isTokenBracket() && "wrong consume method"); if (Tok.getKind() == tok::l_square) ++BracketCount; else if (BracketCount) { AngleBrackets.clear(*this); --BracketCount; // Don't let unbalanced ]'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBrace - This consume method keeps the brace count up-to-date. /// SourceLocation ConsumeBrace() { assert(isTokenBrace() && "wrong consume method"); if (Tok.getKind() == tok::l_brace) ++BraceCount; else if (BraceCount) { AngleBrackets.clear(*this); --BraceCount; // Don't let unbalanced }'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeStringToken - Consume the current 'peek token', lexing a new one /// and returning the token kind. This method is specific to strings, as it /// handles string literal concatenation, as per C99 5.1.1.2, translation /// phase #6. SourceLocation ConsumeStringToken() { assert(isTokenStringLiteral() && "Should only consume string literals with this method"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// Consume the current code-completion token. /// /// This routine can be called to consume the code-completion token and /// continue processing in special cases where \c cutOffParsing() isn't /// desired, such as token caching or completion with lookahead. 
  SourceLocation ConsumeCodeCompletionToken() {
    assert(Tok.is(tok::code_completion));
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// \brief When we are consuming a code-completion token without having
  /// matched specific position in the grammar, provide code-completion results
  /// based on context.
  ///
  /// \returns the source location of the code-completion token.
  SourceLocation handleUnexpectedCodeCompletionToken();

  /// Abruptly cut off parsing; mainly used when we have reached the
  /// code-completion point.
  void cutOffParsing() {
    if (PP.isCodeCompletionEnabled())
      PP.setCodeCompletionReached();
    // Cut off parsing by acting as if we reached the end-of-file.
    Tok.setKind(tok::eof);
  }

  /// Determine if we're at the end of the file or at a transition
  /// between modules.
  bool isEofOrEom() {
    tok::TokenKind Kind = Tok.getKind();
    return Kind == tok::eof || Kind == tok::annot_module_begin ||
           Kind == tok::annot_module_end || Kind == tok::annot_module_include;
  }

  /// Checks if the \p Level is valid for use in a fold expression.
  bool isFoldOperator(prec::Level Level) const;

  /// Checks if the \p Kind is a valid operator for fold expressions.
  bool isFoldOperator(tok::TokenKind Kind) const;

  /// Initialize all pragma handlers.
  void initializePragmaHandlers();

  /// Destroy and reset all pragma handlers.
  void resetPragmaHandlers();

  /// Handle the annotation token produced for #pragma unused(...)
  void HandlePragmaUnused();

  /// Handle the annotation token produced for
  /// #pragma GCC visibility...
  void HandlePragmaVisibility();

  /// Handle the annotation token produced for
  /// #pragma pack...
  void HandlePragmaPack();

  /// Handle the annotation token produced for
  /// #pragma ms_struct...
  void HandlePragmaMSStruct();

  /// Handle the annotation token produced for
  /// #pragma comment...
void HandlePragmaMSComment(); void HandlePragmaMSPointersToMembers(); void HandlePragmaMSVtorDisp(); void HandlePragmaMSPragma(); bool HandlePragmaMSSection(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSSegment(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSInitSeg(StringRef PragmaName, SourceLocation PragmaLocation); /// Handle the annotation token produced for /// #pragma align... void HandlePragmaAlign(); /// Handle the annotation token produced for /// #pragma clang __debug dump... void HandlePragmaDump(); /// Handle the annotation token produced for /// #pragma weak id... void HandlePragmaWeak(); /// Handle the annotation token produced for /// #pragma weak id = id... void HandlePragmaWeakAlias(); /// Handle the annotation token produced for /// #pragma redefine_extname... void HandlePragmaRedefineExtname(); /// Handle the annotation token produced for /// #pragma STDC FP_CONTRACT... void HandlePragmaFPContract(); /// Handle the annotation token produced for /// #pragma STDC FENV_ACCESS... void HandlePragmaFEnvAccess(); /// \brief Handle the annotation token produced for /// #pragma clang fp ... void HandlePragmaFP(); /// Handle the annotation token produced for /// #pragma OPENCL EXTENSION... void HandlePragmaOpenCLExtension(); /// Handle the annotation token produced for /// #pragma clang __debug captured StmtResult HandlePragmaCaptured(); /// Handle the annotation token produced for /// #pragma clang loop and #pragma unroll. bool HandlePragmaLoopHint(LoopHint &Hint); bool ParsePragmaAttributeSubjectMatchRuleSet( attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc); void HandlePragmaAttribute(); /// GetLookAheadToken - This peeks ahead N tokens and returns that token /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1) /// returns the token after Tok, etc. 
/// /// Note that this differs from the Preprocessor's LookAhead method, because /// the Parser always has one token lexed that the preprocessor doesn't. /// const Token &GetLookAheadToken(unsigned N) { if (N == 0 || Tok.is(tok::eof)) return Tok; return PP.LookAhead(N-1); } public: /// NextToken - This peeks ahead one token and returns it without /// consuming it. const Token &NextToken() { return PP.LookAhead(0); } /// getTypeAnnotation - Read a parsed type out of an annotation token. static ParsedType getTypeAnnotation(const Token &Tok) { return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue()); } private: static void setTypeAnnotation(Token &Tok, ParsedType T) { Tok.setAnnotationValue(T.getAsOpaquePtr()); } static NamedDecl *getNonTypeAnnotation(const Token &Tok) { return static_cast<NamedDecl*>(Tok.getAnnotationValue()); } static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) { Tok.setAnnotationValue(ND); } static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) { return static_cast<IdentifierInfo*>(Tok.getAnnotationValue()); } static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) { Tok.setAnnotationValue(ND); } /// Read an already-translated primary expression out of an annotation /// token. static ExprResult getExprAnnotation(const Token &Tok) { return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue()); } /// Set the primary expression corresponding to the given annotation /// token. static void setExprAnnotation(Token &Tok, ExprResult ER) { Tok.setAnnotationValue(ER.getAsOpaquePointer()); } public: // If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to // find a type name by attempting typo correction. bool TryAnnotateTypeOrScopeToken(); bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS, bool IsNewScope); bool TryAnnotateCXXScopeToken(bool EnteringContext = false); private: enum AnnotatedNameKind { /// Annotation has failed and emitted an error. 
ANK_Error, /// The identifier is a tentatively-declared name. ANK_TentativeDecl, /// The identifier is a template name. FIXME: Add an annotation for that. ANK_TemplateName, /// The identifier can't be resolved. ANK_Unresolved, /// Annotation was successful. ANK_Success }; AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr); /// Push a tok::annot_cxxscope token onto the token stream. void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation); /// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens, /// replacing them with the non-context-sensitive keywords. This returns /// true if the token was replaced. bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid) { if (!getLangOpts().AltiVec && !getLangOpts().ZVector) return false; if (Tok.getIdentifierInfo() != Ident_vector && Tok.getIdentifierInfo() != Ident_bool && (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel)) return false; return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid); } /// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector /// identifier token, replacing it with the non-context-sensitive __vector. /// This returns true if the token was replaced. bool TryAltiVecVectorToken() { if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) || Tok.getIdentifierInfo() != Ident_vector) return false; return TryAltiVecVectorTokenOutOfLine(); } bool TryAltiVecVectorTokenOutOfLine(); bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid); /// Returns true if the current token is the identifier 'instancetype'. /// /// Should only be used in Objective-C language modes. 
bool isObjCInstancetype() { assert(getLangOpts().ObjC); if (Tok.isAnnotation()) return false; if (!Ident_instancetype) Ident_instancetype = PP.getIdentifierInfo("instancetype"); return Tok.getIdentifierInfo() == Ident_instancetype; } /// TryKeywordIdentFallback - For compatibility with system headers using /// keywords as identifiers, attempt to convert the current token to an /// identifier and optionally disable the keyword for the remainder of the /// translation unit. This returns false if the token was not replaced, /// otherwise emits a diagnostic and returns true. bool TryKeywordIdentFallback(bool DisableKeyword); /// Get the TemplateIdAnnotation from the token. TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok); /// TentativeParsingAction - An object that is used as a kind of "tentative /// parsing transaction". It gets instantiated to mark the token position and /// after the token consumption is done, Commit() or Revert() is called to /// either "commit the consumed tokens" or revert to the previously marked /// token position. Example: /// /// TentativeParsingAction TPA(*this); /// ConsumeToken(); /// .... 
/// TPA.Revert(); /// class TentativeParsingAction { Parser &P; PreferredTypeBuilder PrevPreferredType; Token PrevTok; size_t PrevTentativelyDeclaredIdentifierCount; unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount; bool isActive; public: explicit TentativeParsingAction(Parser& p) : P(p) { PrevPreferredType = P.PreferredType; PrevTok = P.Tok; PrevTentativelyDeclaredIdentifierCount = P.TentativelyDeclaredIdentifiers.size(); PrevParenCount = P.ParenCount; PrevBracketCount = P.BracketCount; PrevBraceCount = P.BraceCount; P.PP.EnableBacktrackAtThisPos(); isActive = true; } void Commit() { assert(isActive && "Parsing action was finished!"); P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.PP.CommitBacktrackedTokens(); isActive = false; } void Revert() { assert(isActive && "Parsing action was finished!"); P.PP.Backtrack(); P.PreferredType = PrevPreferredType; P.Tok = PrevTok; P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.ParenCount = PrevParenCount; P.BracketCount = PrevBracketCount; P.BraceCount = PrevBraceCount; isActive = false; } ~TentativeParsingAction() { assert(!isActive && "Forgot to call Commit or Revert!"); } }; /// A TentativeParsingAction that automatically reverts in its destructor. /// Useful for disambiguation parses that will always be reverted. class RevertingTentativeParsingAction : private Parser::TentativeParsingAction { public: RevertingTentativeParsingAction(Parser &P) : Parser::TentativeParsingAction(P) {} ~RevertingTentativeParsingAction() { Revert(); } }; class UnannotatedTentativeParsingAction; /// ObjCDeclContextSwitch - An object used to switch context from /// an objective-c decl context to its enclosing decl context and /// back. 
class ObjCDeclContextSwitch { Parser &P; Decl *DC; SaveAndRestore<bool> WithinObjCContainer; public: explicit ObjCDeclContextSwitch(Parser &p) : P(p), DC(p.getObjCDeclContext()), WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) { if (DC) P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC)); } ~ObjCDeclContextSwitch() { if (DC) P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC)); } }; /// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the /// input. If so, it is consumed and false is returned. /// /// If a trivial punctuator misspelling is encountered, a FixIt error /// diagnostic is issued and false is returned after recovery. /// /// If the input is malformed, this emits the specified diagnostic and true is /// returned. bool ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned Diag = diag::err_expected, StringRef DiagMsg = ""); /// The parser expects a semicolon and, if present, will consume it. /// /// If the next token is not a semicolon, this emits the specified diagnostic, /// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior /// to the semicolon, consumes that extra token. bool ExpectAndConsumeSemi(unsigned DiagID); /// The kind of extra semi diagnostic to emit. enum ExtraSemiKind { OutsideFunction = 0, InsideStruct = 1, InstanceVariableList = 2, AfterMemberFunctionDefinition = 3 }; /// Consume any extra semi-colons until the end of the line. void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified); /// Return false if the next token is an identifier. An 'expected identifier' /// error is emitted otherwise. /// /// The parser tries to recover from the error by checking if the next token /// is a C++ keyword when parsing Objective-C++. Return false if the recovery /// was successful. 
bool expectIdentifier(); public: //===--------------------------------------------------------------------===// // Scope manipulation /// ParseScope - Introduces a new scope for parsing. The kind of /// scope is determined by ScopeFlags. Objects of this type should /// be created on the stack to coincide with the position where the /// parser enters the new scope, and this object's constructor will /// create that new scope. Similarly, once the object is destroyed /// the parser will exit the scope. class ParseScope { Parser *Self; ParseScope(const ParseScope &) = delete; void operator=(const ParseScope &) = delete; public: // ParseScope - Construct a new object to manage a scope in the // parser Self where the new Scope is created with the flags // ScopeFlags, but only when we aren't about to enter a compound statement. ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true, bool BeforeCompoundStmt = false) : Self(Self) { if (EnteredScope && !BeforeCompoundStmt) Self->EnterScope(ScopeFlags); else { if (BeforeCompoundStmt) Self->incrementMSManglingNumber(); this->Self = nullptr; } } // Exit - Exit the scope associated with this object now, rather // than waiting until the object is destroyed. void Exit() { if (Self) { Self->ExitScope(); Self = nullptr; } } ~ParseScope() { Exit(); } }; /// EnterScope - Start a new scope. void EnterScope(unsigned ScopeFlags); /// ExitScope - Pop a scope off the scope stack. void ExitScope(); private: /// RAII object used to modify the scope flags for the current scope. class ParseScopeFlags { Scope *CurScope; unsigned OldFlags; ParseScopeFlags(const ParseScopeFlags &) = delete; void operator=(const ParseScopeFlags &) = delete; public: ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true); ~ParseScopeFlags(); }; //===--------------------------------------------------------------------===// // Diagnostic Emission and Error recovery. 
public: DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID); DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID); DiagnosticBuilder Diag(unsigned DiagID) { return Diag(Tok, DiagID); } private: void SuggestParentheses(SourceLocation Loc, unsigned DK, SourceRange ParenRange); void CheckNestedObjCContexts(SourceLocation AtLoc); public: /// Control flags for SkipUntil functions. enum SkipUntilFlags { StopAtSemi = 1 << 0, ///< Stop skipping at semicolon /// Stop skipping at specified token, but don't skip the token itself StopBeforeMatch = 1 << 1, StopAtCodeCompletion = 1 << 2 ///< Stop at code completion }; friend constexpr SkipUntilFlags operator|(SkipUntilFlags L, SkipUntilFlags R) { return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) | static_cast<unsigned>(R)); } /// SkipUntil - Read tokens until we get to the specified token, then consume /// it (unless StopBeforeMatch is specified). Because we cannot guarantee /// that the token will ever occur, this skips to the next token, or to some /// likely good stopping point. If Flags has StopAtSemi flag, skipping will /// stop at a ';' character. /// /// If SkipUntil finds the specified token, it returns true, otherwise it /// returns false. bool SkipUntil(tok::TokenKind T, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) { return SkipUntil(llvm::makeArrayRef(T), Flags); } bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) { tok::TokenKind TokArray[] = {T1, T2}; return SkipUntil(TokArray, Flags); } bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) { tok::TokenKind TokArray[] = {T1, T2, T3}; return SkipUntil(TokArray, Flags); } bool SkipUntil(ArrayRef<tok::TokenKind> Toks, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)); /// SkipMalformedDecl - Read tokens until we get to some likely good stopping /// point for skipping past a simple-declaration. 
void SkipMalformedDecl(); /// The location of the first statement inside an else that might /// have a missleading indentation. If there is no /// MisleadingIndentationChecker on an else active, this location is invalid. SourceLocation MisleadingIndentationElseLoc; private: //===--------------------------------------------------------------------===// // Lexing and parsing of C++ inline methods. struct ParsingClass; /// [class.mem]p1: "... the class is regarded as complete within /// - function bodies /// - default arguments /// - exception-specifications (TODO: C++0x) /// - and brace-or-equal-initializers for non-static data members /// (including such things in nested classes)." /// LateParsedDeclarations build the tree of those elements so they can /// be parsed after parsing the top-level class. class LateParsedDeclaration { public: virtual ~LateParsedDeclaration(); virtual void ParseLexedMethodDeclarations(); virtual void ParseLexedMemberInitializers(); virtual void ParseLexedMethodDefs(); virtual void ParseLexedAttributes(); virtual void ParseLexedPragmas(); }; /// Inner node of the LateParsedDeclaration tree that parses /// all its members recursively. class LateParsedClass : public LateParsedDeclaration { public: LateParsedClass(Parser *P, ParsingClass *C); ~LateParsedClass() override; void ParseLexedMethodDeclarations() override; void ParseLexedMemberInitializers() override; void ParseLexedMethodDefs() override; void ParseLexedAttributes() override; void ParseLexedPragmas() override; private: Parser *Self; ParsingClass *Class; }; /// Contains the lexed tokens of an attribute with arguments that /// may reference member variables and so need to be parsed at the /// end of the class declaration after parsing all other member /// member declarations. /// FIXME: Perhaps we should change the name of LateParsedDeclaration to /// LateParsedTokens. 
  struct LateParsedAttribute : public LateParsedDeclaration {
    Parser *Self;
    CachedTokens Toks;
    IdentifierInfo &AttrName;
    IdentifierInfo *MacroII = nullptr;
    SourceLocation AttrNameLoc;
    SmallVector<Decl*, 2> Decls;

    explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                                 SourceLocation Loc)
      : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

    void ParseLexedAttributes() override;

    void addDecl(Decl *D) { Decls.push_back(D); }
  };

  /// Contains the lexed tokens of a pragma with arguments that
  /// may reference member variables and so need to be parsed at the
  /// end of the class declaration after parsing all other member
  /// declarations.
  class LateParsedPragma : public LateParsedDeclaration {
    Parser *Self = nullptr;
    AccessSpecifier AS = AS_none;
    CachedTokens Toks;

  public:
    explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
        : Self(P), AS(AS) {}

    void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
    const CachedTokens &toks() const { return Toks; }
    AccessSpecifier getAccessSpecifier() const { return AS; }

    void ParseLexedPragmas() override;
  };

  // A list of late-parsed attributes.  Used by ParseGNUAttributes.
  class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
  public:
    LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }

    bool parseSoon() { return ParseSoon; }

  private:
    bool ParseSoon;  // Are we planning to parse these shortly after creation?
  };

  /// Contains the lexed tokens of a member function definition
  /// which needs to be parsed at the end of the class declaration
  /// after parsing all other member declarations.
  struct LexedMethod : public LateParsedDeclaration {
    Parser *Self;
    Decl *D;
    CachedTokens Toks;

    /// Whether this member function had an associated template
    /// scope. When true, D is a template declaration.
    /// Otherwise, it is a member function declaration.
    bool TemplateScope;

    explicit LexedMethod(Parser* P, Decl *MD)
      : Self(P), D(MD), TemplateScope(false) {}

    void ParseLexedMethodDefs() override;
  };

  /// LateParsedDefaultArgument - Keeps track of a parameter that may
  /// have a default argument that cannot be parsed yet because it
  /// occurs within a member function declaration inside the class
  /// (C++ [class.mem]p2).
  struct LateParsedDefaultArgument {
    explicit LateParsedDefaultArgument(Decl *P,
                                       std::unique_ptr<CachedTokens> Toks = nullptr)
      : Param(P), Toks(std::move(Toks)) { }

    /// Param - The parameter declaration for this parameter.
    Decl *Param;

    /// Toks - The sequence of tokens that comprises the default
    /// argument expression, not including the '=' or the terminating
    /// ')' or ','. This will be NULL for parameters that have no
    /// default argument.
    std::unique_ptr<CachedTokens> Toks;
  };

  /// LateParsedMethodDeclaration - A method declaration inside a class that
  /// contains at least one entity whose parsing needs to be delayed
  /// until the class itself is completely-defined, such as a default
  /// argument (C++ [class.mem]p2).
  struct LateParsedMethodDeclaration : public LateParsedDeclaration {
    explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
      : Self(P), Method(M), TemplateScope(false),
        ExceptionSpecTokens(nullptr) {}

    void ParseLexedMethodDeclarations() override;

    Parser* Self;

    /// Method - The method declaration.
    Decl *Method;

    /// Whether this member function had an associated template
    /// scope. When true, D is a template declaration.
    /// Otherwise, it is a member function declaration.
    bool TemplateScope;

    /// DefaultArgs - Contains the parameters of the function and
    /// their default arguments.  At least one of the parameters will
    /// have a default argument, but all of the parameters of the
    /// method will be stored so that they can be reintroduced into
    /// scope at the appropriate times.
SmallVector<LateParsedDefaultArgument, 8> DefaultArgs; /// The set of tokens that make up an exception-specification that /// has not yet been parsed. CachedTokens *ExceptionSpecTokens; }; /// LateParsedMemberInitializer - An initializer for a non-static class data /// member whose parsing must to be delayed until the class is completely /// defined (C++11 [class.mem]p2). struct LateParsedMemberInitializer : public LateParsedDeclaration { LateParsedMemberInitializer(Parser *P, Decl *FD) : Self(P), Field(FD) { } void ParseLexedMemberInitializers() override; Parser *Self; /// Field - The field declaration. Decl *Field; /// CachedTokens - The sequence of tokens that comprises the initializer, /// including any leading '='. CachedTokens Toks; }; /// LateParsedDeclarationsContainer - During parsing of a top (non-nested) /// C++ class, its method declarations that contain parts that won't be /// parsed until after the definition is completed (C++ [class.mem]p2), /// the method declarations and possibly attached inline definitions /// will be stored here with the tokens that will be parsed to create those /// entities. typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer; /// Representation of a class that has been parsed, including /// any member function declarations or definitions that need to be /// parsed after the corresponding top-level class is complete. struct ParsingClass { ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface) : TopLevelClass(TopLevelClass), TemplateScope(false), IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { } /// Whether this is a "top-level" class, meaning that it is /// not nested within another class. bool TopLevelClass : 1; /// Whether this class had an associated template /// scope. When true, TagOrTemplate is a template declaration; /// otherwise, it is a tag declaration. bool TemplateScope : 1; /// Whether this class is an __interface. 
bool IsInterface : 1; /// The class or class template whose definition we are parsing. Decl *TagOrTemplate; /// LateParsedDeclarations - Method declarations, inline definitions and /// nested classes that contain pieces whose parsing will be delayed until /// the top-level class is fully defined. LateParsedDeclarationsContainer LateParsedDeclarations; }; /// The stack of classes that is currently being /// parsed. Nested and local classes will be pushed onto this stack /// when they are parsed, and removed afterward. std::stack<ParsingClass *> ClassStack; ParsingClass &getCurrentClass() { assert(!ClassStack.empty() && "No lexed method stacks!"); return *ClassStack.top(); } /// RAII object used to manage the parsing of a class definition. class ParsingClassDefinition { Parser &P; bool Popped; Sema::ParsingClassState State; public: ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface) : P(P), Popped(false), State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) { } /// Pop this class of the stack. void Pop() { assert(!Popped && "Nested class has already been popped"); Popped = true; P.PopParsingClass(State); } ~ParsingClassDefinition() { if (!Popped) P.PopParsingClass(State); } }; /// Contains information about any template-specific /// information that has been parsed prior to parsing declaration /// specifiers. struct ParsedTemplateInfo { ParsedTemplateInfo() : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { } ParsedTemplateInfo(TemplateParameterLists *TemplateParams, bool isSpecialization, bool lastParameterListWasEmpty = false) : Kind(isSpecialization? 
ExplicitSpecialization : Template), TemplateParams(TemplateParams), LastParameterListWasEmpty(lastParameterListWasEmpty) { } explicit ParsedTemplateInfo(SourceLocation ExternLoc, SourceLocation TemplateLoc) : Kind(ExplicitInstantiation), TemplateParams(nullptr), ExternLoc(ExternLoc), TemplateLoc(TemplateLoc), LastParameterListWasEmpty(false){ } /// The kind of template we are parsing. enum { /// We are not parsing a template at all. NonTemplate = 0, /// We are parsing a template declaration. Template, /// We are parsing an explicit specialization. ExplicitSpecialization, /// We are parsing an explicit instantiation. ExplicitInstantiation } Kind; /// The template parameter lists, for template declarations /// and explicit specializations. TemplateParameterLists *TemplateParams; /// The location of the 'extern' keyword, if any, for an explicit /// instantiation SourceLocation ExternLoc; /// The location of the 'template' keyword, for an explicit /// instantiation. SourceLocation TemplateLoc; /// Whether the last template parameter list was empty. 
bool LastParameterListWasEmpty; SourceRange getSourceRange() const LLVM_READONLY; }; void LexTemplateFunctionForLateParsing(CachedTokens &Toks); void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT); static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT); static void LateTemplateParserCleanupCallback(void *P); Sema::ParsingClassState PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface); void DeallocateParsedClasses(ParsingClass *Class); void PopParsingClass(Sema::ParsingClassState); enum CachedInitKind { CIK_DefaultArgument, CIK_DefaultInitializer }; NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS, ParsedAttributes &AccessAttrs, ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo, const VirtSpecifiers &VS, SourceLocation PureSpecLoc); void ParseCXXNonStaticMemberInitializer(Decl *VarD); void ParseLexedAttributes(ParsingClass &Class); void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D, bool EnterScope, bool OnDefinition); void ParseLexedAttribute(LateParsedAttribute &LA, bool EnterScope, bool OnDefinition); void ParseLexedMethodDeclarations(ParsingClass &Class); void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM); void ParseLexedMethodDefs(ParsingClass &Class); void ParseLexedMethodDef(LexedMethod &LM); void ParseLexedMemberInitializers(ParsingClass &Class); void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI); void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod); void ParseLexedPragmas(ParsingClass &Class); void ParseLexedPragma(LateParsedPragma &LP); bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks); bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK); bool ConsumeAndStoreConditional(CachedTokens &Toks); bool ConsumeAndStoreUntil(tok::TokenKind T1, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true) { return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken); } bool 
ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true); //===--------------------------------------------------------------------===// // C99 6.9: External Definitions. struct ParsedAttributesWithRange : ParsedAttributes { ParsedAttributesWithRange(AttributeFactory &factory) : ParsedAttributes(factory) {} void clear() { ParsedAttributes::clear(); Range = SourceRange(); } SourceRange Range; }; struct ParsedAttributesViewWithRange : ParsedAttributesView { ParsedAttributesViewWithRange() : ParsedAttributesView() {} void clearListOnly() { ParsedAttributesView::clearListOnly(); Range = SourceRange(); } SourceRange Range; }; DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr); bool isDeclarationAfterDeclarator(); bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator); DeclGroupPtrTy ParseDeclarationOrFunctionDefinition( ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr, AccessSpecifier AS = AS_none); DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs, ParsingDeclSpec &DS, AccessSpecifier AS); void SkipFunctionBody(); Decl *ParseFunctionDefinition(ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), LateParsedAttrList *LateParsedAttrs = nullptr); void ParseKNRParamDeclarations(Declarator &D); // EndLoc is filled with the location of the last token of the simple-asm. 
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc); ExprResult ParseAsmStringLiteral(bool ForAsmLabel); // Objective-C External Declarations void MaybeSkipAttributes(tok::ObjCKeywordKind Kind); DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs); DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc); Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc, ParsedAttributes &prefixAttrs); class ObjCTypeParamListScope; ObjCTypeParamList *parseObjCTypeParamList(); ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs( ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc, SmallVectorImpl<IdentifierLocPair> &protocolIdents, SourceLocation &rAngleLoc, bool mayBeProtocolList = true); void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc, BalancedDelimiterTracker &T, SmallVectorImpl<Decl *> &AllIvarDecls, bool RBraceMissing); void ParseObjCClassInstanceVariables(Decl *interfaceDecl, tok::ObjCKeywordKind visibility, SourceLocation atLoc); bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P, SmallVectorImpl<SourceLocation> &PLocs, bool WarnOnDeclarations, bool ForObjCContainer, SourceLocation &LAngleLoc, SourceLocation &EndProtoLoc, bool consumeLastToken); /// Parse the first angle-bracket-delimited clause for an /// Objective-C object or object pointer type, which may be either /// type arguments or protocol qualifiers. void parseObjCTypeArgsOrProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken, bool warnOnIncompleteProtocols); /// Parse either Objective-C type arguments or protocol qualifiers; if the /// former, also parse protocol qualifiers afterward. 
void parseObjCTypeArgsAndProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken); /// Parse a protocol qualifier type such as '<NSCopying>', which is /// an anachronistic way of writing 'id<NSCopying>'. TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc); /// Parse Objective-C type arguments and protocol qualifiers, extending the /// current type with the parsed result. TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc, ParsedType type, bool consumeLastToken, SourceLocation &endLoc); void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey, Decl *CDecl); DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc, ParsedAttributes &prefixAttrs); struct ObjCImplParsingDataRAII { Parser &P; Decl *Dcl; bool HasCFunction; typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer; LateParsedObjCMethodContainer LateParsedObjCMethods; ObjCImplParsingDataRAII(Parser &parser, Decl *D) : P(parser), Dcl(D), HasCFunction(false) { P.CurParsedObjCImpl = this; Finished = false; } ~ObjCImplParsingDataRAII(); void finish(SourceRange AtEnd); bool isFinished() const { return Finished; } private: bool Finished; }; ObjCImplParsingDataRAII *CurParsedObjCImpl; void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl); DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc, ParsedAttributes &Attrs); DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd); Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc); Decl *ParseObjCPropertySynthesize(SourceLocation atLoc); Decl *ParseObjCPropertyDynamic(SourceLocation atLoc); IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation); // Definitions for Objective-c context sensitive keywords 
recognition. enum ObjCTypeQual { objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref, objc_nonnull, objc_nullable, objc_null_unspecified, objc_NumQuals }; IdentifierInfo *ObjCTypeQuals[objc_NumQuals]; bool isTokIdentifier_in() const; ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx, ParsedAttributes *ParamAttrs); void ParseObjCMethodRequirement(); Decl *ParseObjCMethodPrototype( tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition = true); Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType, tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition=true); void ParseObjCPropertyAttribute(ObjCDeclSpec &DS); Decl *ParseObjCMethodDefinition(); public: //===--------------------------------------------------------------------===// // C99 6.5: Expressions. /// TypeCastState - State whether an expression is or may be a type cast. enum TypeCastState { NotTypeCast = 0, MaybeTypeCast, IsTypeCast }; ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpressionInExprEvalContext( TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseCaseExpression(SourceLocation CaseLoc); ExprResult ParseConstraintExpression(); // Expr that doesn't include commas. 
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks, unsigned &NumLineToksConsumed, bool IsUnevaluated); private: ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc); ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc); ExprResult ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec); ExprResult ParseCastExpression(bool isUnaryExpression, bool isAddressOfOperand, bool &NotCastExpr, TypeCastState isTypeCast, bool isVectorLiteral = false); ExprResult ParseCastExpression(bool isUnaryExpression, bool isAddressOfOperand = false, TypeCastState isTypeCast = NotTypeCast, bool isVectorLiteral = false); /// Returns true if the next token cannot start an expression. bool isNotExpressionStart(); /// Returns true if the next token would start a postfix-expression /// suffix. bool isPostfixExpressionSuffixStart() { tok::TokenKind K = Tok.getKind(); return (K == tok::l_square || K == tok::l_paren || K == tok::period || K == tok::arrow || K == tok::plusplus || K == tok::minusminus); } bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less); void checkPotentialAngleBracket(ExprResult &PotentialTemplateName); bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &, const Token &OpToken); bool checkPotentialAngleBracketDelimiter(const Token &OpToken) { if (auto *Info = AngleBrackets.getCurrent(*this)) return checkPotentialAngleBracketDelimiter(*Info, OpToken); return false; } ExprResult ParsePostfixExpressionSuffix(ExprResult LHS); ExprResult ParseUnaryExprOrTypeTraitExpression(); ExprResult ParseBuiltinPrimaryExpression(); ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok, bool &isCastExpr, ParsedType &CastTy, SourceRange &CastRange); typedef SmallVector<Expr*, 20> ExprListTy; typedef SmallVector<SourceLocation, 20> CommaLocsTy; /// ParseExpressionList - Used for C/C++ (argument-)expression-list. 
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs, llvm::function_ref<void()> ExpressionStarts = llvm::function_ref<void()>()); /// ParseSimpleExpressionList - A simple comma-separated list of expressions, /// used for misc language extensions. bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs); /// ParenParseOption - Control what ParseParenExpression will parse. enum ParenParseOption { SimpleExpr, // Only parse '(' expression ')' FoldExpr, // Also allow fold-expression <anything> CompoundStmt, // Also allow '(' compound-statement ')' CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}' CastExpr // Also allow '(' type-name ')' <anything> }; ExprResult ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr, bool isTypeCast, ParsedType &CastTy, SourceLocation &RParenLoc); ExprResult ParseCXXAmbiguousParenExpression( ParenParseOption &ExprType, ParsedType &CastTy, BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt); ExprResult ParseCompoundLiteralExpression(ParsedType Ty, SourceLocation LParenLoc, SourceLocation RParenLoc); ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false); ExprResult ParseGenericSelectionExpression(); ExprResult ParseObjCBoolLiteral(); ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T); //===--------------------------------------------------------------------===// // C++ Expressions ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand, Token &Replacement); ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false); bool areTokensAdjacent(const Token &A, const Token &B); void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr, bool EnteringContext, IdentifierInfo &II, CXXScopeSpec &SS); bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext, bool *MayBePseudoDestructor 
= nullptr, bool IsTypename = false, IdentifierInfo **LastII = nullptr, bool OnlyNamespace = false, bool InUsingDeclaration = false); //===--------------------------------------------------------------------===// // C++11 5.1.2: Lambda expressions /// Result of tentatively parsing a lambda-introducer. enum class LambdaIntroducerTentativeParse { /// This appears to be a lambda-introducer, which has been fully parsed. Success, /// This is a lambda-introducer, but has not been fully parsed, and this /// function needs to be called again to parse it. Incomplete, /// This is definitely an Objective-C message send expression, rather than /// a lambda-introducer, attribute-specifier, or array designator. MessageSend, /// This is not a lambda-introducer. Invalid, }; // [...] () -> type {...} ExprResult ParseLambdaExpression(); ExprResult TryParseLambdaExpression(); bool ParseLambdaIntroducer(LambdaIntroducer &Intro, LambdaIntroducerTentativeParse *Tentative = nullptr); ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Casts ExprResult ParseCXXCasts(); /// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast. 
ExprResult ParseBuiltinBitCast(); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Type Identification ExprResult ParseCXXTypeid(); //===--------------------------------------------------------------------===// // C++ : Microsoft __uuidof Expression ExprResult ParseCXXUuidof(); //===--------------------------------------------------------------------===// // C++ 5.2.4: C++ Pseudo-Destructor Expressions ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, ParsedType ObjectType); //===--------------------------------------------------------------------===// // C++ 9.3.2: C++ 'this' pointer ExprResult ParseCXXThis(); //===--------------------------------------------------------------------===// // C++ 15: C++ Throw Expression ExprResult ParseThrowExpression(); ExceptionSpecificationType tryParseExceptionSpecification( bool Delayed, SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &DynamicExceptions, SmallVectorImpl<SourceRange> &DynamicExceptionRanges, ExprResult &NoexceptExpr, CachedTokens *&ExceptionSpecTokens); // EndLoc is filled with the location of the last token of the specification. 
ExceptionSpecificationType ParseDynamicExceptionSpecification( SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &Exceptions, SmallVectorImpl<SourceRange> &Ranges); //===--------------------------------------------------------------------===// // C++0x 8: Function declaration trailing-return-type TypeResult ParseTrailingReturnType(SourceRange &Range, bool MayBeFollowedByDirectInit); //===--------------------------------------------------------------------===// // C++ 2.13.5: C++ Boolean Literals ExprResult ParseCXXBoolLiteral(); //===--------------------------------------------------------------------===// // C++ 5.2.3: Explicit type conversion (functional notation) ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS); /// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers. /// This should only be called when the current token is known to be part of /// simple-type-specifier. void ParseCXXSimpleTypeSpecifier(DeclSpec &DS); bool ParseCXXTypeSpecifierSeq(DeclSpec &DS); //===--------------------------------------------------------------------===// // C++ 5.3.4 and 5.3.5: C++ new and delete bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs, Declarator &D); void ParseDirectNewDeclarator(Declarator &D); ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start); ExprResult ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start); //===--------------------------------------------------------------------===// // C++ if/switch/while/for condition expression. struct ForRangeInfo; Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt, SourceLocation Loc, Sema::ConditionKind CK, ForRangeInfo *FRI = nullptr); //===--------------------------------------------------------------------===// // C++ Coroutines ExprResult ParseCoyieldExpression(); //===--------------------------------------------------------------------===// // C99 6.7.8: Initialization. 
/// ParseInitializer /// initializer: [C99 6.7.8] /// assignment-expression /// '{' ... ExprResult ParseInitializer() { if (Tok.isNot(tok::l_brace)) return ParseAssignmentExpression(); return ParseBraceInitializer(); } bool MayBeDesignationStart(); ExprResult ParseBraceInitializer(); ExprResult ParseInitializerWithPotentialDesignator(); //===--------------------------------------------------------------------===// // clang Expressions ExprResult ParseBlockLiteralExpression(); // ^{...} //===--------------------------------------------------------------------===// // Objective-C Expressions ExprResult ParseObjCAtExpression(SourceLocation AtLocation); ExprResult ParseObjCStringLiteral(SourceLocation AtLoc); ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc); ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc); ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue); ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc); ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc); ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc); ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc); ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc); bool isSimpleObjCMessageExpression(); ExprResult ParseObjCMessageExpression(); ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); ExprResult ParseAssignmentExprWithObjCMessageExprStart( SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr); //===--------------------------------------------------------------------===// // C99 6.8: Statements and Blocks. /// A SmallVector of statements, with stack size 32 (as that is the only one /// used.) 
typedef SmallVector<Stmt*, 32> StmtVector; /// A SmallVector of expressions, with stack size 12 (the maximum used.) typedef SmallVector<Expr*, 12> ExprVector; /// A SmallVector of types. typedef SmallVector<ParsedType, 12> TypeVector; StmtResult ParseStatement(SourceLocation *TrailingElseLoc = nullptr, ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt); StmtResult ParseStatementOrDeclaration( StmtVector &Stmts, ParsedStmtContext StmtCtx, SourceLocation *TrailingElseLoc = nullptr); StmtResult ParseStatementOrDeclarationAfterAttributes( StmtVector &Stmts, ParsedStmtContext StmtCtx, SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs); StmtResult ParseExprStatement(ParsedStmtContext StmtCtx); StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs, ParsedStmtContext StmtCtx); StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx, bool MissingCase = false, ExprResult Expr = ExprResult()); StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx); StmtResult ParseCompoundStatement(bool isStmtExpr = false); StmtResult ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags); void ParseCompoundStatementLeadingPragmas(); bool ConsumeNullStmt(StmtVector &Stmts); StmtResult ParseCompoundStatementBody(bool isStmtExpr = false); bool ParseParenExprOrCondition(StmtResult *InitStmt, Sema::ConditionResult &CondResult, SourceLocation Loc, Sema::ConditionKind CK); StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc); StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc); StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc); StmtResult ParseDoStatement(); StmtResult ParseForStatement(SourceLocation *TrailingElseLoc); StmtResult ParseGotoStatement(); StmtResult ParseContinueStatement(); StmtResult ParseBreakStatement(); StmtResult ParseReturnStatement(); StmtResult ParseAsmStatement(bool &msAsm); StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc); StmtResult ParsePragmaLoopHint(StmtVector &Stmts, 
ParsedStmtContext StmtCtx, SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs); /// Describes the behavior that should be taken for an __if_exists /// block. enum IfExistsBehavior { /// Parse the block; this code is always used. IEB_Parse, /// Skip the block entirely; this code is never used. IEB_Skip, /// Parse the block as a dependent block, which may be used in /// some template instantiations but not others. IEB_Dependent }; /// Describes the condition of a Microsoft __if_exists or /// __if_not_exists block. struct IfExistsCondition { /// The location of the initial keyword. SourceLocation KeywordLoc; /// Whether this is an __if_exists block (rather than an /// __if_not_exists block). bool IsIfExists; /// Nested-name-specifier preceding the name. CXXScopeSpec SS; /// The name we're looking for. UnqualifiedId Name; /// The behavior of this __if_exists or __if_not_exists block /// should. IfExistsBehavior Behavior; }; bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result); void ParseMicrosoftIfExistsStatement(StmtVector &Stmts); void ParseMicrosoftIfExistsExternalDeclaration(); void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType, ParsedAttributes &AccessAttrs, AccessSpecifier &CurAS); bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs, bool &InitExprsOk); bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names, SmallVectorImpl<Expr *> &Constraints, SmallVectorImpl<Expr *> &Exprs); //===--------------------------------------------------------------------===// // C++ 6: Statements and Blocks StmtResult ParseCXXTryBlock(); StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false); StmtResult ParseCXXCatchBlock(bool FnCatch = false); //===--------------------------------------------------------------------===// // MS: SEH Statements and Blocks StmtResult ParseSEHTryBlock(); StmtResult ParseSEHExceptBlock(SourceLocation Loc); StmtResult ParseSEHFinallyBlock(SourceLocation Loc); 
StmtResult ParseSEHLeaveStatement(); //===--------------------------------------------------------------------===// // Objective-C Statements StmtResult ParseObjCAtStatement(SourceLocation atLoc, ParsedStmtContext StmtCtx); StmtResult ParseObjCTryStmt(SourceLocation atLoc); StmtResult ParseObjCThrowStmt(SourceLocation atLoc); StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc); StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc); //===--------------------------------------------------------------------===// // C99 6.7: Declarations. /// A context for parsing declaration specifiers. TODO: flesh this /// out, there are other significant restrictions on specifiers than /// would be best implemented in the parser. enum class DeclSpecContext { DSC_normal, // normal context DSC_class, // class context, enables 'friend' DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list DSC_trailing, // C++11 trailing-type-specifier in a trailing return type DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration DSC_top_level, // top-level/namespace declaration context DSC_template_param, // template parameter context DSC_template_type_arg, // template type argument context DSC_objc_method_result, // ObjC method result context, enables 'instancetype' DSC_condition // condition declaration context }; /// Is this a context in which we are parsing just a type-specifier (or /// trailing-type-specifier)? 
static bool isTypeSpecifier(DeclSpecContext DSC) {
  switch (DSC) {
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_condition:
    return false;

  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_type_specifier:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return true;
  }
  // The switch above is exhaustive; reaching here means a new
  // DeclSpecContext enumerator was added without updating this function.
  llvm_unreachable("Missing DeclSpecContext case");
}

/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
  switch (DSC) {
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_type_specifier:
    return true;

  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return false;
  }
  // Exhaustive switch over DeclSpecContext; see note in isTypeSpecifier.
  llvm_unreachable("Missing DeclSpecContext case");
}

/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
  // Location of the ':' in a for-range declaration; invalid when no ':'
  // was seen (i.e. this did not parse as a for-range declaration).
  SourceLocation ColonLoc;
  // The range expression following the ':'.
  ExprResult RangeExpr;
  // True iff a ':' was parsed, marking this as a for-range declaration.
  bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};
struct ForRangeInfo : ForRangeInit {
  // The loop-variable declaration statement of the for-range loop.
  StmtResult LoopVar;
};

DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
                                SourceLocation &DeclEnd,
                                ParsedAttributesWithRange &attrs,
                                SourceLocation *DeclSpecStart = nullptr);
DeclGroupPtrTy
ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
                       ParsedAttributesWithRange &attrs, bool RequireSemi,
                       ForRangeInit *FRI = nullptr,
                       SourceLocation *DeclSpecStart = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
                              SourceLocation *DeclEnd = nullptr,
                              ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(
    Declarator &D,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
    Declarator &D,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
    ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);

/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody(); bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, DeclSpecContext DSC, ParsedAttributesWithRange &Attrs); DeclSpecContext getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context); void ParseDeclarationSpecifiers( DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), AccessSpecifier AS = AS_none, DeclSpecContext DSC = DeclSpecContext::DSC_normal, LateParsedAttrList *LateAttrs = nullptr); bool DiagnoseMissingSemiAfterTagDefinition( DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext, LateParsedAttrList *LateAttrs = nullptr); void ParseSpecifierQualifierList( DeclSpec &DS, AccessSpecifier AS = AS_none, DeclSpecContext DSC = DeclSpecContext::DSC_normal); void ParseObjCTypeQualifierList(ObjCDeclSpec &DS, DeclaratorContext Context); void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, DeclSpecContext DSC); void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl); void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType, Decl *TagDecl); void ParseStructDeclaration( ParsingDeclSpec &DS, llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback); bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false); bool isTypeSpecifierQualifier(); /// isKnownToBeTypeSpecifier - Return true if we know that the specified token /// is definitely a type-specifier. Return false if it isn't part of a type /// specifier or if we're not sure. bool isKnownToBeTypeSpecifier(const Token &Tok) const; /// Return true if we know that we are definitely looking at a /// decl-specifier, and isn't part of an expression such as a function-style /// cast. Return false if it's no a decl-specifier, or we're not sure. 
bool isKnownToBeDeclarationSpecifier() { if (getLangOpts().CPlusPlus) return isCXXDeclarationSpecifier() == TPResult::True; return isDeclarationSpecifier(true); } /// isDeclarationStatement - Disambiguates between a declaration or an /// expression statement, when parsing function bodies. /// Returns true for declaration, false for expression. bool isDeclarationStatement() { if (getLangOpts().CPlusPlus) return isCXXDeclarationStatement(); return isDeclarationSpecifier(true); } /// isForInitDeclaration - Disambiguates between a declaration or an /// expression in the context of the C 'clause-1' or the C++ // 'for-init-statement' part of a 'for' statement. /// Returns true for declaration, false for expression. bool isForInitDeclaration() { if (getLangOpts().OpenMP) Actions.startOpenMPLoop(); if (getLangOpts().CPlusPlus) return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true); return isDeclarationSpecifier(true); } /// Determine whether this is a C++1z for-range-identifier. bool isForRangeIdentifier(); /// Determine whether we are currently at the start of an Objective-C /// class message that appears to be missing the open bracket '['. bool isStartOfObjCClassMessageMissingOpenBracket(); /// Starting with a scope specifier, identifier, or /// template-id that refers to the current class, determine whether /// this is a constructor declarator. bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false); /// Specifies the context in which type-id/expression /// disambiguation will occur. enum TentativeCXXTypeIdContext { TypeIdInParens, TypeIdUnambiguous, TypeIdAsTemplateArgument }; /// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know /// whether the parens contain an expression or a type-id. /// Returns true for a type-id and false for an expression. 
bool isTypeIdInParens(bool &isAmbiguous) {
  if (getLangOpts().CPlusPlus)
    return isCXXTypeId(TypeIdInParens, isAmbiguous);
  // In C there is no ambiguity: a type-id starts with a specifier-qualifier.
  isAmbiguous = false;
  return isTypeSpecifierQualifier();
}
bool isTypeIdInParens() {
  bool isAmbiguous;
  return isTypeIdInParens(isAmbiguous);
}

/// Checks if the current tokens form type-id or expression.
/// It is similar to isTypeIdInParens but does not assume that the type-id
/// is enclosed in parentheses.
bool isTypeIdUnambiguously() {
  bool IsAmbiguous;
  if (getLangOpts().CPlusPlus)
    return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
  return isTypeSpecifierQualifier();
}

/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();

/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);

/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);

struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
  Expression,    ///< Disambiguated as an expression (either kind).
  ConditionDecl, ///< Disambiguated as the declaration form of condition.
  InitStmtDecl,  ///< Disambiguated as a simple-declaration init-statement.
  ForRangeDecl,  ///< Disambiguated as a for-range declaration.
  Error          ///< Can't be any of the above!
};
/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
                                         bool CanBeForRangeDecl);

bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
  bool isAmbiguous;
  return isCXXTypeId(Context, isAmbiguous);
}

/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult { True, False, Ambiguous, Error };

/// Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the trick cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPR_true if this token starts an expression, \c TPR_false if
/// this token starts a type-specifier-seq, or \c TPR_ambiguous if it cannot
/// tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);

/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False, bool *InvalidAsDeclSpec = nullptr); /// Given that isCXXDeclarationSpecifier returns \c TPResult::True or /// \c TPResult::Ambiguous, determine whether the decl-specifier would be /// a type-specifier other than a cv-qualifier. bool isCXXDeclarationSpecifierAType(); /// Determine whether the current token sequence might be /// '<' template-argument-list '>' /// rather than a less-than expression. TPResult isTemplateArgumentList(unsigned TokensToSkip); /// Determine whether an identifier has been tentatively declared as a /// non-type. Such tentative declarations should not be found to name a type /// during a tentative parse, but also should not be annotated as a non-type. bool isTentativelyDeclared(IdentifierInfo *II); // "Tentative parsing" functions, used for disambiguation. If a parsing error // is encountered they will return TPResult::Error. // Returning TPResult::True/False indicates that the ambiguity was // resolved and tentative parsing may stop. TPResult::Ambiguous indicates // that more tentative parsing is necessary for disambiguation. // They all consume tokens, so backtracking should be used after calling them. 
// Tentative parsers for individual declaration/declarator components.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
                            bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
                                   bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();

public:
TypeResult ParseTypeName(SourceRange *Range = nullptr,
                         DeclaratorContext Context =
                             DeclaratorContext::TypeNameContext,
                         AccessSpecifier AS = AS_none,
                         Decl **OwnedType = nullptr,
                         ParsedAttributes *Attrs = nullptr);

private:
void ParseBlockId(SourceLocation CaretLoc);

/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
  const LangOptions &LO = getLangOpts();
  return LO.DoubleSquareBracketAttributes;
}

// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  // Only '[[' (two consecutive l_squares) can start a standard attribute.
  if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
    return false;
  return DiagnoseProhibitedCXX11Attribute();
}

bool DiagnoseProhibitedCXX11Attribute();

void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                  SourceLocation CorrectLocation) {
  if (!standardAttributesAllowed())
    return;
  // Diagnose only when positioned at '[[' or 'alignas'.
  if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
      Tok.isNot(tok::kw_alignas))
    return;
  DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}

void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                     SourceLocation CorrectLocation);

void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
                                    DeclSpec &DS, Sema::TagUseKind TUK);

// FixItLoc = possible correct location for the attributes
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isInvalid())
    return;
  DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
  Attrs.clear();
}

void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isInvalid())
    return;
  DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
  Attrs.clearListOnly();
}

void DiagnoseProhibitedAttributes(const SourceRange &Range,
                                  SourceLocation FixItLoc);

// Forbid C++11 and C2x attributes that appear on certain syntactic locations
// which the standard permits but we don't yet support, for example,
// attributes appertaining to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
                             unsigned DiagID);

/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();

/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();

/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned ParseAttributeArgsCommon(IdentifierInfo *AttrName,
                                  SourceLocation AttrNameLoc,
                                  ParsedAttributes &Attrs,
                                  SourceLocation *EndLoc,
                                  IdentifierInfo *ScopeName,
                                  SourceLocation ScopeLoc,
                                  ParsedAttr::Syntax Syntax);

// Cheap single-token check before committing to a full attribute parse.
void MaybeParseGNUAttributes(Declarator &D,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.is(tok::kw___attribute)) {
    ParsedAttributes attrs(AttrFactory);
    SourceLocation endLoc;
    ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
    D.takeAttributes(attrs, endLoc);
  }
}
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
                             SourceLocation *endLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.is(tok::kw___attribute))
    ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
                        SourceLocation *endLoc = nullptr,
                        LateParsedAttrList *LateAttrs = nullptr,
                        Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
                           SourceLocation AttrNameLoc,
                           ParsedAttributes &Attrs, SourceLocation *EndLoc,
                           IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
                           ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();

unsigned ParseClangAttributeArgs(IdentifierInfo *AttrName,
                                 SourceLocation AttrNameLoc,
                                 ParsedAttributes &Attrs,
                                 SourceLocation *EndLoc,
                                 IdentifierInfo *ScopeName,
                                 SourceLocation ScopeLoc,
                                 ParsedAttr::Syntax Syntax);

void MaybeParseCXX11Attributes(Declarator &D) {
  if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
    ParsedAttributesWithRange attrs(AttrFactory);
    SourceLocation endLoc;
    ParseCXX11Attributes(attrs, &endLoc);
    D.takeAttributes(attrs, endLoc);
  }
}
void MaybeParseCXX11Attributes(ParsedAttributes &attrs,
                               SourceLocation *endLoc = nullptr) {
  if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
    ParsedAttributesWithRange attrsWithRange(AttrFactory);
    ParseCXX11Attributes(attrsWithRange, endLoc);
    attrs.takeAllFrom(attrsWithRange);
  }
}
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                               SourceLocation *endLoc = nullptr,
                               bool OuterMightBeMessageSend = false) {
  if (standardAttributesAllowed() &&
      isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
    ParseCXX11Attributes(attrs, endLoc);
}

void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
                                  SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                          SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
                             SourceLocation AttrNameLoc,
                             ParsedAttributes &Attrs, SourceLocation *EndLoc,
                             IdentifierInfo *ScopeName,
                             SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);

void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
                                   SourceLocation *endLoc = nullptr) {
  if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
    ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
                              SourceLocation *endLoc = nullptr);
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                                  SourceLocation *End = nullptr) {
  const auto &LO = getLangOpts();
  if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec))
    ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                             SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
                                SourceLocation AttrNameLoc,
                                ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void
ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
/// Parses opencl_unroll_hint attribute if language is OpenCL v2.0
/// or higher.
/// \return false if error happens.
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
  if (getLangOpts().OpenCL)
    return ParseOpenCLUnrollHintAttribute(Attrs);
  // Nothing to parse outside OpenCL; trivially succeed.
  return true;
}
/// Parses opencl_unroll_hint attribute.
/// \return false if error happens.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);

void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);

VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
                                SourceLocation AvailabilityLoc,
                                ParsedAttributes &attrs,
                                SourceLocation *endLoc,
                                IdentifierInfo *ScopeName,
                                SourceLocation ScopeLoc,
                                ParsedAttr::Syntax Syntax);

Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);

void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
                                        SourceLocation Loc,
                                        ParsedAttributes &Attrs,
                                        SourceLocation *EndLoc,
                                        IdentifierInfo *ScopeName,
                                        SourceLocation ScopeLoc,
                                        ParsedAttr::Syntax Syntax);

void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
                                     SourceLocation ObjCBridgeRelatedLoc,
                                     ParsedAttributes &attrs,
                                     SourceLocation *endLoc,
                                     IdentifierInfo *ScopeName,
                                     SourceLocation ScopeLoc,
                                     ParsedAttr::Syntax Syntax);

void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
                                      SourceLocation AttrNameLoc,
                                      ParsedAttributes &Attrs,
                                      SourceLocation *EndLoc,
                                      IdentifierInfo *ScopeName,
                                      SourceLocation ScopeLoc,
                                      ParsedAttr::Syntax Syntax);

void ParseSwiftNewtypeAttribute(IdentifierInfo &SwiftNewtype,
                                SourceLocation SwiftNewtypeLoc,
                                ParsedAttributes &attrs,
                                SourceLocation *endLoc,
                                IdentifierInfo *ScopeName,
                                SourceLocation ScopeLoc,
                                ParsedAttr::Syntax Syntax);

void ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
                               SourceLocation AttrNameLoc,
                               ParsedAttributes &Attrs,
                               SourceLocation *EndLoc,
                               IdentifierInfo *ScopeName,
                               SourceLocation ScopeLoc,
                               ParsedAttr::Syntax Syntax);

void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
                                       SourceLocation StartLoc,
                                       SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);

ExprResult ParseAlignArgument(SourceLocation Start,
                              SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
                             SourceLocation *endLoc = nullptr);

void ParsePtrauthQualifier(ParsedAttributes &Attrs);

VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
  return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
                                        SourceLocation FriendLoc);

bool isCXX11FinalKeyword() const;

/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
  Parser &P;
  CXXScopeSpec &SS;
  bool EnteredScope;
  bool CreatedScope;
public:
  DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
      : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}

  void EnterDeclaratorScope() {
    assert(!EnteredScope && "Already entered the scope!");
    assert(SS.isSet() && "C++ scope was not set!");

    CreatedScope = true;
    P.EnterScope(0); // Not a decl scope.

    // NOTE(review): EnteredScope is set only when
    // ActOnCXXEnterDeclaratorScope returns false (note the negation) —
    // presumably false means success; confirm against Sema.
    if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
      EnteredScope = true;
  }

  ~DeclaratorScopeObj() {
    // Undo only what EnterDeclaratorScope actually did, in reverse order.
    if (EnteredScope) {
      assert(SS.isSet() && "C++ scope was cleared ?");
      P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
    }
    if (CreatedScope)
      P.ExitScope();
  }
};

/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
                             DirectDeclParseFunction DirectDeclParser);

// Bit flags describing which attribute syntaxes a qualifier-list position
// accepts; values are combinable with '|'.
enum AttrRequirements {
  AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
  AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
  AR_GNUAttributesParsed = 1 << 1,
  AR_CXX11AttributesParsed = 1 << 2,
  AR_DeclspecAttributesParsed = 1 << 3,
  AR_AllAttributesParsed = AR_GNUAttributesParsed |
                           AR_CXX11AttributesParsed |
                           AR_DeclspecAttributesParsed,
  AR_VendorAttributesParsed = AR_GNUAttributesParsed |
                              AR_DeclspecAttributesParsed
};

void ParseTypeQualifierListOpt(
    DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
    bool AtomicAllowed = true, bool IdentifierRequired = false,
    Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs,
                             BalancedDelimiterTracker &Tracker,
                             bool IsAmbiguous, bool RequiresArg = false);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
                       SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
    Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
    Declarator &D, ParsedAttributes &attrs,
    SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
    SourceLocation &EllipsisLoc);

void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);

//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]

/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
  /// This is not an attribute specifier.
  CAK_NotAttributeSpecifier,
  /// This should be treated as an attribute-specifier.
  CAK_AttributeSpecifier,
  /// The next tokens are '[[', but this is not an attribute-specifier. This
  /// is ill-formed by C++11 [dcl.attr.grammar]p6.
  CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
                          bool OuterMightBeMessageSend = false);

void DiagnoseUnexpectedNamespace(NamedDecl *Context);

DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
                              SourceLocation &DeclEnd,
                              SourceLocation InlineLoc = SourceLocation());

// Source locations and identifier for one nested namespace in a
// 'namespace a::b::c' declaration.
struct InnerNamespaceInfo {
  SourceLocation NamespaceLoc;
  SourceLocation InlineLoc;
  SourceLocation IdentLoc;
  IdentifierInfo *Ident;
};
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;

void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
                         unsigned int index, SourceLocation &InlineLoc,
                         ParsedAttributes &attrs,
                         BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
    DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
    SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
                          SourceLocation UsingLoc,
                          SourceLocation &DeclEnd,
                          ParsedAttributes &attrs);

// Parsed pieces of one declarator in a using-declaration.
struct UsingDeclarator {
  SourceLocation TypenameLoc;
  CXXScopeSpec SS;
  UnqualifiedId Name;
  SourceLocation EllipsisLoc;

  // Reset to the empty state so the object can be reused for the next
  // declarator in the list.
  void clear() {
    TypenameLoc = EllipsisLoc = SourceLocation();
    SS.clear();
    Name.clear();
  }
};

bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
                                     const ParsedTemplateInfo &TemplateInfo,
                                     SourceLocation UsingLoc,
                                     SourceLocation &DeclEnd,
                                     AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
    const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
    UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
    ParsedAttributes
&Attrs, Decl **OwnedType = nullptr); Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd); Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // C++ 9: classes [class] and C structs/unions. bool isValidAfterTypeSpecifier(bool CouldBeBitfield); void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, bool EnteringContext, DeclSpecContext DSC, ParsedAttributesWithRange &Attributes); void SkipCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, unsigned TagType, Decl *TagDecl); void ParseCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, ParsedAttributesWithRange &Attrs, unsigned TagType, Decl *TagDecl); ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction, SourceLocation &EqualLoc); bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize, LateParsedAttrList &LateAttrs); void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D, VirtSpecifiers &VS); DeclGroupPtrTy ParseCXXClassMemberDeclaration( AccessSpecifier AS, ParsedAttributes &Attr, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ParsingDeclRAIIObject *DiagsFromTParams = nullptr); DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas( AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs, DeclSpec::TST TagType, Decl *Tag); void ParseConstructorInitializer(Decl *ConstructorDecl); MemInitResult ParseMemInitializer(Decl *ConstructorDecl); void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo, Decl *ThisDecl); //===--------------------------------------------------------------------===// // C++ 10: Derived classes [class.derived] TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc, 
SourceLocation &EndLocation); void ParseBaseClause(Decl *ClassDecl); BaseResult ParseBaseSpecifier(Decl *ClassDecl); AccessSpecifier getAccessSpecifierIfPresent() const; bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, IdentifierInfo *Name, SourceLocation NameLoc, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Id, bool AssumeTemplateId); bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Result); //===--------------------------------------------------------------------===// // OpenMP: Directives and clauses. /// Parse clauses for '#pragma omp declare simd'. DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parses OpenMP context selectors and calls \p Callback for each /// successfully parsed context selector. bool parseOpenMPContextSelectors(SourceLocation Loc, SmallVectorImpl<Sema::OMPCtxSelectorData> &Data); /// Parse clauses for '#pragma omp declare variant'. void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse clauses for '#pragma omp declare target'. DeclGroupPtrTy ParseOMPDeclareTargetClauses(); /// Parse '#pragma omp end declare target'. void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind, SourceLocation Loc); /// Parses declarative OpenMP directives. DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl( AccessSpecifier &AS, ParsedAttributesWithRange &Attrs, bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified, Decl *TagDecl = nullptr); /// Parse 'omp declare reduction' construct. DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS); /// Parses initializer for provided omp_priv declaration inside the reduction /// initializer. void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm); /// Parses 'omp declare mapper' directive. 
DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS); /// Parses variable declaration in 'omp declare mapper' directive. TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range, DeclarationName &Name, AccessSpecifier AS = AS_none); /// Parses simple list of variables. /// /// \param Kind Kind of the directive. /// \param Callback Callback function to be called for the list elements. /// \param AllowScopeSpecifier true, if the variables can have fully /// qualified names. /// bool ParseOpenMPSimpleVarList( OpenMPDirectiveKind Kind, const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> & Callback, bool AllowScopeSpecifier); /// Parses declarative or executable directive. /// /// \param StmtCtx The context in which we're parsing the directive. StmtResult ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx); /// Parses clause of kind \a CKind for directive of a kind \a Kind. /// /// \param DKind Kind of current directive. /// \param CKind Kind of current clause. /// \param FirstClause true, if this is the first clause of a kind \a CKind /// in current directive. /// OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, bool FirstClause); /// Parses clause with a single expression of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses simple clause of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause with a single expression and an additional argument /// of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. 
/// OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause without any additional arguments. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false); /// Parses clause with the list of variables of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); public: /// Parses simple expression in parens for single-expression clauses of OpenMP /// constructs. /// \param RLoc Returned location of right paren. ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc, bool IsAddressOfOperand = false); /// Data used for parsing list of variables in OpenMP clauses. struct OpenMPVarListDataTy { Expr *TailExpr = nullptr; SourceLocation ColonLoc; SourceLocation RLoc; CXXScopeSpec ReductionOrMapperIdScopeSpec; DeclarationNameInfo ReductionOrMapperId; int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or ///< lastprivate clause. SmallVector<OpenMPMapModifierKind, OMPMapClause::NumberOfModifiers> MapTypeModifiers; SmallVector<SourceLocation, OMPMapClause::NumberOfModifiers> MapTypeModifiersLoc; bool IsMapTypeImplicit = false; SourceLocation DepLinMapLastLoc; }; /// Parses clauses with list. bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl<Expr *> &Vars, OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, bool AllowDeductionGuide, ParsedType ObjectType, SourceLocation *TemplateKWLoc, UnqualifiedId &Result); /// Parses the mapper modifier in map, to, and from clauses. 
bool parseMapperModifier(OpenMPVarListDataTy &Data); /// Parses map-type-modifiers in map clause. /// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list) /// where, map-type-modifier ::= always | close | mapper(mapper-identifier) bool parseMapTypeModifiers(OpenMPVarListDataTy &Data); private: //===--------------------------------------------------------------------===// // C++ 14: Templates [temp] // C++ 14.1: Template Parameters [temp.param] Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS); Decl *ParseSingleDeclarationAfterTemplate( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); bool ParseTemplateParameters(unsigned Depth, SmallVectorImpl<NamedDecl *> &TemplateParams, SourceLocation &LAngleLoc, SourceLocation &RAngleLoc); bool ParseTemplateParameterList(unsigned Depth, SmallVectorImpl<NamedDecl*> &TemplateParams); bool isStartOfTemplateTypeParameter(); NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position); void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc, SourceLocation CorrectLoc, bool AlreadyHasEllipsis, bool IdentifierHasName); void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc, Declarator &D); // C++ 14.3: Template arguments [temp.arg] typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList; bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc, bool 
ConsumeLastToken, bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
                                      SourceLocation &LAngleLoc,
                                      TemplateArgList &TemplateArgs,
                                      SourceLocation &RAngleLoc);

bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
                             CXXScopeSpec &SS,
                             SourceLocation TemplateKWLoc,
                             UnqualifiedId &TemplateName,
                             bool AllowTypeAnnotation = true);
void AnnotateTemplateIdTokenAsType(bool IsClassName = false);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
                                 SourceLocation ExternLoc,
                                 SourceLocation TemplateLoc,
                                 SourceLocation &DeclEnd,
                                 ParsedAttributes &AccessAttrs,
                                 AccessSpecifier AS = AS_none);
// C++2a: Template, concept definition [temp]
Decl *
ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
                       SourceLocation &DeclEnd);

//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl);
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
  // Fast path: only invoke the real handler when the current token is one of
  // the module annotation tokens.
  tok::TokenKind Kind = Tok.getKind();
  if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
      Kind == tok::annot_module_include)
    return parseMisplacedModuleImport();
  return false;
}

bool ParseModuleName(
    SourceLocation UseLoc,
    SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
    bool IsImport);

//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();

/// Parse the given string as a type.
///
/// This is a dangerous utility function currently employed only by API notes.
/// It is not a general entry-point for safely parsing types from strings.
///
/// \param typeStr The string to be parsed as a type.
/// \param context The name of the context in which this string is being /// parsed, which will be used in diagnostics. /// \param includeLoc The location at which this parse was triggered. TypeResult parseTypeFromString(StringRef typeStr, StringRef context, SourceLocation includeLoc); //===--------------------------------------------------------------------===// // Embarcadero: Arary and Expression Traits ExprResult ParseArrayTypeTrait(); ExprResult ParseExpressionTrait(); ExprResult ParseBuiltinPtrauthTypeDiscriminator(); //===--------------------------------------------------------------------===// // Preprocessor code-completion pass-through void CodeCompleteDirective(bool InConditional) override; void CodeCompleteInConditionalExclusion() override; void CodeCompleteMacroName(bool IsDefinition) override; void CodeCompletePreprocessorExpression() override; void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned ArgumentIndex) override; void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override; void CodeCompleteNaturalLanguage() override; }; } // end namespace clang #endif
t_cholmod_gpu.c
/* ========================================================================== */ /* === GPU/t_cholmod_gpu ==================================================== */ /* ========================================================================== */ /* ----------------------------------------------------------------------------- * CHOLMOD/GPU Module. Copyright (C) 2005-2012, Timothy A. Davis * http://www.suitesparse.com * -------------------------------------------------------------------------- */ /* GPU BLAS template routine for cholmod_super_numeric. */ /* ========================================================================== */ /* === include files and definitions ======================================== */ /* ========================================================================== */ #ifdef GPU_BLAS #include <string.h> #include "cholmod_template.h" #include "cholmod_gpu_kernels.h" #include <fenv.h> #include <cuda.h> #include <cuda_runtime.h> #undef L_ENTRY #ifdef REAL #define L_ENTRY 1 #else #define L_ENTRY 2 #endif /* ========================================================================== */ /* === gpu_clear_memory ===================================================== */ /* ========================================================================== */ /* * Ensure the Lx is zeroed before forming factor. This is a significant cost * in the GPU case - so using this parallel memset code for efficiency. 
*/ void TEMPLATE2 (CHOLMOD (gpu_clear_memory)) ( double* buff, size_t size, int num_threads ) { int chunk_multiplier = 5; int num_chunks = chunk_multiplier * num_threads; size_t chunksize = size / num_chunks; size_t i; #pragma omp parallel for num_threads(num_threads) private(i) schedule(dynamic) for(i = 0; i < num_chunks; i++) { size_t chunkoffset = i * chunksize; if(i == num_chunks - 1) { memset(buff + chunkoffset, 0, (size - chunksize*(num_chunks - 1)) * sizeof(double)); } else { memset(buff + chunkoffset, 0, chunksize * sizeof(double)); } } } /* ========================================================================== */ /* === gpu_init ============================================================= */ /* ========================================================================== */ /* * Performs required initialization for GPU computing. * * Returns 0 if there is an error, so the intended use is * * useGPU = CHOLMOD(gpu_init) * * which would locally turn off gpu processing if the initialization failed. */ int TEMPLATE2 (CHOLMOD (gpu_init)) ( void *Cwork, cholmod_factor *L, cholmod_common *Common, Int nsuper, Int n, Int nls, cholmod_gpu_pointers *gpu_p ) { Int i, k, maxSize ; cublasStatus_t cublasError ; cudaError_t cudaErr ; size_t maxBytesSize, HostPinnedSize ; feenableexcept (FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW ); maxSize = L->maxcsize; /* #define PAGE_SIZE (4*1024) */ CHOLMOD_GPU_PRINTF (("gpu_init : %p\n", (void *) ((size_t) Cwork & ~(4*1024-1)))) ; /* make sure the assumed buffer sizes are large enough */ if ( (nls+2*n+4)*sizeof(Int) > Common->devBuffSize ) { ERROR (CHOLMOD_GPU_PROBLEM,"\n\n" "GPU Memory allocation error. Ls, Map and RelativeMap exceed\n" "devBuffSize. It is not clear if this is due to insufficient\n" "device or host memory or both. 
You can try:\n" " 1) increasing the amount of GPU memory requested\n" " 2) reducing CHOLMOD_NUM_HOST_BUFFERS\n" " 3) using a GPU & host with more memory\n" "This issue is a known limitation and should be fixed in a \n" "future release of CHOLMOD.\n") ; return (0) ; } /* divvy up the memory in dev_mempool */ gpu_p->d_Lx[0] = Common->dev_mempool; gpu_p->d_Lx[1] = (char*)Common->dev_mempool + Common->devBuffSize; gpu_p->d_C = (char*)Common->dev_mempool + 2 * Common->devBuffSize; gpu_p->d_A[0] = (char*)Common->dev_mempool + 3 * Common->devBuffSize; gpu_p->d_A[1] = (char*)Common->dev_mempool + 4 * Common->devBuffSize; gpu_p->d_Ls = (char*)Common->dev_mempool + 5 * Common->devBuffSize; gpu_p->d_Map = (char*)gpu_p->d_Ls + (nls + 1) * sizeof(Int); gpu_p->d_RelativeMap = (char*)gpu_p->d_Map + (n + 1) * sizeof(Int); /* Copy all of the Ls and Lpi data to the device. If any supernodes are * to be computed on the device then this will be needed, so might as * well do it now. */ cudaErr = cudaMemcpy ( gpu_p->d_Ls, L->s, nls*sizeof(Int), cudaMemcpyHostToDevice ); CHOLMOD_HANDLE_CUDA_ERROR(cudaErr,"cudaMemcpy(d_Ls)"); if (!(Common->gpuStream[0])) { /* ------------------------------------------------------------------ */ /* create each CUDA stream */ /* ------------------------------------------------------------------ */ for ( i=0; i<CHOLMOD_HOST_SUPERNODE_BUFFERS; i++ ) { cudaErr = cudaStreamCreate ( &(Common->gpuStream[i]) ); if (cudaErr != cudaSuccess) { ERROR (CHOLMOD_GPU_PROBLEM, "CUDA stream") ; return (0) ; } } /* ------------------------------------------------------------------ */ /* create each CUDA event */ /* ------------------------------------------------------------------ */ for (i = 0 ; i < 3 ; i++) { cudaErr = cudaEventCreateWithFlags (&(Common->cublasEventPotrf [i]), cudaEventDisableTiming) ; if (cudaErr != cudaSuccess) { ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event") ; return (0) ; } } for (i = 0 ; i < CHOLMOD_HOST_SUPERNODE_BUFFERS ; i++) { cudaErr = 
cudaEventCreateWithFlags (&(Common->updateCBuffersFree[i]), cudaEventDisableTiming) ; if (cudaErr != cudaSuccess) { ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event") ; return (0) ; } } cudaErr = cudaEventCreateWithFlags ( &(Common->updateCKernelsComplete), cudaEventDisableTiming ); if (cudaErr != cudaSuccess) { ERROR (CHOLMOD_GPU_PROBLEM, "CUDA updateCKernelsComplete event") ; return (0) ; } } gpu_p->h_Lx[0] = (double*)(Common->host_pinned_mempool); for ( k=1; k<CHOLMOD_HOST_SUPERNODE_BUFFERS; k++ ) { gpu_p->h_Lx[k] = (double*)((char *)(Common->host_pinned_mempool) + k*Common->devBuffSize); } return (1); /* initialization successfull, useGPU = 1 */ } /* ========================================================================== */ /* === gpu_reorder_descendants ============================================== */ /* ========================================================================== */ /* Reorder the descendant supernodes as: * 1st - descendant supernodes eligible for processing on the GPU * in increasing (by flops) order * 2nd - supernodes whose processing is to remain on the CPU * in any order * * All of the GPU-eligible supernodes will be scheduled first. All * CPU-eligible descendants will overlap with the last (largest) * CHOLMOD_HOST_SUPERNODE_BUFFERS GPU-eligible descendants. 
*/ typedef int(*__compar_fn_t) (const void *, const void *); void TEMPLATE2 (CHOLMOD (gpu_reorder_descendants)) ( cholmod_common *Common, Int *Super, Int *locals, Int *Lpi, Int *Lpos, Int *Head, Int *Next, Int *Previous, Int *ndescendants, Int *tail, Int *mapCreatedOnGpu, cholmod_gpu_pointers *gpu_p ) { Int prevd, nextd, firstcpu, d, k, kd1, kd2, ndcol, pdi, pdend, pdi1; Int dnext, ndrow2, p; Int n_descendant = 0; double score; /* use h_Lx[0] to buffer the GPU-eligible descendants */ struct cholmod_descendant_score_t* scores = (struct cholmod_descendant_score_t*) gpu_p->h_Lx[0]; double cpuref = 0.0; int nreverse = 1; int previousd; d = Head[*locals]; prevd = -1; firstcpu = -1; *mapCreatedOnGpu = 0; while ( d != EMPTY ) { /* Get the parameters for the current descendant supernode */ kd1 = Super [d] ; /* d contains cols kd1 to kd2-1 of L */ kd2 = Super [d+1] ; ndcol = kd2 - kd1 ; /* # of columns in all of d */ pdi = Lpi [d] ; /* pointer to first row of d in Ls */ pdend = Lpi [d+1] ; /* pointer just past last row of d in Ls */ p = Lpos [d] ; /* offset of 1st row of d affecting s */ pdi1 = pdi + p ; /* ptr to 1st row of d affecting s in Ls */ ndrow2 = pdend - pdi1; nextd = Next[d]; /* compute a rough flops 'score' for this descendant supernode */ score = ndrow2 * ndcol; if ( ndrow2*L_ENTRY >= CHOLMOD_ND_ROW_LIMIT && ndcol*L_ENTRY >= CHOLMOD_ND_COL_LIMIT ) { score += Common->devBuffSize; } /* place in sort buffer */ scores[n_descendant].score = score; scores[n_descendant].d = d; n_descendant++; d = nextd; } /* Sort the GPU-eligible supernodes */ qsort(scores, n_descendant, sizeof(struct cholmod_descendant_score_t), (__compar_fn_t)CHOLMOD(score_comp)); /* Place sorted data back in descendant supernode linked list*/ if ( n_descendant > 0 ) { Head[*locals] = scores[0].d; if ( n_descendant > 1 ) { #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \ if (n_descendant > 64) for ( k=1; k<n_descendant; k++ ) { Next[scores[k-1].d] = scores[k].d; } } 
Next[scores[n_descendant-1].d] = firstcpu; } /* reverse the first CHOLMOD_HOST_SUPERNODE_BUFFERS to better hide PCIe communications */ if ( Head[*locals] != EMPTY && Next[Head[*locals]] != EMPTY ) { previousd = Head[*locals]; d = Next[Head[*locals]]; while ( d!=EMPTY && nreverse < CHOLMOD_HOST_SUPERNODE_BUFFERS ) { kd1 = Super [d] ; /* d contains cols kd1 to kd2-1 of L */ kd2 = Super [d+1] ; ndcol = kd2 - kd1 ; /* # of columns in all of d */ pdi = Lpi [d] ; /* pointer to first row of d in Ls */ pdend = Lpi [d+1] ; /* pointer just past last row of d in Ls */ p = Lpos [d] ; /* offset of 1st row of d affecting s */ pdi1 = pdi + p ; /* ptr to 1st row of d affecting s in Ls */ ndrow2 = pdend - pdi1; nextd = Next[d]; nreverse++; if ( ndrow2*L_ENTRY >= CHOLMOD_ND_ROW_LIMIT && ndcol*L_ENTRY >= CHOLMOD_ND_COL_LIMIT ) { /* place this supernode at the front of the list */ Next[previousd] = Next[d]; Next[d] = Head[*locals]; Head[*locals] = d; } else { previousd = d; } d = nextd; } } /* create a 'previous' list so we can traverse backwards */ *ndescendants = 0; if ( Head[*locals] != EMPTY ) { Previous[Head[*locals]] = EMPTY; for (d = Head [*locals] ; d != EMPTY ; d = dnext) { (*ndescendants)++; dnext = Next[d]; if ( dnext != EMPTY ) { Previous[dnext] = d; } else { *tail = d; } } } return; } /* ========================================================================== */ /* === gpu_initialize_supernode ============================================= */ /* ========================================================================== */ /* C = L (k1:n-1, kd1:kd2-1) * L (k1:k2-1, kd1:kd2-1)', except that k1:n-1 */ void TEMPLATE2 (CHOLMOD (gpu_initialize_supernode)) ( cholmod_common *Common, Int nscol, Int nsrow, Int psi, cholmod_gpu_pointers *gpu_p ) { cudaError_t cuErr; /* initialize the device supernode assemby memory to zero */ cuErr = cudaMemset ( gpu_p->d_A[0], 0, nscol*nsrow*L_ENTRY*sizeof(double) ); CHOLMOD_HANDLE_CUDA_ERROR(cuErr,"cudaMemset(d_A)"); /* Create the Map on the 
device */ createMapOnDevice ( (Int *)(gpu_p->d_Map), (Int *)(gpu_p->d_Ls), psi, nsrow ); return; } /* ========================================================================== */ /* === gpu_updateC ========================================================== */ /* ========================================================================== */ /* C = L (k1:n-1, kd1:kd2-1) * L (k1:k2-1, kd1:kd2-1)', except that k1:n-1 * refers to all of the rows in L, but many of the rows are all zero. * Supernode d holds columns kd1 to kd2-1 of L. Nonzero rows in the range * k1:k2-1 are in the list Ls [pdi1 ... pdi2-1], of size ndrow1. Nonzero rows * in the range k2:n-1 are in the list Ls [pdi2 ... pdend], of size ndrow2. * Let L1 = L (Ls [pdi1 ... pdi2-1], kd1:kd2-1), and let L2 = L (Ls [pdi2 ... * pdend], kd1:kd2-1). C is ndrow2-by-ndrow1. Let C1 be the first ndrow1 * rows of C and let C2 be the last ndrow2-ndrow1 rows of C. Only the lower * triangular part of C1 needs to be computed since C1 is symmetric. * * UpdateC is completely asynchronous w.r.t. the GPU. Once the input buffer * d_Lx[] has been filled, all of the device operations are issues, and the * host can continue with filling the next input buffer / or start processing * all of the descendant supernodes which are not eligible for processing on * the device (since they are too small - will not fill the device). 
*/ int TEMPLATE2 (CHOLMOD (gpu_updateC)) ( Int ndrow1, /* C is ndrow2-by-ndrow2 */ Int ndrow2, Int ndrow, /* leading dimension of Lx */ Int ndcol, /* L1 is ndrow1-by-ndcol */ Int nsrow, Int pdx1, /* L1 starts at Lx + L_ENTRY*pdx1 */ /* L2 starts at Lx + L_ENTRY*(pdx1 + ndrow1) */ Int pdi1, double *Lx, double *C, cholmod_common *Common, cholmod_gpu_pointers *gpu_p ) { double *devPtrLx, *devPtrC ; double alpha, beta ; cublasStatus_t cublasStatus ; cudaError_t cudaStat [2] ; Int ndrow3 ; int icol, irow; int iHostBuff, iDevBuff ; #ifndef NTIMER double tstart = 0; #endif if ((ndrow2*L_ENTRY < CHOLMOD_ND_ROW_LIMIT) || (ndcol*L_ENTRY < CHOLMOD_ND_COL_LIMIT)) { /* too small for the CUDA BLAS; use the CPU instead */ return (0) ; } ndrow3 = ndrow2 - ndrow1 ; #ifndef NTIMER Common->syrkStart = SuiteSparse_time ( ) ; Common->CHOLMOD_GPU_SYRK_CALLS++ ; #endif /* ---------------------------------------------------------------------- */ /* allocate workspace on the GPU */ /* ---------------------------------------------------------------------- */ iHostBuff = (Common->ibuffer)%CHOLMOD_HOST_SUPERNODE_BUFFERS; iDevBuff = (Common->ibuffer)%CHOLMOD_DEVICE_STREAMS; /* cycle the device Lx buffer, d_Lx, through CHOLMOD_DEVICE_STREAMS, usually 2, so we can overlap the copy of this descendent supernode with the compute of the previous descendant supernode */ devPtrLx = (double *)(gpu_p->d_Lx[iDevBuff]); /* very little overlap between kernels for difference descendant supernodes (since we enforce the supernodes must be large enough to fill the device) so we only need one C buffer */ devPtrC = (double *)(gpu_p->d_C); /* ---------------------------------------------------------------------- */ /* copy Lx to the GPU */ /* ---------------------------------------------------------------------- */ /* copy host data to pinned buffer first for better H2D bandwidth */ #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) if (ndcol > 32) for ( icol=0; icol<ndcol; icol++ ) { for ( irow=0; 
irow<ndrow2*L_ENTRY; irow++ ) { gpu_p->h_Lx[iHostBuff][icol*ndrow2*L_ENTRY+irow] = Lx[pdx1*L_ENTRY+icol*ndrow*L_ENTRY + irow]; } } cudaStat[0] = cudaMemcpyAsync ( devPtrLx, gpu_p->h_Lx[iHostBuff], ndrow2*ndcol*L_ENTRY*sizeof(devPtrLx[0]), cudaMemcpyHostToDevice, Common->gpuStream[iDevBuff] ); if ( cudaStat[0] ) { CHOLMOD_GPU_PRINTF ((" ERROR cudaMemcpyAsync = %d \n", cudaStat[0])); return (0); } /* make the current stream wait for kernels in previous streams */ cudaStreamWaitEvent ( Common->gpuStream[iDevBuff], Common->updateCKernelsComplete, 0 ) ; /* ---------------------------------------------------------------------- */ /* create the relative map for this descendant supernode */ /* ---------------------------------------------------------------------- */ createRelativeMapOnDevice ( (Int *)(gpu_p->d_Map), (Int *)(gpu_p->d_Ls), (Int *)(gpu_p->d_RelativeMap), pdi1, ndrow2, &(Common->gpuStream[iDevBuff]) ); /* ---------------------------------------------------------------------- */ /* do the CUDA SYRK */ /* ---------------------------------------------------------------------- */ cublasStatus = cublasSetStream (Common->cublasHandle, Common->gpuStream[iDevBuff]) ; if (cublasStatus != CUBLAS_STATUS_SUCCESS) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS stream") ; } alpha = 1.0 ; beta = 0.0 ; #ifdef REAL cublasStatus = cublasDsyrk (Common->cublasHandle, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, (int) ndrow1, (int) ndcol, /* N, K: L1 is ndrow1-by-ndcol */ &alpha, /* ALPHA: 1 */ devPtrLx, ndrow2, /* A, LDA: L1, ndrow2 */ &beta, /* BETA: 0 */ devPtrC, ndrow2) ; /* C, LDC: C1 */ #else cublasStatus = cublasZherk (Common->cublasHandle, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, (int) ndrow1, (int) ndcol, /* N, K: L1 is ndrow1-by-ndcol*/ &alpha, /* ALPHA: 1 */ (const cuDoubleComplex *) devPtrLx, ndrow2, /* A, LDA: L1, ndrow2 */ &beta, /* BETA: 0 */ (cuDoubleComplex *) devPtrC, ndrow2) ; /* C, LDC: C1 */ #endif if (cublasStatus != CUBLAS_STATUS_SUCCESS) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU 
CUBLAS routine failure") ; } #ifndef NTIMER Common->CHOLMOD_GPU_SYRK_TIME += SuiteSparse_time() - Common->syrkStart; #endif /* ---------------------------------------------------------------------- */ /* compute remaining (ndrow2-ndrow1)-by-ndrow1 block of C, C2 = L2*L1' */ /* ---------------------------------------------------------------------- */ #ifndef NTIMER Common->CHOLMOD_GPU_GEMM_CALLS++ ; tstart = SuiteSparse_time(); #endif if (ndrow3 > 0) { #ifndef REAL cuDoubleComplex calpha = {1.0,0.0} ; cuDoubleComplex cbeta = {0.0,0.0} ; #endif /* ------------------------------------------------------------------ */ /* do the CUDA BLAS dgemm */ /* ------------------------------------------------------------------ */ #ifdef REAL alpha = 1.0 ; beta = 0.0 ; cublasStatus = cublasDgemm (Common->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, ndrow3, ndrow1, ndcol, /* M, N, K */ &alpha, /* ALPHA: 1 */ devPtrLx + L_ENTRY*(ndrow1), /* A, LDA: L2*/ ndrow2, /* ndrow */ devPtrLx, /* B, LDB: L1 */ ndrow2, /* ndrow */ &beta, /* BETA: 0 */ devPtrC + L_ENTRY*ndrow1, /* C, LDC: C2 */ ndrow2) ; #else cublasStatus = cublasZgemm (Common->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_C, ndrow3, ndrow1, ndcol, /* M, N, K */ &calpha, /* ALPHA: 1 */ (const cuDoubleComplex*) devPtrLx + ndrow1, ndrow2, /* ndrow */ (const cuDoubleComplex *) devPtrLx, ndrow2, /* ndrow */ &cbeta, /* BETA: 0 */ (cuDoubleComplex *)devPtrC + ndrow1, ndrow2) ; #endif if (cublasStatus != CUBLAS_STATUS_SUCCESS) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ; } } #ifndef NTIMER Common->CHOLMOD_GPU_GEMM_TIME += SuiteSparse_time() - tstart; #endif /* ------------------------------------------------------------------ */ /* Assemble the update C on the device using the d_RelativeMap */ /* ------------------------------------------------------------------ */ #ifdef REAL addUpdateOnDevice ( gpu_p->d_A[0], devPtrC, gpu_p->d_RelativeMap, ndrow1, ndrow2, nsrow, &(Common->gpuStream[iDevBuff]) ); #else addComplexUpdateOnDevice ( 
gpu_p->d_A[0], devPtrC, gpu_p->d_RelativeMap, ndrow1, ndrow2, nsrow, &(Common->gpuStream[iDevBuff]) ); #endif /* Record an event indicating that kernels for this descendant are complete */ cudaEventRecord ( Common->updateCKernelsComplete, Common->gpuStream[iDevBuff]); cudaEventRecord ( Common->updateCBuffersFree[iHostBuff], Common->gpuStream[iDevBuff]); return (1) ; } /* ========================================================================== */ /* === gpu_final_assembly =================================================== */ /* ========================================================================== */ /* If the supernode was assembled on both the CPU and the GPU, this will * complete the supernode assembly on both the GPU and CPU. */ void TEMPLATE2 (CHOLMOD (gpu_final_assembly)) ( cholmod_common *Common, double *Lx, Int psx, Int nscol, Int nsrow, int supernodeUsedGPU, int *iHostBuff, int *iDevBuff, cholmod_gpu_pointers *gpu_p ) { Int iidx, i, j; Int iHostBuff2 ; Int iDevBuff2 ; if ( supernodeUsedGPU ) { /* ------------------------------------------------------------------ */ /* Apply all of the Shur-complement updates, computed on the gpu, to */ /* the supernode. */ /* ------------------------------------------------------------------ */ *iHostBuff = (Common->ibuffer)%CHOLMOD_HOST_SUPERNODE_BUFFERS; *iDevBuff = (Common->ibuffer)%CHOLMOD_DEVICE_STREAMS; if ( nscol * L_ENTRY >= CHOLMOD_POTRF_LIMIT ) { /* If this supernode is going to be factored using the GPU (potrf) * then it will need the portion of the update assembled ont the * CPU. So copy that to a pinned buffer an H2D copy to device. 
*/ /* wait until a buffer is free */ cudaEventSynchronize ( Common->updateCBuffersFree[*iHostBuff] ); /* copy update assembled on CPU to a pinned buffer */ #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \ private(iidx) if (nscol>32) for ( j=0; j<nscol; j++ ) { for ( i=j; i<nsrow*L_ENTRY; i++ ) { iidx = j*nsrow*L_ENTRY+i; gpu_p->h_Lx[*iHostBuff][iidx] = Lx[psx*L_ENTRY+iidx]; } } /* H2D transfer of update assembled on CPU */ cudaMemcpyAsync ( gpu_p->d_A[1], gpu_p->h_Lx[*iHostBuff], nscol*nsrow*L_ENTRY*sizeof(double), cudaMemcpyHostToDevice, Common->gpuStream[*iDevBuff] ); } Common->ibuffer++; iHostBuff2 = (Common->ibuffer)%CHOLMOD_HOST_SUPERNODE_BUFFERS; iDevBuff2 = (Common->ibuffer)%CHOLMOD_DEVICE_STREAMS; /* wait for all kernels to complete */ cudaEventSynchronize( Common->updateCKernelsComplete ); /* copy assembled Schur-complement updates computed on GPU */ cudaMemcpyAsync ( gpu_p->h_Lx[iHostBuff2], gpu_p->d_A[0], nscol*nsrow*L_ENTRY*sizeof(double), cudaMemcpyDeviceToHost, Common->gpuStream[iDevBuff2] ); if ( nscol * L_ENTRY >= CHOLMOD_POTRF_LIMIT ) { /* with the current implementation, potrf still uses data from the * CPU - so put the fully assembled supernode in a pinned buffer for * fastest access */ /* need both H2D and D2H copies to be complete */ cudaDeviceSynchronize(); /* sum updates from cpu and device on device */ #ifdef REAL sumAOnDevice ( gpu_p->d_A[1], gpu_p->d_A[0], -1.0, nsrow, nscol ); #else sumComplexAOnDevice ( gpu_p->d_A[1], gpu_p->d_A[0], -1.0, nsrow, nscol ); #endif /* place final assembled supernode in pinned buffer */ #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \ private(iidx) if (nscol>32) for ( j=0; j<nscol; j++ ) { for ( i=j*L_ENTRY; i<nscol*L_ENTRY; i++ ) { iidx = j*nsrow*L_ENTRY+i; gpu_p->h_Lx[*iHostBuff][iidx] -= gpu_p->h_Lx[iHostBuff2][iidx]; } } } else { /* assemble with CPU updates */ cudaDeviceSynchronize(); #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \ private(iidx) if (nscol>32) 
for ( j=0; j<nscol; j++ ) { for ( i=j*L_ENTRY; i<nsrow*L_ENTRY; i++ ) { iidx = j*nsrow*L_ENTRY+i; Lx[psx*L_ENTRY+iidx] -= gpu_p->h_Lx[iHostBuff2][iidx]; } } } } return; } /* ========================================================================== */ /* === gpu_lower_potrf ====================================================== */ /* ========================================================================== */ /* Cholesky factorzation (dpotrf) of a matrix S, operating on the lower * triangular part only. S is nscol2-by-nscol2 with leading dimension nsrow. * * S is the top part of the supernode (the lower triangular matrx). * This function also copies the bottom rectangular part of the supernode (B) * onto the GPU, in preparation for gpu_triangular_solve. */ /* * On entry, d_A[1] contains the fully assembled supernode */ int TEMPLATE2 (CHOLMOD (gpu_lower_potrf)) ( Int nscol2, /* S is nscol2-by-nscol2 */ Int nsrow, /* leading dimension of S */ Int psx, /* S is located at Lx + L_ENTRY*psx */ double *Lx, /* contains S; overwritten with Cholesky factor */ Int *info, /* BLAS info return value */ cholmod_common *Common, cholmod_gpu_pointers *gpu_p ) { double *devPtrA, *devPtrB, *A ; double alpha, beta ; cudaError_t cudaStat ; cublasStatus_t cublasStatus ; Int j, nsrow2, nb, n, gpu_lda, lda, gpu_ldb ; int ilda, ijb, iinfo ; #ifndef NTIMER double tstart ; #endif if (nscol2 * L_ENTRY < CHOLMOD_POTRF_LIMIT) { /* too small for the CUDA BLAS; use the CPU instead */ return (0) ; } #ifndef NTIMER tstart = SuiteSparse_time ( ) ; Common->CHOLMOD_GPU_POTRF_CALLS++ ; #endif nsrow2 = nsrow - nscol2 ; /* ---------------------------------------------------------------------- */ /* heuristic to get the block size depending of the problem size */ /* ---------------------------------------------------------------------- */ nb = 128 ; if (nscol2 > 4096) nb = 256 ; if (nscol2 > 8192) nb = 384 ; n = nscol2 ; gpu_lda = ((nscol2+31)/32)*32 ; lda = nsrow ; A = 
gpu_p->h_Lx[(Common->ibuffer+CHOLMOD_HOST_SUPERNODE_BUFFERS-1)% CHOLMOD_HOST_SUPERNODE_BUFFERS]; /* ---------------------------------------------------------------------- */ /* determine the GPU leading dimension of B */ /* ---------------------------------------------------------------------- */ gpu_ldb = 0 ; if (nsrow2 > 0) { gpu_ldb = ((nsrow2+31)/32)*32 ; } /* ---------------------------------------------------------------------- */ /* remember where device memory is, to be used by triangular solve later */ /* ---------------------------------------------------------------------- */ devPtrA = gpu_p->d_Lx[0]; devPtrB = gpu_p->d_Lx[1]; /* ---------------------------------------------------------------------- */ /* copy A from device to device */ /* ---------------------------------------------------------------------- */ cudaStat = cudaMemcpy2DAsync ( devPtrA, gpu_lda * L_ENTRY * sizeof (devPtrA[0]), gpu_p->d_A[1], nsrow * L_ENTRY * sizeof (Lx[0]), nscol2 * L_ENTRY * sizeof (devPtrA[0]), nscol2, cudaMemcpyDeviceToDevice, Common->gpuStream[0] ); if ( cudaStat ) { ERROR ( CHOLMOD_GPU_PROBLEM, "GPU memcopy device to device"); } /* ---------------------------------------------------------------------- */ /* copy B in advance, for gpu_triangular_solve */ /* ---------------------------------------------------------------------- */ if (nsrow2 > 0) { cudaStat = cudaMemcpy2DAsync (devPtrB, gpu_ldb * L_ENTRY * sizeof (devPtrB [0]), gpu_p->d_A[1] + L_ENTRY*nscol2, nsrow * L_ENTRY * sizeof (Lx [0]), nsrow2 * L_ENTRY * sizeof (devPtrB [0]), nscol2, cudaMemcpyDeviceToDevice, Common->gpuStream[0]) ; if (cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ; } } /* ------------------------------------------------------------------ */ /* define the dpotrf stream */ /* ------------------------------------------------------------------ */ cublasStatus = cublasSetStream (Common->cublasHandle, Common->gpuStream [0]) ; if (cublasStatus != CUBLAS_STATUS_SUCCESS) { ERROR 
(CHOLMOD_GPU_PROBLEM, "GPU CUBLAS stream") ; } /* ---------------------------------------------------------------------- */ /* block Cholesky factorization of S */ /* ---------------------------------------------------------------------- */ for (j = 0 ; j < n ; j += nb) { Int jb = nb < (n-j) ? nb : (n-j) ; /* ------------------------------------------------------------------ */ /* do the CUDA BLAS dsyrk */ /* ------------------------------------------------------------------ */ alpha = -1.0 ; beta = 1.0 ; #ifdef REAL cublasStatus = cublasDsyrk (Common->cublasHandle, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, jb, j, &alpha, devPtrA + j, gpu_lda, &beta, devPtrA + j + j*gpu_lda, gpu_lda) ; #else cublasStatus = cublasZherk (Common->cublasHandle, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, jb, j, &alpha, (cuDoubleComplex*)devPtrA + j, gpu_lda, &beta, (cuDoubleComplex*)devPtrA + j + j*gpu_lda, gpu_lda) ; #endif if (cublasStatus != CUBLAS_STATUS_SUCCESS) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ; } /* ------------------------------------------------------------------ */ cudaStat = cudaEventRecord (Common->cublasEventPotrf [0], Common->gpuStream [0]) ; if (cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ; } cudaStat = cudaStreamWaitEvent (Common->gpuStream [1], Common->cublasEventPotrf [0], 0) ; if (cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ; } /* ------------------------------------------------------------------ */ /* copy back the jb columns on two different streams */ /* ------------------------------------------------------------------ */ cudaStat = cudaMemcpy2DAsync (A + L_ENTRY*(j + j*lda), lda * L_ENTRY * sizeof (double), devPtrA + L_ENTRY*(j + j*gpu_lda), gpu_lda * L_ENTRY * sizeof (double), L_ENTRY * sizeof (double)*jb, jb, cudaMemcpyDeviceToHost, Common->gpuStream [1]) ; if (cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy from device") ; } /* ------------------------------------------------------------------ */ /* do 
the CUDA BLAS dgemm */ /* ------------------------------------------------------------------ */ if ((j+jb) < n) { #ifdef REAL alpha = -1.0 ; beta = 1.0 ; cublasStatus = cublasDgemm (Common->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, (n-j-jb), jb, j, &alpha, devPtrA + (j+jb), gpu_lda, devPtrA + (j) , gpu_lda, &beta, devPtrA + (j+jb + j*gpu_lda), gpu_lda) ; #else cuDoubleComplex calpha = {-1.0,0.0} ; cuDoubleComplex cbeta = { 1.0,0.0} ; cublasStatus = cublasZgemm (Common->cublasHandle, CUBLAS_OP_N, CUBLAS_OP_C, (n-j-jb), jb, j, &calpha, (cuDoubleComplex*)devPtrA + (j+jb), gpu_lda, (cuDoubleComplex*)devPtrA + (j), gpu_lda, &cbeta, (cuDoubleComplex*)devPtrA + (j+jb + j*gpu_lda), gpu_lda ) ; #endif if (cublasStatus != CUBLAS_STATUS_SUCCESS) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ; } } cudaStat = cudaStreamSynchronize (Common->gpuStream [1]) ; if (cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ; } /* ------------------------------------------------------------------ */ /* compute the Cholesky factorization of the jbxjb block on the CPU */ /* ------------------------------------------------------------------ */ ilda = (int) lda ; ijb = jb ; #ifdef REAL LAPACK_DPOTRF ("L", &ijb, A + L_ENTRY * (j + j*lda), &ilda, &iinfo) ; #else LAPACK_ZPOTRF ("L", &ijb, A + L_ENTRY * (j + j*lda), &ilda, &iinfo) ; #endif *info = iinfo ; if (*info != 0) { *info = *info + j ; break ; } /* ------------------------------------------------------------------ */ /* copy the result back to the GPU */ /* ------------------------------------------------------------------ */ cudaStat = cudaMemcpy2DAsync (devPtrA + L_ENTRY*(j + j*gpu_lda), gpu_lda * L_ENTRY * sizeof (double), A + L_ENTRY * (j + j*lda), lda * L_ENTRY * sizeof (double), L_ENTRY * sizeof (double) * jb, jb, cudaMemcpyHostToDevice, Common->gpuStream [0]) ; if (cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ; } /* ------------------------------------------------------------------ */ /* do 
the CUDA BLAS dtrsm */ /* ------------------------------------------------------------------ */ if ((j+jb) < n) { #ifdef REAL alpha = 1.0 ; cublasStatus = cublasDtrsm (Common->cublasHandle, CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_T, CUBLAS_DIAG_NON_UNIT, (n-j-jb), jb, &alpha, devPtrA + (j + j*gpu_lda), gpu_lda, devPtrA + (j+jb + j*gpu_lda), gpu_lda) ; #else cuDoubleComplex calpha = {1.0,0.0}; cublasStatus = cublasZtrsm (Common->cublasHandle, CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_C, CUBLAS_DIAG_NON_UNIT, (n-j-jb), jb, &calpha, (cuDoubleComplex *)devPtrA + (j + j*gpu_lda), gpu_lda, (cuDoubleComplex *)devPtrA + (j+jb + j*gpu_lda), gpu_lda) ; #endif if (cublasStatus != CUBLAS_STATUS_SUCCESS) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ; } /* -------------------------------------------------------------- */ /* Copy factored column back to host. */ /* -------------------------------------------------------------- */ cudaStat = cudaEventRecord (Common->cublasEventPotrf[2], Common->gpuStream[0]) ; if (cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ; } cudaStat = cudaStreamWaitEvent (Common->gpuStream[1], Common->cublasEventPotrf[2], 0) ; if (cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ; } cudaStat = cudaMemcpy2DAsync (A + L_ENTRY*(j + jb + j * lda), lda * L_ENTRY * sizeof (double), devPtrA + L_ENTRY* (j + jb + j * gpu_lda), gpu_lda * L_ENTRY * sizeof (double), L_ENTRY * sizeof (double)* (n - j - jb), jb, cudaMemcpyDeviceToHost, Common->gpuStream[1]) ; if (cudaStat) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ; } } } #ifndef NTIMER Common->CHOLMOD_GPU_POTRF_TIME += SuiteSparse_time ( ) - tstart ; #endif return (1) ; } /* ========================================================================== */ /* === gpu_triangular_solve ================================================= */ /* ========================================================================== */ /* The current supernode is 
columns k1 to k2-1 of L.  Let L1 be the diagonal
 * block (factorized by dpotrf/zpotrf above; rows/cols k1:k2-1), and L2 be rows
 * k2:n-1 and columns k1:k2-1 of L.  The triangular system to solve is L2*L1' =
 * S2, where S2 is overwritten with L2.  More precisely, L2 = S2 / L1' in
 * MATLAB notation.
 */

/* Version with pre-allocation in POTRF */

/* Pipelined GPU triangular solve: the rows of S2 are processed in chunks of
 * at most gpu_row_max_chunk rows; each chunk is solved on the GPU (trsm) and
 * its result copied back to a pinned host buffer asynchronously, cycling
 * through CHOLMOD_HOST_SUPERNODE_BUFFERS streams/events so that device
 * compute and device-to-host copies overlap.  Returns 1 on completion, 0 if
 * there is nothing to do (nsrow2 <= 0). */
int TEMPLATE2 (CHOLMOD (gpu_triangular_solve))
(
    Int nsrow2,     /* L1 and S2 are nsrow2-by-nscol2 */
    Int nscol2,     /* L1 is nscol2-by-nscol2 */
    Int nsrow,      /* leading dimension of L1, L2, and S2 */
    Int psx,        /* L1 is at Lx+L_ENTRY*psx;
                     * L2 at Lx+L_ENTRY*(psx+nscol2)*/
    double *Lx,     /* holds L1, L2, and S2 */
    cholmod_common *Common,
    cholmod_gpu_pointers *gpu_p
)
{
    double *devPtrA, *devPtrB ;
    cudaError_t cudaStat ;
    cublasStatus_t cublasStatus ;
    Int gpu_lda, gpu_ldb, gpu_rowstep ;   /* NOTE(review): gpu_rowstep is declared but never used */

    Int gpu_row_start = 0 ;
    Int gpu_row_max_chunk, gpu_row_chunk;
    int ibuf = 0;                         /* index of the stream/event currently used */
    int iblock = 0;                       /* total number of chunks scheduled so far */
    /* host staging buffer chosen by the caller's buffer rotation */
    int iHostBuff = (Common->ibuffer+CHOLMOD_HOST_SUPERNODE_BUFFERS-1) % CHOLMOD_HOST_SUPERNODE_BUFFERS;
    int i, j;
    Int iidx;
    int iwrap;

#ifndef NTIMER
    double tstart ;
#endif

#ifdef REAL
    double alpha = 1.0 ;
    gpu_row_max_chunk = 768;
#else
    cuDoubleComplex calpha = {1.0,0.0} ;
    gpu_row_max_chunk = 256;
#endif

    if ( nsrow2 <= 0 )
    {
        return (0) ;
    }

#ifndef NTIMER
    tstart = SuiteSparse_time ( ) ;
    Common->CHOLMOD_GPU_TRSM_CALLS++ ;
#endif

    /* leading dimensions on the device, rounded up to a multiple of 32 */
    gpu_lda = ((nscol2+31)/32)*32 ;
    gpu_ldb = ((nsrow2+31)/32)*32 ;

    devPtrA = gpu_p->d_Lx[0];
    devPtrB = gpu_p->d_Lx[1];

    /* make sure the copy of B has completed */
    cudaStreamSynchronize( Common->gpuStream[0] );

    /* ---------------------------------------------------------------------- */
    /* do the CUDA BLAS dtrsm */
    /* ---------------------------------------------------------------------- */

    while ( gpu_row_start < nsrow2 )
    {
        /* next chunk of rows, capped at gpu_row_max_chunk */
        gpu_row_chunk = nsrow2 - gpu_row_start;
        if ( gpu_row_chunk > gpu_row_max_chunk ) {
            gpu_row_chunk = gpu_row_max_chunk;
        }

        cublasStatus = cublasSetStream ( Common->cublasHandle, Common->gpuStream[ibuf] );

        if ( cublasStatus != CUBLAS_STATUS_SUCCESS )
        {
            ERROR ( CHOLMOD_GPU_PROBLEM, "GPU CUBLAS stream");
        }

#ifdef REAL
        cublasStatus = cublasDtrsm (Common->cublasHandle,
            CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_LOWER,
            CUBLAS_OP_T, CUBLAS_DIAG_NON_UNIT,
            gpu_row_chunk, nscol2, &alpha,
            devPtrA, gpu_lda,
            devPtrB + gpu_row_start, gpu_ldb) ;
#else
        cublasStatus = cublasZtrsm (Common->cublasHandle,
            CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_LOWER,
            CUBLAS_OP_C, CUBLAS_DIAG_NON_UNIT,
            gpu_row_chunk, nscol2, &calpha,
            (const cuDoubleComplex *) devPtrA, gpu_lda,
            (cuDoubleComplex *)devPtrB + gpu_row_start , gpu_ldb) ;
#endif
        if (cublasStatus != CUBLAS_STATUS_SUCCESS)
        {
            ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ;
        }

        /* ------------------------------------------------------------------ */
        /* copy result back to the CPU */
        /* ------------------------------------------------------------------ */

        cudaStat = cudaMemcpy2DAsync (
            gpu_p->h_Lx[iHostBuff] + L_ENTRY*(nscol2+gpu_row_start),
            nsrow * L_ENTRY * sizeof (Lx [0]),
            devPtrB + L_ENTRY*gpu_row_start,
            gpu_ldb * L_ENTRY * sizeof (devPtrB [0]),
            gpu_row_chunk * L_ENTRY * sizeof (devPtrB [0]),
            nscol2,
            cudaMemcpyDeviceToHost,
            Common->gpuStream[ibuf]);

        if (cudaStat)
        {
            ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy from device") ;
        }

        /* event signals when this chunk's copy is done and its buffer free */
        cudaEventRecord ( Common->updateCBuffersFree[ibuf], Common->gpuStream[ibuf] );

        gpu_row_start += gpu_row_chunk;
        ibuf++;
        ibuf = ibuf % CHOLMOD_HOST_SUPERNODE_BUFFERS;

        iblock ++;

        if ( iblock >= CHOLMOD_HOST_SUPERNODE_BUFFERS )
        {
            Int gpu_row_start2 ;
            Int gpu_row_end ;

            /* then CHOLMOD_HOST_SUPERNODE_BUFFERS worth of work has been
             * scheduled, so check for completed events and copy result into
             * Lx before continuing. */
            cudaEventSynchronize ( Common->updateCBuffersFree [iblock%CHOLMOD_HOST_SUPERNODE_BUFFERS] );

            /* copy into Lx */
            gpu_row_start2 = nscol2 + (iblock-CHOLMOD_HOST_SUPERNODE_BUFFERS) *gpu_row_max_chunk;
            gpu_row_end = gpu_row_start2+gpu_row_max_chunk;

            if ( gpu_row_end > nsrow ) gpu_row_end = nsrow;

#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
    private(iidx) if ( nscol2 > 32 )

            for ( j=0; j<nscol2; j++ ) {
                for ( i=gpu_row_start2*L_ENTRY; i<gpu_row_end*L_ENTRY; i++ ) {
                    iidx = j*nsrow*L_ENTRY+i;
                    Lx[psx*L_ENTRY+iidx] = gpu_p->h_Lx[iHostBuff][iidx];
                }
            }
        }
    }

    /* Convenient to copy the L1 block here */
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
    private ( iidx ) if ( nscol2 > 32 )

    for ( j=0; j<nscol2; j++ ) {
        for ( i=j*L_ENTRY; i<nscol2*L_ENTRY; i++ ) {
            iidx = j*nsrow*L_ENTRY + i;
            Lx[psx*L_ENTRY+iidx] = gpu_p->h_Lx[iHostBuff][iidx];
        }
    }

    /* now account for the last HSTREAMS buffers */
    /* drain the pipeline: wait for the remaining in-flight chunks and copy
     * each one's rows from the host staging buffer into Lx */
    for ( iwrap=0; iwrap<CHOLMOD_HOST_SUPERNODE_BUFFERS; iwrap++ )
    {
        int i, j;
        Int gpu_row_start2 = nscol2 + (iblock-CHOLMOD_HOST_SUPERNODE_BUFFERS) *gpu_row_max_chunk;
        if (iblock-CHOLMOD_HOST_SUPERNODE_BUFFERS >= 0 && gpu_row_start2 < nsrow )
        {
            Int iidx;
            Int gpu_row_end = gpu_row_start2+gpu_row_max_chunk;
            if ( gpu_row_end > nsrow ) gpu_row_end = nsrow;
            cudaEventSynchronize ( Common->updateCBuffersFree [iblock%CHOLMOD_HOST_SUPERNODE_BUFFERS] );

            /* copy into Lx */
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
    private(iidx) if ( nscol2 > 32 )

            for ( j=0; j<nscol2; j++ ) {
                for ( i=gpu_row_start2*L_ENTRY; i<gpu_row_end*L_ENTRY; i++ ) {
                    iidx = j*nsrow*L_ENTRY+i;
                    Lx[psx*L_ENTRY+iidx] = gpu_p->h_Lx[iHostBuff][iidx];
                }
            }
        }
        iblock++;
    }

    /* ---------------------------------------------------------------------- */
    /* return */
    /* ---------------------------------------------------------------------- */

#ifndef NTIMER
    Common->CHOLMOD_GPU_TRSM_TIME += SuiteSparse_time ( ) - tstart ;
#endif

    return (1) ;
}

/*
========================================================================== */ /* === gpu_copy_supernode =================================================== */ /* ========================================================================== */ /* * In the event gpu_triangular_sovle is not needed / called, this routine * copies the factored diagonal block from the GPU to the CPU. */ void TEMPLATE2 (CHOLMOD (gpu_copy_supernode)) ( cholmod_common *Common, double *Lx, Int psx, Int nscol, Int nscol2, Int nsrow, int supernodeUsedGPU, int iHostBuff, cholmod_gpu_pointers *gpu_p ) { Int iidx, i, j; if ( supernodeUsedGPU && nscol2 * L_ENTRY >= CHOLMOD_POTRF_LIMIT ) { cudaDeviceSynchronize(); #pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \ private(iidx,i,j) if (nscol>32) for ( j=0; j<nscol; j++ ) { for ( i=j*L_ENTRY; i<nscol*L_ENTRY; i++ ) { iidx = j*nsrow*L_ENTRY+i; Lx[psx*L_ENTRY+iidx] = gpu_p->h_Lx[iHostBuff][iidx]; } } } return; } #endif #undef REAL #undef COMPLEX #undef ZOMPLEX
multisort.c
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <sys/time.h>

/* Wall-clock time stamp in microseconds (gettimeofday-based). */
double getusec_() {
        struct timeval time;
        gettimeofday(&time, NULL);
        return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec);
}

#define START_COUNT_TIME stamp = getusec_();
/* Wrapped in do { } while (0) so this multi-statement macro behaves as a
 * single statement (safe after an unbraced if/else). */
#define STOP_COUNT_TIME(_m) do { stamp = getusec_() - stamp;\
                        stamp = stamp/1e6;\
                        printf ("%s: %0.6f\n",(_m), stamp); } while (0)

// N and MIN must be powers of 2
long N;
long MIN_SORT_SIZE;
long MIN_MERGE_SIZE;

#define BLOCK_SIZE 1024L

#define T int

/* Leaf kernels, provided by a separate translation unit. */
void basicsort(long n, T data[n]);

void basicmerge(long n, T left[n], T right[n], T result[n*2], long start, long length);

/* Merge left[] and right[] (each of size n) into result[start..start+length-1],
 * recursively splitting the output range until it is small enough for the
 * basicmerge leaf kernel. */
void merge(long n, T left[n], T right[n], T result[n*2], long start, long length) {
        if (length < MIN_MERGE_SIZE*2L) {
                // Base case
                basicmerge(n, left, right, result, start, length);
        } else {
                // Recursive decomposition
                merge(n, left, right, result, start, length/2);
                merge(n, left, right, result, start + length/2, length/2);
        }
}

/* Sort data[0..n-1] using tmp[0..n-1] as scratch: recursively sort the four
 * quarters, merge quarter pairs into tmp, then merge the two halves of tmp
 * back into data. */
void multisort(long n, T data[n], T tmp[n]) {
        if (n >= MIN_SORT_SIZE*4L) {
                // Recursive decomposition
                multisort(n/4L, &data[0], &tmp[0]);
                multisort(n/4L, &data[n/4L], &tmp[n/4L]);
                multisort(n/4L, &data[n/2L], &tmp[n/2L]);
                multisort(n/4L, &data[3L*n/4L], &tmp[3L*n/4L]);

                merge(n/4L, &data[0], &data[n/4L], &tmp[0], 0, n/2L);
                merge(n/4L, &data[n/2L], &data[3L*n/4L], &tmp[n/2L], 0, n/2L);

                merge(n/2L, &tmp[0], &tmp[n/2L], &data[0], 0, n);
        } else {
                // Base case
//		#pragma omp task
                basicsort(n, data);
        }
}

/* Deterministic pseudo-random fill (no srand call, so runs are repeatable). */
static void initialize(long length, T data[length]) {
        long i;

        for (i = 0; i < length; i++) {
                if (i==0) {
                        data[i] = rand();
                } else {
                        data[i] = ((data[i-1]+1) * i * 104723L) % N;
                }
        }
}

static void clear(long length, T data[length]) {
        long i;

        for (i = 0; i < length; i++) {
                data[i] = 0;
        }
}

/* Count adjacent inversions.  Uses long for the index and the counter: the
 * original used int, which overflows when n > INT_MAX (N is a long). */
void check_sorted(long n, T data[n]) {
        long unsorted=0;
        for (long i=1; i<n; i++)
                if (data[i-1] > data[i]) unsorted++;
        if (unsorted > 0)
                printf ("\nERROR: data is NOT properly sorted. There are %ld unordered positions\n\n",unsorted);
        else {
//		printf ("data IS ordered; ");
        }
}

int main(int argc, char **argv) {
        /* check program arguments */
        if (argc != 4) {
                fprintf(stderr, "Usage: %s <vector size in K> <sort size in K> <merge size in K>\n", argv[0]);
                return 1;
        }
        N = atol(argv[1]) * BLOCK_SIZE;
        MIN_SORT_SIZE = atol(argv[2]) * BLOCK_SIZE;
        MIN_MERGE_SIZE = atol(argv[3]) * BLOCK_SIZE;

        T *data = malloc(N*sizeof(T));
        T *tmp = malloc(N*sizeof(T));
        if (data == NULL || tmp == NULL) {
                /* the original dereferenced unchecked malloc results */
                fprintf(stderr, "Error: cannot allocate %ld bytes\n", 2L*N*(long)sizeof(T));
                free(data);
                free(tmp);
                return 1;
        }

        double stamp;
        START_COUNT_TIME;

        initialize(N, data);
        clear(N, tmp);

        STOP_COUNT_TIME("Initialization time in seconds");

        START_COUNT_TIME;

//	#pragma omp parallel
//	#pragma omp single
        multisort(N, data, tmp);

        STOP_COUNT_TIME("Multisort execution time");

        START_COUNT_TIME;

        check_sorted (N, data);

        STOP_COUNT_TIME("Check sorted data execution time");

        free(data);
        free(tmp);

        fprintf(stdout, "Multisort program finished\n");
        return 0;
}
linalg.h
/** * Copyright (c) 2020, Massachusetts Institute of Technology, * Cambridge, MA 02139 * All Rights Reserved * Authors: Jingnan Shi, et al. (see THANKS for the full author list) * See LICENSE for the license information */ #pragma once #include <iostream> #include <Eigen/Core> #include <Eigen/SparseCore> #include <Eigen/Eigenvalues> namespace teaser { /** * Return the hat map of the provided vector (a skew symmetric matrix). * @param u 3-by-1 vector * @param x 3-by-3 skew symmetric matrix */ Eigen::Matrix<float, 3, 3> hatmap(const Eigen::Matrix<float, 3, 1>& u) { Eigen::Matrix<float, 3, 3> x; // clang-format off x << 0, -u(2), u(1), u(2), 0, -u(0), -u(1), u(0), 0; // clang-format on return x; } /** * Vector-vector kronecker product function with fixed-size output * @tparam NumT * @tparam N size of the first vector * @tparam M size of the second vector * @param v1 [in] first vector * @param v2 [in] second vector * @param output [out] output vector */ template <typename NumT, int N, int M> void vectorKron(const Eigen::Matrix<NumT, N, 1>& v1, const Eigen::Matrix<NumT, M, 1>& v2, Eigen::Matrix<NumT, N * M, 1>* output) { #pragma omp parallel for collapse(2) shared(v1, v2, output) default(none) for (size_t i = 0; i < N; ++i) { for (size_t j = 0; j < M; ++j) { (*output)[i * M + j] = v1[i] * v2[j]; } } } /** * Vector-vector kronecker product function with dynamic-size output * @tparam NumT numerical type for Eigen matrices (float, float, etc.) 
* @param v1 [in] first vector * @param v2 [in] second vector * @return Result of kronecker product */ template <typename NumT, int N, int M> Eigen::Matrix<NumT, Eigen::Dynamic, 1> vectorKron(const Eigen::Matrix<NumT, N, 1>& v1, const Eigen::Matrix<NumT, M, 1>& v2) { Eigen::Matrix<float, Eigen::Dynamic, 1> output(v1.rows() * v2.rows(), 1); #pragma omp parallel for collapse(2) shared(v1, v2, output) default(none) for (size_t i = 0; i < v1.rows(); ++i) { for (size_t j = 0; j < v2.rows(); ++j) { output[i * v2.rows() + j] = v1[i] * v2[j]; } } return output; } /** * Find the nearest (in Frobenius norm) Symmetric Positive Definite matrix to A * * See: https://www.sciencedirect.com/science/article/pii/0024379588902236 * * @tparam NumT numerical type for Eigen matrices (float, float, etc.) * @param A [in] input matrix * @param nearestPSD [out] output neaest positive semi-definite matrix * @param eig_threshold [in] optional threshold of determining the smallest eigen values */ template <typename NumT> void getNearestPSD(const Eigen::Matrix<NumT, Eigen::Dynamic, Eigen::Dynamic>& A, Eigen::Matrix<NumT, Eigen::Dynamic, Eigen::Dynamic>* nearestPSD) { assert(A.rows() == A.cols()); nearestPSD->resize(A.rows(), A.cols()); // symmetrize A into B Eigen::MatrixXf B = (A + A.transpose()) / 2; // eigendecomposition of B Eigen::SelfAdjointEigenSolver<Eigen::MatrixXf> eig_B(B); Eigen::VectorXf De = eig_B.eigenvalues(); Eigen::MatrixXf De_positive = (De.array() < 0).select(0, De).asDiagonal(); Eigen::MatrixXf Ve = eig_B.eigenvectors(); *nearestPSD = Ve * De_positive * Ve.transpose(); } } // namespace teaser
QLA_D3_D_vpeq_spproj_M_times_pD.c
/**************** QLA_D3_D_vpeq_spproj_M_times_pD.c ********************/

#include <stdio.h>
#include <qla_config.h>
#include <qla_types.h>
#include <qla_random.h>
#include <qla_cmath.h>
#include <qla_d3.h>
#include <math.h>

/* Empty inline-asm statements act as optimization barriers marking the
 * start and end of the routine's body. */
static void start_slice(){ __asm__ __volatile__ (""); }
static void end_slice(){ __asm__ __volatile__ (""); }

/* For each site i in 0..n-1:
 *   1. project the four spin components of the Dirac fermion *b[i] down to a
 *      two-spin "half fermion" t1 (the combination of components depends on
 *      mu and sign);
 *   2. multiply: t2 = a[i] * t1 (3x3 color matrix times half fermion);
 *   3. accumulate t2 back into the four spin components of r[i] (the
 *      reconstruction pattern again depends on mu and sign).
 *
 * r    accumulated Dirac fermions, updated in place (r[i] += ...)
 * a    color matrices, one per site
 * b    array of pointers to source Dirac fermions
 * mu   direction index 0..4; mu == 4 copies spin components directly
 *      with no i*component mixing
 * sign sign == 1 selects the first projection set; any other value selects
 *      the second (presumably the opposite-direction projector -- this is
 *      generated QLA code, verify against the generator if in doubt)
 * n    number of sites
 */
void QLA_D3_D_vpeq_spproj_M_times_pD ( QLA_D3_DiracFermion *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_DiracFermion *restrict *b, int mu, int sign , int n)
{
  start_slice();
#ifdef HAVE_XLC
#pragma disjoint(*r,*a,**b)
  __alignx(16,r);
  __alignx(16,a);
#endif
  if(sign==1) {
    switch(mu) {
    case 0:
      {
#pragma omp parallel for
	for(int i=0; i<n; i++) {
#ifdef HAVE_XLC
	  __alignx(16,b[i]);
#endif
	  QLA_D3_HalfFermion t1;
	  QLA_D3_HalfFermion t2;
	  /* project: t1_0 = b_0 + i*b_3 ; t1_1 = b_1 + i*b_2 */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      QLA_c_eq_c_plus_ic(QLA_D3_elem_H(t1,i_c,0), QLA_D3_elem_D(*b[i],i_c,0), QLA_D3_elem_D(*b[i],i_c,3));
	      QLA_c_eq_c_plus_ic(QLA_D3_elem_H(t1,i_c,1), QLA_D3_elem_D(*b[i],i_c,1), QLA_D3_elem_D(*b[i],i_c,2));
	    }
	  }
	  /* t2 = a[i] * t1 (color-matrix times half fermion) */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      for(int i_s=0; i_s<2; i_s++) {
		QLA_D_Complex x;
		QLA_c_eq_r(x,0.);
		for(int k_c=0; k_c<3; k_c++) {
		  QLA_c_peq_c_times_c(x, QLA_D3_elem_M(a[i],i_c,k_c), QLA_D3_elem_H(t1,k_c,i_s));
		}
		QLA_c_eq_c(QLA_D3_elem_H(t2,i_c,i_s),x);
	      }
	    }
	  }
	  /* reconstruct: r_0 += t2_0 ; r_1 += t2_1 ; r_2 -= i*t2_1 ; r_3 -= i*t2_0 */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      QLA_c_peq_c(QLA_D3_elem_D(r[i],i_c,0), QLA_D3_elem_H(t2,i_c,0));
	      QLA_c_peq_c(QLA_D3_elem_D(r[i],i_c,1), QLA_D3_elem_H(t2,i_c,1));
	      QLA_c_meq_ic(QLA_D3_elem_D(r[i],i_c,2), QLA_D3_elem_H(t2,i_c,1));
	      QLA_c_meq_ic(QLA_D3_elem_D(r[i],i_c,3), QLA_D3_elem_H(t2,i_c,0));
	    }
	  }
	}
      }
      break;
    case 1:
      {
#pragma omp parallel for
	for(int i=0; i<n; i++) {
#ifdef HAVE_XLC
	  __alignx(16,b[i]);
#endif
	  QLA_D3_HalfFermion t1;
	  QLA_D3_HalfFermion t2;
	  /* project: t1_0 = b_0 - b_3 ; t1_1 = b_1 + b_2 */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      QLA_c_eq_c_minus_c(QLA_D3_elem_H(t1,i_c,0), QLA_D3_elem_D(*b[i],i_c,0), QLA_D3_elem_D(*b[i],i_c,3));
	      QLA_c_eq_c_plus_c(QLA_D3_elem_H(t1,i_c,1), QLA_D3_elem_D(*b[i],i_c,1), QLA_D3_elem_D(*b[i],i_c,2));
	    }
	  }
	  /* t2 = a[i] * t1 */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      for(int i_s=0; i_s<2; i_s++) {
		QLA_D_Complex x;
		QLA_c_eq_r(x,0.);
		for(int k_c=0; k_c<3; k_c++) {
		  QLA_c_peq_c_times_c(x, QLA_D3_elem_M(a[i],i_c,k_c), QLA_D3_elem_H(t1,k_c,i_s));
		}
		QLA_c_eq_c(QLA_D3_elem_H(t2,i_c,i_s),x);
	      }
	    }
	  }
	  /* reconstruct: r_0 += t2_0 ; r_1 += t2_1 ; r_2 += t2_1 ; r_3 -= t2_0 */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      QLA_c_peq_c(QLA_D3_elem_D(r[i],i_c,0), QLA_D3_elem_H(t2,i_c,0));
	      QLA_c_peq_c(QLA_D3_elem_D(r[i],i_c,1), QLA_D3_elem_H(t2,i_c,1));
	      QLA_c_peq_c(QLA_D3_elem_D(r[i],i_c,2), QLA_D3_elem_H(t2,i_c,1));
	      QLA_c_meq_c(QLA_D3_elem_D(r[i],i_c,3), QLA_D3_elem_H(t2,i_c,0));
	    }
	  }
	}
      }
      break;
    case 2:
      {
#pragma omp parallel for
	for(int i=0; i<n; i++) {
#ifdef HAVE_XLC
	  __alignx(16,b[i]);
#endif
	  QLA_D3_HalfFermion t1;
	  QLA_D3_HalfFermion t2;
	  /* project: t1_0 = b_0 + i*b_2 ; t1_1 = b_1 - i*b_3 */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      QLA_c_eq_c_plus_ic(QLA_D3_elem_H(t1,i_c,0), QLA_D3_elem_D(*b[i],i_c,0), QLA_D3_elem_D(*b[i],i_c,2));
	      QLA_c_eq_c_minus_ic(QLA_D3_elem_H(t1,i_c,1), QLA_D3_elem_D(*b[i],i_c,1), QLA_D3_elem_D(*b[i],i_c,3));
	    }
	  }
	  /* t2 = a[i] * t1 */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      for(int i_s=0; i_s<2; i_s++) {
		QLA_D_Complex x;
		QLA_c_eq_r(x,0.);
		for(int k_c=0; k_c<3; k_c++) {
		  QLA_c_peq_c_times_c(x, QLA_D3_elem_M(a[i],i_c,k_c), QLA_D3_elem_H(t1,k_c,i_s));
		}
		QLA_c_eq_c(QLA_D3_elem_H(t2,i_c,i_s),x);
	      }
	    }
	  }
	  /* reconstruct: r_0 += t2_0 ; r_1 += t2_1 ; r_2 -= i*t2_0 ; r_3 += i*t2_1 */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      QLA_c_peq_c(QLA_D3_elem_D(r[i],i_c,0), QLA_D3_elem_H(t2,i_c,0));
	      QLA_c_peq_c(QLA_D3_elem_D(r[i],i_c,1), QLA_D3_elem_H(t2,i_c,1));
	      QLA_c_meq_ic(QLA_D3_elem_D(r[i],i_c,2), QLA_D3_elem_H(t2,i_c,0));
	      QLA_c_peq_ic(QLA_D3_elem_D(r[i],i_c,3), QLA_D3_elem_H(t2,i_c,1));
	    }
	  }
	}
      }
      break;
    case 3:
      {
#pragma omp parallel for
	for(int i=0; i<n; i++) {
#ifdef HAVE_XLC
	  __alignx(16,b[i]);
#endif
	  QLA_D3_HalfFermion t1;
	  QLA_D3_HalfFermion t2;
	  /* project: t1_0 = b_0 + b_2 ; t1_1 = b_1 + b_3 */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      QLA_c_eq_c_plus_c(QLA_D3_elem_H(t1,i_c,0), QLA_D3_elem_D(*b[i],i_c,0), QLA_D3_elem_D(*b[i],i_c,2));
	      QLA_c_eq_c_plus_c(QLA_D3_elem_H(t1,i_c,1), QLA_D3_elem_D(*b[i],i_c,1), QLA_D3_elem_D(*b[i],i_c,3));
	    }
	  }
	  /* t2 = a[i] * t1 */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      for(int i_s=0; i_s<2; i_s++) {
		QLA_D_Complex x;
		QLA_c_eq_r(x,0.);
		for(int k_c=0; k_c<3; k_c++) {
		  QLA_c_peq_c_times_c(x, QLA_D3_elem_M(a[i],i_c,k_c), QLA_D3_elem_H(t1,k_c,i_s));
		}
		QLA_c_eq_c(QLA_D3_elem_H(t2,i_c,i_s),x);
	      }
	    }
	  }
	  /* reconstruct: r_0 += t2_0 ; r_1 += t2_1 ; r_2 += t2_0 ; r_3 += t2_1 */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      QLA_c_peq_c(QLA_D3_elem_D(r[i],i_c,0), QLA_D3_elem_H(t2,i_c,0));
	      QLA_c_peq_c(QLA_D3_elem_D(r[i],i_c,1), QLA_D3_elem_H(t2,i_c,1));
	      QLA_c_peq_c(QLA_D3_elem_D(r[i],i_c,2), QLA_D3_elem_H(t2,i_c,0));
	      QLA_c_peq_c(QLA_D3_elem_D(r[i],i_c,3), QLA_D3_elem_H(t2,i_c,1));
	    }
	  }
	}
      }
      break;
    case 4:
      {
#pragma omp parallel for
	for(int i=0; i<n; i++) {
#ifdef HAVE_XLC
	  __alignx(16,b[i]);
#endif
	  QLA_D3_HalfFermion t1;
	  QLA_D3_HalfFermion t2;
	  /* project: t1_0 = b_0 ; t1_1 = b_1 (upper spin components only) */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      QLA_c_eq_c(QLA_D3_elem_H(t1,i_c,0), QLA_D3_elem_D(*b[i],i_c,0));
	      QLA_c_eq_c(QLA_D3_elem_H(t1,i_c,1), QLA_D3_elem_D(*b[i],i_c,1));
	    }
	  }
	  /* t2 = a[i] * t1 */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      for(int i_s=0; i_s<2; i_s++) {
		QLA_D_Complex x;
		QLA_c_eq_r(x,0.);
		for(int k_c=0; k_c<3; k_c++) {
		  QLA_c_peq_c_times_c(x, QLA_D3_elem_M(a[i],i_c,k_c), QLA_D3_elem_H(t1,k_c,i_s));
		}
		QLA_c_eq_c(QLA_D3_elem_H(t2,i_c,i_s),x);
	      }
	    }
	  }
	  /* reconstruct: r_0 += t2_0 ; r_1 += t2_1 (spins 2,3 untouched) */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      QLA_c_peq_c(QLA_D3_elem_D(r[i],i_c,0), QLA_D3_elem_H(t2,i_c,0));
	      QLA_c_peq_c(QLA_D3_elem_D(r[i],i_c,1), QLA_D3_elem_H(t2,i_c,1));
	    }
	  }
	}
      }
      break;
    }
  } else {
    /* sign != 1: same structure, second projection/reconstruction set */
    switch(mu) {
    case 0:
      {
#pragma omp parallel for
	for(int i=0; i<n; i++) {
#ifdef HAVE_XLC
	  __alignx(16,b[i]);
#endif
	  QLA_D3_HalfFermion t1;
	  QLA_D3_HalfFermion t2;
	  /* project: t1_0 = b_0 - i*b_3 ; t1_1 = b_1 - i*b_2 */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      QLA_c_eq_c_minus_ic(QLA_D3_elem_H(t1,i_c,0), QLA_D3_elem_D(*b[i],i_c,0), QLA_D3_elem_D(*b[i],i_c,3));
	      QLA_c_eq_c_minus_ic(QLA_D3_elem_H(t1,i_c,1), QLA_D3_elem_D(*b[i],i_c,1), QLA_D3_elem_D(*b[i],i_c,2));
	    }
	  }
	  /* t2 = a[i] * t1 */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      for(int i_s=0; i_s<2; i_s++) {
		QLA_D_Complex x;
		QLA_c_eq_r(x,0.);
		for(int k_c=0; k_c<3; k_c++) {
		  QLA_c_peq_c_times_c(x, QLA_D3_elem_M(a[i],i_c,k_c), QLA_D3_elem_H(t1,k_c,i_s));
		}
		QLA_c_eq_c(QLA_D3_elem_H(t2,i_c,i_s),x);
	      }
	    }
	  }
	  /* reconstruct: r_0 += t2_0 ; r_1 += t2_1 ; r_2 += i*t2_1 ; r_3 += i*t2_0 */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      QLA_c_peq_c(QLA_D3_elem_D(r[i],i_c,0), QLA_D3_elem_H(t2,i_c,0));
	      QLA_c_peq_c(QLA_D3_elem_D(r[i],i_c,1), QLA_D3_elem_H(t2,i_c,1));
	      QLA_c_peq_ic(QLA_D3_elem_D(r[i],i_c,2), QLA_D3_elem_H(t2,i_c,1));
	      QLA_c_peq_ic(QLA_D3_elem_D(r[i],i_c,3), QLA_D3_elem_H(t2,i_c,0));
	    }
	  }
	}
      }
      break;
    case 1:
      {
#pragma omp parallel for
	for(int i=0; i<n; i++) {
#ifdef HAVE_XLC
	  __alignx(16,b[i]);
#endif
	  QLA_D3_HalfFermion t1;
	  QLA_D3_HalfFermion t2;
	  /* project: t1_0 = b_0 + b_3 ; t1_1 = b_1 - b_2 */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      QLA_c_eq_c_plus_c(QLA_D3_elem_H(t1,i_c,0), QLA_D3_elem_D(*b[i],i_c,0), QLA_D3_elem_D(*b[i],i_c,3));
	      QLA_c_eq_c_minus_c(QLA_D3_elem_H(t1,i_c,1), QLA_D3_elem_D(*b[i],i_c,1), QLA_D3_elem_D(*b[i],i_c,2));
	    }
	  }
	  /* t2 = a[i] * t1 */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      for(int i_s=0; i_s<2; i_s++) {
		QLA_D_Complex x;
		QLA_c_eq_r(x,0.);
		for(int k_c=0; k_c<3; k_c++) {
		  QLA_c_peq_c_times_c(x, QLA_D3_elem_M(a[i],i_c,k_c), QLA_D3_elem_H(t1,k_c,i_s));
		}
		QLA_c_eq_c(QLA_D3_elem_H(t2,i_c,i_s),x);
	      }
	    }
	  }
	  /* reconstruct: r_0 += t2_0 ; r_1 += t2_1 ; r_2 -= t2_1 ; r_3 += t2_0 */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      QLA_c_peq_c(QLA_D3_elem_D(r[i],i_c,0), QLA_D3_elem_H(t2,i_c,0));
	      QLA_c_peq_c(QLA_D3_elem_D(r[i],i_c,1), QLA_D3_elem_H(t2,i_c,1));
	      QLA_c_meq_c(QLA_D3_elem_D(r[i],i_c,2), QLA_D3_elem_H(t2,i_c,1));
	      QLA_c_peq_c(QLA_D3_elem_D(r[i],i_c,3), QLA_D3_elem_H(t2,i_c,0));
	    }
	  }
	}
      }
      break;
    case 2:
      {
#pragma omp parallel for
	for(int i=0; i<n; i++) {
#ifdef HAVE_XLC
	  __alignx(16,b[i]);
#endif
	  QLA_D3_HalfFermion t1;
	  QLA_D3_HalfFermion t2;
	  /* project: t1_0 = b_0 - i*b_2 ; t1_1 = b_1 + i*b_3 */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      QLA_c_eq_c_minus_ic(QLA_D3_elem_H(t1,i_c,0), QLA_D3_elem_D(*b[i],i_c,0), QLA_D3_elem_D(*b[i],i_c,2));
	      QLA_c_eq_c_plus_ic(QLA_D3_elem_H(t1,i_c,1), QLA_D3_elem_D(*b[i],i_c,1), QLA_D3_elem_D(*b[i],i_c,3));
	    }
	  }
	  /* t2 = a[i] * t1 */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      for(int i_s=0; i_s<2; i_s++) {
		QLA_D_Complex x;
		QLA_c_eq_r(x,0.);
		for(int k_c=0; k_c<3; k_c++) {
		  QLA_c_peq_c_times_c(x, QLA_D3_elem_M(a[i],i_c,k_c), QLA_D3_elem_H(t1,k_c,i_s));
		}
		QLA_c_eq_c(QLA_D3_elem_H(t2,i_c,i_s),x);
	      }
	    }
	  }
	  /* reconstruct: r_0 += t2_0 ; r_1 += t2_1 ; r_2 += i*t2_0 ; r_3 -= i*t2_1 */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      QLA_c_peq_c(QLA_D3_elem_D(r[i],i_c,0), QLA_D3_elem_H(t2,i_c,0));
	      QLA_c_peq_c(QLA_D3_elem_D(r[i],i_c,1), QLA_D3_elem_H(t2,i_c,1));
	      QLA_c_peq_ic(QLA_D3_elem_D(r[i],i_c,2), QLA_D3_elem_H(t2,i_c,0));
	      QLA_c_meq_ic(QLA_D3_elem_D(r[i],i_c,3), QLA_D3_elem_H(t2,i_c,1));
	    }
	  }
	}
      }
      break;
    case 3:
      {
#pragma omp parallel for
	for(int i=0; i<n; i++) {
#ifdef HAVE_XLC
	  __alignx(16,b[i]);
#endif
	  QLA_D3_HalfFermion t1;
	  QLA_D3_HalfFermion t2;
	  /* project: t1_0 = b_0 - b_2 ; t1_1 = b_1 - b_3 */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      QLA_c_eq_c_minus_c(QLA_D3_elem_H(t1,i_c,0), QLA_D3_elem_D(*b[i],i_c,0), QLA_D3_elem_D(*b[i],i_c,2));
	      QLA_c_eq_c_minus_c(QLA_D3_elem_H(t1,i_c,1), QLA_D3_elem_D(*b[i],i_c,1), QLA_D3_elem_D(*b[i],i_c,3));
	    }
	  }
	  /* t2 = a[i] * t1 */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      for(int i_s=0; i_s<2; i_s++) {
		QLA_D_Complex x;
		QLA_c_eq_r(x,0.);
		for(int k_c=0; k_c<3; k_c++) {
		  QLA_c_peq_c_times_c(x, QLA_D3_elem_M(a[i],i_c,k_c), QLA_D3_elem_H(t1,k_c,i_s));
		}
		QLA_c_eq_c(QLA_D3_elem_H(t2,i_c,i_s),x);
	      }
	    }
	  }
	  /* reconstruct: r_0 += t2_0 ; r_1 += t2_1 ; r_2 -= t2_0 ; r_3 -= t2_1 */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      QLA_c_peq_c(QLA_D3_elem_D(r[i],i_c,0), QLA_D3_elem_H(t2,i_c,0));
	      QLA_c_peq_c(QLA_D3_elem_D(r[i],i_c,1), QLA_D3_elem_H(t2,i_c,1));
	      QLA_c_meq_c(QLA_D3_elem_D(r[i],i_c,2), QLA_D3_elem_H(t2,i_c,0));
	      QLA_c_meq_c(QLA_D3_elem_D(r[i],i_c,3), QLA_D3_elem_H(t2,i_c,1));
	    }
	  }
	}
      }
      break;
    case 4:
      {
#pragma omp parallel for
	for(int i=0; i<n; i++) {
#ifdef HAVE_XLC
	  __alignx(16,b[i]);
#endif
	  QLA_D3_HalfFermion t1;
	  QLA_D3_HalfFermion t2;
	  /* project: t1_0 = b_2 ; t1_1 = b_3 (lower spin components only) */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      QLA_c_eq_c(QLA_D3_elem_H(t1,i_c,0), QLA_D3_elem_D(*b[i],i_c,2));
	      QLA_c_eq_c(QLA_D3_elem_H(t1,i_c,1), QLA_D3_elem_D(*b[i],i_c,3));
	    }
	  }
	  /* t2 = a[i] * t1 */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      for(int i_s=0; i_s<2; i_s++) {
		QLA_D_Complex x;
		QLA_c_eq_r(x,0.);
		for(int k_c=0; k_c<3; k_c++) {
		  QLA_c_peq_c_times_c(x, QLA_D3_elem_M(a[i],i_c,k_c), QLA_D3_elem_H(t1,k_c,i_s));
		}
		QLA_c_eq_c(QLA_D3_elem_H(t2,i_c,i_s),x);
	      }
	    }
	  }
	  /* reconstruct: r_2 += t2_0 ; r_3 += t2_1 (spins 0,1 untouched) */
	  {
	    for(int i_c=0; i_c<3; i_c++) {
	      QLA_c_peq_c(QLA_D3_elem_D(r[i],i_c,2), QLA_D3_elem_H(t2,i_c,0));
	      QLA_c_peq_c(QLA_D3_elem_D(r[i],i_c,3), QLA_D3_elem_H(t2,i_c,1));
	    }
	  }
	}
      }
      break;
    }
  }
  end_slice();
}
23_omp_sections.c
#include <stdio.h>
#include <omp.h>

/* OpenMP "sections" demo: the four greetings are independent section blocks
 * inside one parallel region.  Each section is executed exactly once, by
 * whichever thread of the team picks it up, so the printed thread ids (and
 * the output order) vary from run to run. */
int main()
{
  printf("Hello in different sections\n");
  #pragma omp parallel
  {
    #pragma omp sections
    {
      /* each of the following runs once, on some thread of the team */
      #pragma omp section
      printf("Hello %i\n", omp_get_thread_num());
      #pragma omp section
      printf("Olá %i\n",omp_get_thread_num());
      #pragma omp section
      printf("Hola %i\n",omp_get_thread_num());
      #pragma omp section
      printf("Heghlu'meH QaQ jajvam %i\n",omp_get_thread_num());
    }
  }
  return 0;
}
GB_binop__pow_uint32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): machine-generated kernel instantiations for the POW operator
// on uint32_t; any change belongs in the Generator templates, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):        GB (_AaddB__pow_uint32)
// A.*B function (eWiseMult):      GB (_AemultB_08__pow_uint32)
// A.*B function (eWiseMult):      GB (_AemultB_02__pow_uint32)
// A.*B function (eWiseMult):      GB (_AemultB_04__pow_uint32)
// A.*B function (eWiseMult):      GB (_AemultB_bitmap__pow_uint32)
// A*D function (colscale):        GB ((none))
// D*A function (rowscale):        GB ((none))
// C+=B function (dense accum):    GB (_Cdense_accumB__pow_uint32)
// C+=b function (dense accum):    GB (_Cdense_accumb__pow_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3):  GB (_Cdense_ewise3_noaccum__pow_uint32)
// C=scalar+B                      GB (_bind1st__pow_uint32)
// C=scalar+B'                     GB (_bind1st_tran__pow_uint32)
// C=A+scalar                      GB (_bind2nd__pow_uint32)
// C=A'+scalar                     GB (_bind2nd_tran__pow_uint32)

// C type:   uint32_t
// A type:   uint32_t
// B,b type: uint32_t

// BinaryOp: cij = GB_pow_uint32 (aij, bij)

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint32_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint32_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_pow_uint32 (x, y) ;

// true if the binop must be flipped
// (pow is not commutative, and has no predefined flipped variant, so the
// templates handle flipxy explicitly via GB_FLIPPED below)
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_POW || GxB_NO_UINT32 || GxB_NO_POW_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__pow_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pow_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pow_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pow_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__pow_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__pow_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__pow_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__pow_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__pow_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t   x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_pow_uint32 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__pow_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t   y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_pow_uint32 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint32_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = GB_pow_uint32 (x, aij) ;          \
}

GrB_Info GB (_bind1st_tran__pow_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow_uint32 (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__pow_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
convolution_pack1to4_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Convolution, fp16 storage with fp32 arithmetic ("fp16s"): weights are read
// as fp16 and widened to fp32 for the multiply-accumulate, then the result is
// narrowed back to fp16 on store.  "pack1to4" — input is read one scalar at a
// time while the output/weights use 4-wide packing (kptr advances by 4 per
// tap, output stored 4 lanes at outptr + j * 4).
// assumes top_blob is pre-sized to the convolution output and weight_data_fp16
// holds maxk*channels 4-lane taps per output channel — layout per ncnn pack4.
static void convolution_pack1to4_fp16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets: flattened per-tap pixel offsets within a row-major
    // input channel, accounting for dilation
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const float* bias_data_ptr = bias_data;

    // num_output: one parallel task per output channel
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // start from the 4-lane bias (or zero) for this output channel
                float32x4_t _sum = vdupq_n_f32(0.f);

                if (bias_data_ptr)
                {
                    _sum = vld1q_f32(bias_data_ptr + p * 4);
                }

                const __fp16* kptr = weight_data_fp16.channel(p);

                // channels: accumulate over every input channel and kernel tap
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const __fp16* sptr = m.row<const __fp16>(i * stride_h) + j * stride_w;

                    for (int k = 0; k < maxk; k++)
                    {
                        // broadcast the scalar input sample, widen both
                        // operands to fp32, fused multiply-add
                        float32x4_t _val = vcvt_f32_f16(vdup_n_f16(sptr[space_ofs[k]]));
                        float32x4_t _w = vcvt_f32_f16(vld1_f16(kptr));
                        _sum = vfmaq_f32(_sum, _val, _w);

                        kptr += 4;
                    }
                }

                _sum = activation_ps(_sum, activation_type, activation_params);

                // narrow back to fp16 for storage
                vst1_f16(outptr + j * 4, vcvt_f16_f32(_sum));
            }

            outptr += outw * 4;
        }
    }
}

// Same convolution, full fp16 arithmetic ("fp16sa"): accumulation stays in
// fp16 (vfma_f16), so it is faster but less precise than the fp16s variant
// above.  Requires ARMv8.2-A fp16 arithmetic support.
static void convolution_pack1to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data_fp16, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets (same construction as the fp16s variant)
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const __fp16* bias_data_ptr = bias_data_fp16;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                float16x4_t _sum = vdup_n_f16((__fp16)0.f);

                if (bias_data_ptr)
                {
                    _sum = vld1_f16(bias_data_ptr + p * 4);
                }

                const __fp16* kptr = weight_data_fp16.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const __fp16* sptr = m.row<const __fp16>(i * stride_h) + j * stride_w;

                    for (int k = 0; k < maxk; k++)
                    {
                        // fp16 multiply-accumulate, no widening
                        float16x4_t _val = vdup_n_f16(sptr[space_ofs[k]]);
                        float16x4_t _w = vld1_f16(kptr);
                        _sum = vfma_f16(_sum, _val, _w);

                        kptr += 4;
                    }
                }

                _sum = activation_ps(_sum, activation_type, activation_params);

                vst1_f16(outptr + j * 4, _sum);
            }

            outptr += outw * 4;
        }
    }
}
convolution_omp.c
#include <stdio.h> #include <omp.h> #include <time.h> #include "util.h" void normalize_output(int **img, int normalize_amount, int num_rows, int num_cols, int **output_img, double *parallel_time) { double itime, ftime; itime = omp_get_wtime(); #pragma omp parallel { #pragma omp for for (int i = 0; i < num_rows; i++) { for (int j = 0; j < num_cols; j++) { output_img[i][j] = (int)img[i][j] / normalize_amount; } } } ftime = omp_get_wtime(); *parallel_time += ftime - itime; } int kernel_sum(int **kernel, int kernel_size) { int sum = 0; for (int i = 0; i < kernel_size; i++) { for (int j = 0; j < kernel_size; j++) { sum += kernel[i][j]; } } if (sum == 0) return 1; else return sum; } int pixel_operation(int **kernel, int kernel_size, int **img, int row_index, int col_index) { int mac = 0; int half = (int)kernel_size / 2; int start_row = abs(row_index - half); int start_col = abs(col_index - half); int localmac; #pragma omp parallel private(localmac), shared(mac) { localmac = 0; #pragma omp for for (int i = start_row; i < start_row + kernel_size; i++) { for (int j = start_col; j < start_col + kernel_size; j++) { localmac += kernel[i - start_row][j - start_col] * img[i][j]; } } #pragma omp critical mac += localmac; } return mac; } int **extend_edges(int **img, int num_rows, int num_cols, int extend_amount, double *serial_time, double *parallel_time) { clock_t begin = clock(); int **extended = alloc_2d_matrix(num_rows + (extend_amount * 2), num_cols + (extend_amount * 2)); clock_t end = clock(); *serial_time += (double)(end - begin) / CLOCKS_PER_SEC; double itime, ftime; itime = omp_get_wtime(); #pragma omp parallel { #pragma omp for for (int i = 0; i < num_rows; i++) { for (int j = 0; j < num_cols; j++) { extended[extend_amount + i][extend_amount + j] = img[i][j]; } } } ftime = omp_get_wtime(); *parallel_time += ftime - itime; begin = clock(); for (int layer = extend_amount - 1; layer >= 0; layer--) { for (int i = layer; i < (num_rows + (extend_amount * 2)); i++) { for (int 
j = layer; j < (num_cols + (extend_amount * 2)); j++) { if (i >= extend_amount && i <= extend_amount + (num_rows - 1) && j < extend_amount) { extended[i][j] = extended[i][j + 1]; } if (i >= extend_amount && i <= extend_amount + (num_rows - 1) && j > extend_amount + (num_cols - 1)) { extended[i][j] = extended[i][j - 1]; } if (i < extend_amount && j < extend_amount) { extended[i][j] = extended[i + 1][j + 1]; } if (i < extend_amount && j > extend_amount + (num_cols - 1)) { extended[i][j] = extended[i][j - 1]; } if (i < extend_amount && j >= extend_amount && j <= extend_amount + (num_cols - 1)) { extended[i][j] = extended[i + 1][j]; } if (i > extend_amount + (num_rows - 1)) { extended[i][j] = extended[i - 1][j]; } } } } int **temp = img; img = extended; dealloc_2d_matrix(temp, num_rows, num_cols); end = clock(); *serial_time += (double)(end - begin) / CLOCKS_PER_SEC; return extended; } void convolve_image(int **kernel, int kernel_size, int **img, int num_rows, int num_cols, int **output_img, double *serial_time, double *parallel_time) { int extend_amount = (int)kernel_size / 2; int **ext_input = extend_edges(img, num_rows, num_cols, extend_amount, serial_time, parallel_time); double itime, ftime; itime = omp_get_wtime(); #pragma omp parallel { #pragma omp for for (int i = extend_amount; i < extend_amount + num_rows; i++) { for (int j = extend_amount; j < extend_amount + num_cols; j++) { output_img[i - extend_amount][j - extend_amount] = pixel_operation(kernel, kernel_size, ext_input, i, j); } } } ftime = omp_get_wtime(); *parallel_time += ftime - itime; clock_t begin = clock(); int kSum = kernel_sum(kernel, kernel_size); clock_t end = clock(); *serial_time += (double)(end - begin) / CLOCKS_PER_SEC; normalize_output(output_img, kSum, num_rows, num_cols, output_img, parallel_time); begin = clock(); dealloc_2d_matrix(ext_input, num_rows + (extend_amount * 2), num_cols + (extend_amount * 2)); end = clock(); *serial_time += (double)(end - begin) / CLOCKS_PER_SEC; } int 
main(int argc, char *argv[]) { double serial_main = 0.0; clock_t begin = clock(); if (argc != 4) { return -1; } // omp_set_num_threads(30); // read in image data int num_rows, num_columns; int **matrix = read_pgm_file(argv[1], &num_rows, &num_columns); // read in the kernel int kernel_size; int **kernel = read_pgm_file(argv[2], &kernel_size, &kernel_size); // create ouput int **output = alloc_2d_matrix(num_rows, num_columns); double *serial_convolve_time = malloc(sizeof(double)); double *parallel_convolve_time = malloc(sizeof(double)); clock_t end = clock(); serial_main += (double)(end - begin) / CLOCKS_PER_SEC; // convolve image convolve_image(kernel, kernel_size, matrix, num_rows, num_columns, output, serial_convolve_time, parallel_convolve_time); begin = clock(); serial_main += *serial_convolve_time; free(serial_convolve_time); FILE *to = fopen(argv[3], "w"); fprintf(to, "%d\n", num_rows); fprintf(to, "%d\n", num_columns); for (int i = 0; i < num_rows; i++) { for (int j = 0; j < num_columns; j++) { fprintf(to, "%d ", output[i][j]); } fprintf(to, "\n"); } // deallocate image matrix and kernel matrix dealloc_2d_matrix(kernel, kernel_size, kernel_size); dealloc_2d_matrix(output, num_rows, num_columns); end = clock(); serial_main += (double)(end - begin) * 1000 / CLOCKS_PER_SEC; printf("CONVOLUTION OMP\n"); printf("Parallel time: %f ms\n", *parallel_convolve_time * 1000); printf("Sequential time: %f ms\n", serial_main); free(parallel_convolve_time); return 0; }
nesting-2.c
/* GCC DejaGnu test: verify diagnostics for invalid nesting of OpenMP
   regions inside taskloop, ordered, and "for ordered" constructs.  The
   dg-error comments are matched by the test harness against the line they
   appear on — do not move or reword them.  */

void
foo (void)
{
  int i;
  /* worksharing/barrier/master/ordered regions may not be closely nested
     inside a taskloop region */
  #pragma omp taskloop
  for (i = 0; i < 64; i++)
    {
      int j;
      #pragma omp for /* { dg-error "region may not be closely nested inside of" } */
      for (j = 0; j < 10; j++)
	;
      #pragma omp single /* { dg-error "region may not be closely nested inside of" } */
	;
      #pragma omp sections /* { dg-error "region may not be closely nested inside of" } */
	{
	  #pragma omp section
	  ;
	}
      #pragma omp barrier /* { dg-error "region may not be closely nested inside of" } */
      #pragma omp master /* { dg-error "region may not be closely nested inside of" } */
	;
      #pragma omp ordered /* { dg-error "region may not be closely nested inside of" } */
	;
      #pragma omp ordered threads /* { dg-error "region may not be closely nested inside of" } */
	;
      #pragma omp ordered simd threads /* { dg-error ".ordered. .simd. must be closely nested inside .simd. region" } */
	;
      #pragma omp simd
      for (j = 0; j < 10; j++)
	#pragma omp ordered simd
	;
      #pragma omp critical
	{
	  #pragma omp simd
	  for (j = 0; j < 10; j++)
	    #pragma omp ordered simd
	    ;
	}
    }
  /* the same constructs are fine once an intervening parallel region
     separates them from the taskloop, except ordered without an enclosing
     loop with an ordered clause */
  #pragma omp taskloop
  for (i = 0; i < 64; i++)
    #pragma omp parallel
    {
      int j;
      #pragma omp for
      for (j = 0; j < 10; j++)
	;
      #pragma omp single
	;
      #pragma omp sections
	{
	  #pragma omp section
	  ;
	}
      #pragma omp barrier
      #pragma omp master
	;
      #pragma omp ordered /* { dg-error ".ordered. region must be closely nested inside a loop region with an .ordered. clause" } */
	;
      #pragma omp ordered threads /* { dg-error ".ordered. region must be closely nested inside a loop region with an .ordered. clause" } */
	;
      #pragma omp simd
      for (j = 0; j < 10; j++)
	#pragma omp ordered simd
	;
      #pragma omp critical
	{
	  #pragma omp simd
	  for (j = 0; j < 10; j++)
	    #pragma omp ordered simd
	    ;
	}
    }
  /* likewise with an intervening target region */
  #pragma omp taskloop
  for (i = 0; i < 64; i++)
    #pragma omp target
    {
      int j;
      #pragma omp for
      for (j = 0; j < 10; j++)
	;
      #pragma omp single
	;
      #pragma omp sections
	{
	  #pragma omp section
	  ;
	}
      #pragma omp barrier
      #pragma omp master
	;
      #pragma omp ordered /* { dg-error ".ordered. region must be closely nested inside a loop region with an .ordered. clause" } */
	;
      #pragma omp ordered threads /* { dg-error ".ordered. region must be closely nested inside a loop region with an .ordered. clause" } */
	;
      #pragma omp simd
      for (j = 0; j < 10; j++)
	#pragma omp ordered simd
	;
      #pragma omp critical
	{
	  #pragma omp simd
	  for (j = 0; j < 10; j++)
	    #pragma omp ordered simd
	    ;
	}
    }
  /* ordered regions may not nest inside ordered regions */
  #pragma omp ordered
    {
      #pragma omp ordered /* { dg-error "region may not be closely nested inside of" } */
	;
    }
  #pragma omp ordered threads
    {
      #pragma omp ordered /* { dg-error "region may not be closely nested inside of" } */
	;
    }
  #pragma omp ordered
    {
      #pragma omp ordered threads /* { dg-error "region may not be closely nested inside of" } */
	;
    }
  #pragma omp ordered threads
    {
      #pragma omp ordered threads /* { dg-error "region may not be closely nested inside of" } */
	;
    }
  /* ordered simd requires an enclosing simd region, not critical */
  #pragma omp critical
    {
      #pragma omp ordered simd /* { dg-error ".ordered. .simd. must be closely nested inside .simd. region" } */
	;
    }
  /* an intervening parallel region detaches ordered from the loop's
     ordered clause */
  #pragma omp for ordered
  for (i = 0; i < 64; i++)
    #pragma omp parallel
    {
      #pragma omp ordered threads /* { dg-error ".ordered. region must be closely nested inside a loop region with an .ordered. clause" } */
	;
    }
  #pragma omp for ordered
  for (i = 0; i < 64; i++)
    #pragma omp parallel
    {
      #pragma omp ordered /* { dg-error ".ordered. region must be closely nested inside a loop region with an .ordered. clause" } */
	;
    }
  /* doacross (depend source/sink) has the same closeness requirement */
  #pragma omp for ordered(1)
  for (i = 0; i < 64; i++)
    #pragma omp parallel
    {
      #pragma omp ordered depend(source) /* { dg-error ".ordered. construct with .depend. clause must be closely nested inside a loop with .ordered. clause with a parameter" } */
      #pragma omp ordered depend(sink: i - 1) /* { dg-error ".ordered. construct with .depend. clause must be closely nested inside a loop with .ordered. clause with a parameter" } */
    }
}
gemm_mkl_ref.h
/* Copyright (c) 2018 NoobsHPC Authors All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifndef NBHPC_ICESWORD_OPERATOR_X86_GEMM_REF_H #define NBHPC_ICESWORD_OPERATOR_X86_GEMM_REF_H #pragma once #include "mkl.h" #include "omp_thread.h" #include "icesword/operator/gemm.h" #include <iostream> namespace noobshpc { namespace icesword { /* row major: mem_a: dim_m * dim_k mem_b: dim_k * dim_n mem_c: dim_m * dim_n col major: mem_a: dim_k * dim_m mem_b: dim_n * dim_k mem_c: dim_n * dim_m matrix(C) = beta * matrix(C) + offset_c + alpha * { matrix(A) + offset_a } * { op(B) + offset_b } */ template<DataType a_dtype, DataType b_dtype, DataType c_dtype> class GEMM_REF <X86, a_dtype, b_dtype, c_dtype> { public: typedef typename DataTrait<X86, a_dtype>::Dtype A_DType; typedef typename DataTrait<X86, b_dtype>::Dtype B_DType; typedef typename DataTrait<X86, c_dtype>::Dtype C_DType; GEMM_REF() : thread_num(ice_get_max_threads()) {} ~GEMM_REF() {} Status execute(const void* a_mem, const void* b_mem, const void* oc_mem, void* c_mem, const char oc_mode, const bool col_major, const size_t oa, const size_t ob, const size_t m, const size_t n, const size_t k, const bool trans_a, const bool trans_b, const float beta, const float alpha) { size_t lda, ldb, ldc; if (col_major) { lda = trans_a ? k : m; ldb = trans_b ? n : k; ldc = m; } else { lda = trans_a ? m : k; ldb = trans_b ? 
k : n; ldc = n; } return execute(a_mem, b_mem, oc_mem, c_mem, col_major, oa, ob, m, n, k, lda, ldb, ldc, trans_a, trans_b, beta, alpha, oc_mode); } Status execute(const void* a_mem, const void* b_mem, const void* oc_mem, void* c_mem, const bool col_major, const size_t oa, const size_t ob, const size_t m, // mem_c -> col_major ? width : hight const size_t n, // mem_c -> col_major ? hight : width const size_t k, // matrix a,b common dim const size_t lda, // len(mem_a) / m const size_t ldb, // len(mem_b) / n const size_t ldc, // len(mem_c) / k const bool trans_a, const bool trans_b, const float beta, const float alpha, const char oc_mode) { auto status = execute_check(a_mem, b_mem, oc_mem, c_mem, oc_mode); if (status != S_Success) { return S_InvalidValue; } bool a_trans = false; bool b_trans = false; size_t dim_m = 0; size_t dim_n = 0; size_t dim_k = 0; size_t stride_a = 0; size_t stride_b = 0; size_t offset_a = 0; size_t offset_b = 0; size_t oc_method = 1; const A_DType * mem_a = nullptr; const B_DType * mem_b = nullptr; const C_DType * mem_o = nullptr; C_DType * mem_c = nullptr; /* ROW_MAJOR : mem_a : {m, k} mem_b : {k, n} mem_c : {m, n} COL_MAJOR : mem_a : {k, m} mem_b : {n, k} mem_c : {n, m} Convert to row major */ if (col_major) { mem_a = static_cast<const A_DType *>(b_mem); mem_b = static_cast<const B_DType *>(a_mem); mem_o = static_cast<const C_DType *>(oc_mem); mem_c = static_cast<C_DType *>(c_mem); a_trans = trans_b; b_trans = trans_a; offset_a = ob; offset_b = oa; dim_m = n; dim_n = m; dim_k = k; stride_a = ldb; stride_b = lda; oc_method = oc_mode == 'F' ? 2 : oc_mode == 'R' ? 4 : oc_mode == 'C' ? 3 : oc_mode == 'A' ? 
5 : 1; } else { mem_a = static_cast<const A_DType *>(a_mem); mem_b = static_cast<const B_DType *>(b_mem); mem_o = static_cast<const C_DType *>(oc_mem); mem_c = static_cast<C_DType *>(c_mem); a_trans = trans_a; b_trans = trans_b; offset_a = oa; offset_b = ob; dim_m = m; dim_n = n; dim_k = k; stride_a = lda; stride_b = ldb; oc_method = oc_mode == 'F' ? 2 : oc_mode == 'R' ? 3 : oc_mode == 'C' ? 4 : oc_mode == 'A' ? 5 : 1; } #ifdef ICESWORD_VERBOSE // compute as row major, row message LOG(INFO) << "GEMM_REF_VERBOSE {" << " transa:" << (a_trans ? "true" : "false") << " transb:" << (b_trans ? "true" : "false") << " m:" << dim_m << " n:" << dim_n << " k:" << dim_k << " oa:" << offset_a << " ob:" << offset_b << " lda:" << stride_a << " ldb:" << stride_b << " ldc:" << ldc << " beta:" << beta << " alpha:" << alpha << " }"; #endif // compute as row major #pragma omp parallel for collapse(2) num_threads(thread_num) for (auto m = 0; m < dim_m; ++m) { for (auto n = 0; n < dim_n; ++n) { C_DType ip_a_b = 0; auto c_index = m * ldc + n; float mem_c_beta = beta * mem_c[c_index]; #pragma omp simd for (auto k = 0; k < dim_k; ++k) { auto ab_index = index_calculate(stride_a, stride_b, m, n, k, a_trans, b_trans); auto a_index = ab_index[0]; auto b_index = ab_index[1]; ip_a_b += (mem_a[a_index] + offset_a) * (mem_b[b_index] + offset_b); } float alpha_ab_beta_c = alpha * ip_a_b + mem_c_beta; switch (oc_method) { case 1 : mem_c[c_index] = alpha_ab_beta_c; break; case 2 : mem_c[c_index] = alpha_ab_beta_c + mem_o[0]; break; case 3 : mem_c[c_index] = alpha_ab_beta_c + mem_o[n]; break; case 4 : mem_c[c_index] = alpha_ab_beta_c + mem_o[m]; break; } } } return S_Success; } private: size_t thread_num; Status execute_check(const void* mem_a, const void* mem_b, const void* mem_oc, void* mem_c, const char oc_mode) { if (mem_a == nullptr || mem_b == nullptr || mem_b == nullptr) { LOG(ERROR) << "wrong empty pointer !"; return S_InvalidValue; } if (oc_mode != 'N' && oc_mode != 'F' && oc_mode != 'C' && 
oc_mode != 'R' && oc_mode != 'A') { LOG(ERROR) << "wrong mem_oc mode !"; return S_InvalidValue; } if (oc_mode != 'N' && mem_oc == nullptr) { LOG(ERROR) << "wrong mem_oc pointer !"; return S_InvalidValue; } return S_Success; } std::vector<size_t> index_calculate(const size_t lda, const size_t ldb, const size_t m, const size_t n, const size_t k, const bool trans_a, const bool trans_b) { if (trans_a == false && trans_b == false) { // dim_m * dim_k, dim_k * dim_n return {m * lda + k, k * ldb + n}; } else if (trans_a == true && trans_b == false) { // dim_k * dim_m, dim_k * dim_n return {k * lda + m, k * ldb + n}; } else if (trans_a == false && trans_b == true) { // dim_m * dim_k, dim_n * dim_k return {m * lda + k, n * ldb + k}; } else if (trans_a == true && trans_b == true) { // dim_k * dim_m, dim_n * dim_k return {k * lda + m, n * ldb + k}; } return {0, 0}; }; }; // class end } // namespace icesword } // namespace noobshpc #endif // NBHPC_ICESWORD_OPERATOR_X86_GEMM_REF_H
struct.c
#include <stdio.h> #include <stdlib.h> #include <omp.h> struct data { double *Matrix; int counter; }; int main() { struct data d; d.Matrix = malloc(sizeof(double)*4); d.Matrix[0] = 42; d.Matrix[1] = 43; d.Matrix[2] = 44; d.Matrix[3] = 45; d.counter = 0; #pragma omp parallel { int rank = omp_get_thread_num(); d.Matrix[rank] = rank; d.counter++; } printf("%d, %f %f %f %f\n", d.counter, d.Matrix[0],d.Matrix[1],d.Matrix[2],d.Matrix[3]); free(d.Matrix); }
GB_unop__identity_uint16_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_uint16_fc32
// op(A') function:  GB_unop_tran__identity_uint16_fc32

// C type:   uint16_t
// A type:   GxB_FC32_t
// cast:     uint16_t cij = GB_cast_to_uint16_t ((double) crealf (aij))
// unaryop:  cij = aij

// NOTE(review): "identity" op with typecast — the real part of each
// single-precision complex entry is cast to uint16_t.

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint16_t z = GB_cast_to_uint16_t ((double) crealf (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)                                       \
{                                                               \
    /* aij = Ax [pA] */                                         \
    GxB_FC32_t aij = Ax [pA] ;                                  \
    /* Cx [pC] = op (cast (aij)) */                             \
    uint16_t z = GB_cast_to_uint16_t ((double) crealf (aij)) ;  \
    Cx [pC] = z ;                                               \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_uint16_fc32
(
    uint16_t *Cx,               // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC32_t aij = Ax [p] ;
        uint16_t z = GB_cast_to_uint16_t ((double) crealf (aij)) ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_uint16_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
dgeqrf.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgeqrf.c, normal z -> d, Fri Sep 28 17:38:01 2018 * **/ #include "plasma.h" #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_tuning.h" #include "plasma_types.h" #include "plasma_workspace.h" /***************************************************************************//** * * @ingroup plasma_geqrf * * Computes a tile QR factorization of a real or complex m-by-n matrix A. * The factorization has the form * \f[ A = Q \times R \f], * where Q is a matrix with orthonormal columns and R is an upper triangular * with positive diagonal. * ******************************************************************************* * * @param[in] m * The number of rows of the matrix A. * m >= 0. * * @param[in] n * The number of columns of the matrix A. * n >= 0. * * @param[in,out] pA * On entry, pointer to the m-by-n matrix A. * On exit, the elements on and above the diagonal of the array contain * the min(m,n)-by-n upper trapezoidal matrix R (R is upper triangular * if m >= n); the elements below the diagonal represent the orthogonal * matrix Q as a product of elementary reflectors stored by tiles. * * @param[in] lda * The leading dimension of the array A. lda >= max(1,m). * * @param[out] T * On exit, auxiliary factorization data, required by plasma_dgeqrs to * solve the system of equations. * Matrix in T is allocated inside this function and needs to be * destroyed by plasma_desc_destroy. 
* ******************************************************************************* * * @retval PlasmaSuccess successful exit * @retval < 0 if -i, the i-th argument had an illegal value * ******************************************************************************* * * @sa plasma_omp_dgeqrf * @sa plasma_cgeqrf * @sa plasma_dgeqrf * @sa plasma_sgeqrf * @sa plasma_dgeqrs * @sa plasma_dgels * ******************************************************************************/ int plasma_dgeqrf(int m, int n, double *pA, int lda, plasma_desc_t *T) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_fatal_error("PLASMA not initialized"); return PlasmaErrorNotInitialized; } // Check input arguments. if (m < 0) { plasma_error("illegal value of m"); return -1; } if (n < 0) { plasma_error("illegal value of n"); return -2; } if (lda < imax(1, m)) { plasma_error("illegal value of lda"); return -4; } // quick return if (imin(m, n) == 0) return PlasmaSuccess; // Tune parameters. if (plasma->tuning) plasma_tune_geqrf(plasma, PlasmaRealDouble, m, n); // Set tiling parameters. int ib = plasma->ib; int nb = plasma->nb; plasma_enum_t householder_mode = plasma->householder_mode; // Create tile matrix. plasma_desc_t A; int retval; retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb, m, n, 0, 0, m, n, &A); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); return retval; } // Prepare descriptor T. retval = plasma_descT_create(A, ib, householder_mode, T); if (retval != PlasmaSuccess) { plasma_error("plasma_descT_create() failed"); return retval; } // Allocate workspace. plasma_workspace_t work; size_t lwork = nb + ib*nb; // geqrt: tau + work retval = plasma_workspace_create(&work, lwork, PlasmaRealDouble); if (retval != PlasmaSuccess) { plasma_error("plasma_workspace_create() failed"); return retval; } // Initialize sequence. 
plasma_sequence_t sequence; retval = plasma_sequence_init(&sequence); // Initialize request. plasma_request_t request; retval = plasma_request_init(&request); // asynchronous block #pragma omp parallel #pragma omp master { // Translate to tile layout. plasma_omp_dge2desc(pA, lda, A, &sequence, &request); // Call the tile async function. plasma_omp_dgeqrf(A, *T, work, &sequence, &request); // Translate back to LAPACK layout. plasma_omp_ddesc2ge(A, pA, lda, &sequence, &request); } // implicit synchronization plasma_workspace_destroy(&work); // Free matrix A in tile layout. plasma_desc_destroy(&A); // Return status. int status = sequence.status; return status; } /***************************************************************************//** * * @ingroup plasma_geqrf * * Computes a tile QR factorization of a matrix. * Non-blocking tile version of plasma_dgeqrf(). * May return before the computation is finished. * Operates on matrices stored by tiles. * All matrices are passed through descriptors. * All dimensions are taken from the descriptors. * Allows for pipelining of operations at runtime. * ******************************************************************************* * * @param[in,out] A * Descriptor of matrix A. * A is stored in the tile layout. * * @param[out] T * Descriptor of matrix T. * On exit, auxiliary factorization data, required by plasma_dgeqrs to * solve the system of equations. * * @param[in] work * Workspace for the auxiliary arrays needed by some coreblas kernels. * For QR factorization, contains preallocated space for tau and work * arrays. Allocated by the plasma_workspace_create function. * * @param[in] sequence * Identifies the sequence of function calls that this call belongs to * (for completion checks and exception handling purposes). * * @param[out] request * Identifies this function call (for exception handling purposes). * * @retval void * Errors are returned by setting sequence->status and * request->status to error values. 
The sequence->status and
 *     request->status should never be set to PlasmaSuccess (the
 *     initial values) since another async call may be setting a
 *     failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_dgeqrf
 * @sa plasma_omp_cgeqrf
 * @sa plasma_omp_dgeqrf
 * @sa plasma_omp_sgeqrf
 * @sa plasma_omp_dgeqrs
 * @sa plasma_omp_dgeqrs
 * @sa plasma_omp_dgels
 *
 ******************************************************************************/
// Asynchronous tile entry point: validates arguments, then dispatches to
// the flat or tree Householder variant of the parallel QR factorization.
void plasma_omp_dgeqrf(plasma_desc_t A, plasma_desc_t T,
                       plasma_workspace_t work,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(T) != PlasmaSuccess) {
        plasma_error("invalid T");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        // NOTE(review): plasma_request_fail() is passed the NULL sequence
        // here; this is only safe if plasma_fatal_error() never returns —
        // confirm against the plasma_error/plasma_request_fail definitions.
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        // NOTE(review): same concern as above for the NULL request argument.
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (imin(A.m, A.n) == 0)
        return;

    // Call the parallel function.
    // Dispatch on the configured Householder reduction scheme.
    if (plasma->householder_mode == PlasmaTreeHouseholder) {
        plasma_pdgeqrf_tree(A, T, work, sequence, request);
    }
    else {
        plasma_pdgeqrf(A, T, work, sequence, request);
    }
}
/* mppush2.c */
/* C Library for Skeleton 2D Electrostatic MPI/OpenMP PIC Code */ /* written by Viktor K. Decyk, UCLA */ #include <stdlib.h> #include <stdio.h> #include <complex.h> #include <math.h> #include "mppush2.h" #include "mpplib2.h" /*--------------------------------------------------------------------*/ double ranorm() { /* this program calculates a random number y from a gaussian distribution with zero mean and unit variance, according to the method of mueller and box: y(k) = (-2*ln(x(k)))**1/2*sin(2*pi*x(k+1)) y(k+1) = (-2*ln(x(k)))**1/2*cos(2*pi*x(k+1)), where x is a random number uniformly distributed on (0,1). written for the ibm by viktor k. decyk, ucla local data */ static int r1 = 885098780, r2 = 1824280461; static int r4 = 1396483093, r5 = 55318673; static int iflg = 0; static double h1l = 65531.0, h1u = 32767.0, h2l = 65525.0; static double r0 = 0.0; int isc, i1; double ranorm, r3, asc, bsc, temp; if (iflg==1) { ranorm = r0; r0 = 0.0; iflg = 0; return ranorm; } isc = 65536; asc = (double) isc; bsc = asc*asc; i1 = r1 - (r1/isc)*isc; r3 = h1l*(double) r1 + asc*h1u*(double) i1; i1 = r3/bsc; r3 -= ((double) i1)*bsc; bsc = 0.5*bsc; i1 = r2/isc; isc = r2 - i1*isc; r0 = h1l*(double) r2 + asc*h1u*(double) isc; asc = 1.0/bsc; isc = r0*asc; r2 = r0 - ((double) isc)*bsc; r3 += (double) isc + 2.0*h1u*(double) i1; isc = r3*asc; r1 = r3 - ((double) isc)*bsc; temp = sqrt(-2.0*log((((double) r1) + ((double) r2)*asc)*asc)); isc = 65536; asc = (double) isc; bsc = asc*asc; i1 = r4 - (r4/isc)*isc; r3 = h2l*(double) r4 + asc*h1u*(double) i1; i1 = r3/bsc; r3 -= ((double) i1)*bsc; bsc = 0.5*bsc; i1 = r5/isc; isc = r5 - i1*isc; r0 = h2l*(double) r5 + asc*h1u*(double) isc; asc = 1.0/bsc; isc = r0*asc; r5 = r0 - ((double) isc)*bsc; r3 += (double) isc + 2.0*h1u*(double) i1; isc = r3*asc; r4 = r3 - ((double) isc)*bsc; r0 = 6.28318530717959*((((double) r4) + ((double) r5)*asc)*asc); ranorm = temp*sin(r0); r0 = temp*cos(r0); iflg = 1; return ranorm; } 
/*--------------------------------------------------------------------*/
void cpdicomp2l(float edges[], int *nyp, int *noff, int *nypmx,
                int *nypmn, int ny, int kstrt, int nvp, int idps) {
/* this subroutine determines spatial boundaries for uniform particle
   decomposition, calculates number of grid points in each spatial
   region, and the offset of these grid points from the global address
   nvp must be < ny. some combinations of ny and nvp result in a zero
   value of nyp. this is not supported.
   integer boundaries are set.
   input: ny, kstrt, nvp, idps, output: edges, nyp, noff, nypmx, nypmn
   edges[0] = lower boundary of particle partition
   edges[1] = upper boundary of particle partition
   nyp = number of primary (complete) gridpoints in particle partition
   noff = lowermost global gridpoint in particle partition
   nypmx = maximum size of particle partition, including guard cells
   nypmn = minimum value of nyp
   ny = system length in y direction
   kstrt = starting data block number (processor id + 1)
   nvp = number of real or virtual processors
   idps = number of partition boundaries
local data */
   int kb, kyp;
   float at1, any;
   int mypm[2], iwork2[2];
   any = (float) ny;
/* determine decomposition */
/* kyp = ceil(ny/nvp) rows per processor; edges are clamped to [0,ny] */
   kb = kstrt - 1;
   kyp = (ny - 1)/nvp + 1;
   at1 = (float) kyp;
   edges[0] = at1*(float) kb;
   if (edges[0] > any)
      edges[0] = any;
   *noff = edges[0];
   edges[1] = at1*(float) (kb + 1);
   if (edges[1] > any)
      edges[1] = any;
   kb = edges[1];
   *nyp = kb - *noff;
/* find maximum/minimum partition size */
/* cppimax is a global MPI integer-max reduction (see mpplib2.h) */
   mypm[0] = *nyp;
   mypm[1] = -(*nyp);
   cppimax(mypm,iwork2,2);
   *nypmx = mypm[0] + 1;
   *nypmn = -mypm[1];
   return;
}

/*--------------------------------------------------------------------*/
void cpdistr2(float part[], float edges[], int *npp, int nps, float vtx,
              float vty, float vdx, float vdy, int npx, int npy, int nx,
              int ny, int idimp, int npmax, int idps, int ipbc,
              int *ierr) {
/* for 2d code, this subroutine calculates initial particle co-ordinates
   and velocities with uniform density and maxwellian velocity with drift
   for distributed data.
   input: all except part, npp, ierr, output: part, npp, ierr
   part[n][0] = position x of particle n in partition
   part[n][1] = position y of particle n in partition
   part[n][2] = velocity vx of particle n in partition
   part[n][3] = velocity vy of particle n in partition
   edges[0] = lower boundary of particle partition
   edges[1] = upper boundary of particle partition
   npp = number of particles in partition
   nps = starting address of particles in partition
   vtx/vty = thermal velocity of electrons in x/y direction
   vdx/vdy = drift velocity of beam electrons in x/y direction
   npx/npy = initial number of particles distributed in x/y direction
   nx/ny = system length in x/y direction
   idimp = size of phase space = 4
   npmax = maximum number of particles in each partition
   idps = number of partition boundaries
   ipbc = particle boundary condition = (0,1,2,3) =
   (none,2d periodic,2d reflecting,mixed reflecting/periodic)
   ierr = (0,1) = (no,yes) error condition exists
   ranorm = gaussian random number with zero mean and unit variance
   with spatial decomposition
local data */
   int j, k, npt, k1, npxyp;
   float edgelx, edgely, at1, at2, xt, yt, vxt, vyt;
   double dnpx, dnpxy, dt1;
   int ierr1[1], iwork1[1];
   double sum3[3], work3[3];
   *ierr = 0;
/* particle distribution constant */
   dnpx = (double) npx;
/* set boundary values */
/* reflecting walls exclude one guard cell on each side */
   edgelx = 0.0;
   edgely = 0.0;
   at1 = (float) nx/(float) npx;
   at2 = (float) ny/(float) npy;
   if (ipbc==2) {
      edgelx = 1.0;
      edgely = 1.0;
      at1 = (float) (nx-2)/(float) npx;
      at2 = (float) (ny-2)/(float) npy;
   }
   else if (ipbc==3) {
      edgelx = 1.0;
      at1 = (float) (nx-2)/(float) npx;
   }
   npt = *npp;
/* uniform density profile */
/* every rank evaluates the full npx*npy lattice (and draws the same   */
/* ranorm sequence) but stores only the rows inside its own partition, */
/* keeping the global random stream identical across ranks             */
   for (k = 0; k < npy; k++) {
      yt = edgely + at2*(((float) k) + 0.5);
      for (j = 0; j < npx; j++) {
         xt = edgelx + at1*(((float) j) + 0.5);
/* maxwellian velocity distribution */
         vxt = vtx*ranorm();
         vyt = vty*ranorm();
         if ((yt >= edges[0]) && (yt < edges[1])) {
            if (npt < npmax) {
               k1 = idimp*npt;
               part[k1] = xt;
               part[1+k1] = yt;
               part[2+k1] = vxt;
               part[3+k1] = vyt;
               npt += 1;
            }
            else
               *ierr += 1;
         }
      }
   }
   npxyp = 0;
/* add correct drift */
/* subtract the measured mean velocity, then add the requested drift; */
/* cppdsum is a global MPI double-sum reduction                        */
   sum3[0] = 0.0;
   sum3[1] = 0.0;
/* nps is a 1-based starting address, hence the nps-1 lower bound */
   for (j = nps-1; j < npt; j++) {
      npxyp += 1;
      sum3[0] += part[2+idimp*j];
      sum3[1] += part[3+idimp*j];
   }
   sum3[2] = npxyp;
   cppdsum(sum3,work3,3);
   dnpxy = sum3[2];
   ierr1[0] = *ierr;
   cppimax(ierr1,iwork1,1);
   *ierr = ierr1[0];
/* NOTE(review): dnpxy == 0 (no particles stored globally) would divide
   by zero here -- presumably callers guarantee npx*npy > 0; confirm */
   dt1 = 1.0/dnpxy;
   sum3[0] = dt1*sum3[0] - vdx;
   sum3[1] = dt1*sum3[1] - vdy;
   for (j = nps-1; j < npt; j++) {
      part[2+idimp*j] -= sum3[0];
      part[3+idimp*j] -= sum3[1];
   }
/* process errors */
/* a nonzero remainder means some lattice points were dropped (overflow) */
   dnpxy -= dnpx*(double) npy;
   if (dnpxy != 0.0)
      *ierr = dnpxy;
   *npp = npt;
   return;
}

/*--------------------------------------------------------------------*/
void cppdblkp2l(float part[], int kpic[], int npp, int noff, int *nppmx,
                int idimp, int npmax, int mx, int my, int mx1,
                int mxyp1, int *irc) {
/* this subroutine finds the maximum number of particles in each tile of
   mx, my to calculate size of segmented particle array ppart
   linear interpolation, spatial decomposition in y direction
   input: all except kpic, nppmx, output: kpic, nppmx
   part = input particle array
   part[n][0] = position x of particle n in partition
   part[n][1] = position y of particle n in partition
   kpic = output number of particles per tile
   nppmx = return maximum number of particles in tile
   npp = number of particles in partition
   noff = backmost global gridpoint in particle partition
   idimp = size of phase space = 4
   npmax = maximum number of particles in each partition
   mx/my = number of grids in sorting cell in x and y
   mx1 = (system length in x direction - 1)/mx + 1
   mxyp1 = mx1*myp1, where myp1=(partition length in y direction-1)/my+1
   irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
   int j, k, n, m, mnoff, isum, ist, npx, ierr;
   mnoff = noff;
   ierr = 0;
/* clear counter array */
   for (k = 0; k < mxyp1; k++) {
      kpic[k] = 0;
   }
/* find how many particles in each tile */
   for (j = 0; j < npp; j++) {
/* float->int truncation maps a position to its tile indices */
      n = part[idimp*j];
      m = part[1+idimp*j];
      n = n/mx;
      m = (m - mnoff)/my;
      m = n + mx1*m;
      if (m < mxyp1) {
         kpic[m] += 1;
      }
      else {
/* record the worst out-of-range tile index as the overflow amount */
         ierr = ierr > m-mxyp1+1 ? ierr : m-mxyp1+1;
      }
   }
/* find maximum */
   isum = 0;
   npx = 0;
   for (k = 0; k < mxyp1; k++) {
      ist = kpic[k];
      npx = npx > ist ? npx : ist;
      isum += ist;
   }
   *nppmx = npx;
/* check for errors */
   if (ierr > 0) {
      *irc = ierr;
   }
   else if (isum != npp) {
/* counted particles do not add up to npp: inconsistent input */
      *irc = -1;
   }
   return;
}

/*--------------------------------------------------------------------*/
void cpppmovin2l(float part[], float ppart[], int kpic[], int npp,
                 int noff, int nppmx, int idimp, int npmax, int mx,
                 int my, int mx1, int mxyp1, int *irc) {
/* this subroutine sorts particles by x,y grid in tiles of mx, my and
   copies to segmented array ppart
   linear interpolation, spatial decomposition in y direction
   input: all except ppart, kpic, output: ppart, kpic
   part/ppart = input/output particle arrays
   part[n][0] = position x of particle n in partition
   part[n][1] = position y of particle n in partition
   kpic = output number of particles per tile
   nppmx = maximum number of particles in tile
   npp = number of particles in partition
   noff = backmost global gridpoint in particle partition
   idimp = size of phase space = 4
   npmax = maximum number of particles in each partition
   mx/my = number of grids in sorting cell in x and y
   mx1 = (system length in x direction - 1)/mx + 1
   mxyp1 = mx1*myp1, where myp1=(partition length in y direction-1)/my+1
   irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
   int i, j, k, n, m, mnoff, ip, ierr;
   mnoff = noff;
   ierr = 0;
/* clear counter array */
   for (k = 0; k < mxyp1; k++) {
      kpic[k] = 0;
   }
/* find addresses of particles at each tile and reorder particles */
   for (j = 0; j < npp; j++) {
/* same tile-index computation as cppdblkp2l */
      n = part[idimp*j];
      m = part[1+idimp*j];
      n = n/mx;
      m = (m - mnoff)/my;
      m = n + mx1*m;
      ip = kpic[m];
      if (ip < nppmx) {
         for (i = 0; i < idimp; i++) {
            ppart[i+idimp*(ip+nppmx*m)] = part[i+idimp*j];
         }
      }
      else {
/* tile full: remember the largest overflow seen */
         ierr = ierr > ip-nppmx+1 ? ierr : ip-nppmx+1;
      }
      kpic[m] = ip + 1;
   }
   if (ierr > 0)
      *irc = ierr;
   return;
}

/*--------------------------------------------------------------------*/
void cpppcheck2l(float ppart[], int kpic[], int noff, int nyp,
                 int idimp, int nppmx, int nx, int mx, int my, int mx1,
                 int myp1, int *irc) {
/* this subroutine performs a sanity check to make sure particles sorted
   by x,y grid in tiles of mx, my, are all within bounds.
   tiles are assumed to be arranged in 2D linear memory
   input: all except irc
   output: irc
   ppart[k][n][0] = position x of particle n in tile k
   ppart[k][n][1] = position y of particle n in tile k
   kpic[k] = number of reordered output particles in tile k
   noff = lowermost global gridpoint in particle partition.
   nyp = number of primary (complete) gridpoints in particle partition
   idimp = size of phase space = 4
   nppmx = maximum number of particles in tile
   nx = system length in x direction
   mx/my = number of grids in sorting cell in x/y
   mx1 = (system length in x direction - 1)/mx + 1
   myp1 = (partition length in y direction - 1)/my + 1
   irc = particle error, returned only if error occurs, when irc > 0
local data */
   int mxyp1, noffp, moffp, nppp, j, k, ist, nn, mm;
   float edgelx, edgely, edgerx, edgery, dx, dy;
   mxyp1 = mx1*myp1;
/* loop over tiles */
#pragma omp parallel for \
private(j,k,noffp,moffp,nppp,nn,mm,ist,edgelx,edgely,edgerx,edgery,dx, \
dy)
   for (k = 0; k < mxyp1; k++) {
/* decode tile index k into x offset (noffp) and y offset (moffp) */
      noffp = k/mx1;
      moffp = my*noffp;
      noffp = mx*(k - mx1*noffp);
      nppp = kpic[k];
/* clip the tile extent at the partition edges */
      nn = nx - noffp;
      nn = mx < nn ? mx : nn;
      mm = nyp - moffp;
      mm = my < mm ?
my : mm;
      edgelx = noffp;
      edgerx = noffp + nn;
      edgely = noff + moffp;
      edgery = noff + moffp + mm;
/* loop over particles in tile */
      for (j = 0; j < nppp; j++) {
         dx = ppart[idimp*(j+nppmx*k)];
         dy = ppart[1+idimp*(j+nppmx*k)];
/* find particles going out of bounds */
         ist = 0;
         if (dx < edgelx)
            ist = 1;
         if (dx >= edgerx)
            ist = 2;
         if (dy < edgely)
            ist += 3;
         if (dy >= edgery)
            ist += 6;
         if (ist > 0)
/* NOTE(review): concurrent unsynchronized stores to *irc from multiple
   threads; last writer wins, so only one offending tile is reported */
            *irc = k + 1;
      }
   }
   return;
}

/*--------------------------------------------------------------------*/
void cppgppush2l(float ppart[], float fxy[], int kpic[], int noff,
                 int nyp, float qbm, float dt, float *ek, int nx,
                 int ny, int mx, int my, int idimp, int nppmx, int nxv,
                 int nypmx, int mx1, int mxyp1, int ipbc) {
/* for 2d code, this subroutine updates particle co-ordinates and
   velocities using leap-frog scheme in time and first-order linear
   interpolation in space, with various boundary conditions
   OpenMP version using guard cells, for distributed data
   data read in tiles
   particles stored segmented array
   42 flops/particle, 12 loads, 4 stores
   input: all, output: ppart, ek
   equations used are:
   vx(t+dt/2) = vx(t-dt/2) + (q/m)*fx(x(t),y(t))*dt,
   vy(t+dt/2) = vy(t-dt/2) + (q/m)*fy(x(t),y(t))*dt,
   where q/m is charge/mass, and
   x(t+dt) = x(t) + vx(t+dt/2)*dt, y(t+dt) = y(t) + vy(t+dt/2)*dt
   fx(x(t),y(t)) and fy(x(t),y(t)) are approximated by interpolation from
   the nearest grid points:
   fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
      + dx*fx(n+1,m+1))
   fy(x,y) = (1-dy)*((1-dx)*fy(n,m)+dx*fy(n+1,m)) + dy*((1-dx)*fy(n,m+1)
      + dx*fy(n+1,m+1))
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   ppart[m][n][0] = position x of particle n in partition in tile m
   ppart[m][n][1] = position y of particle n in partition in tile m
   ppart[m][n][2] = velocity vx of particle n in partition in tile m
   ppart[m][n][3] = velocity vy of particle n in partition in tile m
   fxy[k][j][0] = x component of force/charge at grid (j,kk)
   fxy[k][j][1] = y component of force/charge at grid (j,kk)
   in other words, fxy are the convolutions of the electric field
   over the particle shape, where kk = k + noff
   kpic = number of particles per tile
   noff = lowermost global gridpoint in particle partition.
   nyp = number of primary (complete) gridpoints in particle partition
   qbm = particle charge/mass
   dt = time interval between successive calculations
   kinetic energy/mass at time t is also calculated, using
   ek = .125*sum((vx(t+dt/2)+vx(t-dt/2))**2+(vy(t+dt/2)+vy(t-dt/2))**2)
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   idimp = size of phase space = 4
   nppmx = maximum number of particles in tile
   nxv = first dimension of field array, must be >= nx+1
   nypmx = maximum size of particle partition, including guard cells.
   mx1 = (system length in x direction - 1)/mx + 1
   mxyp1 = mx1*myp1, where myp1=(partition length in y direction-1)/my+1
   ipbc = particle boundary condition = (0,1,2,3) =
   (none,2d periodic,2d reflecting,mixed reflecting/periodic)
local data */
#define MXV 33
#define MYV 33
   int noffp, moffp, npoff, nppp;
   int mnoff, i, j, k, nn, mm, mxv;
   float qtm, edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
   float x, y, dx, dy, vx, vy;
/* per-thread field cache; requires mx < MXV and my < MYV (see the
   commented-out guard below) */
   float sfxy[2*MXV*MYV];
/* float sfxy[2*(mx+1)*(my+1)]; */
   double sum1, sum2;
/* mxv2 = 2*MXV; */
   mxv = mx + 1;
   qtm = qbm*dt;
   sum2 = 0.0;
/* set boundary values */
   edgelx = 0.0f;
   edgely = 1.0f;
   edgerx = (float) (nx);
   edgery = (float) (ny-1);
   if ((ipbc==2) || (ipbc==3)) {
      edgelx = 1.0f;
      edgerx = (float) (nx-1);
   }
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/*    return;                      */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noffp,moffp,nppp,npoff,mnoff,nn,mm,x,y,dxp,dyp,amx,amy, \
dx,dy,vx,vy,sum1,sfxy) \
reduction(+:sum2)
   for (k = 0; k < mxyp1; k++) {
      noffp = k/mx1;
      moffp = my*noffp;
      noffp = mx*(k - mx1*noffp);
      nppp = kpic[k];
      npoff = nppmx*k;
      mnoff = moffp + noff;
/* load local fields from global array */
      nn = (mx < nx-noffp ? mx : nx-noffp) + 1;
      mm = (my < nyp-moffp ? my : nyp-moffp) + 1;
      for (j = 0; j < mm; j++) {
         for (i = 0; i < nn; i++) {
            sfxy[2*(i+mxv*j)] = fxy[2*(i+noffp+nxv*(j+moffp))];
            sfxy[1+2*(i+mxv*j)] = fxy[1+2*(i+noffp+nxv*(j+moffp))];
         }
      }
      sum1 = 0.0;
/* loop over particles in tile */
      for (j = 0; j < nppp; j++) {
/* find interpolation weights */
         x = ppart[idimp*(j+npoff)];
         y = ppart[1+idimp*(j+npoff)];
         nn = x;
         mm = y;
         dxp = x - (float) nn;
         dyp = y - (float) mm;
         nn = 2*(nn - noffp) + 2*mxv*(mm - mnoff);
         amx = 1.0f - dxp;
         amy = 1.0f - dyp;
/* find acceleration */
/* bilinear gather of E-field from the 4 surrounding grid points */
         dx = amx*sfxy[nn];
         dy = amx*sfxy[nn+1];
         dx = amy*(dxp*sfxy[nn+2] + dx);
         dy = amy*(dxp*sfxy[nn+3] + dy);
         nn += 2*mxv;
         vx = amx*sfxy[nn];
         vy = amx*sfxy[nn+1];
         dx += dyp*(dxp*sfxy[nn+2] + vx);
         dy += dyp*(dxp*sfxy[nn+3] + vy);
/* new velocity */
         vx = ppart[2+idimp*(j+npoff)];
         vy = ppart[3+idimp*(j+npoff)];
         dx = vx + qtm*dx;
         dy = vy + qtm*dy;
/* average kinetic energy */
/* (v_old + v_new)^2 accumulates; the 0.125 factor is applied at the end */
         vx += dx;
         vy += dy;
         sum1 += vx*vx + vy*vy;
         ppart[2+idimp*(j+npoff)] = dx;
         ppart[3+idimp*(j+npoff)] = dy;
/* new position */
         dx = x + dx*dt;
         dy = y + dy*dt;
/* reflecting boundary conditions */
         if (ipbc==2) {
            if ((dx < edgelx) || (dx >= edgerx)) {
               dx = ppart[idimp*(j+npoff)];
               ppart[2+idimp*(j+npoff)] = -ppart[2+idimp*(j+npoff)];
            }
            if ((dy < edgely) || (dy >= edgery)) {
               dy = ppart[1+idimp*(j+npoff)];
               ppart[3+idimp*(j+npoff)] = -ppart[3+idimp*(j+npoff)];
            }
         }
/* mixed reflecting/periodic boundary conditions */
         else if (ipbc==3) {
            if ((dx < edgelx) || (dx >= edgerx)) {
               dx = ppart[idimp*(j+npoff)];
               ppart[2+idimp*(j+npoff)] = -ppart[2+idimp*(j+npoff)];
            }
         }
/* set new position */
         ppart[idimp*(j+npoff)] = dx;
         ppart[1+idimp*(j+npoff)] = dy;
      }
      sum2 += sum1;
   }
/* normalize kinetic energy */
   *ek += 0.125f*sum2;
   return;
#undef MXV
#undef MYV
}

/*--------------------------------------------------------------------*/
void cppgppushf2l(float ppart[], float fxy[], int kpic[], int ncl[],
                  int ihole[], int noff, int nyp, float qbm, float dt,
                  float *ek, int nx, int ny, int mx, int my, int idimp,
                  int nppmx, int nxv, int nypmx, int mx1, int mxyp1,
                  int ntmax, int *irc) {
/* for 2d code, this subroutine updates particle co-ordinates and
   velocities using leap-frog scheme in time and first-order linear
   interpolation in space, with periodic boundary conditions
   also determines list of particles which are leaving this tile
   OpenMP version using guard cells, for distributed data
   data read in tiles
   particles stored segmented array
   42 flops/particle, 12 loads, 4 stores
   input: all except ncl, ihole, irc, output: ppart, ncl, ihole, ek, irc
   equations used are:
   vx(t+dt/2) = vx(t-dt/2) + (q/m)*fx(x(t),y(t))*dt,
   vy(t+dt/2) = vy(t-dt/2) + (q/m)*fy(x(t),y(t))*dt,
   where q/m is charge/mass, and
   x(t+dt) = x(t) + vx(t+dt/2)*dt, y(t+dt) = y(t) + vy(t+dt/2)*dt
   fx(x(t),y(t)) and fy(x(t),y(t)) are approximated by interpolation from
   the nearest grid points:
   fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
      + dx*fx(n+1,m+1))
   fy(x,y) = (1-dy)*((1-dx)*fy(n,m)+dx*fy(n+1,m)) + dy*((1-dx)*fy(n,m+1)
      + dx*fy(n+1,m+1))
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   ppart[m][n][0] = position x of particle n in partition in tile m
   ppart[m][n][1] = position y of particle n in partition in tile m
   ppart[m][n][2] = velocity vx of particle n in partition in tile m
   ppart[m][n][3] = velocity vy of particle n in partition in tile m
   fxy[k][j][0] = x component of force/charge at grid (j,kk)
   fxy[k][j][1] = y component of force/charge at grid (j,kk)
   in other words, fxy are the convolutions of the electric field
   over the particle shape, where kk = k + noff
   kpic[k] = number of particles in tile k
   ncl[k][i] = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = destination of particle leaving hole
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   noff = lowermost global gridpoint in particle partition.
   nyp = number of primary (complete) gridpoints in particle partition
   qbm = particle charge/mass
   dt = time interval between successive calculations
   kinetic energy/mass at time t is also calculated, using
   ek = .125*sum((vx(t+dt/2)+vx(t-dt/2))**2+(vy(t+dt/2)+vy(t-dt/2))**2)
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   idimp = size of phase space = 4
   nppmx = maximum number of particles in tile
   nxv = first dimension of field array, must be >= nx+1
   nypmx = maximum size of particle partition, including guard cells.
   mx1 = (system length in x direction - 1)/mx + 1
   mxyp1 = mx1*myp1, where myp1=(partition length in y direction-1)/my+1
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
   optimized version
local data */
#define MXV 33
#define MYV 33
   int noffp, moffp, npoff, nppp;
   int mnoff, i, j, k, ih, nh, nn, mm, mxv;
   float qtm, dxp, dyp, amx, amy;
   float x, y, dx, dy, vx, vy;
   float anx, any, edgelx, edgely, edgerx, edgery;
/* per-thread field cache; requires mx < MXV and my < MYV */
   float sfxy[2*MXV*MYV];
/* float sfxy[2*(mx+1)*(my+1)]; */
   double sum1, sum2;
   mxv = mx + 1;
   qtm = qbm*dt;
   anx = (float) nx;
   any = (float) ny;
   sum2 = 0.0;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/*    return;                      */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noffp,moffp,nppp,npoff,nn,mm,ih,nh,mnoff,x,y,dxp,dyp, \
amx,amy,dx,dy,vx,vy,edgelx,edgely,edgerx,edgery,sum1,sfxy) \
reduction(+:sum2)
   for (k = 0; k < mxyp1; k++) {
      noffp = k/mx1;
      moffp = my*noffp;
      noffp = mx*(k - mx1*noffp);
      nppp = kpic[k];
      npoff = nppmx*k;
      nn = nx - noffp;
      nn = mx < nn ? mx : nn;
      mm = nyp - moffp;
      mm = my < mm ? my : mm;
/* tile boundaries in global coordinates (used for exit detection) */
      edgelx = noffp;
      edgerx = noffp + nn;
      edgely = noff + moffp;
      edgery = noff + moffp + mm;
      ih = 0;
      nh = 0;
      nn += 1;
      mm += 1;
      mnoff = moffp + noff;
/* load local fields from global array */
      for (j = 0; j < mm; j++) {
         for (i = 0; i < nn; i++) {
            sfxy[2*(i+mxv*j)] = fxy[2*(i+noffp+nxv*(j+moffp))];
            sfxy[1+2*(i+mxv*j)] = fxy[1+2*(i+noffp+nxv*(j+moffp))];
         }
      }
/* clear counters */
      for (j = 0; j < 8; j++) {
         ncl[j+8*k] = 0;
      }
      sum1 = 0.0;
/* loop over particles in tile */
      for (j = 0; j < nppp; j++) {
/* find interpolation weights */
         x = ppart[idimp*(j+npoff)];
         y = ppart[1+idimp*(j+npoff)];
         nn = x;
         mm = y;
         dxp = x - (float) nn;
         dyp = y - (float) mm;
         nn = 2*(nn - noffp) + 2*mxv*(mm - mnoff);
         amx = 1.0f - dxp;
         amy = 1.0f - dyp;
/* find acceleration */
         dx = amx*sfxy[nn];
         dy = amx*sfxy[nn+1];
         dx = amy*(dxp*sfxy[nn+2] + dx);
         dy = amy*(dxp*sfxy[nn+3] + dy);
         nn += 2*mxv;
         vx = amx*sfxy[nn];
         vy = amx*sfxy[nn+1];
         dx += dyp*(dxp*sfxy[nn+2] + vx);
         dy += dyp*(dxp*sfxy[nn+3] + vy);
/* new velocity */
         vx = ppart[2+idimp*(j+npoff)];
         vy = ppart[3+idimp*(j+npoff)];
         dx = vx + qtm*dx;
         dy = vy + qtm*dy;
/* average kinetic energy */
         vx += dx;
         vy += dy;
         sum1 += vx*vx + vy*vy;
         ppart[2+idimp*(j+npoff)] = dx;
         ppart[3+idimp*(j+npoff)] = dy;
/* new position */
         dx = x + dx*dt;
         dy = y + dy*dt;
/* find particles going out of bounds */
         mm = 0;
/* count how many particles are going in each direction in ncl   */
/* save their address and destination in ihole                   */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going                              */
         if (dx >= edgerx) {
            if (dx >= anx)
               dx -= anx;
            mm = 2;
         }
         else if (dx < edgelx) {
            if (dx < 0.0f) {
               dx += anx;
               if (dx < anx)
                  mm = 1;
               else
                  dx = 0.0;
            }
            else {
               mm = 1;
            }
         }
         if (dy >= edgery) {
            if (dy >= any)
               dy -= any;
            mm += 6;
         }
         else if (dy < edgely) {
            if (dy < 0.0) {
               dy += any;
               if (dy < any)
                  mm += 3;
               else
                  dy = 0.0;
            }
            else {
               mm += 3;
            }
         }
/* set new position */
         ppart[idimp*(j+npoff)] = dx;
         ppart[1+idimp*(j+npoff)] = dy;
/* increment counters */
         if (mm > 0) {
            ncl[mm+8*k-1] += 1;
            ih += 1;
            if (ih <= ntmax) {
               ihole[2*(ih+(ntmax+1)*k)] = j + 1;
               ihole[1+2*(ih+(ntmax+1)*k)] = mm;
            }
            else {
               nh = 1;
            }
         }
      }
      sum2 += sum1;
/* set error and end of file flag */
/* ihole overflow */
      if (nh > 0) {
         *irc = ih;
/* negative hole count signals overflow to the caller */
         ih = -ih;
      }
      ihole[2*(ntmax+1)*k] = ih;
   }
/* normalize kinetic energy */
   *ek += 0.125f*sum2;
   return;
#undef MXV
#undef MYV
}

/*--------------------------------------------------------------------*/
void cppgppost2l(float ppart[], float q[], int kpic[], int noff,
                 float qm, int idimp, int nppmx, int mx, int my,
                 int nxv, int nypmx, int mx1, int mxyp1) {
/* for 2d code, this subroutine calculates particle charge density
   using first-order linear interpolation, periodic boundaries
   OpenMP version using guard cells, for distributed data
   data deposited in tiles
   particles stored segmented array
   17 flops/particle, 6 loads, 4 stores
   input: all, output: q
   charge density is approximated by values at the nearest grid points
   q(n,m)=qm*(1.-dx)*(1.-dy)
   q(n+1,m)=qm*dx*(1.-dy)
   q(n,m+1)=qm*(1.-dx)*dy
   q(n+1,m+1)=qm*dx*dy
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   ppart[m][n][0] = position x of particle n in partition in tile m
   ppart[m][n][1] = position y of particle n in partition in tile m
   q[k][j] = charge density at grid point (j,kk), where kk = k + noff
   kpic = number of particles per tile
   noff = lowermost global gridpoint in particle partition.
   qm = charge on particle, in units of e
   idimp = size of phase space = 4
   nppmx = maximum number of particles in tile
   mx/my = number of grids in sorting cell in x/y
   nxv = first dimension of charge array, must be >= nx+1
   nypmx = maximum size of particle partition, including guard cells.
mx1 = (system length in x direction - 1)/mx + 1
   mxyp1 = mx1*myp1, where myp1=(partition length in y direction-1)/my+1
local data */
#define MXV 33
#define MYV 33
   int noffp, moffp, npoff, nppp, mxv;
   int mnoff, i, j, k, nn, mm;
   float x, y, dxp, dyp, amx, amy;
/* per-thread charge accumulator; requires mx < MXV and my < MYV */
   float sq[MXV*MYV];
/* float sq[(mx+1)*(my+1)]; */
   mxv = mx + 1;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/*    return;                      */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noffp,moffp,nppp,npoff,mnoff,nn,mm,x,y,dxp,dyp,amx,amy, \
sq)
   for (k = 0; k < mxyp1; k++) {
      noffp = k/mx1;
      moffp = my*noffp;
      noffp = mx*(k - mx1*noffp);
      nppp = kpic[k];
      npoff = nppmx*k;
      mnoff = moffp + noff;
/* zero out local accumulator */
      for (j = 0; j < my+1; j++) {
         for (i = 0; i < mx+1; i++) {
            sq[i+mxv*j] = 0.0f;
         }
      }
/* loop over particles in tile */
      for (j = 0; j < nppp; j++) {
/* find interpolation weights */
         x = ppart[idimp*(j+npoff)];
         y = ppart[1+idimp*(j+npoff)];
         nn = x;
         mm = y;
/* qm is folded into the x weights so deposits need no extra multiply */
         dxp = qm*(x - (float) nn);
         dyp = y - (float) mm;
         nn = nn - noffp + mxv*(mm - mnoff);
         amx = qm - dxp;
         amy = 1.0f - dyp;
/* deposit charge within tile to local accumulator */
         x = sq[nn] + amx*amy;
         y = sq[nn+1] + dxp*amy;
         sq[nn] = x;
         sq[nn+1] = y;
         nn += mxv;
         x = sq[nn] + amx*dyp;
         y = sq[nn+1] + dxp*dyp;
         sq[nn] = x;
         sq[nn+1] = y;
      }
/* deposit charge to interior points in global array */
/* interior points are owned by exactly one tile, so no atomics needed */
      nn = nxv - noffp;
      mm = nypmx - moffp;
      nn = mx < nn ? mx : nn;
      mm = my < mm ? my : mm;
      for (j = 1; j < mm; j++) {
         for (i = 1; i < nn; i++) {
            q[i+noffp+nxv*(j+moffp)] += sq[i+mxv*j];
         }
      }
/* deposit charge to edge points in global array */
/* edges are shared with neighboring tiles, hence the atomics */
      mm = nypmx - moffp;
      mm = my+1 < mm ? my+1 : mm;
      for (i = 1; i < nn; i++) {
#pragma omp atomic
         q[i+noffp+nxv*moffp] += sq[i];
         if (mm > my) {
#pragma omp atomic
            q[i+noffp+nxv*(mm+moffp-1)] += sq[i+mxv*(mm-1)];
         }
      }
      nn = nxv - noffp;
      nn = mx+1 < nn ? mx+1 : nn;
      for (j = 0; j < mm; j++) {
#pragma omp atomic
         q[noffp+nxv*(j+moffp)] += sq[mxv*j];
         if (nn > mx) {
#pragma omp atomic
            q[nn+noffp-1+nxv*(j+moffp)] += sq[nn-1+mxv*j];
         }
      }
   }
   return;
#undef MXV
#undef MYV
}

/*--------------------------------------------------------------------*/
void cppporder2la(float ppart[], float ppbuff[], float sbufl[],
                  float sbufr[], int kpic[], int ncl[], int ihole[],
                  int ncll[], int nclr[], int noff, int nyp, int idimp,
                  int nppmx, int nx, int ny, int mx, int my, int mx1,
                  int myp1, int npbmx, int ntmax, int nbmax, int *irc) {
/* this subroutine performs first part of a particle sort by x,y grid
   in tiles of mx, my
   linear interpolation, with periodic boundary conditions
   for distributed data, with 1d domain decomposition in y.
   tiles are assumed to be arranged in 2D linear memory
   this part of the algorithm has 3 steps. first, one finds particles
   leaving tile and stores their number in each directon, location, and
   destination in ncl and ihole. then, a prefix scan of ncl is performed
   and departing particles are buffered in ppbuff in direction order.
   finally, we buffer particles leaving the processor in sbufl and sbufr,
   and store particle number offsets in ncll and nclr.
   input: all except ppbuff, sbufl, sbufr, ncl, ihole, ncll, nclr, irc
   output: ppart, ppbuff, sbufl, sbufr, ncl, ihole, ncll, nclr, irc
   ppart[k][n][0] = position x of particle n in tile k
   ppart[k][n][1] = position y of particle n in tile k
   ppbuff[k][n][i] = i co-ordinate of particle n in tile k
   sbufl = buffer for particles being sent to lower processor
   sbufr = buffer for particles being sent to upper processor
   kpic[k] = number of particles in tile k
   ncl(i,k) = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = direction destination of particle leaving hole
   all for tile k
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   ncll = number offset being sent to lower processor
   nclr = number offset being sent to upper processor
   noff = lowermost global gridpoint in particle partition.
   nyp = number of primary (complete) gridpoints in particle partition
   idimp = size of phase space = 4
   nppmx = maximum number of particles in tile
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   mx1 = (system length in x direction - 1)/mx + 1
   myp1 = (partition length in y direction - 1)/my + 1
   npbmx = size of buffer array ppbuff
   ntmax = size of hole array for particles leaving tiles
   nbmax = size of buffers for passing particles between processors
   irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
   int mxyp1, noffp, moffp, nppp;
   int i, j, k, ii, jj, ih, nh, ist, nn, mm, isum, ip, j1, kk;
   float anx, any, edgelx, edgely, edgerx, edgery, dx, dy;
   mxyp1 = mx1*myp1;
   anx = (float) nx;
   any = (float) ny;
/* find and count particles leaving tiles and determine destination */
/* update ppart, ihole, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(j,k,noffp,moffp,nppp,nn,mm,ih,nh,ist,dx,dy,edgelx,edgely, \
edgerx,edgery)
   for (k = 0; k < mxyp1; k++) {
      noffp = k/mx1;
      moffp = my*noffp;
      noffp = mx*(k - mx1*noffp);
      nppp = kpic[k];
      nn = nx - noffp;
      nn = mx < nn ? mx : nn;
      mm = nyp - moffp;
      mm = my < mm ? my : mm;
      ih = 0;
      nh = 0;
      edgelx = noffp;
      edgerx = noffp + nn;
      edgely = noff + moffp;
      edgery = noff + moffp + mm;
/* clear counters */
      for (j = 0; j < 8; j++) {
         ncl[j+8*k] = 0;
      }
/* loop over particles in tile */
      for (j = 0; j < nppp; j++) {
         dx = ppart[idimp*(j+nppmx*k)];
         dy = ppart[1+idimp*(j+nppmx*k)];
/* find particles going out of bounds */
         ist = 0;
/* count how many particles are going in each direction in ncl   */
/* save their address and destination in ihole                   */
/* use periodic boundary conditions and check for roundoff error */
/* ist = direction particle is going                             */
         if (dx >= edgerx) {
            if (dx >= anx)
               ppart[idimp*(j+nppmx*k)] = dx - anx;
            ist = 2;
         }
         else if (dx < edgelx) {
            if (dx < 0.0) {
               dx += anx;
               if (dx < anx)
                  ist = 1;
               else
                  dx = 0.0;
               ppart[idimp*(j+nppmx*k)] = dx;
            }
            else {
               ist = 1;
            }
         }
         if (dy >= edgery) {
            if (dy >= any)
               ppart[1+idimp*(j+nppmx*k)] = dy - any;
            ist += 6;
         }
         else if (dy < edgely) {
            if (dy < 0.0) {
               dy += any;
               if (dy < any)
                  ist += 3;
               else
                  dy = 0.0;
               ppart[1+idimp*(j+nppmx*k)] = dy;
            }
            else {
               ist += 3;
            }
         }
         if (ist > 0) {
            ncl[ist+8*k-1] += 1;
            ih += 1;
            if (ih <= ntmax) {
               ihole[2*(ih+(ntmax+1)*k)] = j + 1;
               ihole[1+2*(ih+(ntmax+1)*k)] = ist;
            }
            else {
               nh = 1;
            }
         }
      }
/* set error and end of file flag */
      if (nh > 0) {
         *irc = ih;
/* negative hole count signals overflow to the caller */
         ih = -ih;
      }
      ihole[2*(ntmax+1)*k] = ih;
   }
/* ihole overflow */
   if (*irc > 0)
      return;

/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,isum,ist,nh,ip,j1,ii)
   for (k = 0; k < mxyp1; k++) {
/* find address offset for ordered ppbuff array */
/* exclusive prefix scan of the 8 direction counts within this tile */
      isum = 0;
      for (j = 0; j < 8; j++) {
         ist = ncl[j+8*k];
         ncl[j+8*k] = isum;
         isum += ist;
      }
      nh = ihole[2*(ntmax+1)*k];
      ip = 0;
/* loop over particles leaving tile */
      for (j = 0; j < nh; j++) {
/* buffer particles that are leaving tile, in direction order */
         j1 = ihole[2*(j+1+(ntmax+1)*k)] - 1;
         ist = ihole[1+2*(j+1+(ntmax+1)*k)];
         ii = ncl[ist+8*k-1];
         if (ii < npbmx) {
            for (i = 0; i < idimp; i++) {
               ppbuff[i+idimp*(ii+npbmx*k)]
               = ppart[i+idimp*(j1+nppmx*k)];
            }
         }
         else {
            ip = 1;
         }
         ncl[ist+8*k-1] = ii + 1;
      }
/* set error */
      if (ip > 0)
         *irc = ncl[7+8*k];
   }
/* ppbuff overflow */
   if (*irc > 0)
      return;

/* buffer particles and their number leaving the node: */
/* update sbufl, sbufr, ncll, nclr */
/* kk = index of the first tile in the topmost tile row */
   kk = mx1*(myp1 - 1);
#pragma omp parallel for private(k)
   for (k = 0; k < mx1; k++) {
/* directions 2..4 go to the lower processor, 5..7 to the upper */
      ncll[3*k] = ncl[4+8*k] - ncl[1+8*k];
      nclr[3*k] = ncl[7+8*(k+kk)] - ncl[4+8*(k+kk)];
   }
/* perform prefix scan */
/* parallel Hillis-Steele-style doubling scan over the mx1 tile columns */
   kk = 1;
L90: if (kk >= mx1)
      goto L110;
#pragma omp parallel for private(k,ii,nn,mm)
   for (k = 0; k < mx1; k++) {
      ii = k/kk;
      nn = kk*ii;
      mm = 2*nn + kk - 1;
      nn += k + kk;
      if (nn < mx1) {
         ncll[3*nn] += ncll[3*mm];
         nclr[3*nn] += nclr[3*mm];
      }
   }
   kk += kk;
   goto L90;
L110: kk = mx1*(myp1 - 1);
/* NOTE(review): jj is assigned inside this parallel loop but is not
   listed in the private() clause, so it is shared across threads --
   this looks like a data race; jj should presumably be private.
   Confirm against the upstream UPIC skeleton source. */
#pragma omp parallel for private(i,j,k,ii,nn,mm)
   for (k = 0; k < mx1; k++) {
      ii = ncl[4+8*k] - ncl[1+8*k];
      nn = ncll[3*k] - ii;
      jj = nbmax - nn;
      jj = ii < jj ? ii : jj;
      for (j = 0; j < jj; j++) {
         for (i = 0; i < idimp; i++) {
            sbufl[i+idimp*(j+nn)]
            = ppbuff[i+idimp*(j+ncl[1+8*k]+npbmx*k)];
         }
      }
      for (i = 0; i < 3; i++) {
         ncll[i+3*k] = ncl[i+2+8*k] - ncl[1+8*k] + nn;
      }
      ii = ncl[7+8*(k+kk)] - ncl[4+8*(k+kk)];
      mm = nclr[3*k] - ii;
      jj = nbmax - mm;
      jj = ii < jj ? ii : jj;
      for (j = 0; j < jj; j++) {
         for (i = 0; i < idimp; i++) {
            sbufr[i+idimp*(j+mm)]
            = ppbuff[i+idimp*(j+ncl[4+8*(k+kk)]+npbmx*(k+kk))];
         }
      }
      for (i = 0; i < 3; i++) {
         nclr[i+3*k] = ncl[i+5+8*(k+kk)] - ncl[4+8*(k+kk)] + mm;
      }
   }
/* sbufl or sbufr overflow */
   nn = ncll[3*mx1-1];
   mm = nclr[3*mx1-1];
   ii = nn > mm ? nn : mm;
   if (ii > nbmax)
      *irc = ii;
   return;
}

/*--------------------------------------------------------------------*/
void cppporderf2la(float ppart[], float ppbuff[], float sbufl[],
                   float sbufr[], int ncl[], int ihole[], int ncll[],
                   int nclr[], int idimp, int nppmx, int mx1, int myp1,
                   int npbmx, int ntmax, int nbmax, int *irc) {
/* this subroutine performs first part of a particle sort by x,y grid
   in tiles of mx, my
   linear interpolation, with periodic boundary conditions
   for distributed data, with 1d domain decomposition in y.
   tiles are assumed to be arranged in 2D linear memory
   this part of the algorithm has 2 steps. first, a prefix scan of ncl
   is performed and departing particles are buffered in ppbuff in
   direction order. then, we buffer particles leaving the processor in
   sbufl and sbufr, and store particle number offsets in ncll and nclr.
   it assumes that the number, location, and destination of particles
   leaving a tile have been previously stored in ncl and ihole by the
   cppgppushf2l procedure.
input: all except ppbuff, sbufl, sbufr, ncll, nclr, irc output: ppart, ppbuff, sbufl, sbufr, ncl, ncll, nclr, irc ppart[k][n][0] = position x of particle n in tile k ppart[k][n][1] = position y of particle n in tile k ppbuff[k][n][i] = i co-ordinate of particle n in tile k sbufl = buffer for particles being sent to lower processor sbufr = buffer for particles being sent to upper processor ncl(i,k) = number of particles going to destination i, tile k ihole[k][:][0] = location of hole in array left by departing particle ihole[k][:][1] = direction destination of particle leaving hole all for tile k ihole[k][0][0] = ih, number of holes left (error, if negative) ncll = number offset being sent to lower processor nclr = number offset being sent to upper processor idimp = size of phase space = 4 nppmx = maximum number of particles in tile mx1 = (system length in x direction - 1)/mx + 1 myp1 = (partition length in y direction - 1)/my + 1 npbmx = size of buffer array ppbuff ntmax = size of hole array for particles leaving tiles nbmax = size of buffers for passing particles between processors irc = maximum overflow, returned only if error occurs, when irc > 0 local data */ int mxyp1; int i, j, k, ii, jj, nh, ist, nn, mm, isum, ip, j1, kk; mxyp1 = mx1*myp1; /* buffer particles that are leaving tile: update ppbuff, ncl */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,isum,ist,nh,ip,j1,ii) for (k = 0; k < mxyp1; k++) { /* find address offset for ordered ppbuff array */ isum = 0; for (j = 0; j < 8; j++) { ist = ncl[j+8*k]; ncl[j+8*k] = isum; isum += ist; } nh = ihole[2*(ntmax+1)*k]; ip = 0; /* loop over particles leaving tile */ for (j = 0; j < nh; j++) { /* buffer particles that are leaving tile, in direction order */ j1 = ihole[2*(j+1+(ntmax+1)*k)] - 1; ist = ihole[1+2*(j+1+(ntmax+1)*k)]; ii = ncl[ist+8*k-1]; if (ii < npbmx) { for (i = 0; i < idimp; i++) { ppbuff[i+idimp*(ii+npbmx*k)] = ppart[i+idimp*(j1+nppmx*k)]; } } else { ip = 1; } ncl[ist+8*k-1] = ii + 1; 
} /* set error */ if (ip > 0) *irc = ncl[7+8*k]; } /* ppbuff overflow */ if (*irc > 0) return; /* buffer particles and their number leaving the node: */ /* update sbufl, sbufr, ncll, nclr */ kk = mx1*(myp1 - 1); #pragma omp parallel for private(k) for (k = 0; k < mx1; k++) { ncll[3*k] = ncl[4+8*k] - ncl[1+8*k]; nclr[3*k] = ncl[7+8*(k+kk)] - ncl[4+8*(k+kk)]; } /* perform prefix scan */ kk = 1; L90: if (kk >= mx1) goto L110; #pragma omp parallel for private(k,ii,nn,mm) for (k = 0; k < mx1; k++) { ii = k/kk; nn = kk*ii; mm = 2*nn + kk - 1; nn += k + kk; if (nn < mx1) { ncll[3*nn] += ncll[3*mm]; nclr[3*nn] += nclr[3*mm]; } } kk += kk; goto L90; L110: kk = mx1*(myp1 - 1); #pragma omp parallel for private(i,j,k,ii,nn,mm) for (k = 0; k < mx1; k++) { ii = ncl[4+8*k] - ncl[1+8*k]; nn = ncll[3*k] - ii; jj = nbmax - nn; jj = ii < jj ? ii : jj; for (j = 0; j < jj; j++) { for (i = 0; i < idimp; i++) { sbufl[i+idimp*(j+nn)] = ppbuff[i+idimp*(j+ncl[1+8*k]+npbmx*k)]; } } for (i = 0; i < 3; i++) { ncll[i+3*k] = ncl[i+2+8*k] - ncl[1+8*k] + nn; } ii = ncl[7+8*(k+kk)] - ncl[4+8*(k+kk)]; mm = nclr[3*k] - ii; jj = nbmax - mm; jj = ii < jj ? ii : jj; for (j = 0; j < jj; j++) { for (i = 0; i < idimp; i++) { sbufr[i+idimp*(j+mm)] = ppbuff[i+idimp*(j+ncl[4+8*(k+kk)]+npbmx*(k+kk))]; } } for (i = 0; i < 3; i++) { nclr[i+3*k] = ncl[i+5+8*(k+kk)] - ncl[4+8*(k+kk)] + mm; } } /* sbufl or sbufr overflow */ nn = ncll[3*mx1-1]; mm = nclr[3*mx1-1]; ii = nn > mm ? nn : mm; if (ii > nbmax) *irc = ii; return; } /*--------------------------------------------------------------------*/ void cppporder2lb(float ppart[], float ppbuff[], float rbufl[], float rbufr[], int kpic[], int ncl[], int ihole[], int mcll[], int mclr[], int idimp, int nppmx, int mx1, int myp1, int npbmx, int ntmax, int nbmax, int *irc) { /* this subroutine performs second part of a particle sort by x,y grid in tiles of mx, my linear interpolation, with periodic boundary conditions for distributed data, with 1d domain decomposition in y. 
tiles are assumed to be arranged in 2D linear memory incoming particles from other tiles are copied from ppbuff, rbufl, and rbufr into ppart input: all except ppart, kpic, irc output: ppart, kpic, irc ppart[k][n][0] = position x of particle n in tile k ppart[k][n][1] = position y of particle n in tile k ppbuff[k][n][i] = i co-ordinate of particle n in tile k rbufl = buffer for particles being received from lower processor rbufr = buffer for particles being received from upper processor kpic[k] = number of particles in tile k ncl[k][i] = number of particles going to destination i, tile k ihole[k][:][0] = location of hole in array left by departing particle ihole[k][:][1] = direction destination of particle leaving hole all for tile k ihole[k][0][0] = ih, number of holes left (error, if negative) mcll = number offset being received from lower processor mclr = number offset being received from upper processor idimp = size of phase space = 4 nppmx = maximum number of particles in tile mx1 = (system length in x direction - 1)/mx + 1 myp1 = (partition length in y direction - 1)/my + 1 npbmx = size of buffer array ppbuff ntmax = size of hole array for particles leaving tiles nbmax = size of buffers for passing particles between processors irc = maximum overflow, returned only if error occurs, when irc > 0 local data */ int mxyp1, nppp, ncoff, noff, moff; int i, j, k, ii, kx, ky, ih, nh, ist; int ip, j1, j2, kxl, kxr, kk, kl, kr; int ks[8]; mxyp1 = mx1*myp1; /* copy incoming particles from buffer into ppart: update ppart, kpic */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,ii,kk,nppp,kx,ky,kl,kr,kxl,kxr,ih,nh,ncoff,noff,moff, \ ist,j1,j2,ip,ks) for (k = 0; k < mxyp1; k++) { nppp = kpic[k]; ky = k/mx1; /* loop over tiles in y */ kk = ky*mx1; /* find tile above */ kl = (ky - 1)*mx1; /* find tile below */ kr = (ky + 1)*mx1; /* loop over tiles in x, assume periodic boundary conditions */ kx = k - ky*mx1; kxl = kx - 1; if (kxl < 0) kxl += mx1; kxr = kx + 1; 
if (kxr >= mx1) kxr -= mx1; /* find tile number for different directions */ ks[0] = kxr + kk; ks[1] = kxl + kk; ks[2] = kx + kr; ks[3] = kxr + kr; ks[4] = kxl + kr; ks[5] = kx + kl; ks[6] = kxr + kl; ks[7] = kxl + kl; /* loop over directions */ nh = ihole[2*(ntmax+1)*k]; noff = 0; moff = 0; if (ky==0) { if (kx > 0) noff = mcll[2+3*(kx-1)]; } if (ky==(myp1-1)) { if (kx > 0) moff = mclr[2+3*(kx-1)]; } ncoff = 0; ih = 0; ist = 0; j1 = 0; for (ii = 0; ii < 8; ii++) { /* ip = number of particles coming from direction ii */ if (ks[ii] < 0) { if (ii > 5) noff = mcll[ii-6+3*(ks[ii]+mx1)]; ip = mcll[ii-5+3*(ks[ii]+mx1)] - noff; } else if (ks[ii] >= mxyp1) { if (ii > 2) moff = mclr[ii-3+3*(ks[ii]-mxyp1)]; ip = mclr[ii-2+3*(ks[ii]-mxyp1)] - moff; } else { if (ii > 0) ncoff = ncl[ii-1+8*ks[ii]]; ip = ncl[ii+8*ks[ii]] - ncoff; } for (j = 0; j < ip; j++) { ih += 1; /* insert incoming particles into holes */ if (ih <= nh) { j1 = ihole[2*(ih+(ntmax+1)*k)] - 1; } /* place overflow at end of array */ else { j1 = nppp; nppp += 1; } if (j1 < nppmx) { if (ks[ii] < 0) { for (i = 0; i < idimp; i++) { ppart[i+idimp*(j1+nppmx*k)] = rbufl[i+idimp*(j+noff)]; } } else if (ks[ii] >= mxyp1) { for (i = 0; i < idimp; i++) { ppart[i+idimp*(j1+nppmx*k)] = rbufr[i+idimp*(j+moff)]; } } else { for (i = 0; i < idimp; i++) { ppart[i+idimp*(j1+nppmx*k)] = ppbuff[i+idimp*(j+ncoff+npbmx*ks[ii])]; } } } else { ist = 1; } } } /* set error */ if (ist > 0) *irc = j1+1; /* fill up remaining holes in particle array with particles from bottom */ if (ih < nh) { ip = nh - ih; for (j = 0; j < ip; j++) { j1 = nppp - j - 1; j2 = ihole[2*(nh-j+(ntmax+1)*k)] - 1; if (j1 > j2) { /* move particle only if it is below current hole */ for (i = 0; i < idimp; i++) { ppart[i+idimp*(j2+nppmx*k)] = ppart[i+idimp*(j1+nppmx*k)]; } } } nppp -= ip; } kpic[k] = nppp; } return; } /*--------------------------------------------------------------------*/ void cppcguard2xl(float fxy[], int nyp, int nx, int ndim, int nxe, int nypmx) { /* 
replicate extended periodic vector field in x direction linear interpolation, for distributed data nyp = number of primary (complete) gridpoints in particle partition nx = system length in x direction ndim = leading dimension of array fxy nxe = first dimension of field arrays, must be >= nx+1 nypmx = maximum size of particle partition, including guard cells local data */ int i, k, kk, myp1; /* replicate edges of extended field */ myp1 = nyp + 1; for (k = 0; k < myp1; k++) { kk = ndim*nxe*k; for (i = 0; i < ndim; i++) { fxy[i+ndim*nx+kk] = fxy[i+kk]; } } return; } /*--------------------------------------------------------------------*/ void cppaguard2xl(float q[], int nyp, int nx, int nxe, int nypmx) { /* accumulate extended periodic scalar field in x direction linear interpolation, for distributed data nyp = number of primary (complete) gridpoints in particle partition nx = system length in x direction nxe = first dimension of field arrays, must be >= nx+1 nypmx = maximum size of particle partition, including guard cells local data */ int k, myp1; /* accumulate edges of extended field */ myp1 = nyp + 1; for (k = 0; k < myp1; k++) { q[nxe*k] += q[nx+nxe*k]; q[nx+nxe*k] = 0.0; } return; } /*--------------------------------------------------------------------*/ void cmppois22(float complex q[], float complex fxy[], int isign, float complex ffc[], float ax, float ay, float affp, float *we, int nx, int ny, int kstrt, int nyv, int kxp, int nyhd) { /* this subroutine solves 2d poisson's equation in fourier space for force/charge (or convolution of electric field over particle shape) with periodic boundary conditions, for distributed data. 
for isign = 0, input: isign,ax,ay,affp,nx,ny,jblok,nyv,kxp,nyhd, output: ffc for isign /= 0, input: q,ffc,isign,nx,ny,nyv,kxp,jblok,nyhd, output: fxy,we approximate flop count is: 33*nxc*nyc + 15*(nxc + nyc) where nxc = (nx/2-1)/nvp, nyc = ny/2 - 1, and nvp = number of procs the equation used is: fx[ky][kx] = -sqrt(-1)*kx*g[ky][kx]*s[ky][kx]*q[ky][kx], fy[ky][kx] = -sqrt(-1)*ky*g[ky][kx]*s[ky][kx]*q[ky][kx], where kx = 2pi*j/nx, ky = 2pi*k/ny, and j,k = fourier mode numbers, g[ky][kx] = (affp/(kx**2+ky**2))*s[ky][kx], s[ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2)/2), except for fx(kx=pi) = fy(kx=pi) = fx(ky=pi) = fy(ky=pi) = 0, and fx(kx=0,ky=0) = fy(kx=0,ky=0) = 0. q[k][j] = complex charge density for fourier mode (jj,k) fxy[k][j][0] = x component of complex force/charge, fxy[k][j][1] = y component of complex force/charge, for fourier mode (jj,k), where jj = j + kxp*(kstrt - 1) kxp = number of data values per block kstrt = starting data block number if isign = 0, form factor array is prepared if isign is not equal to 0, force/charge is calculated. aimag(ffc[k][j]) = finite-size particle shape factor s real(ffc[k][j])) = potential green's function g for fourier mode (jj,k), where jj = j + kxp*(kstrt - 1) ax/ay = half-width of particle in x/y direction affp = normalization constant = nx*ny/np, where np=number of particles electric field energy is also calculated, using we = nx*ny*sum((affp/(kx**2+ky**2))*|q[ky][kx]*s[ky][kx]|**2) nx/ny = system length in x/y direction nyv = first dimension of field arrays, must be >= ny nyhd = first dimension of form factor array, must be >= nyh local data */ int nxh, nyh, ks, joff, kxps, j, jj, jk, jk2, k, k1; float dnx, dny, dkx, dky, at1, at2, at3, at4; float complex zero, zt1, zt2; double wp, sum1; nxh = nx/2; nyh = 1 > ny/2 ? 1 : ny/2; ks = kstrt - 1; joff = kxp*ks; kxps = nxh - joff; kxps = 0 > kxps ? 0 : kxps; kxps = kxp < kxps ? 
kxp : kxps; dnx = 6.28318530717959/(float) nx; dny = 6.28318530717959/(float) ny; zero = 0.0 + 0.0*_Complex_I; if (isign != 0) goto L30; if (kstrt > nxh) return; /* prepare form factor array */ for (j = 0; j < kxps; j++) { dkx = dnx*(float) (j + joff); jj = nyhd*j; at1 = dkx*dkx; at2 = pow((dkx*ax),2); for (k = 0; k < nyh; k++) { dky = dny*(float) k; at3 = dky*dky + at1; at4 = exp(-.5*(pow((dky*ay),2) + at2)); if (at3==0.0) { ffc[k+jj] = affp + 1.0*_Complex_I; } else { ffc[k+jj] = (affp*at4/at3) + at4*_Complex_I; } } } return; /* calculate force/charge and sum field energy */ L30: sum1 = 0.0; if (kstrt > nxh) goto L70; /* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */ #pragma omp parallel for \ private(j,k,k1,jj,jk,jk2,dkx,at1,at2,at3,zt1,zt2,wp) \ reduction(+:sum1) for (j = 0; j < kxps; j++) { dkx = dnx*(float) (j + joff); jj = nyhd*j; jk = nyv*j; jk2 = 2*jk; wp = 0.0; if ((j+joff) > 0) { for (k = 1; k < nyh; k++) { k1 = ny - k; at1 = crealf(ffc[k+jj])*cimagf(ffc[k+jj]); at2 = dkx*at1; at3 = dny*at1*(float) k; zt1 = cimagf(q[k+jk]) - crealf(q[k+jk])*_Complex_I; zt2 = cimagf(q[k1+jk]) - crealf(q[k1+jk])*_Complex_I; fxy[2*k+jk2] = at2*zt1; fxy[1+2*k+jk2] = at3*zt1; fxy[2*k1+jk2] = at2*zt2; fxy[1+2*k1+jk2] = -at3*zt2; wp += at1*(q[k+jk]*conjf(q[k+jk]) + q[k1+jk]*conjf(q[k1+jk])); } /* mode numbers ky = 0, ny/2 */ k1 = nyh; at1 = crealf(ffc[jj])*cimagf(ffc[jj]); at3 = dkx*at1; zt1 = cimagf(q[jk]) - crealf(q[jk])*_Complex_I; fxy[jk2] = at3*zt1; fxy[1+jk2] = zero; fxy[2*k1+jk2] = zero; fxy[1+2*k1+jk2] = zero; wp += at1*(q[jk]*conjf(q[jk])); } sum1 += wp; } wp = 0.0; /* mode numbers kx = 0, nx/2 */ if (ks==0) { for (k = 1; k < nyh; k++) { k1 = ny - k; at1 = crealf(ffc[k])*cimagf(ffc[k]); at2 = dny*at1*(float) k; zt1 = cimagf(q[k]) - crealf(q[k])*_Complex_I; fxy[2*k] = zero; fxy[1+2*k] = at2*zt1; fxy[2*k1] = zero; fxy[1+2*k1] = zero; wp += at1*(q[k]*conjf(q[k])); } k1 = 2*nyh; fxy[0] = zero; fxy[1] = zero; fxy[k1] = zero; fxy[1+k1] = zero; } sum1 += wp; L70: *we = 
sum1*((float) nx)*((float) ny); return; } /*--------------------------------------------------------------------*/ void cwpfft2rinit(int mixup[], float complex sct[], int indx, int indy, int nxhyd, int nxyhd) { /* this subroutine calculates tables needed by a two dimensional real to complex fast fourier transform and its inverse. input: indx, indy, nxhyd, nxyhd output: mixup, sct mixup = array of bit reversed addresses sct = sine/cosine table indx/indy = exponent which determines length in x/y direction, where nx=2**indx, ny=2**indy nxhyd = maximum of (nx/2,ny) nxyhd = one half of maximum of (nx,ny) written by viktor k. decyk, ucla local data */ int indx1, indx1y, nx, ny, nxy, nxhy, nxyh; int j, k, lb, ll, jb, it; float dnxy, arg; indx1 = indx - 1; indx1y = indx1 > indy ? indx1 : indy; nx = 1L<<indx; ny = 1L<<indy; nxy = nx > ny ? nx : ny; nxhy = 1L<<indx1y; /* bit-reverse index table: mixup[j] = 1 + reversed bits of j */ for (j = 0; j < nxhy; j++) { lb = j; ll = 0; for (k = 0; k < indx1y; k++) { jb = lb/2; it = lb - 2*jb; lb = jb; ll = 2*ll + it; } mixup[j] = ll + 1; } /* sine/cosine table for the angles 2*n*pi/nxy */ nxyh = nxy/2; dnxy = 6.28318530717959/(float) nxy; for (j = 0; j < nxyh; j++) { arg = dnxy*(float) j; sct[j] = cosf(arg) - sinf(arg)*_Complex_I; } return; } /*--------------------------------------------------------------------*/ void cppfft2rmxx(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int kstrt, int kypi, int kypp, int nxvh, int kypd, int nxhyd, int nxyhd) { /* this subroutine performs the x part of a two dimensional real to complex fast fourier transform and its inverse, for a subset of y, using complex arithmetic, with OpenMP, for data which is distributed in blocks for isign = (-1,1), input: all, output: f for isign = -1, approximate flop count: N*(5*log2(N) + 10)/nvp for isign = 1, approximate flop count: N*(5*log2(N) + 8)/nvp where N = (nx/2)*ny, and nvp = number of procs indx/indy = exponent which 
determines length in x/y direction, where nx=2**indx, ny=2**indy if isign = -1, an inverse fourier transform is performed f[m][n] = (1/nx*ny)*sum(f[k][j]*exp(-sqrt(-1)*2pi*n*j/nx) if isign = 1, a forward fourier transform is performed f[k][j] = sum(f[m][n]*exp(sqrt(-1)*2pi*n*j/nx) kstrt = starting data block number kypi = initial y index used kypp = number of y indices used nxvh = first dimension of f kypd = second dimension of f mixup = array of bit reversed addresses sct = sine/cosine table nxhyd = maximum of (nx/2,ny) nxyhd = one half of maximum of (nx,ny) the real data is stored in a complex array of length nx/2, ny with the odd/even x points stored in the real/imaginary parts. in complex notation, fourier coefficients are stored as follows: f[k][j] = mode j,kk, where kk = k + kyp*(kstrt - 1) 0 <= j < nx/2 and 0 <= kk < ny, except for f[k][0] = mode nx/2,kk, where ny/2+1 <= kk < ny, and imaginary part of f[0][0] = real part of mode nx/2,0 on mode kstrt=0 imaginary part of f[0][0] = real part of mode nx/2,ny/2 on mode kstrt=(ny/2)/kyp written by viktor k. decyk, ucla parallel, RISC optimized version local data */ int indx1, indx1y, nx, nxh, nxhh, ny; int nxy, nxhy, kypt, j, k, nrx; int i, m, ns, ns2, km, kmr, k1, k2, j1, j2, nrxb, joff; float ani; float complex s, t, t1; indx1 = indx - 1; indx1y = indx1 > indy ? indx1 : indy; nx = 1L<<indx; nxh = nx/2; nxhh = nx/4; ny = 1L<<indy; nxy = nx > ny ? 
nx : ny; nxhy = 1L<<indx1y; kypt = kypi + kypp - 1; if (kstrt > ny) return; if (isign > 0) goto L70; /* inverse fourier transform */ ani = 0.5/(((float) nx)*((float) ny)); nrxb = nxhy/nxh; nrx = nxy/nxh; #pragma omp parallel for \ private(i,j,k,m,ns,ns2,km,kmr,k1,k2,j1,j2,joff,s,t,t1) for (i = kypi-1; i < kypt; i++) { joff = nxvh*i; /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { t = f[j1+joff]; f[j1+joff] = f[j+joff]; f[j+joff] = t; } } /* then transform in x */ ns = 1; for (m = 0; m < indx1; m++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = j + k1; j2 = j + k2; s = sct[kmr*j]; t = s*f[j2+joff]; f[j2+joff] = f[j1+joff] - t; f[j1+joff] += t; } } ns = ns2; } /* unscramble coefficients and normalize */ kmr = nxy/nx; for (j = 1; j < nxhh; j++) { t1 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; t = conjf(f[nxh-j+joff]); s = f[j+joff] + t; t = (f[j+joff] - t)*t1; f[j+joff] = ani*(s + t); f[nxh-j+joff] = ani*conjf(s - t); } f[joff] = 2.0*ani*((crealf(f[joff]) + cimagf(f[joff])) + (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I); if (nxhh > 0) f[nxhh+joff] = 2.0*ani*conjf(f[nxhh+joff]); } return; /* forward fourier transform */ L70: nrxb = nxhy/nxh; nrx = nxy/nxh; #pragma omp parallel for \ private(i,j,k,m,ns,ns2,km,kmr,k1,k2,j1,j2,joff,s,t,t1) for (i = kypi-1; i < kypt; i++) { joff = nxvh*i; /* scramble coefficients */ kmr = nxy/nx; for (j = 1; j < nxhh; j++) { t1 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; t = conjf(f[nxh-j+joff]); s = f[j+joff] + t; t = (f[j+joff] - t)*t1; f[j+joff] = s + t; f[nxh-j+joff] = conjf(s - t); } f[joff] = (crealf(f[joff]) + cimagf(f[joff])) + (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I; if (nxhh > 0) f[nxhh+joff] = 2.0*conjf(f[nxhh+joff]); /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { t = f[j1+joff]; f[j1+joff] = 
f[j+joff]; f[j+joff] = t; } } /* then transform in x */ ns = 1; for (m = 0; m < indx1; m++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = j + k1; j2 = j + k2; s = conjf(sct[kmr*j]); t = s*f[j2+joff]; f[j2+joff] = f[j1+joff] - t; f[j1+joff] += t; } } ns = ns2; } } return; } /*--------------------------------------------------------------------*/ void cppfft2rmxy(float complex g[], int isign, int mixup[], float complex sct[], int indx, int indy, int kstrt, int kxpi, int kxpp, int nyv, int kxp, int nxhyd, int nxyhd) { /* this subroutine performs the y part of a two dimensional real to complex fast fourier transform and its inverse, for a subset of x, using complex arithmetic, with OpenMP, for data which is distributed in blocks for isign = (-1,1), input: all, output: g for isign = -1, approximate flop count: N*(5*log2(N) + 10)/nvp for isign = 1, approximate flop count: N*(5*log2(N) + 8)/nvp where N = (nx/2)*ny, and nvp = number of procs indx/indy = exponent which determines length in x/y direction, where nx=2**indx, ny=2**indy if isign = -1, an inverse fourier transform is performed g[m][n] = sum(g[k][j]*exp(-sqrt(-1)*2pi*m*k/ny)) if isign = 1, a forward fourier transform is performed g[k][j] = sum(g[m][n]*exp(sqrt(-1)*2pi*m*k/ny)) kstrt = starting data block number kxp = number of x indices per block kxpi = initial x index used kxpp = number of x indices used nyv = first dimension of g kxp = number of data values per block in x mixup = array of bit reversed addresses sct = sine/cosine table nxhyd = maximum of (nx/2,ny) nxyhd = one half of maximum of (nx,ny) the real data is stored in a complex array of length nx/2, ny with the odd/even x points stored in the real/imaginary parts. 
in complex notation, fourier coefficients are stored as follows: g[k][j] = mode jj,k, where jj = j + kxp*(kstrt - 1) 0 <= jj < nx/2 and 0 <= k < ny, except for g[0][k] = mode nx/2,k, where ny/2+1 <= k < ny, and imaginary part of g[0][0] = real part of mode nx/2,0 and imaginary part of g[1][ny/2] = real part of mode nx/2,ny/2 on node kstrt=0 written by viktor k. decyk, ucla parallel, RISC optimized version local data */ int indx1, indx1y, nx, nxh, ny, nyh; int nxy, nxhy, ks, kxpt, j, k, nry; int i, m, ns, ns2, km, kmr, k1, k2, j1, j2, nryb, koff; float complex s, t; indx1 = indx - 1; indx1y = indx1 > indy ? indx1 : indy; nx = 1L<<indx; nxh = nx/2; ny = 1L<<indy; nyh = ny/2; nxy = nx > ny ? nx : ny; nxhy = 1L<<indx1y; ks = kstrt - 1; kxpt = kxpi + kxpp - 1; if (kstrt > nxh) return; if (isign > 0) goto L70; /* inverse fourier transform */ nryb = nxhy/ny; nry = nxy/ny; #pragma omp parallel for \ private(i,j,k,m,ns,ns2,km,kmr,k1,k2,j1,j2,koff,s,t) for (i = kxpi-1; i < kxpt; i++) { koff = nyv*i; /* bit-reverse array elements in y */ for (k = 0; k < ny; k++) { k1 = (mixup[k] - 1)/nryb; if (k < k1) { t = g[k1+koff]; g[k1+koff] = g[k+koff]; g[k+koff] = t; } } /* then transform in y */ ns = 1; for (m = 0; m < indy; m++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = j + k1; j2 = j + k2; s = sct[kmr*j]; t = s*g[j2+koff]; g[j2+koff] = g[j1+koff] - t; g[j1+koff] += t; } } ns = ns2; } } /* unscramble modes kx = 0, nx/2 */ if ((ks==0) && (kxpi==1)) { for (k = 1; k < nyh; k++) { s = g[ny-k]; g[ny-k] = 0.5*(cimagf(g[k] + s) + crealf(g[k] - s)*_Complex_I); g[k] = 0.5*(crealf(g[k] + s) + cimagf(g[k] - s)*_Complex_I); } } return; /* forward fourier transform */ L70: nryb = nxhy/ny; nry = nxy/ny; /* scramble modes kx = 0, nx/2 */ if ((ks==0) && (kxpi==1)) { for (k = 1; k < nyh; k++) { s = cimagf(g[ny-k]) + crealf(g[ny-k])*_Complex_I; g[ny-k] = conjf(g[k] - s); g[k] += s; } } #pragma omp parallel for \ 
private(i,j,k,m,ns,ns2,km,kmr,k1,k2,j1,j2,koff,s,t) for (i = kxpi-1; i < kxpt; i++) { koff = nyv*i; /* bit-reverse array elements in y */ for (k = 0; k < ny; k++) { k1 = (mixup[k] - 1)/nryb; if (k < k1) { t = g[k1+koff]; g[k1+koff] = g[k+koff]; g[k+koff] = t; } } /* then transform in y */ ns = 1; for (m = 0; m < indy; m++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = j + k1; j2 = j + k2; s = conjf(sct[kmr*j]); t = s*g[j2+koff]; g[j2+koff] = g[j1+koff] - t; g[j1+koff] += t; } } ns = ns2; } } return; } /*--------------------------------------------------------------------*/ void cppfft2rm2xx(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int kstrt, int kypi, int kypp, int nxvh, int kypd, int nxhyd, int nxyhd) { /* this subroutine performs the x part of 2 two dimensional real to complex fast fourier transforms and their inverses, for a subset of y, using complex arithmetic, with OpenMP, for data which is distributed in blocks for isign = (-1,1), input: all, output: f for isign = -1, approximate flop count: N*(5*log2(N) + 10)/nvp for isign = 1, approximate flop count: N*(5*log2(N) + 8)/nvp where N = (nx/2)*ny, and nvp = number of procs indx/indy = exponent which determines length in x/y direction, where nx=2**indx, ny=2**indy if isign = -1, an inverse fourier transform is performed f[m][n][0:1] = (1/nx*ny)*sum(f[k][j][0:1]*exp(-sqrt(-1)*2pi*n*j/nx) if isign = 1, a forward fourier transform is performed f[k][j][0:1] = sum(f[m][n][0:1]*exp(sqrt(-1)*2pi*n*j/nx)* kstrt = starting data block number kypi = initial y index used kypp = number of y indices used nxvh = first dimension of f kypd = second dimension of f mixup = array of bit reversed addresses sct = sine/cosine table nxhyd = maximum of (nx/2,ny) nxyhd = one half of maximum of (nx,ny) the real data is stored in a complex array of length nx/2, ny with the odd/even x points stored in the 
real/imaginary parts. in complex notation, fourier coefficients are stored as follows: f[k][j][0:1] = mode j,kk, where kk = k + kyp*(kstrt - 1) 0 <= j < nx/2 and 0 <= kk < ny, except for f[k][0][0:1] = mode nx/2,kk, where ny/2+1 <= kk < ny, and imaginary part of f[0][0][0:1] = real part of mode nx/2,0 on mode kstrt=0 imaginary part of f[0][0][0:1] = real part of mode nx/2,ny/2 on mode kstrt=(ny/2)/kyp written by viktor k. decyk, ucla parallel, RISC optimized version local data */ int indx1, indx1y, nx, nxh, nxhh, ny; int nxy, nxhy, kypt, j, k, nrx; int i, m, ns, ns2, km, kmr, k1, k2, j1, j2, nrxb, joff; float ani, at1; float complex s, t, t1, t2; indx1 = indx - 1; indx1y = indx1 > indy ? indx1 : indy; nx = 1L<<indx; nxh = nx/2; nxhh = nx/4; ny = 1L<<indy; nxy = nx > ny ? nx : ny; nxhy = 1L<<indx1y; kypt = kypi + kypp - 1; if (kstrt > ny) return; if (isign > 0) goto L100; /* inverse fourier transform */ ani = 0.5/(((float) nx)*((float) ny)); nrxb = nxhy/nxh; nrx = nxy/nxh; #pragma omp parallel for \ private(i,j,k,m,ns,ns2,km,kmr,k1,k2,j1,j2,joff,at1,s,t,t1,t2) for (i = kypi-1; i < kypt; i++) { joff = 2*nxvh*i; /* swap complex components */ for (j = 0; j < nxh; j++) { at1 = cimagf(f[2*j+joff]); f[2*j+joff] = crealf(f[2*j+joff]) + crealf(f[1+2*j+joff])*_Complex_I; f[1+2*j+joff] = at1 + cimagf(f[1+2*j+joff])*_Complex_I; } /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { t1 = f[2*j1+joff]; t2 = f[1+2*j1+joff]; f[2*j1+joff] = f[2*j+joff]; f[1+2*j1+joff] = f[1+2*j+joff]; f[2*j+joff] = t1; f[1+2*j+joff] = t2; } } /* then transform in x */ ns = 1; for (m = 0; m < indx1; m++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = j + k1; j2 = j + k2; s = sct[kmr*j]; t1 = s*f[2*j2+joff]; t2 = s*f[1+2*j2+joff]; f[2*j2+joff] = f[2*j1+joff] - t1; f[1+2*j2+joff] = f[1+2*j1+joff] - t2; f[2*j1+joff] += t1; f[1+2*j1+joff] += t2; } } ns = ns2; } /* 
unscramble coefficients and normalize */ kmr = nxy/nx; for (j = 1; j < nxhh; j++) { t1 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; for (k = 0; k < 2; k++) { t = conjf(f[k+2*(nxh-j)+joff]); s = f[k+2*j+joff] + t; t = (f[k+2*j+joff] - t)*t1; f[k+2*j+joff] = ani*(s + t); f[k+2*(nxh-j)+joff] = ani*conjf(s - t); } } for (k = 0; k < 2; k++) { f[k+joff] = 2.0*ani*((crealf(f[k+joff]) + cimagf(f[k+joff])) + (crealf(f[k+joff]) - cimagf(f[k+joff]))*_Complex_I); if (nxhh > 0) f[k+2*nxhh+joff] = 2.0*ani*conjf(f[k+2*nxhh+joff]); } } return; /* forward fourier transform */ L100: nrxb = nxhy/nxh; nrx = nxy/nxh; #pragma omp parallel for \ private(i,j,k,m,ns,ns2,km,kmr,k1,k2,j1,j2,joff,at1,s,t,t1,t2) for (i = kypi-1; i < kypt; i++) { joff = 2*nxvh*i; /* scramble coefficients */ kmr = nxy/nx; for (j = 1; j < nxhh; j++) { t1 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; for (k = 0; k < 2; k++) { t = conjf(f[k+2*(nxh-j)+joff]); s = f[k+2*j+joff] + t; t = (f[k+2*j+joff] - t)*t1; f[k+2*j+joff] = s + t; f[k+2*(nxh-j)+joff] = conjf(s - t); } } for (k = 0; k < 2; k++) { f[k+joff] = (crealf(f[k+joff]) + cimagf(f[k+joff])) + (crealf(f[k+joff]) - cimagf(f[k+joff]))*_Complex_I; if (nxhh > 0) f[k+2*nxhh+joff] = 2.0*conjf(f[k+2*nxhh+joff]); } /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { t1 = f[2*j1+joff]; t2 = f[1+2*j1+joff]; f[2*j1+joff] = f[2*j+joff]; f[1+2*j1+joff] = f[1+2*j+joff]; f[2*j+joff] = t1; f[1+2*j+joff] = t2; } } /* then transform in x */ ns = 1; for (m = 0; m < indx1; m++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = j + k1; j2 = j + k2; s = conjf(sct[kmr*j]); t1 = s*f[2*j2+joff]; t2 = s*f[1+2*j2+joff]; f[2*j2+joff] = f[2*j1+joff] - t1; f[1+2*j2+joff] = f[1+2*j1+joff] - t2; f[2*j1+joff] += t1; f[1+2*j1+joff] += t2; } } ns = ns2; } /* swap complex components */ for (j = 0; j < nxh; j++) { at1 = cimagf(f[2*j+joff]); 
f[2*j+joff] = crealf(f[2*j+joff]) + crealf(f[1+2*j+joff])*_Complex_I; f[1+2*j+joff] = at1 + cimagf(f[1+2*j+joff])*_Complex_I; } } return; } /*--------------------------------------------------------------------*/ void cppfft2rm2xy(float complex g[], int isign, int mixup[], float complex sct[], int indx, int indy, int kstrt, int kxpi, int kxpp, int nyv, int kxp, int nxhyd, int nxyhd) { /* this subroutine performs the y part of 2 two dimensional real to complex fast fourier transforms and their inverses, for a subset of x, using complex arithmetic, with OpenMP, for data which is distributed in blocks for isign = (-1,1), input: all, output: g for isign = -1, approximate flop count: N*(5*log2(N) + 10)/nvp for isign = 1, approximate flop count: N*(5*log2(N) + 8)/nvp where N = (nx/2)*ny, and nvp = number of procs indx/indy = exponent which determines length in x/y direction, where nx=2**indx, ny=2**indy if isign = -1, an inverse fourier transform is performed g[n][m][0:1] = sum(g[j][k][0:1]*exp(-sqrt(-1)*2pi*m*k/ny)) if isign = 1, a forward fourier transform is performed g[j][k][0:1] = sum(g[n][m][0:1]*exp(sqrt(-1)*2pi*m*k/ny)) kstrt = starting data block number kxpi = initial x index used kxpp = number of x indices used nyv = first dimension of g kxp = number of data values per block in x mixup = array of bit reversed addresses sct = sine/cosine table nxhyd = maximum of (nx/2,ny) nxyhd = one half of maximum of (nx,ny) the real data is stored in a complex array of length nx/2, ny with the odd/even x points stored in the real/imaginary parts. in complex notation, fourier coefficients are stored as follows: g[j][k][0:1] = mode jj,k, where jj = j + kxp*(kstrt - 1) 0 <= jj < nx/2 and 0 <= k < ny, except for g[0][k][0:1] = mode nx/2,k, where ny/2+1 <= k < ny, and imaginary part of g[0][0][0:1] = real part of mode nx/2,0 and imaginary part of g[0][ny/2][0:1] = real part of mode nx/2,ny/2 on node kstrt=0 written by viktor k. 
decyk, ucla parallel, RISC optimized version local data */ int indx1, indx1y, nx, nxh, ny, nyh; int nxy, nxhy, ks, kxpt, j, k, nry; int i, m, ns, ns2, km, kmr, k1, k2, j1, j2, nryb, koff; float complex s, t1, t2; indx1 = indx - 1; indx1y = indx1 > indy ? indx1 : indy; nx = 1L<<indx; nxh = nx/2; ny = 1L<<indy; nyh = ny/2; nxy = nx > ny ? nx : ny; nxhy = 1L<<indx1y; ks = kstrt - 1; kxpt = kxpi + kxpp - 1; if (kstrt > nxh) return; if (isign > 0) goto L80; /* inverse fourier transform */ nryb = nxhy/ny; nry = nxy/ny; #pragma omp parallel for \ private(i,j,k,m,ns,ns2,km,kmr,k1,k2,j1,j2,koff,s,t1,t2) for (i = kxpi-1; i < kxpt; i++) { koff = 2*nyv*i; /* bit-reverse array elements in y */ for (k = 0; k < ny; k++) { k1 = (mixup[k] - 1)/nryb; if (k < k1) { t1 = g[2*k1+koff]; t2 = g[1+2*k1+koff]; g[2*k1+koff] = g[2*k+koff]; g[1+2*k1+koff] = g[1+2*k+koff]; g[2*k+koff] = t1; g[1+2*k+koff] = t2; } } /* then transform in y */ ns = 1; for (m = 0; m < indy; m++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = j + k1; j2 = j + k2; s = sct[kmr*j]; t1 = s*g[2*j2+koff]; t2 = s*g[1+2*j2+koff]; g[2*j2+koff] = g[2*j1+koff] - t1; g[1+2*j2+koff] = g[1+2*j1+koff] - t2; g[2*j1+koff] += t1; g[1+2*j1+koff] += t2; } } ns = ns2; } } /* unscramble modes kx = 0, nx/2 */ if ((ks==0) && (kxpi==1)) { for (k = 1; k < nyh; k++) { for (j = 0; j < 2; j++) { s = g[j+2*(ny-k)]; g[j+2*(ny-k)] = 0.5*(cimagf(g[j+2*k] + s) + crealf(g[j+2*k] - s)*_Complex_I); g[j+2*k] = 0.5*(crealf(g[j+2*k] + s) + cimagf(g[j+2*k] - s)*_Complex_I); } } } return; /* forward fourier transform */ L80: nryb = nxhy/ny; nry = nxy/ny; /* scramble modes kx = 0, nx/2 */ if ((ks==0) && (kxpi==1)) { for (k = 1; k < nyh; k++) { for (j = 0; j < 2; j++) { s = cimagf(g[j+2*(ny-k)]) + crealf(g[j+2*(ny-k)])*_Complex_I; g[j+2*(ny-k)] = conjf(g[j+2*k] - s); g[j+2*k] += s; } } } #pragma omp parallel for \ private(i,j,k,m,ns,ns2,km,kmr,k1,k2,j1,j2,koff,s,t1,t2) for (i = 
kxpi-1; i < kxpt; i++) { koff = 2*nyv*i; /* bit-reverse array elements in y */ for (k = 0; k < ny; k++) { k1 = (mixup[k] - 1)/nryb; if (k < k1) { t1 = g[2*k1+koff]; t2 = g[1+2*k1+koff]; g[2*k1+koff] = g[2*k+koff]; g[1+2*k1+koff] = g[1+2*k+koff]; g[2*k+koff] = t1; g[1+2*k+koff] = t2; } } /* then transform in y */ ns = 1; for (m = 0; m < indy; m++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = j + k1; j2 = j + k2; s = conjf(sct[kmr*j]); t1 = s*g[2*j2+koff]; t2 = s*g[1+2*j2+koff]; g[2*j2+koff] = g[2*j1+koff] - t1; g[1+2*j2+koff] = g[1+2*j1+koff] - t2; g[2*j1+koff] += t1; g[1+2*j1+koff] += t2; } } ns = ns2; } } return; } /*--------------------------------------------------------------------*/ void cwppfft2rm(float complex f[], float complex g[], float complex bs[], float complex br[], int isign, int ntpose, int mixup[], float complex sct[], float *ttp, int indx, int indy, int kstrt, int nvp, int nxvh, int nyv, int kxp, int kyp, int kypd, int nxhyd, int nxyhd) { /* wrapper function for parallel real to complex fft */ /* parallelized with OpenMP */ /* local data */ int nxh, ny, ks, kxpp, kypp; static int kxpi = 1, kypi = 1; float tf; double dtime; /* calculate range of indices */ nxh = 1L<<(indx - 1); ny = 1L<<indy; ks = kstrt - 1; kxpp = nxh - kxp*ks; kxpp = 0 > kxpp ? 0 : kxpp; kxpp = kxp < kxpp ? kxp : kxpp; kypp = ny - kyp*ks; kypp = 0 > kypp ? 0 : kypp; kypp = kyp < kypp ? 
kyp : kypp; /* inverse fourier transform */ if (isign < 0) { /* perform x fft */ cppfft2rmxx(f,isign,mixup,sct,indx,indy,kstrt,kypi,kypp,nxvh,kypd, nxhyd,nxyhd); /* transpose f array to g */ cpwtimera(-1,ttp,&dtime); cpptpose(f,g,bs,br,nxh,ny,kxp,kyp,kstrt,nvp,nxvh,nyv,kxp,kypd); cpwtimera(1,ttp,&dtime); /* perform y fft */ cppfft2rmxy(g,isign,mixup,sct,indx,indy,kstrt,kxpi,kxpp,nyv,kxp, nxhyd,nxyhd); /* transpose g array to f */ if (ntpose==0) { cpwtimera(-1,&tf,&dtime); cpptpose(g,f,br,bs,ny,nxh,kyp,kxp,kstrt,nvp,nyv,nxvh,kypd,kxp); cpwtimera(1,&tf,&dtime); } } /* forward fourier transform */ else if (isign > 0) { /* transpose f array to g */ if (ntpose==0) { cpwtimera(-1,&tf,&dtime); cpptpose(f,g,bs,br,nxh,ny,kxp,kyp,kstrt,nvp,nxvh,nyv,kxp,kypd); cpwtimera(1,&tf,&dtime); } /* perform y fft */ cppfft2rmxy(g,isign,mixup,sct,indx,indy,kstrt,kxpi,kxpp,nyv,kxp, nxhyd,nxyhd); /* transpose g array to f */ cpwtimera(-1,ttp,&dtime); cpptpose(g,f,br,bs,ny,nxh,kyp,kxp,kstrt,nvp,nyv,nxvh,kypd,kxp); cpwtimera(1,ttp,&dtime); /* perform x fft */ cppfft2rmxx(f,isign,mixup,sct,indx,indy,kstrt,kypi,kypp,nxvh,kypd, nxhyd,nxyhd); } if (ntpose==0) *ttp += tf; return; } /*--------------------------------------------------------------------*/ void cwppfft2rm2(float complex f[], float complex g[], float complex bs[], float complex br[], int isign, int ntpose, int mixup[], float complex sct[], float *ttp, int indx, int indy, int kstrt, int nvp, int nxvh, int nyv, int kxp, int kyp, int kypd, int nxhyd, int nxyhd) { /* wrapper function for parallel real to complex fft */ /* parallelized with OpenMP */ /* local data */ int nxh, ny, ks, kxpp, kypp; static int kxpi = 1, kypi = 1; float tf; double dtime; /* calculate range of indices */ nxh = 1L<<(indx - 1); ny = 1L<<indy; ks = kstrt - 1; kxpp = nxh - kxp*ks; kxpp = 0 > kxpp ? 0 : kxpp; kxpp = kxp < kxpp ? kxp : kxpp; kypp = ny - kyp*ks; kypp = 0 > kypp ? 0 : kypp; kypp = kyp < kypp ? 
kyp : kypp; /* inverse fourier transform */ if (isign < 0) { /* perform x fft */ cppfft2rm2xx(f,isign,mixup,sct,indx,indy,kstrt,kypi,kypp,nxvh, kypd,nxhyd,nxyhd); /* transpose f array to g */ cpwtimera(-1,ttp,&dtime); cppntpose(f,g,bs,br,nxh,ny,kxp,kyp,kstrt,nvp,2,nxvh,nyv,kxp,kypd); cpwtimera(1,ttp,&dtime); /* perform y fft */ cppfft2rm2xy(g,isign,mixup,sct,indx,indy,kstrt,kxpi,kxpp,nyv,kxp, nxhyd,nxyhd); /* transpose g array to f */ if (ntpose==0) { cpwtimera(-1,&tf,&dtime); cppntpose(g,f,br,bs,ny,nxh,kyp,kxp,kstrt,nvp,2,nyv,nxvh,kypd, kxp); cpwtimera(1,&tf,&dtime); } } /* forward fourier transform */ else if (isign > 0) { /* transpose f array to g */ if (ntpose==0) { cpwtimera(-1,&tf,&dtime); cppntpose(f,g,bs,br,nxh,ny,kxp,kyp,kstrt,nvp,2,nxvh,nyv,kxp, kypd); cpwtimera(1,&tf,&dtime); } /* perform y fft */ cppfft2rm2xy(g,isign,mixup,sct,indx,indy,kstrt,kxpi,kxpp,nyv,kxp, nxhyd,nxyhd); /* transpose g array to f */ cpwtimera(-1,ttp,&dtime); cppntpose(g,f,br,bs,ny,nxh,kyp,kxp,kstrt,nvp,2,nyv,nxvh,kypd,kxp); cpwtimera(1,ttp,&dtime); /* perform x fft */ cppfft2rm2xx(f,isign,mixup,sct,indx,indy,kstrt,kypi,kypp,nxvh, kypd,nxhyd,nxyhd); } if (ntpose==0) *ttp += tf; return; } /*--------------------------------------------------------------------*/ void cpppcopyout(float part[], float ppart[], int kpic[], int *npp, int npmax, int nppmx, int idimp, int mxyp1, int *irc) { /* for 2d code, this subroutine copies segmented particle data ppart to the array part with original tiled layout spatial decomposition in y direction input: all except part, npp, irc, output: part, npp, irc part[j][i] = i-th coordinate for particle j ppart[k][j][i] = i-th coordinate for particle j in tile k kpic = number of particles per tilees npp = number of particles in partition npmax = maximum number of particles in each partition nppmx = maximum number of particles in tile idimp = size of phase space = 5 mxyp1 = total number of tiles in partition irc = maximum overflow, returned only if error occurs, 
when irc > 0 local data */ int i, j, k, npoff, nppp, ne, ierr; npoff = 0; ierr = 0; /* loop over tiles */ for (k = 0; k < mxyp1; k++) { nppp = kpic[k]; ne = nppp + npoff; if (ne > npmax) ierr = ierr > ne-npmax ? ierr : ne-npmax; if (ierr > 0) nppp = 0; /* loop over particles in tile */ for (j = 0; j < nppp; j++) { for (i = 0; i < idimp; i++) { part[i+idimp*(j+npoff)] = ppart[i+idimp*(j+nppmx*k)]; } } npoff += nppp; } *npp = npoff; if (ierr > 0) *irc = ierr; return; } /* Interfaces to Fortran */ /*--------------------------------------------------------------------*/ void cpdicomp2l_(float *edges, int *nyp, int *noff, int *nypmx, int *nypmn, int *ny, int *kstrt, int *nvp, int *idps) { cpdicomp2l(edges,nyp,noff,nypmx,nypmn,*ny,*kstrt,*nvp,*idps); return; } /*--------------------------------------------------------------------*/ void cpdistr2_(float *part, float *edges, int *npp, int *nps, float *vtx, float *vty, float *vdx, float *vdy, int *npx, int *npy, int *nx, int *ny, int *idimp, int *npmax, int *idps, int *ipbc, int *ierr) { cpdistr2(part,edges,npp,*nps,*vtx,*vty,*vdx,*vdy,*npx,*npy,*nx,*ny, *idimp,*npmax,*idps,*ipbc,ierr); return; } /*--------------------------------------------------------------------*/ void cppdblkp2l_(float *part, int *kpic, int *npp, int *noff, int *nppmx, int *idimp, int *npmax, int *mx, int *my, int *mx1,int *mxyp1, int *irc) { cppdblkp2l(part,kpic,*npp,*noff,nppmx,*idimp,*npmax,*mx,*my,*mx1, *mxyp1,irc); return; } /*--------------------------------------------------------------------*/ void cpppmovin2l_(float *part, float *ppart, int *kpic, int *npp, int *noff, int *nppmx, int *idimp, int *npmax, int *mx, int *my, int *mx1, int *mxyp1, int *irc) { cpppmovin2l(part,ppart,kpic,*npp,*noff,*nppmx,*idimp,*npmax,*mx,*my, *mx1,*mxyp1,irc); return; } /*--------------------------------------------------------------------*/ void cpppcheck2l_(float *ppart, int *kpic, int *noff, int *nyp, int *idimp, int *nppmx, int *nx, int *mx, int *my, int *mx1, 
int *myp1, int *irc) { cpppcheck2l(ppart,kpic,*noff,*nyp,*idimp,*nppmx,*nx,*mx,*my,*mx1, *myp1,irc); return; } /*--------------------------------------------------------------------*/ void cppgppush2l_(float *ppart, float *fxy, int *kpic, int *noff, int *nyp, float *qbm, float *dt, float *ek, int *nx, int *ny, int *mx, int *my, int *idimp, int *nppmx, int *nxv, int *nypmx, int *mx1, int *mxyp1, int *ipbc) { cppgppush2l(ppart,fxy,kpic,*noff,*nyp,*qbm,*dt,ek,*nx,*ny,*mx,*my, *idimp,*nppmx,*nxv,*nypmx,*mx1,*mxyp1,*ipbc); return; } /*--------------------------------------------------------------------*/ void cppgppushf2l_(float *ppart, float *fxy, int *kpic, int *ncl, int *ihole, int *noff, int *nyp, float *qbm, float *dt, float *ek, int *nx, int *ny, int *mx, int *my, int *idimp, int *nppmx, int *nxv, int *nypmx, int *mx1, int *mxyp1, int *ntmax, int *irc) { cppgppushf2l(ppart,fxy,kpic,ncl,ihole,*noff,*nyp,*qbm,*dt,ek,*nx,*ny, *mx,*my,*idimp,*nppmx,*nxv,*nypmx,*mx1,*mxyp1,*ntmax, irc); return; } /*--------------------------------------------------------------------*/ void cppgppost2l_(float *ppart, float *q, int *kpic, int *noff, float *qm, int *idimp, int *nppmx, int *mx, int *my, int *nxv, int *nypmx, int *mx1, int *mxyp1) { cppgppost2l(ppart,q,kpic,*noff, *qm,*idimp,*nppmx,*mx,*my,*nxv, *nypmx,*mx1,*mxyp1); return; } /*--------------------------------------------------------------------*/ void cppporder2la_(float *ppart, float *ppbuff, float *sbufl, float *sbufr, int *kpic, int *ncl, int *ihole, int *ncll, int *nclr, int *noff, int *nyp, int *idimp, int *nppmx, int *nx, int *ny, int *mx, int *my, int *mx1, int *myp1, int *npbmx, int *ntmax, int *nbmax, int *irc) { cppporder2la(ppart,ppbuff,sbufl,sbufr,kpic,ncl,ihole,ncll,nclr,*noff, *nyp,*idimp,*nppmx,*nx,*ny,*mx,*my,*mx1,*myp1,*npbmx, *ntmax,*nbmax,irc); return; } /*--------------------------------------------------------------------*/ void cppporderf2la_(float *ppart, float *ppbuff, float *sbufl, float *sbufr, 
int *ncl, int *ihole, int *ncll, int *nclr, int *idimp, int *nppmx, int *mx1, int *myp1, int *npbmx, int *ntmax, int *nbmax, int *irc) { cppporderf2la(ppart,ppbuff,sbufl,sbufr,ncl,ihole,ncll,nclr,*idimp, *nppmx,*mx1,*myp1,*npbmx,*ntmax,*nbmax,irc); return; } /*--------------------------------------------------------------------*/ void cppporder2lb_(float *ppart, float *ppbuff, float *rbufl, float *rbufr, int *kpic, int *ncl, int *ihole, int *mcll, int *mclr, int *idimp, int *nppmx, int *mx1, int *myp1, int *npbmx, int *ntmax, int *nbmax, int *irc) { cppporder2lb(ppart,ppbuff,rbufl,rbufr,kpic,ncl,ihole,mcll,mclr, *idimp,*nppmx,*mx1,*myp1,*npbmx,*ntmax,*nbmax,irc); return; } /*--------------------------------------------------------------------*/ void cppcguard2xl_(float *fxy, int *nyp, int *nx, int *ndim, int *nxe, int *nypmx) { cppcguard2xl(fxy,*nyp,*nx,*ndim,*nxe,*nypmx); return; } /*--------------------------------------------------------------------*/ void cppaguard2xl_(float *q, int *nyp, int *nx, int *nxe, int *nypmx) { cppaguard2xl(q,*nyp,*nx,*nxe,*nypmx); return; } /*--------------------------------------------------------------------*/ void cmppois22_(float complex *q, float complex *fxy, int *isign, float complex *ffc, float *ax, float *ay, float *affp, float *we, int *nx, int *ny, int *kstrt, int *nyv, int *kxp, int *nyhd) { cmppois22(q,fxy,*isign,ffc,*ax,*ay,*affp,we,*nx,*ny,*kstrt,*nyv,*kxp, *nyhd); return; } /*--------------------------------------------------------------------*/ void cwpfft2rinit_(int *mixup, float complex *sct, int *indx, int *indy, int *nxhyd, int *nxyhd) { cwpfft2rinit(mixup,sct,*indx,*indy,*nxhyd,*nxyhd); return; } /*--------------------------------------------------------------------*/ void cppfft2rmxx_(float complex *f, int *isign, int *mixup, float complex *sct, int *indx, int *indy, int *kstrt, int *kypi, int *kypp, int *nxvh, int *kypd, int *nxhyd, int *nxyhd) { 
cppfft2rmxx(f,*isign,mixup,sct,*indx,*indy,*kstrt,*kypi,*kypp,*nxvh, *kypd,*nxhyd,*nxyhd); return; } /*--------------------------------------------------------------------*/ void cppfft2rmxy_(float complex *g, int *isign, int *mixup, float complex *sct, int *indx, int *indy, int *kstrt, int *kxpi, int *kxpp, int *nyv, int *kxp, int *nxhyd, int *nxyhd) { cppfft2rmxy(g,*isign,mixup,sct,*indx,*indy,*kstrt,*kxpi,*kxpp,*nyv, *kxp,*nxhyd,*nxyhd); return; } /*--------------------------------------------------------------------*/ void cppfft2rm2xx_(float complex *f, int *isign, int *mixup, float complex *sct, int *indx, int *indy, int *kstrt, int *kypi, int *kypp, int *nxvh, int *kypd, int *nxhyd, int *nxyhd) { cppfft2rm2xx(f,*isign,mixup,sct,*indx,*indy,*kstrt,*kypi,*kypp,*nxvh, *kypd,*nxhyd,*nxyhd); return; } /*--------------------------------------------------------------------*/ void cppfft2rm2xy_(float complex *g, int *isign, int *mixup, float complex *sct, int *indx, int *indy, int *kstrt, int *kxpi, int *kxpp, int *nyv, int *kxp, int *nxhyd, int *nxyhd) { cppfft2rm2xy(g,*isign,mixup,sct,*indx,*indy,*kstrt,*kxpi,*kxpp,*nyv, *kxp,*nxhyd,*nxyhd); return; } /*--------------------------------------------------------------------*/ void cwppfft2rm_(float complex *f, float complex *g, float complex *bs, float complex *br, int *isign, int *ntpose, int *mixup, float complex *sct, float *ttp, int *indx, int *indy, int *kstrt, int *nvp, int *nxvh, int *nyv, int *kxp, int *kyp, int *kypd, int *nxhyd, int *nxyhd) { cwppfft2rm(f,g,bs,br,*isign,*ntpose,mixup,sct,ttp,*indx,*indy,*kstrt, *nvp,*nxvh,*nyv,*kxp,*kyp,*kypd,*nxhyd,*nxyhd); return; } /*--------------------------------------------------------------------*/ void cwppfft2rm2_(float complex *f, float complex *g, float complex *bs, float complex *br, int *isign, int *ntpose, int *mixup, float complex *sct, float *ttp, int *indx, int *indy, int *kstrt, int *nvp, int *nxvh, int *nyv, int *kxp, int *kyp, int *kypd, int *nxhyd, int 
*nxyhd) { cwppfft2rm2(f,g,bs,br,*isign,*ntpose,mixup,sct,ttp,*indx,*indy, *kstrt,*nvp,*nxvh,*nyv,*kxp,*kyp,*kypd,*nxhyd,*nxyhd); return; }
/* ==================== file: ch_ompss.c ==================== */
#include "ch_common.h"
#include "../extrae.h"
#include "../timing.h"

/**
 * TODO: What is the lower bound for a circular deadlock? (0 waits for 1 waits for 2 waits for 0)
 * Example: Execution order on 1 is reversed:
 * 0 waits for 1/2/3,
 * 1 waits for 3/2/0,
 * 2 waits for 0/1/3,
 * 3 waits for 0/1/2
 * OR
 * 0 waits for 1/2/3/4,
 * 1 waits for 0/2/3/4,
 * 2 waits for 4/3/2/0,
 * 3 waits for 0/1/2/4,
 * 4 waits for 0/1/2/3
 * OR
 * 0 waits for 1/2/3/4/5,
 * 1 waits for 0/2/3/4/5,
 * 2 waits for 5/4/3/2/0,
 * 3 waits for 0/1/2/4/5,
 * 4 waits for 0/1/2/3/5,
 * 5 waits for 0/1/2/3/5
 *
 * NOTE: circular dependencies may happen if at least one of the inner ranks
 * (1 or 2, not 0 or 3) reverse their order
 * HYPOTHESIS: we need at least (p-(p/2)) (ceil(0.5p)) threads to avoid deadlock from reversal
 * Generalization to some ordered graph traversal problem?
 */
/*
 * Distributed task-parallel tiled Cholesky factorization (OmpSs-style tasks + MPI).
 *   ts         - tile edge length (messages are ts*ts MPI_DOUBLE)
 *   nt         - number of tiles per matrix dimension
 *   A          - nt x nt array of pointers to tiles; only tiles owned by this
 *                rank (block_rank[..] == mype) are touched locally
 *   B          - scratch tile receiving the remote diagonal block A[k][k]
 *   C          - per-column scratch tiles receiving remote panel (trsm) results
 *   block_rank - nt*nt ownership map: block_rank[i*nt+j] = MPI rank owning tile (i,j)
 * NOTE(review): mype/np (rank/size) and omp_potrf/omp_trsm/omp_gemm/omp_syrk,
 * waitall, reset_send_flags come from ch_common.h -- not visible here; confirm.
 */
void cholesky_mpi(const int ts, const int nt, double *A[nt][nt], double *B, double *C[nt], int *block_rank)
{
    /* per-iteration scratch lists of tile indices exchanged with one peer */
    int *send_blocks = malloc((nt) * sizeof(int));
    int *recv_blocks = malloc((nt) * sizeof(int));
    REGISTER_EXTRAE();
#pragma omp parallel
{
#pragma omp single
{
    INIT_TIMING(omp_get_num_threads());
    char dst_sentinels[np]; /* NOTE(review): never used below -- candidate for removal */
    START_TIMING(TIME_TOTAL);
    {
    START_TIMING(TIME_CREATE);
    /* one outer iteration per diagonal tile k; all work is spawned as tasks
       and ordered purely by in/out dependencies on tiles */
    for (int k = 0; k < nt; k++) {
        /* factor the diagonal tile if we own it */
        if (block_rank[k*nt+k] == mype) {
#pragma omp task out(A[k][k]) firstprivate(k) no_copy_deps
{
            EXTRAE_ENTER(EVENT_POTRF);
            START_TIMING(TIME_POTRF);
            omp_potrf(A[k][k], ts, ts);
            END_TIMING(TIME_POTRF);
            EXTRAE_EXIT(EVENT_POTRF);
}
        }
        /* owner of A[k][k]: send the factored diagonal tile to every rank
           that owns a tile in panel row k (one message per distinct rank) */
        if (block_rank[k*nt+k] == mype && np != 1) {
#pragma omp task in(A[k][k]) firstprivate(k) no_copy_deps untied
{
            START_TIMING(TIME_COMM);
            MPI_Request *reqs = NULL;
            int nreqs = 0;
            char send_flags[np];
            reset_send_flags(send_flags);
            /* first pass only counts distinct destination ranks */
            for (int kk = k+1; kk < nt; kk++) {
                if (!send_flags[block_rank[k*nt+kk]]) {
                    ++nreqs;
                    send_flags[block_rank[k*nt+kk]] = 1;
                }
            }
            reqs = malloc(sizeof(MPI_Request)*nreqs);
            nreqs = 0;
            for (int dst = 0; dst < np; dst++) {
                if (send_flags[dst] && dst != mype) {
                    MPI_Request send_req;
                    /* tag k*nt+k identifies the diagonal tile of iteration k */
                    MPI_Isend(A[k][k], ts*ts, MPI_DOUBLE, dst, k*nt+k, MPI_COMM_WORLD, &send_req);
                    reqs[nreqs++] = send_req;
                }
            }
            waitall(reqs, nreqs);
            free(reqs);
            END_TIMING(TIME_COMM);
        }
        }
        /* non-owner: receive the diagonal tile into scratch B, but only if we
           actually own some tile in panel row k that needs it */
        if (block_rank[k*nt+k] != mype) {
#pragma omp task out(B) firstprivate(k) no_copy_deps untied
{
            START_TIMING(TIME_COMM);
            int recv_flag = 0;
            for (int i = k + 1; i < nt; i++) {
                if (block_rank[k*nt+i] == mype) {
                    recv_flag = 1;
                    break;
                }
            }
            if (recv_flag) {
                MPI_Request recv_req;
                MPI_Irecv(B, ts*ts, MPI_DOUBLE, block_rank[k*nt+k], k*nt+k, MPI_COMM_WORLD, &recv_req);
                waitall(&recv_req, 1);
            }
            END_TIMING(TIME_COMM);
}
        }
        /* triangular solves for the panel tiles we own; the diagonal operand
           is either our local A[k][k] or the received copy in B */
        for (int i = k + 1; i < nt; i++) {
            if (block_rank[k*nt+i] == mype) {
                if (block_rank[k*nt+k] == mype) {
#pragma omp task in(A[k][k]) out(A[k][i]) firstprivate(k, i) no_copy_deps
{
                    EXTRAE_ENTER(EVENT_TRSM);
                    START_TIMING(TIME_TRSM);
                    omp_trsm(A[k][k], A[k][i], ts, ts);
                    END_TIMING(TIME_TRSM);
                    EXTRAE_EXIT(EVENT_TRSM);
}
                } else {
#pragma omp task in(B) out(A[k][i]) firstprivate(k, i) no_copy_deps
{
                    EXTRAE_ENTER(EVENT_TRSM);
                    START_TIMING(TIME_TRSM);
                    omp_trsm(B, A[k][i], ts, ts);
                    END_TIMING(TIME_TRSM);
                    EXTRAE_EXIT(EVENT_TRSM);
}
                }
            }
        }
        /* one communication task per peer rank: exchange the panel tiles
           that the trailing update on either side will consume */
        for (int dst = 0; dst < np; dst++) {
            if (dst == mype) continue;
            int send_cnt = 0;
            int recv_cnt = 0;
            // populate list of blocks to send/recv to/from this unit
            for (int i = k + 1; i < nt; i++) {
                if (block_rank[k*nt+i] == mype && np != 1) {
                    /* dst needs our A[k][i] if it owns any tile updated from it */
                    int send_flag = 0;
                    for (int ii = k + 1; ii < i; ii++) {
                        if (!send_flag && block_rank[ii*nt+i] == dst) {
                            send_flag = 1;
                            break;
                        }
                    }
                    for (int ii = i + 1; ii < nt; ii++) {
                        if (!send_flag && block_rank[i*nt+ii] == dst) {
                            send_flag = 1;
                            break;
                        }
                    }
                    if (!send_flag && block_rank[i*nt+i] == dst) send_flag = 1;
                    if (send_flag) {
                        send_blocks[send_cnt++] = i;
                    }
                }
                if (block_rank[k*nt+i] != mype && block_rank[k*nt+i] == dst) {
                    /* mirror test: we need dst's A[k][i] (received into C[i]) */
                    int recv_flag = 0;
                    for (int ii = k + 1; ii < i; ii++) {
                        if (block_rank[ii*nt+i] == mype) recv_flag = 1;
                    }
                    for (int ii = i + 1; ii < nt; ii++) {
                        if (block_rank[i*nt+ii] == mype) recv_flag = 1;
                    }
                    if (block_rank[i*nt+i] == mype)
                        recv_flag = 1;
                    if (recv_flag) {
                        recv_blocks[recv_cnt++] = i;
                    }
                }
            }
            //printf("send_cnt: %d, recv_cnt: %d, blocks: %d\n", send_cnt, recv_cnt, (nt-(k+1)));
            // NOTE: we have to wait for all of the above tasks using comm_sentinel
            // dependency iterators might help here
#pragma omp task no_copy_deps firstprivate(k, dst) out({C[recv_blocks[it]], it=0;recv_cnt}) in({A[k][send_blocks[it]], it=0;send_cnt}) untied
{
                START_TIMING(TIME_COMM);
                int nreqs = 0;
                // upper bound in case all our blocks have to be sent
                int max_req = (nt-k);
                MPI_Request *reqs = malloc(sizeof(*reqs)*max_req);
                /* re-derive the same send/recv decisions as above (no break
                   in the inner scans here, but the resulting flag is identical) */
                for (int i = k + 1; i < nt; i++) {
                    if (block_rank[k*nt+i] == mype && np != 1) {
                        int send_flag = 0;
                        for (int ii = k + 1; ii < i; ii++) {
                            if (!send_flag && block_rank[ii*nt+i] == dst) {
                                send_flag = 1;
                            }
                        }
                        for (int ii = i + 1; ii < nt; ii++) {
                            if (!send_flag && block_rank[i*nt+ii] == dst) {
                                send_flag = 1;
                            }
                        }
                        if (!send_flag && block_rank[i*nt+i] == dst) send_flag = 1;
                        if (send_flag) {
                            MPI_Request send_req;
                            MPI_Isend(A[k][i], ts*ts, MPI_DOUBLE, dst, k*nt+i, MPI_COMM_WORLD, &send_req);
                            reqs[nreqs++] = send_req;
                        }
                    }
                    if (block_rank[k*nt+i] != mype && block_rank[k*nt+i] == dst) {
                        int recv_flag = 0;
                        for (int ii = k + 1; ii < i; ii++) {
                            if (block_rank[ii*nt+i] == mype) recv_flag = 1;
                        }
                        for (int ii = i + 1; ii < nt; ii++) {
                            if (block_rank[i*nt+ii] == mype) recv_flag = 1;
                        }
                        if (block_rank[i*nt+i] == mype) recv_flag = 1;
                        if (recv_flag) {
                            MPI_Request recv_req;
                            MPI_Irecv(C[i], ts*ts, MPI_DOUBLE, block_rank[k*nt+i], k*nt+i, MPI_COMM_WORLD, &recv_req);
                            reqs[nreqs++] = recv_req;
                        }
                    }
                }
                //printf("Waiting for trsm blocks from %d in k=%d\n", dst, k);
                waitall(reqs, nreqs);
                free(reqs);
                END_TIMING(TIME_COMM);
}
        }
        /* trailing-matrix update: gemm for tiles strictly below the diagonal,
           syrk for diagonal tiles; operands are local A[k][*] when owned,
           otherwise the received copies C[*] (or scratch B via trsm above) */
        for (int i = k + 1; i < nt; i++) {
            for (int j = k + 1; j < i; j++) {
                if (block_rank[j*nt+i] == mype) {
                    if (block_rank[k*nt+i] == mype && block_rank[k*nt+j] == mype) {
#pragma omp task in(A[k][i], A[k][j]) out(A[j][i]) firstprivate(k, j, i) no_copy_deps
{
                        EXTRAE_ENTER(EVENT_GEMM);
                        START_TIMING(TIME_GEMM);
                        omp_gemm(A[k][i], A[k][j], A[j][i], ts, ts);
                        END_TIMING(TIME_GEMM);
                        EXTRAE_EXIT(EVENT_GEMM);
}
                    } else if (block_rank[k*nt+i] != mype && block_rank[k*nt+j] == mype) {
#pragma omp task in(A[k][j], C[i]) out(A[j][i]) firstprivate(k, j, i) no_copy_deps
{
                        EXTRAE_ENTER(EVENT_GEMM);
                        START_TIMING(TIME_GEMM);
                        omp_gemm(C[i], A[k][j], A[j][i], ts, ts);
                        END_TIMING(TIME_GEMM);
                        EXTRAE_EXIT(EVENT_GEMM);
}
                    } else if (block_rank[k*nt+i] == mype && block_rank[k*nt+j] != mype) {
                        // TODO: the content of C[j] may be overwritten but we cannot specify a dependency on it :(
#pragma omp task in(A[k][i], C[j]) out(A[j][i]) firstprivate(k, j, i) no_copy_deps
{
                        EXTRAE_ENTER(EVENT_GEMM);
                        START_TIMING(TIME_GEMM);
                        omp_gemm(A[k][i], C[j], A[j][i], ts, ts);
                        END_TIMING(TIME_GEMM);
                        EXTRAE_EXIT(EVENT_GEMM);
}
                    } else {
#pragma omp task in(C[i], C[j]) out(A[j][i]) firstprivate(k, j, i) no_copy_deps
{
                        EXTRAE_ENTER(EVENT_GEMM);
                        START_TIMING(TIME_GEMM);
                        omp_gemm(C[i], C[j], A[j][i], ts, ts);
                        END_TIMING(TIME_GEMM);
                        EXTRAE_EXIT(EVENT_GEMM);
}
                    }
                }
            }
            if (block_rank[i*nt+i] == mype) {
                if (block_rank[k*nt+i] == mype) {
#pragma omp task in(A[k][i]) out(A[i][i]) firstprivate(k, i) no_copy_deps
{
                    EXTRAE_ENTER(EVENT_SYRK);
                    START_TIMING(TIME_SYRK);
                    omp_syrk(A[k][i], A[i][i], ts, ts);
                    END_TIMING(TIME_SYRK);
                    EXTRAE_EXIT(EVENT_SYRK);
}
                } else {
#pragma omp task in(C[i]) out(A[i][i]) firstprivate(k, i) no_copy_deps
{
                    EXTRAE_ENTER(EVENT_SYRK);
                    START_TIMING(TIME_SYRK);
                    omp_syrk(C[i], A[i][i], ts, ts);
                    END_TIMING(TIME_SYRK);
                    EXTRAE_EXIT(EVENT_SYRK);
}
                }
            }
        }
    }
    END_TIMING(TIME_CREATE);
    }
    /* all tasks for all k have been created; wait for the whole DAG */
#pragma omp taskwait
    END_TIMING(TIME_TOTAL);
    MPI_Barrier(MPI_COMM_WORLD);
    PRINT_TIMINGS();
    FREE_TIMING();
}// pragma omp single
}// pragma omp parallel
    free(send_blocks);
    free(recv_blocks);
}
// ==================== file: bfsdfs.h ====================
namespace TSnap { ///////////////////////////////////////////////// // BFS and DFS /// Returns a directed Breadth-First-Search tree rooted at StartNId. ##GetBfsTree1 template <class PGraph> PNGraph GetBfsTree(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn); /// Returns the BFS tree size (number of nodes) and depth (number of levels) by following in-links (parameter FollowIn = true) and/or out-links (parameter FollowOut = true) of node StartNId. template <class PGraph> int GetSubTreeSz(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn, int& TreeSzX, int& TreeDepthX); /// Finds IDs of all nodes that are at distance Hop from node StartNId. ##GetSubTreeSz template <class PGraph> int GetNodesAtHop(const PGraph& Graph, const int& StartNId, const int& Hop, TIntV& NIdV, const bool& IsDir=false); /// Returns the number of nodes at each hop distance from the starting node StartNId. ##GetNodesAtHops template <class PGraph> int GetNodesAtHops(const PGraph& Graph, const int& StartNId, TIntPrV& HopCntV, const bool& IsDir=false); ///////////////////////////////////////////////// // Shortest paths /// Returns the length of the shortest path from node SrcNId to node DstNId. ##GetShortPath1 template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, const int& DstNId, const bool& IsDir=false); /// Returns the length of the shortest path from node SrcNId to all other nodes in the network. ##GetShortPath2 template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, TIntH& NIdToDistH, const bool& IsDir=false, const int& MaxDist=TInt::Mx); ///////////////////////////////////////////////// // Diameter /// Returns the (approximation of the) Diameter (maximum shortest path length) of a graph (by performing BFS from NTestNodes random starting nodes). 
##GetBfsFullDiam template <class PGraph> int GetBfsFullDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir=false); /// Returns the (approximation of the) Effective Diameter (90-th percentile of the distribution of shortest path lengths) of a graph (by performing BFS from NTestNodes random starting nodes). ##GetBfsEffDiam1 template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir=false); /// Returns the (approximation of the) Effective Diameter and the Diameter of a graph (by performing BFS from NTestNodes random starting nodes). ##GetBfsEffDiam2 template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiamX, int& FullDiamX); /// Returns the (approximation of the) Effective Diameter, the Diameter and the Average Shortest Path length in a graph (by performing BFS from NTestNodes random starting nodes). GetBfsEffDiam3 template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiamX, int& FullDiamX, double& AvgSPLX); /// Use the whole graph (all edges) to measure the shortest path lengths but only report the path lengths between nodes in the SubGraphNIdV. 
GetBfsEffDiam4 template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const TIntV& SubGraphNIdV, const bool& IsDir, double& EffDiamX, int& FullDiamX); // TODO: Implement in the future //template <class PGraph> int GetRangeDist(const PGraph& Graph, const int& SrcNId, const int& DstNId, const bool& IsDir=false); //template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, TIntH& NIdToDistH, const bool& IsDir=false, const int& MaxDist=1000); //template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, const TIntSet& TargetSet, const bool& IsDir, TIntV& PathNIdV); //template <class PGraph> int GetShortPath(TIntH& NIdPrnH, TCcQueue<int>& NIdQ, const PGraph& Graph, const int& SrcNId, const TIntSet& TargetSet, const bool& IsDir, TIntV& PathNIdV); //template <class PGraph> int GetMxShortDist(const PGraph& Graph, const int& SrcNId, const bool& IsDir=false); //template <class PGraph> int GetMxShortDist(const PGraph& Graph, const int& SrcNId, const bool& IsDir, int& MxDistNId); //template <class PGraph> int GetMxShortDist(const PGraph& Graph, const int& SrcNId, const bool& IsDir, int& MxDistNId, TCcQueue<int>& NIdQ, TCcQueue<int>& DistQ, TIntSet& VisitedH); //template <class PGraph> int GetMxGreedyDist(const PGraph& Graph, const int& SrcNId, const bool& IsDir=false); //template <class PGraph> int GetMxGreedyDist(const PGraph& Graph, const int& SrcNId, const bool& IsDir, TCcQueue<int>& NIdQ, TCcQueue<int>& DistQ, TIntSet& VisitedH); //template <class PGraph> PNGraph GetShortPathsSubGraph(const PGraph& Graph, const TIntV& SubGraphNIdV); //template <class PGraph> PGraph GetWccPathsSubGraph(const PGraph& Graph, const TIntV& NIdV); //template <class PGraph> void GetSubTreeSz(const PGraph& Graph, const int& StartNId, const bool& FollowOutEdges, int& TreeSz, int& TreeDepth); } // namespace TSnap //#////////////////////////////////////////////// /// Breath-First-Search class. 
/// The class is meant for executing many BFSs over a fixed graph. This means that the class can
/// keep the hash tables and queues initialized between different calls of the DoBfs() function.
/// (Class name spelled "Breath" for historical/backward-compatibility reasons.)
template<class PGraph>
class TBreathFS {
public:
  PGraph Graph;              // graph the BFS runs over
  TSnapQueue<int> Queue;     // BFS frontier queue (reused across runs)
  TInt StartNId;             // start node of the most recent DoBfs()/DoBfsHybrid() call
  TIntH NIdDistH;            // node id -> hop distance from StartNId (visited nodes only)
public:
  // InitBigQ=true pre-sizes the queue/hash to the full node count to avoid regrowth.
  TBreathFS(const PGraph& GraphPt, const bool& InitBigQ=true) :
    Graph(GraphPt), Queue(InitBigQ?Graph->GetNodes():1024), NIdDistH(InitBigQ?Graph->GetNodes():1024) { }
  /// Sets the graph to be used by the BFS to GraphPt and resets the data structures.
  void SetGraph(const PGraph& GraphPt);
  /// Performs BFS from node id StartNode for at most MxDist steps by only following
  /// in-links (parameter FollowIn = true) and/or out-links (parameter FollowOut = true).
  int DoBfs(const int& StartNode, const bool& FollowOut, const bool& FollowIn, const int& TargetNId=-1, const int& MxDist=TInt::Mx);
  /// Same functionality as DoBfs with better performance.
  int DoBfsHybrid(const int& StartNode, const bool& FollowOut, const bool& FollowIn, const int& TargetNId=-1, const int& MxDist=TInt::Mx);
  /// Returns the number of nodes visited/reached by the BFS.
  int GetNVisited() const { return NIdDistH.Len(); }
  /// Returns the IDs of the nodes visited/reached by the BFS.
  void GetVisitedNIdV(TIntV& NIdV) const { NIdDistH.GetKeyV(NIdV); }
  /// Returns the shortest path distance between SrcNId and DstNId.
  /// Note you have to first call DoBfs(). SrcNId must be equal to StartNode, otherwise return value is -1.
  int GetHops(const int& SrcNId, const int& DstNId) const;
  /// Returns a random shortest path from SrcNId to DstNId.
  /// Note you have to first call DoBfs(). SrcNId must be equal to StartNode, otherwise return value is -1.
  int GetRndPath(const int& SrcNId, const int& DstNId, TIntV& PathNIdV) const;
/* Private variables and functions for DoBfsHybrid */
private:
  int Stage; // 0, 2: top down, 1: bottom up
  // Heuristic thresholds for switching between top-down and bottom-up traversal
  // (direction-optimizing BFS); values are ratios of unvisited/total nodes to frontier size.
  static const unsigned int alpha = 100;
  static const unsigned int beta = 20;
  /* Private functions */
  bool TopDownStep(TIntV &NIdDistV, TIntV *Frontier, TIntV *NextFrontier, int& MaxDist, const int& TargetNId, const bool& FollowOut, const bool& FollowIn);
  bool BottomUpStep(TIntV &NIdDistV, TIntV *Frontier, TIntV *NextFrontier, int& MaxDist, const int& TargetNId, const bool& FollowOut, const bool& FollowIn);
};

/// Rebinds the BFS to GraphPt, growing (never shrinking) the reusable queue and hash table.
template<class PGraph>
void TBreathFS<PGraph>::SetGraph(const PGraph& GraphPt) {
  Graph=GraphPt;
  const int N=GraphPt->GetNodes();
  if (Queue.Reserved() < N) { Queue.Gen(N); }
  if (NIdDistH.GetReservedKeyIds() < N) { NIdDistH.Gen(N); }
}

/// Standard queue-based BFS. Fills NIdDistH with hop distances from StartNode.
/// Stops early when TargetNId is discovered (returning its distance) or when MxDist is reached.
/// Returns the maximum distance assigned to any visited node.
template<class PGraph>
int TBreathFS<PGraph>::DoBfs(const int& StartNode, const bool& FollowOut, const bool& FollowIn, const int& TargetNId, const int& MxDist) {
  StartNId = StartNode;
  IAssert(Graph->IsNode(StartNId));
//  const typename PGraph::TObj::TNodeI StartNodeI = Graph->GetNI(StartNode);
//  IAssertR(StartNodeI.GetOutDeg() > 0, TStr::Fmt("No neighbors from start node %d.", StartNode));
  NIdDistH.Clr(false);  NIdDistH.AddDat(StartNId, 0);
  Queue.Clr(false);  Queue.Push(StartNId);
  int v, MaxDist = 0;
  while (! Queue.Empty()) {
    const int NId = Queue.Top();  Queue.Pop();
    const int Dist = NIdDistH.GetDat(NId);
    if (Dist == MxDist) { break; }          // max distance limit reached
    const typename PGraph::TObj::TNodeI NodeI = Graph->GetNI(NId);
    if (FollowOut) { // out-links
      for (v = 0; v < NodeI.GetOutDeg(); v++) {  // out-links
        const int DstNId = NodeI.GetOutNId(v);
        if (! NIdDistH.IsKey(DstNId)) {     // first visit fixes the shortest distance
          NIdDistH.AddDat(DstNId, Dist+1);
          MaxDist = TMath::Mx(MaxDist, Dist+1);
          if (DstNId == TargetNId) { return MaxDist; }
          Queue.Push(DstNId);
        }
      }
    }
    if (FollowIn) { // in-links
      for (v = 0; v < NodeI.GetInDeg(); v++) {
        const int DstNId = NodeI.GetInNId(v);
        if (! NIdDistH.IsKey(DstNId)) {
          NIdDistH.AddDat(DstNId, Dist+1);
          MaxDist = TMath::Mx(MaxDist, Dist+1);
          if (DstNId == TargetNId) { return MaxDist; }
          Queue.Push(DstNId);
        }
      }
    }
  }
  return MaxDist;
}

/// Direction-optimizing BFS: alternates between top-down (scan frontier's neighbors) and
/// bottom-up (scan unvisited nodes for visited parents) steps depending on frontier size.
/// Distances are accumulated in a dense vector and copied into NIdDistH at the end.
template<class PGraph>
int TBreathFS<PGraph>::DoBfsHybrid(const int& StartNode, const bool& FollowOut, const bool& FollowIn, const int& TargetNId, const int& MxDist) {
  StartNId = StartNode;
  IAssert(Graph->IsNode(StartNId));
  if (TargetNId == StartNode) return 0;
  const typename PGraph::TObj::TNodeI StartNodeI = Graph->GetNI(StartNode);
  // Initialize vector: -1 marks "not yet visited"
  TIntV NIdDistV(Graph->GetMxNId() + 1);
  for (int i = 0; i < NIdDistV.Len(); i++) {
    NIdDistV.SetVal(i, -1);
  }
  TIntV *Frontier = new TIntV(Graph->GetNodes(), 0);
  TIntV *NextFrontier = new TIntV(Graph->GetNodes(), 0);
  NIdDistV.SetVal(StartNId, 0);
  Frontier->Add(StartNId);
  Stage = 0;
  int MaxDist = -1;
  const unsigned int TotalNodes = Graph->GetNodes();
  unsigned int UnvisitedNodes = Graph->GetNodes();
  while (! Frontier->Empty()) {
    MaxDist += 1;
    NextFrontier->Clr(false);
    if (MaxDist == MxDist) { break; } // max distance limit reached
    UnvisitedNodes -= Frontier->Len();
    // Switch traversal direction when the frontier becomes large (top-down -> bottom-up)
    // or small again (bottom-up -> top-down); see alpha/beta thresholds in the class.
    if (Stage == 0 && UnvisitedNodes / Frontier->Len() < alpha) {
      Stage = 1;
    } else if (Stage == 1 && TotalNodes / Frontier->Len() > beta) {
      Stage = 2;
    }
    // Top down or bottom up depending on stage
    bool targetFound = false;
    if (Stage == 0 || Stage == 2) {
      targetFound = TopDownStep(NIdDistV, Frontier, NextFrontier, MaxDist, TargetNId, FollowOut, FollowIn);
    } else {
      targetFound = BottomUpStep(NIdDistV, Frontier, NextFrontier, MaxDist, TargetNId, FollowOut, FollowIn);
    }
    if (targetFound) {
      MaxDist = NIdDistV[TargetNId];
      break;
    }
    // swap Frontier and NextFrontier (pointer swap, no copying)
    TIntV *temp = Frontier;
    Frontier = NextFrontier;
    NextFrontier = temp;
  }
  delete Frontier;
  delete NextFrontier;
  // Transform vector to hash table (only visited nodes are stored)
  NIdDistH.Clr(false);
  for (int NId = 0; NId < NIdDistV.Len(); NId++) {
    if (NIdDistV[NId] != -1) {
      NIdDistH.AddDat(NId, NIdDistV[NId]);
    }
  }
  return MaxDist;
}

/// Top-down BFS step: expands every node in Frontier to its unvisited neighbors.
/// Returns true as soon as TargetNId is labeled.
template<class PGraph>
bool TBreathFS<PGraph>::TopDownStep(TIntV &NIdDistV, TIntV *Frontier, TIntV *NextFrontier, int& MaxDist, const int& TargetNId, const bool& FollowOut, const bool& FollowIn) {
  for (TIntV::TIter it = Frontier->BegI(); it != Frontier->EndI(); ++it) { // loop over frontier
    const int NId = *it;
    const int Dist = NIdDistV[NId];
    IAssert(Dist == MaxDist); // Must equal to MaxDist
    const typename PGraph::TObj::TNodeI NodeI = Graph->GetNI(NId);
    if (FollowOut) {
      for (int v = 0; v < NodeI.GetOutDeg(); v++) {
        const int NeighborNId = NodeI.GetOutNId(v);
        if (NIdDistV[NeighborNId] == -1) {
          NIdDistV.SetVal(NeighborNId, Dist+1);
          if (NeighborNId == TargetNId) return true;
          NextFrontier->Add(NeighborNId);
        }
      }
    }
    if (FollowIn) {
      for (int v = 0; v < NodeI.GetInDeg(); v++) {
        const int NeighborNId = NodeI.GetInNId(v);
        if (NIdDistV[NeighborNId] == -1) {
          NIdDistV.SetVal(NeighborNId, Dist+1);
          if (NeighborNId == TargetNId) return true;
          NextFrontier->Add(NeighborNId);
        }
      }
    }
  }
  return false;
}

/// Bottom-up BFS step: scans all unvisited nodes and adopts any parent already at MaxDist.
/// Note the edge direction is inverted on purpose: when following out-links forward
/// (FollowOut), a node's potential parents are reached through its in-links, and vice versa.
template<class PGraph>
bool TBreathFS<PGraph>::BottomUpStep(TIntV &NIdDistV, TIntV *Frontier, TIntV *NextFrontier, int& MaxDist, const int& TargetNId, const bool& FollowOut, const bool& FollowIn) {
  for (typename PGraph::TObj::TNodeI NodeI = Graph->BegNI(); NodeI < Graph->EndNI(); NodeI++) {
    const int NId = NodeI.GetId();
    if (NIdDistV[NId] == -1) {
      if (FollowOut) {
        for (int v = 0; v < NodeI.GetInDeg(); v++) {
          const int ParentNId = NodeI.GetInNId(v);
          if (NIdDistV[ParentNId] == MaxDist) {
            NIdDistV[NId] = MaxDist + 1;
            if (NId == TargetNId) return true;
            NextFrontier->Add(NId);
            break; // one parent in the frontier is enough
          }
        }
      }
      if (FollowIn && NIdDistV[NId] == -1) {
        for (int v = 0; v < NodeI.GetOutDeg(); v++) {
          const int ParentNId = NodeI.GetOutNId(v);
          if (NIdDistV[ParentNId] == MaxDist) {
            NIdDistV[NId] = MaxDist + 1;
            if (NId == TargetNId) return true;
            NextFrontier->Add(NId);
            break;
          }
        }
      }
    }
  }
  return false;
}

/// Returns the hop distance of DstNId from the most recent BFS start node,
/// or -1 if SrcNId is not that start node or DstNId was not reached.
template<class PGraph>
int TBreathFS<PGraph>::GetHops(const int& SrcNId, const int& DstNId) const {
  TInt Dist;
  if (SrcNId!=StartNId) { return -1; }
  if (! NIdDistH.IsKeyGetDat(DstNId, Dist)) { return -1; }
  return Dist.Val;
}

/// Walks backwards from DstNId to SrcNId, at each step choosing uniformly at random among
/// neighbors that are exactly one hop closer. Returns the path length (edge count), or -1.
template<class PGraph>
int TBreathFS<PGraph>::GetRndPath(const int& SrcNId, const int& DstNId, TIntV& PathNIdV) const {
  PathNIdV.Clr(false);
  if (SrcNId!=StartNId || ! NIdDistH.IsKey(DstNId)) { return -1; }
  PathNIdV.Add(DstNId);
  TIntV CloserNIdV;
  int CurNId = DstNId;
  TInt CurDist, NextDist;
  while (CurNId != SrcNId) {
    typename PGraph::TObj::TNodeI NI = Graph->GetNI(CurNId);
    IAssert(NIdDistH.IsKeyGetDat(CurNId, CurDist));
    CloserNIdV.Clr(false);
    for (int e = 0; e < NI.GetDeg(); e++) {
      const int Next = NI.GetNbrNId(e);
      if (NIdDistH.IsKeyGetDat(Next, NextDist)) {
        if (NextDist == CurDist-1) { CloserNIdV.Add(Next); }
      }
    }
    IAssert(! CloserNIdV.Empty());
    CurNId = CloserNIdV[TInt::Rnd.GetUniDevInt(CloserNIdV.Len())];
    PathNIdV.Add(CurNId);
  }
  PathNIdV.Reverse();
  return PathNIdV.Len()-1;
}

/////////////////////////////////////////////////
// Implementation
namespace TSnap {

/// Builds the BFS tree rooted at StartNId: each visited node is linked to every
/// already-inserted neighbor exactly one hop closer to the root.
template <class PGraph>
PNGraph GetBfsTree(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn) {
  TBreathFS<PGraph> BFS(Graph);
  BFS.DoBfs(StartNId, FollowOut, FollowIn, -1, TInt::Mx);
  PNGraph Tree = TNGraph::New();
  BFS.NIdDistH.SortByDat(); // process nodes in order of increasing distance
  for (int i = 0; i < BFS.NIdDistH.Len(); i++) {
    const int NId = BFS.NIdDistH.GetKey(i);
    const int Dist = BFS.NIdDistH[i];
    typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId);
    if (!Tree->IsNode(NId)) {
      Tree->AddNode(NId);
    }
    if (FollowOut) {
      for (int e = 0; e < NI.GetInDeg(); e++) {
        const int Prev = NI.GetInNId(e);
        if (Tree->IsNode(Prev) && BFS.NIdDistH.GetDat(Prev)==Dist-1) {
          Tree->AddEdge(Prev, NId); }
      }
    }
    if (FollowIn) {
      for (int e = 0; e < NI.GetOutDeg(); e++) {
        const int Prev = NI.GetOutNId(e);
        if (Tree->IsNode(Prev) && BFS.NIdDistH.GetDat(Prev)==Dist-1) {
          Tree->AddEdge(Prev, NId); }
      }
    }
  }
  return Tree;
}

/// Computes the size (node count) and depth (maximum hop distance) of the BFS tree
/// rooted at StartNId. Returns TreeSz.
template <class PGraph>
int GetSubTreeSz(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn, int& TreeSz, int& TreeDepth) {
  TBreathFS<PGraph> BFS(Graph);
  BFS.DoBfs(StartNId, FollowOut, FollowIn, -1, TInt::Mx);
  TreeSz = BFS.NIdDistH.Len();
  TreeDepth = 0;
  for (int i = 0; i < BFS.NIdDistH.Len(); i++) {
    TreeDepth = TMath::Mx(TreeDepth, BFS.NIdDistH[i].Val);
  }
  return TreeSz;
}

/// Collects into NIdV the ids of nodes exactly Hop hops from StartNId and returns their count.
/// IsDir=false treats edges as undirected (in-links are followed too).
template <class PGraph>
int GetNodesAtHop(const PGraph& Graph, const int& StartNId, const int& Hop, TIntV& NIdV, const bool& IsDir) {
  TBreathFS<PGraph> BFS(Graph);
  BFS.DoBfs(StartNId, true, !IsDir, -1, Hop);
  NIdV.Clr(false);
  for (int i = 0; i < BFS.NIdDistH.Len(); i++) {
    if (BFS.NIdDistH[i] == Hop) {
      NIdV.Add(BFS.NIdDistH.GetKey(i)); }
  }
  return NIdV.Len();
}

/// Fills HopCntV with (hop distance, node count) pairs sorted by distance and returns
/// the number of distinct distances reached from StartNId.
template <class PGraph>
int GetNodesAtHops(const PGraph& Graph, const int& StartNId, TIntPrV& HopCntV, const bool& IsDir) {
  TBreathFS<PGraph> BFS(Graph);
  BFS.DoBfs(StartNId, true, !IsDir, -1, TInt::Mx);
  TIntH HopCntH;
  for (int i = 0; i < BFS.NIdDistH.Len(); i++) {
    HopCntH.AddDat(BFS.NIdDistH[i]) += 1;
  }
  HopCntH.GetKeyDatPrV(HopCntV);
  HopCntV.Sort();
  return HopCntV.Len();
}

/// Single-source shortest distances (up to MaxDist) into NIdToDistH.
/// Returns the distance of the last node added by the BFS; since BFS discovers nodes in
/// nondecreasing distance order, that is the maximum distance reached.
template <class PGraph>
int GetShortPath(const PGraph& Graph, const int& SrcNId, TIntH& NIdToDistH, const bool& IsDir, const int& MaxDist) {
  TBreathFS<PGraph> BFS(Graph);
  BFS.DoBfs(SrcNId, true, ! IsDir, -1, MaxDist);
  NIdToDistH.Clr();
  NIdToDistH.Swap(BFS.NIdDistH); // take ownership of the distance table, no copy
  return NIdToDistH[NIdToDistH.Len()-1];
}

/// Shortest path length between SrcNId and DstNId (-1 if unreachable).
template <class PGraph>
int GetShortPath(const PGraph& Graph, const int& SrcNId, const int& DstNId, const bool& IsDir) {
  TBreathFS<PGraph> BFS(Graph);
  BFS.DoBfs(SrcNId, true, ! IsDir, DstNId, TInt::Mx);
  return BFS.GetHops(SrcNId, DstNId);
}

/// Approximate full diameter (longest shortest path) from BFS over NTestNodes random starts.
template <class PGraph>
int GetBfsFullDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir) {
  int FullDiam;
  double EffDiam;
  GetBfsEffDiam(Graph, NTestNodes, IsDir, EffDiam, FullDiam);
  return FullDiam;
}

/// Approximate 90th-percentile effective diameter from BFS over NTestNodes random starts.
template <class PGraph>
double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir) {
  int FullDiam;
  double EffDiam;
  GetBfsEffDiam(Graph, NTestNodes, IsDir, EffDiam, FullDiam);
  return EffDiam;
}

/// Convenience overload: discards the average shortest path length output.
template <class PGraph>
double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiam, int& FullDiam) {
  double AvgDiam;
  EffDiam = -1; FullDiam = -1;
  return GetBfsEffDiam(Graph, NTestNodes, IsDir, EffDiam, FullDiam, AvgDiam);
}

/// Samples BFS from up to NTestNodes random nodes, accumulates the distance distribution,
/// and derives the effective diameter (90th percentile), approximate full diameter, and
/// average shortest path length. Returns EffDiam.
template <class PGraph>
double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiam, int& FullDiam, double& AvgSPL) {
  EffDiam = -1;  FullDiam = -1;  AvgSPL = -1;
  TIntFltH DistToCntH;
  TBreathFS<PGraph> BFS(Graph);
  // shortest paths from each sampled start node
  TIntV NodeIdV;
  Graph->GetNIdV(NodeIdV);
  NodeIdV.Shuffle(TInt::Rnd);
  for (int tries = 0; tries < TMath::Mn(NTestNodes, Graph->GetNodes()); tries++) {
    const int NId = NodeIdV[tries];
    BFS.DoBfs(NId, true, ! IsDir, -1, TInt::Mx);
    for (int i = 0; i < BFS.NIdDistH.Len(); i++) {
      DistToCntH.AddDat(BFS.NIdDistH[i]) += 1;
    }
  }
  TIntFltKdV DistNbrsPdfV;
  double SumPathL=0, PathCnt=0;
  for (int i = 0; i < DistToCntH.Len(); i++) {
    DistNbrsPdfV.Add(TIntFltKd(DistToCntH.GetKey(i), DistToCntH[i]));
    SumPathL += DistToCntH.GetKey(i) * DistToCntH[i];
    PathCnt += DistToCntH[i];
  }
  DistNbrsPdfV.Sort();
  EffDiam = TSnap::TSnapDetail::CalcEffDiamPdf(DistNbrsPdfV, 0.9); // effective diameter (90-th percentile)
  FullDiam = DistNbrsPdfV.Last().Key;  // approximate full diameter (max shortest path length over the sampled nodes)
  AvgSPL = SumPathL/PathCnt;           // average shortest path length
  return EffDiam;
}

/// Same as above, but samples start nodes from SubGraphNIdV and only counts distances to
/// nodes of SubGraphNIdV (paths may pass through the full graph). Returns EffDiam.
template <class PGraph>
double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const TIntV& SubGraphNIdV, const bool& IsDir, double& EffDiam, int& FullDiam) {
  EffDiam = -1;
  FullDiam = -1;
  TIntFltH DistToCntH;
  TBreathFS<PGraph> BFS(Graph);
  // shortest paths from each sampled start node
  TIntV NodeIdV(SubGraphNIdV);
  NodeIdV.Shuffle(TInt::Rnd);
  TInt Dist;
  for (int tries = 0; tries < TMath::Mn(NTestNodes, SubGraphNIdV.Len()); tries++) {
    const int NId = NodeIdV[tries];
    BFS.DoBfs(NId, true, ! IsDir, -1, TInt::Mx);
    for (int i = 0; i < SubGraphNIdV.Len(); i++) {
      if (BFS.NIdDistH.IsKeyGetDat(SubGraphNIdV[i], Dist)) {
        DistToCntH.AddDat(Dist) += 1; }
    }
  }
  TIntFltKdV DistNbrsPdfV;
  for (int i = 0; i < DistToCntH.Len(); i++) {
    DistNbrsPdfV.Add(TIntFltKd(DistToCntH.GetKey(i), DistToCntH[i]));
  }
  DistNbrsPdfV.Sort();
  EffDiam = TSnap::TSnapDetail::CalcEffDiamPdf(DistNbrsPdfV, 0.9); // effective diameter (90-th percentile)
  FullDiam = DistNbrsPdfV.Last().Key; // approximate full diameter (max shortest path length over the sampled nodes)
  return EffDiam;
}

/// Level-synchronous BFS writing distances into the dense vector ShortestDists, indexed by
/// node id: INT_MAX marks "not a node", INT_MAX-1 marks "unreached". Returns the depth of
/// the last non-empty level.
/// NOTE(review): FollowOut/FollowIn are accepted but ignored -- only out-links are followed;
/// the PSOut StdOut local is also unused. Confirm before relying on the direction parameters.
template <class PGraph>
int GetShortestDistances(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn, TIntV& ShortestDists) {
  PSOut StdOut = TStdOut::New();
  int MxNId = Graph->GetMxNId();
  int NonNodeDepth = 2147483647; // INT_MAX
  int InfDepth = 2147483646; // INT_MAX - 1
  ShortestDists.Gen(MxNId);
  for (int NId = 0; NId < MxNId; NId++) {
    if (Graph->IsNode(NId)) { ShortestDists[NId] = InfDepth; }
    else { ShortestDists[NId] = NonNodeDepth; }
  }

  TIntV Vec1(MxNId, 0); // ensure enough capacity
  TIntV Vec2(MxNId, 0); // ensure enough capacity

  ShortestDists[StartNId] = 0;
  TIntV* PCurV = &Vec1;
  PCurV->Add(StartNId);
  TIntV* PNextV = &Vec2;
  int Depth = 0; // current depth
  while (!PCurV->Empty()) {
    Depth++; // increase depth
    for (int i = 0; i < PCurV->Len(); i++) {
      int NId = PCurV->GetVal(i);
      typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId);
      for (int e = 0; e < NI.GetOutDeg(); e++) {
        const int OutNId = NI.GetOutNId(e);
        if (ShortestDists[OutNId].Val == InfDepth) {
          ShortestDists[OutNId] = Depth;
          PNextV->Add(OutNId);
        }
      }
    }
    // swap pointer, no copying
    TIntV* Tmp = PCurV;
    PCurV = PNextV;
    PNextV = Tmp;
    // clear next
    PNextV->Reduce(0); // reduce length, does not initialize new array
  }
  return Depth-1;
}

#ifdef USE_OPENMP
/// Parallel variant of GetShortestDistances: each level is expanded by an OpenMP loop and
/// distance slots are claimed with an atomic compare-and-swap so each node is added to the
/// next frontier exactly once.
/// NOTE(review): FollowOut/FollowIn are ignored here as well -- only out-links are followed.
template <class PGraph>
int GetShortestDistancesMP2(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn, TIntV& ShortestDists) {
  int MxNId = Graph->GetMxNId();
  int NonNodeDepth = 2147483647; // INT_MAX
  int InfDepth = 2147483646; // INT_MAX - 1
  ShortestDists.Gen(MxNId);
  #pragma omp parallel for schedule(dynamic,10000)
  for (int NId = 0; NId < MxNId; NId++) {
    if (Graph->IsNode(NId)) { ShortestDists[NId] = InfDepth; }
    else { ShortestDists[NId] = NonNodeDepth; }
  }

  TIntV Vec1(MxNId, 0); // ensure enough capacity
  TIntV Vec2(MxNId, 0); // ensure enough capacity

  ShortestDists[StartNId] = 0;
  TIntV* PCurV = &Vec1;
  PCurV->Add(StartNId);
  TIntV* PNextV = &Vec2;
  int Depth = 0; // current depth
  while (!PCurV->Empty()) {
    Depth++; // increase depth
    #pragma omp parallel for schedule(dynamic,10000)
    for (int i = 0; i < PCurV->Len(); i++) {
      int NId = PCurV->GetVal(i);
      typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId);
      for (int e = 0; e < NI.GetOutDeg(); e++) {
        const int OutNId = NI.GetOutNId(e);
        // CAS claims the slot atomically: only the winning thread enqueues OutNId
        if (__sync_bool_compare_and_swap(&(ShortestDists[OutNId].Val), InfDepth, Depth)) {
          PNextV->AddMP(OutNId);
        }
      }
    }

//      #pragma omp parallel for schedule(dynamic,10000)
//      for (int NId = 0; NId < MxNId; NId++) {
//        if (ShortestDists[NId] == InfDepth) {
//          typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId);
//          for (int e = 0; e < NI.GetInDeg(); e++) {
//            const int InNId = NI.GetInNId(e);
//            if (ShortestDists[InNId] < Depth) {
//              ShortestDists[NId] = Depth;
//              PNextV->AddMP(NId);
//              break;
//            }
//          }
//        }
//      }

    // swap pointer, no copying
    TIntV* Tmp = PCurV;
    PCurV = PNextV;
    PNextV = Tmp;
    // clear next
    PNextV->Reduce(0); // reduce length, does not initialize new array
  }
  return Depth-1;
}
#endif // USE_OPENMP

} // namespace TSnap
constructs.c
// Load the OpenMP functions library #include<omp.h> int main() { // Set variables int num_threads=0, tnum=0, i=0, total=0; // Create parallel block //#pragma omp parallel // { //Create a section block //#pragma omp sections private(tnum, i) nowait // { // Ask an available thread to print out the thread number. //#pragma omp section // { tnum = omp_get_thread_num(); printf("I am thread number %d\n", tnum); // } // Ask another section to add up the thread numbers //#pragma omp section // { num_threads = omp_get_num_threads(); tnum = omp_get_thread_num(); total = 0; for (i=1; i<=num_threads; i++) total = total + i; printf("thread number %d says total = %d\n", tnum, total); // } // Close the section block. Normally this sets a barrier that requires all // the threads to have completed processing by this point, but we've // bypassed it with the "nowait" argument. // } // Print out the fact that the section block has finished. How many threads // are still functional at this point? printf("Finished sections block\n"); // We only want one thread to operate here. //#pragma omp single // { tnum = omp_get_thread_num(); printf("Single thread = %d\n", tnum); // } // End parallel block // } return 0; }
gi_numeric_integrator_path_compressing.h
/* * * Copyright (C) 2018 Attila Gyulassy <jediati@sci.utah.edu> * All rights reserved. * * This software may be modified and distributed under the terms * of the BSD license. See the LICENSE file for details. */ #ifndef NUMERIC_INTEGRATOR_PATH_COMPRESSING_H #define NUMERIC_INTEGRATOR_PATH_COMPRESSING_H #include <set> #include <queue> #include <stack> #include "gi_basic_types.h" #include "gi_vectors.h" #include "gi_labeling.h" #include "gi_regular_grid_3d.h" #include "gi_regular_grid_trilinear_function.h" #include "gi_adaptive_euler_advector_3d.h" #include "gi_timing.h" #include "gi_union_find_labeling.h" #include "gi_index_comparer.h" #include "gi_topological_regular_grid_3d.h" #include "gi_array_index_partition.h" #include "omp.h" //#define OUTPUTINTERMEDIATE namespace GInt { template< class Advector, class GridFuncType > class NumericIntegratorPathCompressingToTerminal { protected: DenseLabeling<DestType>* m_desttype; DenseLabeling<int>* m_dest_label; RegularGrid3D* m_grid; GridFuncType* m_func; int m_num_iterations_left; Vec3i m_xyz; Vec3b m_periodic; FLOATTYPE m_error_threshold; FLOATTYPE m_gradient_threshold; FLOATTYPE m_filterValue; public: NumericIntegratorPathCompressingToTerminal(GridFuncType* func, RegularGrid3D* grid, FLOATTYPE error_threshold, FLOATTYPE gradient_threshold, int interation_limit) : m_num_iterations_left(interation_limit), m_xyz(func->GetGrid()->XYZ()), m_periodic(func->GetGrid()->Periodic()), m_func(func), m_grid(grid), m_gradient_threshold(gradient_threshold), m_error_threshold(error_threshold) { m_filterValue = std::numeric_limits<FLOATTYPE>::min(); } ~NumericIntegratorPathCompressingToTerminal() { delete m_desttype; delete m_dest_label; } DenseLabeling<int>* GetOutputLabels() { return m_dest_label; } RegularGrid3D* GetGrid() { return m_grid; } GridFuncType* GetFunction() { return m_func; } template <typename T> static void accumulate_and_clear_sets(std::vector<std::set<T>> & sets, std::set<T> &accSet) { accSet.clear(); for(unsigned 
int i = 0; i < sets.size(); i++) { accSet.insert( sets[i].begin(), sets[i].end() ); sets[i].clear(); } } #if 1 void BeginIntegration(const std::unordered_map<INT_TYPE, std::vector<INDEX_TYPE> >& remap, DenseLabeling<int>* teminals, bool verbose = false) { ThreadedTimer gtimer(1); gtimer.StartGlobal(); if(verbose) printf(" -- Performing numeric integration for volume assignment (%f)...\n", m_filterValue); m_dest_label = teminals; m_desttype = new DenseLabeling<DestType>(m_grid->NumElements()); const INDEX_TYPE t_num_vertices = m_grid->NumElements(); AdvectionChecker* inside_voxel_critical_advection_checker = new TerminateNearPathCompressedRegion(m_desttype, m_grid); ThreadedTimer ltimer0(1); ltimer0.StartGlobal(); // --------------------------------------------------------------------- if (verbose){ printf(" -- finding extrema to terminate integral lines...\n"); fflush(stdout); } #pragma omp parallel for for (INDEX_TYPE i = 0; i < t_num_vertices; i++) { // Harsh added a new label for filtering //if (fabs(m_func->SampleImage(i)) <= m_filterValue) { // m_desttype->SetLabel(i, DestType::BACKGROUND); // m_dest_label->SetLabel(i, -2); // continue; //} switch (m_dest_label->GetLabel(i)) { case -1: m_desttype->SetLabel(i, DestType::UNASSIGNED); break; default: m_desttype->SetLabel(i, DestType::CERTAIN_TERMINAL); } } ltimer0.EndGlobal(); for (auto lp : remap) { for (auto id : lp.second) { m_func->SetGradExplicit(id, Vec3d(0, 0, 0)); } } #ifdef OUTPUTINTERMEDIATE m_dest_label->OutputToFile("certains.raw"); m_desttype->OutputToIntFile("certains_type.raw"); { m_dest_label->OutputToFile("certain_expansion.raw"); m_dest_label->OutputToIntFile("dest_original.raw"); } TopologicalRegularGridRestricted* ttgrid = new TopologicalRegularGridRestricted(m_grid); VertexLabelingToBoundaryLabeling<int>* tedge = new VertexLabelingToBoundaryLabeling<int>(m_dest_label, ttgrid); tedge->ComputeBoundary(); tedge->OutputEdgesToFile("certain_edges.txt"); FILE* fout = fopen("Linesout.txt", "w"); 
#endif if (verbose){ //printf(" expansion done!\n", mExtrema.size()); printf(" -- doing numerical integration first pass with path compression..."); fflush(stdout); } // --------------------------------------------------------------------- ThreadedTimer ltimer1(1); ltimer1.StartGlobal(); int t1, t2; t1 = t2 = 0; #pragma omp parallel { Advector t_advector(m_grid, m_func, m_gradient_threshold, m_error_threshold, inside_voxel_critical_advection_checker); std::vector<INDEX_TYPE> t_path; t_path.reserve(100); int num_threads = omp_get_num_threads(); int thread_num = omp_get_thread_num(); std::vector<INDEX_TYPE> partition; GInt::ArrayIndexPartitioner::EvenChunkSplit(t_num_vertices, num_threads, partition); INDEX_TYPE num_to_do = (partition[thread_num + 1] - partition[thread_num]); for (INDEX_TYPE kk = 1; kk <= num_to_do * 4; kk*=2) { INDEX_TYPE startsize = num_to_do / kk; INDEX_TYPE stepsize = (num_to_do*2) / kk; if (stepsize == 0) continue; for (INDEX_TYPE i = partition[thread_num] + startsize; i < partition[thread_num + 1]; i += stepsize) { // early skip if this is already a maximum if (m_desttype->GetLabel(i) != DestType::UNASSIGNED) { continue; } t_path.clear(); t_path.push_back(i); Vec3l t_coords = m_grid->XYZ3d(i); // get the coordinates of the point Vec3d t_current_point = t_coords; int t_num_iterations_left = m_num_iterations_left; bool t_continue = true; #ifdef OUTPUTINTERMEDIATE std::vector<Vec3d> line_soup; line_soup.push_back(t_current_point); #endif while (t_continue) { ADVECTION_EVENT t_return_code; if (m_grid->DistToBoundary(t_coords) <= 1) { t_return_code = t_advector.AdvectThroughVoxelNearBoundary(t_current_point, t_num_iterations_left); t_coords = m_grid->Inbounds(t_current_point + 0.5); // get nearest integer voxel t1++; } else { t_return_code = t_advector.AdvectThroughVoxelNoCheck(t_current_point, t_num_iterations_left); t_coords = (t_current_point + 0.5); t2++; } INDEX_TYPE t_next_id = m_grid->Index3d(t_coords); t_path.push_back(t_next_id); if 
(t_return_code == ADVECTION_EVENT::OUT_OF_VOXEL) continue; #ifdef OUTPUTINTERMEDIATE line_soup.push_back(t_current_point); #endif // if we terminated or hit a critical point, then we are done if (t_return_code == ADVECTION_EVENT::LOW_GRADIENT || t_return_code == ADVECTION_EVENT::HIT_EXTREMUM || t_return_code == ADVECTION_EVENT::HIT_PREASSIGNED || t_return_code == ADVECTION_EVENT::OVER_MAX_ITERATIONS) { int t_dest_label = m_dest_label->GetLabel(t_next_id); //#pragma omp critical { for (int j = 0; j < t_path.size(); j++) { INDEX_TYPE jj = t_path[j]; if (m_desttype->GetLabel(jj) == DestType::UNASSIGNED) { m_dest_label->SetLabel(jj, t_dest_label); m_desttype->SetLabel(jj, DestType::ASSIGNED); } } } #ifdef OUTPUTINTERMEDIATE #pragma omp critical { int tn = omp_get_thread_num(); fprintf(fout, "%d %d %d %d\n", i, tn, line_soup.size(), t_dest_label); for (int j = 0; j < line_soup.size(); j++) { fprintf(fout, "%f %f %f\n", line_soup[j][0], line_soup[j][1], line_soup[j][2]); } } #endif t_continue = false; } } } } } #ifdef OUTPUTINTERMEDIATE fclose(fout); m_dest_label->OutputToIntFile("first_integration.raw"); m_dest_label->OutputToIntFile("dests_after_first_integration.raw"); m_dest_label->OutputToIntFile("first_integration.raw"); m_desttype->OutputToIntFile("first_integration_type.raw"); #endif ltimer1.EndGlobal(); // --------------------------------------------------------------------- if (verbose){ printf(" done!"); ltimer1.PrintAll(); printf(" -- checking unambiguous voxels..."); fflush(stdout); } ThreadedTimer ltimer2(1); ltimer2.StartGlobal(); // we will process vertices in iterations // this set will contain verts to be processed in the next iteration std::set<INDEX_TYPE> verts_2b_processed_set; // to avoid locking by threads, we will use local copies where threads will add verts // later, we will merge these into the main set defined above std::vector< std::set<INDEX_TYPE> > verts_thrds ( omp_get_max_threads() ); #pragma omp parallel for for (INDEX_TYPE i = 0; i < 
t_num_vertices; i++) { if (m_desttype->GetLabel(i) == DestType::BACKGROUND) continue; //if (m_desttype->GetLabel(i) == DestType::ASSIGNED) { // verts_thrds[omp_get_thread_num()].insert(i); //} Vec3l t_coords = m_grid->XYZ3d(i); // get the coordinates of the poitn Vec3l negs[6]; int nn = m_grid->GatherExistingNeighborsAll6(t_coords, negs); for (int j = 0; j < nn; j++) { INDEX_TYPE v2 = m_grid->Index3d(negs[j]); if (v2 > i) continue; if (m_dest_label->GetLabel(i) != m_dest_label->GetLabel(v2)) { if (m_desttype->GetLabel(i) == DestType::ASSIGNED) { verts_thrds[ omp_get_thread_num() ].insert(i); } if (m_desttype->GetLabel(v2) == DestType::ASSIGNED) { verts_thrds[ omp_get_thread_num() ].insert(v2); } } } } // collect the results of all threads accumulate_and_clear_sets( verts_thrds, verts_2b_processed_set ); ltimer2.EndGlobal(); // --------------------------------------------------------------------- if (verbose){ printf(" done!"); ltimer2.PrintAll(); fflush(stdout); //printf(" -- found %d points needed correction...", verts_2b_processed_set.size()); //fflush(stdout); } ThreadedTimer ltimer3(1); ltimer3.StartGlobal(); AdvectionChecker* inside_voxel_nostop_advection_checker = new TerminateNearOriginalCertain(m_desttype, m_grid); size_t totalfixed = 0; // this loop will iterate until no more verts need to be processed for(unsigned int itern = 0; !verts_2b_processed_set.empty(); itern++) { // transfer from set to vector to start processing // use the set to store the verts needed in the next iteration std::vector<INDEX_TYPE> verts_2b_processed_vector ( verts_2b_processed_set.begin(), verts_2b_processed_set.end() ); if (verbose){ printf(" -- iteration %d will process %d vertices\n", itern, verts_2b_processed_vector.size()); } totalfixed += verts_2b_processed_vector.size(); #pragma omp parallel for for(int i = 0; i < verts_2b_processed_vector.size(); i++) { Advector t_advector(m_grid, m_func, m_gradient_threshold, m_error_threshold, inside_voxel_nostop_advection_checker); 
INDEX_TYPE current_vertex = verts_2b_processed_vector[i]; int init_label = m_dest_label->GetLabel(current_vertex); // INTEGRATE // INTEGRATE Vec3l t_coords = m_grid->XYZ3d(current_vertex); // get the coordinates of the poitn //if (t_coords[0] == 0 && t_coords[1] == 0) printf("doing %d\n", t_coords[2]); Vec3d t_current_point = t_coords; int t_num_iterations_left = m_num_iterations_left; bool t_continue = true; int new_label; while (t_continue) { ADVECTION_EVENT t_return_code; if (m_grid->DistToBoundary(t_coords) <= 1) { t_return_code = t_advector.AdvectThroughVoxelNearBoundary(t_current_point, t_num_iterations_left); t_coords = m_grid->Inbounds(t_current_point + 0.5); // get nearest integer voxel t1++; } else { t_return_code = t_advector.AdvectThroughVoxelNoCheck(t_current_point, t_num_iterations_left); t_coords = (t_current_point + 0.5); t2++; } INDEX_TYPE t_next_id = m_grid->Index3d(t_coords); // if we terminated or hit a critical point, then we are done if (t_return_code == ADVECTION_EVENT::LOW_GRADIENT || t_return_code == ADVECTION_EVENT::HIT_EXTREMUM || t_return_code == ADVECTION_EVENT::HIT_PREASSIGNED || t_return_code == ADVECTION_EVENT::OVER_MAX_ITERATIONS) { new_label = m_dest_label->GetLabel(t_next_id); if (m_desttype->GetLabel(t_next_id) != DestType::CERTAIN_TERMINAL){ //printf("whoatherenelly %d %d\n", m_desttype->GetLabel(t_next_id), t_return_code); } t_continue = false; } } // INTEGRATE // INTEGRATE // m_dest_label->SetLabel(current_vertex, new_label); m_desttype->SetLabel(current_vertex, DestType::CERTAIN_NONTERMINAL); if (new_label != init_label){ // ENQUEUE NEIGHBORS Vec3l t_coords = m_grid->XYZ3d(current_vertex); // get the coordinates of the poitn Vec3l negs[6]; //INDEX_TYPE negids[6]; int nn = m_grid->GatherExistingNeighborsAll6(t_coords, negs); // for each neigbhor for (int j = 0; j < nn; j++){ INDEX_TYPE negid = m_grid->Index3d(negs[j]); // only if it has not yet been added to our update set if (m_desttype->GetLabel(negid) == DestType::ASSIGNED 
&& m_dest_label->GetLabel(negid) != new_label) { verts_thrds[ omp_get_thread_num() ].insert(negid); } } } } accumulate_and_clear_sets( verts_thrds, verts_2b_processed_set ); } ltimer3.EndGlobal(); #ifdef OUTPUTINTERMEDIATE m_desttype->OutputToIntFile("classes_type.raw"); #endif if (verbose){ printf(" -- done! fixed a total of %d vertices!", totalfixed); ltimer3.PrintAll(); } gtimer.EndGlobal(); if(verbose){ printf(" -- done numerical integration!"); gtimer.PrintAll(); } } #else void BeginIntegration(bool verbose = false) { verbose = true; ThreadedTimer gtimer(1); gtimer.StartGlobal(); if(verbose) printf(" -- Performing numeric integration for volume assignment (%f)...\n", m_filterValue); //m_func->ComputeGradFromImage(m_rkindex); m_dest_label = new DenseLabeling<int>(m_grid->NumElements()); m_desttype = new DenseLabeling<DestType>(m_grid->NumElements()); const INDEX_TYPE t_num_vertices = m_grid->NumElements(); // THIS WILL NEED TO CHANGE AdvectionChecker* inside_voxel_critical_advection_checker = new TerminateNearPathCompressedRegion(m_desttype, m_grid); //AdvectionChecker* no_check = new NoTermination();//AdvectionChecker* inside_voxel_advection_checker = new TerminateNearAssigned(m_destinations, m_grid); ThreadedTimer ltimer0(1); ltimer0.StartGlobal(); if (verbose){ printf(" -- finding extrema to terminate integral lines..."); fflush(stdout); } // set all potential extrema, so we terminate near them //#pragma omp parallel for for (INDEX_TYPE i = 0; i < t_num_vertices; i++) { //printf(" thread %d of %d max %d does vertx %d\n", // omp_get_thread_num(), omp_get_num_threads(), omp_get_max_threads(), i ); // Harsh added a new label for filtering if (fabs(m_func->SampleImage(i)) <= m_filterValue) { m_desttype->SetLabel(i, DestType::BACKGROUND); m_dest_label->SetLabel(i, -2); continue; } m_desttype->SetLabel(i, DestType::UNASSIGNED); m_dest_label->SetLabel(i, -1); if (IsExtremeVertexIn6Neighborhood(i)) { //#pragma omp critical { mExtrema.push_back(i); 
//m_extrema.insert(i); //m_dest_label->SetLabel(i, 0); } } } #ifdef OUTPUTINTERMEDIATE m_dest_label->OutputToIntFile("crits.raw"); #endif ltimer0.EndGlobal(); ltimer0.PrintAll(); if (verbose){ printf(" done! found %d extrema!\n", mExtrema.size()); printf(" -- expanding extrema certain regions..."); fflush(stdout); } int num_extrema = mExtrema.size(); //#pragma omp parallel shared(mExtrema) { //#pragma omp for schedule(dynamic) nowait for (int m = 0; m < num_extrema; m++) { INDEX_TYPE maximum = mExtrema[m]; Expand_Lower_Neighborhood(maximum, m); m_func->SetGradExplicit(maximum, Vec3d(0, 0, 0)); } } #ifdef OUTPUTINTERMEDIATE m_dest_label->OutputToFile("certains.raw"); m_desttype->OutputToIntFile("certains_type.raw"); { m_dest_label->OutputToFile("certain_expansion.raw"); m_dest_label->OutputToIntFile("dest_original.raw"); } TopologicalRegularGridRestricted* ttgrid = new TopologicalRegularGridRestricted(m_grid); VertexLabelingToBoundaryLabeling<int>* tedge = new VertexLabelingToBoundaryLabeling<int>(m_dest_label, ttgrid); tedge->ComputeBoundary(); tedge->OutputEdgesToFile("certain_edges.txt"); FILE* fout = fopen("Linesout.txt", "w"); #endif if (verbose){ printf(" expansion done!\n", mExtrema.size()); printf(" -- doing numerical integration first pass with path compression..."); fflush(stdout); } ThreadedTimer ltimer1(1); ltimer1.StartGlobal(); int t1, t2; t1 = t2 = 0; //#pragma omp parallel { Advector t_advector(m_grid, m_func, m_gradient_threshold, m_error_threshold, inside_voxel_critical_advection_checker); std::vector<INDEX_TYPE> t_path; t_path.reserve(100); int num_threads = omp_get_num_threads(); int thread_num = omp_get_thread_num(); printf(" thread %d of %d\n", thread_num, num_threads); std::vector<INDEX_TYPE> partition; GInt::ArrayIndexPartitioner::EvenChunkSplit(t_num_vertices, num_threads, partition); INDEX_TYPE num_to_do = (partition[thread_num + 1] - partition[thread_num]); for (INDEX_TYPE kk = 1; kk <= num_to_do * 4; kk*=2) { INDEX_TYPE startsize = 
num_to_do / kk; INDEX_TYPE stepsize = (num_to_do*2) / kk; if (stepsize == 0) continue; //printf("%d %d %d\n", num_to_do, kk, stepsize); for (INDEX_TYPE i = partition[thread_num] + startsize; i < partition[thread_num + 1]; i += stepsize) { //#pragma omp for schedule(guided) nowait // for (INDEX_TYPE i = 0; i < t_num_vertices; i++) { // early skip if this is already a maximum if (m_desttype->GetLabel(i) != DestType::UNASSIGNED) { continue; } t_path.clear(); t_path.push_back(i); Vec3l t_coords = m_grid->XYZ3d(i); // get the coordinates of the poitn //if (t_coords[0] == 0 && t_coords[1] == 0) printf("doing %d\n", t_coords[2]); Vec3d t_current_point = t_coords; int t_num_iterations_left = m_num_iterations_left; bool t_continue = true; #ifdef OUTPUTINTERMEDIATE std::vector<Vec3d> line_soup; line_soup.push_back(t_current_point); #endif while (t_continue) { Vec3d t_next_point; ADVECTION_EVENT t_return_code; if (m_grid->DistToBoundary(t_coords) <= 1) { t_return_code = t_advector.AdvectThroughVoxelNearBoundary(t_current_point, t_num_iterations_left); t_coords = m_grid->Inbounds(t_current_point + 0.5); // get nearest integer voxel t1++; } else { t_return_code = t_advector.AdvectThroughVoxelNoCheck(t_current_point, t_num_iterations_left); t_coords = (t_current_point + 0.5); t2++; } INDEX_TYPE t_next_id = m_grid->Index3d(t_coords); t_path.push_back(t_next_id); if (t_return_code == ADVECTION_EVENT::OUT_OF_VOXEL) continue; #ifdef OUTPUTINTERMEDIATE line_soup.push_back(t_current_point); #endif // if we terminated or hit a critical point, then we are done if (t_return_code == ADVECTION_EVENT::LOW_GRADIENT || t_return_code == ADVECTION_EVENT::HIT_EXTREMUM || t_return_code == ADVECTION_EVENT::HIT_PREASSIGNED || t_return_code == ADVECTION_EVENT::OVER_MAX_ITERATIONS) { int t_dest_label = m_dest_label->GetLabel(t_next_id); //DestType t_certain_label = m_certains->GetLabel(t_next_id); //#pragma omp critical //if (t_dest_label == -1) { // printf("who there got here %d %d %d %d %d\n", // 
m_desttype->GetLabel(t_next_id), t_next_id, t_return_code, t_num_iterations_left, t_path.size()); // m_grid->XYZ3d(i).PrintInt(); printf("->"); t_coords.PrintInt(); //} //#pragma omp critical { for (int j = 0; j < t_path.size(); j++) { INDEX_TYPE jj = t_path[j]; if (m_desttype->GetLabel(jj) == DestType::UNASSIGNED) { m_dest_label->SetLabel(jj, t_dest_label); m_desttype->SetLabel(jj, DestType::ASSIGNED); } //m_certains->SetLabel(t_path[j], t_certain_label); } #ifdef OUTPUTINTERMEDIATE #pragma omp critical { int tn = omp_get_thread_num(); fprintf(fout, "%d %d %d %d\n", i, tn, line_soup.size(), t_dest_label); for (int j = 0; j < line_soup.size(); j++) { fprintf(fout, "%f %f %f\n", line_soup[j][0], line_soup[j][1], line_soup[j][2]); } } #endif } t_continue = false; } } } } } #ifdef OUTPUTINTERMEDIATE fclose(fout); m_dest_label->OutputToIntFile("first_integration.raw"); m_dest_label->OutputToIntFile("dests_after_first_integration.raw"); m_dest_label->OutputToIntFile("first_integration.raw"); m_desttype->OutputToIntFile("first_integration_type.raw"); #endif ltimer1.EndGlobal(); ltimer1.PrintAll(); if (verbose){ printf(" done!\n"); printf(" -- checking unambiguous voxels..."); fflush(stdout); } ThreadedTimer ltimer2(1); ltimer2.StartGlobal(); std::unordered_set<INDEX_TYPE> added_vertices; std::stack<INDEX_TYPE> vertex_stack; //#pragma omp parallel for for (INDEX_TYPE i = 0; i < t_num_vertices; i++) { Vec3l t_coords = m_grid->XYZ3d(i); // get the coordinates of the poitn Vec3l negs[6]; //INDEX_TYPE negids[6]; int nn = m_grid->GatherExistingNeighborsSameBdry6(t_coords, negs); for (int j = 0; j < nn; j++) { INDEX_TYPE v2 = m_grid->Index3d(negs[j]); if (v2 > i) continue; //if (m_desttype->GetLabel(i) != DestType::ASSIGNED || m_desttype->GetLabel(v2) != DestType::ASSIGNED) continue; if (m_dest_label->GetLabel(i) != m_dest_label->GetLabel(v2)) { if (m_desttype->GetLabel(i) == DestType::ASSIGNED) { //#pragma omp critical { if (added_vertices.count(i) == 0) { 
added_vertices.insert(i); vertex_stack.push(i); } } } if (m_desttype->GetLabel(v2) == DestType::ASSIGNED) { //#pragma omp critical { if (added_vertices.count(v2) == 0) { added_vertices.insert(v2); vertex_stack.push(v2); } } } } } } // TopologicalRegularGrid3D* tgrid = new TopologicalRegularGrid3D(m_grid); // int num_cells = tgrid->numCells(); // // // /// in parallel gather the vertices that need to be updated //#pragma omp parallel // { // // int num_threads = omp_get_num_threads(); // int thread_num = omp_get_thread_num(); // // std::vector<INDEX_TYPE> partition; // ArrayIndexPartitioner::EvenChunkSplit(tgrid->numCells(), num_threads, partition); // TopologicalRegularGrid3D::DCellsIterator edges(tgrid, 1, partition[thread_num], partition[thread_num + 1]); // for (edges.begin(); edges.valid(); edges.advance()) { // TopologicalRegularGrid3D::FacetsIterator fit(tgrid); // fit.begin(edges.value()); // INDEX_TYPE tv1 = fit.value(); // fit.advance(); // INDEX_TYPE tv2 = fit.value(); // // INDEX_TYPE v1 = tgrid->VertexNumberFromCellID(tv1); // INDEX_TYPE v2 = tgrid->VertexNumberFromCellID(tv2); // // if (m_desttype->GetLabel(v1) != DestType::ASSIGNED || m_desttype->GetLabel(v2) != DestType::ASSIGNED) continue; // if (m_dest_label->GetLabel(v1) != m_dest_label->GetLabel(v2)) { // // if (m_desttype->GetLabel(v1) == DestType::ASSIGNED) { //#pragma omp critical // { // if (added_vertices.count(v1) == 0) { // added_vertices.insert(v1); // vertex_stack.push(v1); // } // } // } // if (m_desttype->GetLabel(v2) == DestType::ASSIGNED) { //#pragma omp critical // { // if (added_vertices.count(v2) == 0) { // added_vertices.insert(v2); // vertex_stack.push(v2); // } // } // } // } // } // } // END PARALLEL SECTION // //for (auto id : added_vertices) { // m_desttype->SetLabel(id, DestType::BACKGROUND); //} //m_desttype->OutputToIntFile("reintegrate.raw"); if (verbose){ printf(" done!"); ltimer2.EndGlobal(); ltimer2.PrintAll(); printf(" -- found %d points needed correction %d...", 
added_vertices.size(), vertex_stack.size()); fflush(stdout); } ThreadedTimer ltimer3(1); ltimer3.StartGlobal(); // NOW FIX LABELS AdvectionChecker* inside_voxel_nostop_advection_checker = new TerminateNearOriginalCertain(m_desttype, m_grid); //#pragma omp parallel { int cnt = 0; Advector t_advector(m_grid, m_func, m_gradient_threshold, m_error_threshold, inside_voxel_nostop_advection_checker); bool keep_going = true; while (keep_going) { INDEX_TYPE current_vertex; //#pragma omp critical { if (vertex_stack.size() > 0) { current_vertex = vertex_stack.top(); vertex_stack.pop(); } else { keep_going = false; } } if (cnt ++ < 10 ){ printf(" [%d] = %d\n", cnt, current_vertex); } if (keep_going) { // INITIAL VALUE int init_label = m_dest_label->GetLabel(current_vertex); // INTEGRATE // INTEGRATE Vec3l t_coords = m_grid->XYZ3d(current_vertex); // get the coordinates of the poitn //if (t_coords[0] == 0 && t_coords[1] == 0) printf("doing %d\n", t_coords[2]); Vec3d t_current_point = t_coords; int t_num_iterations_left = m_num_iterations_left; bool t_continue = true; int new_label; while (t_continue) { Vec3d t_next_point; ADVECTION_EVENT t_return_code; if (m_grid->DistToBoundary(t_coords) <= 1) { t_return_code = t_advector.AdvectThroughVoxelNearBoundary(t_current_point, t_num_iterations_left); t_coords = m_grid->Inbounds(t_current_point + 0.5); // get nearest integer voxel t1++; } else { t_return_code = t_advector.AdvectThroughVoxelNoCheck(t_current_point, t_num_iterations_left); t_coords = (t_current_point + 0.5); t2++; } INDEX_TYPE t_next_id = m_grid->Index3d(t_coords); // if we terminated or hit a critical point, then we are done if (t_return_code == ADVECTION_EVENT::LOW_GRADIENT || t_return_code == ADVECTION_EVENT::HIT_EXTREMUM || t_return_code == ADVECTION_EVENT::HIT_PREASSIGNED || t_return_code == ADVECTION_EVENT::OVER_MAX_ITERATIONS) { new_label = m_dest_label->GetLabel(t_next_id); if (m_desttype->GetLabel(t_next_id) != DestType::CERTAIN_TERMINAL) printf("whoatherenelly 
%d %d\n", m_desttype->GetLabel(t_next_id), t_return_code); t_continue = false; } } // INTEGRATE // INTEGRATE m_dest_label->SetLabel(current_vertex, new_label); m_desttype->SetLabel(current_vertex, DestType::CERTAIN_NONTERMINAL); if (new_label != init_label) { // ENQUEUE NEIGHBORS Vec3l t_coords = m_grid->XYZ3d(current_vertex); // get the coordinates of the poitn Vec3l negs[6]; INDEX_TYPE negids[6]; int nn = m_grid->GatherExistingNeighborsSameBdry6(t_coords, negs); for (int j = 0; j < nn; j++) negids[j] = m_grid->Index3d(negs[j]); //#pragma omp critical { // for each neigbhor for (int j = 0; j < nn; j++) { INDEX_TYPE negid = negids[j]; // only if it has not yet been added to our update set if (added_vertices.count(negid) == 0) { if (m_desttype->GetLabel(negid) == DestType::ASSIGNED && m_dest_label->GetLabel(negid) != new_label) { added_vertices.insert(negid); vertex_stack.push(negid); } } } } } } } // END WHILE } // END PARALLEL ltimer3.EndGlobal(); ltimer3.PrintAll(); #ifdef OUTPUTINTERMEDIATE m_desttype->OutputToIntFile("classes_type.raw"); #endif if (verbose){ printf(" done! fixed a total of %d vertices\n", added_vertices.size()); } gtimer.EndGlobal(); gtimer.PrintAll(); } #endif }; } #endif
GB_unop__identity_fp32_fc64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_fp32_fc64
// op(A') function:  GB_unop_tran__identity_fp32_fc64

// C type:   float
// A type:   GxB_FC64_t
// cast:     float cij = (float) creal (aij)
// unaryop:  cij = aij

// NOTE: with a double-complex input and a float output, the IDENTITY
// operator reduces to "take the real part and narrow to float"; the
// imaginary part of aij is discarded by the cast below.

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    float z = (float) creal (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)               \
{                                       \
    /* aij = Ax [pA] */                 \
    GxB_FC64_t aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */     \
    float z = (float) creal (aij) ;     \
    Cx [pC] = z ;                       \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator elementwise to the dense value array Ax, writing the
// result into Cx.  Returns GrB_NO_VALUE when this kernel is compiled out
// (GB_DISABLE), in which case the caller falls back to the generic kernel.

GrB_Info GB_unop_apply__identity_fp32_fc64
(
    float *Cx,                  // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,       // input values (not modified)
    int64_t anz,                // number of entries in Ax / Cx
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC64_t aij = Ax [p] ;
        // cast: keep the real part, narrow to float
        float z = (float) creal (aij) ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Transposes A while applying the typecast and operator.  The actual work is
// in the shared template GB_unop_transpose.c, specialized by the macros
// defined above (GB_ATYPE, GB_CTYPE, GB_CAST_OP, ...).

GrB_Info GB_unop_tran__identity_fp32_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__min_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__min_int16) // A.*B function (eWiseMult): GB (_AemultB_08__min_int16) // A.*B function (eWiseMult): GB (_AemultB_02__min_int16) // A.*B function (eWiseMult): GB (_AemultB_04__min_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__min_int16) // A*D function (colscale): GB (_AxD__min_int16) // D*A function (rowscale): GB (_DxB__min_int16) // C+=B function (dense accum): GB (_Cdense_accumB__min_int16) // C+=b function (dense accum): GB (_Cdense_accumb__min_int16) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__min_int16) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__min_int16) // C=scalar+B GB (_bind1st__min_int16) // C=scalar+B' GB (_bind1st_tran__min_int16) // C=A+scalar GB (_bind2nd__min_int16) // C=A'+scalar GB (_bind2nd_tran__min_int16) // C type: int16_t // A type: int16_t // A pattern? 0 // B type: int16_t // B pattern? 
0 // BinaryOp: cij = GB_IMIN (aij, bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IMIN (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MIN || GxB_NO_INT16 || GxB_NO_MIN_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__min_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__min_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__min_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__min_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__min_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else 
int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__min_int16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__min_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int16_t alpha_scalar ; int16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int16_t *) alpha_scalar_in)) ; beta_scalar = (*((int16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__min_int16) ( GrB_Matrix C, const int C_sparsity, 
const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__min_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__min_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__min_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__min_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) 
for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IMIN (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__min_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IMIN (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMIN (x, aij) ; \ } GrB_Info GB (_bind1st_tran__min_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMIN (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__min_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_unaryop__abs_bool_uint8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_bool_uint8 // op(A') function: GB_tran__abs_bool_uint8 // C type: bool // A type: uint8_t // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ uint8_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_BOOL || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_bool_uint8 ( bool *restrict Cx, const uint8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_bool_uint8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
DRB034-truedeplinear-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* A linear expression is used as array subscription. Data race pair: a[2*i+1]@66:5 vs. a[i]@66:14 */ #include <stdlib.h> int main(int argc, char* argv[]) { int i; int len=2000; if (argc>1) len = atoi(argv[1]); int a[len]; #pragma omp parallel for private(i ) for (i=0; i<len; i++) a[i]=i; for (i=0;i<len/2;i++) a[2*i+1]=a[i]+1; for (i=0; i<len; i++) printf("%d\n", a[i]); return 0; }
GB_binop__isgt_uint32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): auto-generated kernel family for the ISGT ("is greater than",
// z = (x > y)) operator on uint32_t.  The function bodies are supplied by the
// #include'd template files, parameterized by the GB_* macros defined below.
// Any change must be made in the Generator/ sources, not in this file.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__isgt_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_08__isgt_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_02__isgt_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_04__isgt_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__isgt_uint32)
// A*D function (colscale):         GB (_AxD__isgt_uint32)
// D*A function (rowscale):         GB (_DxB__isgt_uint32)
// C+=B function (dense accum):     GB (_Cdense_accumB__isgt_uint32)
// C+=b function (dense accum):     GB (_Cdense_accumb__isgt_uint32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isgt_uint32)
// C=scalar+B                       GB (_bind1st__isgt_uint32)
// C=scalar+B'                      GB (_bind1st_tran__isgt_uint32)
// C=A+scalar                       GB (_bind2nd__isgt_uint32)
// C=A'+scalar                      GB (_bind2nd_tran__isgt_uint32)

// C type:   uint32_t
// A type:   uint32_t
// A pattern? 0
// B type:   uint32_t
// B pattern? 0

// BinaryOp: cij = (aij > bij)

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
// NOTE(review): the trailing backslash after the 0 makes the following blank
// line part of the macro body — a harmless generator quirk; do not "clean up".
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x > y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISGT || GxB_NO_UINT32 || GxB_NO_ISGT_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISGT is none of these, so this dense-accumulate kernel is compiled out.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// NOTE(review): unlike the kernels below, this one is void and has no
// GB_DISABLE guard — presumably intentional in the generator; confirm there.
void GB (_Cdense_ewise3_noaccum__isgt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isgt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // body supplied by the subassign template (uses the GB_* macros above)
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isgt_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above already returned; dead code
    // emitted by the generator, left in place because this file is generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isgt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isgt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isgt_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint32_t alpha_scalar ;
    uint32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // alpha/beta are only read for eWiseUnion, so they are only unpacked
        // in that case (otherwise they stay uninitialized and unused)
        alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__isgt_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isgt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__isgt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isgt_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isgt_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb (GBB is true for full)
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x > bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isgt_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij > y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint32_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (x > aij) ;                       \
}

GrB_Info GB (_bind1st_tran__isgt_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint32_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij > y) ;                       \
}

GrB_Info GB (_bind2nd_tran__isgt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__pair_fp64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): auto-generated kernels for the PAIR operator on double
// (z = 1 regardless of the operands).  Because the result is a constant,
// most kernels are compiled out below with "#if 0" and named GB ((none)).
// Any change must be made in the Generator/ sources, not in this file.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__pair_fp64)
// A.*B function (eWiseMult):       GB ((none))
// A.*B function (eWiseMult):       GB ((none))
// A.*B function (eWiseMult):       GB ((none))
// A.*B function (eWiseMult):       GB ((none))
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__pair_fp64)
// C+=b function (dense accum):     GB (_Cdense_accumb__pair_fp64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__pair_fp64)
// C=scalar+B                       GB ((none))
// C=scalar+B'                      GB ((none))
// C=A+scalar                       GB ((none))
// C=A'+scalar                      GB ((none))

// C type:   double
// A type:   double
// B,b type: double

// BinaryOp: cij = 1

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    double

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
// (PAIR ignores its operands, so GETA/GETB expand to an empty statement)
#define GB_GETA(aij,Ax,pA,A_iso) \
    ;

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    ;

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    double t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = 1 ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PAIR || GxB_NO_FP64 || GxB_NO_PAIR_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__pair_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pair_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pair_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above already returned; dead code
    // emitted by the generator, left in place because this file is generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pair_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        // the empty statements are the expansion of GB_GETA/GB_GETB for PAIR
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)   \
{                           \
    ;                       \
    ;                       \
    Cx [pC] = 1 ;           \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)   \
{                           \
    ;                       \
    ;                       \
    Cx [pC] = 1 ;           \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
_phonopy.c
/* Copyright (C) 2011 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/

/* CPython extension module "_phonopy": thin argument-parsing wrappers that
 * expose phonopy's C kernels (phpy_* in phonopy.h) to Python.  Each py_*
 * wrapper only unpacks numpy arrays into raw pointers and forwards them;
 * all numerics live in the phpy_* functions.
 *
 * NOTE(review): the wrappers cast PyArray_DATA with no dtype/contiguity
 * checks — they assume the Python callers pass C-contiguous arrays whose
 * dtypes match the casts (double / long / int) — TODO confirm callers. */

#include <Python.h>
#include <stdio.h>
#include <stddef.h>
#include <math.h>
#include <float.h>
#include <numpy/arrayobject.h>
#include "phonopy.h"

/* PHPYCONST is defined in dynmat.h */

/* Build dynamical matrix */
/* Forward declarations for all method-table entries below. */
static PyObject * py_transform_dynmat_to_fc(PyObject *self, PyObject *args);
static PyObject * py_perm_trans_symmetrize_fc(PyObject *self, PyObject *args);
static PyObject * py_perm_trans_symmetrize_compact_fc(PyObject *self,
                                                      PyObject *args);
static PyObject * py_transpose_compact_fc(PyObject *self, PyObject *args);
static PyObject * py_get_dynamical_matrix(PyObject *self, PyObject *args);
static PyObject * py_get_nac_dynamical_matrix(PyObject *self, PyObject *args);
static PyObject * py_get_recip_dipole_dipole(PyObject *self, PyObject *args);
static PyObject * py_get_recip_dipole_dipole_q0(PyObject *self, PyObject *args);
static PyObject * py_get_derivative_dynmat(PyObject *self, PyObject *args);
static PyObject * py_get_thermal_properties(PyObject *self, PyObject *args);
static PyObject * py_distribute_fc2(PyObject *self, PyObject *args);
static PyObject * py_compute_permutation(PyObject *self, PyObject *args);
static PyObject * py_gsv_set_smallest_vectors_sparse(PyObject *self,
                                                     PyObject *args);
static PyObject * py_gsv_set_smallest_vectors_dense(PyObject *self,
                                                    PyObject *args);
static PyObject * py_thm_relative_grid_address(PyObject *self, PyObject *args);
static PyObject * py_thm_all_relative_grid_address(PyObject *self,
                                                   PyObject *args);
static PyObject * py_thm_integration_weight(PyObject *self, PyObject *args);
static PyObject * py_thm_integration_weight_at_omegas(PyObject *self,
                                                      PyObject *args);
static PyObject * py_get_tetrahedra_frequenies(PyObject *self, PyObject *args);
static PyObject * py_tetrahedron_method_dos(PyObject *self, PyObject *args);

/* Per-module state (Python 3 multi-phase-safe); holds the module's
 * exception object. */
struct module_state {
  PyObject *error;
};

#if PY_MAJOR_VERSION >= 3
#define GETSTATE(m) ((struct module_state*)PyModule_GetState(m))
#else
#define GETSTATE(m) (&_state)
/* Python 2 fallback: a single static state instead of per-module state. */
static struct module_state _state;
#endif

/* Test helper exposed as _phonopy.error_out: always raises _phonopy.Error. */
static PyObject *
error_out(PyObject *m) {
  struct module_state *st = GETSTATE(m);
  PyErr_SetString(st->error, "something bad happened");
  return NULL;
}

/* Method table: maps Python-visible names to the py_* wrappers above. */
static PyMethodDef _phonopy_methods[] = {
  {"error_out", (PyCFunction)error_out, METH_NOARGS, NULL},
  {"transform_dynmat_to_fc", py_transform_dynmat_to_fc, METH_VARARGS,
   "Transform a set of dynmat to force constants"},
  {"perm_trans_symmetrize_fc", py_perm_trans_symmetrize_fc, METH_VARARGS,
   "Enforce permutation and translational symmetry of force constants"},
  {"perm_trans_symmetrize_compact_fc", py_perm_trans_symmetrize_compact_fc,
   METH_VARARGS,
   "Enforce permutation and translational symmetry of compact force constants"},
  {"transpose_compact_fc", py_transpose_compact_fc, METH_VARARGS,
   "Transpose compact force constants"},
  {"dynamical_matrix", py_get_dynamical_matrix, METH_VARARGS,
   "Dynamical matrix"},
  {"nac_dynamical_matrix", py_get_nac_dynamical_matrix, METH_VARARGS,
   "NAC dynamical matrix"},
  {"recip_dipole_dipole", py_get_recip_dipole_dipole, METH_VARARGS,
   "Reciprocal part of dipole-dipole interaction"},
  {"recip_dipole_dipole_q0", py_get_recip_dipole_dipole_q0, METH_VARARGS,
   "q=0 terms of reciprocal part of dipole-dipole interaction"},
  {"derivative_dynmat", py_get_derivative_dynmat, METH_VARARGS,
   "Q derivative of dynamical matrix"},
  {"thermal_properties", py_get_thermal_properties, METH_VARARGS,
   "Thermal properties"},
  {"distribute_fc2", py_distribute_fc2, METH_VARARGS,
   "Distribute force constants for all atoms in atom_list using precomputed symmetry mappings."},
  {"compute_permutation", py_compute_permutation, METH_VARARGS,
   "Compute indices of original points in a set of rotated points."},
  {"gsv_set_smallest_vectors_sparse", py_gsv_set_smallest_vectors_sparse,
   METH_VARARGS, "Set shortest vectors in sparse array."},
  {"gsv_set_smallest_vectors_dense", py_gsv_set_smallest_vectors_dense,
   METH_VARARGS, "Set shortest vectors in dense array."},
  {"tetrahedra_relative_grid_address", py_thm_relative_grid_address,
   METH_VARARGS, "Relative grid addresses of vertices of 24 tetrahedra"},
  {"all_tetrahedra_relative_grid_address", py_thm_all_relative_grid_address,
   METH_VARARGS,
   "4 (all) sets of relative grid addresses of vertices of 24 tetrahedra"},
  {"tetrahedra_integration_weight", py_thm_integration_weight, METH_VARARGS,
   "Integration weight for tetrahedron method"},
  {"tetrahedra_integration_weight_at_omegas",
   py_thm_integration_weight_at_omegas, METH_VARARGS,
   "Integration weight for tetrahedron method at omegas"},
  {"tetrahedra_frequencies", py_get_tetrahedra_frequenies, METH_VARARGS,
   "Run tetrahedron method"},
  {"tetrahedron_method_dos", py_tetrahedron_method_dos, METH_VARARGS,
   "Run tetrahedron method"},
  {NULL, NULL, 0, NULL}
};

#if PY_MAJOR_VERSION >= 3

/* GC support: visit/clear the module-state exception object. */
static int _phonopy_traverse(PyObject *m, visitproc visit, void *arg) {
  Py_VISIT(GETSTATE(m)->error);
  return 0;
}

static int _phonopy_clear(PyObject *m) {
  Py_CLEAR(GETSTATE(m)->error);
  return 0;
}

static struct PyModuleDef moduledef = {
  PyModuleDef_HEAD_INIT,
  "_phonopy",
  NULL,
  sizeof(struct module_state),
  _phonopy_methods,
  NULL,
  _phonopy_traverse,
  _phonopy_clear,
  NULL
};

#define INITERROR return NULL

PyObject *
PyInit__phonopy(void)

#else
#define INITERROR return

  void
  init_phonopy(void)
#endif
{
  /* Module initialization: create the module object (Py2/Py3 paths differ)
   * and install the _phonopy.Error exception into the module state. */
#if PY_MAJOR_VERSION >= 3
  PyObject *module = PyModule_Create(&moduledef);
#else
  PyObject *module = Py_InitModule("_phonopy", _phonopy_methods);
#endif
  struct module_state *st;

  if (module == NULL)
    INITERROR;
  st = GETSTATE(module);

  st->error = PyErr_NewException("_phonopy.Error", NULL, NULL);
  if (st->error == NULL) {
    Py_DECREF(module);
    INITERROR;
  }

#if PY_MAJOR_VERSION >= 3
  return module;
#endif
}

/* Wrapper for phpy_transform_dynmat_to_fc: converts dynamical matrices at
 * the commensurate points back into real-space force constants (written
 * into py_force_constants in place).  Returns None. */
static PyObject * py_transform_dynmat_to_fc(PyObject *self, PyObject *args)
{
  PyArrayObject* py_force_constants;
  PyArrayObject* py_dynamical_matrices;
  PyArrayObject* py_commensurate_points;
  PyArrayObject* py_svecs;
  PyArrayObject* py_multi;
  PyArrayObject* py_masses;
  PyArrayObject* py_s2pp_map;
  PyArrayObject* py_fc_index_map;

  double* fc;
  double* dm;
  double (*comm_points)[3];
  double (*svecs)[3];
  double* masses;
  long (*multi)[2];
  long* s2pp_map;
  long* fc_index_map;
  long num_patom;
  long num_satom;

  if (!PyArg_ParseTuple(args, "OOOOOOOO",
                        &py_force_constants,
                        &py_dynamical_matrices,
                        &py_commensurate_points,
                        &py_svecs,
                        &py_multi,
                        &py_masses,
                        &py_s2pp_map,
                        &py_fc_index_map)) {
    return NULL;
  }

  /* Raw views of the numpy buffers; dtypes assumed, not checked (see top). */
  fc = (double*)PyArray_DATA(py_force_constants);
  dm = (double*)PyArray_DATA(py_dynamical_matrices);
  comm_points = (double(*)[3])PyArray_DATA(py_commensurate_points);
  svecs = (double(*)[3])PyArray_DATA(py_svecs);
  masses = (double*)PyArray_DATA(py_masses);
  multi = (long(*)[2])PyArray_DATA(py_multi);
  s2pp_map = (long*)PyArray_DATA(py_s2pp_map);
  fc_index_map = (long*)PyArray_DATA(py_fc_index_map);
  /* Atom counts are taken from the shape of the multiplicity array. */
  num_patom = PyArray_DIMS(py_multi)[1];
  num_satom = PyArray_DIMS(py_multi)[0];

  phpy_transform_dynmat_to_fc(fc,
                              dm,
                              comm_points,
                              svecs,
                              multi,
                              masses,
                              s2pp_map,
                              fc_index_map,
                              num_patom,
                              num_satom);

  Py_RETURN_NONE;
}

/* Wrapper for phpy_compute_permutation: fills `permutation` (int array) with
 * the index of each rotated position in the original position list, within
 * tolerance symprec.  Returns True if every position was matched. */
static PyObject * py_compute_permutation(PyObject *self, PyObject *args)
{
  PyArrayObject* permutation;
  PyArrayObject* lattice;
  PyArrayObject* positions;
  PyArrayObject* permuted_positions;
  double symprec;

  int* rot_atoms;
  double (*lat)[3];
  double (*pos)[3];
  double (*rot_pos)[3];
  int num_pos;

  int is_found;

  if (!PyArg_ParseTuple(args, "OOOOd",
                        &permutation,
                        &lattice,
                        &positions,
                        &permuted_positions,
                        &symprec)) {
    return NULL;
  }

  rot_atoms = (int*)PyArray_DATA(permutation);
  lat = (double(*)[3])PyArray_DATA(lattice);
  pos = (double(*)[3])PyArray_DATA(positions);
  rot_pos = (double(*)[3])PyArray_DATA(permuted_positions);
  num_pos = PyArray_DIMS(positions)[0];

  is_found = phpy_compute_permutation(rot_atoms,
                                      lat,
                                      pos,
                                      rot_pos,
                                      num_pos,
                                      symprec);

  if (is_found) {
    Py_RETURN_TRUE;
  } else {
    Py_RETURN_FALSE;
  }
}

/* Wrapper for phpy_set_smallest_vectors_sparse: fills the (num_pos_to, 27, 3)
 * shortest-vector array and the multiplicity array in place.  Returns None. */
static PyObject * py_gsv_set_smallest_vectors_sparse(PyObject *self,
                                                     PyObject *args)
{
  PyArrayObject* py_smallest_vectors;
  PyArrayObject* py_multiplicity;
  PyArrayObject* py_pos_to;
  PyArrayObject* py_pos_from;
  PyArrayObject* py_lattice_points;
  PyArrayObject* py_reduced_basis;
  PyArrayObject* py_trans_mat;
  double symprec;

  double (*smallest_vectors)[27][3];
  int * multiplicity;
  double (*pos_to)[3];
  double (*pos_from)[3];
  int (*lattice_points)[3];
  double (*reduced_basis)[3];
  int (*trans_mat)[3];
  int num_pos_to, num_pos_from, num_lattice_points;

  if (!PyArg_ParseTuple(args, "OOOOOOOd",
                        &py_smallest_vectors,
                        &py_multiplicity,
                        &py_pos_to,
                        &py_pos_from,
                        &py_lattice_points,
                        &py_reduced_basis,
                        &py_trans_mat,
                        &symprec)) {
    return NULL;
  }

  smallest_vectors = (double(*)[27][3])PyArray_DATA(py_smallest_vectors);
  multiplicity = (int*)PyArray_DATA(py_multiplicity);
  pos_to = (double(*)[3])PyArray_DATA(py_pos_to);
  pos_from = (double(*)[3])PyArray_DATA(py_pos_from);
  num_pos_to = PyArray_DIMS(py_pos_to)[0];
  num_pos_from = PyArray_DIMS(py_pos_from)[0];
  lattice_points = (int(*)[3])PyArray_DATA(py_lattice_points);
  num_lattice_points = PyArray_DIMS(py_lattice_points)[0];
  reduced_basis = (double(*)[3])PyArray_DATA(py_reduced_basis);
  trans_mat = (int(*)[3])PyArray_DATA(py_trans_mat);

  phpy_set_smallest_vectors_sparse(smallest_vectors,
                                   multiplicity,
                                   pos_to,
                                   num_pos_to,
                                   pos_from,
                                   num_pos_from,
                                   lattice_points,
                                   num_lattice_points,
                                   reduced_basis,
                                   trans_mat,
                                   symprec);

  Py_RETURN_NONE;
}

/* Dense-layout variant of the wrapper above (long dtypes, flat (N, 3)
 * smallest-vectors array, extra `initialize` flag). */
static PyObject * py_gsv_set_smallest_vectors_dense(PyObject *self,
                                                    PyObject *args)
{
  PyArrayObject* py_smallest_vectors;
  PyArrayObject* py_multiplicity;
  PyArrayObject* py_pos_to;
  PyArrayObject* py_pos_from;
  PyArrayObject* py_lattice_points;
  PyArrayObject* py_reduced_basis;
  PyArrayObject* py_trans_mat;
  long initialize;
  double symprec;

  double (*smallest_vectors)[3];
  long (*multiplicity)[2];
  double (*pos_to)[3];
  double (*pos_from)[3];
  long (*lattice_points)[3];
  double (*reduced_basis)[3];
  long (*trans_mat)[3];
  long num_pos_to, num_pos_from, num_lattice_points;

  if (!PyArg_ParseTuple(args, "OOOOOOOld",
                        &py_smallest_vectors,
                        &py_multiplicity,
                        &py_pos_to,
                        &py_pos_from,
                        &py_lattice_points,
                        &py_reduced_basis,
                        &py_trans_mat,
                        &initialize,
                        &symprec)) {
    return NULL;
  }
  /* NOTE(review): function body continues beyond this chunk of the file. */
smallest_vectors = (double(*)[3])PyArray_DATA(py_smallest_vectors); multiplicity = (long(*)[2])PyArray_DATA(py_multiplicity); pos_to = (double(*)[3])PyArray_DATA(py_pos_to); pos_from = (double(*)[3])PyArray_DATA(py_pos_from); num_pos_to = PyArray_DIMS(py_pos_to)[0]; num_pos_from = PyArray_DIMS(py_pos_from)[0]; lattice_points = (long(*)[3])PyArray_DATA(py_lattice_points); num_lattice_points = PyArray_DIMS(py_lattice_points)[0]; reduced_basis = (double(*)[3])PyArray_DATA(py_reduced_basis); trans_mat = (long(*)[3])PyArray_DATA(py_trans_mat); phpy_set_smallest_vectors_dense(smallest_vectors, multiplicity, pos_to, num_pos_to, pos_from, num_pos_from, lattice_points, num_lattice_points, reduced_basis, trans_mat, initialize, symprec); Py_RETURN_NONE; } static PyObject * py_perm_trans_symmetrize_fc(PyObject *self, PyObject *args) { PyArrayObject* py_fc; double *fc; int level; int n_satom; if (!PyArg_ParseTuple(args, "Oi", &py_fc, &level)) { return NULL; } fc = (double*)PyArray_DATA(py_fc); n_satom = PyArray_DIMS(py_fc)[0]; phpy_perm_trans_symmetrize_fc(fc, n_satom, level); Py_RETURN_NONE; } static PyObject * py_perm_trans_symmetrize_compact_fc(PyObject *self, PyObject *args) { PyArrayObject* py_fc; PyArrayObject* py_permutations; PyArrayObject* py_s2pp_map; PyArrayObject* py_p2s_map; PyArrayObject* py_nsym_list; int level; double *fc; int *perms; int *s2pp; int *p2s; int *nsym_list; int n_patom, n_satom; if (!PyArg_ParseTuple(args, "OOOOOi", &py_fc, &py_permutations, &py_s2pp_map, &py_p2s_map, &py_nsym_list, &level)) { return NULL; } fc = (double*)PyArray_DATA(py_fc); perms = (int*)PyArray_DATA(py_permutations); s2pp = (int*)PyArray_DATA(py_s2pp_map); p2s = (int*)PyArray_DATA(py_p2s_map); nsym_list = (int*)PyArray_DATA(py_nsym_list); n_patom = PyArray_DIMS(py_fc)[0]; n_satom = PyArray_DIMS(py_fc)[1]; phpy_perm_trans_symmetrize_compact_fc( fc, p2s, s2pp, nsym_list, perms, n_satom, n_patom, level); Py_RETURN_NONE; } static PyObject * py_transpose_compact_fc(PyObject *self, 
PyObject *args) { PyArrayObject* py_fc; PyArrayObject* py_permutations; PyArrayObject* py_s2pp_map; PyArrayObject* py_p2s_map; PyArrayObject* py_nsym_list; double *fc; int *s2pp; int *p2s; int *nsym_list; int *perms; int n_patom, n_satom; if (!PyArg_ParseTuple(args, "OOOOO", &py_fc, &py_permutations, &py_s2pp_map, &py_p2s_map, &py_nsym_list)) { return NULL; } fc = (double*)PyArray_DATA(py_fc); perms = (int*)PyArray_DATA(py_permutations); s2pp = (int*)PyArray_DATA(py_s2pp_map); p2s = (int*)PyArray_DATA(py_p2s_map); nsym_list = (int*)PyArray_DATA(py_nsym_list); n_patom = PyArray_DIMS(py_fc)[0]; n_satom = PyArray_DIMS(py_fc)[1]; phpy_set_index_permutation_symmetry_compact_fc(fc, p2s, s2pp, nsym_list, perms, n_satom, n_patom, 1); Py_RETURN_NONE; } static PyObject * py_get_dynamical_matrix(PyObject *self, PyObject *args) { PyArrayObject* py_dynamical_matrix; PyArrayObject* py_force_constants; PyArrayObject* py_svecs; PyArrayObject* py_q; PyArrayObject* py_multi; PyArrayObject* py_masses; PyArrayObject* py_s2p_map; PyArrayObject* py_p2s_map; double* dm; double* fc; double* q; double (*svecs)[3]; double* m; long (*multi)[2]; long* s2p_map; long* p2s_map; long num_patom; long num_satom; if (!PyArg_ParseTuple(args, "OOOOOOOO", &py_dynamical_matrix, &py_force_constants, &py_q, &py_svecs, &py_multi, &py_masses, &py_s2p_map, &py_p2s_map)) { return NULL; } dm = (double*)PyArray_DATA(py_dynamical_matrix); fc = (double*)PyArray_DATA(py_force_constants); q = (double*)PyArray_DATA(py_q); svecs = (double(*)[3])PyArray_DATA(py_svecs); m = (double*)PyArray_DATA(py_masses); multi = (long(*)[2])PyArray_DATA(py_multi); s2p_map = (long*)PyArray_DATA(py_s2p_map); p2s_map = (long*)PyArray_DATA(py_p2s_map); num_patom = PyArray_DIMS(py_p2s_map)[0]; num_satom = PyArray_DIMS(py_s2p_map)[0]; phpy_get_dynamical_matrix_at_q(dm, num_patom, num_satom, fc, q, svecs, multi, m, s2p_map, p2s_map, NULL, 1); Py_RETURN_NONE; } static PyObject * py_get_nac_dynamical_matrix(PyObject *self, PyObject *args) { 
PyArrayObject* py_dynamical_matrix; PyArrayObject* py_force_constants; PyArrayObject* py_svecs; PyArrayObject* py_q_cart; PyArrayObject* py_q; PyArrayObject* py_multi; PyArrayObject* py_masses; PyArrayObject* py_s2p_map; PyArrayObject* py_p2s_map; PyArrayObject* py_born; double factor; double* dm; double* fc; double* q_cart; double* q; double (*svecs)[3]; double* m; double (*born)[3][3]; long (*multi)[2]; long* s2p_map; long* p2s_map; long num_patom; long num_satom; long n; double (*charge_sum)[3][3]; if (!PyArg_ParseTuple(args, "OOOOOOOOOOd", &py_dynamical_matrix, &py_force_constants, &py_q, &py_svecs, &py_multi, &py_masses, &py_s2p_map, &py_p2s_map, &py_q_cart, &py_born, &factor)) return NULL; dm = (double*)PyArray_DATA(py_dynamical_matrix); fc = (double*)PyArray_DATA(py_force_constants); q_cart = (double*)PyArray_DATA(py_q_cart); q = (double*)PyArray_DATA(py_q); svecs = (double(*)[3])PyArray_DATA(py_svecs); m = (double*)PyArray_DATA(py_masses); born = (double(*)[3][3])PyArray_DATA(py_born); multi = (long(*)[2])PyArray_DATA(py_multi); s2p_map = (long*)PyArray_DATA(py_s2p_map); p2s_map = (long*)PyArray_DATA(py_p2s_map); num_patom = PyArray_DIMS(py_p2s_map)[0]; num_satom = PyArray_DIMS(py_s2p_map)[0]; charge_sum = (double(*)[3][3]) malloc(sizeof(double[3][3]) * num_patom * num_patom); n = num_satom / num_patom; phpy_get_charge_sum(charge_sum, num_patom, factor / n, q_cart, born); phpy_get_dynamical_matrix_at_q(dm, num_patom, num_satom, fc, q, svecs, multi, m, s2p_map, p2s_map, charge_sum, 1); free(charge_sum); Py_RETURN_NONE; } static PyObject * py_get_recip_dipole_dipole(PyObject *self, PyObject *args) { PyArrayObject* py_dd; PyArrayObject* py_dd_q0; PyArrayObject* py_G_list; PyArrayObject* py_q_cart; PyArrayObject* py_q_direction; PyArrayObject* py_born; PyArrayObject* py_dielectric; PyArrayObject* py_positions; double factor; double lambda; double tolerance; double* dd; double* dd_q0; double (*G_list)[3]; double* q_vector; double* q_direction; double 
(*born)[3][3]; double (*dielectric)[3]; double (*pos)[3]; long num_patom, num_G; if (!PyArg_ParseTuple(args, "OOOOOOOOddd", &py_dd, &py_dd_q0, &py_G_list, &py_q_cart, &py_q_direction, &py_born, &py_dielectric, &py_positions, &factor, &lambda, &tolerance)) return NULL; dd = (double*)PyArray_DATA(py_dd); dd_q0 = (double*)PyArray_DATA(py_dd_q0); G_list = (double(*)[3])PyArray_DATA(py_G_list); if ((PyObject*)py_q_direction == Py_None) { q_direction = NULL; } else { q_direction = (double*)PyArray_DATA(py_q_direction); } q_vector = (double*)PyArray_DATA(py_q_cart); born = (double(*)[3][3])PyArray_DATA(py_born); dielectric = (double(*)[3])PyArray_DATA(py_dielectric); pos = (double(*)[3])PyArray_DATA(py_positions); num_G = PyArray_DIMS(py_G_list)[0]; num_patom = PyArray_DIMS(py_positions)[0]; phpy_get_recip_dipole_dipole(dd, /* [natom, 3, natom, 3, (real, imag)] */ dd_q0, /* [natom, 3, 3, (real, imag)] */ G_list, /* [num_kvec, 3] */ num_G, num_patom, q_vector, q_direction, born, dielectric, pos, /* [natom, 3] */ factor, /* 4pi/V*unit-conv */ lambda, /* 4 * Lambda^2 */ tolerance); Py_RETURN_NONE; } static PyObject * py_get_recip_dipole_dipole_q0(PyObject *self, PyObject *args) { PyArrayObject* py_dd_q0; PyArrayObject* py_G_list; PyArrayObject* py_born; PyArrayObject* py_dielectric; PyArrayObject* py_positions; double lambda; double tolerance; double* dd_q0; double (*G_list)[3]; double (*born)[3][3]; double (*dielectric)[3]; double (*pos)[3]; long num_patom, num_G; if (!PyArg_ParseTuple(args, "OOOOOdd", &py_dd_q0, &py_G_list, &py_born, &py_dielectric, &py_positions, &lambda, &tolerance)) return NULL; dd_q0 = (double*)PyArray_DATA(py_dd_q0); G_list = (double(*)[3])PyArray_DATA(py_G_list); born = (double(*)[3][3])PyArray_DATA(py_born); dielectric = (double(*)[3])PyArray_DATA(py_dielectric); pos = (double(*)[3])PyArray_DATA(py_positions); num_G = PyArray_DIMS(py_G_list)[0]; num_patom = PyArray_DIMS(py_positions)[0]; phpy_get_recip_dipole_dipole_q0(dd_q0, /* [natom, 3, 3, (real, 
imag)] */ G_list, /* [num_kvec, 3] */ num_G, num_patom, born, dielectric, pos, /* [natom, 3] */ lambda, /* 4 * Lambda^2 */ tolerance); Py_RETURN_NONE; } static PyObject * py_get_derivative_dynmat(PyObject *self, PyObject *args) { PyArrayObject* py_derivative_dynmat; PyArrayObject* py_force_constants; PyArrayObject* py_svecs; PyArrayObject* py_lattice; PyArrayObject* py_q_vector; PyArrayObject* py_multi; PyArrayObject* py_masses; PyArrayObject* py_s2p_map; PyArrayObject* py_p2s_map; PyArrayObject* py_born; PyArrayObject* py_dielectric; PyArrayObject* py_q_direction; double nac_factor; double* ddm; double* fc; double* q_vector; double* lat; double (*svecs)[3]; double* masses; long (*multi)[2]; long* s2p_map; long* p2s_map; long num_patom; long num_satom; double *born; double *epsilon; double *q_dir; if (!PyArg_ParseTuple(args, "OOOOOOOOOdOOO", &py_derivative_dynmat, &py_force_constants, &py_q_vector, &py_lattice, /* column vectors */ &py_svecs, &py_multi, &py_masses, &py_s2p_map, &py_p2s_map, &nac_factor, &py_born, &py_dielectric, &py_q_direction)) { return NULL; } ddm = (double*)PyArray_DATA(py_derivative_dynmat); fc = (double*)PyArray_DATA(py_force_constants); q_vector = (double*)PyArray_DATA(py_q_vector); lat = (double*)PyArray_DATA(py_lattice); svecs = (double(*)[3])PyArray_DATA(py_svecs); masses = (double*)PyArray_DATA(py_masses); multi = (long(*)[2])PyArray_DATA(py_multi); s2p_map = (long*)PyArray_DATA(py_s2p_map); p2s_map = (long*)PyArray_DATA(py_p2s_map); num_patom = PyArray_DIMS(py_p2s_map)[0]; num_satom = PyArray_DIMS(py_s2p_map)[0]; if ((PyObject*)py_born == Py_None) { born = NULL; } else { born = (double*)PyArray_DATA(py_born); } if ((PyObject*)py_dielectric == Py_None) { epsilon = NULL; } else { epsilon = (double*)PyArray_DATA(py_dielectric); } if ((PyObject*)py_q_direction == Py_None) { q_dir = NULL; } else { q_dir = (double*)PyArray_DATA(py_q_direction); } phpy_get_derivative_dynmat_at_q(ddm, num_patom, num_satom, fc, q_vector, lat, svecs, multi, 
masses, s2p_map, p2s_map, nac_factor, born, epsilon, q_dir); Py_RETURN_NONE; } /* Thermal properties */ static PyObject * py_get_thermal_properties(PyObject *self, PyObject *args) { PyArrayObject* py_thermal_props; PyArrayObject* py_temperatures; PyArrayObject* py_frequencies; PyArrayObject* py_weights; double cutoff_frequency; double *temperatures; double* freqs; double *thermal_props; long* weights; long num_qpoints; long num_bands; long num_temp; if (!PyArg_ParseTuple(args, "OOOOd", &py_thermal_props, &py_temperatures, &py_frequencies, &py_weights, &cutoff_frequency)) { return NULL; } thermal_props = (double*)PyArray_DATA(py_thermal_props); temperatures = (double*)PyArray_DATA(py_temperatures); num_temp = (long)PyArray_DIMS(py_temperatures)[0]; freqs = (double*)PyArray_DATA(py_frequencies); num_qpoints = (long)PyArray_DIMS(py_frequencies)[0]; weights = (long*)PyArray_DATA(py_weights); num_bands = (long)PyArray_DIMS(py_frequencies)[1]; phpy_get_thermal_properties(thermal_props, temperatures, freqs, weights, num_temp, num_qpoints, num_bands, cutoff_frequency); Py_RETURN_NONE; } static PyObject * py_distribute_fc2(PyObject *self, PyObject *args) { PyArrayObject* py_force_constants; PyArrayObject* py_permutations; PyArrayObject* py_map_atoms; PyArrayObject* py_map_syms; PyArrayObject* py_atom_list; PyArrayObject* py_rotations_cart; double (*r_carts)[3][3]; double (*fc2)[3][3]; int *permutations; int *map_atoms; int *map_syms; int *atom_list; npy_intp num_pos, num_rot, len_atom_list; if (!PyArg_ParseTuple(args, "OOOOOO", &py_force_constants, &py_atom_list, &py_rotations_cart, &py_permutations, &py_map_atoms, &py_map_syms)) { return NULL; } fc2 = (double(*)[3][3])PyArray_DATA(py_force_constants); atom_list = (int*)PyArray_DATA(py_atom_list); len_atom_list = PyArray_DIMS(py_atom_list)[0]; permutations = (int*)PyArray_DATA(py_permutations); map_atoms = (int*)PyArray_DATA(py_map_atoms); map_syms = (int*)PyArray_DATA(py_map_syms); r_carts = 
(double(*)[3][3])PyArray_DATA(py_rotations_cart); num_rot = PyArray_DIMS(py_permutations)[0]; num_pos = PyArray_DIMS(py_permutations)[1]; if (PyArray_NDIM(py_map_atoms) != 1 || PyArray_DIMS(py_map_atoms)[0] != num_pos) { PyErr_SetString(PyExc_ValueError, "wrong shape for map_atoms"); return NULL; } if (PyArray_NDIM(py_map_syms) != 1 || PyArray_DIMS(py_map_syms)[0] != num_pos) { PyErr_SetString(PyExc_ValueError, "wrong shape for map_syms"); return NULL; } if (PyArray_DIMS(py_rotations_cart)[0] != num_rot) { PyErr_SetString(PyExc_ValueError, "permutations and rotations are different length"); return NULL; } phpy_distribute_fc2(fc2, atom_list, len_atom_list, r_carts, permutations, map_atoms, map_syms, num_rot, num_pos); Py_RETURN_NONE; } static PyObject * py_thm_relative_grid_address(PyObject *self, PyObject *args) { PyArrayObject* py_relative_grid_address; PyArrayObject* py_reciprocal_lattice_py; long (*relative_grid_address)[4][3]; double (*reciprocal_lattice)[3]; if (!PyArg_ParseTuple(args, "OO", &py_relative_grid_address, &py_reciprocal_lattice_py)) { return NULL; } relative_grid_address = (long(*)[4][3])PyArray_DATA(py_relative_grid_address); reciprocal_lattice = (double(*)[3])PyArray_DATA(py_reciprocal_lattice_py); phpy_get_relative_grid_address(relative_grid_address, reciprocal_lattice); Py_RETURN_NONE; } static PyObject * py_thm_all_relative_grid_address(PyObject *self, PyObject *args) { PyArrayObject* py_relative_grid_address; long (*relative_grid_address)[24][4][3]; if (!PyArg_ParseTuple(args, "O", &py_relative_grid_address)) { return NULL; } relative_grid_address = (long(*)[24][4][3])PyArray_DATA(py_relative_grid_address); phpy_get_all_relative_grid_address(relative_grid_address); Py_RETURN_NONE; } static PyObject * py_thm_integration_weight(PyObject *self, PyObject *args) { double omega; PyArrayObject* py_tetrahedra_omegas; char* function; double (*tetrahedra_omegas)[4]; double iw; if (!PyArg_ParseTuple(args, "dOs", &omega, &py_tetrahedra_omegas, 
&function)) { return NULL; } tetrahedra_omegas = (double(*)[4])PyArray_DATA(py_tetrahedra_omegas); iw = phpy_get_integration_weight(omega, tetrahedra_omegas, function[0]); return PyFloat_FromDouble(iw); } static PyObject * py_thm_integration_weight_at_omegas(PyObject *self, PyObject *args) { PyArrayObject* py_integration_weights; PyArrayObject* py_omegas; PyArrayObject* py_tetrahedra_omegas; char* function; double *omegas; double *iw; long num_omegas; double (*tetrahedra_omegas)[4]; long i; if (!PyArg_ParseTuple(args, "OOOs", &py_integration_weights, &py_omegas, &py_tetrahedra_omegas, &function)) { return NULL; } omegas = (double*)PyArray_DATA(py_omegas); iw = (double*)PyArray_DATA(py_integration_weights); num_omegas = (long)PyArray_DIMS(py_omegas)[0]; tetrahedra_omegas = (double(*)[4])PyArray_DATA(py_tetrahedra_omegas); #pragma omp parallel for for (i = 0; i < num_omegas; i++) { iw[i] = phpy_get_integration_weight(omegas[i], tetrahedra_omegas, function[0]); } Py_RETURN_NONE; } static PyObject * py_get_tetrahedra_frequenies(PyObject *self, PyObject *args) { PyArrayObject* py_freq_tetras; PyArrayObject* py_grid_points; PyArrayObject* py_mesh; PyArrayObject* py_grid_address; PyArrayObject* py_gp_ir_index; PyArrayObject* py_relative_grid_address; PyArrayObject* py_frequencies; double* freq_tetras; long* grid_points; long* mesh; long (*grid_address)[3]; long* gp_ir_index; long (*relative_grid_address)[3]; double* frequencies; long num_gp_in, num_band; if (!PyArg_ParseTuple(args, "OOOOOOO", &py_freq_tetras, &py_grid_points, &py_mesh, &py_grid_address, &py_gp_ir_index, &py_relative_grid_address, &py_frequencies)) { return NULL; } freq_tetras = (double*)PyArray_DATA(py_freq_tetras); grid_points = (long*)PyArray_DATA(py_grid_points); num_gp_in = PyArray_DIMS(py_grid_points)[0]; mesh = (long*)PyArray_DATA(py_mesh); grid_address = (long(*)[3])PyArray_DATA(py_grid_address); gp_ir_index = (long*)PyArray_DATA(py_gp_ir_index); relative_grid_address = 
(long(*)[3])PyArray_DATA(py_relative_grid_address);
  frequencies = (double*)PyArray_DATA(py_frequencies);
  num_band = PyArray_DIMS(py_frequencies)[1];

  phpy_get_tetrahedra_frequenies(freq_tetras,
                                 mesh,
                                 grid_points,
                                 grid_address,
                                 relative_grid_address,
                                 gp_ir_index,
                                 frequencies,
                                 num_band,
                                 num_gp_in);

  Py_RETURN_NONE;
}

/* Python wrapper: compute a phonon density of states with the linear
 * tetrahedron method.  Unpacks the NumPy array arguments, derives all
 * dimensions from the array shapes, and delegates the numerical work to
 * phpy_tetrahedron_method_dos().  The result is accumulated in-place into
 * py_dos; the function itself returns None. */
static PyObject * py_tetrahedron_method_dos(PyObject *self, PyObject *args)
{
  PyArrayObject* py_dos;
  PyArrayObject* py_mesh;
  PyArrayObject* py_freq_points;
  PyArrayObject* py_frequencies;
  PyArrayObject* py_coef;
  PyArrayObject* py_grid_address;
  PyArrayObject* py_grid_mapping_table;
  PyArrayObject* py_relative_grid_address;

  double *dos;
  long* mesh;
  double* freq_points;
  double* frequencies;
  double* coef;
  long (*grid_address)[3];
  long num_gp, num_ir_gp, num_band, num_freq_points, num_coef;
  long *grid_mapping_table;
  long (*relative_grid_address)[4][3];

  /* All eight arguments are NumPy arrays; no scalar arguments. */
  if (!PyArg_ParseTuple(args, "OOOOOOOO",
                        &py_dos,
                        &py_mesh,
                        &py_freq_points,
                        &py_frequencies,
                        &py_coef,
                        &py_grid_address,
                        &py_grid_mapping_table,
                        &py_relative_grid_address)) {
    return NULL;
  }

  /* dos[num_ir_gp][num_band][num_freq_points][num_coef] */
  dos = (double*)PyArray_DATA(py_dos);
  mesh = (long*)PyArray_DATA(py_mesh);
  freq_points = (double*)PyArray_DATA(py_freq_points);
  num_freq_points = (long)PyArray_DIMS(py_freq_points)[0];
  /* frequencies has shape [num_ir_gp][num_band]; both counts come from it. */
  frequencies = (double*)PyArray_DATA(py_frequencies);
  num_ir_gp = (long)PyArray_DIMS(py_frequencies)[0];
  num_band = (long)PyArray_DIMS(py_frequencies)[1];
  coef = (double*)PyArray_DATA(py_coef);
  num_coef = (long)PyArray_DIMS(py_coef)[1];
  grid_address = (long(*)[3])PyArray_DATA(py_grid_address);
  num_gp = (long)PyArray_DIMS(py_grid_address)[0];
  grid_mapping_table = (long*)PyArray_DATA(py_grid_mapping_table);
  relative_grid_address = (long(*)[4][3])PyArray_DATA(py_relative_grid_address);

  phpy_tetrahedron_method_dos(dos,
                              mesh,
                              grid_address,
                              relative_grid_address,
                              grid_mapping_table,
                              freq_points,
                              frequencies,
                              coef,
                              num_freq_points,
                              num_ir_gp,
                              num_band,
                              num_coef,
                              num_gp);
  Py_RETURN_NONE;
}
trmm_x_sky_u_hi_row.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <memory.h>

/*
 * Dense = alpha * A * X + beta * Y for A stored in skyline (SKY) format.
 *
 * The "ac == cr" branch multiplies by alpha alone and ignores the stored
 * diagonal value, i.e. the diagonal is treated as 1 (unit-diagonal variant,
 * matching the "_u_" in the file name); the "ac > cr" branch applies only
 * strictly-upper entries, matching "_hi_".  X and Y are column-major dense
 * blocks with leading dimensions ldx/ldy.
 *
 * NOTE(review): alpha_mul(d,a,b) / alpha_madde(d,a,b) are project macros;
 * from usage here they appear to mean d = a*b and d += a*b respectively --
 * confirm against alphasparse/kernel.h.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
    ALPHA_INT num_threads = alpha_get_thread_num();

    /* First pass: scale the whole output block, Y := beta * Y.
       Each (i, j) element is independent, so rows parallelize freely. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT i = 0; i < mat->rows; i++)
        for(ALPHA_INT j = 0; j < columns; j++)
            alpha_mul(y[index2(i, j, ldy)], y[index2(i, j, ldy)], beta);

    /* Second pass: accumulate alpha * A * X.  Parallelized over the dense
       columns cc, so no two threads touch the same y column. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT cc = 0; cc < columns; ++cc)
    {
        for (ALPHA_INT ac = 0; ac < mat->cols; ++ac)
        {
            /* Skyline column ac occupies values[start..end); the entries run
               contiguously upward and end on the diagonal. */
            ALPHA_INT start = mat->pointers[ac];
            ALPHA_INT end = mat->pointers[ac + 1];
            ALPHA_INT idx = 1;
            ALPHA_INT eles_num = end - start;
            for (ALPHA_INT ai = start; ai < end; ++ai)
            {
                /* Row index of values[ai]: the last stored entry (idx ==
                   eles_num) lands on the diagonal row cr == ac. */
                ALPHA_INT cr = ac - eles_num + idx;
                if (ac > cr)
                {
                    /* Strictly upper-triangular entry: y[cr] += alpha*A[cr][ac]*x[ac] */
                    ALPHA_Number t;
                    alpha_mul(t, alpha, mat->values[ai]);
                    alpha_madde(y[index2(cr, cc, ldy)], t, x[index2(ac, cc, ldx)]);
                }
                else if(ac == cr)
                    /* Unit diagonal: stored value ignored, y[cr] += alpha*x[cr] */
                    alpha_madde(y[index2(cr, cc, ldy)], alpha, x[index2(ac, cc, ldx)]);
                idx++;
            }
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
morphology.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y % % MM MM O O R R P P H H O O L O O G Y Y % % M M M O O RRRR PPPP HHHHH O O L O O G GGG Y % % M M O O R R P H H O O L O O G G Y % % M M OOO R R P H H OOO LLLLL OOO GGG Y % % % % % % MagickCore Morphology Methods % % % % Software Design % % Anthony Thyssen % % January 2010 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Morphology is the application of various kernels, of any size or shape, to an % image in various ways (typically binary, but not always). % % Convolution (weighted sum or average) is just one specific type of % morphology. Just one that is very common for image bluring and sharpening % effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring. % % This module provides not only a general morphology function, and the ability % to apply more advanced or iterative morphologies, but also functions for the % generation of many different types of kernel arrays from user supplied % arguments. Prehaps even the generation of a kernel from a small image. */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/linked-list.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor-private.h" #include "MagickCore/morphology.h" #include "MagickCore/morphology-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/prepress.h" #include "MagickCore/quantize.h" #include "MagickCore/resource_.h" #include "MagickCore/registry.h" #include "MagickCore/semaphore.h" #include "MagickCore/splay-tree.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" /* Other global definitions used by module. 
*/ #define Minimize(assign,value) assign=MagickMin(assign,value) #define Maximize(assign,value) assign=MagickMax(assign,value) /* Integer Factorial Function - for a Binomial kernel */ #if 1 static inline size_t fact(size_t n) { size_t f,l; for(f=1, l=2; l <= n; f=f*l, l++); return(f); } #elif 1 /* glibc floating point alternatives */ #define fact(n) ((size_t)tgamma((double)n+1)) #else #define fact(n) ((size_t)lgamma((double)n+1)) #endif /* Currently these are only internal to this module */ static void CalcKernelMetaData(KernelInfo *), ExpandMirrorKernelInfo(KernelInfo *), ExpandRotateKernelInfo(KernelInfo *, const double), RotateKernelInfo(KernelInfo *, double); /* Quick function to find last kernel in a kernel list */ static inline KernelInfo *LastKernelInfo(KernelInfo *kernel) { while (kernel->next != (KernelInfo *) NULL) kernel=kernel->next; return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireKernelInfo() takes the given string (generally supplied by the % user) and converts it into a Morphology/Convolution Kernel. This allows % users to specify a kernel from a number of pre-defined kernels, or to fully % specify their own kernel for a specific Convolution or Morphology % Operation. % % The kernel so generated can be any rectangular array of floating point % values (doubles) with the 'control point' or 'pixel being affected' % anywhere within that array of values. % % Previously IM was restricted to a square of odd size using the exact % center as origin, this is no longer the case, and any rectangular kernel % with any value being declared the origin. This in turn allows the use of % highly asymmetrical kernels. 
%
%  The floating point values in the kernel can also include a special value
%  known as 'nan' or 'not a number' to indicate that this value is not part
%  of the kernel array. This allows you to shape the kernel within its
%  rectangular area. That is 'nan' values provide a 'mask' for the kernel
%  shape.  However at least one non-nan value must be provided for correct
%  working of a kernel.
%
%  The returned kernel should be freed using the DestroyKernelInfo() when you
%  are finished with it.  Do not free this memory yourself.
%
%  Input kernel definition strings can consist of any of three types.
%
%    "name:args[[@><]"
%         Select from one of the built in kernels, using the name and
%         geometry arguments supplied.  See AcquireKernelBuiltIn()
%
%    "WxH[+X+Y][@><]:num, num, num ..."
%         a kernel of size W by H, with W*H floating point numbers following.
%         the 'center' can optionally be defined at +X+Y (such that +0+0
%         is top left corner). If not defined the pixel in the center, for
%         odd sizes, or to the immediate top or left of center for even sizes
%         is automatically selected.
%
%    "num, num, num, num, ..."
%         list of floating point numbers defining an 'old style' odd sized
%         square kernel.  At least 9 values should be provided for a 3x3
%         square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
%         Values can be space or comma separated.  This is not recommended.
%
%  You can define a 'list of kernels' which can be used by some morphology
%  operators. A list is defined as a semi-colon separated list of kernels.
%
%     " kernel ; kernel ; kernel ; "
%
%  Any extra ';' characters, at start, end or between kernel definitions are
%  simply ignored.
%
%  The special flags will expand a single kernel, into a list of rotated
%  kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree
%  cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also exands using 90-degree rotates, but giving a 180-degree % reflected kernel before the +/- 90-degree rotations, which can be important % for Thinning operations. % % Note that 'name' kernels will start with an alphabetic character while the % new kernel specification has a ':' character in its specification string. % If neither is the case, it is assumed an old style of a simple list of % numbers generating a odd-sized square kernel has been given. % % The format of the AcquireKernal method is: % % KernelInfo *AcquireKernelInfo(const char *kernel_string) % % A description of each parameter follows: % % o kernel_string: the Morphology/Convolution kernel wanted. % */ /* This was separated so that it could be used as a separate ** array input handling function, such as for -color-matrix */ static KernelInfo *ParseKernelArray(const char *kernel_string) { KernelInfo *kernel; char token[MagickPathExtent]; const char *p, *end; register ssize_t i; double nan = sqrt((double)-1.0); /* Special Value : Not A Number */ MagickStatusType flags; GeometryInfo args; kernel=(KernelInfo *) AcquireQuantumMemory(1,sizeof(*kernel)); if (kernel == (KernelInfo *) NULL) return(kernel); (void) memset(kernel,0,sizeof(*kernel)); kernel->minimum = kernel->maximum = kernel->angle = 0.0; kernel->negative_range = kernel->positive_range = 0.0; kernel->type = UserDefinedKernel; kernel->next = (KernelInfo *) NULL; kernel->signature=MagickCoreSignature; if (kernel_string == (const char *) NULL) return(kernel); /* find end of this specific kernel definition string */ end = strchr(kernel_string, ';'); if ( end == (char *) NULL ) end = strchr(kernel_string, '\0'); /* clear flags - for Expanding kernel lists thorugh rotations */ flags = NoValue; /* Has a ':' in argument - New user kernel specification FUTURE: this split on ':' could be done by StringToken() */ p = strchr(kernel_string, ':'); if ( p != (char *) NULL && p < end) { /* ParseGeometry() needs the geometry separated! 
-- Arrgghh */ memcpy(token, kernel_string, (size_t) (p-kernel_string)); token[p-kernel_string] = '\0'; SetGeometryInfo(&args); flags = ParseGeometry(token, &args); /* Size handling and checks of geometry settings */ if ( (flags & WidthValue) == 0 ) /* if no width then */ args.rho = args.sigma; /* then width = height */ if ( args.rho < 1.0 ) /* if width too small */ args.rho = 1.0; /* then width = 1 */ if ( args.sigma < 1.0 ) /* if height too small */ args.sigma = args.rho; /* then height = width */ kernel->width = (size_t)args.rho; kernel->height = (size_t)args.sigma; /* Offset Handling and Checks */ if ( args.xi < 0.0 || args.psi < 0.0 ) return(DestroyKernelInfo(kernel)); kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi : (ssize_t) (kernel->width-1)/2; kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi : (ssize_t) (kernel->height-1)/2; if ( kernel->x >= (ssize_t) kernel->width || kernel->y >= (ssize_t) kernel->height ) return(DestroyKernelInfo(kernel)); p++; /* advance beyond the ':' */ } else { /* ELSE - Old old specification, forming odd-square kernel */ /* count up number of values given */ p=(const char *) kernel_string; while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\'')) p++; /* ignore "'" chars for convolve filter usage - Cristy */ for (i=0; p < end; i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); } /* set the size of the kernel - old sized square */ kernel->width = kernel->height= (size_t) sqrt((double) i+1.0); kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; p=(const char *) kernel_string; while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\'')) p++; /* ignore "'" chars for convolve filter usage - Cristy */ } /* Read in the kernel values from rest of input string argument */ kernel->values=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory( kernel->width,kernel->height*sizeof(*kernel->values))); if (kernel->values == (MagickRealType 
*) NULL) return(DestroyKernelInfo(kernel)); kernel->minimum=MagickMaximumValue; kernel->maximum=(-MagickMaximumValue); kernel->negative_range = kernel->positive_range = 0.0; for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); if ( LocaleCompare("nan",token) == 0 || LocaleCompare("-",token) == 0 ) { kernel->values[i] = nan; /* this value is not part of neighbourhood */ } else { kernel->values[i] = StringToDouble(token,(char **) NULL); ( kernel->values[i] < 0) ? ( kernel->negative_range += kernel->values[i] ) : ( kernel->positive_range += kernel->values[i] ); Minimize(kernel->minimum, kernel->values[i]); Maximize(kernel->maximum, kernel->values[i]); } } /* sanity check -- no more values in kernel definition */ (void) GetNextToken(p,&p,MagickPathExtent,token); if ( *token != '\0' && *token != ';' && *token != '\'' ) return(DestroyKernelInfo(kernel)); #if 0 /* this was the old method of handling a incomplete kernel */ if ( i < (ssize_t) (kernel->width*kernel->height) ) { Minimize(kernel->minimum, kernel->values[i]); Maximize(kernel->maximum, kernel->values[i]); for ( ; i < (ssize_t) (kernel->width*kernel->height); i++) kernel->values[i]=0.0; } #else /* Number of values for kernel was not enough - Report Error */ if ( i < (ssize_t) (kernel->width*kernel->height) ) return(DestroyKernelInfo(kernel)); #endif /* check that we recieved at least one real (non-nan) value! 
*/ if (kernel->minimum == MagickMaximumValue) return(DestroyKernelInfo(kernel)); if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel size */ ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */ else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */ ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */ else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */ ExpandMirrorKernelInfo(kernel); /* 90 degree mirror rotate */ return(kernel); } static KernelInfo *ParseKernelName(const char *kernel_string, ExceptionInfo *exception) { char token[MagickPathExtent]; const char *p, *end; GeometryInfo args; KernelInfo *kernel; MagickStatusType flags; ssize_t type; /* Parse special 'named' kernel */ (void) GetNextToken(kernel_string,&p,MagickPathExtent,token); type=ParseCommandOption(MagickKernelOptions,MagickFalse,token); if ( type < 0 || type == UserDefinedKernel ) return((KernelInfo *) NULL); /* not a valid named kernel */ while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';')) p++; end = strchr(p, ';'); /* end of this kernel defintion */ if ( end == (char *) NULL ) end = strchr(p, '\0'); /* ParseGeometry() needs the geometry separated! 
-- Arrgghh */ memcpy(token, p, (size_t) (end-p)); token[end-p] = '\0'; SetGeometryInfo(&args); flags = ParseGeometry(token, &args); #if 0 /* For Debugging Geometry Input */ (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n", flags, args.rho, args.sigma, args.xi, args.psi ); #endif /* special handling of missing values in input string */ switch( type ) { /* Shape Kernel Defaults */ case UnityKernel: if ( (flags & WidthValue) == 0 ) args.rho = 1.0; /* Default scale = 1.0, zero is valid */ break; case SquareKernel: case DiamondKernel: case OctagonKernel: case DiskKernel: case PlusKernel: case CrossKernel: if ( (flags & HeightValue) == 0 ) args.sigma = 1.0; /* Default scale = 1.0, zero is valid */ break; case RingKernel: if ( (flags & XValue) == 0 ) args.xi = 1.0; /* Default scale = 1.0, zero is valid */ break; case RectangleKernel: /* Rectangle - set size defaults */ if ( (flags & WidthValue) == 0 ) /* if no width then */ args.rho = args.sigma; /* then width = height */ if ( args.rho < 1.0 ) /* if width too small */ args.rho = 3; /* then width = 3 */ if ( args.sigma < 1.0 ) /* if height too small */ args.sigma = args.rho; /* then height = width */ if ( (flags & XValue) == 0 ) /* center offset if not defined */ args.xi = (double)(((ssize_t)args.rho-1)/2); if ( (flags & YValue) == 0 ) args.psi = (double)(((ssize_t)args.sigma-1)/2); break; /* Distance Kernel Defaults */ case ChebyshevKernel: case ManhattanKernel: case OctagonalKernel: case EuclideanKernel: if ( (flags & HeightValue) == 0 ) /* no distance scale */ args.sigma = 100.0; /* default distance scaling */ else if ( (flags & AspectValue ) != 0 ) /* '!' 
flag */ args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */ else if ( (flags & PercentValue ) != 0 ) /* '%' flag */ args.sigma *= QuantumRange/100.0; /* percentage of color range */ break; default: break; } kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args, exception); if ( kernel == (KernelInfo *) NULL ) return(kernel); /* global expand to rotated kernel list - only for single kernels */ if ( kernel->next == (KernelInfo *) NULL ) { if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel args */ ExpandRotateKernelInfo(kernel, 45.0); else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */ ExpandRotateKernelInfo(kernel, 90.0); else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */ ExpandMirrorKernelInfo(kernel); } return(kernel); } MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string, ExceptionInfo *exception) { KernelInfo *kernel, *new_kernel; char *kernel_cache, token[MagickPathExtent]; const char *p; if (kernel_string == (const char *) NULL) return(ParseKernelArray(kernel_string)); p=kernel_string; kernel_cache=(char *) NULL; if (*kernel_string == '@') { kernel_cache=FileToString(kernel_string+1,~0UL,exception); if (kernel_cache == (char *) NULL) return((KernelInfo *) NULL); p=(const char *) kernel_cache; } kernel=NULL; while (GetNextToken(p,(const char **) NULL,MagickPathExtent,token), *token != '\0') { /* ignore extra or multiple ';' kernel separators */ if (*token != ';') { /* tokens starting with alpha is a Named kernel */ if (isalpha((int) ((unsigned char) *token)) != 0) new_kernel=ParseKernelName(p,exception); else /* otherwise a user defined kernel array */ new_kernel=ParseKernelArray(p); /* Error handling -- this is not proper error handling! 
*/ if (new_kernel == (KernelInfo *) NULL) { if (kernel != (KernelInfo *) NULL) kernel=DestroyKernelInfo(kernel); return((KernelInfo *) NULL); } /* initialise or append the kernel list */ if (kernel == (KernelInfo *) NULL) kernel=new_kernel; else LastKernelInfo(kernel)->next=new_kernel; } /* look for the next kernel in list */ p=strchr(p,';'); if (p == (char *) NULL) break; p++; } if (kernel_cache != (char *) NULL) kernel_cache=DestroyString(kernel_cache); return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e K e r n e l B u i l t I n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireKernelBuiltIn() returned one of the 'named' built-in types of % kernels used for special purposes such as gaussian blurring, skeleton % pruning, and edge distance determination. % % They take a KernelType, and a set of geometry style arguments, which were % typically decoded from a user supplied string, or from a more complex % Morphology Method that was requested. % % The format of the AcquireKernalBuiltIn method is: % % KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type, % const GeometryInfo args) % % A description of each parameter follows: % % o type: the pre-defined type of kernel wanted % % o args: arguments defining or modifying the kernel % % Convolution Kernels % % Unity % The a No-Op or Scaling single element kernel. % % Gaussian:{radius},{sigma} % Generate a two-dimensional gaussian kernel, as used by -gaussian. % The sigma for the curve is required. The resulting kernel is % normalized, % % If 'sigma' is zero, you get a single pixel on a field of zeros. % % NOTE: that the 'radius' is optional, but if provided can limit (clip) % the final size of the resulting kernel to a square 2*radius+1 in size. % The radius should be at least 2 times that of the sigma value, or % sever clipping and aliasing may result. 
If not given or set to 0 the % radius will be determined so as to produce the best minimal error % result, which is usally much larger than is normally needed. % % LoG:{radius},{sigma} % "Laplacian of a Gaussian" or "Mexician Hat" Kernel. % The supposed ideal edge detection, zero-summing kernel. % % An alturnative to this kernel is to use a "DoG" with a sigma ratio of % approx 1.6 (according to wikipedia). % % DoG:{radius},{sigma1},{sigma2} % "Difference of Gaussians" Kernel. % As "Gaussian" but with a gaussian produced by 'sigma2' subtracted % from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1. % The result is a zero-summing kernel. % % Blur:{radius},{sigma}[,{angle}] % Generates a 1 dimensional or linear gaussian blur, at the angle given % (current restricted to orthogonal angles). If a 'radius' is given the % kernel is clipped to a width of 2*radius+1. Kernel can be rotated % by a 90 degree angle. % % If 'sigma' is zero, you get a single pixel on a field of zeros. % % Note that two convolutions with two "Blur" kernels perpendicular to % each other, is equivalent to a far larger "Gaussian" kernel with the % same sigma value, However it is much faster to apply. This is how the % "-blur" operator actually works. % % Comet:{width},{sigma},{angle} % Blur in one direction only, much like how a bright object leaves % a comet like trail. The Kernel is actually half a gaussian curve, % Adding two such blurs in opposite directions produces a Blur Kernel. % Angle can be rotated in multiples of 90 degrees. % % Note that the first argument is the width of the kernel and not the % radius of the kernel. % % Binomial:[{radius}] % Generate a discrete kernel using a 2 dimentional Pascel's Triangle % of values. Used for special forma of image filters. % % # Still to be implemented... % # % # Filter2D % # Filter1D % # Set kernel values using a resize filter, and given scale (sigma) % # Cylindrical or Linear. Is this possible with an image? 
% # % % Named Constant Convolution Kernels % % All these are unscaled, zero-summing kernels by default. As such for % non-HDRI version of ImageMagick some form of normalization, user scaling, % and biasing the results is recommended, to prevent the resulting image % being 'clipped'. % % The 3x3 kernels (most of these) can be circularly rotated in multiples of % 45 degrees to generate the 8 angled varients of each of the kernels. % % Laplacian:{type} % Discrete Lapacian Kernels, (without normalization) % Type 0 : 3x3 with center:8 surounded by -1 (8 neighbourhood) % Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood) % Type 2 : 3x3 with center:4 edge:1 corner:-2 % Type 3 : 3x3 with center:4 edge:-2 corner:1 % Type 5 : 5x5 laplacian % Type 7 : 7x7 laplacian % Type 15 : 5x5 LoG (sigma approx 1.4) % Type 19 : 9x9 LoG (sigma approx 1.4) % % Sobel:{angle} % Sobel 'Edge' convolution kernel (3x3) % | -1, 0, 1 | % | -2, 0,-2 | % | -1, 0, 1 | % % Roberts:{angle} % Roberts convolution kernel (3x3) % | 0, 0, 0 | % | -1, 1, 0 | % | 0, 0, 0 | % % Prewitt:{angle} % Prewitt Edge convolution kernel (3x3) % | -1, 0, 1 | % | -1, 0, 1 | % | -1, 0, 1 | % % Compass:{angle} % Prewitt's "Compass" convolution kernel (3x3) % | -1, 1, 1 | % | -1,-2, 1 | % | -1, 1, 1 | % % Kirsch:{angle} % Kirsch's "Compass" convolution kernel (3x3) % | -3,-3, 5 | % | -3, 0, 5 | % | -3,-3, 5 | % % FreiChen:{angle} % Frei-Chen Edge Detector is based on a kernel that is similar to % the Sobel Kernel, but is designed to be isotropic. That is it takes % into account the distance of the diagonal in the kernel. % % | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | % | 1, 0, -1 | % % FreiChen:{type},{angle} % % Frei-Chen Pre-weighted kernels... % % Type 0: default un-nomalized version shown above. % % Type 1: Orthogonal Kernel (same as type 11 below) % | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 1, 0, -1 | % % Type 2: Diagonal form of Kernel... 
% | 1, sqrt(2), 0 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 0, -sqrt(2) -1 | % % However this kernel is als at the heart of the FreiChen Edge Detection % Process which uses a set of 9 specially weighted kernel. These 9 % kernels not be normalized, but directly applied to the image. The % results is then added together, to produce the intensity of an edge in % a specific direction. The square root of the pixel value can then be % taken as the cosine of the edge, and at least 2 such runs at 90 degrees % from each other, both the direction and the strength of the edge can be % determined. % % Type 10: All 9 of the following pre-weighted kernels... % % Type 11: | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 1, 0, -1 | % % Type 12: | 1, sqrt(2), 1 | % | 0, 0, 0 | / 2*sqrt(2) % | 1, sqrt(2), 1 | % % Type 13: | sqrt(2), -1, 0 | % | -1, 0, 1 | / 2*sqrt(2) % | 0, 1, -sqrt(2) | % % Type 14: | 0, 1, -sqrt(2) | % | -1, 0, 1 | / 2*sqrt(2) % | sqrt(2), -1, 0 | % % Type 15: | 0, -1, 0 | % | 1, 0, 1 | / 2 % | 0, -1, 0 | % % Type 16: | 1, 0, -1 | % | 0, 0, 0 | / 2 % | -1, 0, 1 | % % Type 17: | 1, -2, 1 | % | -2, 4, -2 | / 6 % | -1, -2, 1 | % % Type 18: | -2, 1, -2 | % | 1, 4, 1 | / 6 % | -2, 1, -2 | % % Type 19: | 1, 1, 1 | % | 1, 1, 1 | / 3 % | 1, 1, 1 | % % The first 4 are for edge detection, the next 4 are for line detection % and the last is to add a average component to the results. % % Using a special type of '-1' will return all 9 pre-weighted kernels % as a multi-kernel list, so that you can use them directly (without % normalization) with the special "-set option:morphology:compose Plus" % setting to apply the full FreiChen Edge Detection Technique. % % If 'type' is large it will be taken to be an actual rotation angle for % the default FreiChen (type 0) kernel. As such FreiChen:45 will look % like a Sobel:45 but with 'sqrt(2)' instead of '2' values. 
% % WARNING: The above was layed out as per % http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf % But rotated 90 degrees so direction is from left rather than the top. % I have yet to find any secondary confirmation of the above. The only % other source found was actual source code at % http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf % Neigher paper defineds the kernels in a way that looks locical or % correct when taken as a whole. % % Boolean Kernels % % Diamond:[{radius}[,{scale}]] % Generate a diamond shaped kernel with given radius to the points. % Kernel size will again be radius*2+1 square and defaults to radius 1, % generating a 3x3 kernel that is slightly larger than a square. % % Square:[{radius}[,{scale}]] % Generate a square shaped kernel of size radius*2+1, and defaulting % to a 3x3 (radius 1). % % Octagon:[{radius}[,{scale}]] % Generate octagonal shaped kernel of given radius and constant scale. % Default radius is 3 producing a 7x7 kernel. A radius of 1 will result % in "Diamond" kernel. % % Disk:[{radius}[,{scale}]] % Generate a binary disk, thresholded at the radius given, the radius % may be a float-point value. Final Kernel size is floor(radius)*2+1 % square. A radius of 5.3 is the default. % % NOTE: That a low radii Disk kernels produce the same results as % many of the previously defined kernels, but differ greatly at larger % radii. Here is a table of equivalences... % "Disk:1" => "Diamond", "Octagon:1", or "Cross:1" % "Disk:1.5" => "Square" % "Disk:2" => "Diamond:2" % "Disk:2.5" => "Octagon" % "Disk:2.9" => "Square:2" % "Disk:3.5" => "Octagon:3" % "Disk:4.5" => "Octagon:4" % "Disk:5.4" => "Octagon:5" % "Disk:6.4" => "Octagon:6" % All other Disk shapes are unique to this kernel, but because a "Disk" % is more circular when using a larger radius, using a larger radius is % preferred over iterating the morphological operation. % % Rectangle:{geometry} % Simply generate a rectangle of 1's with the size given. 
You can also % specify the location of the 'control point', otherwise the closest % pixel to the center of the rectangle is selected. % % Properly centered and odd sized rectangles work the best. % % Symbol Dilation Kernels % % These kernel is not a good general morphological kernel, but is used % more for highlighting and marking any single pixels in an image using, % a "Dilate" method as appropriate. % % For the same reasons iterating these kernels does not produce the % same result as using a larger radius for the symbol. % % Plus:[{radius}[,{scale}]] % Cross:[{radius}[,{scale}]] % Generate a kernel in the shape of a 'plus' or a 'cross' with % a each arm the length of the given radius (default 2). % % NOTE: "plus:1" is equivalent to a "Diamond" kernel. % % Ring:{radius1},{radius2}[,{scale}] % A ring of the values given that falls between the two radii. % Defaults to a ring of approximataly 3 radius in a 7x7 kernel. % This is the 'edge' pixels of the default "Disk" kernel, % More specifically, "Ring" -> "Ring:2.5,3.5,1.0" % % Hit and Miss Kernels % % Peak:radius1,radius2 % Find any peak larger than the pixels the fall between the two radii. % The default ring of pixels is as per "Ring". 
% Edges % Find flat orthogonal edges of a binary shape % Corners % Find 90 degree corners of a binary shape % Diagonals:type % A special kernel to thin the 'outside' of diagonals % LineEnds:type % Find end points of lines (for pruning a skeletion) % Two types of lines ends (default to both) can be searched for % Type 0: All line ends % Type 1: single kernel for 4-conneected line ends % Type 2: single kernel for simple line ends % LineJunctions % Find three line junctions (within a skeletion) % Type 0: all line junctions % Type 1: Y Junction kernel % Type 2: Diagonal T Junction kernel % Type 3: Orthogonal T Junction kernel % Type 4: Diagonal X Junction kernel % Type 5: Orthogonal + Junction kernel % Ridges:type % Find single pixel ridges or thin lines % Type 1: Fine single pixel thick lines and ridges % Type 2: Find two pixel thick lines and ridges % ConvexHull % Octagonal Thickening Kernel, to generate convex hulls of 45 degrees % Skeleton:type % Traditional skeleton generating kernels. % Type 1: Tradional Skeleton kernel (4 connected skeleton) % Type 2: HIPR2 Skeleton kernel (8 connected skeleton) % Type 3: Thinning skeleton based on a ressearch paper by % Dan S. Bloomberg (Default Type) % ThinSE:type % A huge variety of Thinning Kernels designed to preserve conectivity. % many other kernel sets use these kernels as source definitions. % Type numbers are 41-49, 81-89, 481, and 482 which are based on % the super and sub notations used in the source research paper. % % Distance Measuring Kernels % % Different types of distance measuring methods, which are used with the % a 'Distance' morphology method for generating a gradient based on % distance from an edge of a binary shape, though there is a technique % for handling a anti-aliased shape. % % See the 'Distance' Morphological Method, for information of how it is % applied. 
% % Chebyshev:[{radius}][x{scale}[%!]] % Chebyshev Distance (also known as Tchebychev or Chessboard distance) % is a value of one to any neighbour, orthogonal or diagonal. One why % of thinking of it is the number of squares a 'King' or 'Queen' in % chess needs to traverse reach any other position on a chess board. % It results in a 'square' like distance function, but one where % diagonals are given a value that is closer than expected. % % Manhattan:[{radius}][x{scale}[%!]] % Manhattan Distance (also known as Rectilinear, City Block, or the Taxi % Cab distance metric), it is the distance needed when you can only % travel in horizontal or vertical directions only. It is the % distance a 'Rook' in chess would have to travel, and results in a % diamond like distances, where diagonals are further than expected. % % Octagonal:[{radius}][x{scale}[%!]] % An interleving of Manhatten and Chebyshev metrics producing an % increasing octagonally shaped distance. Distances matches those of % the "Octagon" shaped kernel of the same radius. The minimum radius % and default is 2, producing a 5x5 kernel. % % Euclidean:[{radius}][x{scale}[%!]] % Euclidean distance is the 'direct' or 'as the crow flys' distance. % However by default the kernel size only has a radius of 1, which % limits the distance to 'Knight' like moves, with only orthogonal and % diagonal measurements being correct. As such for the default kernel % you will get octagonal like distance function. % % However using a larger radius such as "Euclidean:4" you will get a % much smoother distance gradient from the edge of the shape. Especially % if the image is pre-processed to include any anti-aliasing pixels. % Of course a larger kernel is slower to use, and not always needed. % % The first three Distance Measuring Kernels will only generate distances % of exact multiples of {scale} in binary images. As such you can use a % scale of 1 without loosing any information. 
However you also need some % scaling when handling non-binary anti-aliased shapes. % % The "Euclidean" Distance Kernel however does generate a non-integer % fractional results, and as such scaling is vital even for binary shapes. % */ MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type, const GeometryInfo *args,ExceptionInfo *exception) { KernelInfo *kernel; register ssize_t i; register ssize_t u, v; double nan = sqrt((double)-1.0); /* Special Value : Not A Number */ /* Generate a new empty kernel if needed */ kernel=(KernelInfo *) NULL; switch(type) { case UndefinedKernel: /* These should not call this function */ case UserDefinedKernel: assert("Should not call this function" != (char *) NULL); break; case LaplacianKernel: /* Named Descrete Convolution Kernels */ case SobelKernel: /* these are defined using other kernels */ case RobertsKernel: case PrewittKernel: case CompassKernel: case KirschKernel: case FreiChenKernel: case EdgesKernel: /* Hit and Miss kernels */ case CornersKernel: case DiagonalsKernel: case LineEndsKernel: case LineJunctionsKernel: case RidgesKernel: case ConvexHullKernel: case SkeletonKernel: case ThinSEKernel: break; /* A pre-generated kernel is not needed */ #if 0 /* set to 1 to do a compile-time check that we haven't missed anything */ case UnityKernel: case GaussianKernel: case DoGKernel: case LoGKernel: case BlurKernel: case CometKernel: case BinomialKernel: case DiamondKernel: case SquareKernel: case RectangleKernel: case OctagonKernel: case DiskKernel: case PlusKernel: case CrossKernel: case RingKernel: case PeaksKernel: case ChebyshevKernel: case ManhattanKernel: case OctangonalKernel: case EuclideanKernel: #else default: #endif /* Generate the base Kernel Structure */ kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel)); if (kernel == (KernelInfo *) NULL) return(kernel); (void) memset(kernel,0,sizeof(*kernel)); kernel->minimum = kernel->maximum = kernel->angle = 0.0; kernel->negative_range = 
kernel->positive_range = 0.0; kernel->type = type; kernel->next = (KernelInfo *) NULL; kernel->signature=MagickCoreSignature; break; } switch(type) { /* Convolution Kernels */ case UnityKernel: { kernel->height = kernel->width = (size_t) 1; kernel->x = kernel->y = (ssize_t) 0; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(1,sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); kernel->maximum = kernel->values[0] = args->rho; break; } break; case GaussianKernel: case DoGKernel: case LoGKernel: { double sigma = fabs(args->sigma), sigma2 = fabs(args->xi), A, B, R; if ( args->rho >= 1.0 ) kernel->width = (size_t)args->rho*2+1; else if ( (type != DoGKernel) || (sigma >= sigma2) ) kernel->width = GetOptimalKernelWidth2D(args->rho,sigma); else kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2); kernel->height = kernel->width; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* WARNING: The following generates a 'sampled gaussian' kernel. * What we really want is a 'discrete gaussian' kernel. 
* * How to do this is I don't know, but appears to be basied on the * Error Function 'erf()' (intergral of a gaussian) */ if ( type == GaussianKernel || type == DoGKernel ) { /* Calculate a Gaussian, OR positive half of a DoG */ if ( sigma > MagickEpsilon ) { A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ B = (double) (1.0/(Magick2PI*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B; } else /* limiting case - a unity (normalized Dirac) kernel */ { (void) memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } } if ( type == DoGKernel ) { /* Subtract a Negative Gaussian for "Difference of Gaussian" */ if ( sigma2 > MagickEpsilon ) { sigma = sigma2; /* simplify loop expressions */ A = 1.0/(2.0*sigma*sigma); B = (double) (1.0/(Magick2PI*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B; } else /* limiting case - a unity (normalized Dirac) kernel */ kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0; } if ( type == LoGKernel ) { /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */ if ( sigma > MagickEpsilon ) { A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { R = ((double)(u*u+v*v))*A; kernel->values[i] = (1-R)*exp(-R)*B; } } else /* special case - generate a unity kernel */ { (void) memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } } /* Note the above kernels may have been 'clipped' by a user defined ** radius, producing a smaller 
(darker) kernel. Also for very small ** sigma's (> 0.1) the central value becomes larger than one, and thus ** producing a very bright kernel. ** ** Normalization will still be needed. */ /* Normalize the 2D Gaussian Kernel ** ** NB: a CorrelateNormalize performs a normal Normalize if ** there are no negative values. */ CalcKernelMetaData(kernel); /* the other kernel meta-data */ ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue); break; } case BlurKernel: { double sigma = fabs(args->sigma), alpha, beta; if ( args->rho >= 1.0 ) kernel->width = (size_t)args->rho*2+1; else kernel->width = GetOptimalKernelWidth1D(args->rho,sigma); kernel->height = 1; kernel->x = (ssize_t) (kernel->width-1)/2; kernel->y = 0; kernel->negative_range = kernel->positive_range = 0.0; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); #if 1 #define KernelRank 3 /* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix). ** It generates a gaussian 3 times the width, and compresses it into ** the expected range. This produces a closer normalization of the ** resulting kernel, especially for very low sigma values. ** As such while wierd it is prefered. ** ** I am told this method originally came from Photoshop. ** ** A properly normalized curve is generated (apart from edge clipping) ** even though we later normalize the result (for edge clipping) ** to allow the correct generation of a "Difference of Blurs". 
*/ /* initialize */ v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */ (void) memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); /* Calculate a Positive 1D Gaussian */ if ( sigma > MagickEpsilon ) { sigma *= KernelRank; /* simplify loop expressions */ alpha = 1.0/(2.0*sigma*sigma); beta= (double) (1.0/(MagickSQ2PI*sigma )); for ( u=-v; u <= v; u++) { kernel->values[(u+v)/KernelRank] += exp(-((double)(u*u))*alpha)*beta; } } else /* special case - generate a unity kernel */ kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; #else /* Direct calculation without curve averaging This is equivelent to a KernelRank of 1 */ /* Calculate a Positive Gaussian */ if ( sigma > MagickEpsilon ) { alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ beta = 1.0/(MagickSQ2PI*sigma); for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = exp(-((double)(u*u))*alpha)*beta; } else /* special case - generate a unity kernel */ { (void) memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } #endif /* Note the above kernel may have been 'clipped' by a user defined ** radius, producing a smaller (darker) kernel. Also for very small ** sigma's (> 0.1) the central value becomes larger than one, as a ** result of not generating a actual 'discrete' kernel, and thus ** producing a very bright 'impulse'. ** ** Becuase of these two factors Normalization is required! */ /* Normalize the 1D Gaussian Kernel ** ** NB: a CorrelateNormalize performs a normal Normalize if ** there are no negative values. 
*/ CalcKernelMetaData(kernel); /* the other kernel meta-data */ ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue); /* rotate the 1D kernel by given angle */ RotateKernelInfo(kernel, args->xi ); break; } case CometKernel: { double sigma = fabs(args->sigma), A; if ( args->rho < 1.0 ) kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1; else kernel->width = (size_t)args->rho; kernel->x = kernel->y = 0; kernel->height = 1; kernel->negative_range = kernel->positive_range = 0.0; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* A comet blur is half a 1D gaussian curve, so that the object is ** blurred in one direction only. This may not be quite the right ** curve to use so may change in the future. The function must be ** normalised after generation, which also resolves any clipping. ** ** As we are normalizing and not subtracting gaussians, ** there is no need for a divisor in the gaussian formula ** ** It is less comples */ if ( sigma > MagickEpsilon ) { #if 1 #define KernelRank 3 v = (ssize_t) kernel->width*KernelRank; /* start/end points */ (void) memset(kernel->values,0, (size_t) kernel->width*sizeof(*kernel->values)); sigma *= KernelRank; /* simplify the loop expression */ A = 1.0/(2.0*sigma*sigma); /* B = 1.0/(MagickSQ2PI*sigma); */ for ( u=0; u < v; u++) { kernel->values[u/KernelRank] += exp(-((double)(u*u))*A); /* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */ } for (i=0; i < (ssize_t) kernel->width; i++) kernel->positive_range += kernel->values[i]; #else A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */ /* B = 1.0/(MagickSQ2PI*sigma); */ for ( i=0; i < (ssize_t) kernel->width; i++) kernel->positive_range += kernel->values[i] = exp(-((double)(i*i))*A); /* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */ #endif } else /* special case - 
generate a unity kernel */ { (void) memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; kernel->positive_range = 1.0; } kernel->minimum = 0.0; kernel->maximum = kernel->values[0]; kernel->negative_range = 0.0; ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */ RotateKernelInfo(kernel, args->xi); /* Rotate by angle */ break; } case BinomialKernel: { size_t order_f; if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; order_f = fact(kernel->width-1); kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values within diamond area to scale given */ for ( i=0, v=0; v < (ssize_t)kernel->height; v++) { size_t alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) ); for ( u=0; u < (ssize_t)kernel->width; u++, i++) kernel->positive_range += kernel->values[i] = (double) (alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) )); } kernel->minimum = 1.0; kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width]; kernel->negative_range = 0.0; break; } /* Convolution Kernels - Well Known Named Constant Kernels */ case LaplacianKernel: { switch ( (int) args->rho ) { case 0: default: /* laplacian square filter -- default */ kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1"); break; case 1: /* laplacian diamond filter */ kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0"); break; case 2: kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2"); break; case 3: kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1"); break; case 5: /* a 5x5 laplacian */ kernel=ParseKernelArray( "5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 
-4,-1,0,-1,-4"); break; case 7: /* a 7x7 laplacian */ kernel=ParseKernelArray( "7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" ); break; case 15: /* a 5x5 LoG (sigma approx 1.4) */ kernel=ParseKernelArray( "5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0"); break; case 19: /* a 9x9 LoG (sigma approx 1.4) */ /* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */ kernel=ParseKernelArray( "9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; break; } case SobelKernel: { /* Simple Sobel Kernel */ kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case RobertsKernel: { kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case PrewittKernel: { kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case CompassKernel: { kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case KirschKernel: { kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case FreiChenKernel: /* Direction is set to be left to right positive */ /* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? 
*/ /* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */ { switch ( (int) args->rho ) { default: case 0: kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[3] = +(MagickRealType) MagickSQ2; kernel->values[5] = -(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ break; case 2: kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[1] = kernel->values[3]= +(MagickRealType) MagickSQ2; kernel->values[5] = kernel->values[7]= -(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 10: { kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19",exception); if (kernel == (KernelInfo *) NULL) return(kernel); break; } case 1: case 11: kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[3] = +(MagickRealType) MagickSQ2; kernel->values[5] = -(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 12: kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[1] = +(MagickRealType) MagickSQ2; kernel->values[7] = +(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 13: kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[0] = +(MagickRealType) MagickSQ2; kernel->values[8] = -(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, 
(double) (1.0/2.0*MagickSQ2), NoValue); break; case 14: kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[2] = -(MagickRealType) MagickSQ2; kernel->values[6] = +(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 15: kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/2.0, NoValue); break; case 16: kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/2.0, NoValue); break; case 17: kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/6.0, NoValue); break; case 18: kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/6.0, NoValue); break; case 19: kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/3.0, NoValue); break; } if ( fabs(args->sigma) >= MagickEpsilon ) /* Rotate by correctly supplied 'angle' */ RotateKernelInfo(kernel, args->sigma); else if ( args->rho > 30.0 || args->rho < -30.0 ) /* Rotate by out of bounds 'type' */ RotateKernelInfo(kernel, args->rho); break; } /* Boolean or Shaped Kernels */ case DiamondKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) 
return(DestroyKernelInfo(kernel)); /* set all kernel values within diamond area to scale given */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x) kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case SquareKernel: case RectangleKernel: { double scale; if ( type == SquareKernel ) { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = (size_t) (2*args->rho+1); kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; scale = args->sigma; } else { /* NOTE: user defaults set in "AcquireKernelInfo()" */ if ( args->rho < 1.0 || args->sigma < 1.0 ) return(DestroyKernelInfo(kernel)); /* invalid args given */ kernel->width = (size_t)args->rho; kernel->height = (size_t)args->sigma; if ( args->xi < 0.0 || args->xi > (double)kernel->width || args->psi < 0.0 || args->psi > (double)kernel->height ) return(DestroyKernelInfo(kernel)); /* invalid args given */ kernel->x = (ssize_t) args->xi; kernel->y = (ssize_t) args->psi; scale = 1.0; } kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values to scale given */ u=(ssize_t) (kernel->width*kernel->height); for ( i=0; i < u; i++) kernel->values[i] = scale; kernel->minimum = kernel->maximum = scale; /* a flat shape */ kernel->positive_range = scale*u; break; } case OctagonKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius = 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( 
AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ( (labs((long) u)+labs((long) v)) <= ((long)kernel->x + (long)(kernel->x/2)) ) kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case DiskKernel: { ssize_t limit = (ssize_t)(args->rho*args->rho); if (args->rho < 0.4) /* default radius approx 4.3 */ kernel->width = kernel->height = 9L, limit = 18L; else kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ((u*u+v*v) <= limit) kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case PlusKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values along axises to given scale */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = (u == 0 || v == 0) ? 
args->sigma : nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0); break; } case CrossKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values along axises to given scale */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = (u == v || u == -v) ? args->sigma : nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0); break; } /* HitAndMiss Kernels */ case RingKernel: case PeaksKernel: { ssize_t limit1, limit2, scale; if (args->rho < args->sigma) { kernel->width = ((size_t)args->sigma)*2+1; limit1 = (ssize_t)(args->rho*args->rho); limit2 = (ssize_t)(args->sigma*args->sigma); } else { kernel->width = ((size_t)args->rho)*2+1; limit1 = (ssize_t)(args->sigma*args->sigma); limit2 = (ssize_t)(args->rho*args->rho); } if ( limit2 <= 0 ) kernel->width = 7L, limit1 = 7L, limit2 = 11L; kernel->height = kernel->width; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */ scale = (ssize_t) (( type == PeaksKernel) ? 
0.0 : args->xi); for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { ssize_t radius=u*u+v*v; if (limit1 < radius && radius <= limit2) kernel->positive_range += kernel->values[i] = (double) scale; else kernel->values[i] = nan; } kernel->minimum = kernel->maximum = (double) scale; if ( type == PeaksKernel ) { /* set the central point in the middle */ kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; kernel->positive_range = 1.0; kernel->maximum = 1.0; } break; } case EdgesKernel: { kernel=AcquireKernelInfo("ThinSE:482",exception); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */ break; } case CornersKernel: { kernel=AcquireKernelInfo("ThinSE:87",exception); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */ break; } case DiagonalsKernel: { switch ( (int) args->rho ) { case 0: default: { KernelInfo *new_kernel; kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; ExpandMirrorKernelInfo(kernel); return(kernel); } case 1: kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-"); break; case 2: kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case LineEndsKernel: { /* Kernels for finding the end of thin lines */ switch ( (int) args->rho ) { case 0: default: /* set of kernels to find all end of lines */ return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>",exception)); case 1: /* kernel for 4-connected line ends - no rotation */ 
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-"); break; case 2: /* kernel to add for 8-connected lines - no rotation */ kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1"); break; case 3: /* kernel to add for orthogonal line ends - does not find corners */ kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0"); break; case 4: /* traditional line end - fails on last T end */ kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case LineJunctionsKernel: { /* kernels for finding the junctions of multiple lines */ switch ( (int) args->rho ) { case 0: default: /* set of kernels to find all line junctions */ return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>",exception)); case 1: /* Y Junction */ kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-"); break; case 2: /* Diagonal T Junctions */ kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1"); break; case 3: /* Orthogonal T Junctions */ kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-"); break; case 4: /* Diagonal X Junctions */ kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1"); break; case 5: /* Orthogonal X Junctions - minimal diamond kernel */ kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case RidgesKernel: { /* Ridges - Ridge finding kernels */ KernelInfo *new_kernel; switch ( (int) args->rho ) { case 1: default: kernel=ParseKernelArray("3x1:0,1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */ break; case 2: kernel=ParseKernelArray("4x1:0,1,1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */ /* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */ /* Unfortunatally we can not yet 
rotate a non-square kernel */ /* But then we can't flip a non-symetrical kernel either */ new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; break; } break; } case ConvexHullKernel: { KernelInfo *new_kernel; /* first set of 8 kernels */ kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 
append the mirror versions too - no flip function yet */ new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; ExpandRotateKernelInfo(new_kernel, 90.0); LastKernelInfo(kernel)->next = new_kernel; break; } case SkeletonKernel: { switch ( (int) args->rho ) { case 1: default: /* Traditional Skeleton... ** A cyclically rotated single kernel */ kernel=AcquireKernelInfo("ThinSE:482",exception); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */ break; case 2: /* HIPR Variation of the cyclic skeleton ** Corners of the traditional method made more forgiving, ** but the retain the same cyclic order. */ kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;",exception); if (kernel == (KernelInfo *) NULL) return(kernel); if (kernel->next == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); kernel->type = type; kernel->next->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */ break; case 3: /* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's ** "Connectivity-Preserving Morphological Image Thransformations" ** by Dan S. Bloomberg, available on Leptonica, Selected Papers, ** http://www.leptonica.com/papers/conn.pdf */ kernel=AcquireKernelInfo("ThinSE:41; ThinSE:42; ThinSE:43", exception); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->next->type = type; kernel->next->next->type = type; ExpandMirrorKernelInfo(kernel); /* 12 kernels total */ break; } break; } case ThinSEKernel: { /* Special kernels for general thinning, while preserving connections ** "Connectivity-Preserving Morphological Image Thransformations" ** by Dan S. 
Bloomberg, available on Leptonica, Selected Papers, ** http://www.leptonica.com/papers/conn.pdf ** And ** http://tpgit.github.com/Leptonica/ccthin_8c_source.html ** ** Note kernels do not specify the origin pixel, allowing them ** to be used for both thickening and thinning operations. */ switch ( (int) args->rho ) { /* SE for 4-connected thinning */ case 41: /* SE_4_1 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1"); break; case 42: /* SE_4_2 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-"); break; case 43: /* SE_4_3 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1"); break; case 44: /* SE_4_4 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-"); break; case 45: /* SE_4_5 */ kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-"); break; case 46: /* SE_4_6 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1"); break; case 47: /* SE_4_7 */ kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-"); break; case 48: /* SE_4_8 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1"); break; case 49: /* SE_4_9 */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1"); break; /* SE for 8-connected thinning - negatives of the above */ case 81: /* SE_8_0 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-"); break; case 82: /* SE_8_2 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-"); break; case 83: /* SE_8_3 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-"); break; case 84: /* SE_8_4 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-"); break; case 85: /* SE_8_5 */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-"); break; case 86: /* SE_8_6 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1"); break; case 87: /* SE_8_7 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-"); break; case 88: /* SE_8_8 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-"); break; case 89: /* SE_8_9 */ kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-"); break; /* Special combined SE kernels */ case 423: /* SE_4_2 , SE_4_3 Combined Kernel */ kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-"); break; case 823: /* SE_8_2 , SE_8_3 Combined Kernel 
*/ kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-"); break; case 481: /* SE_48_1 - General Connected Corner Kernel */ kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-"); break; default: case 482: /* SE_48_2 - General Edge Kernel */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } /* Distance Measuring Kernels */ case ChebyshevKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*MagickMax(fabs((double)u),fabs((double)v)) ); kernel->maximum = kernel->values[0]; break; } case ManhattanKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*(labs((long) u)+labs((long) v)) ); kernel->maximum = kernel->values[0]; break; } case OctagonalKernel: { if (args->rho < 2.0) kernel->width = kernel->height = 5; /* default/minimum radius = 2 */ else kernel->width = kernel->height = 
((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { double r1 = MagickMax(fabs((double)u),fabs((double)v)), r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5); kernel->positive_range += kernel->values[i] = args->sigma*MagickMax(r1,r2); } kernel->maximum = kernel->values[0]; break; } case EuclideanKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*sqrt((double)(u*u+v*v)) ); kernel->maximum = kernel->values[0]; break; } default: { /* No-Op Kernel - Basically just a single pixel on its own */ kernel=ParseKernelArray("1:1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = UndefinedKernel; break; } break; } return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneKernelInfo() creates a new clone of the given Kernel List so that its % can be modified without effecting the original. The cloned kernel should % be destroyed using DestoryKernelInfo() when no longer needed. 
%
%  The format of the CloneKernelInfo method is:
%
%      KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
  register ssize_t
    i;

  KernelInfo
    *new_kernel;

  assert(kernel != (KernelInfo *) NULL);
  /* Allocate and shallow-copy the whole structure first; the pointer
  ** members ('values' and 'next') are replaced with deep copies below. */
  new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (new_kernel == (KernelInfo *) NULL)
    return(new_kernel);
  *new_kernel=(*kernel); /* copy values in structure */

  /* replace the values with a copy of the values */
  new_kernel->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel->width,kernel->height*sizeof(*kernel->values)));
  if (new_kernel->values == (MagickRealType *) NULL)
    return(DestroyKernelInfo(new_kernel)); /* releases the partial clone */
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    new_kernel->values[i]=kernel->values[i];

  /* Also clone the next kernel in the kernel list */
  /* NOTE(review): recursion depth equals the kernel-list length; fine for
  ** the short lists built in this module, but worth confirming for very
  ** long user-supplied kernel lists. */
  if ( kernel->next != (KernelInfo *) NULL ) {
    new_kernel->next = CloneKernelInfo(kernel->next);
    if ( new_kernel->next == (KernelInfo *) NULL )
      return(DestroyKernelInfo(new_kernel));
  }

  return(new_kernel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s t r o y K e r n e l I n f o                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyKernelInfo() frees the memory used by a Convolution/Morphology
%  kernel.
%
%  The format of the DestroyKernelInfo method is:
%
%      KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
  assert(kernel != (KernelInfo *) NULL);
  /* Recursively release any chained kernels first, then the aligned
  ** value array, and finally the structure itself. */
  if (kernel->next != (KernelInfo *) NULL)
    kernel->next=DestroyKernelInfo(kernel->next);
  kernel->values=(MagickRealType *) RelinquishAlignedMemory(kernel->values);
  kernel=(KernelInfo *) RelinquishMagickMemory(kernel);
  /* presumably RelinquishMagickMemory() returns NULL, making the idiom
  ** "k=DestroyKernelInfo(k);" safe - verify against memory.c */
  return(kernel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     E x p a n d M i r r o r K e r n e l I n f o                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
%  sequence of 90-degree rotated kernels but providing a reflected 180
%  rotation, before the -/+ 90-degree rotations.
%
%  This special rotation order produces a better, more symmetrical thinning of
%  objects.
%
%  The format of the ExpandMirrorKernelInfo method is:
%
%      void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
%  This function is only internal to this module, as it is not finalized,
%  especially with regard to non-orthogonal angles, and rotation of larger
%  2D kernels.
*/
#if 0
static void FlopKernelInfo(KernelInfo *kernel)
{ /* Do a Flop by reversing each row.
*/
  size_t
    y;
  register ssize_t
    x,r;
  register double
    *k,t;

  /* swap elements from both ends of each row, walking inwards */
  for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
    for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--)
      t=k[x],  k[x]=k[r],  k[r]=t;

  /* mirror the origin column as well */
  kernel->x = kernel->width - kernel->x - 1;
  /* NOTE(review): 'angle' is not declared anywhere in this function; this
  ** disabled (#if 0) block would not compile if enabled - confirm intent
  ** before resurrecting it. */
  angle = fmod(angle+180.0, 360.0);
}
#endif

static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
  /* Expand a single kernel into 4 kernels: original, 180-degree flip,
  ** 90-degree transpose, and its 180-degree flop.  Each step clones the
  ** PREVIOUS kernel in the chain, so the rotations accumulate.  On any
  ** clone failure the list is left partially expanded (best-effort). */
  KernelInfo
    *clone, *last;

  last = kernel;
  clone = CloneKernelInfo(last);
  if (clone == (KernelInfo *) NULL)
    return;
  RotateKernelInfo(clone, 180);   /* flip */
  LastKernelInfo(last)->next = clone;
  last = clone;

  clone = CloneKernelInfo(last);
  if (clone == (KernelInfo *) NULL)
    return;
  RotateKernelInfo(clone, 90);    /* transpose */
  LastKernelInfo(last)->next = clone;
  last = clone;

  clone = CloneKernelInfo(last);
  if (clone == (KernelInfo *) NULL)
    return;
  RotateKernelInfo(clone, 180);   /* flop */
  LastKernelInfo(last)->next = clone;

  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     E x p a n d R o t a t e K e r n e l I n f o                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
%  incrementally by the angle given, until the kernel repeats.
%
%  WARNING: 45 degree rotations only works for 3x3 kernels.
%  While 90 degree rotations only works for linear and square kernels
%
%  The format of the ExpandRotateKernelInfo method is:
%
%      void ExpandRotateKernelInfo(KernelInfo *kernel, double angle)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
%    o angle: angle to rotate in degrees
%
%  This function is only internal to this module, as it is not finalized,
%  especially with regard to non-orthogonal angles, and rotation of larger
%  2D kernels.
*/

/*
  Internal Routine - Return true if two kernels are the same.
*/
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
  const KernelInfo *kernel2)
{
  register size_t
    i;

  /* check size and origin location */
  if ( kernel1->width != kernel2->width
       || kernel1->height != kernel2->height
       || kernel1->x != kernel2->x
       || kernel1->y != kernel2->y )
    return MagickFalse;

  /* check actual kernel values */
  for (i=0; i < (kernel1->width*kernel1->height); i++) {
    /* Test for Nan equivalence */
    if ( IsNaN(kernel1->values[i]) && !IsNaN(kernel2->values[i]) )
      return MagickFalse;
    if ( IsNaN(kernel2->values[i]) && !IsNaN(kernel1->values[i]) )
      return MagickFalse;
    /* Test actual values are equivalent */
    if ( fabs(kernel1->values[i] - kernel2->values[i]) >= MagickEpsilon )
      return MagickFalse;
  }

  return MagickTrue;
}

/*
  Hard upper bound on the number of rotated kernels that may be generated.
  Without it the expansion loop below would run (and allocate) forever when
  floating-point error keeps a rotated kernel from ever comparing equal to
  the original.
*/
#define MaxKernels  16

static void ExpandRotateKernelInfo(KernelInfo *kernel,const double angle)
{
  KernelInfo
    *clone_info,
    *last;

  ssize_t
    count;

  clone_info=(KernelInfo *) NULL;
  last=kernel;
  count=0;
  DisableMSCWarning(4127)
  while (count < MaxKernels)
  {
  RestoreMSCWarning
    clone_info=CloneKernelInfo(last);
    if (clone_info == (KernelInfo *) NULL)
      break;
    RotateKernelInfo(clone_info,angle);
    if (SameKernelInfo(kernel,clone_info) != MagickFalse)
      break;  /* rotation sequence has repeated - stop expanding */
    LastKernelInfo(last)->next=clone_info;
    last=clone_info;
    clone_info=(KernelInfo *) NULL;  /* clone is now owned by the list */
    count++;
  }
  if (clone_info != (KernelInfo *) NULL)
    clone_info=DestroyKernelInfo(clone_info); /* kernel repeated - junk */
  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     C a l c K e r n e l M e t a D a t a                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CalcKernelMetaData() recalculate the KernelInfo meta-data of this kernel only,
%  using the kernel values.  This should only be used if it is not possible to
%  calculate that meta-data in some easier way.
%
%  It is important that the meta-data is correct before ScaleKernelInfo() is
%  used to perform kernel normalization.
%
%  The format of the CalcKernelMetaData method is:
%
%      void CalcKernelMetaData(KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to modify
%
%  WARNING: Minimum and Maximum values are assumed to include zero, even if
%  zero is not part of the kernel (as in Gaussian Derived kernels). This
%  however is not true for flat-shaped morphological kernels.
%
%  WARNING: Only the specific kernel pointed to is modified, not a list of
%  multiple kernels.
%
%  This is an internal function and not expected to be useful outside this
%  module.  This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
  register size_t
    i;

  /*
    Reset the meta-data, then accumulate it from the raw kernel values.
  */
  kernel->minimum = kernel->maximum = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  for (i=0; i < (kernel->width*kernel->height); i++)
  {
    double
      value;

    /* snap near-zero entries to exactly zero before accumulating */
    if ( fabs(kernel->values[i]) < MagickEpsilon )
      kernel->values[i] = 0.0;
    value=kernel->values[i];
    if (value < 0)
      kernel->negative_range += value;
    else
      kernel->positive_range += value;
    Minimize(kernel->minimum, value);
    Maximize(kernel->maximum, value);
  }
  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o r p h o l o g y A p p l y                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MorphologyApply() applies a morphological method, multiple times using
%  a list of multiple kernels.  This is the method that should be called by
%  other 'operators' that internally use morphology operations as part of
%  their processing.
%
%  It is basically equivalent to MorphologyImage() (see below) but without
%  any user controls.  This allows internal programs to use this method to
%  perform a specific task without possible interference by any API user
%  supplied settings.
%
%  It is MorphologyImage()'s task to extract any such user controls, and
%  pass them to this function for processing.
%
%  More specifically all given kernels should already be scaled, normalised,
%  and blended appropriately before being passed to this routine.  The
%  appropriate bias, and compose (typically 'UndefinedCompositeOp') given.
%
%  The format of the MorphologyApply method is:
%
%      Image *MorphologyApply(const Image *image,const MorphologyMethod method,
%        const ssize_t iterations,const KernelInfo *kernel,
%        const CompositeOperator compose,const double bias,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the source image
%
%    o method: the morphology method to be applied.
%
%    o iterations: apply the operation this many times (or no change).
%                  A value of -1 means loop until no change found.
%                  How this is applied may depend on the morphology method.
%                  Typically this is a value of 1.
%
%    o channel: the channel type.
%
%    o kernel: An array of double representing the morphology kernel.
%
%    o compose: How to handle or merge multi-kernel results.
%          If 'UndefinedCompositeOp' use default for the Morphology method.
%          If 'NoCompositeOp' force image to be re-iterated by each kernel.
%          Otherwise merge the results using the compose method given.
%
%    o bias: Convolution Output Bias.
%
%    o exception: return any errors or warnings in this structure.
% */ static ssize_t MorphologyPrimitive(const Image *image,Image *morphology_image, const MorphologyMethod method,const KernelInfo *kernel,const double bias, ExceptionInfo *exception) { #define MorphologyTag "Morphology/Image" CacheView *image_view, *morphology_view; OffsetInfo offset; register ssize_t j, y; size_t *changes, changed, width; MagickBooleanType status; MagickOffsetType progress; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(morphology_image != (Image *) NULL); assert(morphology_image->signature == MagickCoreSignature); assert(kernel != (KernelInfo *) NULL); assert(kernel->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); morphology_view=AcquireAuthenticCacheView(morphology_image,exception); width=image->columns+kernel->width-1; offset.x=0; offset.y=0; switch (method) { case ConvolveMorphology: case DilateMorphology: case DilateIntensityMorphology: case IterativeDistanceMorphology: { /* Kernel needs to used with reflection about origin. */ offset.x=(ssize_t) kernel->width-kernel->x-1; offset.y=(ssize_t) kernel->height-kernel->y-1; break; } case ErodeMorphology: case ErodeIntensityMorphology: case HitAndMissMorphology: case ThinningMorphology: case ThickenMorphology: { offset.x=kernel->x; offset.y=kernel->y; break; } default: { assert("Not a Primitive Morphology Method" != (char *) NULL); break; } } changed=0; changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(), sizeof(*changes)); if (changes == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++) changes[j]=0; if ((method == ConvolveMorphology) && (kernel->width == 1)) { register ssize_t x; /* Special handling (for speed) of vertical (blur) kernels. 
This performs its handling in columns rather than in rows. This is only done for convolve as it is the only method that generates very large 1-D vertical kernels (such as a 'BlurKernel') */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,morphology_image,image->columns,1) #endif for (x=0; x < (ssize_t) image->columns; x++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t r; ssize_t center; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,x,-offset.y,1,image->rows+ kernel->height-1,exception); q=GetCacheViewAuthenticPixels(morphology_view,x,0,1, morphology_image->rows,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) GetPixelChannels(image)*offset.y; for (r=0; r < (ssize_t) image->rows; r++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait morphology_traits, traits; register const MagickRealType *magick_restrict k; register const Quantum *magick_restrict pixels; register ssize_t v; size_t count; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); morphology_traits=GetPixelChannelTraits(morphology_image,channel); if ((traits == UndefinedPixelTrait) || (morphology_traits == UndefinedPixelTrait)) continue; if ((traits & CopyPixelTrait) != 0) { SetPixelChannel(morphology_image,channel,p[center+i],q); continue; } k=(&kernel->values[kernel->height-1]); pixels=p; pixel=bias; gamma=0.0; count=0; if (((image->alpha_trait & BlendPixelTrait) == 0) || ((morphology_traits & BlendPixelTrait) == 0)) for (v=0; v < (ssize_t) kernel->height; v++) { if (!IsNaN(*k)) { pixel+=(*k)*pixels[i]; gamma+=(*k); count++; } k--; pixels+=GetPixelChannels(image); } else for (v=0; v < (ssize_t) 
kernel->height; v++) { if (!IsNaN(*k)) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=alpha*(*k)*pixels[i]; gamma+=alpha*(*k); count++; } k--; pixels+=GetPixelChannels(image); } if (fabs(pixel-p[center+i]) > MagickEpsilon) changes[id]++; gamma=PerceptibleReciprocal(gamma); if (count != 0) gamma*=(double) kernel->height/count; SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma* pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(morphology_image); } if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,MorphologyTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } morphology_image->type=image->type; morphology_view=DestroyCacheView(morphology_view); image_view=DestroyCacheView(image_view); for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++) changed+=changes[j]; changes=(size_t *) RelinquishMagickMemory(changes); return(status ? (ssize_t) changed : 0); } /* Normal handling of horizontal or rectangular kernels (row by row). 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,morphology_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; ssize_t center; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width, kernel->height,exception); q=GetCacheViewAuthenticPixels(morphology_view,0,y,morphology_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) (GetPixelChannels(image)*width*offset.y+ GetPixelChannels(image)*offset.x); for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, intensity, maximum, minimum, pixel; PixelChannel channel; PixelTrait morphology_traits, traits; register const MagickRealType *magick_restrict k; register const Quantum *magick_restrict pixels, *magick_restrict quantum_pixels; register ssize_t u; size_t count; ssize_t v; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); morphology_traits=GetPixelChannelTraits(morphology_image,channel); if ((traits == UndefinedPixelTrait) || (morphology_traits == UndefinedPixelTrait)) continue; if ((traits & CopyPixelTrait) != 0) { SetPixelChannel(morphology_image,channel,p[center+i],q); continue; } pixels=p; quantum_pixels=(const Quantum *) NULL; maximum=0.0; minimum=(double) QuantumRange; switch (method) { case ConvolveMorphology: { pixel=bias; break; } case DilateMorphology: case ErodeIntensityMorphology: { pixel=0.0; break; } case HitAndMissMorphology: case ErodeMorphology: { pixel=QuantumRange; break; } default: { pixel=(double) p[center+i]; break; } } count=0; gamma=1.0; switch (method) { case ConvolveMorphology: { /* Weighted 
Average of pixels using reflected kernel For correct working of this operation for asymetrical kernels, the kernel needs to be applied in its reflected form. That is its values needs to be reversed. Correlation is actually the same as this but without reflecting the kernel, and thus 'lower-level' that Convolution. However as Convolution is the more common method used, and it does not really cost us much in terms of processing to use a reflected kernel, so it is Convolution that is implemented. Correlation will have its kernel reflected before calling this function to do a Convolve. For more details of Correlation vs Convolution see http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf */ k=(&kernel->values[kernel->width*kernel->height-1]); if (((image->alpha_trait & BlendPixelTrait) == 0) || ((morphology_traits & BlendPixelTrait) == 0)) { /* No alpha blending. */ for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { pixel+=(*k)*pixels[i]; count++; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } /* Alpha blending. */ gamma=0.0; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=alpha*(*k)*pixels[i]; gamma+=alpha*(*k); count++; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case ErodeMorphology: { /* Minimum value within kernel neighbourhood. The kernel is not reflected for this operation. In normal Greyscale Morphology, the kernel value should be added to the real value, this is currently not done, due to the nature of the boolean kernels being used. 
*/ k=kernel->values; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k) && (*k >= 0.5)) { if ((double) pixels[i] < pixel) pixel=(double) pixels[i]; } k++; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case DilateMorphology: { /* Maximum value within kernel neighbourhood. For correct working of this operation for asymetrical kernels, the kernel needs to be applied in its reflected form. That is its values needs to be reversed. In normal Greyscale Morphology, the kernel value should be added to the real value, this is currently not done, due to the nature of the boolean kernels being used. */ k=(&kernel->values[kernel->width*kernel->height-1]); for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k) && (*k > 0.5)) { if ((double) pixels[i] > pixel) pixel=(double) pixels[i]; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case HitAndMissMorphology: case ThinningMorphology: case ThickenMorphology: { /* Minimum of foreground pixel minus maxumum of background pixels. The kernel is not reflected for this operation, and consists of both foreground and background pixel neighbourhoods, 0.0 for background, and 1.0 for foreground with either Nan or 0.5 values for don't care. This never produces a meaningless negative result. Such results cause Thinning/Thicken to not work correctly when used against a greyscale image. 
*/ k=kernel->values; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { if (*k > 0.7) { if ((double) pixels[i] < pixel) pixel=(double) pixels[i]; } else if (*k < 0.3) { if ((double) pixels[i] > maximum) maximum=(double) pixels[i]; } count++; } k++; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } pixel-=maximum; if (pixel < 0.0) pixel=0.0; if (method == ThinningMorphology) pixel=(double) p[center+i]-pixel; else if (method == ThickenMorphology) pixel+=(double) p[center+i]+pixel; break; } case ErodeIntensityMorphology: { /* Select pixel with minimum intensity within kernel neighbourhood. The kernel is not reflected for this operation. */ k=kernel->values; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k) && (*k >= 0.5)) { intensity=(double) GetPixelIntensity(image,pixels); if (intensity < minimum) { quantum_pixels=pixels; pixel=(double) pixels[i]; minimum=intensity; } count++; } k++; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case DilateIntensityMorphology: { /* Select pixel with maximum intensity within kernel neighbourhood. The kernel is not reflected for this operation. */ k=(&kernel->values[kernel->width*kernel->height-1]); for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k) && (*k >= 0.5)) { intensity=(double) GetPixelIntensity(image,pixels); if (intensity > maximum) { pixel=(double) pixels[i]; quantum_pixels=pixels; maximum=intensity; } count++; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case IterativeDistanceMorphology: { /* Compute th iterative distance from black edge of a white image shape. Essentually white values are decreased to the smallest 'distance from edge' it can find. 
It works by adding kernel values to the neighbourhood, and and select the minimum value found. The kernel is rotated before use, so kernel distances match resulting distances, when a user provided asymmetric kernel is applied. This code is nearly identical to True GrayScale Morphology but not quite. GreyDilate Kernel values added, maximum value found Kernel is rotated before use. GrayErode: Kernel values subtracted and minimum value found No kernel rotation used. Note the the Iterative Distance method is essentially a GrayErode, but with negative kernel values, and kernel rotation applied. */ k=(&kernel->values[kernel->width*kernel->height-1]); for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); count++; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case UndefinedMorphology: default: break; } if (fabs(pixel-p[center+i]) > MagickEpsilon) changes[id]++; if (quantum_pixels != (const Quantum *) NULL) { SetPixelChannel(morphology_image,channel,quantum_pixels[i],q); continue; } gamma=PerceptibleReciprocal(gamma); if (count != 0) gamma*=(double) kernel->height*kernel->width/count; SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(morphology_image); } if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,MorphologyTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } morphology_view=DestroyCacheView(morphology_view); image_view=DestroyCacheView(image_view); for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++) changed+=changes[j]; changes=(size_t *) 
RelinquishMagickMemory(changes); return(status ? (ssize_t) changed : -1); } /* This is almost identical to the MorphologyPrimative() function above, but applies the primitive directly to the actual image using two passes, once in each direction, with the results of the previous (and current) row being re-used. That is after each row is 'Sync'ed' into the image, the next row makes use of those values as part of the calculation of the next row. It repeats, but going in the oppisite (bottom-up) direction. Because of this 're-use of results' this function can not make use of multi- threaded, parellel processing. */ static ssize_t MorphologyPrimitiveDirect(Image *image, const MorphologyMethod method,const KernelInfo *kernel, ExceptionInfo *exception) { CacheView *morphology_view, *image_view; MagickBooleanType status; MagickOffsetType progress; OffsetInfo offset; size_t width, changed; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(kernel != (KernelInfo *) NULL); assert(kernel->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=MagickTrue; changed=0; progress=0; switch(method) { case DistanceMorphology: case VoronoiMorphology: { /* Kernel reflected about origin. */ offset.x=(ssize_t) kernel->width-kernel->x-1; offset.y=(ssize_t) kernel->height-kernel->y-1; break; } default: { offset.x=kernel->x; offset.y=kernel->y; break; } } /* Two views into same image, do not thread. */ image_view=AcquireVirtualCacheView(image,exception); morphology_view=AcquireAuthenticCacheView(image,exception); width=image->columns+kernel->width-1; for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; /* Read virtual pixels, and authentic pixels, from the same image! We read using virtual to get virtual pixel handling, but write back into the same image. 
Only top half of kernel is processed as we do a single pass downward through the image iterating the distance function as we go. */ if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,(size_t) offset.y+1,exception); q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double pixel; PixelChannel channel; PixelTrait traits; register const MagickRealType *magick_restrict k; register const Quantum *magick_restrict pixels; register ssize_t u; ssize_t v; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & CopyPixelTrait) != 0) continue; pixels=p; pixel=(double) QuantumRange; switch (method) { case DistanceMorphology: { k=(&kernel->values[kernel->width*kernel->height-1]); for (v=0; v <= offset.y; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } k=(&kernel->values[kernel->width*(kernel->y+1)-1]); pixels=q-offset.x*GetPixelChannels(image); for (u=0; u < offset.x; u++) { if (!IsNaN(*k) && ((x+u-offset.x) >= 0)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); } k--; pixels+=GetPixelChannels(image); } break; } case VoronoiMorphology: { k=(&kernel->values[kernel->width*kernel->height-1]); for (v=0; v < offset.y; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } k=(&kernel->values[kernel->width*(kernel->y+1)-1]); 
pixels=q-offset.x*GetPixelChannels(image); for (u=0; u < offset.x; u++) { if (!IsNaN(*k) && ((x+u-offset.x) >= 0)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); } k--; pixels+=GetPixelChannels(image); } break; } default: break; } if (fabs(pixel-q[i]) > MagickEpsilon) changed++; q[i]=ClampToQuantum(pixel); } p+=GetPixelChannels(image); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,MorphologyTag,progress,2*image->rows); if (proceed == MagickFalse) status=MagickFalse; } } morphology_view=DestroyCacheView(morphology_view); image_view=DestroyCacheView(image_view); /* Do the reverse pass through the image. */ image_view=AcquireVirtualCacheView(image,exception); morphology_view=AcquireAuthenticCacheView(image,exception); for (y=(ssize_t) image->rows-1; y >= 0; y--) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; /* Read virtual pixels, and authentic pixels, from the same image. We read using virtual to get virtual pixel handling, but write back into the same image. Only the bottom half of the kernel is processed as we up the image. 
*/ if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-offset.x,y,width,(size_t) kernel->y+1,exception); q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } p+=(image->columns-1)*GetPixelChannels(image); q+=(image->columns-1)*GetPixelChannels(image); for (x=(ssize_t) image->columns-1; x >= 0; x--) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double pixel; PixelChannel channel; PixelTrait traits; register const MagickRealType *magick_restrict k; register const Quantum *magick_restrict pixels; register ssize_t u; ssize_t v; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & CopyPixelTrait) != 0) continue; pixels=p; pixel=(double) QuantumRange; switch (method) { case DistanceMorphology: { k=(&kernel->values[kernel->width*(kernel->y+1)-1]); for (v=offset.y; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } k=(&kernel->values[kernel->width*kernel->y+kernel->x-1]); pixels=q; for (u=offset.x+1; u < (ssize_t) kernel->width; u++) { pixels+=GetPixelChannels(image); if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); } k--; } break; } case VoronoiMorphology: { k=(&kernel->values[kernel->width*(kernel->y+1)-1]); for (v=offset.y; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } 
k=(&kernel->values[kernel->width*(kernel->y+1)-1]); pixels=q; for (u=offset.x+1; u < (ssize_t) kernel->width; u++) { pixels+=GetPixelChannels(image); if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); } k--; } break; } default: break; } if (fabs(pixel-q[i]) > MagickEpsilon) changed++; q[i]=ClampToQuantum(pixel); } p-=GetPixelChannels(image); q-=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,MorphologyTag,progress,2*image->rows); if (proceed == MagickFalse) status=MagickFalse; } } morphology_view=DestroyCacheView(morphology_view); image_view=DestroyCacheView(image_view); return(status ? (ssize_t) changed : -1); } /* Apply a Morphology by calling one of the above low level primitive application functions. This function handles any iteration loops, composition or re-iteration of results, and compound morphology methods that is based on multiple low-level (staged) morphology methods. Basically this provides the complex glue between the requested morphology method and raw low-level implementation (above). 
*/
/* Apply a morphology method (possibly compound, possibly multi-kernel) to an
 * image.  Handles the four nested loops (method iteration, multi-kernel list,
 * compound staging, kernel iteration) plus special direct-modify methods
 * (Distance/Voronoi) and final composition of multi-kernel results.
 *
 * Review fix: two statements used a duplicated "(void) (void)" cast before
 * FormatLocaleFile(); reduced to a single "(void)".
 */
MagickPrivate Image *MorphologyApply(const Image *image,
  const MorphologyMethod method, const ssize_t iterations,
  const KernelInfo *kernel, const CompositeOperator compose,
  const double bias, ExceptionInfo *exception)
{
  CompositeOperator
    curr_compose;

  Image
    *curr_image,    /* Image we are working with or iterating */
    *work_image,    /* secondary image for primitive iteration */
    *save_image,    /* saved image - for 'edge' method only */
    *rslt_image;    /* resultant image - after multi-kernel handling */

  KernelInfo
    *reflected_kernel, /* A reflected copy of the kernel (if needed) */
    *norm_kernel,      /* the current normal un-reflected kernel */
    *rflt_kernel,      /* the current reflected kernel (if needed) */
    *this_kernel;      /* the kernel being applied */

  MorphologyMethod
    primitive;      /* the current morphology primitive being applied */

  CompositeOperator
    rslt_compose;   /* multi-kernel compose method for results to use */

  MagickBooleanType
    special,        /* do we use a direct modify function? */
    verbose;        /* verbose output of results */

  size_t
    method_loop,    /* Loop 1: number of compound method iterations (norm 1) */
    method_limit,   /*         maximum number of compound method iterations */
    kernel_number,  /* Loop 2: the kernel number being applied */
    stage_loop,     /* Loop 3: primitive loop for compound morphology */
    stage_limit,    /*         how many primitives are in this compound */
    kernel_loop,    /* Loop 4: iterate the kernel over image */
    kernel_limit,   /*         number of times to iterate kernel */
    count,          /* total count of primitive steps applied */
    kernel_changed, /* total count of changed using iterated kernel */
    method_changed; /* total count of changed over method iteration */

  ssize_t
    changed;        /* number pixels changed by last primitive operation */

  char
    v_info[MagickPathExtent];

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  count = 0;      /* number of low-level morphology primitives performed */
  if ( iterations == 0 )
    return((Image *) NULL);   /* null operation - nothing to do! */

  kernel_limit = (size_t) iterations;
  if ( iterations < 0 )  /* negative interations = infinite (well alomst) */
    kernel_limit = image->columns>image->rows ? image->columns : image->rows;

  verbose = IsStringTrue(GetImageArtifact(image,"debug"));

  /* initialise for cleanup */
  curr_image = (Image *) image;
  curr_compose = image->compose;
  (void) curr_compose;
  work_image = save_image = rslt_image = (Image *) NULL;
  reflected_kernel = (KernelInfo *) NULL;

  /* Initialize specific methods
   * + which loop should use the given iteratations
   * + how many primitives make up the compound morphology
   * + multi-kernel compose method to use (by default)
   */
  method_limit = 1;       /* just do method once, unless otherwise set */
  stage_limit = 1;        /* assume method is not a compound */
  special = MagickFalse;  /* assume it is NOT a direct modify primitive */
  rslt_compose = compose; /* and we are composing multi-kernels as given */
  switch( method ) {
    case SmoothMorphology:  /* 4 primitive compound morphology */
      stage_limit = 4;
      break;
    case OpenMorphology:    /* 2 primitive compound morphology */
    case OpenIntensityMorphology:
    case TopHatMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case EdgeMorphology:
      stage_limit = 2;
      break;
    case HitAndMissMorphology:
      rslt_compose = LightenCompositeOp;  /* Union of multi-kernel results */
      /* FALL THUR */
    case ThinningMorphology:
    case ThickenMorphology:
      method_limit = kernel_limit;  /* iterate the whole method */
      kernel_limit = 1;             /* do not do kernel iteration  */
      break;
    case DistanceMorphology:
    case VoronoiMorphology:
      special = MagickTrue;         /* use special direct primative */
      break;
    default:
      break;
  }

  /* Apply special methods with special requirments
  ** For example, single run only, or post-processing requirements
  */
  if ( special != MagickFalse )
    {
      rslt_image=CloneImage(image,0,0,MagickTrue,exception);
      if (rslt_image == (Image *) NULL)
        goto error_cleanup;
      if (SetImageStorageClass(rslt_image,DirectClass,exception) == MagickFalse)
        goto error_cleanup;

      changed=MorphologyPrimitiveDirect(rslt_image,method,kernel,exception);

      if (verbose != MagickFalse)
        (void) FormatLocaleFile(stderr,
          "%s:%.20g.%.20g #%.20g => Changed %.20g\n",
          CommandOptionToMnemonic(MagickMorphologyOptions, method),
          1.0,0.0,1.0, (double) changed);

      if ( changed < 0 )
        goto error_cleanup;

      if ( method == VoronoiMorphology ) {
        /* Preserve the alpha channel of input image - but turned it off */
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
        (void) CompositeImage(rslt_image,image,CopyAlphaCompositeOp,
          MagickTrue,0,0,exception);
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
      }
      goto exit_cleanup;
    }

  /* Handle user (caller) specified multi-kernel composition method */
  if ( compose != UndefinedCompositeOp )
    rslt_compose = compose;  /* override default composition for method */
  if ( rslt_compose == UndefinedCompositeOp )
    rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */

  /* Some methods require a reflected kernel to use with primitives.
   * Create the reflected kernel for those methods. */
  switch ( method ) {
    case CorrelateMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case SmoothMorphology:
      reflected_kernel = CloneKernelInfo(kernel);
      if (reflected_kernel == (KernelInfo *) NULL)
        goto error_cleanup;
      RotateKernelInfo(reflected_kernel,180);
      break;
    default:
      break;
  }

  /* Loops around more primitive morpholgy methods
  **  erose, dilate, open, close, smooth, edge, etc...
  */
  /* Loop 1:  iterate the compound method */
  method_loop = 0;
  method_changed = 1;
  while ( method_loop < method_limit && method_changed > 0 ) {
    method_loop++;
    method_changed = 0;

    /* Loop 2:  iterate over each kernel in a multi-kernel list */
    norm_kernel = (KernelInfo *) kernel;
    this_kernel = (KernelInfo *) kernel;
    rflt_kernel = reflected_kernel;

    kernel_number = 0;
    while ( norm_kernel != NULL ) {

      /* Loop 3: Compound Morphology Staging - Select Primative to apply */
      stage_loop = 0;          /* the compound morphology stage number */
      while ( stage_loop < stage_limit ) {
        stage_loop++;   /* The stage of the compound morphology */

        /* Select primitive morphology for this stage of compound method */
        this_kernel = norm_kernel; /* default use unreflected kernel */
        primitive = method;        /* Assume method is a primitive */
        switch( method ) {
          case ErodeMorphology:      /* just erode */
          case EdgeInMorphology:     /* erode and image difference */
            primitive = ErodeMorphology;
            break;
          case DilateMorphology:     /* just dilate */
          case EdgeOutMorphology:    /* dilate and image difference */
            primitive = DilateMorphology;
            break;
          case OpenMorphology:       /* erode then dialate */
          case TopHatMorphology:     /* open and image difference */
            primitive = ErodeMorphology;
            if ( stage_loop == 2 )
              primitive = DilateMorphology;
            break;
          case OpenIntensityMorphology:
            primitive = ErodeIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = DilateIntensityMorphology;
            break;
          case CloseMorphology:      /* dilate, then erode */
          case BottomHatMorphology:  /* close and image difference */
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = DilateMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeMorphology;
            break;
          case CloseIntensityMorphology:
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = DilateIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeIntensityMorphology;
            break;
          case SmoothMorphology:         /* open, close */
            switch ( stage_loop ) {
              case 1: /* start an open method, which starts with Erode */
                primitive = ErodeMorphology;
                break;
              case 2:  /* now Dilate the Erode */
                primitive = DilateMorphology;
                break;
              case 3:  /* Reflect kernel a close */
                this_kernel = rflt_kernel; /* use the reflected kernel */
                primitive = DilateMorphology;
                break;
              case 4:  /* Finish the Close */
                this_kernel = rflt_kernel; /* use the reflected kernel */
                primitive = ErodeMorphology;
                break;
            }
            break;
          case EdgeMorphology:        /* dilate and erode difference */
            primitive = DilateMorphology;
            if ( stage_loop == 2 ) {
              save_image = curr_image;      /* save the image difference */
              curr_image = (Image *) image;
              primitive = ErodeMorphology;
            }
            break;
          case CorrelateMorphology:
            /* A Correlation is a Convolution with a reflected kernel.
            ** However a Convolution is a weighted sum using a reflected
            ** kernel.  It may seem stange to convert a Correlation into a
            ** Convolution as the Correlation is the simplier method, but
            ** Convolution is much more commonly used, and it makes sense to
            ** implement it directly so as to avoid the need to duplicate the
            ** kernel when it is not required (which is typically the
            ** default).
            */
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = ConvolveMorphology;
            break;
          default:
            break;
        }
        assert( this_kernel != (KernelInfo *) NULL );

        /* Extra information for debugging compound operations */
        if (verbose != MagickFalse) {
          if ( stage_limit > 1 )
            (void) FormatLocaleString(v_info,MagickPathExtent,
              "%s:%.20g.%.20g -> ",
              CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
              method_loop,(double) stage_loop);
          else if ( primitive != method )
            (void) FormatLocaleString(v_info, MagickPathExtent, "%s:%.20g -> ",
              CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
              method_loop);
          else
            v_info[0] = '\0';
        }

        /* Loop 4: Iterate the kernel with primitive */
        kernel_loop = 0;
        kernel_changed = 0;
        changed = 1;
        while ( kernel_loop < kernel_limit && changed > 0 ) {
          kernel_loop++;     /* the iteration of this kernel */

          /* Create a clone as the destination image, if not yet defined */
          if ( work_image == (Image *) NULL )
            {
              work_image=CloneImage(image,0,0,MagickTrue,exception);
              if (work_image == (Image *) NULL)
                goto error_cleanup;
              if (SetImageStorageClass(work_image,DirectClass,exception)
                   == MagickFalse)
                goto error_cleanup;
            }

          /* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
          count++;
          changed = MorphologyPrimitive(curr_image, work_image, primitive,
                        this_kernel, bias, exception);

          if (verbose != MagickFalse) {
            if ( kernel_loop > 1 )
              (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */
            /* FIX: was a duplicated "(void) (void)" cast */
            (void) FormatLocaleFile(stderr,
              "%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
              v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
              primitive),(this_kernel == rflt_kernel ) ? "*" : "",
              (double) (method_loop+kernel_loop-1),(double) kernel_number,
              (double) count,(double) changed);
          }
          if ( changed < 0 )
            goto error_cleanup;
          kernel_changed += changed;
          method_changed += changed;

          /* prepare next loop */
          { Image *tmp = work_image;   /* swap images for iteration */
            work_image = curr_image;
            curr_image = tmp;
          }
          if ( work_image == image )
            work_image = (Image *) NULL; /* replace input 'image' */

        } /* End Loop 4: Iterate the kernel with primitive */

        if (verbose != MagickFalse && kernel_changed != (size_t)changed)
          (void) FormatLocaleFile(stderr, " Total %.20g",(double) kernel_changed);
        if (verbose != MagickFalse && stage_loop < stage_limit)
          (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */

#if 0
    (void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
    (void) FormatLocaleFile(stderr, " curr =0x%lx\n", (unsigned long)curr_image);
    (void) FormatLocaleFile(stderr, " work =0x%lx\n", (unsigned long)work_image);
    (void) FormatLocaleFile(stderr, " save =0x%lx\n", (unsigned long)save_image);
    (void) FormatLocaleFile(stderr, " union=0x%lx\n", (unsigned long)rslt_image);
#endif

      } /* End Loop 3: Primative (staging) Loop for Coumpound Methods */

      /*  Final Post-processing for some Compound Methods
      **
      ** The removal of any 'Sync' channel flag in the Image Compositon
      ** below ensures the methematical compose method is applied in a
      ** purely mathematical way, and only to the selected channels.
      ** Turn off SVG composition 'alpha blending'.
      */
      switch( method ) {
        case EdgeOutMorphology:
        case EdgeInMorphology:
        case TopHatMorphology:
        case BottomHatMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference with original image",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          break;
        case EdgeMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference of Dilate and Erode",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,save_image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          save_image = DestroyImage(save_image); /* finished with save image */
          break;
        default:
          break;
      }

      /* multi-kernel handling:  re-iterate, or compose results */
      if ( kernel->next == (KernelInfo *) NULL )
        rslt_image = curr_image;   /* just return the resulting image */
      else if ( rslt_compose == NoCompositeOp )
        { if (verbose != MagickFalse) {
            if ( this_kernel->next != (KernelInfo *) NULL )
              (void) FormatLocaleFile(stderr, " (re-iterate)");
            else
              (void) FormatLocaleFile(stderr, " (done)");
          }
          rslt_image = curr_image; /* return result, and re-iterate */
        }
      else if ( rslt_image == (Image *) NULL)
        { if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (save for compose)");
          rslt_image = curr_image;
          curr_image = (Image *) image;  /* continue with original image */
        }
      else
        { /* Add the new 'current' result to the composition
          **
          ** The removal of any 'Sync' channel flag in the Image Compositon
          ** below ensures the methematical compose method is applied in a
          ** purely mathematical way, and only to the selected channels.
          ** IE: Turn off SVG composition 'alpha blending'.
          */
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (compose \"%s\")",
              CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
          (void) CompositeImage(rslt_image,curr_image,rslt_compose,MagickTrue,
            0,0,exception);
          curr_image = DestroyImage(curr_image);
          curr_image = (Image *) image;  /* continue with original image */
        }
      if (verbose != MagickFalse)
        (void) FormatLocaleFile(stderr, "\n");

      /* loop to the next kernel in a multi-kernel list */
      norm_kernel = norm_kernel->next;
      if ( rflt_kernel != (KernelInfo *) NULL )
        rflt_kernel = rflt_kernel->next;
      kernel_number++;
    } /* End Loop 2: Loop over each kernel */

  } /* End Loop 1: compound method interation */

  goto exit_cleanup;

  /* Yes goto's are bad, but it makes cleanup lot more efficient */
error_cleanup:
  if ( curr_image == rslt_image )
    curr_image = (Image *) NULL;
  if ( rslt_image != (Image *) NULL )
    rslt_image = DestroyImage(rslt_image);
exit_cleanup:
  if ( curr_image == rslt_image || curr_image == image )
    curr_image = (Image *) NULL;
  if ( curr_image != (Image *) NULL )
    curr_image = DestroyImage(curr_image);
  if ( work_image != (Image *) NULL )
    work_image = DestroyImage(work_image);
  if ( save_image != (Image *) NULL )
    save_image = DestroyImage(save_image);
  if ( reflected_kernel != (KernelInfo *) NULL )
    reflected_kernel = DestroyKernelInfo(reflected_kernel);
  return(rslt_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o r p h o l o g y I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MorphologyImage() applies a user supplied kernel to the image according to
%  the given mophology method.
%
%  This function applies any and all user defined settings before calling
%  the above internal function MorphologyApply().
%
%  User defined settings include...
%    * Output Bias for Convolution and correlation ("-define convolve:bias=??")
%    * Kernel Scale/normalize settings ("-define convolve:scale=??")
%      This can also includes the addition of a scaled unity kernel.
%    * Show Kernel being applied ("-define morphology:showKernel=1")
%
%  Other operators that do not want user supplied options interfering,
%  especially "convolve:bias" and "morphology:showKernel" should use
%  MorphologyApply() directly.
%
%  The format of the MorphologyImage method is:
%
%      Image *MorphologyImage(const Image *image,MorphologyMethod method,
%        const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o method: the morphology method to be applied.
%
%    o iterations: apply the operation this many times (or no change).
%      A value of -1 means loop until no change found.
%      How this is applied may depend on the morphology method.
%      Typically this is a value of 1.
%
%    o kernel: An array of double representing the morphology kernel.
%      Warning: kernel may be normalized for the Convolve method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphologyImage(const Image *image,
  const MorphologyMethod method,const ssize_t iterations,
  const KernelInfo *kernel,ExceptionInfo *exception)
{
  const char
    *artifact;

  CompositeOperator
    compose;

  double
    bias;

  Image
    *morphology_image;

  KernelInfo
    *curr_kernel;

  /* curr_kernel aliases the caller's kernel until a user setting forces a
   * private (cloned) copy; the cleanup at the end frees only a clone. */
  curr_kernel = (KernelInfo *) kernel;
  bias=0.0;
  compose = UndefinedCompositeOp;  /* use default for method */

  /* Apply Convolve/Correlate Normalization and Scaling Factors.
   * This is done BEFORE the ShowKernelInfo() function is called so that
   * users can see the results of the 'option:convolve:scale' option.
   */
  if ( method == ConvolveMorphology || method == CorrelateMorphology ) {
      /* Get the bias value as it will be needed */
      artifact = GetImageArtifact(image,"convolve:bias");
      if ( artifact != (const char *) NULL) {
        if (IsGeometry(artifact) == MagickFalse)
          (void) ThrowMagickException(exception,GetMagickModule(),
               OptionWarning,"InvalidSetting","'%s' '%s'",
               "convolve:bias",artifact);
        else
          bias=StringToDoubleInterval(artifact,(double) QuantumRange+1.0);
      }

      /* Scale kernel according to user wishes */
      artifact = GetImageArtifact(image,"convolve:scale");
      if ( artifact != (const char *) NULL ) {
        if (IsGeometry(artifact) == MagickFalse)
          (void) ThrowMagickException(exception,GetMagickModule(),
               OptionWarning,"InvalidSetting","'%s' '%s'",
               "convolve:scale",artifact);
        else {
          /* clone-on-write: never scale the caller's kernel in place */
          if ( curr_kernel == kernel )
            curr_kernel = CloneKernelInfo(kernel);
          if (curr_kernel == (KernelInfo *) NULL)
            return((Image *) NULL);
          ScaleGeometryKernelInfo(curr_kernel, artifact);
        }
      }
    }

  /* display the (normalized) kernel via stderr */
  artifact=GetImageArtifact(image,"morphology:showKernel");
  if (IsStringTrue(artifact) != MagickFalse)
    ShowKernelInfo(curr_kernel);

  /* Override the default handling of multi-kernel morphology results
   * If 'Undefined' use the default method
   * If 'None' (default for 'Convolve') re-iterate previous result
   * Otherwise merge resulting images using compose method given.
   * Default for 'HitAndMiss' is 'Lighten'.
   */
  { ssize_t
      parse;

    artifact = GetImageArtifact(image,"morphology:compose");
    if ( artifact != (const char *) NULL) {
      parse=ParseCommandOption(MagickComposeOptions,
        MagickFalse,artifact);
      if ( parse < 0 )
        (void) ThrowMagickException(exception,GetMagickModule(),
             OptionWarning,"UnrecognizedComposeOperator","'%s' '%s'",
             "morphology:compose",artifact);
      else
        compose=(CompositeOperator)parse;
    }
  }
  /* Apply the Morphology */
  morphology_image = MorphologyApply(image,method,iterations,
    curr_kernel,compose,bias,exception);

  /* Cleanup and Exit */
  if ( curr_kernel != kernel )
    curr_kernel=DestroyKernelInfo(curr_kernel);

  return(morphology_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     R o t a t e K e r n e l I n f o                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RotateKernelInfo() rotates the kernel by the angle given.
%
%  Currently it is restricted to 90 degree angles, of either 1D kernels
%  or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels.
%  It will ignore usless rotations for specific 'named' built-in kernels.
%
%  The format of the RotateKernelInfo method is:
%
%      void RotateKernelInfo(KernelInfo *kernel, double angle)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
%    o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
  /* angle the lower kernels first */
  if ( kernel->next != (KernelInfo *) NULL)
    RotateKernelInfo(kernel->next, angle);

  /* WARNING: Currently assumes the kernel (rightly) is horizontally symetrical
  **
  ** TODO: expand beyond simple 90 degree rotates, flips and flops
  */

  /* Modulus the angle */
  angle = fmod(angle, 360.0);
  if ( angle < 0 )
    angle += 360.0;

  if ( 337.5 < angle || angle <= 22.5 )
    return;   /* Near zero angle - no change! - At least not at this time */

  /* Handle special cases */
  switch (kernel->type) {
    /* These built-in kernels are cylindrical kernels, rotating is useless */
    case GaussianKernel:
    case DoGKernel:
    case LoGKernel:
    case DiskKernel:
    case PeaksKernel:
    case LaplacianKernel:
    case ChebyshevKernel:
    case ManhattanKernel:
    case EuclideanKernel:
      return;

    /* These may be rotatable at non-90 angles in the future */
    /* but simply rotating them in multiples of 90 degrees is useless */
    case SquareKernel:
    case DiamondKernel:
    case PlusKernel:
    case CrossKernel:
      return;

    /* These only allows a +/-90 degree rotation (by transpose) */
    /* A 180 degree rotation is useless */
    case BlurKernel:
      if ( 135.0 < angle && angle <= 225.0 )
        return;
      if ( 225.0 < angle && angle <= 315.0 )
        angle -= 180;
      break;

    default:
      break;
  }
  /* Attempt rotations by 45 degrees  -- 3x3 kernels only */
  if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
    {
      if ( kernel->width == 3 && kernel->height == 3 )
        { /* Rotate a 3x3 square by 45 degree angle
          ** (an 8-cycle of the ring of values around the center) */
          double t  = kernel->values[0];
          kernel->values[0] = kernel->values[3];
          kernel->values[3] = kernel->values[6];
          kernel->values[6] = kernel->values[7];
          kernel->values[7] = kernel->values[8];
          kernel->values[8] = kernel->values[5];
          kernel->values[5] = kernel->values[2];
          kernel->values[2] = kernel->values[1];
          kernel->values[1] = t;
          /* rotate non-centered origin */
          if ( kernel->x != 1 || kernel->y != 1 ) {
            ssize_t x,y;
            /* origin expressed relative to the kernel center (1,1) */
            x = (ssize_t) kernel->x-1;
            y = (ssize_t) kernel->y-1;
                 if ( x == y  ) x = 0;
            else if ( x == 0  ) x = -y;
            else if ( x == -y ) y = 0;
            else if ( y == 0  ) y = x;
            kernel->x = (ssize_t) x+1;
            kernel->y = (ssize_t) y+1;
          }
          angle = fmod(angle+315.0, 360.0);  /* angle reduced 45 degrees */
          kernel->angle = fmod(kernel->angle+45.0, 360.0);
        }
      else
        perror("Unable to rotate non-3x3 kernel by 45 degrees");
    }
  if ( 45.0 < fmod(angle, 180.0)  && fmod(angle,180.0) <= 135.0 )
    {
      if ( kernel->width == 1 || kernel->height == 1 )
        { /* Do a transpose of a 1 dimensional kernel,
          ** which results in a fast 90 degree rotation of some type.
          */
          ssize_t
            t;
          t = (ssize_t) kernel->width;
          kernel->width = kernel->height;
          kernel->height = (size_t) t;
          t = kernel->x;
          kernel->x = kernel->y;
          kernel->y = t;
          if ( kernel->width == 1 ) {
            angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
            kernel->angle = fmod(kernel->angle+90.0, 360.0);
          } else {
            angle = fmod(angle+90.0, 360.0);   /* angle increased 90 degrees */
            kernel->angle = fmod(kernel->angle+270.0, 360.0);
          }
        }
      else if ( kernel->width == kernel->height )
        { /* Rotate a square array of values by 90 degrees
          ** (in-place, moving elements in 4-cycles) */
          { register ssize_t
              i,j,x,y;

            register MagickRealType
              *k,t;

            k=kernel->values;
            for( i=0, x=(ssize_t) kernel->width-1;  i<=x;   i++, x--)
              for( j=0, y=(ssize_t) kernel->height-1;  j<y;  j++, y--)
                { t                    = k[i+j*kernel->width];
                  k[i+j*kernel->width] = k[j+x*kernel->width];
                  k[j+x*kernel->width] = k[x+y*kernel->width];
                  k[x+y*kernel->width] = k[y+i*kernel->width];
                  k[y+i*kernel->width] = t;
                }
          }
          /* rotate the origin - relative to center of array */
          { register ssize_t x,y;
            x = (ssize_t) (kernel->x*2-kernel->width+1);
            y = (ssize_t) (kernel->y*2-kernel->height+1);
            kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
            kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
          }
          angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
          kernel->angle = fmod(kernel->angle+90.0, 360.0);
        }
      else
        perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
    }
  if ( 135.0 < angle && angle <= 225.0 )
    {
      /* For a 180 degree rotation - also know as a reflection
       * This is actually a very very common operation!
       * Basically all that is needed is a reversal of the kernel data!
       * And a reflection of the origon
       */
      MagickRealType
        t;
      register MagickRealType
        *k;
      ssize_t
        i, j;

      k=kernel->values;
      j=(ssize_t) (kernel->width*kernel->height-1);
      for (i=0;  i < j;  i++, j--)
        t=k[i],  k[i]=k[j],  k[j]=t;

      kernel->x = (ssize_t) kernel->width  - kernel->x - 1;
      kernel->y = (ssize_t) kernel->height - kernel->y - 1;
      angle = fmod(angle-180.0, 360.0);  /* angle+180 degrees */
      kernel->angle = fmod(kernel->angle+180.0, 360.0);
    }
  /* At this point angle should at least between -45 (315) and +45 degrees
   * In the future some form of non-orthogonal angled rotates could be
   * performed here, posibily with a linear kernel restriction.
   */

  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S c a l e G e o m e t r y K e r n e l I n f o                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleGeometryKernelInfo() takes a geometry argument string, typically
%  provided as a "-set option:convolve:scale {geometry}" user setting,
%  and modifies the kernel according to the parsed arguments of that setting.
%
%  The first argument (and any normalization flags) are passed to
%  ScaleKernelInfo() to scale/normalize the kernel.  The second argument
%  is then passed to UnityAddKernelInfo() to add a scled unity kernel
%  into the scaled/normalized kernel.
%
%  The format of the ScaleGeometryKernelInfo method is:
%
%      void ScaleGeometryKernelInfo(KernelInfo *kernel,
%        const double scaling_factor,const MagickStatusType normalize_flags)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to modify
%
%    o geometry:
%      The geometry string to parse, typically from the user provided
%      "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
  const char *geometry)
{
  MagickStatusType
    status;          /* which geometry components/flags were present */

  GeometryInfo
    geometry_info;   /* parsed rho (scale) and sigma (unity blend) values */

  /* Parse the user supplied geometry string */
  SetGeometryInfo(&geometry_info);
  status = ParseGeometry(geometry, &geometry_info);

#if 0
  /* For Debugging Geometry Input */
  (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
       status, geometry_info.rho, geometry_info.sigma,
       geometry_info.xi, geometry_info.psi );
#endif

  /* A trailing '%' means both arguments are percentages */
  if ( (status & PercentValue) != 0 )
    {
      geometry_info.rho *= 0.01;
      geometry_info.sigma *= 0.01;
    }

  /* Supply defaults for any argument the user omitted */
  if ( (status & RhoValue) == 0 )
    geometry_info.rho = 1.0;
  if ( (status & SigmaValue) == 0 )
    geometry_info.sigma = 0.0;

  /* First argument (plus any '!'/'^' flags): scale/normalize the kernel */
  ScaleKernelInfo(kernel, geometry_info.rho, (GeometryFlags) status);

  /* Second argument: blend in a scaled unity kernel (original image) */
  if ( (status & SigmaValue) != 0 )
    UnityAddKernelInfo(kernel, geometry_info.sigma);

  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S c a l e K e r n e l I n f o                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleKernelInfo() scales the given kernel list by the given amount, with or
%  without normalization of the sum of the kernel values (as per given flags).
%
%  By default (no flags given) the values within the kernel is scaled
%  directly using given scaling factor without change.
%
%  If either of the two 'normalize_flags' are given the kernel will first be
%  normalized and then further scaled by the scaling factor value given.
%
%  Kernel normalization ('normalize_flags' given) is designed to ensure that
%  any use of the kernel scaling factor with 'Convolve' or 'Correlate'
%  morphology methods will fall into -1.0 to +1.0 range.  Note that for
%  non-HDRI versions of IM this may cause images to have any negative results
%  clipped, unless some 'bias' is used.
%
%  More specifically.
Kernels which only contain positive values (such as a % 'Gaussian' kernel) will be scaled so that those values sum to +1.0, % ensuring a 0.0 to +1.0 output range for non-HDRI images. % % For Kernels that contain some negative values, (such as 'Sharpen' kernels) % the kernel will be scaled by the absolute of the sum of kernel values, so % that it will generally fall within the +/- 1.0 range. % % For kernels whose values sum to zero, (such as 'Laplician' kernels) kernel % will be scaled by just the sum of the postive values, so that its output % range will again fall into the +/- 1.0 range. % % For special kernels designed for locating shapes using 'Correlate', (often % only containing +1 and -1 values, representing foreground/brackground % matching) a special normalization method is provided to scale the positive % values separately to those of the negative values, so the kernel will be % forced to become a zero-sum kernel better suited to such searches. % % WARNING: Correct normalization of the kernel assumes that the '*_range' % attributes within the kernel structure have been correctly set during the % kernels creation. % % NOTE: The values used for 'normalize_flags' have been selected specifically % to match the use of geometry options, so that '!' means NormalizeValue, '^' % means CorrelateNormalizeValue. All other GeometryFlags values are ignored. % % The format of the ScaleKernelInfo method is: % % void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor, % const MagickStatusType normalize_flags ) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % o scaling_factor: % multiply all values (after normalization) by this factor if not % zero. If the kernel is normalized regardless of any flags. % % o normalize_flags: % GeometryFlags defining normalization method to use. 
% specifically: NormalizeValue, CorrelateNormalizeValue, % and/or PercentValue % */ MagickExport void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,const GeometryFlags normalize_flags) { register double pos_scale, neg_scale; register ssize_t i; /* do the other kernels in a multi-kernel list first */ if ( kernel->next != (KernelInfo *) NULL) ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags); /* Normalization of Kernel */ pos_scale = 1.0; if ( (normalize_flags&NormalizeValue) != 0 ) { if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon ) /* non-zero-summing kernel (generally positive) */ pos_scale = fabs(kernel->positive_range + kernel->negative_range); else /* zero-summing kernel */ pos_scale = kernel->positive_range; } /* Force kernel into a normalized zero-summing kernel */ if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) { pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon ) ? kernel->positive_range : 1.0; neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon ) ? -kernel->negative_range : 1.0; } else neg_scale = pos_scale; /* finialize scaling_factor for positive and negative components */ pos_scale = scaling_factor/pos_scale; neg_scale = scaling_factor/neg_scale; for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++) if (!IsNaN(kernel->values[i])) kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale; /* convolution output range */ kernel->positive_range *= pos_scale; kernel->negative_range *= neg_scale; /* maximum and minimum values in kernel */ kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale; kernel->minimum *= (kernel->minimum >= 0.0) ? 
pos_scale : neg_scale; /* swap kernel settings if user's scaling factor is negative */ if ( scaling_factor < MagickEpsilon ) { double t; t = kernel->positive_range; kernel->positive_range = kernel->negative_range; kernel->negative_range = t; t = kernel->maximum; kernel->maximum = kernel->minimum; kernel->minimum = 1; } return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h o w K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShowKernelInfo() outputs the details of the given kernel defination to % standard error, generally due to a users 'morphology:showKernel' option % request. % % The format of the ShowKernel method is: % % void ShowKernelInfo(const KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % */ MagickPrivate void ShowKernelInfo(const KernelInfo *kernel) { const KernelInfo *k; size_t c, i, u, v; for (c=0, k=kernel; k != (KernelInfo *) NULL; c++, k=k->next ) { (void) FormatLocaleFile(stderr, "Kernel"); if ( kernel->next != (KernelInfo *) NULL ) (void) FormatLocaleFile(stderr, " #%lu", (unsigned long) c ); (void) FormatLocaleFile(stderr, " \"%s", CommandOptionToMnemonic(MagickKernelOptions, k->type) ); if ( fabs(k->angle) >= MagickEpsilon ) (void) FormatLocaleFile(stderr, "@%lg", k->angle); (void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long) k->width,(unsigned long) k->height,(long) k->x,(long) k->y); (void) FormatLocaleFile(stderr, " with values from %.*lg to %.*lg\n", GetMagickPrecision(), k->minimum, GetMagickPrecision(), k->maximum); (void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg", GetMagickPrecision(), k->negative_range, GetMagickPrecision(), k->positive_range); if ( fabs(k->positive_range+k->negative_range) < MagickEpsilon ) (void) FormatLocaleFile(stderr, " (Zero-Summing)\n"); else if ( 
fabs(k->positive_range+k->negative_range-1.0) < MagickEpsilon ) (void) FormatLocaleFile(stderr, " (Normalized)\n"); else (void) FormatLocaleFile(stderr, " (Sum %.*lg)\n", GetMagickPrecision(), k->positive_range+k->negative_range); for (i=v=0; v < k->height; v++) { (void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) v ); for (u=0; u < k->width; u++, i++) if (IsNaN(k->values[i])) (void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan"); else (void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3, GetMagickPrecision(), (double) k->values[i]); (void) FormatLocaleFile(stderr,"\n"); } } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n i t y A d d K e r n a l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel % to the given pre-scaled and normalized Kernel. This in effect adds that % amount of the original image into the resulting convolution kernel. This % value is usually provided by the user as a percentage value in the % 'convolve:scale' setting. % % The resulting effect is to convert the defined kernels into blended % soft-blurs, unsharp kernels or into sharpening kernels. % % The format of the UnityAdditionKernelInfo method is: % % void UnityAdditionKernelInfo(KernelInfo *kernel, const double scale ) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % o scale: % scaling factor for the unity kernel to be added to % the given kernel. 
%
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
  const double scale)
{
  /* Process every kernel in the multi-kernel list, deepest first, so the
     whole list receives the same unity-kernel blend. */
  if ( kernel->next != (KernelInfo *) NULL)
    UnityAddKernelInfo(kernel->next, scale);

  /* Blend the scaled unity kernel into this kernel: the unity kernel is a
     single 1.0 at the kernel origin, so only the origin element changes. */
  kernel->values[kernel->x+kernel->y*kernel->width] += scale;

  /* the ranges/min/max cached in the kernel are now stale - recompute */
  CalcKernelMetaData(kernel);

  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     Z e r o K e r n e l N a n s                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroKernelNans() replaces any special 'nan' value that may be present in
%  the kernel with a zero value.  This is typically done when the kernel will
%  be used in special hardware (GPU) convolution processors, to simplify
%  matters.
%
%  The format of the ZeroKernelNans method is:
%
%      void ZeroKernelNans (KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
*/
MagickPrivate void ZeroKernelNans(KernelInfo *kernel)
{
  register size_t
    index,
    element_count;

  /* Walk the remainder of the multi-kernel list first */
  if (kernel->next != (KernelInfo *) NULL)
    ZeroKernelNans(kernel->next);

  /* Replace every 'nan' (don't-care) element of this kernel with zero */
  element_count=kernel->width*kernel->height;
  for (index=0; index < element_count; index++)
    if (IsNaN(kernel->values[index]))
      kernel->values[index]=0.0;

  return;
}
conv_dw_dilation_kernel_arm.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2020, OPEN AI LAB * Author: haoluo@openailab.com */ #ifndef __CONV_DW_DILATION_KERNEL_ARM_H_ #define __CONV_DW_DILATION_KERNEL_ARM_H_ #include "tengine_ir.h" #include "convolution_param.h" #include "conv_dw_k5_k7_kernel_arm.h" int conv_dw_dilation_run(float* input_buf, float* weight_buf, float* bias, float* output_buf, int input_h, int input_w, int channel, int pad, int activation, int num_thread) { int channel_size = input_h * input_w; int mid_w = input_w - pad * 2; int mid_block_end = (mid_w & -4) + pad; int mid_end = mid_w + pad; int w = 0; #pragma omp parallel for num_threads(num_thread) for (int c = 0; c < channel; c++) { float* input_buf_c = input_buf + c * channel_size; float* output_buf_c = output_buf + c * channel_size; float* weight_buf_c = weight_buf + c * 9; float bias_c = bias ? 
bias[c] : 0; for (int h = 0; h < pad; h++) { for (w = 0; w < pad; w++) { float tmp = bias_c; tmp += weight_buf_c[4] * input_buf_c[h * input_w + w]; tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad]; tmp += weight_buf_c[7] * input_buf_c[(h + pad) * input_w + w]; tmp += weight_buf_c[8] * input_buf_c[(h + pad) * input_w + w + pad]; output_buf_c[h * input_w + w] = elem_activation(tmp, activation); } for (; w < mid_block_end; w += 4) { float32x4_t tmp_4 = vdupq_n_f32(bias_c); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[3]), vld1q_f32(input_buf_c + h * input_w + w - pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[4]), vld1q_f32(input_buf_c + h * input_w + w)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[5]), vld1q_f32(input_buf_c + h * input_w + w + pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[6]), vld1q_f32(input_buf_c + (h + pad) * input_w + w - pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[7]), vld1q_f32(input_buf_c + (h + pad) * input_w + w)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[8]), vld1q_f32(input_buf_c + (h + pad) * input_w + w + pad)); tmp_4 = vector_activation(tmp_4, activation); vst1q_f32(output_buf_c + h * input_w + w, tmp_4); } for (; w < mid_end; w++) { float tmp = bias_c; tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad]; tmp += weight_buf_c[4] * input_buf_c[h * input_w + w]; tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad]; tmp += weight_buf_c[6] * input_buf_c[(h + pad) * input_w + w - pad]; tmp += weight_buf_c[7] * input_buf_c[(h + pad) * input_w + w]; tmp += weight_buf_c[8] * input_buf_c[(h + pad) * input_w + w + pad]; output_buf_c[h * input_w + w] = elem_activation(tmp, activation); ; } for (; w < input_w; w++) { float tmp = bias_c; tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad]; tmp += weight_buf_c[4] * input_buf_c[h * input_w + w]; tmp += weight_buf_c[6] * input_buf_c[(h + pad) * input_w + w - pad]; tmp += weight_buf_c[7] * input_buf_c[(h + pad) * 
input_w + w]; output_buf_c[h * input_w + w] = elem_activation(tmp, activation); ; } } for (int h = pad; h < input_h - pad; h++) { for (w = 0; w < pad; w++) { float tmp = bias_c; tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w]; tmp += weight_buf_c[2] * input_buf_c[(h - pad) * input_w + w + pad]; tmp += weight_buf_c[4] * input_buf_c[h * input_w + w]; tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad]; tmp += weight_buf_c[7] * input_buf_c[(h + pad) * input_w + w]; tmp += weight_buf_c[8] * input_buf_c[(h + pad) * input_w + w + pad]; output_buf_c[h * input_w + w] = elem_activation(tmp, activation); ; } for (; w < mid_block_end; w += 4) { float32x4_t tmp_4 = vdupq_n_f32(bias_c); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[0]), vld1q_f32(input_buf_c + (h - pad) * input_w + w - pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[1]), vld1q_f32(input_buf_c + (h - pad) * input_w + w)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[2]), vld1q_f32(input_buf_c + (h - pad) * input_w + w + pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[3]), vld1q_f32(input_buf_c + h * input_w + w - pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[4]), vld1q_f32(input_buf_c + h * input_w + w)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[5]), vld1q_f32(input_buf_c + h * input_w + w + pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[6]), vld1q_f32(input_buf_c + (h + pad) * input_w + w - pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[7]), vld1q_f32(input_buf_c + (h + pad) * input_w + w)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[8]), vld1q_f32(input_buf_c + (h + pad) * input_w + w + pad)); tmp_4 = vector_activation(tmp_4, activation); vst1q_f32(output_buf_c + h * input_w + w, tmp_4); } for (; w < mid_end; w++) { float tmp = bias_c; tmp += weight_buf_c[0] * input_buf_c[(h - pad) * input_w + w - pad]; tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w]; tmp += weight_buf_c[2] * input_buf_c[(h - pad) * 
input_w + w + pad]; tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad]; tmp += weight_buf_c[4] * input_buf_c[h * input_w + w]; tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad]; tmp += weight_buf_c[6] * input_buf_c[(h + pad) * input_w + w - pad]; tmp += weight_buf_c[7] * input_buf_c[(h + pad) * input_w + w]; tmp += weight_buf_c[8] * input_buf_c[(h + pad) * input_w + w + pad]; output_buf_c[h * input_w + w] = elem_activation(tmp, activation); ; } for (; w < input_w; w++) { float tmp = bias_c; tmp += weight_buf_c[0] * input_buf_c[(h - pad) * input_w + w - pad]; tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w]; tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad]; tmp += weight_buf_c[4] * input_buf_c[h * input_w + w]; tmp += weight_buf_c[6] * input_buf_c[(h + pad) * input_w + w - pad]; tmp += weight_buf_c[7] * input_buf_c[(h + pad) * input_w + w]; output_buf_c[h * input_w + w] = elem_activation(tmp, activation); ; } } for (int h = input_h - pad; h < input_h; h++) { for (w = 0; w < pad; w++) { float tmp = bias_c; tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w]; tmp += weight_buf_c[2] * input_buf_c[(h - pad) * input_w + w + pad]; tmp += weight_buf_c[4] * input_buf_c[h * input_w + w]; tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad]; output_buf_c[h * input_w + w] = elem_activation(tmp, activation); ; } for (; w < mid_block_end; w += 4) { float32x4_t tmp_4 = vdupq_n_f32(bias_c); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[0]), vld1q_f32(input_buf_c + (h - pad) * input_w + w - pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[1]), vld1q_f32(input_buf_c + (h - pad) * input_w + w)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[2]), vld1q_f32(input_buf_c + (h - pad) * input_w + w + pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[3]), vld1q_f32(input_buf_c + h * input_w + w - pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[4]), vld1q_f32(input_buf_c + h * input_w + w)); tmp_4 = 
vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[5]), vld1q_f32(input_buf_c + h * input_w + w + pad)); tmp_4 = vector_activation(tmp_4, activation); vst1q_f32(output_buf_c + h * input_w + w, tmp_4); } for (; w < mid_end; w++) { float tmp = bias_c; tmp += weight_buf_c[0] * input_buf_c[(h - pad) * input_w + w - pad]; tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w]; tmp += weight_buf_c[2] * input_buf_c[(h - pad) * input_w + w + pad]; tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad]; tmp += weight_buf_c[4] * input_buf_c[h * input_w + w]; tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad]; output_buf_c[h * input_w + w] = elem_activation(tmp, activation); ; } for (; w < input_w; w++) { float tmp = bias_c; tmp += weight_buf_c[0] * input_buf_c[(h - pad) * input_w + w - pad]; tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w]; tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad]; tmp += weight_buf_c[4] * input_buf_c[h * input_w + w]; output_buf_c[h * input_w + w] = elem_activation(tmp, activation); ; } } } return 0; } #endif
data.c
#include "data.h"
#include "utils.h"
#include "image.h"
#include "cuda.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Serializes rand() calls across the loader threads below. */
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

/* Read every line of 'filename' into a list of heap-allocated strings.
 * Aborts via file_error() if the file cannot be opened. */
list *get_paths(char *filename)
{
    char *path;
    FILE *file = fopen(filename, "r");
    if(!file) file_error(filename);
    list *lines = make_list();
    while((path=fgetl(file))){
        list_insert(lines, path);
    }
    fclose(file);
    return lines;
}

/*
char **get_random_paths_indexes(char **paths, int n, int m, int *indexes)
{
    char **random_paths = calloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);
    for(i = 0; i < n; ++i){
        int index = rand()%m;
        indexes[i] = index;
        random_paths[i] = paths[index];
        if(i == 0) printf("%s\n", paths[index]);
    }
    pthread_mutex_unlock(&mutex);
    return random_paths;
}
*/

/* Pick n paths uniformly at random (with replacement) from paths[0..m-1].
 * Returned array is owned by the caller; the strings are shared, not copied. */
char **get_random_paths(char **paths, int n, int m)
{
    char **random_paths = calloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);  /* rand() is not thread-safe */
    for(i = 0; i < n; ++i){
        int index = rand()%m;
        random_paths[i] = paths[index];
        //if(i == 0) printf("%s\n", paths[index]);
    }
    pthread_mutex_unlock(&mutex);
    return random_paths;
}

/* Copy each of the n paths with 'find' substituted by 'replace'.
 * Both the array and each string are freshly allocated. */
char **find_replace_paths(char **paths, int n, char *find, char *replace)
{
    char **replace_paths = calloc(n, sizeof(char*));
    int i;
    for(i = 0; i < n; ++i){
        char replaced[4096];
        find_replace(paths[i], find, replace, replaced);
        replace_paths[i] = copy_string(replaced);
    }
    return replace_paths;
}

/* Load n images resized to w*h, converted to grayscale; each matrix row
 * takes ownership of the image's pixel buffer. */
matrix load_image_paths_gray(char **paths, int n, int w, int h)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = calloc(X.rows, sizeof(float*));
    X.cols = 0;
    for(i = 0; i < n; ++i){
        image im = load_image(paths[i], w, h, 3);
        image gray = grayscale_image(im);
        free_image(im);
        im = gray;
        X.vals[i] = im.data;
        X.cols = im.h*im.w*im.c;
    }
    return X;
}

/* Load n color images resized to w*h; each matrix row takes ownership of
 * the image's pixel buffer. */
matrix load_image_paths(char **paths, int n, int w, int h)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = calloc(X.rows, sizeof(float*));
    X.cols = 0;
    for(i = 0; i < n; ++i){
        image im = load_image_color(paths[i], w, h);
        X.vals[i] = im.data;
        X.cols = im.h*im.w*im.c;
    }
    return X;
}

/* Load n images with random crop/rotate/flip/color augmentation (or a plain
 * center crop when 'center' is set); continues on the next chunk line. */
matrix
load_image_augment_paths(char **paths, int n, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = calloc(X.rows, sizeof(float*));
    X.cols = 0;
    for(i = 0; i < n; ++i){
        /* load at native resolution, then crop/augment down to size*size */
        image im = load_image_color(paths[i], 0, 0);
        image crop;
        if(center){
            crop = center_crop_image(im, size, size);
        } else {
            crop = random_augment_image(im, angle, aspect, min, max, size, size);
        }
        int flip = rand()%2;
        if (flip) flip_image(crop);
        random_distort_image(crop, hue, saturation, exposure);

        /*
        show_image(im, "orig");
        show_image(crop, "crop");
        cvWaitKey(0);
        */
        //grayscale_image_3c(crop);
        free_image(im);
        X.vals[i] = crop.data;
        X.cols = crop.h*crop.w*crop.c;
    }
    return X;
}

/* Parse a label file of "id x y w h" lines into a growable box_label array;
 * also fills the derived left/right/top/bottom fields.  *n receives the
 * count; caller frees the returned array. */
box_label *read_boxes(char *filename, int *n)
{
    FILE *file = fopen(filename, "r");
    if(!file) file_error(filename);
    float x, y, h, w;
    int id;
    int count = 0;
    int size = 64;
    box_label *boxes = calloc(size, sizeof(box_label));
    while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){
        if(count == size) {
            size = size * 2;
            /* NOTE(review): realloc result is unchecked and overwrites
             * 'boxes'; on failure this segfaults and leaks — consider the
             * tmp-pointer pattern. */
            boxes = realloc(boxes, size*sizeof(box_label));
        }
        boxes[count].id = id;
        boxes[count].x = x;
        boxes[count].y = y;
        boxes[count].h = h;
        boxes[count].w = w;
        boxes[count].left   = x - w/2;
        boxes[count].right  = x + w/2;
        boxes[count].top    = y - h/2;
        boxes[count].bottom = y + h/2;
        ++count;
    }
    fclose(file);
    *n = count;
    return boxes;
}

/* Fisher-Yates-style shuffle (with replacement bias) of the box array. */
void randomize_boxes(box_label *b, int n)
{
    int i;
    for(i = 0; i < n; ++i){
        box_label swap = b[i];
        int index = rand()%n;
        b[i] = b[index];
        b[index] = swap;
    }
}

/* Map box coordinates through the crop transform (scale sx,sy then shift
 * dx,dy), optionally mirroring horizontally, and clamp to [0,1].  Boxes at
 * the (0,0) sentinel are pushed far out of range so they are ignored. */
void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip)
{
    int i;
    for(i = 0; i < n; ++i){
        if(boxes[i].x == 0 && boxes[i].y == 0) {
            boxes[i].x = 999999;
            boxes[i].y = 999999;
            boxes[i].w = 999999;
            boxes[i].h = 999999;
            continue;
        }
        boxes[i].left   = boxes[i].left  * sx - dx;
        boxes[i].right  = boxes[i].right * sx - dx;
        boxes[i].top    = boxes[i].top   * sy - dy;
        boxes[i].bottom = boxes[i].bottom* sy - dy;

        if(flip){
            float
            swap = boxes[i].left;
            /* mirror: left/right swap around x = 1 */
            boxes[i].left = 1. - boxes[i].right;
            boxes[i].right = 1. - swap;
        }

        boxes[i].left   = constrain(0, 1, boxes[i].left);
        boxes[i].right  = constrain(0, 1, boxes[i].right);
        boxes[i].top    = constrain(0, 1, boxes[i].top);
        boxes[i].bottom = constrain(0, 1, boxes[i].bottom);

        /* rebuild center/size from the clamped extents */
        boxes[i].x = (boxes[i].left+boxes[i].right)/2;
        boxes[i].y = (boxes[i].top+boxes[i].bottom)/2;
        boxes[i].w = (boxes[i].right - boxes[i].left);
        boxes[i].h = (boxes[i].bottom - boxes[i].top);

        boxes[i].w = constrain(0, 1, boxes[i].w);
        boxes[i].h = constrain(0, 1, boxes[i].h);
    }
}

/* Fill a SWAG-style truth buffer: up to 90 boxes, each encoded as
 * [x y w h | one-hot class] (4+classes floats per slot).  The image path is
 * rewritten to its label-file path by convention. */
void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    find_replace(path, "images", "labels", labelpath);
    find_replace(labelpath, "JPEGImages", "labels", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);

    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;

    for (i = 0; i < count && i < 90; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;

        if (w < .0 || h < .0) continue;

        int index = (4+classes) * i;

        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;

        if (id < classes) truth[index+id] = 1;
    }
    free(boxes);
}

/* Fill a region/grid (YOLOv1-style) truth buffer: the image is divided into
 * num_boxes x num_boxes cells, each holding [objectness | one-hot class |
 * x y w h] relative to the cell.  First box to claim a cell wins.
 * Continues on the next chunk line. */
void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    find_replace(path, "images", "labels", labelpath);
    find_replace(labelpath, "JPEGImages", "labels", labelpath);

    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".png", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;

    for (i = 0; i < count; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;

        if (w < .005 || h < .005) continue;  /* drop degenerate boxes */

        /* cell indices and position of the center within its cell */
        int col = (int)(x*num_boxes);
        int row = (int)(y*num_boxes);

        x = x*num_boxes - col;
        y = y*num_boxes - row;

        int index = (col+row*num_boxes)*(5+classes);
        if (truth[index]) continue;  /* cell already claimed */
        truth[index++] = 1;          /* objectness */

        if (id < classes) truth[index+id] = 1;
        index += classes;

        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;
    }
    free(boxes);
}

/* Decode a run-length-encoded binary mask into im.data: rle[i] gives the
 * run length, runs alternate 0/1 starting at 0; the remainder of the image
 * is filled with the final value. */
void load_rle(image im, int *rle, int n)
{
    int count = 0;
    int curr = 0;
    int i,j;
    for(i = 0; i < n; ++i){
        for(j = 0; j < rle[i]; ++j){
            im.data[count++] = curr;
        }
        curr = 1 - curr;
    }
    for(; count < im.h*im.w*im.c; ++count){
        im.data[count] = curr;
    }
}

/* OR the single-channel mask 'src' into channel c of 'dest'. */
void or_image(image src, image dest, int c)
{
    int i;
    for(i = 0; i < src.w*src.h; ++i){
        if(src.data[i]) dest.data[dest.w*dest.h*c + i] = 1;
    }
}

/* Make channel masks mutually exclusive: a pixel set in channel k clears
 * that pixel in all later channels. */
void exclusive_image(image src)
{
    int k, j, i;
    int s = src.w*src.h;
    for(k = 0; k < src.c-1; ++k){
        for(i = 0; i < s; ++i){
            if (src.data[k*s + i]){
                for(j = k+1; j < src.c; ++j){
                    src.data[j*s + i] = 0;
                }
            }
        }
    }
}

/* Tight bounding box (pixel units) around the nonzero pixels of a mask;
 * continues on the next chunk line. */
box bound_image(image im)
{
    int x,y;
    int minx = im.w;
    int miny = im.h;
    int maxx = 0;
    int maxy = 0;
    for(y = 0; y < im.h; ++y){
        for(x = 0; x < im.w; ++x){
            if(im.data[y*im.w + x]){
                minx = (x < minx) ? x : minx;
                miny = (y < miny) ? y : miny;
                maxx = (x > maxx) ? x : maxx;
                maxy = (y > maxy) ?
                y : maxy;
            }
        }
    }
    box b = {minx, miny, maxx-minx + 1, maxy-miny + 1};
    //printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
    return b;
}

/* Build instance-segmentation truth: for up to num_boxes RLE masks in the
 * label file, write [class id | mw*mh downsampled mask] per slot; a -1 id
 * terminates the list early.  Masks get the same rotate/crop/flip transform
 * as the input image (aug). */
void fill_truth_iseg(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh)
{
    char labelpath[4096];
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];
    int id;
    int i = 0;
    int j;
    image part = make_image(w, h, 1);  /* scratch mask, reused per instance */
    while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect);
        if(flip) flip_image(sized);

        image mask = resize_image(sized, mw, mh);
        truth[i*(mw*mh+1)] = id;
        for(j = 0; j < mw*mh; ++j){
            truth[i*(mw*mh + 1) + 1 + j] = mask.data[j];
        }
        ++i;

        free_image(mask);
        free_image(sized);
        free(rle);
    }
    if(i < num_boxes) truth[i*(mw*mh+1)] = -1;  /* sentinel terminator */
    fclose(file);
    free_image(part);
}

/* Build mask-RCNN-style truth: per instance [x y w h | mw*mh mask crop |
 * class id], with the box derived from the transformed mask's extent.
 * Continues on the next chunk line. */
void fill_truth_mask(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh)
{
    char labelpath[4096];
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];
    int id;
    int i = 0;
    image part = make_image(w, h, 1);
    while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w,
                aug.h, aug.dx, aug.dy, aug.aspect);
        if(flip) flip_image(sized);
        box b = bound_image(sized);
        if(b.w > 0){
            image crop = crop_image(sized, b.x, b.y, b.w, b.h);
            image mask = resize_image(crop, mw, mh);
            /* normalized center-x, center-y, width, height of the instance */
            truth[i*(4 + mw*mh + 1) + 0] = (b.x + b.w/2.)/sized.w;
            truth[i*(4 + mw*mh + 1) + 1] = (b.y + b.h/2.)/sized.h;
            truth[i*(4 + mw*mh + 1) + 2] = b.w/sized.w;
            truth[i*(4 + mw*mh + 1) + 3] = b.h/sized.h;

            int j;
            for(j = 0; j < mw*mh; ++j){
                truth[i*(4 + mw*mh + 1) + 4 + j] = mask.data[j];
            }
            truth[i*(4 + mw*mh + 1) + 4 + mw*mh] = id;

            free_image(crop);
            free_image(mask);
            ++i;
        }
        free_image(sized);
        free(rle);
    }
    fclose(file);
    free_image(part);
}

/* Fill flat detection truth: up to num_boxes records of [x y w h id], with
 * degenerate boxes skipped (compacted via 'sub'). */
void fill_truth_detection(char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    find_replace(path, "images", "labels", labelpath);
    find_replace(labelpath, "JPEGImages", "labels", labelpath);

    find_replace(labelpath, "raw", "labels", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".png", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    if(count > num_boxes) count = num_boxes;
    float x,y,w,h;
    int id;
    int i;
    int sub = 0;  /* number of boxes skipped so far, keeps truth compact */

    for (i = 0; i < count; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;

        if ((w < .001 || h < .001)) {
            ++sub;
            continue;
        }

        truth[(i-sub)*5+0] = x;
        truth[(i-sub)*5+1] = y;
        truth[(i-sub)*5+2] = w;
        truth[(i-sub)*5+3] = h;
        truth[(i-sub)*5+4] = id;
    }
    free(boxes);
}

/* Alphabet size for captcha labels: 0-9, a-z, plus a "blank" slot. */
#define NUMCHARS 37

/* Print the argmax character of each NUMCHARS-wide prediction slice. */
void print_letters(float *pred, int n)
{
    int i;
    for(i = 0; i < n; ++i){
        int index = max_index(pred+i*NUMCHARS, NUMCHARS);
        printf("%c", int_to_alphanum(index));
    }
    printf("\n");
}

/* One-hot encode the captcha text embedded in the file name (basename up to
 * the first '.'); unused positions get the blank class.
 * Continues on the next chunk line. */
void fill_truth_captcha(char *path, int n, float *truth)
{
    char *begin = strrchr(path, '/');
    ++begin;
    int i;
    for(i = 0; i < strlen(begin) && i < n && begin[i] != '.'; ++i){
        int index = alphanum_to_int(begin[i]);
        if(index > 35) printf("Bad %c\n", begin[i]);
        truth[i*NUMCHARS+index] = 1;
    }
    for(;i < n; ++i){
        truth[i*NUMCHARS + NUMCHARS-1] = 1;  /* pad with the blank class */
    }
}

/* Load a captcha classification batch: images plus per-character one-hot
 * labels derived from the file names. */
data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = make_matrix(n, k*NUMCHARS);
    int i;
    for(i = 0; i < n; ++i){
        fill_truth_captcha(paths[i], k, d.y.vals[i]);
    }
    if(m) free(paths);
    return d;
}

/* Autoencoder-style captcha batch: the target is the input image itself.
 * NOTE(review): d.X.cols is hard-coded to 17100 — presumably tied to a fixed
 * input geometry; confirm against the network config. */
data load_data_captcha_encode(char **paths, int n, int m, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.X.cols = 17100;
    d.y = d.X;
    if(m) free(paths);
    return d;
}

/* Set truth[i]=1 for every label whose name occurs as a substring of the
 * image path; everything else is zeroed first. */
void fill_truth(char *path, char **labels, int k, float *truth)
{
    int i;
    memset(truth, 0, k*sizeof(float));
    int count = 0;
    for(i = 0; i < k; ++i){
        if(strstr(path, labels[i])){
            truth[i] = 1;
            ++count;
            //printf("%s %s %d\n", path, labels[i], i);
        }
    }
    //if(count != 1 && (k != 1 || count != 0)) printf("Too many or too few labels: %d, %s\n", count, path);
}

/* Propagate truth up a label hierarchy: set all ancestors of positive
 * labels, then mark every group with no positive member as SECRET_NUM
 * (a "don't care" value for the loss). */
void fill_hierarchy(float *truth, int k, tree *hierarchy)
{
    int j;
    for(j = 0; j < k; ++j){
        if(truth[j]){
            int parent = hierarchy->parent[j];
            while(parent >= 0){
                truth[parent] = 1;
                parent = hierarchy->parent[parent];
            }
        }
    }
    int i;
    int count = 0;
    for(j = 0; j < hierarchy->groups; ++j){
        //printf("%d\n", count);
        int mask = 1;
        for(i = 0; i < hierarchy->group_size[j]; ++i){
            if(truth[count + i]){
                mask = 0;
                break;
            }
        }
        if (mask) {
            for(i = 0; i < hierarchy->group_size[j]; ++i){
                truth[count + i] = SECRET_NUM;
            }
        }
        count += hierarchy->group_size[j];
    }
}

/* Read k regression targets per image from sibling ".txt" label files;
 * continues on the next chunk line. */
matrix load_regression_labels_paths(char **paths, int n, int k)
{
    matrix y = make_matrix(n, k);
    int i,j;
    for(i = 0; i < n; ++i){
        char labelpath[4096];
        find_replace(paths[i], "images", "labels", labelpath);
        find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, ".BMP", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPeG", ".txt", labelpath); find_replace(labelpath, ".Jpeg", ".txt", labelpath); find_replace(labelpath, ".PNG", ".txt", labelpath); find_replace(labelpath, ".TIF", ".txt", labelpath); find_replace(labelpath, ".bmp", ".txt", labelpath); find_replace(labelpath, ".jpeg", ".txt", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".png", ".txt", labelpath); find_replace(labelpath, ".tif", ".txt", labelpath); FILE *file = fopen(labelpath, "r"); for(j = 0; j < k; ++j){ fscanf(file, "%f", &(y.vals[i][j])); } fclose(file); } return y; } matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy) { matrix y = make_matrix(n, k); int i; for(i = 0; i < n && labels; ++i){ fill_truth(paths[i], labels, k, y.vals[i]); if(hierarchy){ fill_hierarchy(y.vals[i], k, hierarchy); } } return y; } matrix load_tags_paths(char **paths, int n, int k) { matrix y = make_matrix(n, k); int i; //int count = 0; for(i = 0; i < n; ++i){ char label[4096]; find_replace(paths[i], "images", "labels", label); find_replace(label, ".jpg", ".txt", label); FILE *file = fopen(label, "r"); if (!file) continue; //++count; int tag; while(fscanf(file, "%d", &tag) == 1){ if(tag < k){ y.vals[i][tag] = 1; } } fclose(file); } //printf("%d/%d\n", count, n); return y; } char **get_labels(char *filename) { list *plist = get_paths(filename); char **labels = (char **)list_to_array(plist); free_list(plist); return labels; } void free_data(data d) { if(!d.shallow){ free_matrix(d.X); free_matrix(d.y); }else{ free(d.X.vals); free(d.y.vals); } } image get_segmentation_image(char *path, int w, int h, int classes) { char labelpath[4096]; find_replace(path, "images", "mask", labelpath); find_replace(labelpath, "JPEGImages", "mask", labelpath); find_replace(labelpath, ".jpg", ".txt", 
            labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    image mask = make_image(w, h, classes);
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];
    int id;
    image part = make_image(w, h, 1);  /* scratch buffer for each RLE mask */
    while(fscanf(file, "%d %s", &id, buff) == 2){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        or_image(part, mask, id);  /* accumulate into the class channel */
        free(rle);
    }
    //exclusive_image(mask);
    fclose(file);
    free_image(part);
    return mask;
}

/* Same as get_segmentation_image, but with an extra trailing "background"
 * channel that is 1 wherever no instance covers the pixel. */
image get_segmentation_image2(char *path, int w, int h, int classes)
{
    char labelpath[4096];
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    image mask = make_image(w, h, classes+1);
    int i;
    for(i = 0; i < w*h; ++i){
        mask.data[w*h*classes + i] = 1;  /* start as all background */
    }
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];
    int id;
    image part = make_image(w, h, 1);
    while(fscanf(file, "%d %s", &id, buff) == 2){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        or_image(part, mask, id);
        for(i = 0; i < w*h; ++i){
            if(part.data[i]) mask.data[w*h*classes + i] = 0;
        }
        free(rle);
    }
    //exclusive_image(mask);
    fclose(file);
    free_image(part);
    return mask;
}

/* Load a semantic-segmentation batch: augmented images paired with their
 * identically-transformed class masks, downsampled by 'div'.
 * Continues on the next chunk line. */
data load_data_seg(int n, char **paths, int m, int w, int h, int classes, int min, int max, float angle, float aspect, float hue, float saturation, float exposure, int div)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    d.y.rows = n;
    d.y.cols = h*w*classes/div/div;
    d.y.vals = calloc(d.X.rows, sizeof(float*));

    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        augment_args a = random_augment_args(orig, angle, aspect, min, max, w,
                h);
        image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);

        int flip = rand()%2;
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;

        image mask = get_segmentation_image(random_paths[i], orig.w, orig.h, classes);
        //image mask = make_image(orig.w, orig.h, classes+1);
        /* apply the same geometric transform to the mask, scaled by 1/div */
        image sized_m = rotate_crop_image(mask, a.rad, a.scale/div, a.w/div, a.h/div, a.dx/div, a.dy/div, a.aspect);

        if(flip) flip_image(sized_m);
        d.y.vals[i] = sized_m.data;

        free_image(orig);
        free_image(mask);

        /*
        image rgb = mask_to_rgb(sized_m, classes);
        show_image(rgb, "part");
        show_image(sized, "orig");
        cvWaitKey(0);
        free_image(rgb);
        */
    }
    free(random_paths);
    return d;
}

/* Load an instance-segmentation batch: augmented images plus per-instance
 * [id | downsampled mask] truth built by fill_truth_iseg. */
data load_data_iseg(int n, char **paths, int m, int w, int h, int classes, int boxes, int div, int min, int max, float angle, float aspect, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    d.y = make_matrix(n, (((w/div)*(h/div))+1)*boxes);

    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
        image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);

        int flip = rand()%2;
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;
        //show_image(sized, "image");

        fill_truth_iseg(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, w/div, h/div);

        free_image(orig);

        /*
        image rgb = mask_to_rgb(sized_m, classes);
        show_image(rgb, "part");
        show_image(sized, "orig");
        cvWaitKey(0);
        free_image(rgb);
        */
    }
    free(random_paths);
    return d;
}

/* Load a mask-RCNN-style batch (boxes + mask crops per instance);
 * signature continues on the next chunk line. */
data load_data_mask(int n, char **paths, int m, int w, int h, int classes, int boxes, int coords, int min, int max, float angle, float aspect, float hue, float saturation, float
        exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    d.y = make_matrix(n, (coords+1)*boxes);

    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
        image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);

        int flip = rand()%2;
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;
        //show_image(sized, "image");

        /* 14x14 mask crops per instance */
        fill_truth_mask(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, 14, 14);

        free_image(orig);

        /*
        image rgb = mask_to_rgb(sized_m, classes);
        show_image(rgb, "part");
        show_image(sized, "orig");
        cvWaitKey(0);
        free_image(rgb);
        */
    }
    free(random_paths);
    return d;
}

/* Load a region (grid) detection batch: random jittered crop + flip +
 * color distortion, with grid truth from fill_truth_region.
 * Continues on the next chunk line. */
data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    int k = size*size*(5+classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);

        int oh = orig.h;
        int ow = orig.w;

        /* random crop offsets, up to +-jitter of each dimension */
        int dw = (ow*jitter);
        int dh = (oh*jitter);

        int pleft  = rand_uniform(-dw, dw);
        int pright = rand_uniform(-dw, dw);
        int ptop   = rand_uniform(-dh, dh);
        int pbot   = rand_uniform(-dh, dh);

        int swidth =  ow - pleft - pright;
        int sheight = oh - ptop - pbot;

        float sx = (float)swidth  / ow;
        float sy = (float)sheight / oh;

        int flip = rand()%2;
        image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

        float dx = ((float)pleft/ow)/sx;
        float dy = ((float)ptop /oh)/sy;

        image sized = resize_image(cropped, w, h);
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;
fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy); free_image(orig); free_image(cropped); } free(random_paths); return d; } data load_data_compare(int n, char **paths, int m, int classes, int w, int h) { if(m) paths = get_random_paths(paths, 2*n, m); int i,j; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*6; int k = 2*(classes); d.y = make_matrix(n, k); for(i = 0; i < n; ++i){ image im1 = load_image_color(paths[i*2], w, h); image im2 = load_image_color(paths[i*2+1], w, h); d.X.vals[i] = calloc(d.X.cols, sizeof(float)); memcpy(d.X.vals[i], im1.data, h*w*3*sizeof(float)); memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float)); int id; float iou; char imlabel1[4096]; char imlabel2[4096]; find_replace(paths[i*2], "imgs", "labels", imlabel1); find_replace(imlabel1, "jpg", "txt", imlabel1); FILE *fp1 = fopen(imlabel1, "r"); while(fscanf(fp1, "%d %f", &id, &iou) == 2){ if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou; } find_replace(paths[i*2+1], "imgs", "labels", imlabel2); find_replace(imlabel2, "jpg", "txt", imlabel2); FILE *fp2 = fopen(imlabel2, "r"); while(fscanf(fp2, "%d %f", &id, &iou) == 2){ if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou; } for (j = 0; j < classes; ++j){ if (d.y.vals[i][2*j] > .5 && d.y.vals[i][2*j+1] < .5){ d.y.vals[i][2*j] = 1; d.y.vals[i][2*j+1] = 0; } else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){ d.y.vals[i][2*j] = 0; d.y.vals[i][2*j+1] = 1; } else { d.y.vals[i][2*j] = SECRET_NUM; d.y.vals[i][2*j+1] = SECRET_NUM; } } fclose(fp1); fclose(fp2); free_image(im1); free_image(im2); } if(m) free(paths); return d; } data load_data_swag(char **paths, int n, int classes, float jitter) { int index = rand()%n; char *random_path = paths[index]; image orig = load_image_color(random_path, 0, 0); int h = orig.h; int w = orig.w; data d = {0}; d.shallow = 0; d.w = w; d.h = h; d.X.rows = 1; d.X.vals = calloc(d.X.rows, 
            sizeof(float*));
    d.X.cols = h*w*3;

    int k = (4+classes)*90;  /* up to 90 boxes, matching fill_truth_swag */
    d.y = make_matrix(1, k);

    int dw = w*jitter;
    int dh = h*jitter;

    int pleft  = rand_uniform(-dw, dw);
    int pright = rand_uniform(-dw, dw);
    int ptop   = rand_uniform(-dh, dh);
    int pbot   = rand_uniform(-dh, dh);

    int swidth =  w - pleft - pright;
    int sheight = h - ptop - pbot;

    float sx = (float)swidth  / w;
    float sy = (float)sheight / h;

    int flip = rand()%2;
    image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

    float dx = ((float)pleft/w)/sx;
    float dy = ((float)ptop /h)/sy;

    image sized = resize_image(cropped, w, h);
    if(flip) flip_image(sized);
    d.X.vals[0] = sized.data;

    fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy);

    free_image(orig);
    free_image(cropped);

    return d;
}

/* Load a detection batch: random aspect-ratio jitter, letterbox placement
 * on a 0.5-gray canvas, flip + color distortion, with flat [x y w h id]
 * truth from fill_truth_detection. */
data load_data_detection(int n, char **paths, int m, int w, int h, int boxes, int classes, float jitter, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    d.y = make_matrix(n, 5*boxes);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        image sized = make_image(w, h, orig.c);
        fill_image(sized, .5);  /* gray letterbox background */

        float dw = jitter * orig.w;
        float dh = jitter * orig.h;

        /* jittered aspect ratio decides which axis is scaled to fit */
        float new_ar = (orig.w + rand_uniform(-dw, dw)) / (orig.h + rand_uniform(-dh, dh));
        //float scale = rand_uniform(.25, 2);
        float scale = 1;

        float nw, nh;

        if(new_ar < 1){
            nh = scale * h;
            nw = nh * new_ar;
        } else {
            nw = scale * w;
            nh = nw / new_ar;
        }

        float dx = rand_uniform(0, w - nw);
        float dy = rand_uniform(0, h - nh);

        place_image(orig, nw, nh, dx, dy, sized);

        random_distort_image(sized, hue, saturation, exposure);

        int flip = rand()%2;
        if(flip) flip_image(sized);
        d.X.vals[i] = sized.data;

        fill_truth_detection(random_paths[i], boxes, d.y.vals[i], classes, flip, -dx/w, -dy/h, nw/w, nh/h);

        free_image(orig);
    }
    free(random_paths);
    return d;
}

/* Thread entry point: dispatch on load_args.type to the matching loader,
 * writing the result through the pointers inside the args struct.
 * Continues on the next chunk line. */
void *load_thread(void *ptr)
{
//printf("Loading data: %d\n", rand()); load_args a = *(struct load_args*)ptr; if(a.exposure == 0) a.exposure = 1; if(a.saturation == 0) a.saturation = 1; if(a.aspect == 0) a.aspect = 1; if (a.type == OLD_CLASSIFICATION_DATA){ *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h); } else if (a.type == REGRESSION_DATA){ *a.d = load_data_regression(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure); } else if (a.type == CLASSIFICATION_DATA){ *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.center); } else if (a.type == SUPER_DATA){ *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale); } else if (a.type == WRITING_DATA){ *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h); } else if (a.type == ISEG_DATA){ *a.d = load_data_iseg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.scale, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure); } else if (a.type == INSTANCE_DATA){ *a.d = load_data_mask(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.coords, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure); } else if (a.type == SEGMENTATION_DATA){ *a.d = load_data_seg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.scale); } else if (a.type == REGION_DATA){ *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure); } else if (a.type == DETECTION_DATA){ *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure); } else if (a.type == SWAG_DATA){ *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter); } else if (a.type == COMPARE_DATA){ *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h); } else if (a.type == IMAGE_DATA){ *(a.im) = 
load_image_color(a.path, 0, 0); *(a.resized) = resize_image(*(a.im), a.w, a.h); } else if (a.type == LETTERBOX_DATA){ *(a.im) = load_image_color(a.path, 0, 0); *(a.resized) = letterbox_image(*(a.im), a.w, a.h); } else if (a.type == TAG_DATA){ *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure); } free(ptr); return 0; } pthread_t load_data_in_thread(load_args args) { pthread_t thread; struct load_args *ptr = calloc(1, sizeof(struct load_args)); *ptr = args; if(pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed"); return thread; } void *load_threads(void *ptr) { int i; load_args args = *(load_args *)ptr; if (args.threads == 0) args.threads = 1; data *out = args.d; int total = args.n; free(ptr); data *buffers = calloc(args.threads, sizeof(data)); pthread_t *threads = calloc(args.threads, sizeof(pthread_t)); for(i = 0; i < args.threads; ++i){ args.d = buffers + i; args.n = (i+1) * total/args.threads - i * total/args.threads; threads[i] = load_data_in_thread(args); } for(i = 0; i < args.threads; ++i){ pthread_join(threads[i], 0); } *out = concat_datas(buffers, args.threads); out->shallow = 0; for(i = 0; i < args.threads; ++i){ buffers[i].shallow = 1; free_data(buffers[i]); } free(buffers); free(threads); return 0; } void load_data_blocking(load_args args) { struct load_args *ptr = calloc(1, sizeof(struct load_args)); *ptr = args; load_thread(ptr); } pthread_t load_data(load_args args) { pthread_t thread; struct load_args *ptr = calloc(1, sizeof(struct load_args)); *ptr = args; if(pthread_create(&thread, 0, load_threads, ptr)) error("Thread creation failed"); return thread; } data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h) { if(m) paths = get_random_paths(paths, n, m); char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png"); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.y = 
load_image_paths_gray(replace_paths, n, out_w, out_h); if(m) free(paths); int i; for(i = 0; i < n; ++i) free(replace_paths[i]); free(replace_paths); return d; } data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.y = load_labels_paths(paths, n, labels, k, 0); if(m) free(paths); return d; } /* data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure) { data d = {0}; d.indexes = calloc(n, sizeof(int)); if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes); d.shallow = 0; d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure); d.y = load_labels_paths(paths, n, labels, k); if(m) free(paths); return d; } */ data load_data_super(char **paths, int n, int m, int w, int h, int scale) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; int i; d.X.rows = n; d.X.vals = calloc(n, sizeof(float*)); d.X.cols = w*h*3; d.y.rows = n; d.y.vals = calloc(n, sizeof(float*)); d.y.cols = w*scale * h*scale * 3; for(i = 0; i < n; ++i){ image im = load_image_color(paths[i], 0, 0); image crop = random_crop_image(im, w*scale, h*scale); int flip = rand()%2; if (flip) flip_image(crop); image resize = resize_image(crop, w, h); d.X.vals[i] = resize.data; d.y.vals[i] = crop.data; free_image(im); } if(m) free(paths); return d; } data load_data_regression(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0); d.y = load_regression_labels_paths(paths, n, k); if(m) free(paths); return d; } data select_data(data 
*orig, int *inds) { data d = {0}; d.shallow = 1; d.w = orig[0].w; d.h = orig[0].h; d.X.rows = orig[0].X.rows; d.y.rows = orig[0].X.rows; d.X.cols = orig[0].X.cols; d.y.cols = orig[0].y.cols; d.X.vals = calloc(orig[0].X.rows, sizeof(float *)); d.y.vals = calloc(orig[0].y.rows, sizeof(float *)); int i; for(i = 0; i < d.X.rows; ++i){ d.X.vals[i] = orig[inds[i]].X.vals[i]; d.y.vals[i] = orig[inds[i]].y.vals[i]; } return d; } data *tile_data(data orig, int divs, int size) { data *ds = calloc(divs*divs, sizeof(data)); int i, j; #pragma omp parallel for for(i = 0; i < divs*divs; ++i){ data d; d.shallow = 0; d.w = orig.w/divs * size; d.h = orig.h/divs * size; d.X.rows = orig.X.rows; d.X.cols = d.w*d.h*3; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.y = copy_matrix(orig.y); #pragma omp parallel for for(j = 0; j < orig.X.rows; ++j){ int x = (i%divs) * orig.w / divs - (d.w - orig.w/divs)/2; int y = (i/divs) * orig.h / divs - (d.h - orig.h/divs)/2; image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[j]); d.X.vals[j] = crop_image(im, x, y, d.w, d.h).data; } ds[i] = d; } return ds; } data resize_data(data orig, int w, int h) { data d = {0}; d.shallow = 0; d.w = w; d.h = h; int i; d.X.rows = orig.X.rows; d.X.cols = w*h*3; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.y = copy_matrix(orig.y); #pragma omp parallel for for(i = 0; i < orig.X.rows; ++i){ image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[i]); d.X.vals[i] = resize_image(im, w, h).data; } return d; } data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.w=size; d.h=size; d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, center); d.y = load_labels_paths(paths, n, labels, k, hierarchy); if(m) free(paths); return d; } data 
load_data_tag(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.w = size; d.h = size; d.shallow = 0; d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0); d.y = load_tags_paths(paths, n, k); if(m) free(paths); return d; } matrix concat_matrix(matrix m1, matrix m2) { int i, count = 0; matrix m; m.cols = m1.cols; m.rows = m1.rows+m2.rows; m.vals = calloc(m1.rows + m2.rows, sizeof(float*)); for(i = 0; i < m1.rows; ++i){ m.vals[count++] = m1.vals[i]; } for(i = 0; i < m2.rows; ++i){ m.vals[count++] = m2.vals[i]; } return m; } data concat_data(data d1, data d2) { data d = {0}; d.shallow = 1; d.X = concat_matrix(d1.X, d2.X); d.y = concat_matrix(d1.y, d2.y); d.w = d1.w; d.h = d1.h; return d; } data concat_datas(data *d, int n) { int i; data out = {0}; for(i = 0; i < n; ++i){ data new = concat_data(d[i], out); free_data(out); out = new; } return out; } data load_categorical_data_csv(char *filename, int target, int k) { data d = {0}; d.shallow = 0; matrix X = csv_to_matrix(filename); float *truth_1d = pop_column(&X, target); float **truth = one_hot_encode(truth_1d, X.rows, k); matrix y; y.rows = X.rows; y.cols = k; y.vals = truth; d.X = X; d.y = y; free(truth_1d); return d; } data load_cifar10_data(char *filename) { data d = {0}; d.shallow = 0; long i,j; matrix X = make_matrix(10000, 3072); matrix y = make_matrix(10000, 10); d.X = X; d.y = y; FILE *fp = fopen(filename, "rb"); if(!fp) file_error(filename); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class = bytes[0]; y.vals[i][class] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i][j] = (double)bytes[j+1]; } } scale_data_rows(d, 1./255); //normalize_data_rows(d); fclose(fp); return d; } void get_random_batch(data d, int n, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = 
rand()%d.X.rows; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void get_next_batch(data d, int n, int offset, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = offset + j; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); if(y) memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void smooth_data(data d) { int i, j; float scale = 1. / d.y.cols; float eps = .1; for(i = 0; i < d.y.rows; ++i){ for(j = 0; j < d.y.cols; ++j){ d.y.vals[i][j] = eps * scale + (1-eps) * d.y.vals[i][j]; } } } data load_all_cifar10() { data d = {0}; d.shallow = 0; int i,j,b; matrix X = make_matrix(50000, 3072); matrix y = make_matrix(50000, 10); d.X = X; d.y = y; for(b = 0; b < 5; ++b){ char buff[256]; sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1); FILE *fp = fopen(buff, "rb"); if(!fp) file_error(buff); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class = bytes[0]; y.vals[i+b*10000][class] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i+b*10000][j] = (double)bytes[j+1]; } } fclose(fp); } //normalize_data_rows(d); scale_data_rows(d, 1./255); smooth_data(d); return d; } data load_go(char *filename) { FILE *fp = fopen(filename, "rb"); matrix X = make_matrix(3363059, 361); matrix y = make_matrix(3363059, 361); int row, col; if(!fp) file_error(filename); char *label; int count = 0; while((label = fgetl(fp))){ int i; if(count == X.rows){ X = resize_matrix(X, count*2); y = resize_matrix(y, count*2); } sscanf(label, "%d %d", &row, &col); char *board = fgetl(fp); int index = row*19 + col; y.vals[count][index] = 1; for(i = 0; i < 19*19; ++i){ float val = 0; if(board[i] == '1') val = 1; else if(board[i] == '2') val = -1; X.vals[count][i] = val; } ++count; free(label); free(board); } X = resize_matrix(X, count); y = resize_matrix(y, count); data d = {0}; d.shallow = 0; d.X = X; d.y = y; fclose(fp); return d; } void 
randomize_data(data d) { int i; for(i = d.X.rows-1; i > 0; --i){ int index = rand()%i; float *swap = d.X.vals[index]; d.X.vals[index] = d.X.vals[i]; d.X.vals[i] = swap; swap = d.y.vals[index]; d.y.vals[index] = d.y.vals[i]; d.y.vals[i] = swap; } } void scale_data_rows(data d, float s) { int i; for(i = 0; i < d.X.rows; ++i){ scale_array(d.X.vals[i], d.X.cols, s); } } void translate_data_rows(data d, float s) { int i; for(i = 0; i < d.X.rows; ++i){ translate_array(d.X.vals[i], d.X.cols, s); } } data copy_data(data d) { data c = {0}; c.w = d.w; c.h = d.h; c.shallow = 0; c.num_boxes = d.num_boxes; c.boxes = d.boxes; c.X = copy_matrix(d.X); c.y = copy_matrix(d.y); return c; } void normalize_data_rows(data d) { int i; for(i = 0; i < d.X.rows; ++i){ normalize_array(d.X.vals[i], d.X.cols); } } data get_data_part(data d, int part, int total) { data p = {0}; p.shallow = 1; p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total; p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total; p.X.cols = d.X.cols; p.y.cols = d.y.cols; p.X.vals = d.X.vals + d.X.rows * part / total; p.y.vals = d.y.vals + d.y.rows * part / total; return p; } data get_random_data(data d, int num) { data r = {0}; r.shallow = 1; r.X.rows = num; r.y.rows = num; r.X.cols = d.X.cols; r.y.cols = d.y.cols; r.X.vals = calloc(num, sizeof(float *)); r.y.vals = calloc(num, sizeof(float *)); int i; for(i = 0; i < num; ++i){ int index = rand()%d.X.rows; r.X.vals[i] = d.X.vals[index]; r.y.vals[i] = d.y.vals[index]; } return r; } data *split_data(data d, int part, int total) { data *split = calloc(2, sizeof(data)); int i; int start = part*d.X.rows/total; int end = (part+1)*d.X.rows/total; data train; data test; train.shallow = test.shallow = 1; test.X.rows = test.y.rows = end-start; train.X.rows = train.y.rows = d.X.rows - (end-start); train.X.cols = test.X.cols = d.X.cols; train.y.cols = test.y.cols = d.y.cols; train.X.vals = calloc(train.X.rows, sizeof(float*)); test.X.vals = calloc(test.X.rows, 
sizeof(float*)); train.y.vals = calloc(train.y.rows, sizeof(float*)); test.y.vals = calloc(test.y.rows, sizeof(float*)); for(i = 0; i < start; ++i){ train.X.vals[i] = d.X.vals[i]; train.y.vals[i] = d.y.vals[i]; } for(i = start; i < end; ++i){ test.X.vals[i-start] = d.X.vals[i]; test.y.vals[i-start] = d.y.vals[i]; } for(i = end; i < d.X.rows; ++i){ train.X.vals[i-(end-start)] = d.X.vals[i]; train.y.vals[i-(end-start)] = d.y.vals[i]; } split[0] = train; split[1] = test; return split; }
openmp_private.c
/* OpenMP "private" clause example Jim Teresco, CS 338, Williams College, CS 341, Mount Holyoke College Mon Feb 24 22:30:57 EST 2003 Updated for CSIS-335, Siena College, Fall 2021 */ #include <stdio.h> #include <omp.h> int main(int argc, char *argv[]) { int thread_num = 997; /* by putting thread_num into the private clause, we are essentially creating a new copy of it for each thread */ #pragma omp parallel private(thread_num) { thread_num = omp_get_thread_num(); printf("In parallel directive, thread_num=%d\n", thread_num); } /* and when we're done, it's like we never used the original thread_num */ printf("Back from parallel directive, thread_num=%d\n", thread_num); return 0; }
tree.h
#pragma once

// Octree over SPH-like particles using a Morton (Z-order) key sort.
// TreePtcl must expose: r (vec3<double> position), h (smoothing length),
// mass, getPointer(), and an ngb container — assumed from usage here;
// confirm against the particle type.
template <class TreePtcl> class Tree{
	// NOTE(review): in-class initializers for static const *double* members
	// are non-standard C++ (pre-C++11 ill-formed; C++11 requires constexpr);
	// this relies on a compiler extension — confirm the target toolchain.
	static const std::size_t MAX_LEVEL = 20;     // max tree depth (Morton key uses 3 bits/level)
	static const std::size_t N_LEAF = 16;        // a node with <= N_LEAF particles is a leaf
	static const double EXPAND = 1.0;            // safety factor on kernel support radius
	static const double MAC = 0.5;//Multipole Acceptance Criterion
	typedef unsigned long long int ulli;

	// One tree cell.  Children are stored contiguously in `node`, starting
	// at index `more` (Nbranch of them); particles of a cell are the
	// contiguous ptcl_ptr range [head_ptcl, head_ptcl + Nptcl).
	struct node_t{
		vec3<double> mass_center, min, max;
		//mat33<double> Quad;
		double mass, size, radius, dmax;     // size: cube edge; radius: bounding radius incl. kernel support; dmax: max particle distance from mass_center
		std::size_t Nptcl, head_ptcl, more;
		unsigned short int Nbranch;
		unsigned int level;
		node_t(){
			Nptcl = 0;
			mass_center = 0;
			mass = 0;
		}
		~node_t(){
		}
		inline bool isEmpty() const{
			return (Nptcl == 0) ? true : false;
		}
		inline bool isLeaf(const unsigned int N) const{
			return (Nptcl <= N) ? true : false;
		}
		inline const node_t* const getPointer() const{
			return this;
		}
		inline node_t* getPointer(){
			return this;
		}
	};

	// (Morton key, particle) pair; sorting by key gives Z-order traversal.
	struct ptcl_ptr_t{
		ulli key;
		const TreePtcl* ptr;
		bool operator < (const ptcl_ptr_t& right) const{
			return key < right.key;
		}
		ptcl_ptr_t(const ulli _key, const TreePtcl* const _ptr) : key(_key), ptr(_ptr){
		}
	};

	//member variables;
	std::vector<node_t> node;          // cells in breadth-first order; node[0] is the root
	std::vector<ptcl_ptr_t> ptcl_ptr;  // particles sorted by Morton key

	//member functions;
	// Build the root cell: an axis-aligned cube that encloses every
	// particle's kernel support (r +/- kernel.width * h).
	node_t createRoot(const std::vector<TreePtcl >& ptcl, const kernel_t<double>& kernel){
		node_t root;
		root.min = + 1.0e+30;
		root.max = - 1.0e+30;
		root.mass_center = 0;
		root.mass = 0;
		root.Nptcl = ptcl.size();
		root.head_ptcl = 0;
		root.level = 0;
		root.radius = 0;
		root.dmax = 0;
		for(std::size_t i = 0 ; i < ptcl.size() ; ++ i){
			root.min.x = math::min(root.min.x, ptcl[i].r.x - kernel.width * ptcl[i].h);
			root.min.y = math::min(root.min.y, ptcl[i].r.y - kernel.width * ptcl[i].h);
			root.min.z = math::min(root.min.z, ptcl[i].r.z - kernel.width * ptcl[i].h);
			root.max.x = math::max(root.max.x, ptcl[i].r.x + kernel.width * ptcl[i].h);
			root.max.y = math::max(root.max.y, ptcl[i].r.y + kernel.width * ptcl[i].h);
			root.max.z = math::max(root.max.z, ptcl[i].r.z + kernel.width * ptcl[i].h);
		}
		root.size = math::max(math::max((root.max - root.min).y, (root.max - root.min).z), (root.max - root.min).x);
		return root;
	}

	//key maker
	// Spread the low 21 bits of x so that each bit lands 3 positions apart
	// (standard Morton-code bit dilation).
	inline ulli SpreadBits(ulli x) const{
		x = (x | (x << 32)) & 0x7fff00000000ffff; // 0b0111111111111111000000000000000000000000000000001111111111111111
		x = (x | (x << 16)) & 0x00ff0000ff0000ff; // 0b0000000011111111000000000000000011111111000000000000000011111111
		x = (x | (x <<  8)) & 0x700f00f00f00f00f; // 0b0111000000001111000000001111000000001111000000001111000000001111
		x = (x | (x <<  4)) & 0x30c30c30c30c30c3; // 0b0011000011000011000011000011000011000011000011000011000011000011
		x = (x | (x <<  2)) & 0x1249249249249249; // 0b0001001001001001001001001001001001001001001001001001001001001001
		return x;
	}
	// Interleave three grid coordinates into one 3D Morton key.
	inline ulli zorder3d(const ulli x, const ulli y, const ulli z) const{
		return (SpreadBits(x) | (SpreadBits(y) << 1) | (SpreadBits(z) << 2));
	}
	// Which of the 8 children (octant) a key falls into at the given level.
	inline unsigned int GetBranchId(const ulli key, const unsigned int level) const{
		return key >> (3 * MAX_LEVEL - 3 * level) & 0x7; // 0b111
	}
	// True when the particle's (expanded) kernel support can intersect the
	// cell's bounding sphere, under periodic boundary wrapping.
	inline bool isOverwrapping(const TreePtcl& ptcl, const kernel_t<double>& kernel, const node_t& node, const boundary_t<double>& boundary) const{
		return (abs(boundary.Periodic(ptcl.r - node.mass_center)) > math::max(node.radius, node.dmax + EXPAND * kernel.width * ptcl.h)) ? false : true;
	}
	// Multipole acceptance test: cell is "close" (must be opened) when
	// distance * MAC < cell size.
	inline bool isClose(const TreePtcl& ptcl, const node_t& node) const{
		return (abs(ptcl.r - node.mass_center) * MAC < node.size) ? true : false;
	}

	//public member functions
	public:
	Tree(){
		node.clear();
		ptcl_ptr.clear();
	}

	//make tree structure
	// Build the octree: compute Morton keys on a 2^MAX_LEVEL grid, sort,
	// then split cells breadth-first until each holds <= N_LEAF particles.
	void Plant(const std::vector<TreePtcl >& ptcl, const kernel_t<double>& kernel){
		//Clear;
		node.clear();
		ptcl_ptr.clear();
		ptcl_ptr.reserve(ptcl.size());
		//Set root domain;
		node.push_back(createRoot(ptcl, kernel));
		//Create particle pointer
		const ulli grid_size = 1 << MAX_LEVEL;
		for(std::size_t i = 0 ; i < ptcl.size() ; ++ i){
			struct{
				ulli x, y, z;
			} grid_address;
			grid_address.x = static_cast<ulli>((ptcl[i].r.x - node[0].min.x) / (node[0].max.x - node[0].min.x) * static_cast<double>(grid_size));
			grid_address.y = static_cast<ulli>((ptcl[i].r.y - node[0].min.y) / (node[0].max.y - node[0].min.y) * static_cast<double>(grid_size));
			grid_address.z = static_cast<ulli>((ptcl[i].r.z - node[0].min.z) / (node[0].max.z - node[0].min.z) * static_cast<double>(grid_size));
			const ulli zkey = zorder3d(grid_address.x, grid_address.y, grid_address.z);
			const TreePtcl* const ptr = ptcl[i].getPointer();
			ptcl_ptr.push_back(ptcl_ptr_t(zkey, ptr));
		}
		std::sort(ptcl_ptr.begin(), ptcl_ptr.end());
		//__gnu_parallel::sort(ptcl_ptr.begin(), ptcl_ptr.end());
		//Create tree structure;
		// node.size() grows inside this loop, so newly pushed children are
		// themselves visited and split as needed (breadth-first).
		for(std::size_t n = 0 ; n < node.size() ; ++ n){
			if(node[n].isLeaf(N_LEAF) == false){
				node[n].more = node.size();//
				//have a baby nodes
				node_t child[8];
				for(short unsigned int c = 0 ; c < 8 ; ++ c){
					child[c].size = 0.5 * node[n].size;
					child[c].level = node[n].level + 1;
					child[c].radius = 0.0;
					child[c].dmax = 0.0;
					child[c].Nbranch = 0;
					child[c].Nptcl = 0;
					child[c].more = 0;//NULL
				}
				// Count particles per octant (the key sort already groups them).
				for(std::size_t i = node[n].head_ptcl ; i < node[n].head_ptcl + node[n].Nptcl ; ++ i){
					const std::size_t c = GetBranchId(ptcl_ptr[i].key, node[n].level + 1);
					++ child[c].Nptcl;
				}
				// Prefix sums give each child its contiguous particle range.
				child[0].head_ptcl = node[n].head_ptcl;
				for(std::size_t c = 1 ; c < 8 ; ++ c){
					child[c].head_ptcl = child[c-1].head_ptcl + child[c-1].Nptcl;
				}
				node[n].Nbranch = 0;
				for(std::size_t c = 0 ; c < 8 ; ++ c){
					if(__builtin_expect(child[c].isEmpty() == true, 0)) continue;
					++ node[n].Nbranch;
					node.push_back(child[c]);
				}
			}
		}
	}

	//get cell properties
	// Compute mass, center of mass and bounding radii for every cell.
	void setCellProperties(const kernel_t<double>& kernel){
		//Create bounding box;
#if 0
		//Top Down;
		for(vector<node_t>::iterator cell = node.begin() ; cell != node.end() ; ++ cell){
			cell->mass_center = 0;
			cell->mass = 0;
			cell->radius = 0;
			cell->dmax = 0;
			for(std::size_t i = cell->head_ptcl ; i < cell->head_ptcl + cell->Nptcl ; ++ i){
				cell->mass_center += ptcl_ptr[i].ptr->r * ptcl_ptr[i].ptr->Q.mass;
				cell->mass += ptcl_ptr[i].ptr->Q.mass;
			}
			cell->mass_center /= cell->mass;
			for(std::size_t i = cell->head_ptcl ; i < cell->head_ptcl + cell->Nptcl ; ++ i){
				cell->radius = math::max(cell->radius, abs(cell->mass_center - ptcl_ptr[i].ptr->r) + EXPAND * kernel.width * ptcl_ptr[i].ptr->h);
				cell->dmax = math::max(cell->dmax , abs(cell->mass_center - ptcl_ptr[i].ptr->r));
			}
		}
#else
		//Bottom Up;
		// Reverse iteration visits children before parents, so parents can
		// aggregate child properties instead of re-scanning particles.
		for(typename std::vector<node_t>::reverse_iterator cell = node.rbegin() ; cell != node.rend() ; ++ cell){
			cell->mass_center = 0;
			cell->mass = 0;
			cell->radius = 0;
			cell->dmax = 0;
			if(__builtin_expect(cell->isLeaf(N_LEAF) == true, 0)){
				// Leaf: accumulate directly from particles.
				for(std::size_t i = cell->head_ptcl ; i < cell->head_ptcl + cell->Nptcl ; ++ i){
					cell->mass_center += ptcl_ptr[i].ptr->r * ptcl_ptr[i].ptr->mass;
					cell->mass += ptcl_ptr[i].ptr->mass;
				}
				cell->mass_center /= cell->mass;
				for(std::size_t i = cell->head_ptcl ; i < cell->head_ptcl + cell->Nptcl ; ++ i){
					const double dis = abs(cell->mass_center - ptcl_ptr[i].ptr->r);
					cell->radius = math::max(cell->radius, dis + EXPAND * kernel.width * ptcl_ptr[i].ptr->h);
					cell->dmax = math::max(cell->dmax , dis);
				}
			}else{
				// Internal: aggregate over already-computed children.
				for(std::size_t b = cell->more ; b < cell->more + cell->Nbranch ; ++ b){
					cell->mass_center += node[b].mass * node[b].mass_center;
					cell->mass += node[b].mass;
				}
				cell->mass_center /= cell->mass;
				for(std::size_t b = cell->more ; b < cell->more + cell->Nbranch ; ++ b){
					const double dis = abs(cell->mass_center - node[b].mass_center);
					cell->radius = math::max(cell->radius, dis + node[b].radius);
					cell->dmax = math::max(cell->dmax , dis + node[b].dmax );
				}
			}
		}
#endif
	}

	//set Ngb List
	// For each particle, walk the tree with an explicit stack and collect
	// all particles within the (symmetrized) kernel support.  The outer
	// loop is OpenMP-parallel; each particle only writes its own ngb list.
	void setNeighbourList(std::vector<TreePtcl >& ptcl, const kernel_t<double>& kernel, const boundary_t<double>& boundary){
		#pragma omp parallel for //schedule(guided)
		for(std::size_t i = 0 ; i < ptcl.size() ; ++ i){
			ptcl[i].ngb.clear();
			std::stack<const node_t*> stack;
			stack.push(node[0].getPointer());
			while(stack.empty() == false){
				const node_t* const cell = stack.top();
				stack.pop();
				if(isOverwrapping(ptcl[i], kernel, *cell, boundary) == true){
					if((*cell).isLeaf(N_LEAF) == true){
						for(std::size_t j = (*cell).head_ptcl ; j < (*cell).head_ptcl + (*cell).Nptcl ; ++ j){
							// Symmetric criterion: within max(h_i, h_j) support.
							if(abs(boundary.Periodic(ptcl[i].r - ptcl_ptr[j].ptr->r)) < kernel.width * std::max(ptcl[i].h, ptcl_ptr[j].ptr->h)) ptcl[i].ngb.push_back(ptcl_ptr[j].ptr);
							//ptcl[i].ngb.push_back(ptcl_ptr[j].ptr);
						}
					}else{
						for(std::size_t b = (*cell).more ; b < (*cell).more + (*cell).Nbranch ; ++ b) stack.push(node[b].getPointer());
					}
				}
			}
		}
	}

	//get gravity
	void setSelfGravity(std::vector<TreePtcl >& ptcl);

	//Dumping function
	// Write one line per cell: mass center (x y z), radius, dmax.
	void Dump(char const * const filename) const{
		FILE* fp = fopen(filename, "w");
		for(std::size_t i = 0 ; i < node.size() ; ++ i){
			fprintf(fp, "%e\t%e\t%e\t%e\t%e\n", node[i].mass_center.x, node[i].mass_center.y, node[i].mass_center.z, node[i].radius, node[i].dmax);
		}
		fclose(fp);
	}
};
MINDSSCbox.h
void boxfilter(float *input, float *temp1, float *temp2, int hw, int m, int n, int o) { int sz = m * n * o; for (int i = 0; i < sz; i++) { temp1[i] = input[i]; } for (int k = 0; k < o; k++) { for (int j = 0; j < n; j++) { for (int i = 1; i < m; i++) { temp1[i + j * m + k * m * n] += temp1[(i - 1) + j * m + k * m * n]; } } } for (int k = 0; k < o; k++) { for (int j = 0; j < n; j++) { for (int i = 0; i < (hw + 1); i++) { temp2[i + j * m + k * m * n] = temp1[(i + hw) + j * m + k * m * n]; } for (int i = (hw + 1); i < (m - hw); i++) { temp2[i + j * m + k * m * n] = temp1[(i + hw) + j * m + k * m * n] - temp1[(i - hw - 1) + j * m + k * m * n]; } for (int i = (m - hw); i < m; i++) { temp2[i + j * m + k * m * n] = temp1[(m - 1) + j * m + k * m * n] - temp1[(i - hw - 1) + j * m + k * m * n]; } } } for (int k = 0; k < o; k++) { for (int j = 1; j < n; j++) { for (int i = 0; i < m; i++) { temp2[i + j * m + k * m * n] += temp2[i + (j - 1) * m + k * m * n]; } } } for (int k = 0; k < o; k++) { for (int i = 0; i < m; i++) { for (int j = 0; j < (hw + 1); j++) { temp1[i + j * m + k * m * n] = temp2[i + (j + hw) * m + k * m * n]; } for (int j = (hw + 1); j < (n - hw); j++) { temp1[i + j * m + k * m * n] = temp2[i + (j + hw) * m + k * m * n] - temp2[i + (j - hw - 1) * m + k * m * n]; } for (int j = (n - hw); j < n; j++) { temp1[i + j * m + k * m * n] = temp2[i + (n - 1) * m + k * m * n] - temp2[i + (j - hw - 1) * m + k * m * n]; } } } for (int k = 1; k < o; k++) { for (int j = 0; j < n; j++) { for (int i = 0; i < m; i++) { temp1[i + j * m + k * m * n] += temp1[i + j * m + (k - 1) * m * n]; } } } for (int j = 0; j < n; j++) { for (int i = 0; i < m; i++) { for (int k = 0; k < (hw + 1); k++) { input[i + j * m + k * m * n] = temp1[i + j * m + (k + hw) * m * n]; } for (int k = (hw + 1); k < (o - hw); k++) { input[i + j * m + k * m * n] = temp1[i + j * m + (k + hw) * m * n] - temp1[i + j * m + (k - hw - 1) * m * n]; } for (int k = (o - hw); k < o; k++) { input[i + j * m + k * m * n] = 
temp1[i + j * m + (o - 1) * m * n] - temp1[i + j * m + (k - hw - 1) * m * n]; } } } } void imshift(float *input, float *output, int dx, int dy, int dz, int m, int n, int o) { for (int k = 0; k < o; k++) { for (int j = 0; j < n; j++) { for (int i = 0; i < m; i++) { if (i + dy >= 0 && i + dy < m && j + dx >= 0 && j + dx < n && k + dz >= 0 && k + dz < o) output[i + j * m + k * m * n] = input[i + dy + (j + dx) * m + (k + dz) * m * n]; else output[i + j * m + k * m * n] = input[i + j * m + k * m * n]; } } } } /*void *distances(void *threadarg) { struct mind_data *my_data; my_data = (struct mind_data *) threadarg; float* im1=my_data->im1; float* d1=my_data->d1; int qs=my_data->qs; int ind_d1=my_data->ind_d1; int m=image_m; int n=image_n; int o=image_o;*/ void distances(float *im1, float *d1, int m, int n, int o, int qs, int l) { int sz1 = m * n * o; float *w1 = new float[sz1]; int len1 = 6; float *temp1 = new float[sz1]; float *temp2 = new float[sz1]; int dx[6] = {+qs, +qs, -qs, +0, +qs, +0}; int dy[6] = {+qs, -qs, +0, -qs, +0, +qs}; int dz[6] = {0, +0, +qs, +qs, +qs, +qs}; imshift(im1, w1, dx[l], dy[l], dz[l], m, n, o); for (int i = 0; i < sz1; i++) { w1[i] = (w1[i] - im1[i]) * (w1[i] - im1[i]); } boxfilter(w1, temp1, temp2, qs, m, n, o); for (int i = 0; i < sz1; i++) { d1[i + l * sz1] = w1[i]; } delete temp1; delete temp2; delete w1; } //__builtin_popcountll(left[i]^right[i]); absolute hamming distances void descriptor(uint64_t *mindq, float *im1, int m, int n, int o, int qs) { // MIND with self-similarity context int dx[6] = {+qs, +qs, -qs, +0, +qs, +0}; int dy[6] = {+qs, -qs, +0, -qs, +0, +qs}; int dz[6] = {0, +0, +qs, +qs, +qs, +qs}; int sx[12] = {-qs, +0, -qs, +0, +0, +qs, +0, +0, +0, -qs, +0, +0}; int sy[12] = {+0, -qs, +0, +qs, +0, +0, +0, +qs, +0, +0, +0, -qs}; int sz[12] = {+0, +0, +0, +0, -qs, +0, -qs, +0, -qs, +0, -qs, +0}; int index[12] = {0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5}; float sigma = 0.75; // 1.0;//0.75;//1.5; int rho = ceil(sigma * 1.5) * 2 + 1; int 
len1 = 6; const int len2 = 12; image_d = 12; int d = 12; int sz1 = m * n * o; //============== DISTANCES USING BOXFILTER =================== float *d1 = new float[sz1 * len1]; auto time1 = chrono::steady_clock::now(); #pragma omp parallel for for (int l = 0; l < len1; l++) { distances(im1, d1, m, n, o, qs, l); } auto time2 = chrono::steady_clock::now(); float timeMIND1 = chrono::duration_cast<chrono::duration<float>>(time2 - time1).count(); time1 = chrono::steady_clock::now(); // quantisation table const int val = 6; const uint64_t power = 32; #pragma omp parallel for for (int k = 0; k < o; k++) { unsigned int tablei[6] = {0, 1, 3, 7, 15, 31}; float compare[val - 1]; for (int i = 0; i < val - 1; i++) { compare[i] = -log((i + 1.5f) / val); } float mind1[12]; for (int j = 0; j < n; j++) { for (int i = 0; i < m; i++) { for (int l = 0; l < len2; l++) { if (i + sy[l] >= 0 && i + sy[l] < m && j + sx[l] >= 0 && j + sx[l] < n && k + sz[l] >= 0 && k + sz[l] < o) { mind1[l] = d1[i + sy[l] + (j + sx[l]) * m + (k + sz[l]) * m * n + index[l] * sz1]; } else { mind1[l] = d1[i + j * m + k * m * n + index[l] * sz1]; } } float minval = *min_element(mind1, mind1 + len2); float sumnoise = 0.0f; for (int l = 0; l < len2; l++) { mind1[l] -= minval; sumnoise += mind1[l]; } float noise1 = max(sumnoise / (float)len2, 1e-6f); for (int l = 0; l < len2; l++) { mind1[l] /= noise1; } uint64_t accum = 0; uint64_t tabled1 = 1; for (int l = 0; l < len2; l++) { // mind1[l]=exp(-mind1[l]); int mind1val = 0; for (int c = 0; c < val - 1; c++) { mind1val += compare[c] > mind1[l] ? 1 : 0; } // int mind1val=min(max((int)(mind1[l]*val-0.5f),0),val-1); accum += tablei[mind1val] * tabled1; tabled1 *= power; } mindq[i + j * m + k * m * n] = accum; } } } time2 = chrono::steady_clock::now(); float timeMIND2 = chrono::duration_cast<chrono::duration<float>>(time2 - time1).count(); delete d1; }
GB_binop__max_fp32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__max_fp32
// A.*B function (eWiseMult):       GB_AemultB__max_fp32
// A*D function (colscale):         GB_AxD__max_fp32
// D*A function (rowscale):         GB_DxB__max_fp32
// C+=B function (dense accum):     GB_Cdense_accumB__max_fp32
// C+=b function (dense accum):     GB_Cdense_accumb__max_fp32
// C+=A+B function (dense ewise3):  GB_Cdense_ewise3_accum__max_fp32
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__max_fp32
// C=scalar+B                       GB_bind1st__max_fp32
// C=scalar+B'                      GB_bind1st_tran__max_fp32
// C=A+scalar                       GB_bind2nd__max_fp32
// C=A'+scalar                      GB_bind2nd_tran__max_fp32

// C type:   float
// A type:   float
// B,b type: float
// BinaryOp: cij = fmaxf (aij, bij)

// These macros parameterize the generic templates (GB_add_template.c etc.)
// for the MAX operator over single-precision floats.

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    float bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = fmaxf (x, y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MAX || GxB_NO_FP32 || GxB_NO_MAX_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense.  Note: unlike the other
// kernels in this file, this one returns void and has no GB_DISABLE guard
// (kept exactly as generated).
void GB_Cdense_ewise3_accum__max_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__max_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// The *_slice arrays describe how B has been partitioned across ntasks tasks.
GrB_Info GB_Cdense_accumB__max_fp32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__max_fp32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (the braced block above always returns);
    // harmless, and kept byte-identical since this file is auto-generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

// *_is_pattern is true when only the sparsity pattern of that matrix is used.
GrB_Info GB_AxD__max_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__max_fp32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// C_to_M / C_to_A / C_to_B map vectors of C to vectors of M, A, and B;
// TaskList describes the parallel partition of the work.
GrB_Info GB_AaddB__max_fp32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__max_fp32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__max_fp32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float   x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        float bij = Bx [p] ;
        Cx [p] = fmaxf (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__max_fp32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float   y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        float aij = Ax [p] ;
        Cx [p] = fmaxf (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    float aij = Ax [pA] ;               \
    Cx [pC] = fmaxf (x, aij) ;          \
}
GrB_Info GB_bind1st_tran__max_fp32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = fmaxf (aij, y) ; \ } GrB_Info GB_bind2nd_tran__max_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
compatibility.h
// -*- C++ -*- // Copyright (C) 2007-2016 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the terms // of the GNU General Public License as published by the Free Software // Foundation; either version 3, or (at your option) any later // version. // This library is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // General Public License for more details. // Under Section 7 of GPL version 3, you are granted additional // permissions described in the GCC Runtime Library Exception, version // 3.1, as published by the Free Software Foundation. // You should have received a copy of the GNU General Public License and // a copy of the GCC Runtime Library Exception along with this program; // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see // <http://www.gnu.org/licenses/>. /** @file parallel/compatibility.h * @brief Compatibility layer, mostly concerned with atomic operations. * * This file is a GNU parallel extension to the Standard C++ Library * and contains implementation details for the library's internal use. */ // Written by Felix Putze. #ifndef _GLIBCXX_PARALLEL_COMPATIBILITY_H #define _GLIBCXX_PARALLEL_COMPATIBILITY_H 1 #include <parallel/types.h> #include <parallel/base.h> #if !defined(_WIN32) || defined (__CYGWIN__) #include <sched.h> #endif #ifdef __MINGW32__ // Including <windows.h> will drag in all the windows32 names. Since // that can cause user code portability problems, we just declare the // one needed function here. 
extern "C" __attribute((dllimport)) void __attribute__((stdcall)) Sleep (unsigned long); #endif namespace __gnu_parallel { template<typename _Tp> inline _Tp __add_omp(volatile _Tp* __ptr, _Tp __addend) { int64_t __res; #pragma omp critical { __res = *__ptr; *(__ptr) += __addend; } return __res; } /** @brief Add a value to a variable, atomically. * * @param __ptr Pointer to a signed integer. * @param __addend Value to add. */ template<typename _Tp> inline _Tp __fetch_and_add(volatile _Tp* __ptr, _Tp __addend) { if (__atomic_always_lock_free(sizeof(_Tp), __ptr)) return __atomic_fetch_add(__ptr, __addend, __ATOMIC_ACQ_REL); return __add_omp(__ptr, __addend); } template<typename _Tp> inline bool __cas_omp(volatile _Tp* __ptr, _Tp __comparand, _Tp __replacement) { bool __res = false; #pragma omp critical { if (*__ptr == __comparand) { *__ptr = __replacement; __res = true; } } return __res; } /** @brief Compare-and-swap * * Compare @c *__ptr and @c __comparand. If equal, let @c * *__ptr=__replacement and return @c true, return @c false otherwise. * * @param __ptr Pointer to signed integer. * @param __comparand Compare value. * @param __replacement Replacement value. */ template<typename _Tp> inline bool __compare_and_swap(volatile _Tp* __ptr, _Tp __comparand, _Tp __replacement) { if (__atomic_always_lock_free(sizeof(_Tp), __ptr)) return __atomic_compare_exchange_n(__ptr, &__comparand, __replacement, false, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED); return __cas_omp(__ptr, __comparand, __replacement); } /** @brief Yield control to another thread, without waiting for * the end of the time slice. */ inline void __yield() { #if defined (_WIN32) && !defined (__CYGWIN__) Sleep(0); #else sched_yield(); #endif } } // end namespace #endif /* _GLIBCXX_PARALLEL_COMPATIBILITY_H */
GB_unaryop__minv_uint32_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__minv_uint32_fp64
// op(A') function: GB_tran__minv_uint32_fp64

// C type:   uint32_t
// A type:   double
// cast:     uint32_t cij ; GB_CAST_UNSIGNED(cij,aij,32)
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 32)

// Type/operator plugs consumed by the template files included below.

#define GB_ATYPE \
    double

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 32) ;

// casting
#define GB_CASTING(z, x) \
    uint32_t z ; GB_CAST_UNSIGNED(z,x,32) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT32 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__minv_uint32_fp64
(
    uint32_t *restrict Cx,
    const double *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_uint32_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
common.h
/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
#ifndef LIGHTGBM_UTILS_COMMON_FUN_H_
#define LIGHTGBM_UTILS_COMMON_FUN_H_

#include <LightGBM/utils/log.h>
#include <LightGBM/utils/openmp_wrapper.h>

#include <limits>
#include <string>
#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <functional>
#include <iomanip>
#include <iterator>
#include <map>
#include <memory>
#include <sstream>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>

#ifdef _MSC_VER
#include <intrin.h>
#pragma intrinsic(_BitScanReverse)
#endif

#if defined(_MSC_VER)
#include <malloc.h>
#elif MM_MALLOC
#include <mm_malloc.h>
#elif defined(__GNUC__)
#include <malloc.h>
#define _mm_malloc(a, b) memalign(b, a)
#define _mm_free(a) free(a)
#else
#include <stdlib.h>
#define _mm_malloc(a, b) malloc(a)
#define _mm_free(a) free(a)
#endif

namespace LightGBM {

namespace Common {

// ASCII-only lower-casing ('Z'-'z' == -32, so this adds 32 to A..Z).
inline static char tolower(char in) {
  if (in <= 'Z' && in >= 'A')
    return in - ('Z' - 'z');
  return in;
}

// Strip leading and trailing whitespace; returns the trimmed copy.
inline static std::string Trim(std::string str) {
  if (str.empty()) {
    return str;
  }
  str.erase(str.find_last_not_of(" \f\n\r\t\v") + 1);
  str.erase(0, str.find_first_not_of(" \f\n\r\t\v"));
  return str;
}

// Strip leading and trailing single/double quote characters.
inline static std::string RemoveQuotationSymbol(std::string str) {
  if (str.empty()) {
    return str;
  }
  str.erase(str.find_last_not_of("'\"") + 1);
  str.erase(0, str.find_first_not_of("'\""));
  return str;
}

// True if str begins with prefix.
inline static bool StartsWith(const std::string& str, const std::string prefix) {
  if (str.substr(0, prefix.size()) == prefix) {
    return true;
  } else {
    return false;
  }
}

// Split on a single delimiter character; empty tokens are dropped.
inline static std::vector<std::string> Split(const char* c_str, char delimiter) {
  std::vector<std::string> ret;
  std::string str(c_str);
  size_t i = 0;
  size_t pos = 0;
  while (pos < str.length()) {
    if (str[pos] == delimiter) {
      if (i < pos) {
        ret.push_back(str.substr(i, pos - i));
      }
      ++pos;
      i = pos;
    } else {
      ++pos;
    }
  }
  if (i < pos) {
    ret.push_back(str.substr(i));
  }
  return ret;
}

// Split on \n / \r line endings; empty lines are dropped.
inline static std::vector<std::string> SplitLines(const char* c_str) {
  std::vector<std::string> ret;
  std::string str(c_str);
  size_t i = 0;
  size_t pos = 0;
  while (pos < str.length()) {
    if (str[pos] == '\n' || str[pos] == '\r') {
      if (i < pos) {
        ret.push_back(str.substr(i, pos - i));
      }
      // skip the line endings
      while (str[pos] == '\n' || str[pos] == '\r') ++pos;
      // new begin
      i = pos;
    } else {
      ++pos;
    }
  }
  if (i < pos) {
    ret.push_back(str.substr(i));
  }
  return ret;
}

// Split on any character of the delimiter set; empty tokens are dropped.
inline static std::vector<std::string> Split(const char* c_str, const char* delimiters) {
  std::vector<std::string> ret;
  std::string str(c_str);
  size_t i = 0;
  size_t pos = 0;
  while (pos < str.length()) {
    bool met_delimiters = false;
    for (int j = 0; delimiters[j] != '\0'; ++j) {
      if (str[pos] == delimiters[j]) {
        met_delimiters = true;
        break;
      }
    }
    if (met_delimiters) {
      if (i < pos) {
        ret.push_back(str.substr(i, pos - i));
      }
      ++pos;
      i = pos;
    } else {
      ++pos;
    }
  }
  if (i < pos) {
    ret.push_back(str.substr(i));
  }
  return ret;
}

// Parse a signed decimal integer; skips surrounding spaces and returns the
// position after the parsed token.  No overflow checking.
template<typename T>
inline static const char* Atoi(const char* p, T* out) {
  int sign;
  T value;
  while (*p == ' ') {
    ++p;
  }
  sign = 1;
  if (*p == '-') {
    sign = -1;
    ++p;
  } else if (*p == '+') {
    ++p;
  }
  for (value = 0; *p >= '0' && *p <= '9'; ++p) {
    value = value * 10 + (*p - '0');
  }
  *out = static_cast<T>(sign * value);
  while (*p == ' ') {
    ++p;
  }
  return p;
}

// Integer power by repeated squaring/cubing; negative powers return 1/x^|n|.
template<typename T>
inline static double Pow(T base, int power) {
  if (power < 0) {
    return 1.0 / Pow(base, -power);
  } else if (power == 0) {
    return 1;
  } else if (power % 2 == 0) {
    return Pow(base*base, power / 2);
  } else if (power % 3 == 0) {
    return Pow(base*base*base, power / 3);
  } else {
    return base * Pow(base, power - 1);
  }
}

// Hand-rolled strtod-like parser.  Also accepts the textual tokens
// na/nan/null (-> NAN) and inf/infinity (-> +-1e308, NOT IEEE infinity —
// this clamping is deliberate in LightGBM).  Unknown tokens are fatal.
// Returns the position after the parsed token.
inline static const char* Atof(const char* p, double* out) {
  int frac;
  double sign, value, scale;
  *out = NAN;
  // Skip leading white space, if any.
  while (*p == ' ') {
    ++p;
  }
  // Get sign, if any.
  sign = 1.0;
  if (*p == '-') {
    sign = -1.0;
    ++p;
  } else if (*p == '+') {
    ++p;
  }
  // is a number
  if ((*p >= '0' && *p <= '9') || *p == '.' || *p == 'e' || *p == 'E') {
    // Get digits before decimal point or exponent, if any.
    for (value = 0.0; *p >= '0' && *p <= '9'; ++p) {
      value = value * 10.0 + (*p - '0');
    }
    // Get digits after decimal point, if any.
    if (*p == '.') {
      double right = 0.0;
      int nn = 0;
      ++p;
      while (*p >= '0' && *p <= '9') {
        right = (*p - '0') + right * 10.0;
        ++nn;
        ++p;
      }
      value += right / Pow(10.0, nn);
    }
    // Handle exponent, if any.
    frac = 0;
    scale = 1.0;
    if ((*p == 'e') || (*p == 'E')) {
      uint32_t expon;
      // Get sign of exponent, if any.
      ++p;
      if (*p == '-') {
        frac = 1;
        ++p;
      } else if (*p == '+') {
        ++p;
      }
      // Get digits of exponent, if any.
      for (expon = 0; *p >= '0' && *p <= '9'; ++p) {
        expon = expon * 10 + (*p - '0');
      }
      // clamp to the largest double decimal exponent
      if (expon > 308) expon = 308;
      // Calculate scaling factor.
      while (expon >= 50) { scale *= 1E50; expon -= 50; }
      while (expon >= 8) { scale *= 1E8; expon -= 8; }
      while (expon > 0) { scale *= 10.0; expon -= 1; }
    }
    // Return signed and scaled floating point result.
    *out = sign * (frac ? (value / scale) : (value * scale));
  } else {
    // not a digit: try the special textual tokens, delimited by
    // whitespace / ',' / ':' / end of string
    size_t cnt = 0;
    while (*(p + cnt) != '\0' && *(p + cnt) != ' '
           && *(p + cnt) != '\t' && *(p + cnt) != ','
           && *(p + cnt) != '\n' && *(p + cnt) != '\r'
           && *(p + cnt) != ':') {
      ++cnt;
    }
    if (cnt > 0) {
      std::string tmp_str(p, cnt);
      std::transform(tmp_str.begin(), tmp_str.end(), tmp_str.begin(), Common::tolower);
      if (tmp_str == std::string("na") || tmp_str == std::string("nan") ||
          tmp_str == std::string("null")) {
        *out = NAN;
      } else if (tmp_str == std::string("inf") || tmp_str == std::string("infinity")) {
        *out = sign * 1e308;
      } else {
        Log::Fatal("Unknown token %s in data file", tmp_str.c_str());
      }
      p += cnt;
    }
  }
  while (*p == ' ') {
    ++p;
  }
  return p;
}

// Parse an int and require that the whole string was consumed.
inline static bool AtoiAndCheck(const char* p, int* out) {
  const char* after = Atoi(p, out);
  if (*after != '\0') {
    return false;
  }
  return true;
}

// Parse a double and require that the whole string was consumed.
inline static bool AtofAndCheck(const char* p, double* out) {
  const char* after = Atof(p, out);
  if (*after != '\0') {
    return false;
  }
  return true;
}

// Number of decimal digits in n (1..10); uses bit-scan when available.
inline static unsigned CountDecimalDigit32(uint32_t n) {
#if defined(_MSC_VER) || defined(__GNUC__)
  static const uint32_t powers_of_10[] = {
    0,
    10,
    100,
    1000,
    10000,
    100000,
    1000000,
    10000000,
    100000000,
    1000000000
  };
#ifdef _MSC_VER
  // NOLINTNEXTLINE
  unsigned long i = 0;
  _BitScanReverse(&i, n | 1);
  uint32_t t = (i + 1) * 1233 >> 12;
#elif __GNUC__
  // log10(2) ~= 1233/4096: convert bit count to a digit-count estimate
  uint32_t t = (32 - __builtin_clz(n | 1)) * 1233 >> 12;
#endif
  return t - (n < powers_of_10[t]) + 1;
#else
  if (n < 10) return 1;
  if (n < 100) return 2;
  if (n < 1000) return 3;
  if (n < 10000) return 4;
  if (n < 100000) return 5;
  if (n < 1000000) return 6;
  if (n < 10000000) return 7;
  if (n < 100000000) return 8;
  if (n < 1000000000) return 9;
  return 10;
#endif
}

// Fast uint32 -> decimal string using a two-digit lookup table; writes the
// NUL terminator.  Caller must supply a buffer of at least 11 bytes.
inline static void Uint32ToStr(uint32_t value, char* buffer) {
  const char kDigitsLut[200] = {
    '0', '0', '0', '1', '0', '2', '0', '3', '0', '4', '0', '5', '0', '6', '0', '7', '0', '8', '0', '9',
    '1', '0', '1', '1', '1', '2', '1', '3', '1', '4', '1', '5', '1', '6', '1', '7', '1', '8', '1', '9',
    '2', '0', '2', '1', '2', '2', '2', '3', '2', '4', '2', '5', '2', '6', '2', '7', '2', '8', '2', '9',
    '3', '0', '3', '1', '3', '2', '3', '3', '3', '4', '3', '5', '3', '6', '3', '7', '3', '8', '3', '9',
    '4', '0', '4', '1', '4', '2', '4', '3', '4', '4', '4', '5', '4', '6', '4', '7', '4', '8', '4', '9',
    '5', '0', '5', '1', '5', '2', '5', '3', '5', '4', '5', '5', '5', '6', '5', '7', '5', '8', '5', '9',
    '6', '0', '6', '1', '6', '2', '6', '3', '6', '4', '6', '5', '6', '6', '6', '7', '6', '8', '6', '9',
    '7', '0', '7', '1', '7', '2', '7', '3', '7', '4', '7', '5', '7', '6', '7', '7', '7', '8', '7', '9',
    '8', '0', '8', '1', '8', '2', '8', '3', '8', '4', '8', '5', '8', '6', '8', '7', '8', '8', '8', '9',
    '9', '0', '9', '1', '9', '2', '9', '3', '9', '4', '9', '5', '9', '6', '9', '7', '9', '8', '9', '9'
  };
  unsigned digit = CountDecimalDigit32(value);
  buffer += digit;
  *buffer = '\0';
  // emit two digits at a time, right to left
  while (value >= 100) {
    const unsigned i = (value % 100) << 1;
    value /= 100;
    *--buffer = kDigitsLut[i + 1];
    *--buffer = kDigitsLut[i];
  }
  if (value < 10) {
    *--buffer = static_cast<char>(value) + '0';
  } else {
    const unsigned i = value << 1;
    *--buffer = kDigitsLut[i + 1];
    *--buffer = kDigitsLut[i];
  }
}

// int32 -> decimal string; negates via two's complement to handle INT32_MIN.
inline static void Int32ToStr(int32_t value, char* buffer) {
  uint32_t u = static_cast<uint32_t>(value);
  if (value < 0) {
    *buffer++ = '-';
    u = ~u + 1;
  }
  Uint32ToStr(u, buffer);
}

// Round-trip-safe double formatting (17 significant digits).
inline static void DoubleToStr(double value, char* buffer, size_t buffer_len) {
#ifdef _MSC_VER
  int num_chars = sprintf_s(buffer, buffer_len, "%.17g", value);
#else
  int num_chars = snprintf(buffer, buffer_len, "%.17g", value);
#endif
  CHECK_GE(num_chars, 0);
}

inline static const char* SkipSpaceAndTab(const char* p) {
  while (*p == ' ' || *p == '\t') {
    ++p;
  }
  return p;
}

// Skips newline characters and spaces.
inline static const char* SkipReturn(const char* p) {
  while (*p == '\n' || *p == '\r' || *p == ' ') {
    ++p;
  }
  return p;
}

// Element-wise static_cast of a vector<T> to vector<T2>.
template<typename T, typename T2>
inline static std::vector<T2> ArrayCast(const std::vector<T>& arr) {
  std::vector<T2> ret(arr.size());
  for (size_t i = 0; i < arr.size(); ++i) {
    ret[i] = static_cast<T2>(arr[i]);
  }
  return ret;
}

// Type-dispatched fast value formatting: signed ints, floats, unsigned ints.
template<typename T, bool is_float, bool is_unsign>
struct __TToStringHelperFast {
  void operator()(T value, char* buffer, size_t) const {
    Int32ToStr(value, buffer);
  }
};

template<typename T>
struct __TToStringHelperFast<T, true, false> {
  void operator()(T value, char* buffer, size_t buf_len) const {
#ifdef _MSC_VER
    int num_chars = sprintf_s(buffer, buf_len, "%g", value);
#else
    int num_chars = snprintf(buffer, buf_len, "%g", value);
#endif
    CHECK_GE(num_chars, 0);
  }
};

template<typename T>
struct __TToStringHelperFast<T, false, true> {
  void operator()(T value, char* buffer, size_t) const {
    Uint32ToStr(value, buffer);
  }
};

// Join the first n elements as a space-separated string ("%g" precision).
template<typename T>
inline static std::string ArrayToStringFast(const std::vector<T>& arr, size_t n) {
  if (arr.empty() || n == 0) {
    return std::string("");
  }
  __TToStringHelperFast<T, std::is_floating_point<T>::value, std::is_unsigned<T>::value> helper;
  const size_t buf_len = 16;
  std::vector<char> buffer(buf_len);
  std::stringstream str_buf;
  helper(arr[0], buffer.data(), buf_len);
  str_buf << buffer.data();
  for (size_t i = 1; i < std::min(n, arr.size()); ++i) {
    helper(arr[i], buffer.data(), buf_len);
    str_buf << ' ' << buffer.data();
  }
  return str_buf.str();
}

// Join the first n doubles as a space-separated string (full precision).
inline static std::string ArrayToString(const std::vector<double>& arr, size_t n) {
  if (arr.empty() || n == 0) {
    return std::string("");
  }
  const size_t buf_len = 32;
  std::vector<char> buffer(buf_len);
  std::stringstream str_buf;
  DoubleToStr(arr[0], buffer.data(), buf_len);
  str_buf << buffer.data();
  for (size_t i = 1; i < std::min(n, arr.size()); ++i) {
    DoubleToStr(arr[i], buffer.data(), buf_len);
    str_buf << ' ' << buffer.data();
  }
  return str_buf.str();
}

// Type-dispatched string -> value parsing (integers via Atoi, floats via stod).
template<typename T, bool is_float>
struct __StringToTHelper {
  T operator()(const std::string& str) const {
    T ret = 0;
    Atoi(str.c_str(), &ret);
    return ret;
  }
};

template<typename T>
struct __StringToTHelper<T, true> {
  T operator()(const std::string& str) const {
    return static_cast<T>(std::stod(str));
  }
};

// Split on delimiter and parse each token into T.
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, char delimiter) {
  std::vector<std::string> strs = Split(str.c_str(), delimiter);
  std::vector<T> ret;
  ret.reserve(strs.size());
  __StringToTHelper<T, std::is_floating_point<T>::value> helper;
  for (const auto& s : strs) {
    ret.push_back(helper(s));
  }
  return ret;
}

// Split on spaces and parse exactly n tokens into T (fatal on count mismatch).
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, int n) {
  if (n == 0) {
    return std::vector<T>();
  }
  std::vector<std::string> strs = Split(str.c_str(), ' ');
  CHECK_EQ(strs.size(), static_cast<size_t>(n));
  std::vector<T> ret;
  ret.reserve(strs.size());
  __StringToTHelper<T, std::is_floating_point<T>::value> helper;
  for (const auto& s : strs) {
    ret.push_back(helper(s));
  }
  return ret;
}

// In-place (pointer-advancing) variant of the parse helpers above.
template<typename T, bool is_float>
struct __StringToTHelperFast {
  const char* operator()(const char*p, T* out) const {
    return Atoi(p, out);
  }
};

template<typename T>
struct __StringToTHelperFast<T, true> {
  const char* operator()(const char*p, T* out) const {
    double tmp = 0.0f;
    auto ret = Atof(p, &tmp);
    *out = static_cast<T>(tmp);
    return ret;
  }
};

// Parse n space-separated values without building intermediate strings.
template<typename T>
inline static std::vector<T> StringToArrayFast(const std::string& str, int n) {
  if (n == 0) {
    return std::vector<T>();
  }
  auto p_str = str.c_str();
  __StringToTHelperFast<T, std::is_floating_point<T>::value> helper;
  std::vector<T> ret(n);
  for (int i = 0; i < n; ++i) {
    p_str = helper(p_str, &ret[i]);
  }
  return ret;
}

// Stream-join all elements with the delimiter (full double precision).
template<typename T>
inline static std::string Join(const std::vector<T>& strs, const char* delimiter) {
  if (strs.empty()) {
    return std::string("");
  }
  std::stringstream str_buf;
  str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
  str_buf << strs[0];
  for (size_t i = 1; i < strs.size(); ++i) {
    str_buf << delimiter;
    str_buf << strs[i];
  }
  return str_buf.str();
}

// int8_t would stream as a character, so widen to int16_t first.
template<>
inline std::string Join<int8_t>(const std::vector<int8_t>& strs, const char* delimiter) {
  if (strs.empty()) {
    return std::string("");
  }
  std::stringstream str_buf;
  str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
  str_buf << static_cast<int16_t>(strs[0]);
  for (size_t i = 1; i < strs.size(); ++i) {
    str_buf << delimiter;
    str_buf << static_cast<int16_t>(strs[i]);
  }
  return str_buf.str();
}

// Join elements in the half-open range [start, end), clamped to the vector.
// NOTE(review): `end - start <= 0` underflows for size_t when end < start,
// but both operands being size_t the comparison only catches end == start;
// callers presumably always pass start <= end — confirm before changing.
template<typename T>
inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter) {
  if (end - start <= 0) {
    return std::string("");
  }
  start = std::min(start, static_cast<size_t>(strs.size()) - 1);
  end = std::min(end, static_cast<size_t>(strs.size()));
  std::stringstream str_buf;
  str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
  str_buf << strs[start];
  for (size_t i = start + 1; i < end; ++i) {
    str_buf << delimiter;
    str_buf << strs[i];
  }
  return str_buf.str();
}

// Smallest power of two >= x; returns 0 if no int64 power of two fits.
inline static int64_t Pow2RoundUp(int64_t x) {
  int64_t t = 1;
  for (int i = 0; i < 64; ++i) {
    if (t >= x) {
      return t;
    }
    t <<= 1;
  }
  return 0;
}

/*!
 * \brief Do inplace softmax transformation on p_rec
 * \param p_rec The input/output vector of the values.
 */
inline static void Softmax(std::vector<double>* p_rec) {
  std::vector<double> &rec = *p_rec;
  // subtract the max for numerical stability before exponentiating
  double wmax = rec[0];
  for (size_t i = 1; i < rec.size(); ++i) {
    wmax = std::max(rec[i], wmax);
  }
  double wsum = 0.0f;
  for (size_t i = 0; i < rec.size(); ++i) {
    rec[i] = std::exp(rec[i] - wmax);
    wsum += rec[i];
  }
  for (size_t i = 0; i < rec.size(); ++i) {
    rec[i] /= static_cast<double>(wsum);
  }
}

// Softmax over a raw array: output[i] = exp(input[i]) / sum, max-shifted.
inline static void Softmax(const double* input, double* output, int len) {
  double wmax = input[0];
  for (int i = 1; i < len; ++i) {
    wmax = std::max(input[i], wmax);
  }
  double wsum = 0.0f;
  for (int i = 0; i < len; ++i) {
    output[i] = std::exp(input[i] - wmax);
    wsum += output[i];
  }
  for (int i = 0; i < len; ++i) {
    output[i] /= static_cast<double>(wsum);
  }
}

// Borrow raw const pointers from a vector of unique_ptr (no ownership change).
template<typename T>
std::vector<const T*> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>>& input) {
  std::vector<const T*> ret;
  for (auto t = input.begin(); t != input.end(); ++t) {
    ret.push_back(t->get());
  }
  return ret;
}

// Stable co-sort of keys[start..] and values[start..] by key.
template<typename T1, typename T2>
inline static void SortForPair(std::vector<T1>* keys, std::vector<T2>* values, size_t start, bool is_reverse = false) {
  std::vector<std::pair<T1, T2>> arr;
  auto& ref_key = *keys;
  auto& ref_value = *values;
  for (size_t i = start; i < keys->size(); ++i) {
    arr.emplace_back(ref_key[i], ref_value[i]);
  }
  if (!is_reverse) {
    std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
      return a.first < b.first;
    });
  } else {
    std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
      return a.first > b.first;
    });
  }
  for (size_t i = start; i < arr.size(); ++i) {
    ref_key[i] = arr[i].first;
    ref_value[i] = arr[i].second;
  }
}

// Collect the data() pointer of each inner vector (borrowed, non-owning).
template <typename T>
inline static std::vector<T*> Vector2Ptr(std::vector<std::vector<T>>* data) {
  std::vector<T*> ptr(data->size());
  auto& ref_data = *data;
  for (size_t i = 0; i < data->size(); ++i) {
    ptr[i] = ref_data[i].data();
  }
  return ptr;
}

// Collect the size of each inner vector as int.
template <typename T>
inline static std::vector<int> VectorSize(const std::vector<std::vector<T>>& data) {
  std::vector<int> ret(data.size());
  for (size_t i = 0; i < data.size(); ++i) {
    ret[i] = static_cast<int>(data[i].size());
  }
  return ret;
}

// Clamp NaN to 0 and +-inf (or huge values) to +-1e300.
inline static double AvoidInf(double x) {
  if (std::isnan(x)) {
    return 0.0;
  } else if (x >= 1e300) {
    return 1e300;
  } else if (x <= -1e300) {
    return -1e300;
  } else {
    return x;
  }
}

inline static float AvoidInf(float x) {
  if (std::isnan(x)) {
    return 0.0f;
  } else if (x >= 1e38) {
    return 1e38f;
  } else if (x <= -1e38) {
    return -1e38f;
  } else {
    return x;
  }
}

// Tag-dispatch helper: yields the iterator's value type as a (null) pointer.
template<typename _Iter>
inline static typename std::iterator_traits<_Iter>::value_type* IteratorValType(_Iter) {
  return (0);
}

// Parallel merge sort: sort ~equal chunks in parallel with std::sort, then
// pairwise std::merge passes until one run remains.  Falls back to a plain
// std::sort for small inputs or a single thread.
template<typename _RanIt, typename _Pr, typename _VTRanIt>
inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred, _VTRanIt*) {
  size_t len = _Last - _First;
  const size_t kMinInnerLen = 1024;
  int num_threads = OMP_NUM_THREADS();
  if (len <= kMinInnerLen || num_threads <= 1) {
    std::sort(_First, _Last, _Pred);
    return;
  }
  size_t inner_size = (len + num_threads - 1) / num_threads;
  inner_size = std::max(inner_size, kMinInnerLen);
  num_threads = static_cast<int>((len + inner_size - 1) / inner_size);
#pragma omp parallel for schedule(static, 1)
  for (int i = 0; i < num_threads; ++i) {
    size_t left = inner_size*i;
    size_t right = left + inner_size;
    right = std::min(right, len);
    if (right > left) {
      std::sort(_First + left, _First + right, _Pred);
    }
  }
  // Buffer for merge.
  std::vector<_VTRanIt> temp_buf(len);
  _RanIt buf = temp_buf.begin();
  size_t s = inner_size;
  // Recursive merge
  while (s < len) {
    int loop_size = static_cast<int>((len + s * 2 - 1) / (s * 2));
#pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < loop_size; ++i) {
      size_t left = i * 2 * s;
      size_t mid = left + s;
      size_t right = mid + s;
      right = std::min(len, right);
      if (mid >= right) { continue; }
      // copy the left run aside, then merge it with the right run in place
      std::copy(_First + left, _First + mid, buf + left);
      std::merge(buf + left, buf + mid, _First + mid, _First + right, _First + left, _Pred);
    }
    s *= 2;
  }
}

template<typename _RanIt, typename _Pr>
inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred) {
  return ParallelSort(_First, _Last, _Pred, IteratorValType(_First));
}

// Check that all y[] are in interval [ymin, ymax] (end points included); throws error if not.
// Processes elements in pairs so each pair costs one comparison plus at most
// two bound checks.
template <typename T>
inline static void CheckElementsIntervalClosed(const T *y, T ymin, T ymax, int ny, const char *callername) {
  auto fatal_msg = [&y, &ymin, &ymax, &callername](int i) {
    std::ostringstream os;
    os << "[%s]: does not tolerate element [#%i = " << y[i] << "] outside [" << ymin << ", " << ymax << "]";
    Log::Fatal(os.str().c_str(), callername, i);
  };
  for (int i = 1; i < ny; i += 2) {
    if (y[i - 1] < y[i]) {
      if (y[i - 1] < ymin) {
        fatal_msg(i - 1);
      } else if (y[i] > ymax) {
        fatal_msg(i);
      }
    } else {
      if (y[i - 1] > ymax) {
        fatal_msg(i - 1);
      } else if (y[i] < ymin) {
        fatal_msg(i);
      }
    }
  }
  if (ny & 1) {  // odd
    if (y[ny - 1] < ymin || y[ny - 1] > ymax) {
      fatal_msg(ny - 1);
    }
  }
}

// One-pass scan over array w with nw elements: find min, max and sum of elements;
// this is useful for checking weight requirements.
// One-pass pairwise scan over w[0..nw-1]: computes min, max and sum.
// Any of mi/ma/su may be nullptr to skip that output; the sum is cast to T2.
template <typename T1, typename T2>
inline static void ObtainMinMaxSum(const T1 *w, int nw, T1 *mi, T1 *ma, T2 *su) {
  T1 minw;
  T1 maxw;
  T1 sumw;
  int i;
  if (nw & 1) {  // odd: seed with w[0], pairs start at (w[1], w[2])
    minw = w[0];
    maxw = w[0];
    sumw = w[0];
    i = 2;
  } else {  // even: seed with the first pair, pairs continue at (w[2], w[3])
    if (w[0] < w[1]) {
      minw = w[0];
      maxw = w[1];
    } else {
      minw = w[1];
      maxw = w[0];
    }
    sumw = w[0] + w[1];
    i = 3;
  }
  // Order each pair first so min and max each need only one comparison.
  for (; i < nw; i += 2) {
    if (w[i - 1] < w[i]) {
      minw = std::min(minw, w[i - 1]);
      maxw = std::max(maxw, w[i]);
    } else {
      minw = std::min(minw, w[i]);
      maxw = std::max(maxw, w[i - 1]);
    }
    sumw += w[i - 1] + w[i];
  }
  if (mi != nullptr) {
    *mi = minw;
  }
  if (ma != nullptr) {
    *ma = maxw;
  }
  if (su != nullptr) {
    *su = static_cast<T2>(sumw);
  }
}

// Returns an all-zero bitset with capacity for n bits (32 bits per word).
inline static std::vector<uint32_t> EmptyBitset(int n) {
  int size = n / 32;
  if (n % 32 != 0) ++size;
  return std::vector<uint32_t>(size);
}

// Sets bit `val` in *vec, growing the word vector if needed.
template<typename T>
inline static void InsertBitset(std::vector<uint32_t>* vec, const T val) {
  auto& ref_v = *vec;
  int i1 = val / 32;
  int i2 = val % 32;
  if (static_cast<int>(vec->size()) < i1 + 1) {
    vec->resize(i1 + 1, 0);
  }
  ref_v[i1] |= (1 << i2);
}

// Builds a bitset with exactly the bits vals[0..n-1] set.
template<typename T>
inline static std::vector<uint32_t> ConstructBitset(const T* vals, int n) {
  std::vector<uint32_t> ret;
  for (int i = 0; i < n; ++i) {
    int i1 = vals[i] / 32;
    int i2 = vals[i] % 32;
    if (static_cast<int>(ret.size()) < i1 + 1) {
      ret.resize(i1 + 1, 0);
    }
    ret[i1] |= (1 << i2);
  }
  return ret;
}

// Tests bit `pos` in a bitset of n 32-bit words; out-of-range is false.
template<typename T>
inline static bool FindInBitset(const uint32_t* bits, int n, T pos) {
  int i1 = pos / 32;
  if (i1 >= n) {
    return false;
  }
  int i2 = pos % 32;
  return (bits[i1] >> i2) & 1;
}

// True when b <= a plus one ULP: a tolerant ordered (b <= a) comparison.
inline static bool CheckDoubleEqualOrdered(double a, double b) {
  double upper = std::nextafter(a, INFINITY);
  return b <= upper;
}

// Smallest representable double strictly greater than a.
inline static double GetDoubleUpperBound(double a) {
  return std::nextafter(a, INFINITY);;
}

// Length of the line starting at str, excluding the '\r'/'\n' terminator.
inline static size_t GetLine(const char* str) {
  auto start = str;
  while (*str != '\0' && *str != '\n' && *str != '\r') {
    ++str;
  }
  return str - start;
}

// Advances past one "\r", "\n" or "\r\n" sequence, if present.
inline static const char* SkipNewLine(const char* str) {
  if (*str == '\r') {
    ++str;
  }
  if (*str == '\n') {
    ++str;
  }
  return str;
}

// Sign of x: -1, 0 or +1.
template <typename T>
static int Sign(T x) {
  return (x > T(0)) - (x < T(0));
}

// log(x) for x > 0, -INFINITY otherwise (avoids NaN/domain errors).
template <typename T>
static T SafeLog(T x) {
  if (x > 0) {
    return std::log(x);
  } else {
    return -INFINITY;
  }
}

// Rejects strings containing JSON structural characters, so the string can
// be embedded verbatim in a JSON document without escaping.
inline bool CheckAllowedJSON(const std::string& s) {
  unsigned char char_code;
  for (auto c : s) {
    char_code = static_cast<unsigned char>(c);
    if (char_code == 34      // "
        || char_code == 44   // ,
        || char_code == 58   // :
        || char_code == 91   // [
        || char_code == 93   // ]
        || char_code == 123  // {
        || char_code == 125  // }
        ) {
      return false;
    }
  }
  return true;
}

// Rounds x to the nearest int by adding 0.5 and truncating.
// NOTE(review): for negative x this truncates toward zero rather than
// rounding to nearest — callers appear to pass non-negative values; confirm.
inline int RoundInt(double x) {
  return static_cast<int>(x + 0.5f);
}

// Minimal STL-compatible allocator returning N-byte-aligned storage via
// _mm_malloc/_mm_free.  Stateless: all instances compare equal, so storage
// from any instance may be freed by any other.
// (The "adress" spelling is part of the existing interface; kept as-is.)
template <typename T, std::size_t N = 32>
class AlignmentAllocator {
 public:
  typedef T value_type;
  typedef std::size_t size_type;
  typedef std::ptrdiff_t difference_type;
  typedef T* pointer;
  typedef const T* const_pointer;
  typedef T& reference;
  typedef const T& const_reference;
  inline AlignmentAllocator() throw() {}
  template <typename T2>
  inline AlignmentAllocator(const AlignmentAllocator<T2, N>&) throw() {}
  inline ~AlignmentAllocator() throw() {}
  inline pointer adress(reference r) {
    return &r;
  }
  inline const_pointer adress(const_reference r) const {
    return &r;
  }
  inline pointer allocate(size_type n) {
    return (pointer)_mm_malloc(n * sizeof(value_type), N);
  }
  inline void deallocate(pointer p, size_type) {
    _mm_free(p);
  }
  inline void construct(pointer p, const value_type& wert) {
    new (p) value_type(wert);
  }
  inline void destroy(pointer p) {
    p->~value_type();
  }
  inline size_type max_size() const throw() {
    return size_type(-1) / sizeof(value_type);
  }
  template <typename T2>
  struct rebind {
    typedef AlignmentAllocator<T2, N> other;
  };
  bool operator!=(const AlignmentAllocator<T, N>& other) const {
    return !(*this == other);
  }
  // Returns true if and only if storage allocated from *this
  // can be deallocated from other, and vice versa.
  // Always returns true for stateless allocators.
  bool operator==(const AlignmentAllocator<T, N>&) const {
    return true;
  }
};

// Per-thread named stopwatch.  All timing is compiled out unless TIMETAG is
// defined; aggregated totals are printed when the Timer is destroyed.
class Timer {
 public:
  Timer() {
#ifdef TIMETAG
    int num_threads = OMP_NUM_THREADS();
    start_time_.resize(num_threads);
    stats_.resize(num_threads);
#endif  // TIMETAG
  }
  ~Timer() {
    Print();
  }
#ifdef TIMETAG
  // Records the start instant of timer `name` for the calling thread.
  void Start(const std::string& name) {
    auto tid = omp_get_thread_num();
    start_time_[tid][name] = std::chrono::steady_clock::now();
  }
  // Accumulates elapsed time since the matching Start on this thread.
  void Stop(const std::string& name) {
    auto cur_time = std::chrono::steady_clock::now();
    auto tid = omp_get_thread_num();
    if (stats_[tid].find(name) == stats_[tid].end()) {
      stats_[tid][name] = std::chrono::duration<double, std::milli>(0);
    }
    stats_[tid][name] += cur_time - start_time_[tid][name];
  }
#else
  void Start(const std::string&) {}
  void Stop(const std::string&) {}
#endif  // TIMETAG
  // Merges per-thread stats and logs each timer's total, ordered by name.
  void Print() const {
#ifdef TIMETAG
    std::unordered_map<std::string, std::chrono::duration<double, std::milli>> stats(stats_[0].begin(), stats_[0].end());
    for (size_t i = 1; i < stats_.size(); ++i) {
      for (auto it = stats_[i].begin(); it != stats_[i].end(); ++it) {
        if (stats.find(it->first) == stats.end()) {
          stats[it->first] = it->second;
        } else {
          stats[it->first] += it->second;
        }
      }
    }
    std::map<std::string, std::chrono::duration<double, std::milli>> ordered(
        stats.begin(), stats.end());
    for (auto it = ordered.begin(); it != ordered.end(); ++it) {
      Log::Info("%s costs:\t %f", it->first.c_str(), it->second * 1e-3);
    }
#endif  // TIMETAG
  }
#ifdef TIMETAG
  std::vector<std::unordered_map<std::string, std::chrono::steady_clock::time_point>> start_time_;
  std::vector<std::unordered_map<std::string, std::chrono::duration<double, std::milli>>> stats_;
#endif  // TIMETAG
};

// Note: this class is not thread-safe, don't use it inside omp blocks
// Scoped helper: Start(name) on construction, Stop(name) on destruction.
class FunctionTimer {
 public:
#ifdef TIMETAG
  FunctionTimer(const std::string& name, Timer& timer) : timer_(timer) {
    timer.Start(name);
    name_ = name;
  }
  ~FunctionTimer() {
    timer_.Stop(name_);
  }

 private:
  std::string name_;
  Timer& timer_;
#else
  FunctionTimer(const std::string&, Timer&) {}
#endif  // TIMETAG
};

}  // namespace Common

extern Common::Timer global_timer;

}  // namespace LightGBM

#endif   // LightGBM_UTILS_COMMON_FUN_H_
api.c
// RUN: %libomptarget-compile-run-and-check-generic
// XFAIL: nvptx64-nvidia-cuda
// XFAIL: nvptx64-nvidia-cuda-newRTL
// Fails on amdgcn with error: GPU Memory Error
// XFAIL: amdgcn-amd-amdhsa
// XFAIL: amdgcn-amd-amdhsa-newRTL

// Lit test for the OpenMP device-memory API under unified shared memory:
// omp_target_alloc/free, omp_target_memcpy, omp_target_is_present, and
// omp_target_associate_ptr/disassociate_ptr.  Output is verified by the
// FileCheck CHECK lines below, so printf strings must not be altered.

#include <stdio.h>
#include <omp.h>

// ---------------------------------------------------------------------------
// Various definitions copied from OpenMP RTL

extern void __tgt_register_requires(int64_t);

// End of definitions copied from OpenMP RTL.
// ---------------------------------------------------------------------------

#pragma omp requires unified_shared_memory

#define N 1024

// Fill the three test arrays: A cleared, B all ones, C the ramp 0..N-1.
void init(int A[], int B[], int C[]) {
  for (int i = 0; i < N; ++i) {
    A[i] = 0;
    B[i] = 1;
    C[i] = i;
  }
}

int main(int argc, char *argv[]) {
  const int device = omp_get_default_device();

  // Manual registration of requires flags for Clang versions
  // that do not support requires.
  __tgt_register_requires(8);

  // CHECK: Initial device: [[INITIAL_DEVICE:[0-9]+]]
  printf("Initial device: %d\n", omp_get_initial_device());

  // CHECK: Num devices: [[INITIAL_DEVICE]]
  printf("Num devices: %d\n", omp_get_num_devices());

  //
  // Target alloc & target memcpy
  //
  int A[N], B[N], C[N];

  // Init
  init(A, B, C);

  int *pA, *pB, *pC;

  // map ptrs
  pA = &A[0];
  pB = &B[0];
  pC = &C[0];

  int *d_A = (int *)omp_target_alloc(N * sizeof(int), device);
  int *d_B = (int *)omp_target_alloc(N * sizeof(int), device);
  int *d_C = (int *)omp_target_alloc(N * sizeof(int), device);

  // CHECK: omp_target_alloc succeeded
  printf("omp_target_alloc %s\n", d_A && d_B && d_C ? "succeeded" : "failed");

  // Stage the two inputs on the device (host -> device copies).
  omp_target_memcpy(d_B, pB, N * sizeof(int), 0, 0, device,
                    omp_get_initial_device());
  omp_target_memcpy(d_C, pC, N * sizeof(int), 0, 0, device,
                    omp_get_initial_device());

  // Compute d_A[i] = d_B[i] + d_C[i] + 1 = i + 2 on the device.
#pragma omp target is_device_ptr(d_A, d_B, d_C) device(device)
  {
#pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < N; i++) {
      d_A[i] = d_B[i] + d_C[i] + 1;
    }
  }

  // Copy the result back (device -> host).
  omp_target_memcpy(pA, d_A, N * sizeof(int), 0, 0, omp_get_initial_device(),
                    device);

  // CHECK: Test omp_target_memcpy: Succeeded
  int fail = 0;
  for (int i = 0; i < N; ++i) {
    if (A[i] != i + 2)
      fail++;
  }
  if (fail) {
    printf("Test omp_target_memcpy: Failed\n");
  } else {
    printf("Test omp_target_memcpy: Succeeded\n");
  }

  //
  // target_is_present and target_associate/disassociate_ptr
  //
  init(A, B, C);

  // CHECK: B is not present, associating it...
  // CHECK: omp_target_associate_ptr B succeeded
  if (!omp_target_is_present(B, device)) {
    printf("B is not present, associating it...\n");
    int rc = omp_target_associate_ptr(B, d_B, N * sizeof(int), 0, device);
    printf("omp_target_associate_ptr B %s\n", !rc ? "succeeded" : "failed");
  }

  // CHECK: C is not present, associating it...
  // CHECK: omp_target_associate_ptr C succeeded
  if (!omp_target_is_present(C, device)) {
    printf("C is not present, associating it...\n");
    int rc = omp_target_associate_ptr(C, d_C, N * sizeof(int), 0, device);
    printf("omp_target_associate_ptr C %s\n", !rc ? "succeeded" : "failed");
  }

  // A was never associated, so only B and C should be present here.
  // CHECK: Inside target data: A is not present
  // CHECK: Inside target data: B is present
  // CHECK: Inside target data: C is present
#pragma omp target data map(from : B, C) device(device)
  {
    printf("Inside target data: A is%s present\n",
           omp_target_is_present(A, device) ? "" : " not");
    printf("Inside target data: B is%s present\n",
           omp_target_is_present(B, device) ? "" : " not");
    printf("Inside target data: C is%s present\n",
           omp_target_is_present(C, device) ? "" : " not");

#pragma omp target map(from : A) device(device)
    {
#pragma omp parallel for schedule(static, 1)
      for (int i = 0; i < N; i++)
        A[i] = B[i] + C[i] + 1;
    }
  }

  // CHECK: B is present, disassociating it...
  // CHECK: omp_target_disassociate_ptr B succeeded
  // CHECK: C is present, disassociating it...
  // CHECK: omp_target_disassociate_ptr C succeeded
  if (omp_target_is_present(B, device)) {
    printf("B is present, disassociating it...\n");
    int rc = omp_target_disassociate_ptr(B, device);
    printf("omp_target_disassociate_ptr B %s\n", !rc ? "succeeded" : "failed");
  }
  if (omp_target_is_present(C, device)) {
    printf("C is present, disassociating it...\n");
    int rc = omp_target_disassociate_ptr(C, device);
    printf("omp_target_disassociate_ptr C %s\n", !rc ? "succeeded" : "failed");
  }

  // CHECK: Test omp_target_associate_ptr: Succeeded
  fail = 0;
  for (int i = 0; i < N; ++i) {
    if (A[i] != i + 2)
      fail++;
  }
  if (fail) {
    printf("Test omp_target_associate_ptr: Failed\n");
  } else {
    printf("Test omp_target_associate_ptr: Succeeded\n");
  }

  omp_target_free(d_A, device);
  omp_target_free(d_B, device);
  omp_target_free(d_C, device);

  printf("Done!\n");

  return 0;
}
quantize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE % % Q Q U U A A NN N T I ZZ E % % Q Q U U AAAAA N N N T I ZZZ EEEEE % % Q QQ U U A A N NN T I ZZ E % % QQQQ UUU A A N N T IIIII ZZZZZ EEEEE % % % % % % MagickCore Methods to Reduce the Number of Unique Colors in an Image % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Realism in computer graphics typically requires using 24 bits/pixel to % generate an image. Yet many graphic display devices do not contain the % amount of memory necessary to match the spatial and color resolution of % the human eye. The Quantize methods takes a 24 bit image and reduces % the number of colors so it can be displayed on raster device with less % bits per pixel. In most instances, the quantized image closely % resembles the original reference image. % % A reduction of colors in an image is also desirable for image % transmission and real-time animation. % % QuantizeImage() takes a standard RGB or monochrome images and quantizes % them down to some fixed number of colors. % % For purposes of color allocation, an image is a set of n pixels, where % each pixel is a point in RGB space. 
RGB space is a 3-dimensional % vector space, and each pixel, Pi, is defined by an ordered triple of % red, green, and blue coordinates, (Ri, Gi, Bi). % % Each primary color component (red, green, or blue) represents an % intensity which varies linearly from 0 to a maximum value, Cmax, which % corresponds to full saturation of that color. Color allocation is % defined over a domain consisting of the cube in RGB space with opposite % vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax = % 255. % % The algorithm maps this domain onto a tree in which each node % represents a cube within that domain. In the following discussion % these cubes are defined by the coordinate of two opposite vertices (vertex % nearest the origin in RGB space and the vertex farthest from the origin). % % The tree's root node represents the entire domain, (0,0,0) through % (Cmax,Cmax,Cmax). Each lower level in the tree is generated by % subdividing one node's cube into eight smaller cubes of equal size. % This corresponds to bisecting the parent cube with planes passing % through the midpoints of each edge. % % The basic algorithm operates in three phases: Classification, % Reduction, and Assignment. Classification builds a color description % tree for the image. Reduction collapses the tree until the number it % represents, at most, the number of colors desired in the output image. % Assignment defines the output image's color map and sets each pixel's % color by restorage_class in the reduced tree. Our goal is to minimize % the numerical discrepancies between the original colors and quantized % colors (quantization error). % % Classification begins by initializing a color description tree of % sufficient depth to represent each possible input color in a leaf. % However, it is impractical to generate a fully-formed color description % tree in the storage_class phase for realistic values of Cmax. 
If % colors components in the input image are quantized to k-bit precision, % so that Cmax= 2k-1, the tree would need k levels below the root node to % allow representing each possible input color in a leaf. This becomes % prohibitive because the tree's total number of nodes is 1 + % sum(i=1, k, 8k). % % A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255. % Therefore, to avoid building a fully populated tree, QUANTIZE: (1) % Initializes data structures for nodes only as they are needed; (2) % Chooses a maximum depth for the tree as a function of the desired % number of colors in the output image (currently log2(colormap size)). % % For each pixel in the input image, storage_class scans downward from % the root of the color description tree. At each level of the tree it % identifies the single node which represents a cube in RGB space % containing the pixel's color. It updates the following data for each % such node: % % n1: Number of pixels whose color is contained in the RGB cube which % this node represents; % % n2: Number of pixels whose color is not represented in a node at % lower depth in the tree; initially, n2 = 0 for all nodes except % leaves of the tree. % % Sr, Sg, Sb: Sums of the red, green, and blue component values for all % pixels not classified at a lower depth. The combination of these sums % and n2 will ultimately characterize the mean color of a set of pixels % represented by this node. % % E: the distance squared in RGB space between each pixel contained % within a node and the nodes' center. This represents the % quantization error for a node. % % Reduction repeatedly prunes the tree until the number of nodes with n2 % > 0 is less than or equal to the maximum number of colors allowed in % the output image. On any given iteration over the tree, it selects % those nodes whose E count is minimal for pruning and merges their color % statistics upward. 
It uses a pruning threshold, Ep, to govern node % selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors within % the cubic volume which the node represents. This includes n1 - n2 % pixels whose colors should be defined by nodes at a lower level in the % tree. % % Assignment generates the output image from the pruned tree. The output % image consists of two parts: (1) A color map, which is an array of % color descriptions (RGB triples) for each color present in the output % image; (2) A pixel array, which represents each pixel as an index % into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. 
% % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. % % This method is based on a similar algorithm written by Paul Raveling. % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/compare.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/histogram.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" /* Define declarations. */ #if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE) #define CacheShift 2 #else #define CacheShift 3 #endif #define ErrorQueueLength 16 #define ErrorRelativeWeight PerceptibleReciprocal(16) #define MaxNodes 266817 #define MaxTreeDepth 8 #define NodesInAList 1920 /* Typdef declarations. 
*/ typedef struct _DoublePixelPacket { double red, green, blue, alpha; } DoublePixelPacket; typedef struct _NodeInfo { struct _NodeInfo *parent, *child[16]; MagickSizeType number_unique; DoublePixelPacket total_color; double quantize_error; size_t color_number, id, level; } NodeInfo; typedef struct _Nodes { NodeInfo *nodes; struct _Nodes *next; } Nodes; typedef struct _CubeInfo { NodeInfo *root; size_t colors, maximum_colors; ssize_t transparent_index; MagickSizeType transparent_pixels; DoublePixelPacket target; double distance, pruning_threshold, next_threshold; size_t nodes, free_nodes, color_number; NodeInfo *next_node; Nodes *node_queue; MemoryInfo *memory_info; ssize_t *cache; DoublePixelPacket error[ErrorQueueLength]; double diffusion, weights[ErrorQueueLength]; QuantizeInfo *quantize_info; MagickBooleanType associate_alpha; ssize_t x, y; size_t depth; MagickOffsetType offset; MagickSizeType span; } CubeInfo; /* Method prototypes. */ static CubeInfo *GetCubeInfo(const QuantizeInfo *,const size_t,const size_t); static NodeInfo *GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *); static MagickBooleanType AssignImageColors(Image *,CubeInfo *,ExceptionInfo *), ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *), DitherImage(Image *,CubeInfo *,ExceptionInfo *), SetGrayscaleImage(Image *,ExceptionInfo *), SetImageColormap(Image *,CubeInfo *,ExceptionInfo *); static void ClosestColor(const Image *,CubeInfo *,const NodeInfo *), DefineImageColormap(Image *,CubeInfo *,NodeInfo *), DestroyCubeInfo(CubeInfo *), PruneLevel(CubeInfo *,const NodeInfo *), PruneToCubeDepth(CubeInfo *,const NodeInfo *), ReduceImageColors(const Image *,CubeInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireQuantizeInfo() allocates the QuantizeInfo structure. 
%
%  The format of the AcquireQuantizeInfo method is:
%
%      QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  QuantizeInfo
    *quantize_info;

  /* AcquireCriticalMemory() aborts on failure, so no NULL check is needed. */
  quantize_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*quantize_info));
  GetQuantizeInfo(quantize_info);
  if (image_info != (ImageInfo *) NULL)
    {
      const char
        *option;

      /*
        Inherit the dither setting from the image info; an explicit "dither"
        image option (if set) overrides it.
      */
      quantize_info->dither_method=image_info->dither == MagickFalse ?
        NoDitherMethod : RiemersmaDitherMethod;
      option=GetImageOption(image_info,"dither");
      if (option != (const char *) NULL)
        quantize_info->dither_method=(DitherMethod) ParseCommandOption(
          MagickDitherOptions,MagickFalse,option);
      quantize_info->measure_error=image_info->verbose;
    }
  return(quantize_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   A s s i g n I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AssignImageColors() generates the output image from the pruned tree.  The
%  output image consists of two parts: (1)  A color map, which is an array
%  of color descriptions (RGB triples) for each color present in the
%  output image;  (2)  A pixel array, which represents each pixel as an
%  index into the color map array.
%
%  First, the assignment phase makes one pass over the pruned color
%  description tree to establish the image's color map.  For each node
%  with n2  > 0, it divides Sr, Sg, and Sb by n2 .  This produces the mean
%  color of all pixels that classify no lower than this node.  Each of
%  these colors becomes an entry in the color map.
%
%  Finally,  the assignment phase reclassifies each pixel in the pruned
%  tree to identify the deepest node containing the pixel's color.  The
%  pixel's value in the pixel array becomes the index of this node's mean
%  color in the color map.
%
%  The format of the AssignImageColors() method is:
%
%      MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
*/

/*
  Load a pixel into a DoublePixelPacket.  When alpha is associated and the
  pixel is not fully opaque, the color channels are pre-multiplied by the
  normalized alpha value.
*/
static inline void AssociateAlphaPixel(const Image *image,
  const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel)
{
  double
    alpha;

  if ((cube_info->associate_alpha == MagickFalse) ||
      (GetPixelAlpha(image,pixel) == OpaqueAlpha))
    {
      alpha_pixel->red=(double) GetPixelRed(image,pixel);
      alpha_pixel->green=(double) GetPixelGreen(image,pixel);
      alpha_pixel->blue=(double) GetPixelBlue(image,pixel);
      alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
      return;
    }
  alpha=(double) (QuantumScale*GetPixelAlpha(image,pixel));
  alpha_pixel->red=alpha*GetPixelRed(image,pixel);
  alpha_pixel->green=alpha*GetPixelGreen(image,pixel);
  alpha_pixel->blue=alpha*GetPixelBlue(image,pixel);
  alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
}

/* Same conversion as AssociateAlphaPixel(), but from a PixelInfo source. */
static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info,
  const PixelInfo *pixel,DoublePixelPacket *alpha_pixel)
{
  double
    alpha;

  if ((cube_info->associate_alpha == MagickFalse) ||
      (pixel->alpha == OpaqueAlpha))
    {
      alpha_pixel->red=(double) pixel->red;
      alpha_pixel->green=(double) pixel->green;
      alpha_pixel->blue=(double) pixel->blue;
      alpha_pixel->alpha=(double) pixel->alpha;
      return;
    }
  alpha=(double) (QuantumScale*pixel->alpha);
  alpha_pixel->red=alpha*pixel->red;
  alpha_pixel->green=alpha*pixel->green;
  alpha_pixel->blue=alpha*pixel->blue;
  alpha_pixel->alpha=(double) pixel->alpha;
}

/*
  Select the child id (0..7, or 0..15 with alpha) at tree level `index`:
  one bit per channel, taken from bit `index` of the 8-bit-scaled channel.
*/
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
  const DoublePixelPacket *pixel,size_t index)
{
  size_t
    id;

  id=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) & 0x01) |
    ((ScaleQuantumToChar(ClampPixel(pixel->green)) >> index) & 0x01) << 1 |
    ((ScaleQuantumToChar(ClampPixel(pixel->blue)) >> index) & 0x01) << 2);
  if (cube_info->associate_alpha != MagickFalse)
    id|=((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >> index) & 0x1) << 3;
  return(id);
}

static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define AssignImageTag  "Assign/Image"

  ColorspaceType
    colorspace;

  ssize_t
    y;

  /*
    Allocate image colormap.
  */
  colorspace=image->colorspace;
  if (cube_info->quantize_info->colorspace != UndefinedColorspace)
    (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace,
      exception);
  cube_info->transparent_pixels=0;
  cube_info->transparent_index=(-1);
  if (SetImageColormap(image,cube_info,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Create a reduced color image.
  */
  if (cube_info->quantize_info->dither_method != NoDitherMethod)
    (void) DitherImage(image,cube_info,exception);
  else
    {
      CacheView
        *image_view;

      MagickBooleanType
        status;

      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CubeInfo
          cube;

        Quantum
          *magick_restrict q;

        ssize_t
          count,
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        /* Each thread works on a private copy of the cube state. */
        cube=(*cube_info);
        for (x=0; x < (ssize_t) image->columns; x+=count)
        {
          DoublePixelPacket
            pixel;

          const NodeInfo
            *node_info;

          ssize_t
            i;

          size_t
            id,
            index;

          /*
            Identify the deepest node containing the pixel's color.
          */
          /* Run-length: count identical adjacent pixels to classify once. */
          for (count=1; (x+count) < (ssize_t) image->columns; count++)
          {
            PixelInfo
              packet;

            GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet);
            if (IsPixelEquivalent(image,q,&packet) == MagickFalse)
              break;
          }
          AssociateAlphaPixel(image,&cube,q,&pixel);
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          index=cube.color_number;
          for (i=0; i < (ssize_t) count; i++)
          {
            if (image->storage_class == PseudoClass)
              SetPixelIndex(image,(Quantum) index,q);
            if (cube.quantize_info->measure_error == MagickFalse)
              {
                SetPixelRed(image,ClampToQuantum(
                  image->colormap[index].red),q);
                SetPixelGreen(image,ClampToQuantum(
                  image->colormap[index].green),q);
                SetPixelBlue(image,ClampToQuantum(
                  image->colormap[index].blue),q);
                if (cube.associate_alpha != MagickFalse)
                  SetPixelAlpha(image,ClampToQuantum(
                    image->colormap[index].alpha),q);
              }
            q+=GetPixelChannels(image);
          }
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
    }
  if (cube_info->quantize_info->measure_error != MagickFalse)
    (void) GetImageQuantizeError(image,exception);
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    {
      double
        intensity;

      /*
        Monochrome image: force the two colormap entries to black/white,
        ordered by luma.
      */
      intensity=GetPixelInfoLuma(image->colormap+0) < QuantumRange/2.0 ? 0.0 :
        QuantumRange;
      if (image->colors > 1)
        {
          intensity=0.0;
          if (GetPixelInfoLuma(image->colormap+0) >
              GetPixelInfoLuma(image->colormap+1))
            intensity=(double) QuantumRange;
        }
      image->colormap[0].red=intensity;
      image->colormap[0].green=intensity;
      image->colormap[0].blue=intensity;
      if (image->colors > 1)
        {
          image->colormap[1].red=(double) QuantumRange-intensity;
          image->colormap[1].green=(double) QuantumRange-intensity;
          image->colormap[1].blue=(double) QuantumRange-intensity;
        }
    }
  (void) SyncImage(image,exception);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (IssRGBCompatibleColorspace(colorspace) == MagickFalse))
    (void) TransformImageColorspace(image,colorspace,exception);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l a s s i f y I m a g e C o l o r s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClassifyImageColors() begins by initializing a color description tree
%  of sufficient depth to represent each possible input color in a leaf.
%  However, it is impractical to generate a fully-formed color
%  description tree in the storage_class phase for realistic values of
%  Cmax.  If colors components in the input image are quantized to k-bit
%  precision, so that Cmax= 2k-1, the tree would need k levels below the
%  root node to allow representing each possible input color in a leaf.
%  This becomes prohibitive because the tree's total number of nodes is
%  1 + sum(i=1,k,8k).
%
%  A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
%  Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
%  Initializes data structures for nodes only as they are needed;  (2)
%  Chooses a maximum depth for the tree as a function of the desired
%  number of colors in the output image (currently log2(colormap size)).
%
%  For each pixel in the input image, storage_class scans downward from
%  the root of the color description tree.  At each level of the tree it
%  identifies the single node which represents a cube in RGB space
%  containing the pixel's color.  It updates the following data for each
%  such node:
%
%    n1:  Number of pixels whose color is contained in the RGB cube
%    which this node represents;
%
%    n2:  Number of pixels whose color is not represented in a node at
%    lower depth in the tree;  initially, n2 = 0 for all nodes except
%    leaves of the tree.
%
%    Sr, Sg, Sb:  Sums of the red, green, and blue component values for
%    all pixels not classified at a lower depth.  The combination of
%    these sums and n2 will ultimately characterize the mean color of a
%    set of pixels represented by this node.
%
%    E:  the distance squared in RGB space between each pixel contained
%    within a node and the nodes' center.  This represents the quantization
%    error for a node.
%
%  The format of the ClassifyImageColors() method is:
%
%      MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
%        const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o image: the image.
%
*/

/*
  SetAssociatedAlpha() records in the cube whether the alpha channel takes
  part in classification: only when the image carries an alpha trait and
  the request is not a 2-color grayscale reduction.
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  MagickBooleanType
    associate_alpha;

  associate_alpha=image->alpha_trait != UndefinedPixelTrait ? MagickTrue :
    MagickFalse;
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    associate_alpha=MagickFalse;  /* 2-color gray: alpha would skew the tree */
  cube_info->associate_alpha=associate_alpha;
}

static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
  const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag  "Classify/Image"

  CacheView
    *image_view;

  double
    bisect;

  DoublePixelPacket
    error,
    mid,
    midpoint,
    pixel;

  MagickBooleanType
    proceed;

  NodeInfo
    *node_info;

  size_t
    count,
    id,
    index,
    level;

  ssize_t
    y;

  /*
    Classify the first cube_info->maximum_colors colors to a tree depth of 8.
  */
  SetAssociatedAlpha(image,cube_info);
  if (cube_info->quantize_info->colorspace != image->colorspace)
    {
      if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
          (cube_info->quantize_info->colorspace != CMYKColorspace))
        (void) TransformImageColorspace((Image *) image,
          cube_info->quantize_info->colorspace,exception);
      else
        if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
          (void) TransformImageColorspace((Image *) image,sRGBColorspace,
            exception);
    }
  /*
    Each tree level halves the cube along every axis; the descent starts
    from the mid-range point of the full quantum range.
  */
  midpoint.red=(double) QuantumRange/2.0;
  midpoint.green=(double) QuantumRange/2.0;
  midpoint.blue=(double) QuantumRange/2.0;
  midpoint.alpha=(double) QuantumRange/2.0;
  error.alpha=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      /* Run-length: count consecutive identical pixels so they are
         classified once with weight `count'. */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= MaxTreeDepth; level++)
      {
        double
          distance;

        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        /* Move the cube center toward the octant selected by each id bit. */
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;
              }
            if (level == MaxTreeDepth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance) != 0)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    if (cube_info->colors > cube_info->maximum_colors)
      {
        /* Too many colors: collapse the tree and classify the remaining
           rows at the reduced depth in the loop below. */
        PruneToCubeDepth(cube_info,cube_info->root);
        break;
      }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Classify any remaining rows at the (possibly pruned) cube depth.
  */
  for (y++; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= cube_info->depth; level++)
      {
        double
          distance;

        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","%s",
                  image->filename);
                continue;
              }
            if (level == cube_info->depth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance) != 0)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if (cube_info->quantize_info->colorspace != image->colorspace)
    if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
        (cube_info->quantize_info->colorspace != CMYKColorspace))
      (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception);
  /* MagickFalse if either pixel loop exited early on a cache failure. */
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e Q u a n t i z e I n f o                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
%  or if quantize info is NULL, a new one.
%
%  The format of the CloneQuantizeInfo method is:
%
%      QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
%      quantize info, or if quantize info is NULL a new one.
%
%    o quantize_info: a structure of type QuantizeInfo.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo
    *clone_info;

  clone_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetQuantizeInfo(clone_info);
  if (quantize_info == (QuantizeInfo *) NULL)
    return(clone_info);
  clone_info->number_colors=quantize_info->number_colors;
  clone_info->tree_depth=quantize_info->tree_depth;
  clone_info->dither_method=quantize_info->dither_method;
  clone_info->colorspace=quantize_info->colorspace;
  clone_info->measure_error=quantize_info->measure_error;
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l o s e s t C o l o r                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClosestColor() traverses the color cube tree at a particular node and
%  determines which colormap entry best represents the input color.
%
%  The format of the ClosestColor method is:
%
%      void ClosestColor(const Image *image,CubeInfo *cube_info,
%        const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
% % o node_info: the address of a structure of type NodeInfo which points to a % node in the color cube tree that is to be pruned. % */ static void ClosestColor(const Image *image,CubeInfo *cube_info, const NodeInfo *node_info) { size_t number_children; ssize_t i; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) ClosestColor(image,cube_info,node_info->child[i]); if (node_info->number_unique != 0) { double alpha, beta, distance, pixel; DoublePixelPacket *magick_restrict q; PixelInfo *magick_restrict p; /* Determine if this color is "closest". */ p=image->colormap+node_info->color_number; q=(&cube_info->target); alpha=1.0; beta=1.0; if (cube_info->associate_alpha != MagickFalse) { alpha=(MagickRealType) (QuantumScale*p->alpha); beta=(MagickRealType) (QuantumScale*q->alpha); } pixel=alpha*p->red-beta*q->red; distance=pixel*pixel; if (distance <= cube_info->distance) { pixel=alpha*p->green-beta*q->green; distance+=pixel*pixel; if (distance <= cube_info->distance) { pixel=alpha*p->blue-beta*q->blue; distance+=pixel*pixel; if (distance <= cube_info->distance) { if (cube_info->associate_alpha != MagickFalse) { pixel=p->alpha-q->alpha; distance+=pixel*pixel; } if (distance <= cube_info->distance) { cube_info->distance=distance; cube_info->color_number=node_info->color_number; } } } } } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p r e s s I m a g e C o l o r m a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompressImageColormap() compresses an image colormap by removing any % duplicate or unused color entries. % % The format of the CompressImageColormap method is: % % MagickBooleanType CompressImageColormap(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
% % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType CompressImageColormap(Image *image, ExceptionInfo *exception) { QuantizeInfo quantize_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (IsPaletteImage(image) == MagickFalse) return(MagickFalse); GetQuantizeInfo(&quantize_info); quantize_info.number_colors=image->colors; quantize_info.tree_depth=MaxTreeDepth; return(QuantizeImage(&quantize_info,image,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e f i n e I m a g e C o l o r m a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DefineImageColormap() traverses the color cube tree and notes each colormap % entry. A colormap entry is any node in the color cube tree where the % of unique colors is not zero. % % The format of the DefineImageColormap method is: % % void DefineImageColormap(Image *image,CubeInfo *cube_info, % NodeInfo *node_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % % o node_info: the address of a structure of type NodeInfo which points to a % node in the color cube tree that is to be pruned. % */ static void DefineImageColormap(Image *image,CubeInfo *cube_info, NodeInfo *node_info) { size_t number_children; ssize_t i; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) DefineImageColormap(image,cube_info,node_info->child[i]); if (node_info->number_unique != 0) { double alpha; PixelInfo *magick_restrict q; /* Colormap entry is defined by the mean color in this cube. 
*/ q=image->colormap+image->colors; alpha=(double) ((MagickOffsetType) node_info->number_unique); alpha=PerceptibleReciprocal(alpha); if (cube_info->associate_alpha == MagickFalse) { q->red=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.red); q->green=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.green); q->blue=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.blue); q->alpha=(double) OpaqueAlpha; } else { double opacity; opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha); q->alpha=(double) ClampToQuantum(opacity); if (q->alpha == OpaqueAlpha) { q->red=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.red); q->green=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.green); q->blue=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.blue); } else { double gamma; gamma=(double) (QuantumScale*q->alpha); gamma=PerceptibleReciprocal(gamma); q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange* node_info->total_color.red); q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange* node_info->total_color.green); q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange* node_info->total_color.blue); if (node_info->number_unique > cube_info->transparent_pixels) { cube_info->transparent_pixels=node_info->number_unique; cube_info->transparent_index=(ssize_t) image->colors; } } } node_info->color_number=image->colors++; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y C u b e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyCubeInfo() deallocates memory associated with an image. % % The format of the DestroyCubeInfo method is: % % DestroyCubeInfo(CubeInfo *cube_info) % % A description of each parameter follows: % % o cube_info: the address of a structure of type CubeInfo. 
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  Nodes
    *nodes;

  /*
    Release color cube tree storage.
  */
  do
  {
    nodes=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=nodes;
  } while (cube_info->node_queue != (Nodes *) NULL);
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y Q u a n t i z e I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
%  structure.
%
%  The format of the DestroyQuantizeInfo method is:
%
%      QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  /* Invalidate the signature so stale pointers are caught by the asserts. */
  quantize_info->signature=(~MagickCoreSignature);
  quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
  return(quantize_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D i t h e r I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DitherImage() distributes the difference between an original image and
%  the corresponding color-reduced image to neighboring pixels using
%  serpentine-scan Floyd-Steinberg error diffusion.  DitherImage returns
%  MagickTrue if the image is dithered otherwise MagickFalse.
%
%  The format of the DitherImage method is:
%
%      MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o exception: return any errors or warnings in this structure.
% */ static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels) { ssize_t i; assert(pixels != (DoublePixelPacket **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixels[i] != (DoublePixelPacket *) NULL) pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]); pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels); return(pixels); } static DoublePixelPacket **AcquirePixelThreadSet(const size_t count) { DoublePixelPacket **pixels; size_t number_threads; ssize_t i; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads, sizeof(*pixels)); if (pixels == (DoublePixelPacket **) NULL) return((DoublePixelPacket **) NULL); (void) memset(pixels,0,number_threads*sizeof(*pixels)); for (i=0; i < (ssize_t) number_threads; i++) { pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,2* sizeof(**pixels)); if (pixels[i] == (DoublePixelPacket *) NULL) return(DestroyPixelThreadSet(pixels)); } return(pixels); } static inline ssize_t CacheOffset(CubeInfo *cube_info, const DoublePixelPacket *pixel) { #define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift))) #define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift))) #define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift))) #define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift))) ssize_t offset; offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) | GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) | BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue)))); if (cube_info->associate_alpha != MagickFalse) offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha))); return(offset); } static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info, ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CacheView *image_view; DoublePixelPacket **pixels; MagickBooleanType status; 
  ssize_t
    y;

  /*
    Distribute quantization error using Floyd-Steinberg.
  */
  pixels=AcquirePixelThreadSet(image->columns);
  if (pixels == (DoublePixelPacket **) NULL)
    return(MagickFalse);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    CubeInfo
      cube;

    DoublePixelPacket
      *current,
      *previous;

    Quantum
      *magick_restrict q;

    size_t
      index;

    ssize_t
      x,
      v;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    cube=(*cube_info);
    /* Serpentine scan: even rows left-to-right, odd rows right-to-left. */
    current=pixels[id]+(y & 0x01)*image->columns;
    previous=pixels[id]+((y+1) & 0x01)*image->columns;
    v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      DoublePixelPacket
        color,
        pixel;

      ssize_t
        i;

      ssize_t
        u;

      u=(y & 0x01) != 0 ? (ssize_t) image->columns-1-x : x;
      AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel);
      if (x > 0)
        {
          /* 7/16 of the error from the previously processed neighbor. */
          pixel.red+=7.0*cube_info->diffusion*current[u-v].red/16;
          pixel.green+=7.0*cube_info->diffusion*current[u-v].green/16;
          pixel.blue+=7.0*cube_info->diffusion*current[u-v].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=7.0*cube_info->diffusion*current[u-v].alpha/16;
        }
      if (y > 0)
        {
          if (x < (ssize_t) (image->columns-1))
            {
              /* 1/16 from the prior row's forward diagonal neighbor. */
              pixel.red+=cube_info->diffusion*previous[u+v].red/16;
              pixel.green+=cube_info->diffusion*previous[u+v].green/16;
              pixel.blue+=cube_info->diffusion*previous[u+v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=cube_info->diffusion*previous[u+v].alpha/16;
            }
          /* 5/16 from directly above. */
          pixel.red+=5.0*cube_info->diffusion*previous[u].red/16;
          pixel.green+=5.0*cube_info->diffusion*previous[u].green/16;
          pixel.blue+=5.0*cube_info->diffusion*previous[u].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=5.0*cube_info->diffusion*previous[u].alpha/16;
          if (x > 0)
            {
              /* 3/16 from the prior row's backward diagonal neighbor. */
              pixel.red+=3.0*cube_info->diffusion*previous[u-v].red/16;
              pixel.green+=3.0*cube_info->diffusion*previous[u-v].green/16;
              pixel.blue+=3.0*cube_info->diffusion*previous[u-v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=3.0*cube_info->diffusion*previous[u-v].alpha/16;
            }
        }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube.associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      i=CacheOffset(&cube,&pixel);
      if (cube.cache[i] < 0)
        {
          NodeInfo
            *node_info;

          size_t
            node_id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            node_id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[node_id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[node_id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          cube.cache[i]=(ssize_t) cube.color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) cube.cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image));
      if (cube.quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),
            q+u*GetPixelChannels(image));
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),
            q+u*GetPixelChannels(image));
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),
            q+u*GetPixelChannels(image));
          if (cube.associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),
              q+u*GetPixelChannels(image));
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
      /*
        Store the error.
      */
      AssociateAlphaPixelInfo(&cube,image->colormap+index,&color);
      current[u].red=pixel.red-color.red;
      current[u].green=pixel.green-color.green;
      current[u].blue=pixel.blue-color.blue;
      if (cube.associate_alpha != MagickFalse)
        current[u].alpha=pixel.alpha-color.alpha;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

          proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
            image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  pixels=DestroyPixelThreadSet(pixels);
  /* NOTE(review): `status' is tracked above but MagickTrue is returned
     unconditionally, so cache-sync and progress failures are silently
     dropped -- confirm whether this should be `return(status);'. */
  return(MagickTrue);
}

/*
  RiemersmaDither() dithers the single pixel at the cube's current (x,y)
  position using the Hilbert-curve error queue, then advances the position
  one step in `direction'.
*/
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception)
{
#define DitherImageTag  "Dither/Image"

  CubeInfo
    *p;

  DoublePixelPacket
    color,
    pixel;

  MagickBooleanType
    proceed;

  size_t
    index;

  p=cube_info;
  if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
      (p->y >= 0) && (p->y < (ssize_t) image->rows))
    {
      Quantum
        *magick_restrict q;

      ssize_t
        i;

      /*
        Distribute error.
      */
      q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      AssociateAlphaPixel(image,cube_info,q,&pixel);
      /* Fold the weighted error queue into the current pixel. */
      for (i=0; i < ErrorQueueLength; i++)
      {
        pixel.red+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
          p->error[i].red;
        pixel.green+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
          p->error[i].green;
        pixel.blue+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
          p->error[i].blue;
        if (cube_info->associate_alpha != MagickFalse)
          pixel.alpha+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
            p->error[i].alpha;
      }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      i=CacheOffset(cube_info,&pixel);
      if (p->cache[i] < 0)
        {
          NodeInfo
            *node_info;

          size_t
            id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=p->root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(cube_info,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          p->target=pixel;
          p->distance=(double) (4.0*(QuantumRange+1.0)*((double)
            QuantumRange+1.0)+1.0);
          ClosestColor(image,p,node_info->parent);
          p->cache[i]=(ssize_t) p->color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) p->cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q);
      if (cube_info->quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q);
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q);
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q);
          if (cube_info->associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q);
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Propagate the error as the last entry of the error queue.
      */
      (void) memmove(p->error,p->error+1,(ErrorQueueLength-1)*
        sizeof(p->error[0]));
      AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color);
      p->error[ErrorQueueLength-1].red=pixel.red-color.red;
      p->error[ErrorQueueLength-1].green=pixel.green-color.green;
      p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
      if (cube_info->associate_alpha != MagickFalse)
        p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha;
      proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
      if (proceed == MagickFalse)
        return(MagickFalse);
      p->offset++;
    }
  /* Step one pixel in the requested direction along the Hilbert curve. */
  switch (direction)
  {
    case WestGravity: p->x--; break;
    case EastGravity: p->x++; break;
    case NorthGravity: p->y--; break;
    case SouthGravity: p->y++; break;
  }
  return(MagickTrue);
}

/*
  Riemersma() recursively traces a Hilbert curve of order `level' over the
  image, dithering one pixel (via RiemersmaDither) at each step of the
  curve.
*/
static MagickBooleanType Riemersma(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const size_t level,const unsigned int direction,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=MagickTrue;
  if (level == 1)
    switch (direction)
    {
      case WestGravity:
      {
        status=RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        break;
      }
      case EastGravity:
      {
        status=RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        break;
      }
      case NorthGravity:
      {
        status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        break;
      }
      case SouthGravity:
      {
        status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        break;
      }
      default:
        break;
    }
  else
    /* Recursive case: four sub-curves of order level-1 joined by single
       dither steps, per the Hilbert curve construction. */
    switch (direction)
    {
      case WestGravity:
      {
        status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
            exception);
        break;
      }
      case EastGravity:
      {
        status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
            exception);
        break;
      }
      case NorthGravity:
      {
        status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,EastGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
            exception);
        break;
      }
      case SouthGravity:
      {
        status=Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,NorthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,WestGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=RiemersmaDither(image,image_view,cube_info,SouthGravity,
            exception);
        if (status != MagickFalse)
          status=Riemersma(image,image_view,cube_info,level-1,WestGravity,
            exception);
        break;
      }
      default:
        break;
    }
  return(status);
}

static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{ CacheView *image_view; const char *artifact; MagickBooleanType status; size_t extent, level; artifact=GetImageArtifact(image,"dither:diffusion-amount"); if (artifact != (const char *) NULL) cube_info->diffusion=StringToDoubleInterval(artifact,1.0); if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod) return(FloydSteinbergDither(image,cube_info,exception)); /* Distribute quantization error along a Hilbert curve. */ (void) memset(cube_info->error,0,ErrorQueueLength*sizeof(*cube_info->error)); cube_info->x=0; cube_info->y=0; extent=MagickMax(image->columns,image->rows); level=(size_t) log2((double) extent); if ((1UL << level) < extent) level++; cube_info->offset=0; cube_info->span=(MagickSizeType) image->columns*image->rows; image_view=AcquireAuthenticCacheView(image,exception); status=MagickTrue; if (level > 0) status=Riemersma(image,image_view,cube_info,level,NorthGravity,exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t C u b e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetCubeInfo() initialize the Cube data structure. % % The format of the GetCubeInfo method is: % % CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info, % const size_t depth,const size_t maximum_colors) % % A description of each parameter follows. % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o depth: Normally, this integer value is zero or one. A zero or % one tells Quantize to choose a optimal tree depth of Log4(number_colors). % A tree of this depth generally allows the best representation of the % reference image with the least amount of memory and the fastest % computational speed. 
%      In some cases, such as an image with low color
%      dispersion (a few number of colors), a value other than
%      Log4(number_colors) is required.  To expand the color tree completely,
%      use a value of 8.
%
%    o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
  const size_t depth,const size_t maximum_colors)
{
  CubeInfo
    *cube_info;

  double
    weight;

  size_t
    length;

  ssize_t
    i;

  /*
    Initialize tree to describe color cube_info.
  */
  cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
  if (cube_info == (CubeInfo *) NULL)
    return((CubeInfo *) NULL);
  (void) memset(cube_info,0,sizeof(*cube_info));
  /* clamp the tree depth to the range [2,MaxTreeDepth] */
  cube_info->depth=depth;
  if (cube_info->depth > MaxTreeDepth)
    cube_info->depth=MaxTreeDepth;
  if (cube_info->depth < 2)
    cube_info->depth=2;
  cube_info->maximum_colors=maximum_colors;
  /*
    Initialize root node.
  */
  cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
  if (cube_info->root == (NodeInfo *) NULL)
    return((CubeInfo *) NULL);
  cube_info->root->parent=cube_info->root;
  cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
  if (cube_info->quantize_info->dither_method == NoDitherMethod)
    return(cube_info);
  /*
    Initialize dither resources.
  */
  length=(size_t) (1UL << (4*(8-CacheShift)));
  cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
  if (cube_info->memory_info == (MemoryInfo *) NULL)
    return((CubeInfo *) NULL);
  cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
  /*
    Initialize color cache: every byte 0xFF, i.e. each ssize_t entry reads as
    -1 ("empty") on two's-complement targets.
  */
  (void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*length);
  /*
    Distribute weights along a curve of exponential decay.
  */
  weight=1.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[i]=PerceptibleReciprocal(weight);
    weight*=exp(log(1.0/ErrorRelativeWeight)/(ErrorQueueLength-1.0));
  }
  cube_info->diffusion=1.0;
  return(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t N o d e I n f o                                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetNodeInfo() allocates memory for a new node in the color cube tree and
%  presets all fields to zero.
%
%  The format of the GetNodeInfo method is:
%
%      NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
%        const size_t level,NodeInfo *parent)
%
%  A description of each parameter follows.
%
%    o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
%    o id: Specifies the child number of the node.
%
%    o level: Specifies the level in the storage_class the node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        return((NodeInfo *) NULL);
      /* push the new chunk onto the cube's node-queue list */
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) memset(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e Q u a n t i z e E r r o r                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageQuantizeError() measures the difference between the original
%  and quantized images.  This difference is the total quantization error.
%  The error is computed by summing over all pixels in an image the distance
%  squared in RGB space between each reference pixel value and its quantized
%  value.  These values are computed:
%
%    o mean_error_per_pixel:  This value is the mean error for any single
%      pixel in the image.
%
%    o normalized_mean_square_error:  This value is the normalized mean
%      quantization error for any single pixel in the image.  This distance
%      measure is normalized to a range between 0 and 1.  It is independent
%      of the range of red, green, and blue values in the image.
%
%    o normalized_maximum_square_error:  This value is the normalized
%      maximum quantization error for any single pixel in the image.  This
%      distance measure is normalized to a range between 0 and 1.  It is
%      independent of the range of red, green, and blue values in your image.
%
%  The format of the GetImageQuantizeError method is:
%
%      MagickBooleanType GetImageQuantizeError(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    alpha,
    area,
    beta,
    distance,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  ssize_t
    index,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,exception);
  (void) memset(&image->error,0,sizeof(image->error));
  /* only PseudoClass (colormapped) images have a quantization error */
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  area=3.0*image->columns*image->rows;  /* 3 channels per pixel */
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=(ssize_t) GetPixelIndex(image,p);
      if (image->alpha_trait != UndefinedPixelTrait)
        {
          /* weight each channel by the pixel/colormap alpha */
          alpha=(double) (QuantumScale*GetPixelAlpha(image,p));
          beta=(double) (QuantumScale*image->colormap[index].alpha);
        }
      distance=fabs((double) (alpha*GetPixelRed(image,p)-beta*
        image->colormap[index].red));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta*
        image->colormap[index].green));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta*
        image->colormap[index].blue));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
  image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
    mean_error/area;
  image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t Q u a n t i z e I n f o                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetQuantizeInfo() initializes the QuantizeInfo structure.
%
%  The format of the GetQuantizeInfo method is:
%
%      GetQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  /* zero everything, then set the documented defaults */
  (void) memset(quantize_info,0,sizeof(*quantize_info));
  quantize_info->number_colors=256;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->measure_error=MagickFalse;
  quantize_info->signature=MagickCoreSignature;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   K m e a n s I m a g e                                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  KmeansImage() applies k-means color reduction to an image.  This is a
%  colorspace clustering or segmentation technique.
%
%  The format of the KmeansImage method is:
%
%      MagickBooleanType KmeansImage(Image *image,const size_t number_colors,
%        const size_t max_iterations,const double tolerance,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o number_colors: number of colors to use as seeds.
%
%    o max_iterations: maximum number of iterations while converging.
%
%    o tolerance: the maximum tolerance.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* per-thread cluster accumulator: channel sums, member count, distortion */
typedef struct _KmeansInfo
{
  double
    red,
    green,
    blue,
    alpha,
    black,
    count,
    distortion;
} KmeansInfo;

/* Release one accumulator per worker thread; always returns NULL. */
static KmeansInfo **DestroyKmeansThreadSet(KmeansInfo **kmeans_info)
{
  ssize_t
    i;

  assert(kmeans_info != (KmeansInfo **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (kmeans_info[i] != (KmeansInfo *) NULL)
      kmeans_info[i]=(KmeansInfo *) RelinquishMagickMemory(kmeans_info[i]);
  kmeans_info=(KmeansInfo **) RelinquishMagickMemory(kmeans_info);
  return(kmeans_info);
}

/* Allocate one accumulator array (number_colors entries) per worker thread. */
static KmeansInfo **AcquireKmeansThreadSet(const size_t number_colors)
{
  KmeansInfo
    **kmeans_info;

  ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  kmeans_info=(KmeansInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*kmeans_info));
  if (kmeans_info == (KmeansInfo **) NULL)
    return((KmeansInfo **) NULL);
  (void) memset(kmeans_info,0,number_threads*sizeof(*kmeans_info));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    kmeans_info[i]=(KmeansInfo *) AcquireQuantumMemory(number_colors,
      sizeof(**kmeans_info));
    if (kmeans_info[i] == (KmeansInfo *) NULL)
      return(DestroyKmeansThreadSet(kmeans_info));  /* partial cleanup */
  }
  return(kmeans_info);
}

/*
  KmeansMetric() returns the squared, alpha-weighted color distance between
  pixel p and cluster centroid q.
*/
static inline double KmeansMetric(const Image *magick_restrict image,
  const Quantum *magick_restrict p,const PixelInfo *magick_restrict q)
{
  double
    gamma,
    metric,
    pixel;

  gamma=1.0;
  metric=0.0;
  if ((image->alpha_trait != UndefinedPixelTrait) ||
      (q->alpha_trait != UndefinedPixelTrait))
    {
      pixel=GetPixelAlpha(image,p)-(q->alpha_trait != UndefinedPixelTrait ?
        q->alpha : OpaqueAlpha);
      metric+=pixel*pixel;
      /* discount color terms by transparency on either side */
      if (image->alpha_trait != UndefinedPixelTrait)
        gamma*=QuantumScale*GetPixelAlpha(image,p);
      if (q->alpha_trait != UndefinedPixelTrait)
        gamma*=QuantumScale*q->alpha;
    }
  if (image->colorspace == CMYKColorspace)
    {
      pixel=QuantumScale*(GetPixelBlack(image,p)-q->black);
      metric+=gamma*pixel*pixel;
      gamma*=QuantumScale*(QuantumRange-GetPixelBlack(image,p));
      gamma*=QuantumScale*(QuantumRange-q->black);
    }
  metric*=3.0;
  pixel=QuantumScale*(GetPixelRed(image,p)-q->red);
  if (IsHueCompatibleColorspace(image->colorspace) != MagickFalse)
    {
      /* hue is circular: wrap differences greater than half a revolution */
      if (fabs((double) pixel) > 0.5)
        pixel-=0.5;
      pixel*=2.0;
    }
  metric+=gamma*pixel*pixel;
  pixel=QuantumScale*(GetPixelGreen(image,p)-q->green);
  metric+=gamma*pixel*pixel;
  pixel=QuantumScale*(GetPixelBlue(image,p)-q->blue);
  metric+=gamma*pixel*pixel;
  return(metric);
}

MagickExport MagickBooleanType KmeansImage(Image *image,
  const size_t number_colors,const size_t max_iterations,const double tolerance,
  ExceptionInfo *exception)
{
#define KmeansImageTag  "Kmeans/Image"
#define RandomColorComponent(info)  (QuantumRange*GetPseudoRandomValue(info))

  CacheView
    *image_view;

  const char
    *colors;

  double
    previous_tolerance;

  KmeansInfo
    **kmeans_pixels;

  MagickBooleanType
    verbose,
    status;

  ssize_t
    n;

  size_t
    number_threads;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  colors=GetImageArtifact(image,"kmeans:seed-colors");
  if (colors == (const char *) NULL)
    {
      CubeInfo
        *cube_info;

      QuantizeInfo
        *quantize_info;

      size_t
        colors,
        depth;

      /*
        Seed clusters from color quantization.
      */
      quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
      quantize_info->colorspace=image->colorspace;
      quantize_info->number_colors=number_colors;
      quantize_info->dither_method=NoDitherMethod;
      /* depth = Log4(number_colors)+2, via repeated shift-by-two */
      colors=number_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      cube_info=GetCubeInfo(quantize_info,depth,number_colors);
      if (cube_info == (CubeInfo *) NULL)
        {
          quantize_info=DestroyQuantizeInfo(quantize_info);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      status=ClassifyImageColors(cube_info,image,exception);
      if (status != MagickFalse)
        {
          if (cube_info->colors > cube_info->maximum_colors)
            ReduceImageColors(image,cube_info);
          status=SetImageColormap(image,cube_info,exception);
        }
      DestroyCubeInfo(cube_info);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      if (status == MagickFalse)
        return(status);
    }
  else
    {
      char
        color[MagickPathExtent];

      const char
        *p;

      /*
        Seed clusters from color list (e.g. red;green;blue).
      */
      status=AcquireImageColormap(image,number_colors,exception);
      if (status == MagickFalse)
        return(status);
      for (n=0, p=colors; n < (ssize_t) image->colors; n++)
      {
        const char
          *q;

        /* scan up to the next ';' separator (or end of list) */
        for (q=p; *q != '\0'; q++)
          if (*q == ';')
            break;
        (void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1,
          MagickPathExtent));
        (void) QueryColorCompliance(color,AllCompliance,image->colormap+n,
          exception);
        if (*q == '\0')
          {
            n++;
            break;
          }
        p=q+1;
      }
      if (n < (ssize_t) image->colors)
        {
          RandomInfo
            *random_info;

          /*
            Seed clusters from random values.
          */
          random_info=AcquireRandomInfo();
          for ( ; n < (ssize_t) image->colors; n++)
          {
            (void) QueryColorCompliance("#000",AllCompliance,image->colormap+n,
              exception);
            image->colormap[n].red=RandomColorComponent(random_info);
            image->colormap[n].green=RandomColorComponent(random_info);
            image->colormap[n].blue=RandomColorComponent(random_info);
            if (image->alpha_trait != UndefinedPixelTrait)
              image->colormap[n].alpha=RandomColorComponent(random_info);
            if (image->colorspace == CMYKColorspace)
              image->colormap[n].black=RandomColorComponent(random_info);
          }
          random_info=DestroyRandomInfo(random_info);
        }
    }
  /*
    Iterative refinement.
  */
  kmeans_pixels=AcquireKmeansThreadSet(number_colors);
  if (kmeans_pixels == (KmeansInfo **) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  previous_tolerance=0.0;
  verbose=IsStringTrue(GetImageArtifact(image,"debug"));
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (n=0; n < (ssize_t) max_iterations; n++)
  {
    double
      distortion;

    ssize_t
      i;

    ssize_t
      y;

    /* reset the per-thread accumulators for this iteration */
    for (i=0; i < (ssize_t) number_threads; i++)
      (void) memset(kmeans_pixels[i],0,image->colors*sizeof(*kmeans_pixels[i]));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      Quantum
        *magick_restrict q;

      ssize_t
        x;

      if (status == MagickFalse)
        continue;
      q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        double
          min_distance;

        ssize_t
          i;

        ssize_t
          j;

        /*
          Assign each pixel whose mean has the least squared color distance.
        */
        j=0;
        min_distance=KmeansMetric(image,q,image->colormap+0);
        for (i=1; i < (ssize_t) image->colors; i++)
        {
          double
            distance;

          if (min_distance <= MagickEpsilon)
            break;  /* effectively exact match; stop searching */
          distance=KmeansMetric(image,q,image->colormap+i);
          if (distance < min_distance)
            {
              min_distance=distance;
              j=i;
            }
        }
        kmeans_pixels[id][j].red+=QuantumScale*GetPixelRed(image,q);
        kmeans_pixels[id][j].green+=QuantumScale*GetPixelGreen(image,q);
        kmeans_pixels[id][j].blue+=QuantumScale*GetPixelBlue(image,q);
        if (image->alpha_trait != UndefinedPixelTrait)
          kmeans_pixels[id][j].alpha+=QuantumScale*GetPixelAlpha(image,q);
        if (image->colorspace == CMYKColorspace)
          kmeans_pixels[id][j].black+=QuantumScale*GetPixelBlack(image,q);
        kmeans_pixels[id][j].count++;
        kmeans_pixels[id][j].distortion+=min_distance;
        SetPixelIndex(image,(Quantum) j,q);
        q+=GetPixelChannels(image);
      }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
    }
    if (status == MagickFalse)
      break;
    /*
      Reduce sums to [0] entry.
    */
    for (i=1; i < (ssize_t) number_threads; i++)
    {
      ssize_t
        j;

      for (j=0; j < (ssize_t) image->colors; j++)
      {
        kmeans_pixels[0][j].red+=kmeans_pixels[i][j].red;
        kmeans_pixels[0][j].green+=kmeans_pixels[i][j].green;
        kmeans_pixels[0][j].blue+=kmeans_pixels[i][j].blue;
        if (image->alpha_trait != UndefinedPixelTrait)
          kmeans_pixels[0][j].alpha+=kmeans_pixels[i][j].alpha;
        if (image->colorspace == CMYKColorspace)
          kmeans_pixels[0][j].black+=kmeans_pixels[i][j].black;
        kmeans_pixels[0][j].count+=kmeans_pixels[i][j].count;
        kmeans_pixels[0][j].distortion+=kmeans_pixels[i][j].distortion;
      }
    }
    /*
      Calculate the new means (centroids) of the pixels in the new clusters.
    */
    distortion=0.0;
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      double
        gamma;

      /* safe reciprocal: empty clusters (count == 0) yield a zero centroid */
      gamma=PerceptibleReciprocal((double) kmeans_pixels[0][i].count);
      image->colormap[i].red=gamma*QuantumRange*kmeans_pixels[0][i].red;
      image->colormap[i].green=gamma*QuantumRange*kmeans_pixels[0][i].green;
      image->colormap[i].blue=gamma*QuantumRange*kmeans_pixels[0][i].blue;
      if (image->alpha_trait != UndefinedPixelTrait)
        image->colormap[i].alpha=gamma*QuantumRange*kmeans_pixels[0][i].alpha;
      if (image->colorspace == CMYKColorspace)
        image->colormap[i].black=gamma*QuantumRange*kmeans_pixels[0][i].black;
      distortion+=kmeans_pixels[0][i].distortion;
    }
    if (verbose != MagickFalse)
      (void) FormatLocaleFile(stderr,"distortion[%.20g]: %*g %*g\n",(double) n,
        GetMagickPrecision(),distortion,GetMagickPrecision(),
        fabs(distortion-previous_tolerance));
    /* converged once the change in total distortion is within tolerance */
    if (fabs(distortion-previous_tolerance) <= tolerance)
      break;
    previous_tolerance=distortion;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,KmeansImageTag,(MagickOffsetType) n,
          max_iterations);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  kmeans_pixels=DestroyKmeansThreadSet(kmeans_pixels);
  if (image->progress_monitor != (MagickProgressMonitor) NULL)
    (void) SetImageProgress(image,KmeansImageTag,(MagickOffsetType)
      max_iterations-1,max_iterations);
  if (status == MagickFalse)
    return(status);
  return(SyncImage(image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   P o s t e r i z e I m a g e                                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PosterizeImage() reduces the image to a limited number of colors for a
%  "poster" effect.
%
%  The format of the PosterizeImage method is:
%
%      MagickBooleanType PosterizeImage(Image *image,const size_t levels,
%        const DitherMethod dither_method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: Specifies a pointer to an Image structure.
%
%    o levels: Number of color levels allowed in each channel.  Very low values
%      (2, 3, or 4) have the most visible effect.
%
%    o dither_method: choose from UndefinedDitherMethod, NoDitherMethod,
%      RiemersmaDitherMethod, FloydSteinbergDitherMethod.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline double MagickRound(double x)
{
  /*
    Round the fraction to nearest integer.
  */
  if ((x-floor(x)) < (ceil(x)-x))
    return(floor(x));
  return(ceil(x));
}

MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const DitherMethod dither_method,ExceptionInfo *exception)
{
#define PosterizeImageTag  "Posterize/Image"
/* snap a quantum to the nearest of `levels` evenly-spaced channel values */
#define PosterizePixel(pixel) ClampToQuantum((MagickRealType) QuantumRange*( \
  MagickRound(QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  QuantizeInfo
    *quantize_info;

  ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->colors,1)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Posterize colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double)
          PosterizePixel(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double)
          PosterizePixel(image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double)
          PosterizePixel(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double)
          PosterizePixel(image->colormap[i].alpha);
    }
  /*
    Posterize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /* quantize to at most levels^3 colors, dithering as requested */
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither_method=dither_method;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image,exception);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   P r u n e C h i l d                                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneChild() deletes the given node and merges its statistics into its
%  parent.
%
%  The format of the PruneSubtree method is:
%
%      PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
  NodeInfo
    *parent;

  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneChild(cube_info,node_info->child[i]);
  /*
    Merge color statistics into parent.
  */
  parent=node_info->parent;
  parent->number_unique+=node_info->number_unique;
  parent->total_color.red+=node_info->total_color.red;
  parent->total_color.green+=node_info->total_color.green;
  parent->total_color.blue+=node_info->total_color.blue;
  parent->total_color.alpha+=node_info->total_color.alpha;
  /* detach this node from its parent and account for its removal */
  parent->child[node_info->id]=(NodeInfo *) NULL;
  cube_info->nodes--;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   P r u n e L e v e l                                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneLevel() deletes all nodes at the bottom level of the color tree merging
%  their color statistics into their parent node.
%
%  The format of the PruneLevel method is:
%
%      PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneLevel(cube_info,node_info->child[i]);
  /* only nodes at the deepest level are collapsed into their parents */
  if (node_info->level == cube_info->depth)
    PruneChild(cube_info,node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   P r u n e T o C u b e D e p t h                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneToCubeDepth() deletes any nodes at a depth greater than
%  cube_info->depth while merging their color statistics into their parent
%  node.
%
%  The format of the PruneToCubeDepth method is:
%
%      PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneToCubeDepth(cube_info,node_info->child[i]);
  if (node_info->level > cube_info->depth)
    PruneChild(cube_info,node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   Q u a n t i z e I m a g e                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeImage() analyzes the colors within a reference image and chooses a
%  fixed number of colors to represent the image.  The goal of the algorithm
%  is to minimize the color difference between the input and output image while
%  minimizing the processing time.
%
%  The format of the QuantizeImage method is:
%
%      MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
%        Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
  Image *image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  ImageType
    type;

  MagickBooleanType
    status;

  size_t
    depth,
    maximum_colors;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* clamp the requested palette size to [1,MaxColormapSize] */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  type=IdentifyImageType(image,exception);
  if (IsGrayImageType(type) != MagickFalse)
    (void) SetGrayscaleImage(image,exception);
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      /* shallower tree when dithering spreads the error anyway */
      if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2))
        depth--;
      if ((image->alpha_trait != UndefinedPixelTrait) && (depth > 5))
        depth--;
      if (IsGrayImageType(type) != MagickFalse)
        depth=MaxTreeDepth;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,image,exception);
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in the image.
      */
      if (cube_info->colors > cube_info->maximum_colors)
        ReduceImageColors(image,cube_info);
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   Q u a n t i z e I m a g e s                                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeImages() analyzes the colors within a set of reference images and
%  chooses a fixed number of colors to represent the set.  The goal of the
%  algorithm is to minimize the color difference between the input and output
%  images while minimizing the processing time.
%
%  The format of the QuantizeImages method is:
%
%      MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
%        Image *images,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o images: Specifies a pointer to a list of Image structures.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
  Image *images,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    proceed,
    status;

  MagickProgressMonitor
    progress_monitor;

  size_t
    depth,
    maximum_colors,
    number_images;

  ssize_t
    i;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (GetNextImageInList(images) == (Image *) NULL)
    {
      /*
        Handle a single image with QuantizeImage.
      */
      status=QuantizeImage(quantize_info,images,exception);
      return(status);
    }
  status=MagickFalse;
  /*
    Clamp the requested palette size to [1..MaxColormapSize]; 0 selects the
    maximum.
  */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      if (quantize_info->dither_method != NoDitherMethod)
        depth--;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(MagickFalse);
    }
  number_images=GetImageListLength(images);
  image=images;
  /*
    Classify the colors of every image in the sequence into one shared cube so
    the final colormap is global.  The progress monitor is suspended around
    each classification and restored afterwards.
  */
  for (i=0; image != (Image *) NULL; i++)
  {
    progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
      image->client_data);
    status=ClassifyImageColors(cube_info,image,exception);
    if (status == MagickFalse)
      break;
    (void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
    proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
      number_images);
    if (proceed == MagickFalse)
      break;
    image=GetNextImageInList(image);
  }
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in an image sequence.
      */
      ReduceImageColors(images,cube_info);
      /*
        Assign the shared, reduced colormap to every image in the sequence.
      */
      image=images;
      for (i=0; image != (Image *) NULL; i++)
      {
        progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
          NULL,image->client_data);
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
        (void) SetImageProgressMonitor(image,progress_monitor,
          image->client_data);
        proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
          number_images);
        if (proceed == MagickFalse)
          break;
        image=GetNextImageInList(image);
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u a n t i z e E r r o r F l a t t e n                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeErrorFlatten() traverses the color cube and flattens the
%  quantization error into a sorted 1D array.  This accelerates the color
%  reduction process.
%
%  Contributed by Yoya.
%
%  The format of the QuantizeErrorFlatten method is:
%
%      size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
%        const NodeInfo *node_info,const ssize_t offset,
%        double *quantize_error)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is current pointer.
%
%    o offset: quantize error offset.
%
%    o quantize_error: the quantization error vector.
%
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
  const NodeInfo *node_info,const ssize_t offset,double *quantize_error)
{
  size_t
    n,
    number_children;

  ssize_t
    i;

  if (offset >= (ssize_t) cube_info->nodes)
    return(0);
  /*
    Record this node's error, then recurse into each child; n counts nodes
    flattened so far and doubles as the next write offset.
  */
  quantize_error[offset]=node_info->quantize_error;
  n=1;
  number_children=cube_info->associate_alpha == MagickFalse ?
    8UL : 16UL;
  for (i=0; i < (ssize_t) number_children ; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n,
        quantize_error);
  return(n);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e d u c e                                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Reduce() traverses the color cube tree and prunes any node whose
%  quantization error falls below a particular threshold.
%
%  The format of the Reduce method is:
%
%      Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children (depth-first, so pruning proceeds leaves-upward).
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      Reduce(cube_info,node_info->child[i]);
  if (node_info->quantize_error <= cube_info->pruning_threshold)
    PruneChild(cube_info,node_info);
  else
    {
      /*
        Find minimum pruning threshold: count surviving colors and remember
        the smallest error above the current threshold for the next pass.
      */
      if (node_info->number_unique > 0)
        cube_info->colors++;
      if (node_info->quantize_error < cube_info->next_threshold)
        cube_info->next_threshold=node_info->quantize_error;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e d u c e I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReduceImageColors() repeatedly prunes the tree until the number of nodes
%  with n2 > 0 is less than or equal to the maximum number of colors allowed
%  in the output image.  On any given iteration over the tree, it selects
%  those nodes whose E value is minimal for pruning and merges their
%  color statistics upward.
It uses a pruning threshold, Ep, to govern
%  node selection as follows:
%
%    Ep = 0
%    while number of nodes with (n2 > 0) > required maximum number of colors
%      prune all nodes such that E <= Ep
%      Set Ep to minimum E in remaining nodes
%
%  This has the effect of minimizing any quantization error when merging
%  two nodes together.
%
%  When a node to be pruned has offspring, the pruning procedure invokes
%  itself recursively in order to prune the tree from the leaves upward.
%  n2, Sr, Sg, and Sb in a node being pruned are always added to the
%  corresponding data in that node's parent.  This retains the pruned
%  node's color characteristics for later averaging.
%
%  For each node, n2 pixels exist for which that node represents the
%  smallest volume in RGB space containing those pixels' colors.  When n2
%  > 0 the node will uniquely define a color in the output image. At the
%  beginning of reduction, n2 = 0 for all nodes except at the leaves of
%  the tree which represent colors present in the input image.
%
%  The other pixel count, n1, indicates the total number of colors
%  within the cubic volume which the node represents.  This includes n1 -
%  n2 pixels whose colors should be defined by nodes at a lower level in
%  the tree.
%
%  The format of the ReduceImageColors method is:
%
%      ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
% */ static int QuantizeErrorCompare(const void *error_p,const void *error_q) { double *p, *q; p=(double *) error_p; q=(double *) error_q; if (*p > *q) return(1); if (fabs(*q-*p) <= MagickEpsilon) return(0); return(-1); } static void ReduceImageColors(const Image *image,CubeInfo *cube_info) { #define ReduceImageTag "Reduce/Image" MagickBooleanType proceed; MagickOffsetType offset; size_t span; cube_info->next_threshold=0.0; if (cube_info->colors > cube_info->maximum_colors) { double *quantize_error; /* Enable rapid reduction of the number of unique colors. */ quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes, sizeof(*quantize_error)); if (quantize_error != (double *) NULL) { (void) QuantizeErrorFlatten(cube_info,cube_info->root,0, quantize_error); qsort(quantize_error,cube_info->nodes,sizeof(double), QuantizeErrorCompare); if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100)) cube_info->next_threshold=quantize_error[cube_info->nodes-110* (cube_info->maximum_colors+1)/100]; quantize_error=(double *) RelinquishMagickMemory(quantize_error); } } for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; ) { cube_info->pruning_threshold=cube_info->next_threshold; cube_info->next_threshold=cube_info->root->quantize_error-1; cube_info->colors=0; Reduce(cube_info,cube_info->root); offset=(MagickOffsetType) span-cube_info->colors; proceed=SetImageProgress(image,ReduceImageTag,offset,span- cube_info->maximum_colors+1); if (proceed == MagickFalse) break; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImage() replaces the colors of an image with the closest of the colors % from the reference image. 
%
%  The format of the RemapImage method is:
%
%      MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
%        Image *image,const Image *remap_image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o image: the image.
%
%    o remap_image: the reference image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  /*
    Initialize color cube.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* classify the REFERENCE image; its colors become the target palette */
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      /*
        Classify image colors from the reference image.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e m a p I m a g e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RemapImages() replaces the colors of a sequence of images with the
%  closest color from a reference image.
%
%  The format of the RemapImages method is:
%
%      MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
%        Image *images,Image *remap_image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o images: the image sequence.
%
%    o remap_image: the reference image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    status;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  if (remap_image == (Image *) NULL)
    {
      /*
        Create a global colormap for an image sequence.
      */
      status=QuantizeImages(quantize_info,images,exception);
      return(status);
    }
  /*
    Classify image colors from the reference image.
  */
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      /*
        Classify image colors from the reference image.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      /* remap every image in the sequence against the reference palette */
      image=images;
      for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
      {
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t G r a y s c a l e I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
%  The format of the SetGrayscaleImage method is:
%
%      MagickBooleanType SetGrayscaleImage(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: The image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  qsort() comparator ordering colormap entries by pixel intensity.  The
  difference is clamped to [INT_MIN,INT_MAX] before the int cast; note that
  intensity differences smaller than 1.0 compare as equal.
*/
static int IntensityCompare(const void *x,const void *y)
{
  double
    intensity;

  PixelInfo
    *color_1,
    *color_2;

  color_1=(PixelInfo *) x;
  color_2=(PixelInfo *) y;
  intensity=GetPixelInfoIntensity((const Image *) NULL,color_1)-
    GetPixelInfoIntensity((const Image *) NULL,color_2);
  if (intensity < (double) INT_MIN)
    intensity=(double) INT_MIN;
  if (intensity > (double) INT_MAX)
    intensity=(double) INT_MAX;
  return((int) intensity);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

static MagickBooleanType SetGrayscaleImage(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  PixelInfo
    *colormap;

  size_t
    extent;

  ssize_t
    *colormap_index,
    i,
    j,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->type != GrayscaleType)
    (void) TransformImageColorspace(image,GRAYColorspace,exception);
  /* index table is sized to cover every possible ScaleQuantumToMap() value */
  extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1));
  colormap_index=(ssize_t *) AcquireQuantumMemory(extent,
    sizeof(*colormap_index));
  if (colormap_index == (ssize_t *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  if (image->storage_class != PseudoClass)
    {
      /*
        Build a colormap from scratch: scan every pixel and register each
        distinct gray level.  -1 marks "intensity not seen yet".
      */
      (void) memset(colormap_index,(-1),extent*sizeof(*colormap_index));
      if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse)
        {
          colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      image->colors=0;
      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        Quantum
          *magick_restrict q;

        ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          size_t
            intensity;

          intensity=ScaleQuantumToMap(GetPixelRed(image,q));
          if (colormap_index[intensity] < 0)
            {
              /*
                Re-test inside the critical section: another thread may have
                registered this intensity between the check and the lock.
              */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
              if (colormap_index[intensity] < 0)
                {
                  colormap_index[intensity]=(ssize_t) image->colors;
                  image->colormap[image->colors].red=(double)
                    GetPixelRed(image,q);
                  image->colormap[image->colors].green=(double)
                    GetPixelGreen(image,q);
                  image->colormap[image->colors].blue=(double)
                    GetPixelBlue(image,q);
                  image->colors++;
                }
            }
          SetPixelIndex(image,(Quantum) colormap_index[intensity],q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
    }
  /*
    Sort the colormap by intensity; alpha temporarily records each entry's
    original index so pixel indexes can be remapped afterwards.
  */
  (void) memset(colormap_index,0,extent*sizeof(*colormap_index));
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].alpha=(double) i;
  qsort((void *) image->colormap,image->colors,sizeof(PixelInfo),
    IntensityCompare);
  colormap=(PixelInfo *)
    AcquireQuantumMemory(image->colors,sizeof(*colormap));
  if (colormap == (PixelInfo *) NULL)
    {
      colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Merge any duplicate colormap entries (the colormap is sorted by intensity,
    so duplicates are adjacent) and record old-index -> new-index mapping.
  */
  j=0;
  colormap[j]=image->colormap[0];
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse)
      {
        j++;
        colormap[j]=image->colormap[i];
      }
    colormap_index[(ssize_t) image->colormap[i].alpha]=j;
  }
  image->colors=(size_t) (j+1);
  image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
  image->colormap=colormap;
  /*
    Rewrite every pixel index through the old->new mapping.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap(
        GetPixelIndex(image,q))],q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
  image->type=GrayscaleType;
  if (SetImageMonochrome(image,exception) != MagickFalse)
    image->type=BilevelType;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S e t I m a g e C o l o r m a p                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageColormap() traverses the color cube tree and sets the colormap of
%  the image.
A colormap entry is any node in the color cube tree where the
%  number of unique colors is not zero.
%
%  The format of the SetImageColormap method is:
%
%      MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  size_t
    colormap_size;

  /*
    Allocate enough colormap slots for every color the cube may define, then
    populate them by walking the tree.
  */
  colormap_size=MagickMax(cube_info->maximum_colors,cube_info->colors);
  if (AcquireImageColormap(image,colormap_size,exception) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  image->colors=0;
  DefineImageColormap(image,cube_info,cube_info->root);
  if (image->colors == colormap_size)
    return(MagickTrue);
  /*
    Fewer colors were defined than allocated: trim the colormap to size.
  */
  image->colormap=(PixelInfo *) ResizeQuantumMemory(image->colormap,
    image->colors+1,sizeof(*image->colormap));
  if (image->colormap == (PixelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  return(MagickTrue);
}
convolution_1x1.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv1x1s1_sgemm_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; const int size = w * h; Mat bottom_im2col = bottom_blob; bottom_im2col.w = size; bottom_im2col.h = 1; im2col_sgemm_neon(bottom_im2col, top_blob, kernel, _bias, opt); } static void conv1x1s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; int nn_outch = 0; int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p + 1); Mat out2 = top_blob.channel(p + 2); Mat out3 = top_blob.channel(p + 3); Mat out4 = top_blob.channel(p + 4); Mat out5 = top_blob.channel(p + 5); Mat out6 = top_blob.channel(p + 6); Mat out7 = top_blob.channel(p + 7); const float bias0 = bias ? bias[p] : 0.f; const float bias1 = bias ? bias[p + 1] : 0.f; const float bias2 = bias ? 
bias[p + 2] : 0.f; const float bias3 = bias ? bias[p + 3] : 0.f; const float bias4 = bias ? bias[p + 4] : 0.f; const float bias5 = bias ? bias[p + 5] : 0.f; const float bias6 = bias ? bias[p + 6] : 0.f; const float bias7 = bias ? bias[p + 7] : 0.f; out0.fill(bias0); out1.fill(bias1); out2.fill(bias2); out3.fill(bias3); out4.fill(bias4); out5.fill(bias5); out6.fill(bias6); out7.fill(bias7); int q = 0; for (; q + 7 < inch; q += 8) { float* outptr0 = out0; float* outptr1 = out1; float* outptr2 = out2; float* outptr3 = out3; float* outptr4 = out4; float* outptr5 = out5; float* outptr6 = out6; float* outptr7 = out7; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q + 1); const float* img2 = bottom_blob.channel(q + 2); const float* img3 = bottom_blob.channel(q + 3); const float* img4 = bottom_blob.channel(q + 4); const float* img5 = bottom_blob.channel(q + 5); const float* img6 = bottom_blob.channel(q + 6); const float* img7 = bottom_blob.channel(q + 7); const float* kernel0 = kernel + p * inch + q; const float* kernel1 = kernel + (p + 1) * inch + q; const float* kernel2 = kernel + (p + 2) * inch + q; const float* kernel3 = kernel + (p + 3) * inch + q; const float* kernel4 = kernel + (p + 4) * inch + q; const float* kernel5 = kernel + (p + 5) * inch + q; const float* kernel6 = kernel + (p + 6) * inch + q; const float* kernel7 = kernel + (p + 7) * inch + q; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; const float* r4 = img4; const float* r5 = img5; const float* r6 = img6; const float* r7 = img7; int size = outw * outh; int nn = size >> 2; int remain = size & 3; float32x4_t _k0 = vld1q_f32(kernel0); float32x4_t _k1 = vld1q_f32(kernel1); float32x4_t _k2 = vld1q_f32(kernel2); float32x4_t _k3 = vld1q_f32(kernel3); float32x4_t _k4 = vld1q_f32(kernel4); float32x4_t _k5 = vld1q_f32(kernel5); float32x4_t _k6 = vld1q_f32(kernel6); float32x4_t _k7 = vld1q_f32(kernel7); float32x4_t _k0n = 
vld1q_f32(kernel0 + 4); float32x4_t _k1n = vld1q_f32(kernel1 + 4); float32x4_t _k2n = vld1q_f32(kernel2 + 4); float32x4_t _k3n = vld1q_f32(kernel3 + 4); float32x4_t _k4n = vld1q_f32(kernel4 + 4); float32x4_t _k5n = vld1q_f32(kernel5 + 4); float32x4_t _k6n = vld1q_f32(kernel6 + 4); float32x4_t _k7n = vld1q_f32(kernel7 + 4); #ifdef __clang__ // gcc reject over 30 oprands :( if (nn > 0) { asm volatile( "prfm pldl1keep, [%9, #128] \n" "ld1 {v17.4s}, [%9], #16 \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v18.4s}, [%1] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v19.4s}, [%2] \n" "0: \n" "fmla v18.4s, v17.4s, %34.s[0] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v20.4s}, [%3] \n" "fmla v19.4s, v17.4s, %35.s[0] \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v21.4s}, [%4] \n" "fmla v20.4s, v17.4s, %36.s[0] \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v22.4s}, [%5] \n" "fmla v21.4s, v17.4s, %37.s[0] \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v23.4s}, [%6] \n" "fmla v22.4s, v17.4s, %38.s[0] \n" "prfm pldl1keep, [%10, #128] \n" "ld1 {v16.4s}, [%10], #16 \n" "fmla v23.4s, v17.4s, %39.s[0] \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v24.4s}, [%7] \n" "fmla v18.4s, v16.4s, %34.s[1] \n" "fmla v19.4s, v16.4s, %35.s[1] \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v25.4s}, [%8] \n" "fmla v24.4s, v17.4s, %40.s[0] \n" "fmla v25.4s, v17.4s, %41.s[0] \n" "fmla v20.4s, v16.4s, %36.s[1] \n" "fmla v21.4s, v16.4s, %37.s[1] \n" "prfm pldl1keep, [%11, #128] \n" "ld1 {v17.4s}, [%11], #16 \n" "fmla v22.4s, v16.4s, %38.s[1] \n" "fmla v23.4s, v16.4s, %39.s[1] \n" "fmla v18.4s, v17.4s, %34.s[2] \n" "fmla v19.4s, v17.4s, %35.s[2] \n" "fmla v24.4s, v16.4s, %40.s[1] \n" "fmla v25.4s, v16.4s, %41.s[1] \n" "fmla v20.4s, v17.4s, %36.s[2] \n" "fmla v21.4s, v17.4s, %37.s[2] \n" "prfm pldl1keep, [%12, #128] \n" "ld1 {v16.4s}, [%12], #16 \n" "fmla v22.4s, v17.4s, %38.s[2] \n" "fmla v23.4s, v17.4s, %39.s[2] \n" "fmla v18.4s, v16.4s, %34.s[3] \n" "fmla v19.4s, v16.4s, %35.s[3] \n" "fmla v24.4s, v17.4s, %40.s[2] \n" "fmla v25.4s, 
v17.4s, %41.s[2] \n" "fmla v20.4s, v16.4s, %36.s[3] \n" "fmla v21.4s, v16.4s, %37.s[3] \n" "prfm pldl1keep, [%13, #128] \n" "ld1 {v17.4s}, [%13], #16 \n" "fmla v22.4s, v16.4s, %38.s[3] \n" "fmla v23.4s, v16.4s, %39.s[3] \n" "fmla v18.4s, v17.4s, %42.s[0] \n" "fmla v19.4s, v17.4s, %43.s[0] \n" "fmla v24.4s, v16.4s, %40.s[3] \n" "fmla v25.4s, v16.4s, %41.s[3] \n" "fmla v20.4s, v17.4s, %44.s[0] \n" "fmla v21.4s, v17.4s, %45.s[0] \n" "prfm pldl1keep, [%14, #128] \n" "ld1 {v16.4s}, [%14], #16 \n" "fmla v22.4s, v17.4s, %46.s[0] \n" "fmla v23.4s, v17.4s, %47.s[0] \n" "fmla v18.4s, v16.4s, %42.s[1] \n" "fmla v19.4s, v16.4s, %43.s[1] \n" "fmla v24.4s, v17.4s, %48.s[0] \n" "fmla v25.4s, v17.4s, %49.s[0] \n" "fmla v20.4s, v16.4s, %44.s[1] \n" "fmla v21.4s, v16.4s, %45.s[1] \n" "prfm pldl1keep, [%15, #128] \n" "ld1 {v17.4s}, [%15], #16 \n" "fmla v22.4s, v16.4s, %46.s[1] \n" "fmla v23.4s, v16.4s, %47.s[1] \n" "fmla v18.4s, v17.4s, %42.s[2] \n" "fmla v19.4s, v17.4s, %43.s[2] \n" "fmla v24.4s, v16.4s, %48.s[1] \n" "fmla v25.4s, v16.4s, %49.s[1] \n" "fmla v20.4s, v17.4s, %44.s[2] \n" "fmla v21.4s, v17.4s, %45.s[2] \n" "prfm pldl1keep, [%16, #128] \n" "ld1 {v16.4s}, [%16], #16 \n" "fmla v22.4s, v17.4s, %46.s[2] \n" "fmla v23.4s, v17.4s, %47.s[2] \n" "fmla v18.4s, v16.4s, %42.s[3] \n" "fmla v19.4s, v16.4s, %43.s[3] \n" "fmla v24.4s, v17.4s, %48.s[2] \n" "fmla v25.4s, v17.4s, %49.s[2] \n" "fmla v20.4s, v16.4s, %44.s[3] \n" "fmla v21.4s, v16.4s, %45.s[3] \n" "st1 {v18.4s}, [%1], #16 \n" "fmla v22.4s, v16.4s, %46.s[3] \n" "st1 {v19.4s}, [%2], #16 \n" "fmla v23.4s, v16.4s, %47.s[3] \n" "st1 {v20.4s}, [%3], #16 \n" "prfm pldl1keep, [%9, #128] \n" "ld1 {v17.4s}, [%9], #16 \n" "fmla v24.4s, v16.4s, %48.s[3] \n" "st1 {v21.4s}, [%4], #16 \n" "fmla v25.4s, v16.4s, %49.s[3] \n" "st1 {v22.4s}, [%5], #16 \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v18.4s}, [%1] \n" "st1 {v23.4s}, [%6], #16 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v19.4s}, [%2] \n" "st1 {v24.4s}, [%7], #16 \n" "subs %w0, %w0, #1 
\n" "st1 {v25.4s}, [%8], #16 \n" "bne 0b \n" "sub %9, %9, #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(r0), // %9 "=r"(r1), // %10 "=r"(r2), // %11 "=r"(r3), // %12 "=r"(r4), // %13 "=r"(r5), // %14 "=r"(r6), // %15 "=r"(r7) // %16 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(r0), "10"(r1), "11"(r2), "12"(r3), "13"(r4), "14"(r5), "15"(r6), "16"(r7), "w"(_k0), // %34 "w"(_k1), // %35 "w"(_k2), // %36 "w"(_k3), // %37 "w"(_k4), // %38 "w"(_k5), // %39 "w"(_k6), // %40 "w"(_k7), // %41 "w"(_k0n), // %42 "w"(_k1n), // %43 "w"(_k2n), // %44 "w"(_k3n), // %45 "w"(_k4n), // %46 "w"(_k5n), // %47 "w"(_k6n), // %48 "w"(_k7n) // %49 : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25" //, "v26", "v27", "v28", "v29", "v30", "v31" ); } #else for (; nn > 0; nn--) { float32x4_t _p = vld1q_f32(r0); float32x4_t _out0p = vld1q_f32(outptr0); float32x4_t _out1p = vld1q_f32(outptr1); float32x4_t _out2p = vld1q_f32(outptr2); float32x4_t _out3p = vld1q_f32(outptr3); float32x4_t _out4p = vld1q_f32(outptr4); float32x4_t _out5p = vld1q_f32(outptr5); float32x4_t _out6p = vld1q_f32(outptr6); float32x4_t _out7p = vld1q_f32(outptr7); _out0p = vfmaq_laneq_f32(_out0p, _p, _k0, 0); _out1p = vfmaq_laneq_f32(_out1p, _p, _k1, 0); _out2p = vfmaq_laneq_f32(_out2p, _p, _k2, 0); _out3p = vfmaq_laneq_f32(_out3p, _p, _k3, 0); _out4p = vfmaq_laneq_f32(_out4p, _p, _k4, 0); _out5p = vfmaq_laneq_f32(_out5p, _p, _k5, 0); _out6p = vfmaq_laneq_f32(_out6p, _p, _k6, 0); _out7p = vfmaq_laneq_f32(_out7p, _p, _k7, 0); float32x4_t _p1 = vld1q_f32(r1); _out0p = vfmaq_laneq_f32(_out0p, _p1, _k0, 1); _out1p = vfmaq_laneq_f32(_out1p, _p1, _k1, 1); _out2p = vfmaq_laneq_f32(_out2p, _p1, _k2, 1); _out3p = vfmaq_laneq_f32(_out3p, _p1, _k3, 1); _out4p = 
vfmaq_laneq_f32(_out4p, _p1, _k4, 1); _out5p = vfmaq_laneq_f32(_out5p, _p1, _k5, 1); _out6p = vfmaq_laneq_f32(_out6p, _p1, _k6, 1); _out7p = vfmaq_laneq_f32(_out7p, _p1, _k7, 1); float32x4_t _p2 = vld1q_f32(r2); _out0p = vfmaq_laneq_f32(_out0p, _p2, _k0, 2); _out1p = vfmaq_laneq_f32(_out1p, _p2, _k1, 2); _out2p = vfmaq_laneq_f32(_out2p, _p2, _k2, 2); _out3p = vfmaq_laneq_f32(_out3p, _p2, _k3, 2); _out4p = vfmaq_laneq_f32(_out4p, _p2, _k4, 2); _out5p = vfmaq_laneq_f32(_out5p, _p2, _k5, 2); _out6p = vfmaq_laneq_f32(_out6p, _p2, _k6, 2); _out7p = vfmaq_laneq_f32(_out7p, _p2, _k7, 2); float32x4_t _p3 = vld1q_f32(r3); _out0p = vfmaq_laneq_f32(_out0p, _p3, _k0, 3); _out1p = vfmaq_laneq_f32(_out1p, _p3, _k1, 3); _out2p = vfmaq_laneq_f32(_out2p, _p3, _k2, 3); _out3p = vfmaq_laneq_f32(_out3p, _p3, _k3, 3); _out4p = vfmaq_laneq_f32(_out4p, _p3, _k4, 3); _out5p = vfmaq_laneq_f32(_out5p, _p3, _k5, 3); _out6p = vfmaq_laneq_f32(_out6p, _p3, _k6, 3); _out7p = vfmaq_laneq_f32(_out7p, _p3, _k7, 3); float32x4_t _p4 = vld1q_f32(r4); _out0p = vfmaq_laneq_f32(_out0p, _p4, _k0n, 0); _out1p = vfmaq_laneq_f32(_out1p, _p4, _k1n, 0); _out2p = vfmaq_laneq_f32(_out2p, _p4, _k2n, 0); _out3p = vfmaq_laneq_f32(_out3p, _p4, _k3n, 0); _out4p = vfmaq_laneq_f32(_out4p, _p4, _k4n, 0); _out5p = vfmaq_laneq_f32(_out5p, _p4, _k5n, 0); _out6p = vfmaq_laneq_f32(_out6p, _p4, _k6n, 0); _out7p = vfmaq_laneq_f32(_out7p, _p4, _k7n, 0); float32x4_t _p5 = vld1q_f32(r5); _out0p = vfmaq_laneq_f32(_out0p, _p5, _k0n, 1); _out1p = vfmaq_laneq_f32(_out1p, _p5, _k1n, 1); _out2p = vfmaq_laneq_f32(_out2p, _p5, _k2n, 1); _out3p = vfmaq_laneq_f32(_out3p, _p5, _k3n, 1); _out4p = vfmaq_laneq_f32(_out4p, _p5, _k4n, 1); _out5p = vfmaq_laneq_f32(_out5p, _p5, _k5n, 1); _out6p = vfmaq_laneq_f32(_out6p, _p5, _k6n, 1); _out7p = vfmaq_laneq_f32(_out7p, _p5, _k7n, 1); float32x4_t _p6 = vld1q_f32(r6); _out0p = vfmaq_laneq_f32(_out0p, _p6, _k0n, 2); _out1p = vfmaq_laneq_f32(_out1p, _p6, _k1n, 2); _out2p = vfmaq_laneq_f32(_out2p, _p6, 
_k2n, 2); _out3p = vfmaq_laneq_f32(_out3p, _p6, _k3n, 2); _out4p = vfmaq_laneq_f32(_out4p, _p6, _k4n, 2); _out5p = vfmaq_laneq_f32(_out5p, _p6, _k5n, 2); _out6p = vfmaq_laneq_f32(_out6p, _p6, _k6n, 2); _out7p = vfmaq_laneq_f32(_out7p, _p6, _k7n, 2); float32x4_t _p7 = vld1q_f32(r7); _out0p = vfmaq_laneq_f32(_out0p, _p7, _k0n, 3); _out1p = vfmaq_laneq_f32(_out1p, _p7, _k1n, 3); _out2p = vfmaq_laneq_f32(_out2p, _p7, _k2n, 3); _out3p = vfmaq_laneq_f32(_out3p, _p7, _k3n, 3); _out4p = vfmaq_laneq_f32(_out4p, _p7, _k4n, 3); _out5p = vfmaq_laneq_f32(_out5p, _p7, _k5n, 3); _out6p = vfmaq_laneq_f32(_out6p, _p7, _k6n, 3); _out7p = vfmaq_laneq_f32(_out7p, _p7, _k7n, 3); vst1q_f32(outptr0, _out0p); vst1q_f32(outptr1, _out1p); vst1q_f32(outptr2, _out2p); vst1q_f32(outptr3, _out3p); vst1q_f32(outptr4, _out4p); vst1q_f32(outptr5, _out5p); vst1q_f32(outptr6, _out6p); vst1q_f32(outptr7, _out7p); r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; r5 += 4; r6 += 4; r7 += 4; outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; outptr4 += 4; outptr5 += 4; outptr6 += 4; outptr7 += 4; } #endif for (; remain > 0; remain--) { // TODO neon optimize float sum0 = *r0 * kernel0[0] + *r1 * kernel0[1] + *r2 * kernel0[2] + *r3 * kernel0[3] + *r4 * kernel0[4] + *r5 * kernel0[5] + *r6 * kernel0[6] + *r7 * kernel0[7]; float sum1 = *r0 * kernel1[0] + *r1 * kernel1[1] + *r2 * kernel1[2] + *r3 * kernel1[3] + *r4 * kernel1[4] + *r5 * kernel1[5] + *r6 * kernel1[6] + *r7 * kernel1[7]; float sum2 = *r0 * kernel2[0] + *r1 * kernel2[1] + *r2 * kernel2[2] + *r3 * kernel2[3] + *r4 * kernel2[4] + *r5 * kernel2[5] + *r6 * kernel2[6] + *r7 * kernel2[7]; float sum3 = *r0 * kernel3[0] + *r1 * kernel3[1] + *r2 * kernel3[2] + *r3 * kernel3[3] + *r4 * kernel3[4] + *r5 * kernel3[5] + *r6 * kernel3[6] + *r7 * kernel3[7]; float sum4 = *r0 * kernel4[0] + *r1 * kernel4[1] + *r2 * kernel4[2] + *r3 * kernel4[3] + *r4 * kernel4[4] + *r5 * kernel4[5] + *r6 * kernel4[6] + *r7 * kernel4[7]; float sum5 = *r0 * kernel5[0] + *r1 * 
kernel5[1] + *r2 * kernel5[2] + *r3 * kernel5[3] + *r4 * kernel5[4] + *r5 * kernel5[5] + *r6 * kernel5[6] + *r7 * kernel5[7]; float sum6 = *r0 * kernel6[0] + *r1 * kernel6[1] + *r2 * kernel6[2] + *r3 * kernel6[3] + *r4 * kernel6[4] + *r5 * kernel6[5] + *r6 * kernel6[6] + *r7 * kernel6[7]; float sum7 = *r0 * kernel7[0] + *r1 * kernel7[1] + *r2 * kernel7[2] + *r3 * kernel7[3] + *r4 * kernel7[4] + *r5 * kernel7[5] + *r6 * kernel7[6] + *r7 * kernel7[7]; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; *outptr4 += sum4; *outptr5 += sum5; *outptr6 += sum6; *outptr7 += sum7; r0++; r1++; r2++; r3++; r4++; r5++; r6++; r7++; outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; outptr6++; outptr7++; } } for (; q < inch; q++) { float* outptr0 = out0; float* outptr1 = out1; float* outptr2 = out2; float* outptr3 = out3; float* outptr4 = out4; float* outptr5 = out5; float* outptr6 = out6; float* outptr7 = out7; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p * inch + q; const float* kernel1 = kernel + (p + 1) * inch + q; const float* kernel2 = kernel + (p + 2) * inch + q; const float* kernel3 = kernel + (p + 3) * inch + q; const float* kernel4 = kernel + (p + 4) * inch + q; const float* kernel5 = kernel + (p + 5) * inch + q; const float* kernel6 = kernel + (p + 6) * inch + q; const float* kernel7 = kernel + (p + 7) * inch + q; const float k0 = kernel0[0]; const float k1 = kernel1[0]; const float k2 = kernel2[0]; const float k3 = kernel3[0]; const float k4 = kernel4[0]; const float k5 = kernel5[0]; const float k6 = kernel6[0]; const float k7 = kernel7[0]; const float* r0 = img0; int size = outw * outh; int nn = size >> 2; int remain = size & 3; float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); float32x4_t _k4 = vdupq_n_f32(k4); float32x4_t _k5 = vdupq_n_f32(k5); float32x4_t _k6 = vdupq_n_f32(k6); float32x4_t _k7 = 
vdupq_n_f32(k7); for (; nn > 0; nn--) { float32x4_t _p = vld1q_f32(r0); float32x4_t _out0p = vld1q_f32(outptr0); float32x4_t _out1p = vld1q_f32(outptr1); float32x4_t _out2p = vld1q_f32(outptr2); float32x4_t _out3p = vld1q_f32(outptr3); float32x4_t _out4p = vld1q_f32(outptr4); float32x4_t _out5p = vld1q_f32(outptr5); float32x4_t _out6p = vld1q_f32(outptr6); float32x4_t _out7p = vld1q_f32(outptr7); _out0p = vfmaq_f32(_out0p, _p, _k0); _out1p = vfmaq_f32(_out1p, _p, _k1); _out2p = vfmaq_f32(_out2p, _p, _k2); _out3p = vfmaq_f32(_out3p, _p, _k3); _out4p = vfmaq_f32(_out4p, _p, _k4); _out5p = vfmaq_f32(_out5p, _p, _k5); _out6p = vfmaq_f32(_out6p, _p, _k6); _out7p = vfmaq_f32(_out7p, _p, _k7); vst1q_f32(outptr0, _out0p); vst1q_f32(outptr1, _out1p); vst1q_f32(outptr2, _out2p); vst1q_f32(outptr3, _out3p); vst1q_f32(outptr4, _out4p); vst1q_f32(outptr5, _out5p); vst1q_f32(outptr6, _out6p); vst1q_f32(outptr7, _out7p); r0 += 4; outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; outptr4 += 4; outptr5 += 4; outptr6 += 4; outptr7 += 4; } for (; remain > 0; remain--) { // TODO neon optimize float sum0 = *r0 * k0; float sum1 = *r0 * k1; float sum2 = *r0 * k2; float sum3 = *r0 * k3; float sum4 = *r0 * k4; float sum5 = *r0 * k5; float sum6 = *r0 * k6; float sum7 = *r0 * k7; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; *outptr4 += sum4; *outptr5 += sum5; *outptr6 += sum6; *outptr7 += sum7; r0++; outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; outptr6++; outptr7++; } } } #else nn_outch = outch / 6; remain_outch_start = nn_outch * 6; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 6; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p + 1); Mat out2 = top_blob.channel(p + 2); Mat out3 = top_blob.channel(p + 3); Mat out4 = top_blob.channel(p + 4); Mat out5 = top_blob.channel(p + 5); const float bias0 = bias ? bias[p] : 0.f; const float bias1 = bias ? 
bias[p + 1] : 0.f; const float bias2 = bias ? bias[p + 2] : 0.f; const float bias3 = bias ? bias[p + 3] : 0.f; const float bias4 = bias ? bias[p + 4] : 0.f; const float bias5 = bias ? bias[p + 5] : 0.f; out0.fill(bias0); out1.fill(bias1); out2.fill(bias2); out3.fill(bias3); out4.fill(bias4); out5.fill(bias5); int q = 0; for (; q + 3 < inch; q += 4) { float* outptr0 = out0; float* outptr1 = out1; float* outptr2 = out2; float* outptr3 = out3; float* outptr4 = out4; float* outptr5 = out5; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q + 1); const float* img2 = bottom_blob.channel(q + 2); const float* img3 = bottom_blob.channel(q + 3); const float* kernel0 = kernel + p * inch + q; const float* kernel1 = kernel + (p + 1) * inch + q; const float* kernel2 = kernel + (p + 2) * inch + q; const float* kernel3 = kernel + (p + 3) * inch + q; const float* kernel4 = kernel + (p + 4) * inch + q; const float* kernel5 = kernel + (p + 5) * inch + q; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; int size = outw * outh; #if __ARM_NEON int nn = size >> 2; int remain = size & 3; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vld1q_f32(kernel0); float32x4_t _k1 = vld1q_f32(kernel1); float32x4_t _k2 = vld1q_f32(kernel2); float32x4_t _k3 = vld1q_f32(kernel3); float32x4_t _k4 = vld1q_f32(kernel4); float32x4_t _k5 = vld1q_f32(kernel5); if (nn > 0) { asm volatile( "pld [%7, #128] \n" "vld1.f32 {d24-d25}, [%7 :128]! \n" // q12 = r0 "pld [%1, #128] \n" "vld1.f32 {d12-d13}, [%1 :128] \n" // q6 = outptr0 "pld [%2, #128] \n" "vld1.f32 {d14-d15}, [%2 :128] \n" // q7 = outptr1 "vmla.f32 q6, q12, %e22[0] \n" "0: \n" "pld [%3, #128] \n" "vld1.f32 {d16-d17}, [%3 :128] \n" // q8 = outptr2 "vmla.f32 q7, q12, %e23[0] \n" "pld [%4, #128] \n" "vld1.f32 {d18-d19}, [%4 :128] \n" // q9 = outptr3 "vmla.f32 q8, q12, %e24[0] \n" "pld [%8, #128] \n" "vld1.f32 {d26-d27}, [%8 :128]! 
\n" // q13 = r1 "vmla.f32 q9, q12, %e25[0] \n" "pld [%5, #128] \n" "vld1.f32 {d20-d21}, [%5 :128] \n" // q10 = outptr4 "vmla.f32 q6, q13, %e22[1] \n" "vmla.f32 q7, q13, %e23[1] \n" "pld [%6, #128] \n" "vld1.f32 {d22-d23}, [%6 :128] \n" // q11 = outptr5 "vmla.f32 q10, q12, %e26[0] \n" "vmla.f32 q11, q12, %e27[0] \n" "vmla.f32 q8, q13, %e24[1] \n" "vmla.f32 q9, q13, %e25[1] \n" "pld [%9, #128] \n" "vld1.f32 {d28-d29}, [%9 :128]! \n" // q14 = r2 "vmla.f32 q10, q13, %e26[1] \n" "vmla.f32 q11, q13, %e27[1] \n" "vmla.f32 q6, q14, %f22[0] \n" "vmla.f32 q7, q14, %f23[0] \n" "vmla.f32 q8, q14, %f24[0] \n" "vmla.f32 q9, q14, %f25[0] \n" "pld [%10, #128] \n" "vld1.f32 {d30-d31}, [%10 :128]! \n" // q15 = r3 "vmla.f32 q10, q14, %f26[0] \n" "vmla.f32 q11, q14, %f27[0] \n" "vmla.f32 q6, q15, %f22[1] \n" "vmla.f32 q7, q15, %f23[1] \n" "vmla.f32 q8, q15, %f24[1] \n" "vmla.f32 q9, q15, %f25[1] \n" "pld [%7, #128] \n" "vld1.f32 {d24-d25}, [%7 :128]! \n" // q12 = r0 "vmla.f32 q10, q15, %f26[1] \n" "vmla.f32 q11, q15, %f27[1] \n" "vst1.f32 {d12-d13}, [%1 :128]! \n" "vst1.f32 {d14-d15}, [%2 :128]! \n" "pld [%1, #128] \n" "vld1.f32 {d12-d13}, [%1 :128] \n" // q6 = outptr0 "vst1.f32 {d16-d17}, [%3 :128]! \n" "vst1.f32 {d18-d19}, [%4 :128]! \n" "vmla.f32 q6, q12, %e22[0] \n" "pld [%2, #128] \n" "vld1.f32 {d14-d15}, [%2 :128] \n" // q7 = outptr1 "subs %0, #1 \n" "vst1.f32 {d20-d21}, [%5 :128]! \n" "vst1.f32 {d22-d23}, [%6 :128]! 
\n" "bne 0b \n" "sub %7, #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(r0), // %7 "=r"(r1), // %8 "=r"(r2), // %9 "=r"(r3) // %10 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(r0), "8"(r1), "9"(r2), "10"(r3), "w"(_k0), // %22 "w"(_k1), // %23 "w"(_k2), // %24 "w"(_k3), // %25 "w"(_k4), // %26 "w"(_k5) // %27 : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __ARM_NEON for (; remain > 0; remain--) { // TODO neon optimize float sum0 = *r0 * kernel0[0] + *r1 * kernel0[1] + *r2 * kernel0[2] + *r3 * kernel0[3]; float sum1 = *r0 * kernel1[0] + *r1 * kernel1[1] + *r2 * kernel1[2] + *r3 * kernel1[3]; float sum2 = *r0 * kernel2[0] + *r1 * kernel2[1] + *r2 * kernel2[2] + *r3 * kernel2[3]; float sum3 = *r0 * kernel3[0] + *r1 * kernel3[1] + *r2 * kernel3[2] + *r3 * kernel3[3]; float sum4 = *r0 * kernel4[0] + *r1 * kernel4[1] + *r2 * kernel4[2] + *r3 * kernel4[3]; float sum5 = *r0 * kernel5[0] + *r1 * kernel5[1] + *r2 * kernel5[2] + *r3 * kernel5[3]; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; *outptr4 += sum4; *outptr5 += sum5; r0++; r1++; r2++; r3++; outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; } } for (; q < inch; q++) { float* outptr0 = out0; float* outptr1 = out1; float* outptr2 = out2; float* outptr3 = out3; float* outptr4 = out4; float* outptr5 = out5; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p * inch + q; const float* kernel1 = kernel + (p + 1) * inch + q; const float* kernel2 = kernel + (p + 2) * inch + q; const float* kernel3 = kernel + (p + 3) * inch + q; const float* kernel4 = kernel + (p + 4) * inch + q; const float* kernel5 = kernel + (p + 5) * inch + q; const float k0 = kernel0[0]; const float k1 = kernel1[0]; const float k2 = kernel2[0]; const float k3 = kernel3[0]; const 
float k4 = kernel4[0]; const float k5 = kernel5[0]; const float* r0 = img0; int size = outw * outh; #if __ARM_NEON int nn = size >> 2; int remain = size & 3; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); float32x4_t _k4 = vdupq_n_f32(k4); float32x4_t _k5 = vdupq_n_f32(k5); if (nn > 0) { asm volatile( "pld [%7, #128] \n" "vld1.f32 {d24-d25}, [%7 :128]! \n" // q12 = r0 "pld [%1, #128] \n" "vld1.f32 {d12-d13}, [%1 :128] \n" // q6 = outptr0 "0: \n" "pld [%2, #128] \n" "vld1.f32 {d14-d15}, [%2 :128] \n" // q7 = outptr1 "vmla.f32 q6, q12, %q16 \n" "pld [%3, #128] \n" "vld1.f32 {d16-d17}, [%3 :128] \n" // q8 = outptr2 "vmla.f32 q7, q12, %q17 \n" "pld [%4, #128] \n" "vld1.f32 {d18-d19}, [%4 :128] \n" // q9 = outptr3 "vmla.f32 q8, q12, %q18 \n" "pld [%5, #128] \n" "vld1.f32 {d20-d21}, [%5 :128] \n" // q10 = outptr4 "vmla.f32 q9, q12, %q19 \n" "pld [%6, #128] \n" "vld1.f32 {d22-d23}, [%6 :128] \n" // q11 = outptr5 "vmla.f32 q10, q12, %q20 \n" "vmla.f32 q11, q12, %q21 \n" "pld [%7, #128] \n" "vld1.f32 {d24-d25}, [%7 :128]! \n" // q12 = r0 "vst1.f32 {d12-d13}, [%1 :128]! \n" "vst1.f32 {d14-d15}, [%2 :128]! \n" "pld [%1, #128] \n" "vld1.f32 {d12-d13}, [%1 :128] \n" // q6 = outptr0 "vst1.f32 {d16-d17}, [%3 :128]! \n" "vst1.f32 {d18-d19}, [%4 :128]! \n" "subs %0, #1 \n" "vst1.f32 {d20-d21}, [%5 :128]! \n" "vst1.f32 {d22-d23}, [%6 :128]! 
\n" "bne 0b \n" "sub %7, #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(r0) // %7 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(r0), "w"(_k0), // %16 "w"(_k1), // %17 "w"(_k2), // %18 "w"(_k3), // %19 "w"(_k4), // %20 "w"(_k5) // %21 : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12"); } #endif // __ARM_NEON for (; remain > 0; remain--) { // TODO neon optimize float sum0 = *r0 * k0; float sum1 = *r0 * k1; float sum2 = *r0 * k2; float sum3 = *r0 * k3; float sum4 = *r0 * k4; float sum5 = *r0 * k5; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; *outptr4 += sum4; *outptr5 += sum5; r0++; outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; } } } #endif // __ARM_NEON && __aarch64__ nn_outch = (outch - remain_outch_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p + 1); Mat out2 = top_blob.channel(p + 2); Mat out3 = top_blob.channel(p + 3); const float bias0 = bias ? bias[p] : 0.f; const float bias1 = bias ? bias[p + 1] : 0.f; const float bias2 = bias ? bias[p + 2] : 0.f; const float bias3 = bias ? 
bias[p + 3] : 0.f; out0.fill(bias0); out1.fill(bias1); out2.fill(bias2); out3.fill(bias3); int q = 0; for (; q + 3 < inch; q += 4) { float* outptr0 = out0; float* outptr1 = out1; float* outptr2 = out2; float* outptr3 = out3; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q + 1); const float* img2 = bottom_blob.channel(q + 2); const float* img3 = bottom_blob.channel(q + 3); const float* kernel0 = kernel + p * inch + q; const float* kernel1 = kernel + (p + 1) * inch + q; const float* kernel2 = kernel + (p + 2) * inch + q; const float* kernel3 = kernel + (p + 3) * inch + q; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; int size = outw * outh; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vld1q_f32(kernel0); float32x4_t _k1 = vld1q_f32(kernel1); float32x4_t _k2 = vld1q_f32(kernel2); float32x4_t _k3 = vld1q_f32(kernel3); #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%5, #256] \n" "ld1 {v6.4s, v7.4s}, [%5], #32 \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v8.4s, v9.4s}, [%1] \n" "0: \n" "fmla v8.4s, v6.4s, %18.s[0] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v10.4s, v11.4s}, [%2] \n" "fmla v9.4s, v7.4s, %18.s[0] \n" "fmla v10.4s, v6.4s, %19.s[0] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v12.4s, v13.4s}, [%3] \n" "fmla v11.4s, v7.4s, %19.s[0] \n" "fmla v12.4s, v6.4s, %20.s[0] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v14.4s, v15.4s}, [%4] \n" "fmla v13.4s, v7.4s, %20.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v4.4s, v5.4s}, [%6], #32 \n" "fmla v14.4s, v6.4s, %21.s[0] \n" "fmla v15.4s, v7.4s, %21.s[0] \n" "fmla v8.4s, v4.4s, %18.s[1] \n" "fmla v9.4s, v5.4s, %18.s[1] \n" "fmla v10.4s, v4.4s, %19.s[1] \n" "fmla v11.4s, v5.4s, %19.s[1] \n" "fmla v12.4s, v4.4s, %20.s[1] \n" "fmla v13.4s, v5.4s, %20.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v6.4s, v7.4s}, [%7], #32 \n" "fmla v14.4s, 
v4.4s, %21.s[1] \n" "fmla v15.4s, v5.4s, %21.s[1] \n" "fmla v8.4s, v6.4s, %18.s[2] \n" "fmla v9.4s, v7.4s, %18.s[2] \n" "fmla v10.4s, v6.4s, %19.s[2] \n" "fmla v11.4s, v7.4s, %19.s[2] \n" "fmla v12.4s, v6.4s, %20.s[2] \n" "fmla v13.4s, v7.4s, %20.s[2] \n" "prfm pldl1keep, [%8, #256] \n" "ld1 {v4.4s, v5.4s}, [%8], #32 \n" "fmla v14.4s, v6.4s, %21.s[2] \n" "fmla v15.4s, v7.4s, %21.s[2] \n" "fmla v8.4s, v4.4s, %18.s[3] \n" "fmla v9.4s, v5.4s, %18.s[3] \n" "fmla v10.4s, v4.4s, %19.s[3] \n" "fmla v11.4s, v5.4s, %19.s[3] \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "fmla v12.4s, v4.4s, %20.s[3] \n" "fmla v13.4s, v5.4s, %20.s[3] \n" "st1 {v10.4s, v11.4s}, [%2], #32 \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v6.4s, v7.4s}, [%5], #32 \n" "fmla v14.4s, v4.4s, %21.s[3] \n" "fmla v15.4s, v5.4s, %21.s[3] \n" "st1 {v12.4s, v13.4s}, [%3], #32 \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v8.4s, v9.4s}, [%1] \n" "subs %w0, %w0, #1 \n" "st1 {v14.4s, v15.4s}, [%4], #32 \n" "bne 0b \n" "sub %5, %5, #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(r0), // %5 "=r"(r1), // %6 "=r"(r2), // %7 "=r"(r3) // %8 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(r0), "6"(r1), "7"(r2), "8"(r3), "w"(_k0), // %18 "w"(_k1), // %19 "w"(_k2), // %20 "w"(_k3) // %21 : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"); } #else if (nn > 0) { asm volatile( "pld [%5, #256] \n" "vld1.f32 {d12-d15}, [%5 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n" "0: \n" "vmla.f32 q8, q6, %e18[0] \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n" "vmla.f32 q9, q7, %e18[0] \n" "vmla.f32 q10, q6, %e19[0] \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128] \n" "vmla.f32 q11, q7, %e19[0] \n" "vmla.f32 q12, q6, %e20[0] \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4 :128] \n" "vmla.f32 q13, q7, %e20[0] \n" "pld [%6, #256] \n" "vld1.f32 {d8-d11}, [%6 :128]! 
\n" "vmla.f32 q14, q6, %e21[0] \n" "vmla.f32 q15, q7, %e21[0] \n" "vmla.f32 q8, q4, %e18[1] \n" "vmla.f32 q9, q5, %e18[1] \n" "vmla.f32 q10, q4, %e19[1] \n" "vmla.f32 q11, q5, %e19[1] \n" "vmla.f32 q12, q4, %e20[1] \n" "vmla.f32 q13, q5, %e20[1] \n" "pld [%7, #256] \n" "vld1.f32 {d12-d15}, [%7 :128]! \n" "vmla.f32 q14, q4, %e21[1] \n" "vmla.f32 q15, q5, %e21[1] \n" "vmla.f32 q8, q6, %f18[0] \n" "vmla.f32 q9, q7, %f18[0] \n" "vmla.f32 q10, q6, %f19[0] \n" "vmla.f32 q11, q7, %f19[0] \n" "vmla.f32 q12, q6, %f20[0] \n" "vmla.f32 q13, q7, %f20[0] \n" "pld [%8, #256] \n" "vld1.f32 {d8-d11}, [%8 :128]! \n" "vmla.f32 q14, q6, %f21[0] \n" "vmla.f32 q15, q7, %f21[0] \n" "vmla.f32 q8, q4, %f18[1] \n" "vmla.f32 q9, q5, %f18[1] \n" "vmla.f32 q10, q4, %f19[1] \n" "vmla.f32 q11, q5, %f19[1] \n" "vmla.f32 q12, q4, %f20[1] \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "vmla.f32 q13, q5, %f20[1] \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "vmla.f32 q14, q4, %f21[1] \n" "pld [%5, #256] \n" "vld1.f32 {d12-d15}, [%5 :128]! \n" "vmla.f32 q15, q5, %f21[1] \n" "vst1.f32 {d24-d27}, [%3 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n" "subs %0, #1 \n" "vst1.f32 {d28-d31}, [%4 :128]! 
\n" "bne 0b \n" "sub %5, #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(r0), // %5 "=r"(r1), // %6 "=r"(r2), // %7 "=r"(r3) // %8 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(r0), "6"(r1), "7"(r2), "8"(r3), "w"(_k0), // %18 "w"(_k1), // %19 "w"(_k2), // %20 "w"(_k3) // %21 : "cc", "memory", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { // TODO neon optimize float sum0 = *r0 * kernel0[0] + *r1 * kernel0[1] + *r2 * kernel0[2] + *r3 * kernel0[3]; float sum1 = *r0 * kernel1[0] + *r1 * kernel1[1] + *r2 * kernel1[2] + *r3 * kernel1[3]; float sum2 = *r0 * kernel2[0] + *r1 * kernel2[1] + *r2 * kernel2[2] + *r3 * kernel2[3]; float sum3 = *r0 * kernel3[0] + *r1 * kernel3[1] + *r2 * kernel3[2] + *r3 * kernel3[3]; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; r0++; r1++; r2++; r3++; outptr0++; outptr1++; outptr2++; outptr3++; } } for (; q < inch; q++) { float* outptr0 = out0; float* outptr1 = out1; float* outptr2 = out2; float* outptr3 = out3; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p * inch + q; const float* kernel1 = kernel + (p + 1) * inch + q; const float* kernel2 = kernel + (p + 2) * inch + q; const float* kernel3 = kernel + (p + 3) * inch + q; const float k0 = kernel0[0]; const float k1 = kernel1[0]; const float k2 = kernel2[0]; const float k3 = kernel3[0]; const float* r0 = img0; int size = outw * outh; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%5, #256] \n" "ld1 {v6.4s, v7.4s}, [%5], #32 \n" "0: \n" "prfm pldl1keep, [%1, #256] \n" "ld1 
{v8.4s, v9.4s}, [%1] \n" "fmla v8.4s, v6.4s, %12.4s \n" "fmla v9.4s, v7.4s, %12.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v10.4s, v11.4s}, [%2] \n" "fmla v10.4s, v6.4s, %13.4s \n" "fmla v11.4s, v7.4s, %13.4s \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v12.4s, v13.4s}, [%3] \n" "fmla v12.4s, v6.4s, %14.4s \n" "fmla v13.4s, v7.4s, %14.4s \n" "st1 {v10.4s, v11.4s}, [%2], #32 \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v14.4s, v15.4s}, [%4] \n" "fmla v14.4s, v6.4s, %15.4s \n" "fmla v15.4s, v7.4s, %15.4s \n" "st1 {v12.4s, v13.4s}, [%3], #32 \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v6.4s, v7.4s}, [%5], #32 \n" "subs %w0, %w0, #1 \n" "st1 {v14.4s, v15.4s}, [%4], #32 \n" "bne 0b \n" "sub %5, %5, #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(r0) // %5 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(r0), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"); } #else if (nn > 0) { asm volatile( "pld [%5, #256] \n" "vld1.f32 {d12-d15}, [%5 :128]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n" "vmla.f32 q8, q6, %q12 \n" "vmla.f32 q9, q7, %q12 \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n" "vmla.f32 q10, q6, %q13 \n" "vmla.f32 q11, q7, %q13 \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128] \n" "vmla.f32 q12, q6, %q14 \n" "vmla.f32 q13, q7, %q14 \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4 :128] \n" "vmla.f32 q14, q6, %q15 \n" "vmla.f32 q15, q7, %q15 \n" "vst1.f32 {d24-d27}, [%3 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d12-d15}, [%5 :128]! \n" "subs %0, #1 \n" "vst1.f32 {d28-d31}, [%4 :128]! 
\n" "bne 0b \n" "sub %5, #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(r0) // %5 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(r0), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { // TODO neon optimize float sum0 = *r0 * k0; float sum1 = *r0 * k1; float sum2 = *r0 * k2; float sum3 = *r0 * k3; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; r0++; outptr0++; outptr1++; outptr2++; outptr3++; } } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); int q = 0; for (; q + 3 < inch; q += 4) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q + 1); const float* img2 = bottom_blob.channel(q + 2); const float* img3 = bottom_blob.channel(q + 3); const float* kernel0 = kernel + p * inch + q; const float k0 = kernel0[0]; const float k1 = kernel0[1]; const float k2 = kernel0[2]; const float k3 = kernel0[3]; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; int size = outw * outh; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%2, #256] \n" "ld1 {v2.4s, v3.4s}, [%2], #32 \n" "0: \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v0.4s, v1.4s}, [%1] \n" "fmla v0.4s, v2.4s, %12.4s \n" "fmla v1.4s, v3.4s, %12.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 
{v2.4s, v3.4s}, [%3], #32 \n" "fmla v0.4s, v2.4s, %13.4s \n" "fmla v1.4s, v3.4s, %13.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v2.4s, v3.4s}, [%4], #32 \n" "fmla v0.4s, v2.4s, %14.4s \n" "fmla v1.4s, v3.4s, %14.4s \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v2.4s, v3.4s}, [%5], #32 \n" "fmla v0.4s, v2.4s, %15.4s \n" "fmla v1.4s, v3.4s, %15.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v2.4s, v3.4s}, [%2], #32 \n" "subs %w0, %w0, #1 \n" "st1 {v0.4s, v1.4s}, [%1], #32 \n" "bne 0b \n" "sub %2, %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "v0", "v1", "v2", "v3"); } #else if (nn > 0) { asm volatile( "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128] \n" "vmla.f32 q0, q2, %q12 \n" "vmla.f32 q1, q3, %q12 \n" "pld [%3, #256] \n" "vld1.f32 {d4-d7}, [%3 :128]! \n" "vmla.f32 q0, q2, %q13 \n" "vmla.f32 q1, q3, %q13 \n" "pld [%4, #256] \n" "vld1.f32 {d4-d7}, [%4 :128]! \n" "vmla.f32 q0, q2, %q14 \n" "vmla.f32 q1, q3, %q14 \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q0, q2, %q15 \n" "vmla.f32 q1, q3, %q15 \n" "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1 :128]! 
\n" "bne 0b \n" "sub %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "q0", "q1", "q2", "q3"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { float sum = *r0 * k0; float sum1 = *r1 * k1; float sum2 = *r2 * k2; float sum3 = *r3 * k3; *outptr += sum + sum1 + sum2 + sum3; r0++; r1++; r2++; r3++; outptr++; } } for (; q < inch; q++) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p * inch + q; const float k0 = kernel0[0]; const float* r0 = img0; int size = outw * outh; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%2, #256] \n" "ld1 {v2.4s, v3.4s}, [%2], #32 \n" "0: \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v0.4s, v1.4s}, [%1] \n" "fmla v0.4s, v2.4s, %6.4s \n" "fmla v1.4s, v3.4s, %6.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v2.4s, v3.4s}, [%2], #32 \n" "subs %w0, %w0, #1 \n" "st1 {v0.4s, v1.4s}, [%1], #32 \n" "bne 0b \n" "sub %2, %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0) // %2 : "0"(nn), "1"(outptr), "2"(r0), "w"(_k0) // %6 : "cc", "memory", "v0", "v1", "v2", "v3"); } #else if (nn > 0) { asm volatile( "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128] \n" "vmla.f32 q0, q2, %q6 \n" "vmla.f32 q1, q3, %q6 \n" "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1 :128]! 
\n" "bne 0b \n" "sub %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0) // %2 : "0"(nn), "1"(outptr), "2"(r0), "w"(_k0) // %6 : "cc", "memory", "q0", "q1", "q2", "q3"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { float sum = *r0 * k0; *outptr += sum; r0++; outptr++; } } } } static void conv1x1s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const float* kernel = _kernel; const float* bias = _bias; int nn_outch = outch >> 2; int remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p + 1); Mat out2 = top_blob.channel(p + 2); Mat out3 = top_blob.channel(p + 3); const float bias0 = bias ? bias[p] : 0.f; const float bias1 = bias ? bias[p + 1] : 0.f; const float bias2 = bias ? bias[p + 2] : 0.f; const float bias3 = bias ? 
bias[p + 3] : 0.f; out0.fill(bias0); out1.fill(bias1); out2.fill(bias2); out3.fill(bias3); int q = 0; for (; q + 3 < inch; q += 4) { float* outptr0 = out0; float* outptr1 = out1; float* outptr2 = out2; float* outptr3 = out3; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q + 1); const float* img2 = bottom_blob.channel(q + 2); const float* img3 = bottom_blob.channel(q + 3); const float* kernel0 = kernel + p * inch + q; const float* kernel1 = kernel + (p + 1) * inch + q; const float* kernel2 = kernel + (p + 2) * inch + q; const float* kernel3 = kernel + (p + 3) * inch + q; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; for (int i = 0; i < outh; i++) { int size = outw; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vld1q_f32(kernel0); float32x4_t _k1 = vld1q_f32(kernel1); float32x4_t _k2 = vld1q_f32(kernel2); float32x4_t _k3 = vld1q_f32(kernel3); #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "prfm pldl1keep, [%5, #512] \n" "ld2 {v4.4s, v5.4s}, [%5], #32 \n" "ld2 {v6.4s, v7.4s}, [%5], #32 \n" "and v5.16b, v6.16b, v6.16b \n" // v4 v5 "prfm pldl1keep, [%1, #256] \n" "ld1 {v8.4s, v9.4s}, [%1] \n" "fmla v8.4s, v4.4s, %18.s[0] \n" "fmla v9.4s, v5.4s, %18.s[0] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v10.4s, v11.4s}, [%2] \n" "fmla v10.4s, v4.4s, %19.s[0] \n" "fmla v11.4s, v5.4s, %19.s[0] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v12.4s, v13.4s}, [%3] \n" "fmla v12.4s, v4.4s, %20.s[0] \n" "fmla v13.4s, v5.4s, %20.s[0] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v14.4s, v15.4s}, [%4] \n" "prfm pldl1keep, [%6, #512] \n" "ld2 {v6.4s, v7.4s}, [%6], #32 \n" "fmla v14.4s, v4.4s, %21.s[0] \n" "fmla v15.4s, v5.4s, %21.s[0] \n" "ld2 {v4.4s, v5.4s}, [%6], #32 \n" "and v7.16b, v4.16b, v4.16b \n" // v6 v7 "fmla v8.4s, v6.4s, %18.s[1] \n" "fmla v9.4s, v7.4s, %18.s[1] \n" "fmla v10.4s, v6.4s, %19.s[1] \n" "fmla 
v11.4s, v7.4s, %19.s[1] \n" "fmla v12.4s, v6.4s, %20.s[1] \n" "fmla v13.4s, v7.4s, %20.s[1] \n" "prfm pldl1keep, [%7, #512] \n" "ld2 {v4.4s, v5.4s}, [%7], #32 \n" "fmla v14.4s, v6.4s, %21.s[1] \n" "fmla v15.4s, v7.4s, %21.s[1] \n" "ld2 {v6.4s, v7.4s}, [%7], #32 \n" "and v5.16b, v6.16b, v6.16b \n" // v4 v5 "fmla v8.4s, v4.4s, %18.s[2] \n" "fmla v9.4s, v5.4s, %18.s[2] \n" "fmla v10.4s, v4.4s, %19.s[2] \n" "fmla v11.4s, v5.4s, %19.s[2] \n" "fmla v12.4s, v4.4s, %20.s[2] \n" "fmla v13.4s, v5.4s, %20.s[2] \n" "prfm pldl1keep, [%8, #512] \n" "ld2 {v6.4s, v7.4s}, [%8], #32 \n" "fmla v14.4s, v4.4s, %21.s[2] \n" "fmla v15.4s, v5.4s, %21.s[2] \n" "ld2 {v4.4s, v5.4s}, [%8], #32 \n" "and v7.16b, v4.16b, v4.16b \n" // v6 v7 "fmla v8.4s, v6.4s, %18.s[3] \n" "fmla v9.4s, v7.4s, %18.s[3] \n" "fmla v10.4s, v6.4s, %19.s[3] \n" "fmla v11.4s, v7.4s, %19.s[3] \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "fmla v12.4s, v6.4s, %20.s[3] \n" "fmla v13.4s, v7.4s, %20.s[3] \n" "st1 {v10.4s, v11.4s}, [%2], #32 \n" "fmla v14.4s, v6.4s, %21.s[3] \n" "fmla v15.4s, v7.4s, %21.s[3] \n" "st1 {v12.4s, v13.4s}, [%3], #32 \n" "subs %w0, %w0, #1 \n" "st1 {v14.4s, v15.4s}, [%4], #32 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(r0), // %5 "=r"(r1), // %6 "=r"(r2), // %7 "=r"(r3) // %8 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(r0), "6"(r1), "7"(r2), "8"(r3), "w"(_k0), // %18 "w"(_k1), // %19 "w"(_k2), // %20 "w"(_k3) // %21 : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"); } #else if (nn > 0) { asm volatile( "0: \n" "pld [%5, #512] \n" "vld2.f32 {d8-d11}, [%5]! \n" "vld2.f32 {d12-d15}, [%5]! 
\n" "vand q5, q6, q6 \n" // q4 q5 "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1] \n" "vmla.f32 q8, q4, %e18[0] \n" "vmla.f32 q9, q5, %e18[0] \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2] \n" "vmla.f32 q10, q4, %e19[0] \n" "vmla.f32 q11, q5, %e19[0] \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3] \n" "vmla.f32 q12, q4, %e20[0] \n" "vmla.f32 q13, q5, %e20[0] \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4] \n" "pld [%6, #512] \n" "vld2.f32 {d12-d15}, [%6]! \n" "vmla.f32 q14, q4, %e21[0] \n" "vmla.f32 q15, q5, %e21[0] \n" "vld2.f32 {d8-d11}, [%6]! \n" "vand q7, q4, q4 \n" // q6 q7 "vmla.f32 q8, q6, %e18[1] \n" "vmla.f32 q9, q7, %e18[1] \n" "vmla.f32 q10, q6, %e19[1] \n" "vmla.f32 q11, q7, %e19[1] \n" "vmla.f32 q12, q6, %e20[1] \n" "vmla.f32 q13, q7, %e20[1] \n" "pld [%7, #512] \n" "vld2.f32 {d8-d11}, [%7]! \n" "vmla.f32 q14, q6, %e21[1] \n" "vmla.f32 q15, q7, %e21[1] \n" "vld2.f32 {d12-d15}, [%7]! \n" "vand q5, q6, q6 \n" // q4 q5 "vmla.f32 q8, q4, %f18[0] \n" "vmla.f32 q9, q5, %f18[0] \n" "vmla.f32 q10, q4, %f19[0] \n" "vmla.f32 q11, q5, %f19[0] \n" "vmla.f32 q12, q4, %f20[0] \n" "vmla.f32 q13, q5, %f20[0] \n" "pld [%8, #512] \n" "vld2.f32 {d12-d15}, [%8]! \n" "vmla.f32 q14, q4, %f21[0] \n" "vmla.f32 q15, q5, %f21[0] \n" "vld2.f32 {d8-d11}, [%8]! \n" "vand q7, q4, q4 \n" // q6 q7 "vmla.f32 q8, q6, %f18[1] \n" "vmla.f32 q9, q7, %f18[1] \n" "vmla.f32 q10, q6, %f19[1] \n" "vmla.f32 q11, q7, %f19[1] \n" "vst1.f32 {d16-d19}, [%1]! \n" "vmla.f32 q12, q6, %f20[1] \n" "vmla.f32 q13, q7, %f20[1] \n" "vst1.f32 {d20-d23}, [%2]! \n" "vmla.f32 q14, q6, %f21[1] \n" "vmla.f32 q15, q7, %f21[1] \n" "vst1.f32 {d24-d27}, [%3]! \n" "subs %0, #1 \n" "vst1.f32 {d28-d31}, [%4]! 
\n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(r0), // %5 "=r"(r1), // %6 "=r"(r2), // %7 "=r"(r3) // %8 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(r0), "6"(r1), "7"(r2), "8"(r3), "w"(_k0), // %18 "w"(_k1), // %19 "w"(_k2), // %20 "w"(_k3) // %21 : "cc", "memory", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { // TODO neon optimize float sum0 = *r0 * kernel0[0] + *r1 * kernel0[1] + *r2 * kernel0[2] + *r3 * kernel0[3]; float sum1 = *r0 * kernel1[0] + *r1 * kernel1[1] + *r2 * kernel1[2] + *r3 * kernel1[3]; float sum2 = *r0 * kernel2[0] + *r1 * kernel2[1] + *r2 * kernel2[2] + *r3 * kernel2[3]; float sum3 = *r0 * kernel3[0] + *r1 * kernel3[1] + *r2 * kernel3[2] + *r3 * kernel3[3]; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; r0 += 2; r1 += 2; r2 += 2; r3 += 2; outptr0++; outptr1++; outptr2++; outptr3++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; } } for (; q < inch; q++) { float* outptr0 = out0; float* outptr1 = out1; float* outptr2 = out2; float* outptr3 = out3; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p * inch + q; const float* kernel1 = kernel + (p + 1) * inch + q; const float* kernel2 = kernel + (p + 2) * inch + q; const float* kernel3 = kernel + (p + 3) * inch + q; const float k0 = kernel0[0]; const float k1 = kernel1[0]; const float k2 = kernel2[0]; const float k3 = kernel3[0]; const float* r0 = img0; for (int i = 0; i < outh; i++) { int size = outw; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "prfm pldl1keep, 
[%5, #512] \n" "ld2 {v4.4s, v5.4s}, [%5], #32 \n" "ld2 {v6.4s, v7.4s}, [%5], #32 \n" "and v5.16b, v6.16b, v6.16b \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v8.4s, v9.4s}, [%1] \n" "fmla v8.4s, v4.4s, %12.4s \n" "fmla v9.4s, v5.4s, %12.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v10.4s, v11.4s}, [%2] \n" "fmla v10.4s, v4.4s, %13.4s \n" "fmla v11.4s, v5.4s, %13.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v12.4s, v13.4s}, [%3] \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "fmla v12.4s, v4.4s, %14.4s \n" "fmla v13.4s, v5.4s, %14.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v14.4s, v15.4s}, [%4] \n" "st1 {v10.4s, v11.4s}, [%2], #32 \n" "fmla v14.4s, v4.4s, %15.4s \n" "fmla v15.4s, v5.4s, %15.4s \n" "st1 {v12.4s, v13.4s}, [%3], #32 \n" "subs %w0, %w0, #1 \n" "st1 {v14.4s, v15.4s}, [%4], #32 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(r0) // %5 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(r0), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"); } #else if (nn > 0) { asm volatile( "0: \n" "pld [%5, #512] \n" "vld2.f32 {d8-d11}, [%5]! \n" "vld2.f32 {d12-d15}, [%5]! \n" "vand q5, q6, q6 \n" // q4 q5 "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1] \n" "vmla.f32 q8, q4, %q12 \n" "vmla.f32 q9, q5, %q12 \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2] \n" "vmla.f32 q10, q4, %q13 \n" "vmla.f32 q11, q5, %q13 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3] \n" "vst1.f32 {d16-d19}, [%1]! \n" "vmla.f32 q12, q4, %q14 \n" "vmla.f32 q13, q5, %q14 \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4] \n" "vst1.f32 {d20-d23}, [%2]! \n" "vmla.f32 q14, q4, %q15 \n" "vmla.f32 q15, q5, %q15 \n" "vst1.f32 {d24-d27}, [%3]! \n" "subs %0, #1 \n" "vst1.f32 {d28-d31}, [%4]! 
\n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(r0) // %5 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(r0), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { // TODO neon optimize float sum0 = *r0 * k0; float sum1 = *r0 * k1; float sum2 = *r0 * k2; float sum3 = *r0 * k3; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; r0 += 2; outptr0++; outptr1++; outptr2++; outptr3++; } r0 += tailstep; } } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); int q = 0; for (; q + 3 < inch; q += 4) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q + 1); const float* img2 = bottom_blob.channel(q + 2); const float* img3 = bottom_blob.channel(q + 3); const float* kernel0 = kernel + p * inch + q; const float k0 = kernel0[0]; const float k1 = kernel0[1]; const float k2 = kernel0[2]; const float k3 = kernel0[3]; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; for (int i = 0; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%2, #512] \n" "ld2 {v2.4s, v3.4s}, [%2], #32 \n" "ld2 {v8.4s, v9.4s}, [%2], #32 \n" "0: \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v0.4s, v1.4s}, [%1] \n" "fmla v0.4s, v2.4s, %12.4s \n" "fmla v1.4s, v8.4s, %12.4s \n" "prfm 
pldl1keep, [%3, #512] \n" "ld2 {v2.4s, v3.4s}, [%3], #32 \n" "ld2 {v8.4s, v9.4s}, [%3], #32 \n" "fmla v0.4s, v2.4s, %13.4s \n" "fmla v1.4s, v8.4s, %13.4s \n" "prfm pldl1keep, [%4, #512] \n" "ld2 {v2.4s, v3.4s}, [%4], #32 \n" "ld2 {v8.4s, v9.4s}, [%4], #32 \n" "fmla v0.4s, v2.4s, %14.4s \n" "fmla v1.4s, v8.4s, %14.4s \n" "prfm pldl1keep, [%5, #512] \n" "ld2 {v2.4s, v3.4s}, [%5], #32 \n" "ld2 {v8.4s, v9.4s}, [%5], #32 \n" "fmla v0.4s, v2.4s, %15.4s \n" "fmla v1.4s, v8.4s, %15.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld2 {v2.4s, v3.4s}, [%2], #32 \n" "ld2 {v8.4s, v9.4s}, [%2], #32 \n" "subs %w0, %w0, #1 \n" "st1 {v0.4s, v1.4s}, [%1], #32 \n" "bne 0b \n" "sub %2, %2, #64 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9"); } #else if (nn > 0) { asm volatile( "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1] \n" "vmla.f32 q0, q2, %q12 \n" "vmla.f32 q1, q8, %q12 \n" "pld [%3, #512] \n" "vld2.f32 {d4-d7}, [%3]! \n" "vld2.f32 {d16-d19}, [%3]! \n" "vmla.f32 q0, q2, %q13 \n" "vmla.f32 q1, q8, %q13 \n" "pld [%4, #512] \n" "vld2.f32 {d4-d7}, [%4]! \n" "vld2.f32 {d16-d19}, [%4]! \n" "vmla.f32 q0, q2, %q14 \n" "vmla.f32 q1, q8, %q14 \n" "pld [%5, #512] \n" "vld2.f32 {d4-d7}, [%5]! \n" "vld2.f32 {d16-d19}, [%5]! \n" "vmla.f32 q0, q2, %q15 \n" "vmla.f32 q1, q8, %q15 \n" "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1]! 
\n" "bne 0b \n" "sub %2, #64 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { float sum = *r0 * k0; float sum1 = *r1 * k1; float sum2 = *r2 * k2; float sum3 = *r3 * k3; *outptr += sum + sum1 + sum2 + sum3; r0 += 2; r1 += 2; r2 += 2; r3 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; } } for (; q < inch; q++) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p * inch + q; const float k0 = kernel0[0]; const float* r0 = img0; for (int i = 0; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%2, #512] \n" "ld2 {v2.4s, v3.4s}, [%2], #32 \n" "ld2 {v8.4s, v9.4s}, [%2], #32 \n" "0: \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v0.4s, v1.4s}, [%1] \n" "fmla v0.4s, v2.4s, %6.4s \n" "fmla v1.4s, v8.4s, %6.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld2 {v2.4s, v3.4s}, [%2], #32 \n" "ld2 {v8.4s, v9.4s}, [%2], #32 \n" "subs %w0, %w0, #1 \n" "st1 {v0.4s, v1.4s}, [%1], #32 \n" "bne 0b \n" "sub %2, %2, #64 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0) // %2 : "0"(nn), "1"(outptr), "2"(r0), "w"(_k0) // %6 : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9"); } #else if (nn > 0) { asm volatile( "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1] \n" "vmla.f32 q0, q2, %q6 \n" "vmla.f32 q1, q8, %q6 \n" "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1]! 
\n" "bne 0b \n" "sub %2, #64 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0) // %2 : "0"(nn), "1"(outptr), "2"(r0), "w"(_k0) // %6 : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { float sum = *r0 * k0; *outptr += sum; r0 += 2; outptr++; } r0 += tailstep; } } } }
calculate_signed_distance_to_3d_skin_process.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Daniel Baumgaertner // Johannes Wolf // #if !defined(KRATOS_CALCULATE_DISTANCE_PROCESS_H_INCLUDED ) #define KRATOS_CALCULATE_DISTANCE_PROCESS_H_INCLUDED // System includes #include <string> #include <iostream> #include <ctime> // External includes // Project includes #include "includes/define.h" #include "processes/process.h" #include "includes/model_part.h" #include "includes/deprecated_variables.h" #include "spatial_containers/octree_binary.h" #include "utilities/spatial_containers_configure.h" #include "utilities/timer.h" #include "utilities/math_utils.h" #include "utilities/geometry_utilities.h" #include "geometries/triangle_3d_3.h" #include "geometries/quadrilateral_3d_4.h" #include "utilities/body_normal_calculation_utils.h" #include "includes/kratos_flags.h" #include "utilities/binbased_fast_point_locator.h" #include "utilities/binbased_nodes_in_element_locator.h" #include "processes/calculate_distance_to_skin_process.h" #ifdef _OPENMP #include "omp.h" #endif using namespace boost::numeric::ublas; namespace Kratos { class DistanceSpatialContainersConfigure { public: class CellNodeData { double mDistance; double mCoordinates[3]; std::size_t mId; public: double& Distance(){return mDistance;} double& X() {return mCoordinates[0];} double& Y() {return mCoordinates[1];} double& Z() {return mCoordinates[2];} double& operator[](int i) {return mCoordinates[i];} std::size_t& Id(){return mId;} }; ///@name Type Definitions ///@{ enum { Dimension = 3, DIMENSION = 3, MAX_LEVEL = 12, MIN_LEVEL = 2 // this cannot be less than 2!!! 
}; typedef Point PointType; /// always the point 3D typedef std::vector<double>::iterator DistanceIteratorType; typedef ModelPart::ElementsContainerType::ContainerType ContainerType; typedef ContainerType::value_type PointerType; typedef ContainerType::iterator IteratorType; typedef ModelPart::ElementsContainerType::ContainerType ResultContainerType; typedef ResultContainerType::value_type ResultPointerType; typedef ResultContainerType::iterator ResultIteratorType; typedef Element::Pointer pointer_type; typedef CellNodeData cell_node_data_type; typedef std::vector<CellNodeData*> data_type; typedef std::vector<PointerType>::iterator PointerTypeIterator; /// Pointer definition of DistanceSpatialContainersConfigure KRATOS_CLASS_POINTER_DEFINITION(DistanceSpatialContainersConfigure); ///@} ///@name Life Cycle ///@{ /// Default constructor. DistanceSpatialContainersConfigure() {} /// Destructor. virtual ~DistanceSpatialContainersConfigure() {} ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ static data_type* AllocateData() { return new data_type(27, (CellNodeData*)NULL); } static void CopyData(data_type* source, data_type* destination) { *destination = *source; } static void DeleteData(data_type* data) { delete data; } static inline void CalculateBoundingBox(const PointerType& rObject, PointType& rLowPoint, PointType& rHighPoint) { rHighPoint = rObject->GetGeometry().GetPoint(0); rLowPoint = rObject->GetGeometry().GetPoint(0); for (unsigned int point = 0; point<rObject->GetGeometry().PointsNumber(); point++) { for(std::size_t i = 0; i<3; i++) { rLowPoint[i] = (rLowPoint[i] > rObject->GetGeometry().GetPoint(point)[i] ) ? rObject->GetGeometry().GetPoint(point)[i] : rLowPoint[i]; rHighPoint[i] = (rHighPoint[i] < rObject->GetGeometry().GetPoint(point)[i] ) ? 
rObject->GetGeometry().GetPoint(point)[i] : rHighPoint[i]; } } } static inline void GetBoundingBox(const PointerType rObject, double* rLowPoint, double* rHighPoint) { for(std::size_t i = 0; i<3; i++) { rLowPoint[i] = rObject->GetGeometry().GetPoint(0)[i]; rHighPoint[i] = rObject->GetGeometry().GetPoint(0)[i]; } for (unsigned int point = 0; point<rObject->GetGeometry().PointsNumber(); point++) { for(std::size_t i = 0; i<3; i++) { rLowPoint[i] = (rLowPoint[i] > rObject->GetGeometry().GetPoint(point)[i] ) ? rObject->GetGeometry().GetPoint(point)[i] : rLowPoint[i]; rHighPoint[i] = (rHighPoint[i] < rObject->GetGeometry().GetPoint(point)[i] ) ? rObject->GetGeometry().GetPoint(point)[i] : rHighPoint[i]; } } } static inline bool Intersection(const PointerType& rObj_1, const PointerType& rObj_2) { Element::GeometryType& geom_1 = rObj_1->GetGeometry(); Element::GeometryType& geom_2 = rObj_2->GetGeometry(); return geom_1.HasIntersection(geom_2); } static inline bool IntersectionBox(const PointerType& rObject, const PointType& rLowPoint, const PointType& rHighPoint) { return rObject->GetGeometry().HasIntersection(rLowPoint, rHighPoint); } static inline bool IsIntersected(const Element::Pointer rObject, double Tolerance, const double* rLowPoint, const double* rHighPoint) { Point low_point(rLowPoint[0] - Tolerance, rLowPoint[1] - Tolerance, rLowPoint[2] - Tolerance); Point high_point(rHighPoint[0] + Tolerance, rHighPoint[1] + Tolerance, rHighPoint[2] + Tolerance); KRATOS_THROW_ERROR(std::logic_error, "Not Implemented method", "") //return HasIntersection(rObject->GetGeometry(), low_point, high_point); } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. virtual std::string Info() const { return " Spatial Containers Configure"; } /// Print information about this object. virtual void PrintInfo(std::ostream& rOStream) const {} /// Print object's data. 
virtual void PrintData(std::ostream& rOStream) const {} ///@} protected: private: /// Assignment operator. DistanceSpatialContainersConfigure& operator=(DistanceSpatialContainersConfigure const& rOther); /// Copy constructor. DistanceSpatialContainersConfigure(DistanceSpatialContainersConfigure const& rOther); }; // Class DistanceSpatialContainersConfigure ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /// Short class definition. /** Detail class definition. */ class CalculateSignedDistanceTo3DSkinProcess : public Process { public: ///@name Type Definitions ///@{ /// Pointer definition of CalculateSignedDistanceTo3DSkinProcess KRATOS_CLASS_POINTER_DEFINITION(CalculateSignedDistanceTo3DSkinProcess); typedef DistanceSpatialContainersConfigure ConfigurationType; typedef OctreeBinaryCell<ConfigurationType> CellType; typedef OctreeBinary<CellType> OctreeType; typedef ConfigurationType::cell_node_data_type CellNodeDataType; typedef Point PointType; /// always the point 3D typedef OctreeType::cell_type::object_container_type object_container_type; typedef struct{ array_1d<double,3> Coordinates; array_1d<double,3> StructElemNormal; unsigned int EdgeNode1; unsigned int EdgeNode2; }IntersectionNodeStruct; typedef struct{ std::vector<IntersectionNodeStruct> IntNodes; }TetEdgeStruct; ///@} ///@name Life Cycle ///@{ /// Constructor. CalculateSignedDistanceTo3DSkinProcess(ModelPart& rThisModelPartStruc, ModelPart& rThisModelPartFluid) : mrSkinModelPart(rThisModelPartStruc), mrBodyModelPart(rThisModelPartStruc), mrFluidModelPart(rThisModelPartFluid) { } /// Destructor. 
~CalculateSignedDistanceTo3DSkinProcess() override { } ///@} ///@name Operators ///@{ void operator()() { Execute(); } ///@} ///@name Operations ///@{ ///****************************************************************************************************************** ///****************************************************************************************************************** void Execute() override { KRATOS_TRY; GenerateOctree(); //DistanceFluidStructure(); CalculateDistanceToSkinProcess<3> distance_process(mrFluidModelPart, mrBodyModelPart); distance_process.Execute(); // ------------------------------------------------------------------ // GenerateNodes(); CalculateDistance2(); // I have to change this. Pooyan. //mrSkinModelPart.GetCommunicator().AssembleCurrentData(DISTANCE); // std::ofstream mesh_file1("octree1.post.msh"); // std::ofstream res_file("octree1.post.res"); // Timer::Start("Writing Gid conform Mesh"); // PrintGiDMesh(mesh_file1); // PrintGiDResults(res_file); // octree.PrintGiDMeshNew(mesh_file2); // Timer::Stop("Writing Gid conform Mesh"); // delete octree. TODO: Carlos // ------------------------------------------------------------------ KRATOS_CATCH(""); } ///****************************************************************************************************************** ///****************************************************************************************************************** /** * This function maps the nodal pressure values computed in the CFD analysis to the respective * structural nodes, i.e. for each structural node inside a fluid tetrahedra positive and negative * face pressure is computed by mapping between the nodal values of the tetrahedra. Afterwards * the resulting delta is applied as new nodal pressure. 
     */
    void MappingPressureToStructure(BinBasedFastPointLocator<3>& node_locator)
    {
        // Loop over structure nodes, find the fluid tetrahedron each falls in,
        // then interpolate the pressure to the node.
        Vector N;
        const int max_results = 10000;
        BinBasedFastPointLocator<3>::ResultContainerType results(max_results);
        const int n_structure_nodes = mrSkinModelPart.Nodes().size();

        #pragma omp parallel for firstprivate(results,N)
        // Reset the VISITED flag on all structure nodes (parallel loop; the
        // mapping loop below runs serially).
        for (int i = 0; i < n_structure_nodes; i++)
        {
            ModelPart::NodesContainerType::iterator iparticle = mrSkinModelPart.NodesBegin() + i;
            Node < 3 > ::Pointer p_structure_node = *(iparticle.base());
            p_structure_node->Set(VISITED, false);
        }

        for (int i = 0; i < n_structure_nodes; i++)
        {
            ModelPart::NodesContainerType::iterator iparticle = mrSkinModelPart.NodesBegin() + i;
            Node < 3 > ::Pointer p_structure_node = *(iparticle.base());
            BinBasedFastPointLocator<3>::ResultIteratorType result_begin = results.begin();
            Element::Pointer pElement;
            bool is_found = node_locator.FindPointOnMesh(p_structure_node->Coordinates(), N, pElement, result_begin, max_results);
            if (is_found == true)
            {
                array_1d<double,4> nodalPressures;
                const Vector& ElementalDistances = pElement->GetValue(ELEMENTAL_DISTANCES);

                Geometry<Node<3> >& geom = pElement->GetGeometry();

                for(unsigned int j=0; j<geom.size(); j++)
                {
                    nodalPressures[j] = geom[j].FastGetSolutionStepValue(PRESSURE);
                }

                if(pElement->GetValue(SPLIT_ELEMENT)==true)
                {
                    array_1d<double,4> Npos,Nneg;

                    // Do mapping: shape functions on the positive / negative side of the cut
                    ComputeDiscontinuousInterpolation((*p_structure_node),pElement->GetGeometry(),ElementalDistances,Npos,Nneg);

                    // Compute face pressure on each side of the interface
                    double p_positive_structure = inner_prod(nodalPressures,Npos);
                    double p_negative_structure = inner_prod(nodalPressures,Nneg);

                    // Assign face pressure to structure node
                    p_structure_node->FastGetSolutionStepValue(POSITIVE_FACE_PRESSURE) = p_positive_structure;
                    p_structure_node->FastGetSolutionStepValue(NEGATIVE_FACE_PRESSURE) = p_negative_structure;
                    p_structure_node->Set(VISITED);
                }
                else
                {
                    // Uncut element: same interpolated pressure on both faces
                    double p = inner_prod(nodalPressures,N);
                    p_structure_node->FastGetSolutionStepValue(POSITIVE_FACE_PRESSURE) = p;
                    p_structure_node->FastGetSolutionStepValue(NEGATIVE_FACE_PRESSURE) = p;
                    p_structure_node->Set(VISITED);
                }
            }
        }

        // Now "treat" the bad nodes: the ones belonging to structural faces that by
        // some chance did not cross any fluid element. To such nodes we simply
        // extrapolate the pressure from the neighbors.
        int n_bad_nodes=0;
        for (int i = 0; i < n_structure_nodes; i++)
        {
            ModelPart::NodesContainerType::iterator iparticle = mrSkinModelPart.NodesBegin() + i;
            Node < 3 > ::Pointer p_structure_node = *(iparticle.base());
            if (p_structure_node->IsNot(VISITED))
                n_bad_nodes++;
        }
        //KRATOS_WATCH("THERE WERE THIS MANY BAD NODES ORIGINALLY")
        //KRATOS_WATCH(n_bad_nodes)

        // NOTE(review): int counter compared against the double literal 1.0 —
        // behaves like "n_bad_nodes >= 1" but the mixed types look accidental.
        while (n_bad_nodes >= 1.0)
        {
            int n_bad_nodes_backup = n_bad_nodes;

            for (int i = 0; i < n_structure_nodes; i++)
            {
                ModelPart::NodesContainerType::iterator iparticle = mrSkinModelPart.NodesBegin() + i;
                Node < 3 > ::Pointer p_structure_node = *(iparticle.base());

                // Count the neighbor nodes that already received a pressure in a
                // previous pass (i.e. were found / VISITED).
                if (p_structure_node->IsNot(VISITED))
                {
                    int n_good_neighbors = 0;
                    double pos_pres = 0.0;
                    double neg_pres = 0.0;
                    GlobalPointersVector< Node < 3 > >& neighours = p_structure_node->GetValue(NEIGHBOUR_NODES);

                    for (GlobalPointersVector< Node < 3 > >::iterator j = neighours.begin(); j != neighours.end(); j++)
                    {
                        if (j->Is(VISITED))
                        {
                            n_good_neighbors++;
                            pos_pres += j->FastGetSolutionStepValue(POSITIVE_FACE_PRESSURE);
                            neg_pres += j->FastGetSolutionStepValue(NEGATIVE_FACE_PRESSURE);
                            //KRATOS_WATCH("Good neighbor found")
                        }
                    }
                    if (n_good_neighbors != 0)
                    {
                        pos_pres /= n_good_neighbors;
                        neg_pres /= n_good_neighbors;
                        p_structure_node->FastGetSolutionStepValue(POSITIVE_FACE_PRESSURE) = pos_pres;
                        p_structure_node->FastGetSolutionStepValue(NEGATIVE_FACE_PRESSURE) = neg_pres;
                        p_structure_node->Set(VISITED);
                        n_bad_nodes--;
                    }
                    //KRATOS_WATCH(pos_pres)
                    //KRATOS_WATCH(neg_pres)
                }
            }
            // If no node could be fixed in this pass, break — otherwise the loop hangs.
            if(n_bad_nodes == n_bad_nodes_backup)
                break;

            /*int n_bad_nodes=0;
            for (int i = 0; i < n_structure_nodes; i++)
            {
                ModelPart::NodesContainerType::iterator iparticle = mrSkinModelPart.NodesBegin() + i;
                Node < 3 > ::Pointer p_structure_node = *(iparticle.base());
                if (p_structure_node->IsNot(VISITED))
                    n_bad_nodes++;
            } */
            //KRATOS_WATCH(n_bad_nodes)
        }

        // The loop below is a "CHEAT": there is a problem of incorrect projection
        // between the meshes at some points. For nodes whose pressure differs too
        // much from their neighbors' (factor > 3), the averaged neighbor pressure
        // is used instead.
        for (int i = 0; i < n_structure_nodes; i++)
        {
            ModelPart::NodesContainerType::iterator iparticle = mrSkinModelPart.NodesBegin() + i;
            Node < 3 > ::Pointer p_structure_node = *(iparticle.base());
            double pos_pressure=p_structure_node->FastGetSolutionStepValue(POSITIVE_FACE_PRESSURE);
            double neg_pressure=p_structure_node->FastGetSolutionStepValue(NEGATIVE_FACE_PRESSURE);

            GlobalPointersVector< Node < 3 > >& neighours = p_structure_node->GetValue(NEIGHBOUR_NODES);

            if (neighours.size()>=1.0)
            {
                double av_pos_pres=0.0;
                double av_neg_pres=0.0;
                for( GlobalPointersVector< Node<3> >::iterator j = neighours.begin(); j != neighours.end(); j++)
                {
                    av_pos_pres+=j->FastGetSolutionStepValue(POSITIVE_FACE_PRESSURE);
                    av_neg_pres+=j->FastGetSolutionStepValue(NEGATIVE_FACE_PRESSURE);
                }
                av_pos_pres/=neighours.size();
                av_neg_pres/=neighours.size();

                // If the node's pressure deviates from the neighbor average by more
                // than a factor of 3, something is bad and we reset its value.
                if (fabs(pos_pressure)>3.0*fabs(av_pos_pres))
                {
                    p_structure_node->FastGetSolutionStepValue(POSITIVE_FACE_PRESSURE) = av_pos_pres;
                    //KRATOS_WATCH("BAD NODE")
                }
                if (fabs(neg_pressure)>3.0*fabs(av_neg_pres))
                {
                    p_structure_node->FastGetSolutionStepValue(NEGATIVE_FACE_PRESSURE) = av_neg_pres;
                    //KRATOS_WATCH("BAD NODE")
                }
            }
        }
    }

    ///******************************************************************************************************************
    ///******************************************************************************************************************

    /// Computes, for a structure node lying inside a cut (split) tetrahedron, the
    /// shape-function weights Npos / Nneg that interpolate from the tetrahedron's
    /// positive-side and negative-side nodes respectively. The cut surface is
    /// reconstructed from the 3 or 4 edge intersection points.
    void ComputeDiscontinuousInterpolation( const Node<3>& pNode,
                                            Geometry< Node<3> >& geom,
                                            const array_1d<double,4>& distances,
                                            array_1d<double,4>& Npos,
                                            array_1d<double,4>& Nneg)
    {
        // Count positive distances
        int n_positives = 0;
        for(unsigned int i=0; i<distances.size(); i++)
            if(distances[i]>0)
                n_positives++;

        // Generate the points on the edges at the zero of the distance function.
        // Generate "father nodes", defined as the end nodes of the edge on which
        // the local point is located.
        std::vector< Point > edge_points;
        edge_points.reserve(4);

        array_1d<unsigned int, 4> positive_fathers, negative_fathers; // there are at most 4 cut edges
        unsigned int k=0;
        unsigned int l=0;

        for(unsigned int i=0; i<3; i++)
        {
            for(unsigned int j=i+1; j<4; j++) // go through the edges 01, 02, 03, 12, 13, 23
            {
                double di = distances[i];
                double dj = distances[j];

                if(di*dj < 0) // edge is cut
                {
                    // Generate point on edge by linear interpolation
                    double Ni = fabs(dj) / ( fabs(di) + fabs(dj) );
                    double Nj = 1.0 - Ni;
                    Point edge_point(Ni * geom[i] + Nj * geom[j]);
                    edge_points.push_back(edge_point);

                    // Store the id of the positive and negative fathers
                    if(di > 0.0)
                    {
                        positive_fathers[k++] = i;
                        negative_fathers[l++] = j;
                    }
                    else
                    {
                        positive_fathers[k++] = j;
                        negative_fathers[l++] = i;
                    }
                }
            }
        }

        if(edge_points.size() == 3)
        {
            // Compute local shape functions (tell how to interpolate from the edge nodes)
            Vector Nlocal(3);

            // Form a triangle with the edge nodes
            Triangle3D3< Point > triangle(Point::Pointer(new Point(edge_points[0])),
                                          Point::Pointer(new Point(edge_points[1])),
                                          Point::Pointer(new Point(edge_points[2])) );

            array_1d<double,3> local_coords;
            local_coords = triangle.PointLocalCoordinates(local_coords, pNode);

            for(unsigned int i=0; i<3;i++)
                Nlocal[i] = triangle.ShapeFunctionValue(i, local_coords );

            noalias(Npos) = ZeroVector(4);
            noalias(Nneg) = ZeroVector(4);
            for(unsigned int i=0; i<3; i++)
            {
                Npos[ positive_fathers[i] ] += Nlocal[i];
                Nneg[ negative_fathers[i] ] += Nlocal[i];
            }
        }

        if(edge_points.size() == 4)
        {
            // Compute local shape functions (tell how to interpolate from the edge nodes)
            Vector Nlocal(4);

            // Form a quadrilateral with the 4 cut nodes
            array_1d<double,3> x21 = edge_points[1] - edge_points[0];
            array_1d<double,3> x31 = edge_points[2] - edge_points[0];
            array_1d<double,3> x41 = edge_points[3] - edge_points[0];

            // Define a vector oriented as x21
            array_1d<double,3> v1 = x21 / norm_2(x21);

            BoundedMatrix<double,4,3> DN_DX;
            array_1d<double,4> msN;
            double Area;
            GeometryUtils::CalculateGeometryData( geom, DN_DX, msN, Area );

            // Normal of the cut plane from the distance gradient
            array_1d<double,3> n = prod(trans(DN_DX),distances);
            n /= norm_2(n);

            array_1d<double,3> v2;
            MathUtils<double>::CrossProduct(v2,v1,n); // v2 = v1 x n

            // Angles of the other edge points around the cut-plane axes (v1, v2)
            array_1d<double,3> angles;
            angles[0] = 0.0; // angle between x21 and v1
            angles[1] = atan2( inner_prod(x31,v2), inner_prod(x31,v1) ); // angle between x31 and v1
            angles[2] = atan2( inner_prod(x41,v2), inner_prod(x41,v1) ); // angle between x41 and v1

            double max_angle = 0.0;
            double min_angle = 0.0;
            unsigned int min_pos = 1;
            unsigned int max_pos = 1;
            for(unsigned int i=1; i<3; i++)
            {
                if(angles[i] < min_angle)
                {
                    min_pos = i+1; // this is the local index of the edge point which forms the minimal angle
                    min_angle = angles[i];
                }
                else if(angles[i] > max_angle)
                {
                    max_pos = i+1; // this is the local index of the edge point which forms the maximal angle
                    max_angle = angles[i];
                }
            }

            // Find the pos of the center node (the remaining index)
            unsigned int center_pos = 0;
            for(unsigned int i=1; i<4; i++)
            {
                if((i!= min_pos) && (i!=max_pos))
                {
                    center_pos = i;
                }
            }

            // Form a quadrilateral with the edge nodes, ordered by angle
            Quadrilateral3D4< Point > quad = Quadrilateral3D4< Point >(
                Point::Pointer(new Point(edge_points[0])),
                Point::Pointer(new Point(edge_points[min_pos])),
                Point::Pointer(new Point(edge_points[center_pos])),
                Point::Pointer(new Point(edge_points[max_pos])) );

            array_1d<double,3> local_coords;
            local_coords = quad.PointLocalCoordinates(local_coords, pNode);

            array_1d<unsigned int, 4> indices;
            indices[0] = 0;
            indices[1] = min_pos;
            indices[2] = center_pos;
            indices[3] = max_pos;

            for(unsigned int i=0; i<4;i++)
                Nlocal[ i ] = quad.ShapeFunctionValue(i, local_coords );

            noalias(Npos) = ZeroVector(4);
            noalias(Nneg) = ZeroVector(4);
            for(unsigned int i=0; i<4; i++)
            {
                Npos[ positive_fathers[i] ] += Nlocal[indices[i]];
                Nneg[ negative_fathers[i] ] += Nlocal[indices[i]];
            }
        }
    }

    ///******************************************************************************************************************
    ///******************************************************************************************************************

    /// Locates the fluid element containing 'node' and assigns the node's face
    /// pressures: for a split element, the average of the positive-side and of the
    /// negative-side nodal pressures; otherwise the plain average of all 4 nodes.
    void AveragePressureToNode(BinBasedFastPointLocator<3>& node_locator,
                               Node<3>& node)
    {
        // Find the tetrahedron the node falls in, then interpolate
        Vector N;
        const int max_results = 10000;
        BinBasedFastPointLocator<3>::ResultContainerType results(max_results);
        BinBasedFastPointLocator<3>::ResultIteratorType result_begin = results.begin();
        Element::Pointer pElement;

        bool is_found = node_locator.FindPointOnMesh(node.Coordinates(), N, pElement, result_begin, max_results);

        if (is_found == true)
        {
            array_1d<double,4> nodalPressures;
            const Vector& ElementalDistances = pElement->GetValue(ELEMENTAL_DISTANCES);
            Geometry<Node<3> >& geom = pElement->GetGeometry();

            for(unsigned int i=0; i<4; i++)
                nodalPressures[i] = geom[i].GetSolutionStepValue(PRESSURE);

            if(pElement->GetValue(SPLIT_ELEMENT)==true)
            {
                // Compute average of all positive and all negative values
                double positiveAverage = 0;
                double negativeAverage = 0;
                unsigned int nPos = 0;
                unsigned int nNeg = 0;

                for(unsigned int i=0 ; i<4 ; i++)
                {
                    if(ElementalDistances[i]>=0)
                    {
                        positiveAverage += nodalPressures[i];
                        nPos++;
                    }
                    else
                    {
                        negativeAverage += nodalPressures[i];
                        nNeg++;
                    }
                }

                positiveAverage /= nPos;
                negativeAverage /= nNeg;

                // Assign Pressures
                node.GetSolutionStepValue(POSITIVE_FACE_PRESSURE,0) = positiveAverage;
                node.GetSolutionStepValue(NEGATIVE_FACE_PRESSURE,0) = negativeAverage;
            }
            else
            {
                // Uncut element: plain average of the 4 nodal values
                double Average = 0;

                for(unsigned int i = 0 ; i<4 ; i++)
                    Average += nodalPressures[i];

                Average /= 4;

                // Assign Pressures
                node.GetSolutionStepValue(POSITIVE_FACE_PRESSURE,0) = Average;
                node.GetSolutionStepValue(NEGATIVE_FACE_PRESSURE,0) = Average;
            }
        }
    }
///****************************************************************************************************************** ///****************************************************************************************************************** void DistanceFluidStructure() { //std::cout << "Start calculating Elemental distances..." << std::endl; // Initialize Elemental distances in the domain Initialize(); // Initialize index table that defines line Edges of fluid Element BoundedMatrix<unsigned int,6,2> TetEdgeIndexTable; SetIndexTable(TetEdgeIndexTable); // loop over all fluid Elements // this loop is parallelized using openmp #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif ModelPart::ElementsContainerType& pElements = mrFluidModelPart.Elements(); DenseVector<unsigned int> Element_partition; CreatePartition(number_of_threads, pElements.size(), Element_partition); #pragma omp parallel for for (int k = 0; k < number_of_threads; k++) { ModelPart::ElementsContainerType::iterator it_begin = pElements.ptr_begin() + Element_partition[k]; ModelPart::ElementsContainerType::iterator it_end = pElements.ptr_begin() + Element_partition[k+1]; // assemble all Elements for (ModelPart::ElementIterator it = it_begin; it != it_end; ++it) { CalcElementDistances( it , TetEdgeIndexTable ); } } // Finally, each tetrahedral Element has 4 distance values. But each node belongs to // several Elements, such that it is assigned several distance values // --> now synchronize these values by finding the minimal distance and assign to each node a minimal nodal distance AssignMinimalNodalDistance(); //std::cout << "Finished calculating Elemental distances..." 
<< std::endl; } ///****************************************************************************************************************** ///****************************************************************************************************************** void Initialize() { const double initial_distance = 1.0; ModelPart::NodesContainerType::ContainerType& nodes = mrFluidModelPart.NodesArray(); // reset the node distance to 1.0 which is the maximum distance in our normalized space. int nodesSize = nodes.size(); #pragma omp parallel for firstprivate(nodesSize) for(int i = 0 ; i < nodesSize ; i++) nodes[i]->GetSolutionStepValue(DISTANCE) = initial_distance; ModelPart::ElementsContainerType::ContainerType& fluid_Elements = mrFluidModelPart.ElementsArray(); array_1d<double,4> ElementalDistances; ElementalDistances[0] = initial_distance; ElementalDistances[1] = initial_distance; ElementalDistances[2] = initial_distance; ElementalDistances[3] = initial_distance; // reset the Elemental distance to 1.0 which is the maximum distance in our normalized space. 
// also initialize the embedded velocity of the fluid Element int ElementsSize = fluid_Elements.size(); #pragma omp parallel for firstprivate(ElementsSize) for(int i = 0 ; i < ElementsSize ; i++) { fluid_Elements[i]->GetValue(ELEMENTAL_DISTANCES) = ElementalDistances; fluid_Elements[i]->GetValue(SPLIT_ELEMENT) = false; fluid_Elements[i]->GetValue(EMBEDDED_VELOCITY)=ZeroVector(3); } } ///****************************************************************************************************************** ///****************************************************************************************************************** void SetIndexTable( BoundedMatrix<unsigned int,6,2>& TetEdgeIndexTable ) { // Initialize index table to define line Edges of fluid Element TetEdgeIndexTable(0,0) = 0; TetEdgeIndexTable(0,1) = 1; TetEdgeIndexTable(1,0) = 0; TetEdgeIndexTable(1,1) = 2; TetEdgeIndexTable(2,0) = 0; TetEdgeIndexTable(2,1) = 3; TetEdgeIndexTable(3,0) = 1; TetEdgeIndexTable(3,1) = 2; TetEdgeIndexTable(4,0) = 1; TetEdgeIndexTable(4,1) = 3; TetEdgeIndexTable(5,0) = 2; TetEdgeIndexTable(5,1) = 3; } ///****************************************************************************************************************** ///****************************************************************************************************************** void CalcElementDistances( ModelPart::ElementsContainerType::iterator& i_fluidElement, BoundedMatrix<unsigned int,6,2> TetEdgeIndexTable ) { std::vector<OctreeType::cell_type*> leaves; std::vector<TetEdgeStruct> IntersectedTetEdges; unsigned int NumberIntersectionsOnTetCorner = 0; // Get leaves of octree intersecting with fluid Element mpOctree->GetIntersectedLeaves(*(i_fluidElement).base(),leaves); int intersection_counter = 0; // Loop over all 6 line Edges of the tetrahedra for(unsigned int i_tetEdge = 0; i_tetEdge < 6; i_tetEdge++) { IdentifyIntersectionNodes( i_fluidElement, i_tetEdge, leaves, IntersectedTetEdges, NumberIntersectionsOnTetCorner, 
TetEdgeIndexTable, intersection_counter ); } if (intersection_counter!=0) { i_fluidElement->GetValue(EMBEDDED_VELOCITY) /= intersection_counter; } if(IntersectedTetEdges.size() > 0) CalcDistanceTo3DSkin( IntersectedTetEdges , i_fluidElement , NumberIntersectionsOnTetCorner ); } ///****************************************************************************************************************** ///****************************************************************************************************************** void IdentifyIntersectionNodes( ModelPart::ElementsContainerType::iterator& i_fluidElement, unsigned int i_tetEdge, std::vector<OctreeType::cell_type*>& leaves, std::vector<TetEdgeStruct>& IntersectedTetEdges, unsigned int& NumberIntersectionsOnTetCorner, BoundedMatrix<unsigned int,6,2> TetEdgeIndexTable, int& intersection_counter ) { std::vector<unsigned int> IntersectingStructElemID; TetEdgeStruct NewTetEdge; unsigned int NumberIntersectionsOnTetCornerCurrentEdge = 0; // Get nodes of line Edge unsigned int EdgeStartIndex = TetEdgeIndexTable(i_tetEdge,0); unsigned int EdgeEndIndex = TetEdgeIndexTable(i_tetEdge,1); PointType& P1 = i_fluidElement->GetGeometry()[EdgeStartIndex]; PointType& P2 = i_fluidElement->GetGeometry()[EdgeEndIndex]; double EdgeNode1[3] = {P1.X() , P1.Y() , P1.Z()}; double EdgeNode2[3] = {P2.X() , P2.Y() , P2.Z()}; // loop over all octree cells which are intersected by the fluid Element for(unsigned int i_cell = 0 ; i_cell < leaves.size() ; i_cell++) { // Structural Element contained in one cell of the octree object_container_type* struct_elem = (leaves[i_cell]->pGetObjects()); // loop over all structural Elements within each octree cell for(object_container_type::iterator i_StructElement = struct_elem->begin(); i_StructElement != struct_elem->end(); i_StructElement++) { if( StructuralElementNotYetConsidered( (*i_StructElement)->Id() , IntersectingStructElemID ) ) { // Calculate and associate intersection point to the current fluid Element 
double IntersectionPoint[3] = {0.0 , 0.0 , 0.0}; int TetEdgeHasIntersections = IntersectionTriangleSegment( (*i_StructElement)->GetGeometry() , EdgeNode1 , EdgeNode2 , IntersectionPoint ); if( TetEdgeHasIntersections == 1 ) { IntersectionNodeStruct NewIntersectionNode; // Assign information to the intersection node NewIntersectionNode.Coordinates[0] = IntersectionPoint[0]; NewIntersectionNode.Coordinates[1] = IntersectionPoint[1]; NewIntersectionNode.Coordinates[2] = IntersectionPoint[2]; if( IsIntersectionNodeOnTetEdge( IntersectionPoint , EdgeNode1 , EdgeNode2 ) ) { if ( IsNewIntersectionNode( NewIntersectionNode , IntersectedTetEdges ) ) { // Calculate normal of the structural Element at the position of the intersection point CalculateNormal3D((*i_StructElement)->GetGeometry()[0], (*i_StructElement)->GetGeometry()[1], (*i_StructElement)->GetGeometry()[2], NewIntersectionNode.StructElemNormal); // check, how many intersection nodes are located on corner points of the tetrahedra if ( IsIntersectionOnCorner( NewIntersectionNode , EdgeNode1 , EdgeNode2) ) { NumberIntersectionsOnTetCornerCurrentEdge++; // only allow one intersection node on a tet edge if(NumberIntersectionsOnTetCornerCurrentEdge < 2) { // add the new intersection point to the list of intersection points of the fluid Element NewIntersectionNode.EdgeNode1 = EdgeStartIndex; NewIntersectionNode.EdgeNode2 = EdgeEndIndex; NewTetEdge.IntNodes.push_back(NewIntersectionNode); // if tet edge belonging to this intersection point is not already marked as "IntersectedTetEdge" --> put it into the respective container // when a second intersection node is found, then it is not necessary to push_back again if( NewTetEdge.IntNodes.size() == 1 ) IntersectedTetEdges.push_back(NewTetEdge); } // this corner intersection node is only considered once for each tet edge if(NumberIntersectionsOnTetCornerCurrentEdge==1) { NumberIntersectionsOnTetCorner++; } } else { // add the new intersection point to the list of intersection 
points of the fluid Element NewIntersectionNode.EdgeNode1 = EdgeStartIndex; NewIntersectionNode.EdgeNode2 = EdgeEndIndex; NewTetEdge.IntNodes.push_back(NewIntersectionNode); // velocity mapping structure --> fluid array_1d<double,3> emb_vel = (*i_StructElement)->GetGeometry()[0].GetSolutionStepValue(VELOCITY); emb_vel += (*i_StructElement)->GetGeometry()[1].GetSolutionStepValue(VELOCITY); emb_vel += (*i_StructElement)->GetGeometry()[2].GetSolutionStepValue(VELOCITY); i_fluidElement->GetValue(EMBEDDED_VELOCITY) += emb_vel/3; intersection_counter++; } } } } } } } // Finally put the found intersection nodes into the container if( NewTetEdge.IntNodes.size() > 0 ) { if(NumberIntersectionsOnTetCornerCurrentEdge == 0) IntersectedTetEdges.push_back(NewTetEdge); } } ///****************************************************************************************************************** ///****************************************************************************************************************** bool StructuralElementNotYetConsidered( unsigned int IDCurrentStructElem, std::vector<unsigned int>& IntersectingStructElemID ) { // check if the structural Element was already considered as intersecting Element for(unsigned int k = 0 ; k < IntersectingStructElemID.size() ; k++) { if( IDCurrentStructElem == IntersectingStructElemID[k] ) return false; } // if structural Element has not been considered in another octree, which also intersects the fluid Element // add the new object ID to the vector IntersectingStructElemID.push_back( IDCurrentStructElem ); return true; } ///****************************************************************************************************************** ///****************************************************************************************************************** bool IsIntersectionNodeOnTetEdge( double* IntersectionPoint, double* EdgeNode1, double* EdgeNode2 ) { // check, if intersection point is located on any edge of the fluid Element 
array_1d<double,3> ConnectVectTetNodeIntNode1; array_1d<double,3> ConnectVectTetNodeIntNode2; array_1d<double,3> EdgeVector; ConnectVectTetNodeIntNode1[0] = IntersectionPoint[0] - EdgeNode1[0]; ConnectVectTetNodeIntNode1[1] = IntersectionPoint[1] - EdgeNode1[1]; ConnectVectTetNodeIntNode1[2] = IntersectionPoint[2] - EdgeNode1[2]; ConnectVectTetNodeIntNode2[0] = IntersectionPoint[0] - EdgeNode2[0]; ConnectVectTetNodeIntNode2[1] = IntersectionPoint[1] - EdgeNode2[1]; ConnectVectTetNodeIntNode2[2] = IntersectionPoint[2] - EdgeNode2[2]; double LengthConnectVect1 = norm_2( ConnectVectTetNodeIntNode1 ); double LengthConnectVect2 = norm_2( ConnectVectTetNodeIntNode2 ); EdgeVector[0] = EdgeNode2[0] - EdgeNode1[0]; EdgeVector[1] = EdgeNode2[1] - EdgeNode1[1]; EdgeVector[2] = EdgeNode2[2] - EdgeNode1[2]; double MaxEdgeLength = norm_2( EdgeVector ); // if both connection vectors (corner point --> intersection point) // are smaller or equal to the edge length of tetrahedra, // then intersection point is located on the edge if( (LengthConnectVect1 <= (MaxEdgeLength)) && (LengthConnectVect2 <= (MaxEdgeLength)) ) return true; else return false; } ///****************************************************************************************************************** ///****************************************************************************************************************** bool IsNewIntersectionNode( IntersectionNodeStruct& NewIntersectionNode, std::vector<TetEdgeStruct>& IntersectedTetEdges ) { array_1d<double,3> DiffVector; double NormDiffVector = 0; unsigned int NumberIntNodes = 0; for( unsigned int i_TetEdge = 0 ; i_TetEdge < IntersectedTetEdges.size() ; i_TetEdge++ ) { NumberIntNodes = IntersectedTetEdges[i_TetEdge].IntNodes.size(); for( unsigned int i_IntNode = 0 ; i_IntNode < NumberIntNodes ; i_IntNode++ ) { DiffVector[0] = NewIntersectionNode.Coordinates[0] - IntersectedTetEdges[i_TetEdge].IntNodes[i_IntNode].Coordinates[0]; DiffVector[1] = 
NewIntersectionNode.Coordinates[1] - IntersectedTetEdges[i_TetEdge].IntNodes[i_IntNode].Coordinates[1]; DiffVector[2] = NewIntersectionNode.Coordinates[2] - IntersectedTetEdges[i_TetEdge].IntNodes[i_IntNode].Coordinates[2]; NormDiffVector = norm_2(DiffVector); if( NormDiffVector < epsilon ) return false; } } // if the new intersection node is not existing (as intersection with a corner point), then return false return true; } ///****************************************************************************************************************** ///****************************************************************************************************************** bool IsIntersectionOnCorner( IntersectionNodeStruct& NewIntersectionNode, double* EdgeNode1, double* EdgeNode2 ) { array_1d<double,3> DiffVector; double NormDiffVector; DiffVector[0] = EdgeNode1[0] - NewIntersectionNode.Coordinates[0]; DiffVector[1] = EdgeNode1[1] - NewIntersectionNode.Coordinates[1]; DiffVector[2] = EdgeNode1[2] - NewIntersectionNode.Coordinates[2]; NormDiffVector = norm_2(DiffVector); if( NormDiffVector < epsilon ) return true; DiffVector[0] = EdgeNode2[0] - NewIntersectionNode.Coordinates[0]; DiffVector[1] = EdgeNode2[1] - NewIntersectionNode.Coordinates[1]; DiffVector[2] = EdgeNode2[2] - NewIntersectionNode.Coordinates[2]; NormDiffVector = norm_2(DiffVector); if( NormDiffVector < epsilon ) return true; else return false; } ///****************************************************************************************************************** ///****************************************************************************************************************** void CalculateNormal3D( Point& Point1, Point& Point2, Point& Point3, array_1d<double,3>& rResultNormal ) { array_1d<double,3> v1 = Point2 - Point1; array_1d<double,3> v2 = Point3 - Point1; MathUtils<double>::CrossProduct(rResultNormal,v1,v2); rResultNormal *= 0.5; } 
///****************************************************************************************************************** ///****************************************************************************************************************** void CalcDistanceTo3DSkin( std::vector<TetEdgeStruct>& IntersectedTetEdges, ModelPart::ElementsContainerType::iterator& i_fluid_Element, unsigned int NumberIntersectionsOnTetCorner ) { std::vector<IntersectionNodeStruct> NodesOfApproximatedStructure; array_1d<double,4> ElementalDistances; FillIntNodesContainer(IntersectedTetEdges,NodesOfApproximatedStructure); // Intersection with one corner point if( NodesOfApproximatedStructure.size() == 1 && NumberIntersectionsOnTetCorner == 1 ) { CalcSignedDistancesToOneIntNode(i_fluid_Element,NodesOfApproximatedStructure,ElementalDistances); i_fluid_Element->GetValue(SPLIT_ELEMENT) = true; } // Intersection with two corner points / one tetrahedra edge if( NodesOfApproximatedStructure.size() == 2 && NumberIntersectionsOnTetCorner == 2 ) { CalcSignedDistancesToTwoIntNodes(i_fluid_Element,NodesOfApproximatedStructure,ElementalDistances); i_fluid_Element->GetValue(SPLIT_ELEMENT) = true; } // Intersection with three tetrahedra edges if( NodesOfApproximatedStructure.size() == 3 ) { CalcSignedDistancesToThreeIntNodes(i_fluid_Element,NodesOfApproximatedStructure,ElementalDistances); i_fluid_Element->GetValue(SPLIT_ELEMENT) = true; } // Intersection with more than three tetrahedra edges if( NodesOfApproximatedStructure.size() > 3 ) { CalcSignedDistancesToMoreThanThreeIntNodes(i_fluid_Element,NodesOfApproximatedStructure,ElementalDistances,IntersectedTetEdges); i_fluid_Element->GetValue(SPLIT_ELEMENT) = true; } // Postprocessing treatment of Elemental distances if( i_fluid_Element->GetValue(SPLIT_ELEMENT) == true ) AvoidZeroDistances(i_fluid_Element, ElementalDistances); // In case there is intersection with fluid Element: assign distances to the Element if( i_fluid_Element->GetValue(SPLIT_ELEMENT) == 
true ) i_fluid_Element->GetValue(ELEMENTAL_DISTANCES) = ElementalDistances; } ///****************************************************************************************************************** ///****************************************************************************************************************** void FillIntNodesContainer( std::vector<TetEdgeStruct>& IntersectedTetEdges, std::vector<IntersectionNodeStruct>& NodesOfApproximatedStructure ) { const unsigned int NumberCutEdges = IntersectedTetEdges.size(); for(unsigned int i_TetEdge = 0 ; i_TetEdge < NumberCutEdges ; i_TetEdge++) { unsigned int NumberIntNodes = IntersectedTetEdges[i_TetEdge].IntNodes.size(); for( unsigned int i_IntNode = 0 ; i_IntNode < NumberIntNodes ; i_IntNode++ ) { NodesOfApproximatedStructure.push_back(IntersectedTetEdges[i_TetEdge].IntNodes[i_IntNode]); } } } ///****************************************************************************************************************** ///****************************************************************************************************************** void CalcSignedDistancesToOneIntNode( ModelPart::ElementsContainerType::iterator& i_fluid_Element, std::vector<IntersectionNodeStruct> NodesOfApproximatedStructure, array_1d<double,4>& ElementalDistances ) { Geometry< Node<3> >& rFluidGeom = i_fluid_Element->GetGeometry(); Point P1; P1.Coordinates() = NodesOfApproximatedStructure[0].Coordinates; array_1d<double,3>& Normal = NodesOfApproximatedStructure[0].StructElemNormal; // Compute distance values for all tet-nodes for(unsigned int i_TetNode = 0 ; i_TetNode < 4 ; i_TetNode++) { ElementalDistances[i_TetNode] = PointDistanceToPlane(P1, Normal, rFluidGeom[i_TetNode]); } } ///****************************************************************************************************************** ///****************************************************************************************************************** void CalcSignedDistancesToTwoIntNodes( 
ModelPart::ElementsContainerType::iterator& i_fluid_Element, std::vector<IntersectionNodeStruct> NodesOfApproximatedStructure, array_1d<double,4>& ElementalDistances ) { Geometry< Node<3> >& rFluidGeom = i_fluid_Element->GetGeometry(); Point P1; P1.Coordinates() = NodesOfApproximatedStructure[0].Coordinates; // Get normal at intersections, average them and check direction of distances array_1d<double,3> NormalAtIntersectionNode1 = NodesOfApproximatedStructure[0].StructElemNormal; array_1d<double,3> NormalAtIntersectionNode2 = NodesOfApproximatedStructure[1].StructElemNormal; // Compute normal of surface plane array_1d<double,3> Normal; Normal[0] = 0.5*(NormalAtIntersectionNode1[0] + NormalAtIntersectionNode2[0]); Normal[1] = 0.5*(NormalAtIntersectionNode1[1] + NormalAtIntersectionNode2[1]); Normal[2] = 0.5*(NormalAtIntersectionNode1[2] + NormalAtIntersectionNode2[2]); // Check whether orientation of normal is in direction of the normal of the intersecting structure // Note: The normal of the approx. surface can be max. 
90deg to every surrounding normal of the structure at the intersection nodes const array_1d<double,3> NormalAtOneIntersectionNode = NodesOfApproximatedStructure[0].StructElemNormal; bool NormalWrongOriented = false; if(inner_prod(NormalAtOneIntersectionNode,Normal)<0) NormalWrongOriented = true; // switch direction of normal if(NormalWrongOriented) Normal *=-1; // Compute distance values for all tet-nodes for(unsigned int i_TetNode = 0 ; i_TetNode < 4 ; i_TetNode++) { ElementalDistances[i_TetNode] = PointDistanceToPlane(P1, Normal, rFluidGeom[i_TetNode]); } } ///****************************************************************************************************************** ///****************************************************************************************************************** void CalcSignedDistancesToThreeIntNodes( ModelPart::ElementsContainerType::iterator& i_fluid_Element, std::vector<IntersectionNodeStruct>& NodesOfApproximatedStructure, array_1d<double,4>& ElementalDistances ) { Geometry< Node<3> >& rFluidGeom = i_fluid_Element->GetGeometry(); Point P1; Point P2; Point P3; P1.Coordinates() = NodesOfApproximatedStructure[0].Coordinates; P2.Coordinates() = NodesOfApproximatedStructure[1].Coordinates; P3.Coordinates() = NodesOfApproximatedStructure[2].Coordinates; array_1d<double,3> Normal; CalculateNormal3D(P1,P2,P3,Normal); // Check whether orientation of normal is in direction of the normal of the intersecting structure // Note: The normal of the approx. surface can be max. 
90deg to every surrounding normal of the structure at the intersection nodes const array_1d<double,3> NormalAtOneIntersectionNode = NodesOfApproximatedStructure[0].StructElemNormal; bool NormalWrongOriented = false; if(inner_prod(NormalAtOneIntersectionNode,Normal)<0) NormalWrongOriented = true; // switch direction of normal if(NormalWrongOriented) Normal *=-1; // Compute distance values for all tet-nodes for(unsigned int i_TetNode = 0 ; i_TetNode < 4 ; i_TetNode++) { ElementalDistances[i_TetNode] = PointDistanceToPlane(P1, Normal, rFluidGeom[i_TetNode] ); } } ///****************************************************************************************************************** ///****************************************************************************************************************** void CalcSignedDistancesToMoreThanThreeIntNodes( ModelPart::ElementsContainerType::iterator& i_fluid_Element, std::vector<IntersectionNodeStruct> NodesOfApproximatedStructure, array_1d<double,4>& ElementalDistances, std::vector<TetEdgeStruct>& IntersectedTetEdges ) { unsigned int numberCutEdges = NodesOfApproximatedStructure.size(); // Compute average of the intersection nodes which is a node on the plane we look for Point P_mean; for(unsigned int k=0; k<numberCutEdges; k++) for(unsigned int i=0; i<3; i++) P_mean.Coordinates()[i] += NodesOfApproximatedStructure[k].Coordinates[i]; for(unsigned int i=0; i<3; i++) P_mean.Coordinates()[i] /= numberCutEdges; // Compute normal for the best-fitted plane array_1d<double,3> N_mean; Matrix coordinates(numberCutEdges,3); for(unsigned int i=0; i<numberCutEdges; i++) for(unsigned int j=0; j<3; j++) coordinates(i,j) = NodesOfApproximatedStructure[i].Coordinates[j] - P_mean[j]; Matrix A = prod(trans(coordinates),coordinates); Matrix V(3,3); Vector lambda(3); // Calculate the eigenvectors V and the corresponding eigenvalues lambda EigenVectors(A, V, lambda); // Look for the minimal eigenvalue all lambdas unsigned int min_pos = 0; double 
min_lambda = lambda[min_pos]; for(unsigned int i=1;i<3; i++) if(min_lambda > lambda[i]) { min_lambda = lambda[i]; min_pos = i; } // the normal equals to the eigenvector which corresponds to the minimal eigenvalue for(unsigned int i=0;i<3; i++) N_mean[i] = V(min_pos,i); N_mean /= norm_2(N_mean); // Check whether orientation of normal is in direction of the normal of the intersecting structure // Note: The normal of the approx. surface can be max. 90deg to every surrounding normal of the structure at the intersection nodes array_1d<double,3> NormalAtOneIntersectionNode; NormalAtOneIntersectionNode = NodesOfApproximatedStructure[0].StructElemNormal; bool NormalWrongOriented = false; if(inner_prod(NormalAtOneIntersectionNode,N_mean)<0) NormalWrongOriented = true; // switch direction of normal if(NormalWrongOriented) N_mean *=-1; // Determine about the minimal distance by considering the distances to both triangles for(unsigned int i_TetNode = 0 ; i_TetNode < 4 ; i_TetNode++) { ElementalDistances[i_TetNode] = PointDistanceToPlane(P_mean, N_mean, i_fluid_Element->GetGeometry()[i_TetNode] ); } // ################################################# unsigned int numberDoubleCutEdges = 0; unsigned int indexDoubleCutEdge = 0; // figure out the edges which are cut more than once for(unsigned int i_TetEdge = 0 ; i_TetEdge < IntersectedTetEdges.size() ; i_TetEdge++) { unsigned int NumberIntNodes = IntersectedTetEdges[i_TetEdge].IntNodes.size(); if(NumberIntNodes == 2) { numberDoubleCutEdges++; indexDoubleCutEdge = i_TetEdge; } } if((numberDoubleCutEdges >= 1)) { array_1d<double,3> normal_1 = IntersectedTetEdges[indexDoubleCutEdge].IntNodes[0].StructElemNormal; array_1d<double,3> normal_2 = IntersectedTetEdges[indexDoubleCutEdge].IntNodes[1].StructElemNormal; // normalize normals normal_1 /= norm_2(normal_1); normal_2 /= norm_2(normal_2); const double pi = 3.1415926; // compute angle between normals double angle_n1n2 = acos( inner_prod(normal_1,normal_2) ); // rad --> degree 
angle_n1n2 *= 180 / pi; // if angle between -60º and 120º, take the mean if( (angle_n1n2 > -60) && (angle_n1n2 < 120) ) { // take the mean of the normals N_mean = 0.5 * (normal_1 + normal_2); } else { N_mean = 0.5 * (normal_1 - normal_2); } // Based on N_mean and P_mean compute the distances to that plane for(unsigned int i_TetNode = 0 ; i_TetNode < 4 ; i_TetNode++) { ElementalDistances[i_TetNode] = PointDistanceToPlane(P_mean, N_mean, i_fluid_Element->GetGeometry()[i_TetNode] ); } } } ///****************************************************************************************************************** ///****************************************************************************************************************** /** * This function calculates the distance of a 3D point to a plane spanned by a 3D triangle * @param Plane base point * @param planeNormal * @param ToPoint The point which distance is required * @return The distance between the point and the plane spanned by the 3D triangle */ double PointDistanceToPlane( Point& planeBasePoint, array_1d<double, 3>& planeNormal, Point& ToPoint) { // calculate vector pointing from a node in the plane (e.g. 
triangle point 1) to the considered node ToPoint array_1d<double,3> planeToPointVec = ToPoint - planeBasePoint; // projection of node on the plane const double sn = inner_prod(planeToPointVec,planeNormal); const double sd = inner_prod(planeNormal,planeNormal); double DistanceToPlane = sn / sqrt(sd); if( fabs(DistanceToPlane) < epsilon ) DistanceToPlane = 0; return DistanceToPlane; } ///****************************************************************************************************************** ///****************************************************************************************************************** void AssignMinimalNodalDistance() { // loop over all fluid Elements for( ModelPart::ElementIterator i_fluid_Element = mrFluidModelPart.ElementsBegin(); i_fluid_Element != mrFluidModelPart.ElementsEnd(); i_fluid_Element++) { Geometry< Node<3> >& geom = i_fluid_Element->GetGeometry(); const Vector& ElementalDistances = i_fluid_Element->GetValue(ELEMENTAL_DISTANCES); // Assign distances to the single nodes, if a smaller value is found for( unsigned int i_TetNode = 0; i_TetNode < 4; i_TetNode++ ) { double currentNodeDist = geom[i_TetNode].GetSolutionStepValue(DISTANCE); double nodeInElemDist = ElementalDistances[i_TetNode]; if( fabs( nodeInElemDist ) < fabs( currentNodeDist ) ) geom[i_TetNode].GetSolutionStepValue(DISTANCE) = nodeInElemDist; // overwrite nodal distance (which is global) } // loop i_TetNode } // loop i_fluidElement } ///****************************************************************************************************************** ///****************************************************************************************************************** /** * If structure directly passes through the corner point of a tetrahedra (leading to zero distances * in the respective node), then a small distance value (different from zero) will be stored for * that point. This is necessary since the embedded solver cannot handle zero distances. 
 * @param Element current Element which was cut by the structure (flag SPLIT_ELEMENT is set to one)
 * @param ElementalDistances Elemental distances calculated by the intersection pattern
 */
void AvoidZeroDistances(
    ModelPart::ElementsContainerType::iterator& Element,
    array_1d<double,4>& ElementalDistances)
{
    // Assign a distance limit: nodal distances with |d| below this value are
    // pushed away from zero (keeping their sign) so that later divisions by
    // the distance values cannot blow up.
    double dist_limit = 1e-5;
//        bool distChangedToLimit = false; //variable to indicate that a distance value < tolerance is set to a limit distance = tolerance
//
//        for(unsigned int i_node = 0; i_node < 4; i_node++)
//        {
//            if(fabs(ElementalDistances[i_node]) < dist_limit)
//            {
//                ElementalDistances[i_node] = dist_limit;
//                distChangedToLimit = true;
//            }
//        }
//
//        // Check, if this approach changes the split-flag (might be, that Element is not cut anymore if node with zero distance gets a positive limit distance value
//        unsigned int numberNodesPositiveDistance = 0;
//        for(unsigned int i_node = 0; i_node < 4; i_node++)
//        {
//            if((ElementalDistances[i_node]) > 0)
//                numberNodesPositiveDistance++;
//        }

    // Clamp each of the 4 nodal distances of the tetrahedron to +/- dist_limit,
    // preserving the sign (a distance of exactly 0 becomes +dist_limit).
    for(unsigned int i_node = 0; i_node < 4; i_node++)
    {
        double & di = ElementalDistances[i_node];
        if(fabs(di) < dist_limit)
        {
            if(di >= 0) di = dist_limit;
            else di = -dist_limit;
        }
    }

    // Element is not set
//        if(numberNodesPositiveDistance == 4 && distChangedToLimit == true)
//            Element->GetValue(SPLIT_ELEMENT) = false;
}

///******************************************************************************************************************
///******************************************************************************************************************

/**
 * Generates a triangulated skin model part from the zero level-set of the
 * ELEMENTAL_DISTANCES stored on the split (TO_SPLIT) tetrahedral elements of
 * the fluid model part. For every cut element the cut edges are intersected by
 * linear interpolation; 3 intersection points yield one triangle condition,
 * 4 intersection points are ordered by angle around the distance-gradient
 * normal and yield two triangle conditions.
 *
 * @param mrNewSkinModelPart model part that receives the generated nodes and
 *        "Condition3D" conditions (node/condition ids continue after the
 *        fluid model part's ids)
 */
void GenerateSkinModelPart( ModelPart& mrNewSkinModelPart )
{
    unsigned int id_node = mrFluidModelPart.NumberOfNodes() + 1;
    unsigned int id_condition = mrFluidModelPart.NumberOfConditions() + 1;

    mrNewSkinModelPart.Nodes().reserve(mrFluidModelPart.Nodes().size());
    mrNewSkinModelPart.Conditions().reserve(mrFluidModelPart.Elements().size());

    for(ModelPart::ElementIterator i_fluid_element = mrFluidModelPart.ElementsBegin(); i_fluid_element != mrFluidModelPart.ElementsEnd(); i_fluid_element++)
    {
        bool is_split = i_fluid_element->Is(TO_SPLIT);
        if(is_split == true)
        {
            const Vector& distances = i_fluid_element->GetValue(ELEMENTAL_DISTANCES);
            Geometry< Node<3> >& geom = i_fluid_element->GetGeometry();

            // generate the points on the edges at the zero of the distance function
            std::vector< Point > edge_points;
            edge_points.reserve(4);

            // loop over all 6 edges of the tetrahedra
            for(unsigned int i=0; i<3; i++)
            {
                for(unsigned int j=i+1; j<4; j++) // go through the edges 01, 02, 03, 12, 13, 23
                {
                    double di = distances[i];
                    double dj = distances[j];

                    if(di*dj < 0) //edge is cut
                    {
                        // generate point on edge by linear interpolation
                        double Ni = fabs(dj) / ( fabs(di) + fabs(dj) );
                        double Nj = 1.0 - Ni;
                        Point edge_point(Ni * geom[i] + Nj * geom[j]);
                        edge_points.push_back(edge_point);
                    }
                }
            }

            // three intersection nodes
            if(edge_points.size() == 3)
            {
                // ######## ADDING NEW NODE #########
                Node < 3 >::Pointer pnode1 = mrNewSkinModelPart.CreateNewNode(id_node++,edge_points[0].X(),edge_points[0].Y(),edge_points[0].Z());
                Node < 3 >::Pointer pnode2 = mrNewSkinModelPart.CreateNewNode(id_node++,edge_points[1].X(),edge_points[1].Y(),edge_points[1].Z());
                Node < 3 >::Pointer pnode3 = mrNewSkinModelPart.CreateNewNode(id_node++,edge_points[2].X(),edge_points[2].Y(),edge_points[2].Z());

                // ######## ADDING NEW CONDITION #########
                //form a triangle
                Triangle3D3< Node<3> > triangle(pnode1, pnode2, pnode3);

                Condition const& rReferenceCondition = KratosComponents<Condition>::Get("Condition3D");
                Properties::Pointer properties = mrNewSkinModelPart.rProperties()(0);
                Condition::Pointer p_condition = rReferenceCondition.Create(id_condition++, triangle, properties);

                mrNewSkinModelPart.Conditions().push_back(p_condition);
            }

            // four intersection nodes
            if(edge_points.size() == 4)
            {
                //form a quadrilatera with the 4 cut nodes
                array_1d<double,3> x21 = edge_points[1] - edge_points[0];
                array_1d<double,3> x31 = edge_points[2] - edge_points[0];
                array_1d<double,3> x41 = edge_points[3] - edge_points[0];

                //define a vector oriented as x21
                array_1d<double,3> v1 = x21 / norm_2(x21);

                // gradient of the (linear) elemental distance field gives the
                // cut-plane normal
                BoundedMatrix<double,4,3> DN_DX;
                array_1d<double,4> msN;
                double Area;
                GeometryUtils::CalculateGeometryData( geom, DN_DX, msN, Area );
                array_1d<double,3> n = prod(trans(DN_DX),distances);
                n /= norm_2(n);

                array_1d<double,3> v2;
                MathUtils<double>::CrossProduct(v2,v1,n); // v2 = v1 x n

                // in-plane angles of the other cut points relative to v1,
                // used to order the 4 points into a non-self-intersecting quad
                array_1d<double,3> angles;
                angles[0] = 0.0; //angle between x21 and v1
                angles[1] = atan2( inner_prod(x31,v2), inner_prod(x31,v1) ); //angle between x31 and v1
                angles[2] = atan2( inner_prod(x41,v2), inner_prod(x41,v1) ); //angle between x31 and v1
                // NOTE(review): the comment above presumably should read
                // "angle between x41 and v1" — looks like a copy/paste slip.

                double max_angle = 0.0;
                double min_angle = 0.0;
                unsigned int min_pos = 1;
                unsigned int max_pos = 1;
                for(unsigned int i=1; i<3; i++)
                {
                    if(angles[i] < min_angle)
                    {
                        min_pos = i+1; //this is the local index of the edge point which forms the minimal angle
                        min_angle = angles[i];
                    }
                    else if(angles[i] > max_angle)
                    {
                        max_pos = i+1; //this is the local index of the edge point which forms the maximal angle
                        max_angle = angles[i];
                    }
                }

                //find the pos of the center node
                unsigned int center_pos = 0;
                for(unsigned int i=1; i<4; i++)
                {
                    if((i!= min_pos) && (i!=max_pos))
                    {
                        center_pos = i;
                    }
                }

                // ######## ADDING NEW NODE #########
                Node < 3 >::Pointer pnode1 = mrNewSkinModelPart.CreateNewNode(id_node++,edge_points[0].X(),edge_points[0].Y(),edge_points[0].Z());
                Node < 3 >::Pointer pnode2 = mrNewSkinModelPart.CreateNewNode(id_node++,edge_points[min_pos].X(),edge_points[min_pos].Y(),edge_points[min_pos].Z());
                Node < 3 >::Pointer pnode3 = mrNewSkinModelPart.CreateNewNode(id_node++,edge_points[center_pos].X(),edge_points[center_pos].Y(),edge_points[center_pos].Z());
                Node < 3 >::Pointer pnode4 = mrNewSkinModelPart.CreateNewNode(id_node++,edge_points[max_pos].X(),edge_points[max_pos].Y(),edge_points[max_pos].Z());

                // ######## ADDING NEW CONDITION #########
                //form two triangles
                Triangle3D3< Node<3> > triangle1(pnode1, pnode2, pnode3);
                Triangle3D3< Node<3> > triangle2(pnode1, pnode3, pnode4);

                Condition const& rReferenceCondition = KratosComponents<Condition>::Get("Condition3D");

                Properties::Pointer properties = mrNewSkinModelPart.rProperties()(0);

                Condition::Pointer p_condition1 = rReferenceCondition.Create(id_condition++, triangle1, properties);
                Condition::Pointer p_condition2 = rReferenceCondition.Create(id_condition++, triangle2, properties);

                mrNewSkinModelPart.Conditions().push_back(p_condition1);
                mrNewSkinModelPart.Conditions().push_back(p_condition2);
            }
        }
    }
}

///******************************************************************************************************************
///******************************************************************************************************************

/**
 * (Re)builds the octree: computes the bounding box of fluid + skin nodes,
 * inserts all skin nodes (refining the octree around them) and then inserts
 * the skin elements as objects into the leaf cells.
 */
void GenerateOctree()
{
    Timer::Start("Generating Octree");
    //std::cout << "Generating the Octree..." << std::endl;
    auto temp_octree =  Kratos::make_shared<OctreeType>();
    //OctreeType::Pointer temp_octree = OctreeType::Pointer(new OctreeType() );
    mpOctree.swap(temp_octree);

    double low[3];
    double high[3];

    for (int i = 0 ; i < 3; i++)
    {
        low[i] = high[i] = mrFluidModelPart.NodesBegin()->Coordinates()[i];
    }

    // loop over all nodes in the bounding box
    for(ModelPart::NodeIterator i_node = mrFluidModelPart.NodesBegin();
            i_node != mrFluidModelPart.NodesEnd();
            i_node++)
    {
        const array_1d<double,3>& r_coordinates = i_node->Coordinates();
        for (int i = 0 ; i < 3; i++)
        {
            low[i]  = r_coordinates[i] < low[i]  ? r_coordinates[i] : low[i];
            high[i] = r_coordinates[i] > high[i] ? r_coordinates[i] : high[i];
        }
    }
    // loop over all skin nodes
    for(ModelPart::NodeIterator i_node = mrSkinModelPart.NodesBegin();
            i_node != mrSkinModelPart.NodesEnd();
            i_node++)
    {
        const array_1d<double,3>& r_coordinates = i_node->Coordinates();
        for (int i = 0 ; i < 3; i++)
        {
            low[i]  = r_coordinates[i] < low[i]  ? r_coordinates[i] : low[i];
            high[i] = r_coordinates[i] > high[i] ? r_coordinates[i] : high[i];
        }
    }

    mpOctree->SetBoundingBox(low,high);

    //mpOctree->RefineWithUniformSize(0.0625);

    // loop over all structure nodes
    for(ModelPart::NodeIterator i_node = mrSkinModelPart.NodesBegin();
            i_node != mrSkinModelPart.NodesEnd();
            i_node++)
    {
        double temp_point[3];
        temp_point[0] = i_node->X();
        temp_point[1] = i_node->Y();
        temp_point[2] = i_node->Z();
        mpOctree->Insert(temp_point);
    }

    //mpOctree->Constrain2To1(); // To be removed. Pooyan.

    // loop over all structure elements
    for(ModelPart::ElementIterator i_element = mrSkinModelPart.ElementsBegin();
            i_element != mrSkinModelPart.ElementsEnd();
            i_element++)
    {
        mpOctree->Insert(*(i_element).base());
    }

    Timer::Stop("Generating Octree");
//        KRATOS_WATCH(mpOctree);
//        std::cout << "######## WRITING OCTREE MESH #########" << std::endl;
//        std::ofstream myfile;
//        myfile.open ("octree.post.msh");
//        mpOctree.PrintGiDMesh(myfile);
//        myfile.close();
    //std::cout << "Generating the Octree finished" << std::endl;
}

///******************************************************************************************************************
///******************************************************************************************************************

/**
 * Allocates the per-leaf node-data containers (in parallel) and then creates
 * the octree corner nodes cell by cell (serially, since neighbour cells share
 * nodes), with ids continuing after the body model part's node ids.
 */
void GenerateNodes()
{
    Timer::Start("Generating Nodes");
    std::vector<OctreeType::cell_type*> all_leaves;
    mpOctree->GetAllLeavesVector(all_leaves);

    int leaves_size = all_leaves.size();

    #pragma omp parallel for
    for (int i = 0; i < leaves_size; i++)
    {
        *(all_leaves[i]->pGetDataPointer()) = ConfigurationType::AllocateData();
    }

    std::size_t last_id = mrBodyModelPart.NumberOfNodes() + 1;
    for (std::size_t i = 0; i < all_leaves.size(); i++)
    {
        CellType* cell = all_leaves[i];
        GenerateCellNode(cell, last_id);
    }

    Timer::Stop("Generating Nodes");
}

///******************************************************************************************************************
///******************************************************************************************************************

/**
 * Creates the (up to 8) corner node-data entries of a cell that are still
 * unset, assigns them consecutive ids and shares each new node with the
 * neighbouring cells that touch the same corner.
 *
 * @param pCell  leaf cell whose corner nodes are completed
 * @param LastId next free node id; incremented for every node created
 */
void GenerateCellNode(CellType* pCell, std::size_t& LastId)
{
    for (int i_pos=0; i_pos < 8; i_pos++) // position 8 is for center
    {
        DistanceSpatialContainersConfigure::cell_node_data_type* p_node = (*(pCell->pGetData()))[i_pos];
        if(p_node == 0)
        {
            (*(pCell->pGetData()))[i_pos] = new DistanceSpatialContainersConfigure::cell_node_data_type;

            (*(pCell->pGetData()))[i_pos]->Id() = LastId++;

            mOctreeNodes.push_back((*(pCell->pGetData()))[i_pos]);

            SetNodeInNeighbours(pCell,i_pos,(*(pCell->pGetData()))[i_pos]);
        }
    }
}

///******************************************************************************************************************
///******************************************************************************************************************

/**
 * Stores pNode in every neighbour cell that shares the corner at local
 * position Position of pCell, so that coincident octree corners are
 * represented by a single node object.
 */
void SetNodeInNeighbours(CellType* pCell, int Position, CellNodeDataType* pNode)
{
    CellType::key_type point_key[3];
    pCell->GetKey(Position, point_key);

    for (std::size_t i_direction = 0; i_direction < 8; i_direction++)
    {
        CellType::key_type neighbour_key[3];
        if (pCell->GetNeighbourKey(Position, i_direction, neighbour_key))
        {
            CellType* neighbour_cell = mpOctree->pGetCell(neighbour_key);
            if (!neighbour_cell || (neighbour_cell == pCell))
                continue;

            std::size_t position = neighbour_cell->GetLocalPosition(point_key);
            if((*neighbour_cell->pGetData())[position])
            {
                // the shared corner is already occupied in the neighbour:
                // report the inconsistency instead of overwriting it
                std::cout << "ERROR!! Bad Position calculated!!!!!!!!!!! position :" << position << std::endl;
                continue;
            }

            (*neighbour_cell->pGetData())[position] = pNode;
        }
    }
}

///******************************************************************************************************************
///******************************************************************************************************************

/**
 * Computes the signed DISTANCE solution-step value for every fluid mesh node
 * by ray casting against the skin (see CalculateNodeDistance). Runs over the
 * nodes in parallel.
 */
void CalculateDistance2()
{
    Timer::Start("Calculate Distances2");
    ModelPart::NodesContainerType::ContainerType& nodes = mrFluidModelPart.NodesArray();
    int nodes_size = nodes.size();
//         // first of all we reset the node distance to 1.00 which is the maximum distnace in our normalized space.
//#pragma omp parallel for firstprivate(nodes_size)
//         for(int i = 0 ; i < nodes_size ; i++)
//             nodes[i]->GetSolutionStepValue(DISTANCE) = 1.00;

    std::vector<CellType*> leaves;

    mpOctree->GetAllLeavesVector(leaves);
    //int leaves_size = leaves.size();

//         for(int i = 0 ; i < leaves_size ; i++)
//             CalculateNotEmptyLeavesDistance(leaves[i]);

    #pragma omp parallel for firstprivate(nodes_size)
    for(int i = 0 ; i < nodes_size ; i++)
    {
        CalculateNodeDistance(*(nodes[i]));
    }
    Timer::Stop("Calculate Distances2");
}

///******************************************************************************************************************
///******************************************************************************************************************

//        void CalculateDistance3()
//        {
//            Timer::Start("Calculate Distances2");
//            ModelPart::NodesContainerType::ContainerType& nodes = mrFluidModelPart.NodesArray();
//            int nodes_size = nodes.size();
////            // first of all we reset the node distance to 1.00 which is the maximum distnace in our normalized space.
//#pragma omp parallel for firstprivate(nodes_size)
//            for(int i = 0 ; i < nodes_size ; i++)
//                nodes[i]->GetSolutionStepValue(DISTANCE) = 1.00;

//            std::vector<CellType*> leaves;

//            mpOctree->GetAllLeavesVector(leaves);
//            int leaves_size = leaves.size();

//            for(int i = 0 ; i < leaves_size ; i++)
//                CalculateNotEmptyLeavesDistance(leaves[i]);

//#pragma omp parallel for firstprivate(nodes_size)
//            for(int i = 0 ; i < nodes_size ; i++)
//            {
//                CalculateNodeDistance(*(nodes[i]));
//            }
//            Timer::Stop("Calculate Distances2");
//        }

//        void CalculateDistance4()
//        {
//            Timer::Start("Calculate Distances3");
//            ModelPart::NodesContainerType::ContainerType& nodes = mrFluidModelPart.NodesArray();
//            int nodes_size = nodes.size();
//            std::vector<CellType*> leaves;

//            mpOctree->GetAllLeavesVector(leaves);
//            int leaves_size = leaves.size();

//#pragma omp parallel for firstprivate(nodes_size)
//            for(int i = 0 ; i < nodes_size ; i++)
//            {
//                CalculateNodeDistanceFromCell(*(nodes[i]));
//            }
//            Timer::Stop("Calculate Distances3");
//        }

/**
 * Computes signed distances on the OCTREE nodes (mOctreeNodes): resets all to
 * 1.00 (the maximum in normalized space), seeds exact distances in non-empty
 * leaves, then propagates signs/magnitudes by axis-aligned ray casting.
 * NOTE(review): only direction 0 is processed (loop bound is 1) and only for
 * nodes with coordinates < 1.00 — presumably intentional, confirm upstream.
 */
void CalculateDistance()
{
    Timer::Start("Calculate Distances");
    DistanceSpatialContainersConfigure::data_type& nodes = mOctreeNodes;
    int nodes_size = nodes.size();
    // first of all we reste the node distance to 1.00 which is the maximum distnace in our normalized space.
    #pragma omp parallel for firstprivate(nodes_size)
    for(int i = 0 ; i < nodes_size ; i++)
        nodes[i]->Distance() = 1.00;


    std::vector<CellType*> leaves;

    mpOctree->GetAllLeavesVector(leaves);
    int leaves_size = leaves.size();

    for(int i = 0 ; i < leaves_size ; i++)
        CalculateNotEmptyLeavesDistance(leaves[i]);

    for(int i_direction = 0 ; i_direction < 1 ; i_direction++)
    {

//#pragma omp parallel for firstprivate(nodes_size)
        for(int i = 0 ; i < nodes_size ; i++)
        {
            if(nodes[i]->X() < 1.00 && nodes[i]->Y() < 1.00 && nodes[i]->Z() < 1.00)
                // if((*nodes[i])[i_direction] == 0.00)
                CalculateDistance(*(nodes[i]), i_direction);
        }
    }
    Timer::Stop("Calculate Distances");
}

/**
 * Ray-casts along axis i_direction through rNode's position: walks the sorted
 * ray/skin intersections, flipping the sign (ray_color) at each crossing, and
 * updates the Distance() of every octree node lying on the ray with the
 * smaller magnitude / corrected sign.
 */
void CalculateDistance(CellNodeDataType& rNode, int i_direction)
{
    double coords[3] = {rNode.X(), rNode.Y(), rNode.Z()};

    // KRATOS_WATCH_3(coords);

    //This function must color the positions in space defined by 'coords'.
    //coords is of dimension (3) normalized in (0,1)^3 space

    typedef Element::GeometryType triangle_type;
    typedef std::vector<std::pair<double, triangle_type*> > intersections_container_type;

    intersections_container_type intersections;
    DistanceSpatialContainersConfigure::data_type nodes_array;


    const double epsilon = 1e-12;

    double distance = 1.0;

    // Creating the ray
    double ray[3] = {coords[0], coords[1], coords[2]};

    mpOctree->NormalizeCoordinates(ray);
    ray[i_direction] = 0; // starting from the lower extreme

    // KRATOS_WATCH_3(ray)
    GetIntersectionsAndNodes(ray, i_direction, intersections, nodes_array);
    // KRATOS_WATCH(nodes_array.size())
    for (std::size_t i_node = 0; i_node < nodes_array.size() ; i_node++)
    {
        double coord = (*nodes_array[i_node])[i_direction];
        // KRATOS_WATCH(intersections.size());

        int ray_color= 1;
        std::vector<std::pair<double, Element::GeometryType*> >::iterator i_intersection = intersections.begin();
        while (i_intersection != intersections.end())
        {
            double d = coord - i_intersection->first;
            if (d > epsilon)
            {
                // node is beyond this intersection: crossing flips the sign
                ray_color = -ray_color;
                distance = d;
            }
            else if (d > -epsilon)
            {
                //interface
                distance = 0.00;
                break;
            }
            else
            {
                if(distance > -d)
                    distance = -d;
                break;
            }

            i_intersection++;
        }

        distance *= ray_color;

        double& node_distance = nodes_array[i_node]->Distance();
        if(fabs(distance) < fabs(node_distance))
            node_distance = distance;
        else if (distance*node_distance < 0.00) // assigning the correct sign
            node_distance = -node_distance;
    }
}

/**
 * For a leaf cell that contains skin objects, seeds each of the cell's 8
 * corner node distances with the (unsigned) minimum point-to-triangle
 * distance over all objects in the cell.
 */
void CalculateNotEmptyLeavesDistance(CellType* pCell)
{
    //typedef Element::GeometryType triangle_type;
    typedef OctreeType::cell_type::object_container_type object_container_type;

    object_container_type* objects = (pCell->pGetObjects());

    // There are no intersection in empty cells
    if (objects->empty())
        return;


    for (int i_pos=0; i_pos < 8; i_pos++) // position 8 is for center
    {
        double distance = 1.00; // maximum distance is 1.00

        for(object_container_type::iterator i_object = objects->begin(); i_object != objects->end(); i_object++)
        {
            CellType::key_type keys[3];
            pCell->GetKey(i_pos,keys);

            double cell_point[3];
            mpOctree->CalculateCoordinates(keys,cell_point);

            double d = GeometryUtils::PointDistanceToTriangle3D((*i_object)->GetGeometry()[0], (*i_object)->GetGeometry()[1], (*i_object)->GetGeometry()[2], Point(cell_point[0], cell_point[1], cell_point[2]));

            if(d < distance)
                distance = d;
        }

        double& node_distance = (*(pCell->pGetData()))[i_pos]->Distance();
        if(distance < node_distance)
            node_distance = distance;

    }

}

/**
 * Corrects the SIGN of the nodal DISTANCE solution-step value using the
 * ray-cast result of DistancePositionInSpace.
 * NOTE(review): the magnitude update is commented out, so only the sign is
 * ever flipped here — the stored |distance| is kept as-is; confirm this is
 * the intended behavior.
 */
void CalculateNodeDistance(Node<3>& rNode)
{
    double coord[3] = {rNode.X(), rNode.Y(), rNode.Z()};
    double distance = DistancePositionInSpace(coord);
    double& node_distance =  rNode.GetSolutionStepValue(DISTANCE);

    //const double epsilon = 1.00e-12;
    //if(fabs(node_distance) > fabs(distance))
    //    node_distance = distance;
    /*else*/ if (distance*node_distance < 0.00) // assigning the correct sign
        node_distance = -node_distance;
}

//      void CalculateNodeDistanceFromCell(Node<3>& rNode)
//      {
//          OctreeType::key_type node_key[3] = {octree->CalcKeyNormalized(rNode.X()), octree->CalcKeyNormalized(rNode.Y()), octree->CalcKeyNormalized(rNode.Z())};
//          OctreeType::cell_type* pcell = octree->pGetCell(node_key);

//          object_container_type* objects = (pCell->pGetObjects());

//          // We interpolate the cell distances for the node in empty cells
//          if (objects->empty())
//          {

//          }

//          double distance = DistancePositionInSpace(coord);
//          double& node_distance =  rNode.GetSolutionStepValue(DISTANCE);

//          //const double epsilon = 1.00e-12;
//          if(fabs(node_distance) > fabs(distance))
//            node_distance = distance;
//          else if (distance*node_distance < 0.00) // assigning the correct sign
//              node_distance = -node_distance;
//      }

/**
 * Ray-casts along all three axes through 'coords' and returns a signed
 * distance: sign from the parity of skin crossings (inside/outside coloring),
 * magnitude from the closest crossing among the three directions.
 *
 * @param coords point in original (unnormalized) space, dimension 3
 * @return signed distance estimate at coords
 */
double DistancePositionInSpace(double* coords)
{
    //This function must color the positions in space defined by 'coords'.
    //coords is of dimension (3) normalized in (0,1)^3 space

    typedef Element::GeometryType triangle_type;
    typedef std::vector<std::pair<double, triangle_type*> > intersections_container_type;

    intersections_container_type intersections;

    const int dimension = 3;
    const double epsilon = 1e-12;

    double distances[3] = {1.0, 1.0, 1.0};

    for (int i_direction = 0; i_direction < dimension; i_direction++)
    {
        // Creating the ray
        double ray[3] = {coords[0], coords[1], coords[2]};

        mpOctree->NormalizeCoordinates(ray);
        ray[i_direction] = 0; // starting from the lower extreme

        GetIntersections(ray, i_direction, intersections);

//            if(intersections.size() == 1)
//                KRATOS_WATCH_3(ray)

        // KRATOS_WATCH(intersections.size());

        int ray_color= 1;
        std::vector<std::pair<double, Element::GeometryType*> >::iterator i_intersection = intersections.begin();
        while (i_intersection != intersections.end())
        {
            double d = coords[i_direction] - i_intersection->first;
            if (d > epsilon)
            {
                ray_color = -ray_color;
                distances[i_direction] = d;
//                    if(distances[i_direction] > d) // I think this is redundunt. Pooyan.
//                    {
//                        if(ray_color > 0.00)
//                            distances[i_direction] = d;
//                        else
//                            distances[i_direction] = -d;
//                    }
            }
            else if (d > -epsilon)
            {
                //interface
                distances[i_direction] = 0.00;
                break;
            }
            else
            {
                if(distances[i_direction] > -d)
                    distances[i_direction] = -d;
                break;
            }

            i_intersection++;
        }

        distances[i_direction] *= ray_color;
    }

//        if(distances[0]*distances[1] < 0.00 || distances[2]*distances[1] < 0.00)
//            KRATOS_WATCH_3(distances);

//#ifdef _DEBUG
//        std::cout << "colors : " << colors[0] << ", " << colors[1] << ", " << colors[2] << std::endl;
//#endif
    // take the smallest-magnitude of the three per-axis results
    double distance = (fabs(distances[0]) > fabs(distances[1])) ? distances[1] : distances[0];
    distance = (fabs(distance) > fabs(distances[2])) ? distances[2] : distance;

    return distance;
}

/**
 * Walks the octree cells along an axis-aligned ray, collecting (a) all
 * ray/skin intersection points (deduplicated within epsilon) and (b) the
 * octree nodes that lie exactly on the ray.
 *
 * @param ray        dimension-3 start point, normalized in (0,1)^3 space
 * @param direction  axis index 0/1/2 (x/y/z)
 * @param intersections output: sorted unique (ray-coordinate, triangle) pairs
 * @param rNodesArray   output: octree nodes located on the ray
 */
void GetIntersectionsAndNodes(double* ray, int direction, std::vector<std::pair<double,Element::GeometryType*> >& intersections, DistanceSpatialContainersConfigure::data_type& rNodesArray)
{
    //This function passes the ray through the model and gives the hit point to all objects in its way
    //ray is of dimension (3) normalized in (0,1)^3 space
    // direction can be 0,1,2 which are x,y and z respectively

    const double epsilon = 1.00e-12;

    // first clearing the intersections points vector
    intersections.clear();

    //OctreeType* octree = &mOctree;
    OctreeType* octree = mpOctree.get();

    OctreeType::key_type ray_key[3] = {octree->CalcKeyNormalized(ray[0]), octree->CalcKeyNormalized(ray[1]), octree->CalcKeyNormalized(ray[2])};
    OctreeType::key_type cell_key[3];

    // getting the entrance cell from lower extreme
    ray_key[direction] = 0;
    OctreeType::cell_type* cell = octree->pGetCell(ray_key);

    while (cell)
    {
        std::size_t position = cell->GetLocalPosition(ray_key); // Is this the local position!?!?!?!
        OctreeType::key_type node_key[3];
        cell->GetKey(position, node_key);
        if((node_key[0] == ray_key[0]) && (node_key[1] == ray_key[1]) && (node_key[2] == ray_key[2]))
        {
            if(cell->pGetData())
            {
                if(cell->pGetData()->size() > position)
                {
                    CellNodeDataType* p_node = (*cell->pGetData())[position];
                    if(p_node)
                    {
                        //KRATOS_WATCH(p_node->Id())
                        rNodesArray.push_back(p_node);
                    }
                }
                else
                    KRATOS_WATCH(cell->pGetData()->size())
                }
        }


//            std::cout << ".";
        GetCellIntersections(cell, ray, ray_key, direction, intersections);

        // Add the cell's middle node if existed
//            cell->GetKey(8, cell_key); // 8 is the central position
//            ray_key[direction]=cell_key[direction]; // positioning the ray in the middle of cell in its direction

//            position = cell->GetLocalPosition(ray_key);
//            if(position < 27) // principal nodes
//            {
//                if(cell->pGetData())
//                {
//                    if(cell->pGetData()->size() > position)
//                    {
//                        Node<3>* p_node = (*cell->pGetData())[position];
//                        if(p_node)
//                        {
//                            //KRATOS_WATCH(p_node->Id())
//                            rNodesArray.push_back(p_node);
//                        }
//                    }
//                    else
//                        KRATOS_WATCH(cell->pGetData()->size())
//                }
//            }
//            else
//            {
//                KRATOS_WATCH(position);
//                KRATOS_WATCH(*cell);
//            }


        // go to the next cell
        if (cell->GetNeighbourKey(1 + direction * 2, cell_key))
        {
            ray_key[direction] = cell_key[direction];
            cell = octree->pGetCell(ray_key);
            ray_key[direction] -= 1 ;//the key returned by GetNeighbourKey is inside the cell (minkey +1), to ensure that the corresponding
            //cell get in pGetCell is the right one.
//#ifdef _DEBUG
//                Octree_Pooyan::key_type min_key[3];
//                cell->GetMinKey(min_key[0],min_key[1],min_key[2]);
//                Octree_Pooyan::key_type tmp;
//                tmp= min_key[direction];
//                assert(ray_key[direction]==tmp);
//#endif
        } else
            cell = NULL;
    }



    // KRATOS_WATCH(rNodesArray.size());
    // now eliminating the repeated objects
    if (!intersections.empty())
    {
        //sort
        std::sort(intersections.begin(), intersections.end());
        // unique
        std::vector<std::pair<double, Element::GeometryType*> >::iterator i_begin = intersections.begin();
        std::vector<std::pair<double, Element::GeometryType*> >::iterator i_intersection = intersections.begin();
        while (++i_begin != intersections.end())
        {
            // considering the very near points as the same points
            if (fabs(i_begin->first - i_intersection->first) > epsilon) // if the hit points are far enough they are not the same
                *(++i_intersection) = *i_begin;
        }
        intersections.resize((++i_intersection) - intersections.begin());

    }
}

/**
 * Same cell walk as GetIntersectionsAndNodes but collecting only the sorted,
 * deduplicated ray/skin intersection points (no octree nodes).
 */
void GetIntersections(double* ray, int direction, std::vector<std::pair<double,Element::GeometryType*> >& intersections)
{
    //This function passes the ray through the model and gives the hit point to all objects in its way
    //ray is of dimension (3) normalized in (0,1)^3 space
    // direction can be 0,1,2 which are x,y and z respectively

    const double epsilon = 1.00e-12;

    // first clearing the intersections points vector
    intersections.clear();

    //OctreeType* octree = &mOctree;
    OctreeType* octree = mpOctree.get();

    OctreeType::key_type ray_key[3] = {octree->CalcKeyNormalized(ray[0]), octree->CalcKeyNormalized(ray[1]), octree->CalcKeyNormalized(ray[2])};
    OctreeType::key_type cell_key[3];

    // getting the entrance cell from lower extreme
    OctreeType::cell_type* cell = octree->pGetCell(ray_key);

    while (cell)
    {
//            std::cout << ".";
        GetCellIntersections(cell, ray, ray_key, direction, intersections);
        // go to the next cell
        if (cell->GetNeighbourKey(1 + direction * 2, cell_key))
        {
            ray_key[direction] = cell_key[direction];
            cell = octree->pGetCell(ray_key);
            ray_key[direction] -= 1 ;//the key returned by GetNeighbourKey is inside the cell (minkey +1), to ensure that the corresponding
            //cell get in pGetCell is the right one.
//#ifdef _DEBUG
//                Octree_Pooyan::key_type min_key[3];
//                cell->GetMinKey(min_key[0],min_key[1],min_key[2]);
//                Octree_Pooyan::key_type tmp;
//                tmp= min_key[direction];
//                assert(ray_key[direction]==tmp);
//#endif
        } else
            cell = NULL;
    }

    // now eliminating the repeated objects
    if (!intersections.empty())
    {
        //sort
        std::sort(intersections.begin(), intersections.end());
        // unique
        std::vector<std::pair<double, Element::GeometryType*> >::iterator i_begin = intersections.begin();
        std::vector<std::pair<double, Element::GeometryType*> >::iterator i_intersection = intersections.begin();
        while (++i_begin != intersections.end())
        {
            // considering the very near points as the same points
            if (fabs(i_begin->first - i_intersection->first) > epsilon) // if the hit points are far enough they are not the same
                *(++i_intersection) = *i_begin;
        }
        intersections.resize((++i_intersection) - intersections.begin());

    }
}

/**
 * Intersects the ray segment spanning one cell (along the given axis) with
 * every skin triangle stored in the cell, appending non-coplanar hits to
 * 'intersections'.
 *
 * @return always 0 (empty cells return early with 0 as well)
 */
int GetCellIntersections(OctreeType::cell_type* cell, double* ray,
                         OctreeType::key_type* ray_key, int direction,
                         std::vector<std::pair<double, Element::GeometryType*> >& intersections)
{
    //This function passes the ray through the cell and gives the hit point to all objects in its way
    //ray is of dimension (3) normalized in (0,1)^3 space
    // direction can be 0,1,2 which are x,y and z respectively

    //typedef Element::GeometryType triangle_type;
    typedef OctreeType::cell_type::object_container_type object_container_type;

    object_container_type* objects = (cell->pGetObjects());

    // There are no intersection in empty cells
    if (objects->empty())
        return 0;

//      std::cout << "X";
    // calculating the two extreme of the ray segment inside the cell
    double ray_point1[3] = {ray[0], ray[1], ray[2]};
    double ray_point2[3] = {ray[0], ray[1], ray[2]};
    double normalized_coordinate;
    mpOctree->CalculateCoordinateNormalized(ray_key[direction], normalized_coordinate);
    ray_point1[direction] = normalized_coordinate;
    ray_point2[direction] = ray_point1[direction] + mpOctree->CalcSizeNormalized(cell);

    mpOctree->ScaleBackToOriginalCoordinate(ray_point1);
    mpOctree->ScaleBackToOriginalCoordinate(ray_point2);

    for (object_container_type::iterator i_object = objects->begin(); i_object != objects->end(); i_object++)
    {
        double intersection[3]={0.00,0.00,0.00};

        int is_intersected = IntersectionTriangleSegment((*i_object)->GetGeometry(), ray_point1, ray_point2, intersection); // This intersection has to be optimized for axis aligned rays

        if (is_intersected == 1) // There is an intersection but not coplanar
            intersections.push_back(std::pair<double, Element::GeometryType*>(intersection[direction], &((*i_object)->GetGeometry())));
        //else if(is_intersected == 2) // coplanar case
    }

    return 0;
}

/**
 * Segment/triangle intersection test.
 *
 * @param rGeometry  triangle (first three nodes used)
 * @param RayPoint1  segment start
 * @param RayPoint2  segment end
 * @param IntersectionPoint output: intersection with the triangle's plane
 * @return 1 intersection inside triangle, 0 no intersection,
 *         2 segment lies in triangle plane, -1 degenerate triangle
 */
int IntersectionTriangleSegment(Element::GeometryType& rGeometry, double* RayPoint1, double* RayPoint2, double* IntersectionPoint)
{
    // This is the adaption of the implemnetation provided in:
    // http://www.softsurfer.com/Archive/algorithm_0105/algorithm_0105.htm#intersect_RayTriangle()

    const double epsilon = 1.00e-12;

    array_1d<double,3>    u, v, n;             // triangle vectors
    array_1d<double,3>    dir, w0, w;          // ray vectors
    double     r, a, b;             // params to calc ray-plane intersect


    // get triangle edge vectors and plane normal
    u = rGeometry[1] - rGeometry[0];
    v = rGeometry[2] - rGeometry[0];

    MathUtils<double>::CrossProduct(n, u, v);             // cross product

    if (norm_2(n) == 0)            // triangle is degenerate
        return -1;                 // do not deal with this case

    double triangle_origin_distance = -inner_prod(n, rGeometry[0]);
    Point ray_point_1, ray_point_2;

    for(int i = 0 ; i < 3 ; i++)
    {
        dir[i] = RayPoint2[i] - RayPoint1[i];             // ray direction vector
        w0[i] = RayPoint1[i] - rGeometry[0][i];
        ray_point_1[i] = RayPoint1[i];
        ray_point_2[i] = RayPoint2[i];
    }

    double sign_distance_1 = inner_prod(n, ray_point_1) + triangle_origin_distance;
    double sign_distance_2 = inner_prod(n, ray_point_2) + triangle_origin_distance;

    if (sign_distance_1*sign_distance_2 > epsilon) // segment line point on the same side of plane
        return 0;
    a = -inner_prod(n,w0);
    b = inner_prod(n,dir);

    if (fabs(b) < epsilon)   // ray is parallel to triangle plane
    {
        if (a == 0)              // ray lies in triangle plane
            return 2;
        else return 0;             // ray disjoint from plane
    }

    // get intersect point of ray with triangle plane
    r = a / b;
    if (r < 0.0)                   // ray goes away from triangle
        return 0;                  // => no intersect
    // for a segment, also test if (r > 1.0) => no intersect

    for(int i = 0 ; i < 3 ; i++)
        IntersectionPoint[i]  = RayPoint1[i] + r * dir[i];           // intersect point of ray and plane

    // is I inside T?
    double    uu, uv, vv, wu, wv, D;
    uu = inner_prod(u,u);
    uv = inner_prod(u,v);
    vv = inner_prod(v,v);

    for(int i = 0 ; i < 3 ; i++)
        w[i] = IntersectionPoint[i] - rGeometry[0][i];


    wu = inner_prod(w,u);
    wv = inner_prod(w,v);
    D = uv * uv - uu * vv;

    // get and test parametric coords
    double s, t;
    s = (uv * wv - vv * wu) / D;
    if (s < 0.0 - epsilon || s > 1.0 + epsilon)        // I is outside T
        return 0;
    t = (uv * wu - uu * wv) / D;
    if (t < 0.0 - epsilon || (s + t) > 1.0 + epsilon)  // I is outside T
        return 0;

    return 1;                      // I is in T
}

///@}
///@name Access
///@{


///@}
///@name Inquiry
///@{


///@}
///@name Input and output
///@{

/// Turn back information as a string.
std::string Info() const override
{
    return "CalculateSignedDistanceTo3DSkinProcess";
}

/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
    rOStream << "CalculateSignedDistanceTo3DSkinProcess";
}

/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
}

/// Writes the octree leaves as a GiD hexahedra mesh (nodes + elements).
void PrintGiDMesh(std::ostream & rOStream) const
{
    std::vector<CellType*> leaves;

    mpOctree->GetAllLeavesVector(leaves);

    std::cout << "writing " << leaves.size() << " leaves" << std::endl;
    rOStream << "MESH \"leaves\" dimension 3 ElemType Hexahedra Nnode 8" << std::endl;
    rOStream << "# color 96 96 96" << std::endl;
    rOStream << "Coordinates" << std::endl;
    rOStream << "# node number coordinate_x coordinate_y coordinate_z  " << std::endl;

    for(DistanceSpatialContainersConfigure::data_type::const_iterator i_node = mOctreeNodes.begin() ; i_node != mOctreeNodes.end() ; i_node++)
    {
        rOStream << (*i_node)->Id() << "  " << (*i_node)->X() << "  " << (*i_node)->Y() << "  " << (*i_node)->Z() << std::endl;
        //mpOctree->Insert(temp_point);
    }
    std::cout << "Nodes written..." << std::endl;
    rOStream << "end coordinates" << std::endl;
    rOStream << "Elements" << std::endl;
    rOStream << "# Element node_1 node_2 node_3 material_number" << std::endl;

    for (std::size_t i = 0; i < leaves.size(); i++)
    {
        if ((leaves[i]->pGetData()))
        {
            DistanceSpatialContainersConfigure::data_type& nodes = (*(leaves[i]->pGetData()));

            rOStream << i + 1;
            for(int j = 0 ; j < 8 ; j++)
                rOStream << "  " << nodes[j]->Id();
            rOStream << std::endl;
        }
    }
    rOStream << "end Elements" << std::endl;
}

/// Writes the octree node distances as GiD scalar results.
void PrintGiDResults(std::ostream & rOStream) const
{
    std::vector<CellType*> leaves;

    mpOctree->GetAllLeavesVector(leaves);

    rOStream << "GiD Post Results File 1.0" << std::endl << std::endl;

    rOStream << "Result \"Distance\" \"Kratos\" 1 Scalar OnNodes" << std::endl;

    rOStream << "Values" << std::endl;

    for(DistanceSpatialContainersConfigure::data_type::const_iterator i_node = mOctreeNodes.begin() ; i_node != mOctreeNodes.end() ; i_node++)
    {
        rOStream << (*i_node)->Id() << "  " << (*i_node)->Distance() << std::endl;
    }
    rOStream << "End Values" << std::endl;

}

///@}
///@name Friends
///@{


///@}

protected:
///@name Protected static Member Variables
///@{


///@}
///@name Protected member Variables
///@{


///@}
///@name Protected Operators
///@{


///@}
///@name Protected Operations
///@{


///@}
///@name Protected  Access
///@{


///@}
///@name Protected Inquiry
///@{


///@}
///@name Protected LifeCycle
///@{


///@}

private:
///@name Static Member Variables
///@{


///@}
///@name Member Variables
///@{

ModelPart& mrSkinModelPart;    // skin (structure) surface mesh
ModelPart& mrBodyModelPart;    // body mesh used for node-id offsets
ModelPart& mrFluidModelPart;   // fluid volume mesh receiving DISTANCE
DistanceSpatialContainersConfigure::data_type mOctreeNodes; // owned octree corner nodes
Kratos::shared_ptr<OctreeType> mpOctree; // spatial search structure

static const double epsilon;

/**
 * @}
 */
/**
 * calculates the eigenvectors and eigenvalues of given symmetric matrix A.
 * The eigenvectors and eigenvalues are calculated using the iterative
 * Gauss-Seidel-method
 * @param A the given symmetric matrix the eigenvectors are to be calculated.
 * :WARNING: Matrix A will be overwritten and has to be symmetric
 * @param V the result matrix (will be overwritten with the eigenvectors)
 * @param zero_tolerance the largest value considered to be zero
 */
static inline void EigenVectors(const Matrix& A, Matrix& vectors, Vector& lambda, double zero_tolerance =1e-9, int max_iterations = 10)
{
    Matrix Help= A;

    // NOTE(review): this loop assigns each entry to itself — a no-op;
    // presumably a leftover from an earlier transformation.
    for(int i=0; i<3; i++)
        for(int j=0; j<3; j++)
            Help(i,j)= Help(i,j);

    vectors.resize(Help.size1(),Help.size2(),false);
    lambda.resize(Help.size1(),false);

    Matrix HelpDummy(Help.size1(),Help.size2());

    bool is_converged = false;

    Matrix unity=ZeroMatrix(Help.size1(),Help.size2());
    for(unsigned int i=0; i< Help.size1(); i++)
        unity(i,i)= 1.0;

    Matrix V= unity;
    Matrix VDummy(Help.size1(),Help.size2());
    Matrix Rotation(Help.size1(),Help.size2());

    // classic Jacobi rotation sweep: zero the largest off-diagonal entry
    // per iteration until all are below zero_tolerance
    for(int iterations=0; iterations<max_iterations; iterations++)
    {
        is_converged= true;

        double a= 0.0;
        unsigned int index1= 0;
        unsigned int index2= 1;

        for(unsigned int i=0; i< Help.size1(); i++)
        {
            for(unsigned int j=(i+1); j< Help.size2(); j++)
            {
                if((fabs(Help(i,j)) > a ) && (fabs(Help(i,j)) > zero_tolerance))
                {
                    a= fabs(Help(i,j));
                    index1= i;
                    index2= j;
                    is_converged= false;
                }
            }
        }

        // KRATOS_WATCH(Help);

        if(is_converged)
            break;

        //Calculation of Rotationangle
        double gamma= (Help(index2,index2)-Help(index1,index1))/(2*Help(index1,index2));
        double u=1.0;

        if(fabs(gamma) > zero_tolerance && fabs(gamma)< (1/zero_tolerance))
        {
            u= gamma/fabs(gamma)*1.0/(fabs(gamma)+sqrt(1.0+gamma*gamma));
        }
        else
        {
            if  (fabs(gamma)>= (1.0/zero_tolerance))
                u= 0.5/gamma;
        }

        double c= 1.0/(sqrt(1.0+u*u));
        double s= c*u;
        double teta= s/(1.0+c);

        //Ratotion of the Matrix
        HelpDummy= Help;
        HelpDummy(index2,index2)= Help(index2,index2)+u*Help(index1,index2);
        HelpDummy(index1,index1)= Help(index1,index1)-u*Help(index1,index2);
        HelpDummy(index1,index2)= 0.0;
        HelpDummy(index2,index1)= 0.0;

        for(unsigned int i=0; i<Help.size1(); i++)
        {
            if((i!= index1) && (i!= index2))
            {
                HelpDummy(index2,i)=Help(index2,i)+s*(Help(index1,i)- teta*Help(index2,i));
                HelpDummy(i,index2)=Help(index2,i)+s*(Help(index1,i)- teta*Help(index2,i));

                HelpDummy(index1,i)=Help(index1,i)-s*(Help(index2,i)+ teta*Help(index1,i));
                HelpDummy(i,index1)=Help(index1,i)-s*(Help(index2,i)+ teta*Help(index1,i));
            }
        }

        Help= HelpDummy;

        //Calculation of the eigenvectors V
        Rotation =unity;
        Rotation(index2,index1)=-s;
        Rotation(index1,index2)=s;
        Rotation(index1,index1)=c;
        Rotation(index2,index2)=c;

        // Help=ZeroMatrix(A.size1(),A.size1());

        VDummy = ZeroMatrix(Help.size1(), Help.size2());
        for(unsigned int i=0; i< Help.size1(); i++)
        {
            for(unsigned int j=0; j< Help.size1(); j++)
            {
                for(unsigned int k=0; k< Help.size1(); k++)
                {
                    VDummy(i,j) += V(i,k)*Rotation(k,j);
                }
            }
        }
        V= VDummy;
    }

    if(!(is_converged))
    {
        std::cout<<"########################################################"<<std::endl;
        std::cout<<"Max_Iterations exceed in Jacobi-Seidel-Iteration (eigenvectors)"<<std::endl;
        std::cout<<"########################################################"<<std::endl;
    }

    for(unsigned int i=0; i< Help.size1(); i++)
    {
        for(unsigned int j=0; j< Help.size1(); j++)
        {
            vectors(i,j)= V(j,i);
        }
    }

    for(unsigned int i=0; i<Help.size1(); i++)
        lambda(i)= Help(i,i);

    return;
}

/**
 * Splits number_of_rows into number_of_threads contiguous, near-equal ranges.
 * partitions has number_of_threads+1 entries; range i is
 * [partitions[i], partitions[i+1]). Remainder rows go to the last partition.
 */
inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, DenseVector<unsigned int>& partitions)
{
    partitions.resize(number_of_threads + 1);
    int partition_size = number_of_rows / number_of_threads;
    partitions[0] = 0;
    partitions[number_of_threads] = number_of_rows;
    for (unsigned int i = 1; i < number_of_threads; i++)
        partitions[i] = partitions[i - 1] + partition_size;
}

///@}
///@name Private Operators
///@{


///@}
///@name Private Operations
///@{


///@}
///@name Private  Access
///@{


///@}
///@name Private Inquiry
///@{


///@}
///@name Un accessible methods
///@{

/// Assignment operator.
CalculateSignedDistanceTo3DSkinProcess& operator=(CalculateSignedDistanceTo3DSkinProcess const& rOther);

/// Copy constructor.
//CalculateSignedDistanceTo3DSkinProcess(CalculateSignedDistanceTo3DSkinProcess const& rOther);


///@}

}; // Class CalculateSignedDistanceTo3DSkinProcess

///@}

///@name Type Definitions
///@{


///@}
///@name Input and output
///@{

/// input stream function
inline std::istream& operator >> (std::istream& rIStream,
                                  CalculateSignedDistanceTo3DSkinProcess& rThis);

/// output stream function
inline std::ostream& operator << (std::ostream& rOStream,
                                  const CalculateSignedDistanceTo3DSkinProcess& rThis)
{
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);

    return rOStream;
}
///@}

const double CalculateSignedDistanceTo3DSkinProcess::epsilon = 1e-18;

}  // namespace Kratos.

#endif // KRATOS_CALCULATE_DISTANCE_PROCESS_H_INCLUDED  defined
remarks_parallel_in_target_state_machine.c
// RUN: %clang_cc1 -verify=host -Rpass=openmp -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -Rpass=openmp -fopenmp -O2 -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t.out
// RUN: %clang_cc1 -fexperimental-new-pass-manager -verify -Rpass=openmp -fopenmp -O2 -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t.out

// NOTE(review): This test checks the optimization remarks emitted by the
// OpenMP-opt pass when it specializes the state machine of parallel regions
// reached from a unique target region on NVPTX devices.  The text inside
// each {{...}} pattern must match the compiler's diagnostic output verbatim
// (including the existing "nesed" spelling), so do not edit those strings
// independently of the pass that produces them.

// host-no-diagnostics

void bar(void) {           // expected-remark {{[OMP100] Potentially unknown OpenMP target region caller}}
#pragma omp parallel // #1                                                                                                                                                                                                                                                                                                                                           \
                     // expected-remark@#1 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nesed inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} \
                     // expected-remark@#1 {{Parallel region is not known to be called from a unique single target region, maybe the surrounding function has external linkage?; will not attempt to rewrite the state machine use.}}
  {
  }
}

void foo(void) {
#pragma omp target teams // #2                                                                                                                                                \
                         // expected-remark@#2 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__1_wrapper, kernel ID: __omp_offloading}} \
                         // expected-remark@#2 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__3_wrapper, kernel ID: __omp_offloading}}
  {
#pragma omp parallel // #3                                                                                                                                                                                                                                                                                                                                           \
                     // expected-remark@#3 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nesed inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} \
                     // expected-remark@#3 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__1_wrapper, kernel ID: __omp_offloading}}
    {
    }
    bar();
#pragma omp parallel // #4                                                                                                                                                                                                                                                                                                                                           \
                     // expected-remark@#4 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nesed inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} \
                     // expected-remark@#4 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__3_wrapper, kernel ID: __omp_offloading}}
    {
    }
  }
}

void spmd(void) {
  // Verify we do not emit the remarks above for "SPMD" regions.
#pragma omp target teams
#pragma omp parallel
  {
  }

#pragma omp target teams distribute parallel for
  for (int i = 0; i < 100; ++i) {
  }
}

// expected-remark@* {{OpenMP runtime call __kmpc_global_thread_num moved to}}
// expected-remark@* {{OpenMP runtime call __kmpc_global_thread_num deduplicated}}
colorspace.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE % % C O O L O O R R SS P P A A C E % % C O O L O O RRRR SSS PPPP AAAAA C EEE % % C O O L O O R R SS P A A C E % % CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE % % % % % % MagickCore Image Colorspace Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/attribute.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/enhance.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/gem.h" #include "magick/gem-private.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/utility.h" /* Typedef declarations. */ typedef struct _TransformPacket { MagickRealType x, y, z; } TransformPacket; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C o l o r s p a c e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageColorspaceType() returns the potential colorspace of image: % sRGBColorspaceType, RGBColorspaceType, GRAYColorspaceType, etc. % % To ensure the image type matches its potential, use SetImageColorspaceType(): % % (void) SetImageColorspaceType(image,GetImageColorspaceType(image), % exception); % % The format of the GetImageColorspaceType method is: % % ColorspaceType GetImageColorspaceType(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport ColorspaceType GetImageColorspaceType(const Image *image, ExceptionInfo *exception) { ColorspaceType colorspace; ImageType type; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); colorspace=image->colorspace; type=IdentifyImageType(image,exception); if ((type == BilevelType) || (type == GrayscaleType) || (type == GrayscaleMatteType)) colorspace=GRAYColorspace; return(colorspace); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R G B T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RGBTransformImage() converts the reference image from sRGB to an alternate % colorspace. The transformation matrices are not the standard ones: the % weights are rescaled to normalized the range of the transformed values to % be [0..QuantumRange]. % % The format of the RGBTransformImage method is: % % MagickBooleanType RGBTransformImage(Image *image, % const ColorspaceType colorspace) % % A description of each parameter follows: % % o image: the image. % % o colorspace: the colorspace to transform the image to. 
% */ static inline void ConvertRGBToCMY(const Quantum red,const Quantum green, const Quantum blue,double *cyan,double *magenta,double *yellow) { *cyan=QuantumScale*(QuantumRange-red); *magenta=QuantumScale*(QuantumRange-green); *yellow=QuantumScale*(QuantumRange-blue); } static void ConvertRGBToLab(const Quantum red,const Quantum green, const Quantum blue,double *L,double *a,double *b) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLab(X,Y,Z,L,a,b); } static inline void ConvertXYZToLMS(const double x,const double y, const double z,double *L,double *M,double *S) { *L=0.7328*x+0.4296*y-0.1624*z; *M=(-0.7036*x+1.6975*y+0.0061*z); *S=0.0030*x+0.0136*y+0.9834*z; } static void ConvertRGBToLMS(const Quantum red,const Quantum green, const Quantum blue,double *L,double *M,double *S) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLMS(X,Y,Z,L,M,S); } static void ConvertRGBToLuv(const Quantum red,const Quantum green, const Quantum blue,double *L,double *u,double *v) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLuv(X,Y,Z,L,u,v); } static void ConvertRGBToxyY(const Quantum red,const Quantum green, const Quantum blue,double *low_x,double *low_y,double *cap_Y) { double gamma, X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); gamma=PerceptibleReciprocal(X+Y+Z); *low_x=gamma*X; *low_y=gamma*Y; *cap_Y=Y; } static void ConvertRGBToYPbPr(const Quantum red,const Quantum green, const Quantum blue,double *Y,double *Pb,double *Pr) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *Pb=QuantumScale*((-0.1687367)*red-0.331264*green+0.5*blue)+0.5; *Pr=QuantumScale*(0.5*red-0.418688*green-0.081312*blue)+0.5; } static void ConvertRGBToYCbCr(const Quantum red,const Quantum green, const Quantum blue,double *Y,double *Cb,double *Cr) { ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr); } static void ConvertRGBToYUV(const Quantum red,const Quantum green, const Quantum blue,double *Y,double *U,double *V) { 
*Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *U=QuantumScale*((-0.147)*red-0.289*green+0.436*blue)+0.5; *V=QuantumScale*(0.615*red-0.515*green-0.100*blue)+0.5; } static void ConvertRGBToYDbDr(const Quantum red,const Quantum green, const Quantum blue,double *Y,double *Db,double *Dr) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *Db=QuantumScale*(-0.450*red-0.883*green+1.333*blue)+0.5; *Dr=QuantumScale*(-1.333*red+1.116*green+0.217*blue)+0.5; } static void ConvertRGBToYIQ(const Quantum red,const Quantum green, const Quantum blue,double *Y,double *I,double *Q) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *I=QuantumScale*(0.595716*red-0.274453*green-0.321263*blue)+0.5; *Q=QuantumScale*(0.211456*red-0.522591*green+0.311135*blue)+0.5; } MagickExport MagickBooleanType RGBTransformImage(Image *image, const ColorspaceType colorspace) { #define RGBTransformImageTag "RGBTransform/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; PrimaryInfo primary_info; register ssize_t i; ssize_t y; TransformPacket *x_map, *y_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(colorspace != sRGBColorspace); assert(colorspace != TransparentColorspace); assert(colorspace != UndefinedColorspace); status=MagickTrue; progress=0; exception=(&image->exception); switch (colorspace) { case CMYKColorspace: { MagickPixelPacket zero; /* Convert RGB to CMYK colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); GetMagickPixelPacket(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); pixel.red=(MagickRealType) pixel.red; pixel.green=(MagickRealType) pixel.green; pixel.blue=(MagickRealType) pixel.blue; ConvertRGBToCMYK(&pixel); SetPixelPacket(image,&pixel,q,indexes+x); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->type=image->matte == MagickFalse ? ColorSeparationType : ColorSeparationMatteType; if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case LinearGRAYColorspace: case GRAYColorspace: { /* Transform image from sRGB to GRAY. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelGray(q,ClampToQuantum(GetPixelIntensity(image,q))); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); image->type=GrayscaleType; return(status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* Transform image from sRGB to HSI. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double X, Y, Z; Quantum blue, green, red; red=ClampToQuantum((MagickRealType) GetPixelRed(q)); green=ClampToQuantum((MagickRealType) GetPixelGreen(q)); blue=ClampToQuantum((MagickRealType) GetPixelBlue(q)); switch (colorspace) { case CMYColorspace: { ConvertRGBToCMY(red,green,blue,&X,&Y,&Z); break; } case HCLColorspace: { ConvertRGBToHCL(red,green,blue,&X,&Y,&Z); break; } case HCLpColorspace: { ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z); break; } case HSBColorspace: { ConvertRGBToHSB(red,green,blue,&X,&Y,&Z); break; } case HSIColorspace: { ConvertRGBToHSI(red,green,blue,&X,&Y,&Z); break; } case HSLColorspace: { ConvertRGBToHSL(red,green,blue,&X,&Y,&Z); break; } case HSVColorspace: { ConvertRGBToHSV(red,green,blue,&X,&Y,&Z); break; } case HWBColorspace: { ConvertRGBToHWB(red,green,blue,&X,&Y,&Z); break; } case LabColorspace: { ConvertRGBToLab(red,green,blue,&X,&Y,&Z); break; } case LCHColorspace: case LCHabColorspace: { ConvertRGBToLCHab(red,green,blue,&X,&Y,&Z); break; } case LCHuvColorspace: { ConvertRGBToLCHuv(red,green,blue,&X,&Y,&Z); break; } case LMSColorspace: { ConvertRGBToLMS(red,green,blue,&X,&Y,&Z); break; } case LuvColorspace: { ConvertRGBToLuv(red,green,blue,&X,&Y,&Z); break; } case xyYColorspace: { 
ConvertRGBToxyY(red,green,blue,&X,&Y,&Z); break; } case XYZColorspace: { ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); break; } case YCbCrColorspace: { ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z); break; } case YDbDrColorspace: { ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z); break; } case YIQColorspace: { ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z); break; } case YPbPrColorspace: { ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z); break; } case YUVColorspace: { ConvertRGBToYUV(red,green,blue,&X,&Y,&Z); break; } default: { X=QuantumScale*red; Y=QuantumScale*green; Z=QuantumScale*blue; break; } } SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*X)); SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*Y)); SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*Z)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { #define DisplayGamma (1.0/1.7) #define FilmGamma 0.6 #define ReferenceBlack 95.0 #define ReferenceWhite 685.0 const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform RGB to Log colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma"); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma"); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black"); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white"); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/ film_gamma); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) logmap[i]=ScaleMapToQuantum((MagickRealType) (MaxMap*(reference_white+ log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002/ film_gamma))/1024.0)); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { Quantum blue, green, red; red=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelGreen(q))); 
blue=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelBlue(q))); SetPixelRed(q,logmap[ScaleQuantumToMap(red)]); SetPixelGreen(q,logmap[ScaleQuantumToMap(green)]); SetPixelBlue(q,logmap[ScaleQuantumToMap(blue)]); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform image from sRGB to linear RGB. */ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { Quantum blue, green, red; red=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelGreen(q))); blue=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelBlue(q))); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. 
*/ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) ResetMagickMemory(&primary_info,0,sizeof(primary_info)); switch (colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: I1 = 0.33333*R+0.33334*G+0.33333*B I2 = 0.50000*R+0.00000*G-0.50000*B I3 =-0.25000*R+0.50000*G-0.25000*B I and Q, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.33333*(double) i); y_map[i].x=(MagickRealType) (0.33334*(double) i); z_map[i].x=(MagickRealType) (0.33333*(double) i); x_map[i].y=(MagickRealType) (0.50000*(double) i); y_map[i].y=(MagickRealType) (0.00000*(double) i); z_map[i].y=(MagickRealType) (-0.50000*(double) i); x_map[i].z=(MagickRealType) (-0.25000*(double) i); y_map[i].z=(MagickRealType) (0.50000*(double) i); z_map[i].z=(MagickRealType) (-0.25000*(double) i); } break; } case Rec601LumaColorspace: { /* Initialize Rec601 luma tables: G = 0.298839*R+0.586811*G+0.114350*B */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.298839*(double) i); y_map[i].x=(MagickRealType) (0.586811*(double) i); z_map[i].x=(MagickRealType) (0.114350*(double) i); x_map[i].y=(MagickRealType) (0.298839*(double) i); y_map[i].y=(MagickRealType) (0.586811*(double) i); z_map[i].y=(MagickRealType) (0.114350*(double) i); x_map[i].z=(MagickRealType) (0.298839*(double) i); y_map[i].z=(MagickRealType) (0.586811*(double) i); z_map[i].z=(MagickRealType) (0.114350*(double) i); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.601): Y = 0.2988390*R+0.5868110*G+0.1143500*B Cb= -0.1687367*R-0.3312640*G+0.5000000*B Cr= 0.5000000*R-0.4186880*G-0.0813120*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.298839*(double) i); y_map[i].x=(MagickRealType) (0.586811*(double) i); z_map[i].x=(MagickRealType) (0.114350*(double) i); x_map[i].y=(MagickRealType) (-0.1687367*(double) i); y_map[i].y=(MagickRealType) (-0.331264*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].z=(MagickRealType) (-0.418688*(double) i); z_map[i].z=(MagickRealType) (-0.081312*(double) i); } break; } case Rec709LumaColorspace: { /* Initialize Rec709 luma tables: G = 0.212656*R+0.715158*G+0.072186*B */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.212656*(double) i); y_map[i].x=(MagickRealType) (0.715158*(double) i); z_map[i].x=(MagickRealType) (0.072186*(double) i); x_map[i].y=(MagickRealType) (0.212656*(double) i); y_map[i].y=(MagickRealType) (0.715158*(double) i); z_map[i].y=(MagickRealType) (0.072186*(double) i); x_map[i].z=(MagickRealType) (0.212656*(double) i); y_map[i].z=(MagickRealType) (0.715158*(double) i); z_map[i].z=(MagickRealType) (0.072186*(double) i); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.709): Y = 0.212656*R+0.715158*G+0.072186*B Cb= -0.114572*R-0.385428*G+0.500000*B Cr= 0.500000*R-0.454153*G-0.045847*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.212656*(double) i); y_map[i].x=(MagickRealType) (0.715158*(double) i); z_map[i].x=(MagickRealType) (0.072186*(double) i); x_map[i].y=(MagickRealType) (-0.114572*(double) i); y_map[i].y=(MagickRealType) (-0.385428*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].z=(MagickRealType) (-0.454153*(double) i); z_map[i].z=(MagickRealType) (-0.045847*(double) i); } break; } case YCCColorspace: { /* Initialize YCC tables: Y = 0.298839*R+0.586811*G+0.114350*B C1= -0.298839*R-0.586811*G+0.88600*B C2= 0.70100*R-0.586811*G-0.114350*B YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. */ primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156)); primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137)); for (i=0; i <= (ssize_t) (0.018*MaxMap); i++) { x_map[i].x=0.005382*i; y_map[i].x=0.010566*i; z_map[i].x=0.002052*i; x_map[i].y=(-0.003296)*i; y_map[i].y=(-0.006471)*i; z_map[i].y=0.009768*i; x_map[i].z=0.009410*i; y_map[i].z=(-0.007880)*i; z_map[i].z=(-0.001530)*i; } for ( ; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.298839*(1.099*i-0.099); y_map[i].x=0.586811*(1.099*i-0.099); z_map[i].x=0.114350*(1.099*i-0.099); x_map[i].y=(-0.298839)*(1.099*i-0.099); y_map[i].y=(-0.586811)*(1.099*i-0.099); z_map[i].y=0.88600*(1.099*i-0.099); x_map[i].z=0.70100*(1.099*i-0.099); y_map[i].z=(-0.586811)*(1.099*i-0.099); z_map[i].z=(-0.114350)*(1.099*i-0.099); } break; } default: { /* Linear conversion tables. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert from sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. */ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register ssize_t x; register PixelPacket *magick_restrict q; register size_t blue, green, red; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelRed(q))); green=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelGreen(q))); blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelBlue(q))); pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+ (MagickRealType) primary_info.x; pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+ (MagickRealType) primary_info.y; pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+ (MagickRealType) primary_info.z; SetPixelRed(q,ScaleMapToQuantum(pixel.red)); SetPixelGreen(q,ScaleMapToQuantum(pixel.green)); SetPixelBlue(q,ScaleMapToQuantum(pixel.blue)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != 
(MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RGBTransformImage) #endif proceed=SetImageProgress(image,RGBTransformImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { register size_t blue, green, red; /* Convert PseudoClass image. */ for (i=0; i < (ssize_t) image->colors; i++) { MagickPixelPacket pixel; red=ScaleQuantumToMap(ClampToQuantum((MagickRealType) image->colormap[i].red)); green=ScaleQuantumToMap(ClampToQuantum((MagickRealType) image->colormap[i].green)); blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType) image->colormap[i].blue)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z; image->colormap[i].red=ScaleMapToQuantum(pixel.red); image->colormap[i].green=ScaleMapToQuantum(pixel.green); image->colormap[i].blue=ScaleMapToQuantum(pixel.blue); } (void) SyncImage(image); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColorspace() sets the colorspace member of the Image structure. % % The format of the SetImageColorspace method is: % % MagickBooleanType SetImageColorspace(Image *image, % const ColorspaceType colorspace) % % A description of each parameter follows: % % o image: the image. % % o colorspace: the colorspace. 
% */ MagickExport MagickBooleanType SetImageColorspace(Image *image, const ColorspaceType colorspace) { ImageType type; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->colorspace == colorspace) return(MagickTrue); image->colorspace=colorspace; image->rendering_intent=UndefinedIntent; image->gamma=1.000/2.200; (void) ResetMagickMemory(&image->chromaticity,0,sizeof(image->chromaticity)); type=image->type; if (IsGrayColorspace(colorspace) != MagickFalse) { if (colorspace == LinearGRAYColorspace) image->gamma=1.0; type=GrayscaleType; } else if ((IsRGBColorspace(colorspace) != MagickFalse) || (colorspace == XYZColorspace) || (colorspace == xyYColorspace)) image->gamma=1.0; else { image->rendering_intent=PerceptualIntent; image->chromaticity.red_primary.x=0.6400; image->chromaticity.red_primary.y=0.3300; image->chromaticity.red_primary.z=0.0300; image->chromaticity.green_primary.x=0.3000; image->chromaticity.green_primary.y=0.6000; image->chromaticity.green_primary.z=0.1000; image->chromaticity.blue_primary.x=0.1500; image->chromaticity.blue_primary.y=0.0600; image->chromaticity.blue_primary.z=0.7900; image->chromaticity.white_point.x=0.3127; image->chromaticity.white_point.y=0.3290; image->chromaticity.white_point.z=0.3583; } status=SyncImagePixelCache(image,&image->exception); image->type=type; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e G r a y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageGray() returns MagickTrue if all the pixels in the image have the % same red, green, and blue intensities and changes the type of the image to % bi-level or grayscale. 
%
%  The format of the SetImageGray method is:
%
%      MagickBooleanType SetImageGray(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageGray(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *value;

  ImageType
    type;

  register const PixelPacket
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Already classified as gray?  Nothing left to prove.
  */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleMatteType))
    return(MagickTrue);
  if ((IsGrayColorspace(image->colorspace) == MagickFalse) &&
      (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse))
    return(MagickFalse);
  value=GetImageProperty(image,"colorspace:auto-grayscale");
  if (IsStringNotFalse(value) == MagickFalse)
    return(MagickFalse);
  /*
    Scan every pixel: start with the strongest claim (bi-level) and weaken it
    to grayscale -- or abandon it entirely -- as pixels disprove it.
  */
  type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsGrayPixel(p) == MagickFalse)
        {
          type=UndefinedType;
          break;
        }
      if ((type == BilevelType) && (IsMonochromePixel(p) == MagickFalse))
        type=GrayscaleType;
      p++;
    }
    if (type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if (type == UndefinedType)
    return(MagickFalse);
  /*
    Every pixel is gray: record the grayscale colorspace and image type.
  */
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=type;
  if ((type == GrayscaleType) && (image->matte != MagickFalse))
    image->type=GrayscaleMatteType;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e M o n
o c h r o m e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageMonochrome() returns MagickTrue if all the pixels in the image have
%  the same red, green, and blue intensities and the intensity is either
%  0 or QuantumRange and changes the type of the image to bi-level.
%
%  The format of the SetImageMonochrome method is:
%
%      MagickBooleanType SetImageMonochrome(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMonochrome(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *value;

  ImageType
    type;

  register const PixelPacket
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Already bi-level?  Nothing left to prove.
  */
  if (image->type == BilevelType)
    return(MagickTrue);
  if ((IsGrayColorspace(image->colorspace) == MagickFalse) &&
      (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse))
    return(MagickFalse);
  value=GetImageProperty(image,"colorspace:auto-grayscale");
  if (IsStringNotFalse(value) == MagickFalse)
    return(MagickFalse);
  /*
    Scan every pixel; a single non-monochrome pixel disqualifies the image.
  */
  type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsMonochromePixel(p) == MagickFalse)
        {
          type=UndefinedType;
          break;
        }
      p++;
    }
    if (type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if (type == UndefinedType)
    return(MagickFalse);
  /*
    Every pixel is monochrome: record the grayscale colorspace and bi-level
    type.
  */
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=type;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f o r m I m a g e C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformImageColorspace() transforms an image colorspace. % % The format of the TransformImageColorspace method is: % % MagickBooleanType TransformImageColorspace(Image *image, % const ColorspaceType colorspace) % % A description of each parameter follows: % % o image: the image. % % o colorspace: the colorspace. % */ MagickExport MagickBooleanType TransformImageColorspace(Image *image, const ColorspaceType colorspace) { MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->colorspace == colorspace) return(MagickTrue); (void) DeleteImageProfile(image,"icc"); (void) DeleteImageProfile(image,"icm"); if (colorspace == LinearGRAYColorspace) return(GrayscaleImage(image,Rec709LuminancePixelIntensityMethod)); if (colorspace == GRAYColorspace) return(GrayscaleImage(image,Rec709LumaPixelIntensityMethod)); if (colorspace == UndefinedColorspace) return(SetImageColorspace(image,colorspace)); /* Convert the reference image from an alternate colorspace to sRGB. */ if (IssRGBColorspace(colorspace) != MagickFalse) return(TransformRGBImage(image,image->colorspace)); status=MagickTrue; if (IssRGBColorspace(image->colorspace) == MagickFalse) status=TransformRGBImage(image,image->colorspace); if (status == MagickFalse) return(status); /* Convert the reference image from sRGB to an alternate colorspace. 
*/ if (RGBTransformImage(image,colorspace) == MagickFalse) status=MagickFalse; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + T r a n s f o r m R G B I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformRGBImage() converts the reference image from an alternate % colorspace to sRGB. The transformation matrices are not the standard ones: % the weights are rescaled to normalize the range of the transformed values to % be [0..QuantumRange]. % % The format of the TransformRGBImage method is: % % MagickBooleanType TransformRGBImage(Image *image, % const ColorspaceType colorspace) % % A description of each parameter follows: % % o image: the image. % % o colorspace: the colorspace to transform the image to. % */ static inline void ConvertCMYToRGB(const double cyan,const double magenta, const double yellow,Quantum *red,Quantum *green,Quantum *blue) { *red=ClampToQuantum(QuantumRange*(1.0-cyan)); *green=ClampToQuantum(QuantumRange*(1.0-magenta)); *blue=ClampToQuantum(QuantumRange*(1.0-yellow)); } static inline void ConvertLMSToXYZ(const double L,const double M,const double S, double *X,double *Y,double *Z) { *X=1.096123820835514*L-0.278869000218287*M+0.182745179382773*S; *Y=0.454369041975359*L+0.473533154307412*M+0.072097803717229*S; *Z=(-0.009627608738429)*L-0.005698031216113*M+1.015325639954543*S; } static inline void ConvertLMSToRGB(const double L,const double M, const double S,Quantum *red,Quantum *green,Quantum *blue) { double X, Y, Z; ConvertLMSToXYZ(L,M,S,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static inline void ConvertLuvToRGB(const double L,const double u, const double v,Quantum *red,Quantum *green,Quantum *blue) { double X, Y, Z; ConvertLuvToXYZ(100.0*L,354.0*u-134.0,262.0*v-140.0,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static inline ssize_t RoundToYCC(const MagickRealType value) { if (value <= 0.0) return(0); 
if (value >= 1388.0) return(1388); return((ssize_t) (value+0.5)); } static inline void ConvertLabToRGB(const double L,const double a, const double b,Quantum *red,Quantum *green,Quantum *blue) { double X, Y, Z; ConvertLabToXYZ(100.0*L,255.0*(a-0.5),255.0*(b-0.5),&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static inline void ConvertxyYToRGB(const double low_x,const double low_y, const double cap_Y,Quantum *red,Quantum *green,Quantum *blue) { double gamma, X, Y, Z; gamma=PerceptibleReciprocal(low_y); X=gamma*cap_Y*low_x; Y=cap_Y; Z=gamma*cap_Y*(1.0-low_x-low_y); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr, Quantum *red,Quantum *green,Quantum *blue) { *red=ClampToQuantum(QuantumRange*(0.99999999999914679361*Y- 1.2188941887145875e-06*(Pb-0.5)+1.4019995886561440468*(Pr-0.5))); *green=ClampToQuantum(QuantumRange*(0.99999975910502514331*Y- 0.34413567816504303521*(Pb-0.5)-0.71413649331646789076*(Pr-0.5))); *blue=ClampToQuantum(QuantumRange*(1.00000124040004623180*Y+ 1.77200006607230409200*(Pb-0.5)+2.1453384174593273e-06*(Pr-0.5))); } static void ConvertYCbCrToRGB(const double Y,const double Cb, const double Cr,Quantum *red,Quantum *green,Quantum *blue) { ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue); } static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr, Quantum *red,Quantum *green,Quantum *blue) { *red=ClampToQuantum(QuantumRange*(Y+9.2303716147657e-05*(Db-0.5)- 0.52591263066186533*(Dr-0.5))); *green=ClampToQuantum(QuantumRange*(Y-0.12913289889050927*(Db-0.5)+ 0.26789932820759876*(Dr-0.5))); *blue=ClampToQuantum(QuantumRange*(Y+0.66467905997895482*(Db-0.5)- 7.9202543533108e-05*(Dr-0.5))); } static void ConvertYIQToRGB(const double Y,const double I,const double Q, Quantum *red,Quantum *green,Quantum *blue) { *red=ClampToQuantum(QuantumRange*(Y+0.9562957197589482261*(I-0.5)+ 0.6210244164652610754*(Q-0.5))); 
*green=ClampToQuantum(QuantumRange*(Y-0.2721220993185104464*(I-0.5)- 0.6473805968256950427*(Q-0.5))); *blue=ClampToQuantum(QuantumRange*(Y-1.1069890167364901945*(I-0.5)+ 1.7046149983646481374*(Q-0.5))); } static void ConvertYUVToRGB(const double Y,const double U,const double V, Quantum *red,Quantum *green,Quantum *blue) { *red=ClampToQuantum(QuantumRange*(Y-3.945707070708279e-05*(U-0.5)+ 1.1398279671717170825*(V-0.5))); *green=ClampToQuantum(QuantumRange*(Y-0.3946101641414141437*(U-0.5)- 0.5805003156565656797*(V-0.5))); *blue=ClampToQuantum(QuantumRange*(Y+2.0319996843434342537*(U-0.5)- 4.813762626262513e-04*(V-0.5))); } MagickExport MagickBooleanType TransformRGBImage(Image *image, const ColorspaceType colorspace) { #define TransformRGBImageTag "Transform/Image" static const float YCCMap[1389] = { 0.000000, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f, 0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f, 0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f, 0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f, 0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f, 0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f, 0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f, 0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f, 0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f, 0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f, 0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f, 0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f, 0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f, 0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f, 0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f, 0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f, 0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f, 0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f, 
0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f, 0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f, 0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f, 0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f, 0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f, 0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f, 0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f, 0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f, 0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f, 0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f, 0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f, 0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f, 0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f, 0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f, 0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f, 0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f, 0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f, 0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f, 0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f, 0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f, 0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f, 0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f, 0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f, 0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f, 0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f, 0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f, 0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f, 0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f, 0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f, 0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f, 0.207493f, 
0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f, 0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f, 0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f, 0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f, 0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f, 0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f, 0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f, 0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f, 0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f, 0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f, 0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f, 0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f, 0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f, 0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f, 0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f, 0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f, 0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f, 0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f, 0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f, 0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f, 0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f, 0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f, 0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f, 0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f, 0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f, 0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f, 0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f, 0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f, 0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f, 0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f, 0.337176f, 0.337896f, 
0.338617f, 0.339337f, 0.340058f, 0.340778f, 0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f, 0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f, 0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f, 0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f, 0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f, 0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f, 0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f, 0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f, 0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f, 0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f, 0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f, 0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f, 0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f, 0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f, 0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f, 0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f, 0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f, 0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f, 0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f, 0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f, 0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f, 0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f, 0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f, 0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f, 0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f, 0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f, 0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f, 0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f, 0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f, 0.466859f, 0.467579f, 0.468300f, 
0.469020f, 0.469741f, 0.470461f, 0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f, 0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f, 0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f, 0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f, 0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f, 0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f, 0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f, 0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f, 0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f, 0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f, 0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f, 0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f, 0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f, 0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f, 0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f, 0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f, 0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f, 0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f, 0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f, 0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f, 0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f, 0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f, 0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f, 0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f, 0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f, 0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f, 0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f, 0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f, 0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f, 0.596542f, 0.597262f, 0.597983f, 0.598703f, 
0.599424f, 0.600144f, 0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f, 0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f, 0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f, 0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f, 0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f, 0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f, 0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f, 0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f, 0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f, 0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f, 0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f, 0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f, 0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f, 0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f, 0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f, 0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f, 0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f, 0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f, 0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f, 0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f, 0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f, 0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f, 0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f, 0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f, 0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f, 0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f, 0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f, 0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f, 0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f, 0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 
0.729827f, 0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f, 0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f, 0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f, 0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f, 0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f, 0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f, 0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f, 0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f, 0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f, 0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f, 0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f, 0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f, 0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f, 0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f, 0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f, 0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f, 0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 0.803314f, 0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f, 0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f, 0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f, 0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f, 0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f, 0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f, 0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f, 0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f, 0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f, 0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f, 0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f, 0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f, 0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f, 
0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f, 0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f, 0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f, 0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f, 0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f, 0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f, 0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f, 0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f, 0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f, 0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f, 0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f, 0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f, 0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f, 0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f, 0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f, 0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f, 0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f, 0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f, 0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f, 0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f, 0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f, 0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f, 0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f, 0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f, 0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f, 0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f, 0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f, 0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f, 0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f, 0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f, 0.989914f, 
0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f, 0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f, 0.998559f, 0.999280f, 1.000000 }; CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; TransformPacket *y_map, *x_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; progress=0; exception=(&image->exception); switch (colorspace) { case CMYKColorspace: { MagickPixelPacket zero; /* Transform image from CMYK to sRGB. */ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } GetMagickPixelPacket(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); ConvertCMYKToRGB(&pixel); SetPixelPacket(image,&pixel,q,indexes+x); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case GRAYColorspace: 
case Rec601LumaColorspace: case Rec709LumaColorspace: { /* Transform linear RGB to sRGB colorspace. */ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { MagickRealType gray; gray=(MagickRealType) GetPixelGray(q); if ((image->intensity == Rec601LuminancePixelIntensityMethod) || (image->intensity == Rec709LuminancePixelIntensityMethod)) gray=EncodePixelGamma(gray); SetPixelRed(q,ClampToQuantum(gray)); SetPixelGreen(q,ClampToQuantum(gray)); SetPixelBlue(q,ClampToQuantum(gray)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* Transform image from source colorspace to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double X, Y, Z; Quantum blue, green, red; X=QuantumScale*GetPixelRed(q); Y=QuantumScale*GetPixelGreen(q); Z=QuantumScale*GetPixelBlue(q); switch (colorspace) { case CMYColorspace: { ConvertCMYToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLColorspace: { ConvertHCLToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLpColorspace: { ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue); break; } case HSBColorspace: { ConvertHSBToRGB(X,Y,Z,&red,&green,&blue); break; } case HSIColorspace: { ConvertHSIToRGB(X,Y,Z,&red,&green,&blue); break; } case HSLColorspace: { ConvertHSLToRGB(X,Y,Z,&red,&green,&blue); break; } case HSVColorspace: { ConvertHSVToRGB(X,Y,Z,&red,&green,&blue); break; } case HWBColorspace: { ConvertHWBToRGB(X,Y,Z,&red,&green,&blue); break; } case LabColorspace: { ConvertLabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHColorspace: case LCHabColorspace: { ConvertLCHabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHuvColorspace: { ConvertLCHuvToRGB(X,Y,Z,&red,&green,&blue); break; } case LMSColorspace: { ConvertLMSToRGB(X,Y,Z,&red,&green,&blue); break; } case LuvColorspace: { ConvertLuvToRGB(X,Y,Z,&red,&green,&blue); break; } case xyYColorspace: { ConvertxyYToRGB(X,Y,Z,&red,&green,&blue); break; } case XYZColorspace: { 
ConvertXYZToRGB(X,Y,Z,&red,&green,&blue); break; } case YCbCrColorspace: { ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue); break; } case YDbDrColorspace: { ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue); break; } case YIQColorspace: { ConvertYIQToRGB(X,Y,Z,&red,&green,&blue); break; } case YPbPrColorspace: { ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue); break; } case YUVColorspace: { ConvertYUVToRGB(X,Y,Z,&red,&green,&blue); break; } default: { red=ClampToQuantum(QuantumRange*X); green=ClampToQuantum(QuantumRange*Y); blue=ClampToQuantum(QuantumRange*Z); break; } } SetPixelRed(q,ClampToQuantum((MagickRealType) red)); SetPixelGreen(q,ClampToQuantum((MagickRealType) green)); SetPixelBlue(q,ClampToQuantum((MagickRealType) blue)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform Log to sRGB colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma"); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma"); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black"); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white"); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/ film_gamma); for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++) logmap[i]=(Quantum) 0; for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++) logmap[i]=ClampToQuantum((MagickRealType) QuantumRange/(1.0-black)* (pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002/ film_gamma)-black)); for ( ; i <= (ssize_t) MaxMap; i++) logmap[i]=QuantumRange; if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { 
status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { Quantum blue, green, red; red=ClampToQuantum(EncodePixelGamma((MagickRealType) logmap[ScaleQuantumToMap(GetPixelRed(q))])); green=ClampToQuantum(EncodePixelGamma((MagickRealType) logmap[ScaleQuantumToMap(GetPixelGreen(q))])); blue=ClampToQuantum(EncodePixelGamma((MagickRealType) logmap[ScaleQuantumToMap(GetPixelBlue(q))])); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform linear RGB to sRGB colorspace. */ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { Quantum blue, green, red; red=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelGreen(q))); blue=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelBlue(q))); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync 
== MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. */ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } switch (colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: R = I1+1.00000*I2-0.66668*I3 G = I1+0.00000*I2+1.33333*I3 B = I1-1.00000*I2-0.66668*I3 I and Q, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(1.0*(double) i); y_map[i].x=(0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].x=(-0.5*0.66668*(2.0*(double) i-MaxMap)); x_map[i].y=(1.0*(double) i); y_map[i].y=(0.5*0.00000*(2.0*(double) i-MaxMap)); z_map[i].y=(0.5*1.33333*(2.0*(double) i-MaxMap)); x_map[i].z=(1.0*(double) i); y_map[i].z=(-0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].z=(-0.5*0.66668*(2.0*(double) i-MaxMap)); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.402000*Cr G = Y-0.344136*Cb-0.714136*Cr B = Y+1.772000*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.99999999999914679361*(double) i; y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap); z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap); x_map[i].y=0.99999975910502514331*(double) i; y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap); z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap); x_map[i].z=1.00000124040004623180*(double) i; y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap); z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.574800*Cr G = Y-0.187324*Cb-0.468124*Cr B = Y+1.855600*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*(double) i-MaxMap)); z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*(double) i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*(double) i); y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*(double) i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*(double) i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*(double) i); y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*(double) i-MaxMap)); z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*(double) i-MaxMap)); } break; } case YCCColorspace: { /* Initialize YCC tables: R = Y +1.340762*C2 G = Y-0.317038*C1-0.682243*C2 B = Y+1.632639*C1 YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.3584000*(double) i); y_map[i].x=(MagickRealType) (0.0000000); z_map[i].x=(MagickRealType) (1.8215000*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].y=(MagickRealType) (1.3584000*(double) i); y_map[i].y=(MagickRealType) ((-0.4302726)*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].y=(MagickRealType) ((-0.9271435)*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].z=(MagickRealType) (1.3584000*(double) i); y_map[i].z=(MagickRealType) (2.2179000*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].z=(MagickRealType) (0.0000000); } break; } default: { /* Linear conversion tables. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert to sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. 
*/ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register size_t blue, green, red; red=ScaleQuantumToMap(GetPixelRed(q)); green=ScaleQuantumToMap(GetPixelGreen(q)); blue=ScaleQuantumToMap(GetPixelBlue(q)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } SetPixelRed(q,ClampToQuantum(pixel.red)); SetPixelGreen(q,ClampToQuantum(pixel.green)); SetPixelBlue(q,ClampToQuantum(pixel.blue)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransformRGBImage) #endif proceed=SetImageProgress(image,TransformRGBImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case 
PseudoClass: { /* Convert PseudoClass image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->colors,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { MagickPixelPacket pixel; register size_t blue, green, red; red=ScaleQuantumToMap(image->colormap[i].red); green=ScaleQuantumToMap(image->colormap[i].green); blue=ScaleQuantumToMap(image->colormap[i].blue); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } image->colormap[i].red=ClampToQuantum(pixel.red); image->colormap[i].green=ClampToQuantum(pixel.green); image->colormap[i].blue=ClampToQuantum(pixel.blue); } (void) SyncImage(image); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(MagickTrue); }
DRB062-matrixvector2-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* Matrix-vector multiplication: inner level parallelization. */ #define N 1000 #include <omp.h> double a[1000][1000]; double v[1000]; double v_out[1000]; int init() { int i; int j; int k; #pragma omp parallel for private (i,j) for (i = 0; i <= 999; i += 1) { #pragma omp parallel for private (j) for (j = 0; j <= 999; j += 1) { a[i][j] = (i * j) + 0.01; } v_out[i] = (i * j) + 0.01; v[i] = (i * j) + 0.01; } return 0; } void mv() { int i; int j; #pragma omp parallel for private (i,j) for (i = 0; i <= 999; i += 1) { double sum = 0.0; #pragma omp parallel for private (j) reduction (+:sum) for (j = 0; j <= 999; j += 1) { sum += a[i][j] * v[j]; } v_out[i] = sum; } } int print() { int i; int j; int k; for (i = 0; i <= 999; i += 1) { for (j = 0; j <= 999; j += 1) { printf("%lf\n",a[i][j]); } printf("%lf\n",v_out[i]); printf("%lf\n",v[i]); } return 0; } int main() { init(); mv(); print(); return 0; }
scaling_solver.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Riccardo Rossi
//

#if !defined(KRATOS_SCALING_SOLVER_H_INCLUDED )
#define KRATOS_SCALING_SOLVER_H_INCLUDED

// System includes
#include <cmath>
#include <complex>

// External includes

// Project includes
#include "includes/define.h"
#include "factories/linear_solver_factory.h"
#include "linear_solvers/linear_solver.h"
#include "utilities/openmp_utils.h"

namespace Kratos
{

///@name Kratos Globals
///@{

///@}
///@name Type Definitions
///@{

///@}
///@name Enum's
///@{

///@}
///@name Functions
///@{

///@}
///@name Kratos Classes
///@{

/**
 * @class ScalingSolver
 * @ingroup KratosCore
 * @brief This solvers rescales in order to improve the conditioning of the system
 * @details Rescales the matrix, and uses a given linear solver
 * @author Riccardo Rossi
 * @tparam TSparseSpaceType The sparse space definition
 * @tparam TDenseSpaceType The dense space definition
 * @tparam TReordererType The reorder considered
 */
template<class TSparseSpaceType, class TDenseSpaceType,
         class TReordererType = Reorderer<TSparseSpaceType, TDenseSpaceType> >
class ScalingSolver
    : public LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of ScalingSolver
    KRATOS_CLASS_POINTER_DEFINITION(ScalingSolver);

    /// Definition of the base type
    typedef LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType> BaseType;

    /// The definition of the spaces (sparse matrix)
    typedef typename TSparseSpaceType::MatrixType SparseMatrixType;

    /// The definition of the spaces (vector)
    typedef typename TSparseSpaceType::VectorType VectorType;

    /// The definition of the spaces (dense matrix)
    typedef typename TDenseSpaceType::MatrixType DenseMatrixType;

    /// The definition of the linear solver factory type
    typedef LinearSolverFactory<TSparseSpaceType,TDenseSpaceType> LinearSolverFactoryType;

    /// The index type definition to be consistent
    typedef typename TSparseSpaceType::IndexType IndexType;

    ///@}
    ///@name Life Cycle
    ///@{

    /// Default constructor.
    ScalingSolver()
    {
    }

    /**
     * @brief Constructor without parameters
     * @param pLinearSolver The linear solver to be scaled
     * @param SymmetricScaling If the scaling is symmetric (true by default)
     */
    ScalingSolver(
        typename BaseType::Pointer pLinearSolver,
        const bool SymmetricScaling = true
        ) : BaseType (),
            mpLinearSolver(pLinearSolver),
            mSymmetricScaling(SymmetricScaling)
    {
    }

    /**
     * @brief Constructor with parameters
     * @param ThisParameters The configuration parameters of the linear solver
     */
    ScalingSolver(Parameters ThisParameters)
        : BaseType ()
    {
        KRATOS_TRY

        // The wrapped solver is built through the factory; "solver_type" is mandatory.
        KRATOS_ERROR_IF_NOT(ThisParameters.Has("solver_type")) << "Solver_type must be specified to construct the ScalingSolver" << std::endl;

        mpLinearSolver = LinearSolverFactoryType().Create(ThisParameters);

        // Symmetric scaling defaults to true when the key is absent.
        mSymmetricScaling = ThisParameters.Has("symmetric_scaling") ? ThisParameters["symmetric_scaling"].GetBool() : true;

        KRATOS_CATCH("")
    }

    /// Copy constructor.
    ScalingSolver(const ScalingSolver& Other) : BaseType(Other) {}

    /// Destructor.
    ~ScalingSolver() override {}

    ///@}
    ///@name Operators
    ///@{

    /// Assignment operator.
    ScalingSolver& operator=(const ScalingSolver& Other)
    {
        BaseType::operator=(Other);
        return *this;
    }

    ///@}
    ///@name Operations
    ///@{

    /** Some solvers may require a minimum degree of knowledge of the structure of the matrix. To make an example
     * when solving a mixed u-p problem, it is important to identify the row associated to v and p.
     * another example is the automatic prescription of rotation null-space for smoothed-aggregation solvers
     * which require knowledge on the spatial position of the nodes associated to a given dof.
     * This function tells if the solver requires such data
     */
    bool AdditionalPhysicalDataIsNeeded() override
    {
        // Forwarded to the wrapped solver.
        return mpLinearSolver->AdditionalPhysicalDataIsNeeded();
    }

    /** Some solvers may require a minimum degree of knowledge of the structure of the matrix. To make an example
     * when solving a mixed u-p problem, it is important to identify the row associated to v and p.
     * another example is the automatic prescription of rotation null-space for smoothed-aggregation solvers
     * which require knowledge on the spatial position of the nodes associated to a given dof.
     * This function is the place to eventually provide such data
     */
    void ProvideAdditionalData(
        SparseMatrixType& rA,
        VectorType& rX,
        VectorType& rB,
        typename ModelPart::DofsArrayType& rdof_set,
        ModelPart& r_model_part
    ) override
    {
        mpLinearSolver->ProvideAdditionalData(rA,rX,rB,rdof_set,r_model_part);
    }

    void InitializeSolutionStep (SparseMatrixType& rA, VectorType& rX, VectorType& rB) override
    {
        mpLinearSolver->InitializeSolutionStep(rA,rX,rB);
    }

    /** This function is designed to be called at the end of the solve step.
     * for example this is the place to remove any data that we do not want to save for later
    @param rA. System matrix
    @param rX. Solution vector. it's also the initial guess for iterative linear solvers.
    @param rB. Right hand side vector.
    */
    void FinalizeSolutionStep (SparseMatrixType& rA, VectorType& rX, VectorType& rB) override
    {
        mpLinearSolver->FinalizeSolutionStep(rA,rX,rB);
    }

    /** This function is designed to clean up all internal data in the solver.
     * Clear is designed to leave the solver object as if newly created.
     * After a clear a new Initialize is needed
     */
    void Clear() override
    {
        mpLinearSolver->Clear();
    }

    /** Normal solve method.
    Solves the linear system Ax=b and puts the result on SystemVector& rX.
    rX is also th initial guess for iterative methods.
    @param rA. System matrix
    @param rX. Solution vector. it's also the initial guess for iterative linear solvers.
    @param rB. Right hand side vector.
    */
    bool Solve(SparseMatrixType& rA, VectorType& rX, VectorType& rB) override
    {
        if(this->IsNotConsistent(rA, rX, rB))
            return false;

        VectorType scaling_vector(rX.size());

        //obtain the scaling matrix
        // Each weight is the 2-norm of the corresponding matrix row (see GetScalingWeights).
        GetScalingWeights(rA,scaling_vector);

        //scale system
        if(mSymmetricScaling == false)
        {
            // Only symmetric (two-sided) scaling is implemented.
            KRATOS_THROW_ERROR(std::logic_error,"not yet implemented","")
        }
        else
        {
            // Symmetric scaling uses D^{-1/2} A D^{-1/2}: take sqrt of the weights first.
            // NOTE(review): a zero weight (empty matrix row) would cause a division
            // by zero below — presumably excluded upstream; confirm.
            #pragma omp parallel for
            for(int i=0; i< static_cast<int>(scaling_vector.size()); i++)
                scaling_vector[i] = sqrt(std::abs(scaling_vector[i]));

            SymmetricScaling(rA,scaling_vector);
        }

        //scale RHS
        #pragma omp parallel for
        for(int i=0; i< static_cast<int>(scaling_vector.size()); i++)
            rB[i] /= scaling_vector[i];

        //solve the problem
        bool is_solved = mpLinearSolver->Solve(rA,rX,rB);

        //backscale the solution
        // The scaled system solves for x' = D x, so recover x = D^{-1} x'.
        if(mSymmetricScaling == true)
        {
            #pragma omp parallel for
            for(int i=0; i< static_cast<int>(scaling_vector.size()); i++)
                rX[i] /= scaling_vector[i];
        }

        return is_solved;
    }

    ///@}
    ///@name Access
    ///@{

    IndexType GetIterationsNumber() override
    {
        return mpLinearSolver->GetIterationsNumber();
    }

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override
    {
        std::stringstream buffer;
        buffer << "Composite Linear Solver. Uses internally the following linear solver " << mpLinearSolver->Info();
        return  buffer.str();
    }

    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override
    {
        BaseType::PrintData(rOStream);
    }

    ///@}
    ///@name Friends
    ///@{

    ///@}

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    ///@}
    ///@name Protected  Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}

private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    // Wrapped solver that receives the scaled system.
    typename LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>::Pointer mpLinearSolver;

    // Whether to apply two-sided (symmetric) scaling; one-sided is not implemented.
    bool mSymmetricScaling;

    ///@}
    ///@name Private Operators
    ///@{

    /// Scales A in place as D^{-1} A D^{-1} with D = diag(aux), distributing
    /// row ranges across OpenMP threads via contiguous partitions of the CSR arrays.
    static void SymmetricScaling( SparseMatrixType& A, const VectorType& aux)
    {
        //create partition
        OpenMPUtils::PartitionVector partition;
        int number_of_threads = ParallelUtilities::GetNumThreads();
        OpenMPUtils::DivideInPartitions(A.size1(),number_of_threads, partition);

        //parallel loop
        #pragma omp parallel
        {
            int thread_id = OpenMPUtils::ThisThread();
            int number_of_rows = partition[thread_id+1] - partition[thread_id];
            // Iterators into the raw CSR storage of the uBLAS compressed matrix:
            // row pointers, column indices and values, offset to this thread's slice.
            typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::iterator row_iter_begin = A.index1_data().begin()+partition[thread_id];
            typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::iterator index_2_begin = A.index2_data().begin()+*row_iter_begin;
            typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::value_array_type::iterator value_begin = A.value_data().begin()+*row_iter_begin;

            perform_matrix_scaling( number_of_rows,
                                    row_iter_begin,
                                    index_2_begin,
                                    value_begin,
                                    partition[thread_id],
                                    aux
                                  );
        }
    }

    /**
     * calculates partial product resetting to Zero the output before
     */
    // Divides every stored entry a_ij by weights[i]*weights[j] for the given CSR slice.
    static void perform_matrix_scaling(
        int number_of_rows,
        typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::iterator row_begin,
        typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::iterator index2_begin,
        typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::value_array_type::iterator value_begin,
        unsigned int output_begin_index,
        const VectorType& weights
    )
    {
        int row_size;
        typename SparseMatrixType::index_array_type::const_iterator row_it = row_begin;
        int kkk = output_begin_index;
        for(int k = 0; k < number_of_rows; k++)
        {
            row_size= *(row_it+1)-*row_it;
            row_it++;
            const typename TDenseSpaceType::DataType row_weight = weights[kkk++];

            for(int i = 0; i<row_size; i++)
            {
                const typename TDenseSpaceType::DataType col_weight = weights[*index2_begin];
                typename TDenseSpaceType::DataType t = (*value_begin);
                t /= (row_weight*col_weight);
                (*value_begin) = t; //check if this is correct!!
                value_begin++;
                index2_begin++;
            }
        }
    }

    /// Fills aux[i] with the Euclidean norm of row i of A, using the same
    /// thread-partitioning scheme as SymmetricScaling (read-only iterators here).
    static void GetScalingWeights( const SparseMatrixType& A, VectorType& aux)
    {
        //create partition
        OpenMPUtils::PartitionVector partition;
        int number_of_threads = ParallelUtilities::GetNumThreads();
        OpenMPUtils::DivideInPartitions(A.size1(),number_of_threads, partition);

        //parallel loop
        #pragma omp parallel
        {
            int thread_id = OpenMPUtils::ThisThread();
            int number_of_rows = partition[thread_id+1] - partition[thread_id];
            typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::const_iterator row_iter_begin = A.index1_data().begin()+partition[thread_id];
            typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::const_iterator index_2_begin = A.index2_data().begin()+*row_iter_begin;
            typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::value_array_type::const_iterator value_begin = A.value_data().begin()+*row_iter_begin;

            GS2weights( number_of_rows,
                        row_iter_begin,
                        index_2_begin,
                        value_begin,
                        partition[thread_id],
                        aux
                      );
        }
    }

    /**
     * calculates partial product resetting to Zero the output before
     */
    // Computes weights[i] = sqrt(sum_j a_ij^2) (row 2-norm) for the given CSR slice.
    static void GS2weights(
        int number_of_rows,
        typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::const_iterator row_begin,
        typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::const_iterator index2_begin,
        typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::value_array_type::const_iterator value_begin,
        unsigned int output_begin_index,
        VectorType& weights
    )
    {
        int row_size;
        typename SparseMatrixType::index_array_type::const_iterator row_it = row_begin;
        int kkk = output_begin_index;
        for(int k = 0; k < number_of_rows; k++)
        {
            row_size= *(row_it+1)-*row_it;
            row_it++;
            double t = 0.0;

            for(int i = 0; i<row_size; i++)
            {
                double tmp = std::abs(*value_begin);
                t += tmp*tmp;
                value_begin++;
            }
            t = sqrt(t);
            weights[kkk++] = t;
        }
    }

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private  Access
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    ///@}

}; // Class ScalingSolver

///@}

///@name Type Definitions
///@{

///@}
///@name Input and output
///@{

/// input stream function
template<class TSparseSpaceType, class TDenseSpaceType,
         class TPreconditionerType,
         class TReordererType>
inline std::istream& operator >> (std::istream& IStream,
                                  ScalingSolver<TSparseSpaceType, TDenseSpaceType,
                                  TReordererType>& rThis)
{
    return IStream;
}

/// output stream function
template<class TSparseSpaceType, class TDenseSpaceType,
         class TPreconditionerType,
         class TReordererType>
inline std::ostream& operator << (std::ostream& OStream,
                                  const ScalingSolver<TSparseSpaceType, TDenseSpaceType,
                                  TReordererType>& rThis)
{
    rThis.PrintInfo(OStream);
    OStream << std::endl;
    rThis.PrintData(OStream);

    return OStream;
}
///@}

}  // namespace Kratos.

#endif // KRATOS_SCALING_SOLVER_H_INCLUDED  defined
SE_fgg_expand_all_mex.c
#include "mex.h"
#include "SE_fgg.h"

/* Implemented elsewhere: unpacks the MATLAB options struct into params. */
void SE_FGG_MEX_params(SE_FGG_params*, const mxArray*, int);

#ifndef VERBOSE
#define VERBOSE 0
#endif

/*
 * MEX gateway for SE_FGG_expand_all.
 *
 * Inputs:
 *   prhs[0]  (N x dim) particle coordinates
 *   prhs[1]  options struct consumed by SE_FGG_MEX_params
 *
 * Outputs:
 *   plhs[0..2]  (P x N) per-dimension expansion factors (zx, zy, zz)
 *   plhs[3]     (N x 1) int32 grid indices
 */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[] )
{
    const mxArray *coords_arr = prhs[0];
    const mxArray *opts_arr   = prhs[1];

    const int num_pts = mxGetM(coords_arr);
    double* restrict coords = mxGetPr(coords_arr);

    /* unpack parameters from the options struct */
    SE_FGG_params params;
    SE_FGG_MEX_params(&params, opts_arr, num_pts);

    /* allocate the four output arrays (three P-by-N doubles, one N-by-1 int32) */
    plhs[0] = mxCreateDoubleMatrix(params.P, num_pts, mxREAL);
    plhs[1] = mxCreateDoubleMatrix(params.P, num_pts, mxREAL);
    plhs[2] = mxCreateDoubleMatrix(params.P, num_pts, mxREAL);

    const size_t dims[2] = {num_pts, 1};
    plhs[3] = mxCreateNumericArray(2, dims, mxINT32_CLASS, mxREAL);

    /* expose the output buffers through the SE_FGG work struct */
    SE_FGG_work work;
    work.zx  = mxGetPr(plhs[0]);
    work.zy  = mxGetPr(plhs[1]);
    work.zz  = mxGetPr(plhs[2]);
    work.idx = (int*)mxGetData(plhs[3]);

    /* coordinates only; charges are not needed for the expansion */
    const SE_state st = {.x = coords, .q = NULL};

    if(VERBOSE)
        mexPrintf("[SE%s FG(E)] N=%d, P=%d\n",PER_STR,num_pts,params.P);

#ifdef _OPENMP
#pragma omp parallel default(shared)
#endif
    {
        /* do the work (compiled for 2P or 3P periodicity) */
        SE_FGG_expand_all(&work, &st, &params);
    }
}
MINDSSCbox.h
/// @brief Applies a box filter to image data /// @note Box filters are often used to efficiently approximate Gaussian filters /// /// @param [out] input is the image to filter /// @param [out] temp1 is a temporary buffer the same size as the input, provided by the caller to provide the function with memory necessary to perform the filter calculations /// @param [out] temp2 is a temporary buffer the same size as the input, provided by the caller to provide the function with memory necessary to perform the filter calculations /// @param [in] hw is /// @param [in] m is dimension #1 of the input image /// @param [in] n is dimension #2 of the input image /// @param [in] o is dimension #3 of the input image /// void boxfilter(float* input,float* temp1,float* temp2,int hw,int m,int n,int o){ // calculate length of the 'input' and 'temp1' vectors int sz=m*n*o; for(int i=0;i<sz;i++){ temp1[i]=input[i]; } for(int k=0;k<o;k++){ for(int j=0;j<n;j++){ for(int i=1;i<m;i++){ temp1[i+j*m+k*m*n]+=temp1[(i-1)+j*m+k*m*n]; } } } for(int k=0;k<o;k++){ for(int j=0;j<n;j++){ for(int i=0;i<(hw+1);i++){ temp2[i+j*m+k*m*n]=temp1[(i+hw)+j*m+k*m*n]; } for(int i=(hw+1);i<(m-hw);i++){ temp2[i+j*m+k*m*n]=temp1[(i+hw)+j*m+k*m*n]-temp1[(i-hw-1)+j*m+k*m*n]; } for(int i=(m-hw);i<m;i++){ temp2[i+j*m+k*m*n]=temp1[(m-1)+j*m+k*m*n]-temp1[(i-hw-1)+j*m+k*m*n]; } } } for(int k=0;k<o;k++){ for(int j=1;j<n;j++){ for(int i=0;i<m;i++){ temp2[i+j*m+k*m*n]+=temp2[i+(j-1)*m+k*m*n]; } } } for(int k=0;k<o;k++){ for(int i=0;i<m;i++){ for(int j=0;j<(hw+1);j++){ temp1[i+j*m+k*m*n]=temp2[i+(j+hw)*m+k*m*n]; } for(int j=(hw+1);j<(n-hw);j++){ temp1[i+j*m+k*m*n]=temp2[i+(j+hw)*m+k*m*n]-temp2[i+(j-hw-1)*m+k*m*n]; } for(int j=(n-hw);j<n;j++){ temp1[i+j*m+k*m*n]=temp2[i+(n-1)*m+k*m*n]-temp2[i+(j-hw-1)*m+k*m*n]; } } } for(int k=1;k<o;k++){ for(int j=0;j<n;j++){ for(int i=0;i<m;i++){ temp1[i+j*m+k*m*n]+=temp1[i+j*m+(k-1)*m*n]; } } } for(int j=0;j<n;j++){ for(int i=0;i<m;i++){ for(int k=0;k<(hw+1);k++){ 
input[i+j*m+k*m*n]=temp1[i+j*m+(k+hw)*m*n]; } for(int k=(hw+1);k<(o-hw);k++){ input[i+j*m+k*m*n]=temp1[i+j*m+(k+hw)*m*n]-temp1[i+j*m+(k-hw-1)*m*n]; } for(int k=(o-hw);k<o;k++){ input[i+j*m+k*m*n]=temp1[i+j*m+(o-1)*m*n]-temp1[i+j*m+(k-hw-1)*m*n]; } } } } /// @brief Shifts an image by the specified vectorial displacement /// /// @param [in] input is the image to shift /// @param [out] output is where the shifted image will reside /// @param [out] dx is the amount to shift the image along the horizontal axis /// @param [in] dy is the amount to shift the image along the vertical axis /// @param [in] dz is the amount to shift the image along the depth axis /// @param [in] m is dimension #1 of the input image /// @param [in] n is dimension #2 of the input image /// @param [in] o is dimension #3 of the input image /// void imshift(float* input,float* output,int dx,int dy,int dz,int m,int n,int o){ for(int k=0;k<o;k++){ for(int j=0;j<n;j++){ for(int i=0;i<m;i++){ if(i+dy>=0&&i+dy<m&&j+dx>=0&&j+dx<n&&k+dz>=0&&k+dz<o) output[i+j*m+k*m*n]=input[i+dy+(j+dx)*m+(k+dz)*m*n]; else output[i+j*m+k*m*n]=input[i+j*m+k*m*n]; } } } } /*void *distances(void *threadarg) { struct mind_data *my_data; my_data = (struct mind_data *) threadarg; float* im1=my_data->im1; float* d1=my_data->d1; int qs=my_data->qs; int ind_d1=my_data->ind_d1; int m=image_m; int n=image_n; int o=image_o;*/ /// @brief /// /// @param [in] im1 is the image /// @param [out] d1 is /// @param [in] m is dimension #1 of the input image /// @param [in] n is dimension #2 of the input image /// @param [in] o is dimension #3 of the input image /// @param [in] qs is the quantisation /// @param [in] l /// void distances(float* im1,float* d1,int m,int n,int o,int qs,int l){ // calculates the total number of elements in the 3D im1 matrix // creates three temporary buffers the same size as the 3D im1 matrix int sz1=m*n*o; float* w1=new float[sz1]; int len1=6; float* temp1=new float[sz1]; float* temp2=new float[sz1]; int 
dx[6]={+qs,+qs,-qs,+0,+qs,+0}; int dy[6]={+qs,-qs,+0,-qs,+0,+qs}; int dz[6]={0,+0,+qs,+qs,+qs,+qs}; imshift(im1,w1,dx[l],dy[l],dz[l],m,n,o); for(int i=0;i<sz1;i++){ w1[i]=(w1[i]-im1[i])*(w1[i]-im1[i]); } boxfilter(w1,temp1,temp2,qs,m,n,o); for(int i=0;i<sz1;i++){ d1[i+l*sz1]=w1[i]; } delete temp1; delete temp2; delete w1; } /// @brief /// /// @param [in] qs stands for 'quantisation' /// //__builtin_popcountll(left[i]^right[i]); absolute hamming distances void descriptor(uint64_t* mindq,float* im1,int m,int n,int o,int qs){ timeval time1,time2; //MIND with self-similarity context int dx[6]={+qs,+qs,-qs,+0,+qs,+0}; int dy[6]={+qs,-qs,+0,-qs,+0,+qs}; int dz[6]={0,+0,+qs,+qs,+qs,+qs}; int sx[12]={-qs,+0,-qs,+0,+0,+qs,+0,+0,+0,-qs,+0,+0}; int sy[12]={+0,-qs,+0,+qs,+0,+0,+0,+qs,+0,+0,+0,-qs}; int sz[12]={+0,+0,+0,+0,-qs,+0,-qs,+0,-qs,+0,-qs,+0}; int index[12]={0,0,1,1,2,2,3,3,4,4,5,5}; float sigma=0.75;//1.0;//0.75;//1.5; int rho=ceil(sigma*1.5)*2+1; int len1=6; const int len2=12; image_d=12; int d=12; int sz1=m*n*o; pthread_t thread1, thread2, thread3; //============== DISTANCES USING BOXFILTER =================== float* d1=new float[sz1*len1]; gettimeofday(&time1, NULL); #pragma omp parallel for for(int l=0;l<len1;l++){ distances(im1,d1,m,n,o,qs,l); } gettimeofday(&time2, NULL); float timeMIND1=time2.tv_sec+time2.tv_usec/1e6-(time1.tv_sec+time1.tv_usec/1e6); gettimeofday(&time1, NULL); //quantisation table const int val=6; const unsigned long long power=32; #pragma omp parallel for for(int k=0;k<o;k++){ unsigned int tablei[6]={0,1,3,7,15,31}; float compare[val-1]; for(int i=0;i<val-1;i++){ compare[i]=-log((i+1.5f)/val); } float mind1[12]; for(int j=0;j<n;j++){ for(int i=0;i<m;i++){ for(int l=0;l<len2;l++){ if(i+sy[l]>=0&&i+sy[l]<m&&j+sx[l]>=0&&j+sx[l]<n&&k+sz[l]>=0&&k+sz[l]<o){ mind1[l]=d1[i+sy[l]+(j+sx[l])*m+(k+sz[l])*m*n+index[l]*sz1]; } else{ mind1[l]=d1[i+j*m+k*m*n+index[l]*sz1]; } } float minval=*min_element(mind1,mind1+len2); float sumnoise=0.0f; for(int 
l=0;l<len2;l++){ mind1[l]-=minval; sumnoise+=mind1[l]; } float noise1=max(sumnoise/(float)len2,1e-6f); for(int l=0;l<len2;l++){ mind1[l]/=noise1; } unsigned long long accum=0; unsigned long long tabled1=1; for(int l=0;l<len2;l++){ //mind1[l]=exp(-mind1[l]); int mind1val=0; for(int c=0;c<val-1;c++){ mind1val+=compare[c]>mind1[l]?1:0; } //int mind1val=min(max((int)(mind1[l]*val-0.5f),0),val-1); accum+=tablei[mind1val]*tabled1; tabled1*=power; } mindq[i+j*m+k*m*n]=accum; } } } gettimeofday(&time2, NULL); float timeMIND2=time2.tv_sec+time2.tv_usec/1e6-(time1.tv_sec+time1.tv_usec/1e6); delete d1; }
GB_binop__lxor_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__lxor_fp32 // A.*B function (eWiseMult): GB_AemultB__lxor_fp32 // A*D function (colscale): GB_AxD__lxor_fp32 // D*A function (rowscale): GB_DxB__lxor_fp32 // C+=B function (dense accum): GB_Cdense_accumB__lxor_fp32 // C+=b function (dense accum): GB_Cdense_accumb__lxor_fp32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lxor_fp32 // C=scalar+B GB_bind1st__lxor_fp32 // C=scalar+B' GB_bind1st_tran__lxor_fp32 // C=A+scalar GB_bind2nd__lxor_fp32 // C=A'+scalar GB_bind2nd_tran__lxor_fp32 // C type: float // A type: float // B,b type: float // BinaryOp: cij = ((aij != 0) != (bij != 0)) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ float bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define 
GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = ((x != 0) != (y != 0)) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LXOR || GxB_NO_FP32 || GxB_NO_LXOR_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__lxor_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__lxor_fp32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__lxor_fp32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__lxor_fp32 ( GrB_Matrix C, const GrB_Matrix A, bool 
A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *GB_RESTRICT Cx = (float *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__lxor_fp32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *GB_RESTRICT Cx = (float *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__lxor_fp32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; 
#include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__lxor_fp32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__lxor_fp32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; float bij = Bx [p] ; Cx [p] = ((x != 0) != (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd 
//------------------------------------------------------------------------------ GrB_Info GB_bind2nd__lxor_fp32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = Ax [p] ; Cx [p] = ((aij != 0) != (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = ((x != 0) != (aij != 0)) ; \ } GrB_Info GB_bind1st_tran__lxor_fp32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = ((aij != 0) != (y != 0)) ; \ } GrB_Info GB_bind2nd_tran__lxor_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
main_omp.c
#include <pthread.h> #include <omp.h> #include "matriz-operacoes-omp.h" int main(int argc, char *argv[]) { // %%%%%%%%%%%%%%%%%%%%%%%% BEGIN %%%%%%%%%%%%%%%%%%%%%%%% // DECLARAÇÃO de VARIÁVEIS // %%%%%%%%%%%%%%%%%%%%%%%% BEGIN %%%%%%%%%%%%%%%%%%%%%%%% // DECLARAÇÃO DE VARIÁVEIS mymatriz mat_a, mat_b; mymatriz *mmult_MATRIZ_SeqC; mymatriz *mmult_MATRIZ_SeqBlC; mymatriz *mmult_MATRIZ_OMPC; mymatriz *mmult_MATRIZ_OMPBlC; //variaveis para manipulacao de arquivos no disco char filename[100]; FILE *fmat; int nr_line; int *vet_line = NULL; int N, M, La, Lb; //variaveis para controle de blocos matriz_bloco_t **Vsubmat_a = NULL; matriz_bloco_t **Vsubmat_b = NULL; matriz_bloco_t **Vsubmat_c = NULL; //For para executar calculo da média int n_threads = 4; int nro_submatrizes = n_threads; int count_for = 10; //numero de repeticoes para média de runtime //variaveis para controle de tempo (runtime) double start_time, end_time; double tempo_MATRIZ_SeqC = 0; double tempo_MATRIZ_SeqBlC = 0; double tempo_MATRIZ_OMPC = 0; double tempo_MATRIZ_OMPBlC = 0; double speedup_seqC; double speedup_BlC; // %%%%%%%%%%%%%%%%%%%%%%%% END %%%%%%%%%%%%%%%%%%%%%%%% if (argc < 3) { printf("ERRO: Numero de parametros %s <matriz_a> <matriz_b> <threads>\n", argv[0]); exit(1); } if (argv[3] != NULL){ nro_submatrizes = atoi(argv[3]); n_threads = atoi(argv[3]); } // %%%%%%%%%%%%%%%%%%%%%%%% BEGIN %%%%%%%%%%%%%%%%%%%%%%%% //Leitura da Matriz A (arquivo) fmat = fopen(argv[1], "r"); if (fmat == NULL) { printf("Error: Na abertura dos arquivos."); exit(1); } extrai_parametros_matriz(fmat, &N, &La, &vet_line, &nr_line); mat_a.matriz = NULL; mat_a.lin = N; mat_a.col = La; if (malocar(&mat_a)) { printf("ERROR: Out of memory\n"); } filein_matriz(mat_a.matriz, N, La, fmat, vet_line, nr_line); free(vet_line); fclose(fmat); // %%%%%%%%%%%%%%%%%%%%%%%% END %%%%%%%%%%%%%%%%%%%%%%%% // %%%%%%%%%%%%%%%%%%%%%%%% BEGIN %%%%%%%%%%%%%%%%%%%%%%%% // Leitura da Matriz B (arquivo) fmat = fopen(argv[2], "r"); if (fmat == NULL) { 
printf("Error: Na abertura dos arquivos."); exit(1); } extrai_parametros_matriz(fmat, &Lb, &M, &vet_line, &nr_line); mat_b.matriz = NULL; mat_b.lin = Lb; mat_b.col = M; if (malocar(&mat_b)) { printf("ERROR: Out of memory\n"); } filein_matriz(mat_b.matriz, Lb, M, fmat, vet_line, nr_line); free(vet_line); fclose(fmat); // %%%%%%%%%%%%%%%%%%%%%%%% BEGIN %%%%%%%%%%%%%%%%%%%%%%%% // Multiplicação Sequencial mmult_MATRIZ_SeqC = (mymatriz *)malloc(sizeof(mymatriz)); for (int count = 0; count < count_for; count++) { start_time = wtime(); printf("\rMultiplicação Sequencial, teste %d... ", count+1); fflush(stdout); mmult_MATRIZ_SeqC = mmultiplicar(&mat_a, &mat_b, 3); //1=mais rápido (2.04), 5=mais lento (5.94) end_time = wtime(); tempo_MATRIZ_SeqC += end_time - start_time; } sprintf(filename, "MATRIZ_SeqC.result"); fmat = fopen(filename, "w"); fileout_matriz(mmult_MATRIZ_SeqC, fmat); fclose(fmat); // %%%%%%%%%%%%%%%%%%%%%%%% END %%%%%%%%%%%%%%%%%%%%%%%% // %%%%%%%%%%%%%%%%%%%%%%%% BEGIN %%%%%%%%%%%%%%%%%%%%%%%% // Multiplicação Sequencial em Bloco printf("\n"); mmult_MATRIZ_SeqBlC = (mymatriz *)malloc(sizeof(mymatriz)); for (int count = 0; count < count_for; count++) { start_time = wtime(); printf("\rMultiplicação Sequencial em Bloco, teste %d... 
", count+1); fflush(stdout); Vsubmat_a = particionar_matriz(mat_a.matriz, N, La, 1, nro_submatrizes); Vsubmat_a = particionar_matriz(mat_a.matriz, N, La, 1, nro_submatrizes); Vsubmat_b = particionar_matriz(mat_b.matriz, Lb, M, 0, nro_submatrizes); Vsubmat_c = csubmatrizv2(N, M, nro_submatrizes); //multiplicacao de blocos for (int i = 0; i < nro_submatrizes; i++){ multiplicar_submatriz (Vsubmat_a[i], Vsubmat_b[i], Vsubmat_c[i]); } //soma os blocos separados mmult_MATRIZ_SeqBlC = msomar(Vsubmat_c[0]->matriz,Vsubmat_c[1]->matriz, 1); mmult_MATRIZ_SeqBlC = msomar(Vsubmat_c[0]->matriz,Vsubmat_c[1]->matriz, 1); for (int i = 2; i < nro_submatrizes; i++){ mmult_MATRIZ_SeqBlC = msomar(mmult_MATRIZ_SeqBlC,Vsubmat_c[i]->matriz, 1); } end_time = wtime(); tempo_MATRIZ_SeqBlC += end_time - start_time; } sprintf(filename, "MATRIZ_SeqBlC.result"); fmat = fopen(filename, "w"); fileout_matriz(mmult_MATRIZ_SeqBlC, fmat); fclose(fmat); // %%%%%%%%%%%%%%%%%%%%%%%% END %%%%%%%%%%%%%%%%%%%%%%%% // %%%%%%%%%%%%%%%%%%%%%%%% BEGIN %%%%%%%%%%%%%%%%%%%%%%%% // Multiplicação MultiThread printf("\n"); mmult_MATRIZ_OMPC = (mymatriz *)malloc(sizeof(mymatriz)); mmult_MATRIZ_OMPC = malloc(sizeof(mymatriz)); mmult_MATRIZ_OMPC->matriz = NULL; mmult_MATRIZ_OMPC->lin = mat_a.lin; mmult_MATRIZ_OMPC->col = mat_b.col; //realiza a alocação de memória para matriz resultado if (malocar(mmult_MATRIZ_OMPC)) { printf("ERROR: Out of memory\n"); exit(1); }else{ mzerar(mmult_MATRIZ_OMPC); } for (int count = 0; count < count_for; count++) { printf("\rMultiplicação OMP, teste %d... 
", count+1); fflush(stdout); mzerar(mmult_MATRIZ_OMPC); start_time = wtime(); int tid; int nthreads; #pragma omp parallel num_threads(n_threads) { tid = omp_get_thread_num(); nthreads = omp_get_num_threads(); multiplicarOMP(&mat_a, &mat_b, mmult_MATRIZ_OMPC, tid, nthreads); } end_time = wtime(); tempo_MATRIZ_OMPC += end_time - start_time; } sprintf(filename, "MATRIZ_OMPC.result"); fmat = fopen(filename, "w"); fileout_matriz(mmult_MATRIZ_OMPC, fmat); fclose(fmat); // %%%%%%%%%%%%%%%%%%%%%%%% END %%%%%%%%%%%%%%%%%%%%%%%% // %%%%%%%%%%%%%%%%%%%%%%%% BEGIN %%%%%%%%%%%%%%%%%%%%%%%% // Multiplicação MultiThreads em Bloco printf("\n"); mmult_MATRIZ_OMPBlC = (mymatriz *)malloc(sizeof(mymatriz)); for (int count = 0; count < count_for; count++) { printf("\rMultiplicação multithread em bloco, teste %d... ", count+1); fflush(stdout); Vsubmat_a = particionar_matriz(mat_a.matriz, N, La, 1, nro_submatrizes); Vsubmat_b = particionar_matriz(mat_b.matriz, Lb, M, 0, nro_submatrizes); Vsubmat_c = csubmatrizv2(N, M, nro_submatrizes); start_time = wtime(); int tid; //int nthreads; #pragma omp parallel num_threads(n_threads) { tid = omp_get_thread_num(); //nthreads = omp_get_num_threads(); multiplicarOMPblocos(Vsubmat_a[tid], Vsubmat_b[tid], Vsubmat_c[tid]); } end_time = wtime(); mmult_MATRIZ_OMPBlC = msomar(Vsubmat_c[0]->matriz,Vsubmat_c[1]->matriz, 1); for (int i = 2; i < n_threads; i++){ mmult_MATRIZ_OMPBlC = msomar(mmult_MATRIZ_OMPBlC,Vsubmat_c[i]->matriz, 1); } tempo_MATRIZ_OMPBlC += end_time - start_time; } sprintf(filename, "MATRIZ_OMPBlC.result"); fmat = fopen(filename, "w"); fileout_matriz(mmult_MATRIZ_OMPBlC, fmat); fclose(fmat); // %%%%%%%%%%%%%%%%%%%%%%%% END %%%%%%%%%%%%%%%%%%%%%%%% // %%%%%%%%%%%%%%%%%%%%%%%% BEGIN %%%%%%%%%%%%%%%%%%%%%%%% // Impressao dos resultados de tempo printf("\n\n\tCOMPARAR MATRIZ_SeqC c/ MATRIZ_SeqBlC\n\t"); mcomparar(mmult_MATRIZ_SeqC, mmult_MATRIZ_SeqBlC); printf("\n\tCOMPARAR MATRIZ_SeqC c/ MATRIZ_OMPC\n\t"); mcomparar(mmult_MATRIZ_SeqC, 
mmult_MATRIZ_OMPC); printf("\n\tCOMPARAR MATRIZ_SeqC c/ MATRIZ_OMPBlC\n\t"); mcomparar(mmult_MATRIZ_SeqC, mmult_MATRIZ_OMPBlC); printf("\n\tTempo Médio MATRIZ_SeqC:\t%.6f sec \n", tempo_MATRIZ_SeqC / count_for); printf("\tTempo Médio MATRIZ_SeqBlC:\t%.6f sec\n", tempo_MATRIZ_SeqBlC / count_for ); printf("\tTempo Médio MATRIZ_OMPC:\t%.6f sec \n", tempo_MATRIZ_OMPC / count_for); printf("\tTempo Médio MATRIZ_OMPBlC:\t%.6f sec \n", tempo_MATRIZ_OMPBlC / count_for); speedup_seqC = (tempo_MATRIZ_SeqC / count_for) / (tempo_MATRIZ_OMPC / count_for); speedup_BlC = (tempo_MATRIZ_SeqBlC / count_for) / (tempo_MATRIZ_OMPBlC / count_for); printf("\n\tSPEEDUP (MATRIZ_C): \t%.3f (%.2f %c)", speedup_seqC, speedup_seqC*100, 37 ); printf("\n\tSPEEDUP (MATRIZ_BLC): \t%.3f (%.2f %c)\n\n", speedup_BlC, speedup_BlC*100, 37 ); // %%%%%%%%%%%%%%%%%%%%%%%% END %%%%%%%%%%%%%%%%%%%%%%%% // %%%%%%%%%%%%%%%%%%%%%%%% BEGIN %%%%%%%%%%%%%%%%%%%%%%%% //Liberação de memória mliberar(mmult_MATRIZ_SeqC); mliberar(mmult_MATRIZ_SeqBlC); mliberar(mmult_MATRIZ_OMPC); mliberar(mmult_MATRIZ_OMPBlC); free(mmult_MATRIZ_SeqC); free(mmult_MATRIZ_SeqBlC); free(mmult_MATRIZ_OMPC); free(mmult_MATRIZ_OMPBlC); mliberar(&mat_a); mliberar(&mat_b); // %%%%%%%%%%%%%%%%%%%%%%%% END %%%%%%%%%%%%%%%%%%%%%%%% return 0; }
GB_unop__signum_fp32_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__signum_fp32_fp32 // op(A') function: GB_unop_tran__signum_fp32_fp32 // C type: float // A type: float // cast: float cij = aij // unaryop: cij = GB_signumf (aij) #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_signumf (x) ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = GB_signumf (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_SIGNUM || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__signum_fp32_fp32 ( float *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == 
NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = GB_signumf (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; float z = aij ; Cx [p] = GB_signumf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__signum_fp32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
tstile.h
/*
 * Tiled triangular dynamic-programming sweep over the score table S
 * (Nussinov-style recurrence: S[i][j] = max over split points, plus a
 * pairing term from can_pair).
 *
 * NOTE(review): the loop structure appears to be emitted by a polyhedral
 * tiling tool (16x16 tiles over the (i,j) iteration space, with c0/c1
 * indexing tiles and c3/c4/c6/c10 indexing points inside them) — confirm
 * before hand-editing any bound; the floord/min/max expressions encode the
 * exact legality conditions of the tiling and must not be "simplified".
 *
 * Relies on file-level definitions from elsewhere in the project:
 * S, RNA, N, MAX, floord, min, max, can_pair.
 */
void tstile()
{
    int c0,c1,c2,c3,c5,c6,c7,c9,c11,c10,c4,c12;
    if(1==1)
    /* outer tile band; inner tile loop is parallel across c1 (independent
       anti-diagonal tiles), hence the omp parallel for */
    for( c0 = 0; c0 <= floord(N - 2, 8); c0 += 1)
    #pragma omp parallel for schedule(dynamic, 1)
    for( c1 = (c0 + 1) / 2; c1 <= min(c0, (N - 1) / 16); c1 += 1)
        for( c3 = 16 * c0 - 16 * c1 + 1; c3 <= min(min(N - 1, 16 * c1 + 15), 16 * c0 - 16 * c1 + 16); c3 += 1) {
            for( c4 = 0; c4 <= c0 - c1; c4 += 1)
                for( c6 = max(-N + 16 * c1 + 1, -N + c3 + 1); c6 <= min(0, -N + 16 * c1 + 16); c6 += 1) {
                    /* split-point maximisation over c10 within the tile */
                    for( c10 = 16 * c4; c10 <= min(c3 - 1, 16 * c4 + 15); c10 += 1)
                        S[(-c6)][(c3-c6)] = MAX(S[(-c6)][c10+(-c6)] + S[c10+(-c6)+1][(c3-c6)], S[(-c6)][(c3-c6)]);
                    /* pairing term, applied once when this is the last tile
                       contributing to cell (-c6, c3-c6) */
                    if (c1 + c4 == c0 && 16 * c0 + c6 + 15 >= 16 * c1 + c3)
                        S[(-c6)][(c3-c6)] = MAX(S[(-c6)][(c3-c6)], S[(-c6)+1][(c3-c6)-1] + can_pair(RNA, (-c6), (c3-c6)));
                }
            /* remaining split points that fall outside the c4 range above */
            for( c4 = max(c0 - c1 + 1, -c1 + (N + c3) / 16 - 1); c4 <= min((N - 1) / 16, -c1 + (N + c3 - 1) / 16); c4 += 1)
                for( c6 = max(max(-N + 16 * c1 + 1, -N + c3 + 1), c3 - 16 * c4 - 15); c6 <= min(-N + 16 * c1 + 16, c3 - 16 * c4); c6 += 1)
                    S[(-c6)][(c3-c6)] = MAX(S[(-c6)][(c3-c6)], S[(-c6)+1][(c3-c6)-1] + can_pair(RNA, (-c6), (c3-c6)));
        }
}
declare-pr90861.c
/* Verify that OpenACC 'declare' cleans up for VLAs. */

/* { dg-additional-options "-fdump-tree-gimple" } */

/* Fixed-size array: the gimple dump must contain exactly one 'to' and one
   'from' map for A_f1 (the 'from' proves cleanup happens at scope exit). */
void
f1 (void)
{
#define N_f1 1000
  int A_f1[N_f1];
#pragma acc declare copy(A_f1)
  /* { dg-final { scan-tree-dump-times {#pragma omp target oacc_declare map\(to:A_f1} 1 gimple } }
     { dg-final { scan-tree-dump-times {#pragma omp target oacc_declare map\(from:A_f1} 1 gimple } } */
}

/* Variable-length array: gimple refers to the VLA through a pointer
   dereference, hence the '\(\*A_f2' pattern; again one map in each
   direction is required. */
void
f2 (void)
{
  int N_f2 = 1000;
  int A_f2[N_f2];
#pragma acc declare copy(A_f2)
  /* { dg-final { scan-tree-dump-times {#pragma omp target oacc_declare map\(to:\(\*A_f2} 1 gimple } }
     { dg-final { scan-tree-dump-times {#pragma omp target oacc_declare map\(from:\(\*A_f2} 1 gimple } } */
}
dijkstra_open_mp.c
/*
 * A test case provided by Allan Porterfield from http://www.renci.org/
 * Liao, 8/27/2010
 *
 * Dijkstra's single-source shortest-path algorithm, parallelized with
 * OpenMP.  A small fixed 6-node graph is solved; the correct minimum
 * distances from node 0 are: 0 35 15 45 49 41.
 *
 * Original C version by Norm Matloff, CS Dept, UC Davis.
 * This C version by John Burkardt.  Distributed under the GNU LGPL license.
 *
 * Review fix: 'nth' was a shared variable written by every thread
 * (nth = omp_get_num_threads()), which is a data race under the OpenMP
 * memory model even though all threads store the same value.  It is now
 * private, with each thread querying the team size itself.
 */
# include <stdlib.h>
# include <stdio.h>
# include <time.h>
# include <omp.h>

# define NV 6

int main ( int argc, char **argv );
int *dijkstra_distance ( int ohd[NV][NV] );
void find_nearest ( int s, int e, int mind[NV], int connected[NV], int *d,
  int *v );
void init ( int ohd[NV][NV] );
void timestamp ( void );
void update_mind ( int s, int e, int mv, int connected[NV], int ohd[NV][NV],
  int mind[NV] );

/******************************************************************************/

int main ( int argc, char **argv )

/******************************************************************************/
/*
  Purpose:

    MAIN sets up the example graph, prints the distance matrix, runs
    Dijkstra's algorithm, and prints the resulting minimum distances.
*/
{
  int i;
  int i4_huge = 2147483647;   /* sentinel meaning "no direct link"       */
  int j;
  int *mind;                  /* minimum distances, owned here, freed here */
  int ohd[NV][NV];

  timestamp ( );
  fprintf ( stdout, "\n" );
  fprintf ( stdout, "DIJKSTRA_OPEN_MP\n" );
  fprintf ( stdout, " C version\n" );
  fprintf ( stdout, " Use Dijkstra's algorithm to determine the minimum\n" );
  fprintf ( stdout, " distance from node 0 to each node in a graph,\n" );
  fprintf ( stdout, " given the distances between each pair of nodes.\n" );
  fprintf ( stdout, "\n" );
  fprintf ( stdout, " Although a very small example is considered, we\n" );
  fprintf ( stdout, " demonstrate the use of OpenMP directives for\n" );
  fprintf ( stdout, " parallel execution.\n" );
/*
  Initialize the problem data.
*/
  init ( ohd );
/*
  Print the distance matrix.
*/
  fprintf ( stdout, "\n" );
  fprintf ( stdout, " Distance matrix:\n" );
  fprintf ( stdout, "\n" );
  for ( i = 0; i < NV; i++ )
  {
    for ( j = 0; j < NV; j++ )
    {
      if ( ohd[i][j] == i4_huge )
      {
        fprintf ( stdout, " Inf" );
      }
      else
      {
        fprintf ( stdout, " %3d", ohd[i][j] );
      }
    }
    fprintf ( stdout, "\n" );
  }
/*
  Carry out the algorithm.
*/
  mind = dijkstra_distance ( ohd );
/*
  Print the results.
*/
  fprintf ( stdout, "\n" );
  fprintf ( stdout, " Minimum distances from node 0:\n");
  fprintf ( stdout, "\n" );
  for ( i = 0; i < NV; i++ )
  {
    fprintf ( stdout, " %2d %2d\n", i, mind[i] );
  }
/*
  Terminate.
*/
  free ( mind );

  fprintf ( stdout, "\n" );
  fprintf ( stdout, "DIJKSTRA_OPEN_MP\n" );
  fprintf ( stdout, " Normal end of execution.\n" );
  fprintf ( stdout, "\n" );
  timestamp ( );

  return 0;
}
/******************************************************************************/

int *dijkstra_distance ( int ohd[NV][NV] )

/******************************************************************************/
/*
  Purpose:

    DIJKSTRA_DISTANCE uses Dijkstra's minimum distance algorithm.

  Discussion:

    We build a tree starting with only node 0 connected (CONNECTED[0] = 1)
    and MIND[I] initialized to the one-step distance from node 0 to node I.
    Each step connects the unconnected node MV with the smallest minimum
    distance, then relaxes the remaining unconnected nodes through MV.
    After NV-1 steps all nodes are connected and MIND holds the correct
    minimum distances.

    Each thread owns the contiguous node range [my_first, my_last]; the
    per-step global minimum is combined inside a critical section.

  Parameters:

    Input, int OHD[NV][NV], the distance of the direct link between
    nodes I and J.

    Output, int DIJKSTRA_DISTANCE[NV], the minimum distance from node 0
    to each node.  Caller owns and must free the returned array.
*/
{
  int *connected;
  int i;
  int i4_huge = 2147483647;
  int md;          /* shared: global minimum distance found this step   */
  int *mind;
  int mv;          /* shared: node achieving md, or -1 if none          */
  int my_first;
  int my_id;
  int my_last;
  int my_md;
  int my_mv;
  int my_step;
  int nth;         /* private: team size, queried by each thread itself */
/*
  Start out with only node 0 connected to the tree.
*/
  connected = ( int * ) malloc ( NV * sizeof ( int ) );
  connected[0] = 1;
  for ( i = 1; i < NV; i++ )
  {
    connected[i] = 0;
  }
/*
  Initial estimate of minimum distance is the 1-step distance.
*/
  mind = ( int * ) malloc ( NV * sizeof ( int ) );
  for ( i = 0; i < NV; i++ )
  {
    mind[i] = ohd[0][i];
  }
/*
  Begin the parallel region.  NTH is private so that every thread's
  assignment from omp_get_num_threads() is race-free.
*/
# pragma omp parallel private ( my_first, my_id, my_last, my_md, my_mv, my_step, nth ) \
  shared ( connected, md, mind, mv, ohd )
  {
    my_id = omp_get_thread_num ( );
    nth = omp_get_num_threads ( );
    my_first = ( my_id * NV ) / nth;
    my_last = ( ( my_id + 1 ) * NV ) / nth - 1;
/*
  The SINGLE directive means that the block is to be executed by only
  one thread, whichever one gets here first.
*/
# pragma omp single
    {
      printf ( "\n" );
      printf ( " P%d: Parallel region begins with %d threads\n", my_id, nth );
      printf ( "\n" );
    }
    fprintf ( stdout, " P%d: First=%d Last=%d\n", my_id, my_first, my_last );

    for ( my_step = 1; my_step < NV; my_step++ )
    {
/*
  Reset the shared reduction targets.  Only one thread does this; the
  implicit barrier at the end of SINGLE means everyone sees the reset.
*/
# pragma omp single
      {
        md = i4_huge;
        mv = -1;
      }
/*
  Each thread finds the nearest unconnected node in its part of the graph.
  Some threads might have no unconnected nodes left.
*/
      find_nearest ( my_first, my_last, mind, connected, &my_md, &my_mv );
/*
  Combine the per-thread minima; only one thread at a time may compare
  against the shared MD/MV.
*/
# pragma omp critical
      {
        if ( my_md < md )
        {
          md = my_md;
          mv = my_mv;
        }
      }
/*
  This barrier means that ALL threads have executed the critical block,
  and therefore MD and MV have the correct value.  Only then can we proceed.
*/
# pragma omp barrier
/*
  If MV is -1, then NO thread found an unconnected node, so we're done
  early.  OpenMP does not allow breaking out of a parallel region, so we
  let the iteration run to the end while avoiding further updates.
  Otherwise, we connect the nearest node.
*/
# pragma omp single
      {
        if ( mv != - 1 )
        {
          connected[mv] = 1;
          printf ( " P%d: Connecting node %d.\n", my_id, mv );
        }
      }
/*
  Again, no thread may proceed until the value of CONNECTED is updated.
*/
# pragma omp barrier
/*
  Each thread updates its portion of the MIND vector: is the trip from
  0 to MV plus the step from MV to a node closer than the current record?
  (mind[mv] < i4_huge is guaranteed here because mv was only accepted
  when its distance beat the i4_huge sentinel, so no overflow occurs.)
*/
      if ( mv != -1 )
      {
        update_mind ( my_first, my_last, mv, connected, ohd, mind );
      }
/*
  All threads must finish updating before the next step begins.
*/
#pragma omp barrier
    }
/*
  Once all the nodes have been connected, we can exit.
*/
# pragma omp single
    {
      printf ( "\n" );
      printf ( " P%d: Exiting parallel region.\n", my_id );
    }
  }

  free ( connected );

  return mind;
}
/******************************************************************************/

void find_nearest ( int s, int e, int mind[NV], int connected[NV], int *d,
  int *v )

/******************************************************************************/
/*
  Purpose:

    FIND_NEAREST finds the nearest unconnected node.

  Parameters:

    Input, int S, E, the first and last nodes that are to be checked.

    Input, int MIND[NV], the currently computed minimum distance from
    node 0 to each node.

    Input, int CONNECTED[NV], is 1 for each connected node, whose minimum
    distance to node 0 has been determined.

    Output, int *D, the distance from node 0 to the nearest unconnected
    node in the range S to E; i4_huge if there is none.

    Output, int *V, the index of the nearest unconnected node in the
    range S to E; -1 if there is none.
*/
{
  int i;
  int i4_huge = 2147483647;

  *d = i4_huge;
  *v = -1;
  for ( i = s; i <= e; i++ )
  {
    if ( !connected[i] && ( mind[i] < *d ) )
    {
      *d = mind[i];
      *v = i;
    }
  }
  return;
}
/******************************************************************************/

void init ( int ohd[NV][NV] )

/******************************************************************************/
/*
  Purpose:

    INIT initializes the problem data.

  Discussion:

    The graph uses 6 nodes, and has the following diagram and
    distance matrix:

    N0--15--N2-100--N3           0   40   15  Inf  Inf  Inf
      \      |     /            40    0   20   10   25    6
       \     |    /             15   20    0  100  Inf  Inf
        40  20  10             Inf   10  100    0  Inf  Inf
          \  |  /              Inf   25  Inf  Inf    0    8
           \ | /               Inf    6  Inf  Inf    8    0
            N1
           /  \
          /    \
         6      25
        /        \
       /          \
      N5----8-----N4

  Parameters:

    Output, int OHD[NV][NV], the distance of the direct link between
    nodes I and J; i4_huge where there is no direct link.
*/
{
  int i;
  int i4_huge = 2147483647;
  int j;

  for ( i = 0; i < NV; i++ )
  {
    for ( j = 0; j < NV; j++ )
    {
      if ( i == j )
      {
        ohd[i][i] = 0;
      }
      else
      {
        ohd[i][j] = i4_huge;
      }
    }
  }

  ohd[0][1] = ohd[1][0] = 40;
  ohd[0][2] = ohd[2][0] = 15;
  ohd[1][2] = ohd[2][1] = 20;
  ohd[1][3] = ohd[3][1] = 10;
  ohd[1][4] = ohd[4][1] = 25;
  ohd[2][3] = ohd[3][2] = 100;
  ohd[1][5] = ohd[5][1] = 6;
  ohd[4][5] = ohd[5][4] = 8;

  return;
}
/******************************************************************************/

void timestamp ( void )

/******************************************************************************/
/*
  Purpose:

    TIMESTAMP prints the current YMDHMS date as a time stamp,
    e.g. "31 May 2001 09:45:54 AM".
*/
{
# define TIME_SIZE 40

  static char time_buffer[TIME_SIZE];
  const struct tm *tm;
  time_t now;

  now = time ( NULL );
  tm = localtime ( &now );
  /* Return value (length) intentionally ignored; buffer is ample. */
  strftime ( time_buffer, TIME_SIZE, "%d %B %Y %I:%M:%S %p", tm );
  printf ( "%s\n", time_buffer );

  return;
# undef TIME_SIZE
}
/******************************************************************************/

void update_mind ( int s, int e, int mv, int connected[NV], int ohd[NV][NV],
  int mind[NV] )

/******************************************************************************/
/*
  Purpose:

    UPDATE_MIND updates the minimum distance vector.

  Discussion:

    We've just determined the minimum distance to node MV.  For each
    unconnected node I in the range S to E, check whether the route from
    node 0 through MV to I is shorter than the currently known minimum.

  Parameters:

    Input, int S, E, the first and last nodes that are to be checked.

    Input, int MV, the node whose minimum distance to node 0 has just
    been determined.

    Input, int CONNECTED[NV], is 1 for each connected node, whose minimum
    distance to node 0 has been determined.

    Input, int OHD[NV][NV], the distance of the direct link between
    nodes I and J.

    Input/output, int MIND[NV], the currently computed minimum distances
    from node 0 to each node.  On output, the values for nodes S through
    E have been updated.
*/
{
  int i;
  int i4_huge = 2147483647;

  for ( i = s; i <= e; i++ )
  {
    if ( !connected[i] )
    {
      /* Guard against overflow: only relax through existing links. */
      if ( ohd[mv][i] < i4_huge )
      {
        if ( mind[mv] + ohd[mv][i] < mind[i] )
        {
          mind[i] = mind[mv] + ohd[mv][i];
        }
      }
    }
  }
  return;
}
matmul_double_avx2.c
/*
 * Square matrix multiplication
 *  A[N][N] * B[N][N] = C[N][N]
 *
 * B is pre-transposed into BT so both the SIMD and serial kernels walk
 * rows contiguously (each computes C[i][j] = sum_k A[i][k] * BT[j][k]).
 *
 * Review fixes:
 *  - check() now sums |A-B| with fabs(); the original summed signed
 *    differences, which can cancel and report "0" for wrong results.
 *  - malloc results are checked (abort on failure).
 *  - all allocations are freed before exit.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <sys/timeb.h>
#include <malloc.h>

#define N 512
//#define N 16

// Wall-clock timer, in seconds (millisecond resolution via ftime).
double read_timer() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time + (double) tm.millitm / 1000.0;
}

// malloc that aborts on failure, so kernels never see a NULL row.
static void *xmalloc(size_t size) {
    void *p = malloc(size);
    if (p == NULL) {
        fprintf(stderr, "matmul: out of memory (%zu bytes)\n", size);
        exit(EXIT_FAILURE);
    }
    return p;
}

// Fill an N x N matrix with uniform random doubles in [0, 10).
void init(double **A) {
    int i, j;
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            A[i][j] = (double)rand()/(double)(RAND_MAX/10.0);
        }
    }
}

// SIMD kernel: C = A * B where B arrives already transposed.
void matmul_simd(double **A, double **B, double **C) {
    int i, j, k;
    double temp;
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            temp = 0;
#pragma omp simd reduction(+:temp) simdlen(8)
            for (k = 0; k < N; k++) {
                temp += A[i][k] * B[j][k];
            }
            C[i][j] = temp;
        }
    }
}

// Debug helper: print the top-left 8x8 corner of a matrix.
void print_matrix(double **matrix) {
    for (int i = 0; i < 8; i++) {
        printf("[");
        for (int j = 0; j < 8; j++) {
            printf("%.2f ", matrix[i][j]);
        }
        puts("]");
    }
    puts("");
}

// Reference scalar kernel, same contract as matmul_simd.
void matmul_serial(double **A, double **B, double **C) {
    int i, j, k;
    double temp;
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            temp = 0;
            for (k = 0; k < N; k++) {
                temp += A[i][k] * B[j][k];
            }
            C[i][j] = temp;
        }
    }
}

// Sum of absolute element-wise differences between two N x N matrices.
// fabs() prevents positive and negative errors from cancelling out.
double check(double **A, double **B) {
    double difference = 0;
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            difference += fabs(A[i][j] - B[i][j]);
        }
    }
    return difference;
}

// Main: allocate, run both kernels num_runs times, report GFLOPS,
// cross-check the results, and release all memory.
int main(int argc, char *argv[]) {
    double **A        = xmalloc(sizeof(double*)*N);
    double **B        = xmalloc(sizeof(double*)*N);
    double **C_simd   = xmalloc(sizeof(double*)*N);
    double **C_serial = xmalloc(sizeof(double*)*N);
    double **BT       = xmalloc(sizeof(double*)*N);
    for (int i = 0; i < N; i++) {
        A[i]        = xmalloc(sizeof(double)*N);
        B[i]        = xmalloc(sizeof(double)*N);
        C_simd[i]   = xmalloc(sizeof(double)*N);
        C_serial[i] = xmalloc(sizeof(double)*N);
        BT[i]       = xmalloc(sizeof(double)*N);
    }

    srand(time(NULL));
    init(A);
    init(B);

    // Transpose B once so both kernels get unit-stride inner loops.
    for (int line = 0; line < N; line++) {
        for (int col = 0; col < N; col++) {
            BT[line][col] = B[col][line];
        }
    }

    int i;
    int num_runs = 10;

    double elapsed = read_timer();
    for (i = 0; i < num_runs; i++) matmul_simd(A, BT, C_simd);
    elapsed = (read_timer() - elapsed);

    double elapsed_serial = read_timer();
    for (i = 0; i < num_runs; i++) matmul_serial(A, BT, C_serial);
    elapsed_serial = (read_timer() - elapsed_serial);

    print_matrix(A);
    print_matrix(BT);
    puts("=\n");
    print_matrix(C_simd);
    puts("---------------------------------");
    print_matrix(C_serial);

    double gflops_omp    = ((((2.0 * N) * N) * N * num_runs) / (1.0e9 * elapsed));
    double gflops_serial = ((((2.0 * N) * N) * N * num_runs) / (1.0e9 * elapsed_serial));

    printf("======================================================================================================\n");
    printf("\tMatrix Multiplication: A[N][N] * B[N][N] = C[N][N], N=%d\n", N);
    printf("------------------------------------------------------------------------------------------------------\n");
    printf("Performance:\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------------------------------------------\n");
    printf("matmul_omp:\t\t%4f\t%4f\n", elapsed, gflops_omp);
    printf("matmul_serial:\t\t%4f\t%4f\n", elapsed_serial, gflops_serial);
    printf("Correctness check: %f\n", check(C_simd, C_serial));

    // Release everything (row arrays first, then the row-pointer tables).
    for (int r = 0; r < N; r++) {
        free(A[r]); free(B[r]); free(C_simd[r]); free(C_serial[r]); free(BT[r]);
    }
    free(A); free(B); free(C_simd); free(C_serial); free(BT);

    return 0;
}
bst-involutions.h
/* * Copyright 2018-2021 Kyle Berney * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef BST_INVOLUTIONS_H #define BST_INVOLUTIONS_H #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <math.h> #include <omp.h> #include <time.h> #include "common.h" #include "involutions.h" //Permutes sorted array into BST layout for n = 2^d - 1 template<typename TYPE> void permute(TYPE *A, uint64_t n, uint32_t d) { uint64_t j; TYPE temp; //Phase 1 for (uint64_t i = 0; i < n; ++i) { j = rev_d(i+1, d) - 1; if (i < j) { temp = A[i]; A[i] = A[j]; A[j] = temp; } } //Phase 2 for (uint64_t i = 0; i < n; ++i) { if (i+1 > (n-1)/2) { //leafs j = rev_b(i+1, d, d-1) - 1; } else { //internals j = rev_b(i+1, d, d - (__builtin_clzll(i+1) - (64 - d) + 1)) - 1; } if (i < j) { temp = A[i]; A[i] = A[j]; A[j] = temp; } } } //Permutes sorted array into BST layout for n = 2^d - 1 using p processors template<typename TYPE> void permute_parallel(TYPE *A, uint64_t n, uint32_t d, uint32_t p) { uint64_t j; TYPE temp; //Phase 1 #pragma omp parallel for shared(A, n, d) private(j, temp) schedule(guided, B) num_threads(p) for (uint64_t i = 0; i < n; ++i) { j = rev_d(i+1, d) - 1; if (i < j) { temp = A[i]; A[i] = A[j]; A[j] = temp; } } //Phase 2 #pragma omp parallel for shared(A, n, d) private(j, temp) schedule(guided, B) num_threads(p) for (uint64_t i = 0; i < n; ++i) { if (i+1 > (n-1)/2) { //leafs j = rev_b(i+1, d, d-1) - 1; } else { //internals j = rev_b(i+1, d, d - (__builtin_clzll(i+1) - (64 - d) + 1)) - 
1; } if (i < j) { temp = A[i]; A[i] = A[j]; A[j] = temp; } } } //Gathers and shifts non-full level of leaves to the end of the array template<typename TYPE> void permute_leaves(TYPE *A, uint64_t n, uint64_t numInternals, uint64_t numLeaves) { unshuffle_dk<TYPE>(A, 2, 2*numLeaves); shift_right<TYPE>(A, 2*numLeaves, numLeaves); shuffle_dk<TYPE>(&A[numLeaves], 1, numLeaves); shift_right<TYPE>(&A[numLeaves], numInternals, numInternals - numLeaves); } //Gathers and shifts non-full level of leaves to the end of the array using p threads template<typename TYPE> void permute_leaves_parallel(TYPE *A, uint64_t n, uint64_t numInternals, uint64_t numLeaves, uint32_t p) { unshuffle_dk_parallel<TYPE>(A, 2, 2*numLeaves, p); shift_right_parallel<TYPE>(A, 2*numLeaves, numLeaves, p); shuffle_dk_parallel<TYPE>(&A[numLeaves], 1, numLeaves, p); shift_right_parallel<TYPE>(&A[numLeaves], numInternals, numInternals - numLeaves, p); } template<typename TYPE> double timePermuteBST(TYPE *A, uint64_t n, uint32_t p) { struct timespec start, end; clock_gettime(CLOCK_MONOTONIC, &start); uint32_t h = log2(n); if (n != pow(2, h+1) - 1) { //non-full tree uint64_t numInternals = pow(2, h) - 1; uint64_t numLeaves = n - numInternals; if (p == 1) { permute_leaves<TYPE>(A, n, numInternals, numLeaves); permute<TYPE>(A, n - numLeaves, h); } else { permute_leaves_parallel<TYPE>(A, n, numInternals, numLeaves, p); permute_parallel<TYPE>(A, n - numLeaves, h, p); } } else { //full tree if (p == 1) permute<TYPE>(A, n, h+1); else permute_parallel<TYPE>(A, n, h+1, p); } clock_gettime(CLOCK_MONOTONIC, &end); double ms = ((end.tv_sec*1000000000. + end.tv_nsec) - (start.tv_sec*1000000000. + start.tv_nsec)) / 1000000.; //millisecond return ms; } #endif
datac.h
/*************************************************************************** * datac.h is part of Math Graphic Library * Copyright (C) 2007-2016 Alexey Balakin <mathgl.abalakin@gmail.ru> * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU Library General Public License as * * published by the Free Software Foundation; either version 3 of the * * License, or (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU Library General Public * * License along with this program; if not, write to the * * Free Software Foundation, Inc., * * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * ***************************************************************************/ #ifndef _MGL_DATAC_H_ #define _MGL_DATAC_H_ #include "mgl2/data.h" #include "mgl2/datac_cf.h" //----------------------------------------------------------------------------- #include <vector> #include <string> //----------------------------------------------------------------------------- #ifndef SWIG dual MGL_EXPORT mglLinearC(const dual *a, long nx, long ny, long nz, mreal x, mreal y, mreal z); dual MGL_EXPORT mglSpline3C(const dual *a, long nx, long ny, long nz, mreal x, mreal y, mreal z,dual *dx=0, dual *dy=0, dual *dz=0); dual MGL_EXPORT mglSpline3Cs(const dual *a, long nx, long ny, long nz, mreal x, mreal y, mreal z); //----------------------------------------------------------------------------- /// Class for working with complex data array class MGL_EXPORT mglDataC : public mglDataA { public: using mglDataA::Momentum; long nx; ///< number of points in 1st dimensions ('x' dimension) long ny; ///< number of points in 2nd dimensions ('y' dimension) long nz; ///< 
number of points in 3d dimensions ('z' dimension) dual *a; ///< data array std::string id; ///< column (or slice) names bool link; ///< use external data (i.e. don't free it) /// Initiate by other mglDataC variable mglDataC(const mglDataC &d) { a=0; mgl_datac_set(this,&d); } // NOTE: must be constructor for mglDataC& to exclude copy one mglDataC(const mglDataA &d) { a=0; mgl_datac_set(this,&d); } #if MGL_HAVE_RVAL mglDataC(mglDataC &&d):nx(d.nx),ny(d.ny),nz(d.nz),a(d.a),id(d.id),link(d.link) { s=d.s; temp=d.temp; func=d.func; o=d.o; d.a=0; d.func=0; } #endif mglDataC(const mglDataA &re, const mglDataA &im) { a=0; mgl_datac_set_ri(this,&re,&im); } mglDataC(HCDT d) { a=0; mgl_datac_set(this, d); } mglDataC(HCDT re, HCDT im) { a=0; mgl_datac_set_ri(this, re, im); } mglDataC(bool, mglDataC *d) // NOTE: Variable d will be deleted!!! { if(d) { nx=d->nx; ny=d->ny; nz=d->nz; a=d->a; d->a=0; temp=d->temp; func=d->func; o=d->o; s=d->s; id=d->id; link=d->link; delete d; } else { a=0; Create(1); } } /// Initiate by flat array mglDataC(int size, const dual *d) { a=0; Set(d,size); } mglDataC(int rows, int cols, const dual *d) { a=0; Set(d,cols,rows); } mglDataC(int size, const double *d) { a=0; Set(d,size); } mglDataC(int rows, int cols, const double *d) { a=0; Set(d,cols,rows); } mglDataC(int size, const float *d) { a=0; Set(d,size); } mglDataC(int rows, int cols, const float *d) { a=0; Set(d,cols,rows); } mglDataC(const dual *d, int size) { a=0; Set(d,size); } mglDataC(const dual *d, int rows, int cols) { a=0; Set(d,cols,rows); } mglDataC(const double *d, int size) { a=0; Set(d,size); } mglDataC(const double *d, int rows, int cols) { a=0; Set(d,cols,rows); } mglDataC(const float *d, int size) { a=0; Set(d,size); } mglDataC(const float *d, int rows, int cols) { a=0; Set(d,cols,rows); } /// Allocate memory and copy data from std::vector<T> mglDataC(const std::vector<int> &d) { a=0; Set(d); } mglDataC(const std::vector<float> &d) { a=0; Set(d); } mglDataC(const 
std::vector<double> &d) { a=0; Set(d); } mglDataC(const std::vector<std::complex<double> > &d) { a=0; Set(d); } mglDataC(const std::vector<std::complex<float> > &d) { a=0; Set(d); } /// Read data from file mglDataC(const char *fname) { a=0; Read(fname); } /// Allocate the memory for data array and initialize it zero mglDataC(long xx=1,long yy=1,long zz=1) { a=0; Create(xx,yy,zz); } /// Delete the array virtual ~mglDataC() { if(!link && a) delete []a; } /// Move all data from variable d, and delete this variable. inline void Move(mglDataC *d) // NOTE: Variable d will be deleted!!! { if(d && d->GetNN()>1) { bool l=link; dual *b=a; nx=d->nx; ny=d->ny; nz=d->nz; a=d->a; d->a=b; temp=d->temp; func=d->func; o=d->o; s=d->s; id=d->id; link=d->link; d->link=l; delete d; } else if(d) { *this = d->a[0]; delete d; } } inline dual GetVal(long i, long j=0, long k=0) const { return mgl_datac_get_value(this,i,j,k);} inline void SetVal(dual f, long i, long j=0, long k=0) { mgl_datac_set_value(this,f,i,j,k); } /// Get sizes long GetNx() const { return nx; } long GetNy() const { return ny; } long GetNz() const { return nz; } /// Link external data array (don't delete it at exit) inline void Link(dual *A, long NX, long NY=1, long NZ=1) { mgl_datac_link(this,A,NX,NY,NZ); } inline void Link(mglDataC &d) { Link(d.a,d.nx,d.ny,d.nz); } /// Allocate memory and copy the data from the gsl_vector inline void Set(gsl_vector *m) { mgl_datac_set_vector(this,m); } /// Allocate memory and copy the data from the gsl_matrix inline void Set(gsl_matrix *m) { mgl_datac_set_matrix(this,m); } /// Allocate memory and copy the data from the (float *) array inline void Set(const float *A,long NX,long NY=1,long NZ=1) { mgl_datac_set_float(this,A,NX,NY,NZ); } /// Allocate memory and copy the data from the (double *) array inline void Set(const double *A,long NX,long NY=1,long NZ=1) { mgl_datac_set_double(this,A,NX,NY,NZ); } /// Allocate memory and copy the data from the (complex *) array inline void Set(const 
dual *A,long NX,long NY=1,long NZ=1) { mgl_datac_set_complex(this,A,NX,NY,NZ); } /// Allocate memory and scanf the data from the string inline void Set(const char *str,long NX,long NY=1,long NZ=1) { mgl_datac_set_values(this,str,NX,NY,NZ); } /// Import data from abstract type inline void Set(HCDT dat) { mgl_datac_set(this, dat); } inline void Set(const mglDataA &dat) { mgl_datac_set(this, &dat); } inline void Set(const mglDataA &re, const mglDataA &im) { mgl_datac_set_ri(this, &re, &im); } inline void Set(HCDT re, HCDT im) { mgl_datac_set_ri(this, re, im); } inline void SetAmpl(const mglDataA &ampl, const mglDataA &phase) { mgl_datac_set_ap(this, &ampl, &phase); } /// Allocate memory and copy data from std::vector<T> inline void Set(const std::vector<int> &d) { if(d.size()>0) { Create(d.size()); for(long i=0;i<nx;i++) a[i] = d[i]; } else Create(1); } inline void Set(const std::vector<float> &d) { if(d.size()>0) Set(&(a[0]),d.size()); else Create(1); } inline void Set(const std::vector<double> &d) { if(d.size()>0) Set(&(a[0]),d.size()); else Create(1); } inline void Set(const std::vector<std::complex<double> > &d) { if(d.size()>0) { Create(d.size()); for(long i=0;i<nx;i++) a[i] = d[i]; } else Create(1); } inline void Set(const std::vector<std::complex<float> > &d) { if(d.size()>0) { Create(d.size()); for(long i=0;i<nx;i++) a[i] = d[i]; } else Create(1); } /// Create or recreate the array with specified size and fill it by zero inline void Create(long mx,long my=1,long mz=1) { mgl_datac_create(this,mx,my,mz); } /// Rearange data dimensions inline void Rearrange(long mx, long my=0, long mz=0) { mgl_datac_rearrange(this,mx,my,mz); } /// Transpose dimensions of the data (generalization of Transpose) inline void Transpose(const char *dim="yx") { mgl_datac_transpose(this,dim); } /// Extend data dimensions inline void Extend(long n1, long n2=0) { mgl_datac_extend(this,n1,n2); } /// Reduce size of the data inline void Squeeze(long rx,long ry=1,long rz=1,bool smooth=false) { 
mgl_datac_squeeze(this,rx,ry,rz,smooth); } /// Crop the data inline void Crop(long n1, long n2,char dir='x') { mgl_datac_crop(this,n1,n2,dir); } /// Insert data inline void Insert(char dir, long at=0, long num=1) { mgl_datac_insert(this,dir,at,num); } /// Delete data inline void Delete(char dir, long at=0, long num=1) { mgl_datac_delete(this,dir,at,num); } /// Join with another data array inline void Join(const mglDataA &d) { mgl_datac_join(this,&d); } /// Modify the data by specified formula inline void Modify(const char *eq,long dim=0) { mgl_datac_modify(this, eq, dim); } /// Modify the data by specified formula inline void Modify(const char *eq,const mglDataA &vdat, const mglDataA &wdat) { mgl_datac_modify_vw(this,eq,&vdat,&wdat); } /// Modify the data by specified formula inline void Modify(const char *eq,const mglDataA &vdat) { mgl_datac_modify_vw(this,eq,&vdat,0); } /// Modify the data by specified formula assuming x,y,z in range [r1,r2] inline void Fill(mglBase *gr, const char *eq, const char *opt="") { mgl_datac_fill_eq(gr,this,eq,0,0,opt); } inline void Fill(mglBase *gr, const char *eq, const mglDataA &vdat, const char *opt="") { mgl_datac_fill_eq(gr,this,eq,&vdat,0,opt); } inline void Fill(mglBase *gr, const char *eq, const mglDataA &vdat, const mglDataA &wdat,const char *opt="") { mgl_datac_fill_eq(gr,this,eq,&vdat,&wdat,opt); } /// Equidistantly fill the data to range [x1,x2] in direction dir inline void Fill(dual x1,dual x2=mglNaN,char dir='x') { mgl_datac_fill(this,x1,x2,dir); } /// Fill the data by interpolated values of vdat parametrically depended on xdat,ydat,zdat for x,y,z in range [p1,p2] using global spline inline void RefillGS(const mglDataA &xdat, const mglDataA &vdat, mreal x1, mreal x2,long sl=-1) { mgl_datac_refill_gs(this,&xdat,&vdat,x1,x2,sl); } /// Fill the data by interpolated values of vdat parametrically depended on xdat,ydat,zdat for x,y,z in range [p1,p2] inline void Refill(const mglDataA &xdat, const mglDataA &vdat, mreal x1, 
mreal x2,long sl=-1) { mgl_datac_refill_x(this,&xdat,&vdat,x1,x2,sl); } inline void Refill(const mglDataA &xdat, const mglDataA &vdat, mglPoint p1, mglPoint p2,long sl=-1) { mgl_datac_refill_x(this,&xdat,&vdat,p1.x,p2.x,sl); } inline void Refill(const mglDataA &xdat, const mglDataA &ydat, const mglDataA &vdat, mglPoint p1, mglPoint p2,long sl=-1) { mgl_datac_refill_xy(this,&xdat,&ydat,&vdat,p1.x,p2.x,p1.y,p2.y,sl); } inline void Refill(const mglDataA &xdat, const mglDataA &ydat, const mglDataA &zdat, const mglDataA &vdat, mglPoint p1, mglPoint p2) { mgl_datac_refill_xyz(this,&xdat,&ydat,&zdat,&vdat,p1.x,p2.x,p1.y,p2.y,p1.z,p2.z); } /// Fill the data by interpolated values of vdat parametrically depended on xdat,ydat,zdat for x,y,z in axis range of gr inline void Refill(HMGL gr, const mglDataA &xdat, const mglDataA &vdat, long sl=-1, const char *opt="") { mgl_datac_refill_gr(gr,this,&xdat,0,0,&vdat,sl,opt); } inline void Refill(HMGL gr, const mglDataA &xdat, const mglDataA &ydat, const mglDataA &vdat, long sl=-1, const char *opt="") { mgl_datac_refill_gr(gr,this,&xdat,&ydat,0,&vdat,sl,opt); } inline void Refill(HMGL gr, const mglDataA &xdat, const mglDataA &ydat, const mglDataA &zdat, const mglDataA &vdat, const char *opt="") { mgl_datac_refill_gr(gr,this,&xdat,&ydat,&zdat,&vdat,-1,opt); } /// Put value to data element(s) inline void Put(dual val, long i=-1, long j=-1, long k=-1) { mgl_datac_put_val(this,val,i,j,k); } /// Put array to data element(s) inline void Put(const mglDataA &dat, long i=-1, long j=-1, long k=-1) { mgl_datac_put_dat(this,&dat,i,j,k); } /// Set names for columns (slices) inline void SetColumnId(const char *ids) { mgl_datac_set_id(this,ids); } /// Make new id inline void NewId() { id.clear(); } /// Read data from tab-separated text file with auto determining size inline bool Read(const char *fname) { return mgl_datac_read(this,fname); } /// Read data from text file with specifeid size inline bool Read(const char *fname,long mx,long my=1,long 
mz=1) { return mgl_datac_read_dim(this,fname,mx,my,mz); } /// Save whole data array (for ns=-1) or only ns-th slice to text file void Save(const char *fname,long ns=-1) const { mgl_datac_save(this,fname,ns); } /// Get whole data array (for ns=-1) or only ns-th slice to string std::string Get(long ns=-1) const { return mgl_datac_to_string(this,ns); } /// Read data from tab-separated text files with auto determining size which filenames are result of sprintf(fname,templ,t) where t=from:step:to inline bool ReadRange(const char *templ, double from, double to, double step=1, bool as_slice=false) { return mgl_datac_read_range(this,templ,from,to,step,as_slice); } /// Read data from tab-separated text files with auto determining size which filenames are satisfied to template (like "t_*.dat") inline bool ReadAll(const char *templ, bool as_slice=false) { return mgl_datac_read_all(this, templ, as_slice); } /// Read data from text file with size specified at beginning of the file inline bool ReadMat(const char *fname, long dim=2) { return mgl_datac_read_mat(this,fname,dim); } /// Read data array from HDF file (parse HDF4 and HDF5 files) inline int ReadHDF(const char *fname,const char *data) { return mgl_datac_read_hdf(this,fname,data); } /// Save data to HDF file void SaveHDF(const char *fname,const char *data,bool rewrite=false) const { mgl_datac_save_hdf(this,fname,data,rewrite); } /// Get real part of data values inline mglData Real() const { return mglData(true,mgl_datac_real(this)); } /// Get imaginary part of data values inline mglData Imag() const { return mglData(true,mgl_datac_imag(this)); } /// Get absolute value of data values, i.e. |u| inline mglData Abs() const { return mglData(true,mgl_datac_abs(this)); } /// Get square of absolute value of data values, i.e. 
|u|^2 inline mglData Norm() const { return mglData(true,mgl_datac_norm(this)); } /// Get argument of data values inline mglData Arg() const { return mglData(true,mgl_datac_arg(this)); } /// Get column (or slice) of the data filled by formulas of named columns inline mglDataC Column(const char *eq) const { return mglDataC(true,mgl_datac_column(this,eq)); } /// Get momentum (1D-array) of data along direction 'dir'. String looks like "x1" for median in x-direction, "x2" for width in x-dir and so on. inline mglDataC Momentum(char dir, const char *how) const { return mglDataC(true,mgl_datac_momentum(this,dir,how)); } /// Get sub-array of the data with given fixed indexes inline mglDataC SubData(long xx,long yy=-1,long zz=-1) const { return mglDataC(true,mgl_datac_subdata(this,xx,yy,zz)); } inline mglDataC SubData(const mglDataA &xx, const mglDataA &yy, const mglDataA &zz) const { return mglDataC(true,mgl_datac_subdata_ext(this,&xx,&yy,&zz)); } inline mglDataC SubData(const mglDataA &xx, const mglDataA &yy) const { return mglDataC(true,mgl_datac_subdata_ext(this,&xx,&yy,0)); } inline mglDataC SubData(const mglDataA &xx) const { return mglDataC(true,mgl_datac_subdata_ext(this,&xx,0,0)); } /// Get trace of the data array inline mglDataC Trace() const { return mglDataC(true,mgl_datac_trace(this)); } /// Get array which is result of summation in given direction or directions inline mglDataC Sum(const char *dir) const { return mglDataC(true,mgl_datac_sum(this,dir)); } /// Get the data which is direct multiplication (like, d[i,j] = this[i]*a[j] and so on) inline mglDataC Combine(const mglDataA &dat) const { return mglDataC(true,mgl_datac_combine(this,&dat)); } /// Resize the data to new size of box [x1,x2]*[y1,y2]*[z1,z2] inline mglDataC Resize(long mx,long my=1,long mz=1, mreal x1=0,mreal x2=1, mreal y1=0,mreal y2=1, mreal z1=0,mreal z2=1) const { return mglDataC(true,mgl_datac_resize_box(this,mx,my,mz,x1,x2,y1,y2,z1,z2)); } /// Get array which values is result of 
interpolation this for coordinates from other arrays inline mglDataC Evaluate(const mglData &idat, bool norm=true) const { return mglDataC(true,mgl_datac_evaluate(this,&idat,0,0,norm)); } inline mglDataC Evaluate(const mglData &idat, const mglData &jdat, bool norm=true) const { return mglDataC(true,mgl_datac_evaluate(this,&idat,&jdat,0,norm)); } inline mglDataC Evaluate(const mglData &idat, const mglData &jdat, const mglData &kdat, bool norm=true) const { return mglDataC(true,mgl_datac_evaluate(this,&idat,&jdat,&kdat,norm)); } /// Find correlation with another data arrays inline mglDataC Correl(const mglData &dat, const char *dir) const { return mglDataC(true,mgl_datac_correl(this,&dat,dir)); } /// Find auto correlation function inline mglDataC AutoCorrel(const char *dir) const { return mglDataC(true,mgl_datac_correl(this,this,dir)); } /// Create n-th points distribution of this data values in range [v1, v2] inline mglData Hist(long n,mreal v1=0,mreal v2=1, long nsub=0) const { return mglData(true,mgl_data_hist(this,n,v1,v2,nsub)); } /// Create n-th points distribution of this data values in range [v1, v2] with weight w inline mglData Hist(const mglDataA &w, long n,mreal v1=0,mreal v2=1, long nsub=0) const { return mglData(true,mgl_data_hist_w(this,&w,n,v1,v2,nsub)); } /// Get array which is result of maximal values in given direction or directions inline mglData Max(const char *dir) const { return mglData(true,mgl_data_max_dir(this,dir)); } /// Get array which is result of minimal values in given direction or directions inline mglData Min(const char *dir) const { return mglData(true,mgl_data_min_dir(this,dir)); } /// Cumulative summation the data in given direction or directions inline void CumSum(const char *dir) { mgl_datac_cumsum(this,dir); } /// Integrate (cumulative summation) the data in given direction or directions inline void Integral(const char *dir) { mgl_datac_integral(this,dir); } /// Differentiate the data in given direction or directions inline void 
Diff(const char *dir) { mgl_datac_diff(this,dir); } /// Double-differentiate (like laplace operator) the data in given direction inline void Diff2(const char *dir) { mgl_datac_diff2(this,dir); } /// Swap left and right part of the data in given direction (useful for fourier spectrums) inline void Swap(const char *dir) { mgl_datac_swap(this,dir); } /// Roll data along direction dir by num slices inline void Roll(char dir, long num) { mgl_datac_roll(this,dir,num); } /// Mirror the data in given direction (useful for fourier spectrums) inline void Mirror(const char *dir) { mgl_datac_mirror(this,dir); } /// Smooth the data on specified direction or directions /** String \a dir may contain: * ‘x’, ‘y’, ‘z’ for 1st, 2nd or 3d dimension; * ‘dN’ for linear averaging over N points; * ‘3’ for linear averaging over 3 points; * ‘5’ for linear averaging over 5 points. * By default quadratic averaging over 5 points is used. */ inline void Smooth(const char *dirs="xyz",mreal delta=0) { mgl_datac_smooth(this,dirs,delta); } /// Limit the data to be inside [-v,v], keeping the original sign inline void Limit(mreal v) { mgl_datac_limit(this, v); } /// Hankel transform inline void Hankel(const char *dir) { mgl_datac_hankel(this,dir); } /// Fourier transform inline void FFT(const char *dir) { mgl_datac_fft(this,dir); } /// Calculate one step of diffraction by finite-difference method with parameter q inline void Diffraction(const char *how, mreal q) { mgl_datac_diffr(this,how,q); } /// Interpolate by cubic spline the data to given point x=[0...nx-1], y=[0...ny-1], z=[0...nz-1] inline dual Spline(mreal x,mreal y=0,mreal z=0) const { return mgl_datac_spline(this, x,y,z); } /// Interpolate by cubic spline the data to given point x,\a y,\a z which normalized in range [0, 1] inline dual Spline1(mreal x,mreal y=0,mreal z=0) const { return mgl_datac_spline(this, x*(nx-1),y*(ny-1),z*(nz-1)); } /// Interpolate by linear function the data to given point x=[0...nx-1], y=[0...ny-1], z=[0...nz-1] 
inline dual Linear(mreal x,mreal y=0,mreal z=0) const { return mgl_datac_linear_ext(this,x,y,z,0,0,0); } /// Interpolate by line the data to given point x,\a y,\a z which normalized in range [0, 1] inline dual Linear1(mreal x,mreal y=0,mreal z=0) const { return mgl_datac_linear_ext(this,x*(nx-1),y*(ny-1),z*(nz-1),0,0,0); } /// Interpolate by linear function the data and return its derivatives at given point x=[0...nx-1], y=[0...ny-1], z=[0...nz-1] inline dual Linear(mglPoint &dif, mreal x,mreal y=0,mreal z=0) const { dual val,dx,dy,dz; val = mgl_datac_linear_ext(this,x,y,z, &dx, &dy, &dz); dif.Set(dx.real(),dy.real(),dz.real()); return val; } /// Interpolate by line the data and return its derivatives at given point x,\a y,\a z which normalized in range [0, 1] inline dual Linear1(mglPoint &dif, mreal x,mreal y=0,mreal z=0) const { dual val,dx,dy,dz; val = mgl_datac_linear_ext(this,x,y,z, &dx, &dy, &dz); dif.Set(dx.real(),dy.real(),dz.real()); dif.x/=nx>1?nx-1:1; dif.y/=ny>1?ny-1:1; dif.z/=nz>1?nz-1:1; return val; } /// Return an approximated x-value (root) when dat(x) = val inline mreal Solve(mreal val, bool use_spline=true, long i0=0) const { return mgl_data_solve_1d(this, val, use_spline, i0); } /// Return an approximated value (root) when dat(x) = val inline mglData Solve(mreal val, char dir, bool norm=true) const { return mglData(true,mgl_data_solve(this, val, dir, 0, norm)); } inline mglData Solve(mreal val, char dir, const mglData &i0, bool norm=true) const { return mglData(true,mgl_data_solve(this, val, dir, &i0, norm)); } /// Copy data from other mglDataA variable inline const mglDataA &operator=(const mglDataA &d) { if(this!=&d) Set(&d); return d; } inline const mglDataC &operator=(const mglDataC &d) { if(this!=&d) Set(&d); return d; } inline dual operator=(dual val) { #pragma omp parallel for for(long i=0;i<nx*ny*nz;i++) a[i]=val; return val; } inline dual operator=(mreal val) { #pragma omp parallel for for(long i=0;i<nx*ny*nz;i++) a[i]=val; return val; } 
/// Multiply the data by other one for each element inline void operator*=(const mglDataA &d) { mgl_datac_mul_dat(this,&d); } /// Divide the data by other one for each element inline void operator/=(const mglDataA &d) { mgl_datac_div_dat(this,&d); } /// Add the other data inline void operator+=(const mglDataA &d) { mgl_datac_add_dat(this,&d); } /// Subtract the other data inline void operator-=(const mglDataA &d) { mgl_datac_sub_dat(this,&d); } /// Multiply each element by the number inline void operator*=(dual d) { mgl_datac_mul_num(this,d); } /// Divide each element by the number inline void operator/=(dual d) { mgl_datac_div_num(this,d); } /// Add the number inline void operator+=(dual d) { mgl_datac_add_num(this,d); } /// Subtract the number inline void operator-=(dual d) { mgl_datac_sub_num(this,d); } #ifndef SWIG /// Direct access to the data cell inline dual &operator[](long i) { return a[i]; } #endif #ifndef DEBUG /// Get the value in given cell of the data mreal v(long i,long j=0,long k=0) const { return abs(a[i+nx*(j+ny*k)]); } /// Set the value in given cell of the data void set_v(mreal val, long i,long j=0,long k=0) { a[i+nx*(j+ny*k)]=val; } #else /// Get the value in given cell of the data with border checking mreal v(long i,long j=0,long k=0) const { return mgl_abs(mgl_datac_get_value(this,i,j,k)); } /// Set the value in given cell of the data void set_v(mreal val, long i,long j=0,long k=0) { mgl_datac_set_value(this,val,i,j,k); } #endif /// Get the complex value in given cell of the data dual vc(long i,long j=0,long k=0) const { return a[i+nx*(j+ny*k)]; } dual vcthr(long i) const { return a[i]; } /// Get the interpolated value and its derivatives in given data cell without border checking mreal valueD(mreal x,mreal y=0,mreal z=0,mreal *dx=0,mreal *dy=0,mreal *dz=0) const { dual aa,ax,ay,az; mreal res; aa = mglSpline3C(a,nx,ny,nz,x,y,z,&ax,&ay,&az); res = abs(aa); if(dx) *dx = res?(real(aa)*real(ax)+imag(aa)*imag(ax))/res:0; if(dy) *dy = 
res?(real(aa)*real(ay)+imag(aa)*imag(ay))/res:0; if(dz) *dz = res?(real(aa)*real(az)+imag(aa)*imag(az))/res:0; return res; } /// Get the interpolated value in given data cell without border checking mreal value(mreal x,mreal y=0,mreal z=0) const { return abs(mglSpline3Cs(a,nx,ny,nz,x,y,z)); } mreal vthr(long i) const { return abs(a[i]); } // add for speeding up !!! mreal dvx(long i,long j=0,long k=0) const { register long i0=i+nx*(j+ny*k); return i>0? abs(i<nx-1? (a[i0+1]-a[i0-1])/mreal(2):a[i0]-a[i0-1]) : abs(a[i0+1]-a[i0]); } mreal dvy(long i,long j=0,long k=0) const { register long i0=i+nx*(j+ny*k); return j>0? abs(j<ny-1? (a[i0+nx]-a[i0-nx])/mreal(2):a[i0]-a[i0-nx]) : abs(a[i0+nx]-a[i0]);} mreal dvz(long i,long j=0,long k=0) const { register long i0=i+nx*(j+ny*k), n=nx*ny; return k>0? abs(k<nz-1? (a[i0+n]-a[i0-n])/mreal(2):a[i0]-a[i0-n]) : abs(a[i0+n]-a[i0]); } }; //----------------------------------------------------------------------------- /// Saves result of PDE solving (|u|^2) for "Hamiltonian" ham with initial conditions ini inline mglDataC mglPDEc(mglBase *gr, const char *ham, const mglDataA &ini_re, const mglDataA &ini_im, mreal dz=0.1, mreal k0=100,const char *opt="") { return mglDataC(true, mgl_pde_solve_c(gr,ham, &ini_re, &ini_im, dz, k0,opt)); } /// Saves result of PDE solving for "Hamiltonian" ham with initial conditions ini along a curve ray (must have nx>=7 - x,y,z,px,py,pz,tau or nx=5 - x,y,px,py,tau) inline mglDataC mglQO2dc(const char *ham, const mglDataA &ini_re, const mglDataA &ini_im, const mglDataA &ray, mreal r=1, mreal k0=100) { return mglDataC(true, mgl_qo2d_solve_c(ham, &ini_re, &ini_im, &ray, r, k0, 0, 0)); } inline mglDataC mglQO2dc(const char *ham, const mglDataA &ini_re, const mglDataA &ini_im, const mglDataA &ray, mglData &xx, mglData &yy, mreal r=1, mreal k0=100) { return mglDataC(true, mgl_qo2d_solve_c(ham, &ini_re, &ini_im, &ray, r, k0, &xx, &yy)); } /// Saves result of PDE solving for "Hamiltonian" ham with initial conditions 
ini along a curve ray (must have nx>=7 - x,y,z,px,py,pz,tau or nx=5 - x,y,px,py,tau) inline mglDataC mglQO3dc(const char *ham, const mglDataA &ini_re, const mglDataA &ini_im, const mglDataA &ray, mreal r=1, mreal k0=100) { return mglDataC(true, mgl_qo3d_solve_c(ham, &ini_re, &ini_im, &ray, r, k0, 0, 0, 0)); } inline mglDataC mglQO3dc(const char *ham, const mglDataA &ini_re, const mglDataA &ini_im, const mglDataA &ray, mglData &xx, mglData &yy, mglData &zz, mreal r=1, mreal k0=100) { return mglDataC(true, mgl_qo3d_solve_c(ham, &ini_re, &ini_im, &ray, r, k0, &xx, &yy, &zz)); } //----------------------------------------------------------------------------- /// Get array as solution of tridiagonal system of equations a[i]*x[i-1]+b[i]*x[i]+c[i]*x[i+1]=d[i] /** String \a how may contain: * 'x', 'y', 'z' for solving along x-,y-,z-directions, or * 'h' for solving along hexagonal direction at x-y plain (need nx=ny), * 'c' for using periodical boundary conditions, * 'd' for diffraction/diffuse calculation. 
*/ inline mglDataC mglTridMatC(const mglDataA &A, const mglDataA &B, const mglDataA &C, const mglDataC &D, const char *how) { return mglDataC(true, mgl_datac_tridmat(&A, &B, &C, &D, how)); } //----------------------------------------------------------------------------- /// Get sub-array of the data with given fixed indexes inline mglDataC mglSubDataC(const mglDataA &dat, long xx, long yy=-1, long zz=-1) { return mglDataC(true,mgl_datac_subdata(&dat,xx,yy,zz)); } inline mglDataC mglSubDataC(const mglDataA &dat, const mglDataA &xx, const mglDataA &yy, const mglDataA &zz) { return mglDataC(true,mgl_datac_subdata_ext(&dat,&xx,&yy,&zz)); } inline mglDataC mglSubDataC(const mglDataA &dat, const mglDataA &xx, const mglDataA &yy) { return mglDataC(true,mgl_datac_subdata_ext(&dat,&xx,&yy,0)); } inline mglDataC mglSubDataC(const mglDataA &dat, const mglDataA &xx) { return mglDataC(true,mgl_datac_subdata_ext(&dat,&xx,0,0)); } //----------------------------------------------------------------------------- /// Prepare coefficients for global spline interpolation inline mglDataC mglGSplineCInit(const mglDataA &xdat, const mglDataA &ydat) { return mglDataC(true,mgl_gsplinec_init(&xdat, &ydat)); } /// Evaluate global spline (and its derivatives d1, d2 if not NULL) using prepared coefficients \a coef inline dual mglGSplineC(const mglDataA &coef, mreal dx, dual *d1=0, dual *d2=0) { return mgl_gsplinec(&coef, dx, d1,d2); } //----------------------------------------------------------------------------- #define _DN_(a) ((mglDataC *)*(a)) #define _DC_ ((mglDataC *)*d) //----------------------------------------------------------------------------- #ifndef SWIG /// Wrapper class for complex expression evaluating class MGL_EXPORT mglExprC { HAEX ex; mglExprC(const mglExprC &){} // copying is not allowed const mglExprC &operator=(const mglExprC &t){return t;} // copying is not allowed public: mglExprC(const char *expr) { ex = mgl_create_cexpr(expr); } ~mglExprC() { mgl_delete_cexpr(ex); } 
/// Return value of expression for given x,y,z variables inline dual Eval(dual x, dual y=0, dual z=0) { return mgl_cexpr_eval(ex,x,y,z); } /// Return value of expression for given x,y,z,u,v,w variables inline dual Eval(dual x, dual y, dual z, dual u, dual v, dual w) { dual var[26]; var['x'-'a']=x; var['y'-'a']=y; var['z'-'a']=z; var['u'-'a']=u; var['v'-'a']=v; var['w'-'a']=w; return mgl_cexpr_eval_v(ex,var); } /// Return value of expression for given variables inline dual Eval(dual var[26]) { return mgl_cexpr_eval_v(ex,var); } }; #endif //----------------------------------------------------------------------------- #endif #endif
affinity_display.1.c
// RUN: %libomp-compile
// RUN: env OMP_DISPLAY_AFFINITY=TRUE OMP_NUM_THREADS=4 OMP_PLACES='{0,1},{2,3},{4,5},{6,7}' %libomp-run | %python %S/check.py -c 'CHECK' %s
// REQUIRES: !abt

// Affinity Display examples
#include <stdio.h>
#include <stdlib.h> // also null is in <stddef.h>
#include <stddef.h>
#include <omp.h>
#include <string.h>

// ENVIRONMENT
// OMP_DISPLAY_AFFINITY=TRUE
// OMP_NUM_THREADS=4
// OMP_PLACES='{0,1},{2,3},{4,5},{6,7}'

// CHECK: num_threads=1 OMP: pid [0-9]+ tid [0-9]+ thread [0-4] bound to OS proc set \{([0-7])|(0,1)|(undefined)\}
// CHECK: num_threads=4 Thread id [0-3] reporting in
// CHECK: num_threads=4 OMP: pid [0-9]+ tid [0-9]+ thread [0-4] bound to OS proc set \{([0-7])|([0246],[1357])|(undefined)\}
// CHECK: num_threads=1 Default Affinity Format is:
// CHECK: num_threads=1 Affinity Format set to: host=%20H tid=%0.4n binds_to=%A
// CHECK: num_threads=4 tid=[0-3] affinity:host=[a-zA-Z0-9_.-]+[ ]+tid=000[0-4][ ]+binds_to=(([0-7])|([0246],[1357])|(undefined))

#define FORMAT_STORE 80
#define BUFFER_STORE 80

// Exercises omp_display_affinity / omp_get_affinity_format /
// omp_set_affinity_format / omp_capture_affinity; output is matched by
// the CHECK patterns above via check.py.
int main(int argc, char **argv) {
  int i, n, tid;
  // omp_get_affinity_format / omp_capture_affinity return size_t, so keep
  // both the per-call result and the running maximum unsigned to avoid the
  // signed/unsigned comparison (and int truncation) the original had.
  size_t nchars, max_req_store = 0;
  char default_format[FORMAT_STORE];
  char my_format[] = "host=%20H tid=%0.4n binds_to=%A";
  char **buffer;

  // CODE SEGMENT 1 AFFINITY DISPLAY
  omp_display_affinity(NULL); // OMP_DISPLAY_AFFINITY=TRUE,
                              // Affinity reported for 1 parallel region
  #pragma omp parallel
  {
    printf("Thread id %d reporting in.\n", omp_get_thread_num());
  }

  // Get and Display Default Affinity Format
  nchars = omp_get_affinity_format(default_format, (size_t)FORMAT_STORE);
  printf("Default Affinity Format is: %s\n", default_format);

  if (nchars > FORMAT_STORE) {
    printf("Caution: Reported Format is truncated. Increase\n");
    printf(" FORMAT_STORE by %d.\n", (int)nchars - FORMAT_STORE);
  }

  // Set Affinity Format
  omp_set_affinity_format(my_format);
  printf("Affinity Format set to: %s\n", my_format);

  // CODE SEGMENT 3 CAPTURE AFFINITY
  // Set up buffer for affinity of n threads
  n = omp_get_max_threads();
  buffer = malloc(sizeof *buffer * n);
  if (buffer == NULL) {
    fprintf(stderr, "malloc failed\n");
    return EXIT_FAILURE;
  }
  for (i = 0; i < n; i++) {
    buffer[i] = malloc(BUFFER_STORE);
    if (buffer[i] == NULL) {
      fprintf(stderr, "malloc failed\n");
      return EXIT_FAILURE;
    }
  }

  // Capture Affinity using Affinity Format set above.
  // Use critical reduction to check size of buffer areas
  #pragma omp parallel private(tid, nchars)
  {
    tid = omp_get_thread_num();
    nchars = omp_capture_affinity(buffer[tid], (size_t)BUFFER_STORE, NULL);
    #pragma omp critical
    {
      if (nchars > max_req_store)
        max_req_store = nchars;
    }
  }

  for (i = 0; i < n; i++) {
    printf("tid=%d affinity:%s:\n", i, buffer[i]);
  }
  // for 4 threads with OMP_PLACES='{0,1},{2,3},{4,5},{6,7}'
  // host=%20H tid=%0.4n binds_to=%A
  // host=<hostname> tid=0000 binds_to=0,1
  // host=<hostname> tid=0001 binds_to=2,3
  // host=<hostname> tid=0002 binds_to=4,5
  // host=<hostname> tid=0003 binds_to=6,7

  if (max_req_store > BUFFER_STORE) {
    printf("Caution: Affinity string truncated. Increase\n");
    printf(" BUFFER_STORE by %d\n", (int)(max_req_store - BUFFER_STORE));
  }

  // Release the capture buffers (the original leaked them).
  for (i = 0; i < n; i++)
    free(buffer[i]);
  free(buffer);

  return 0;
}
exact_rhs-brisbane.c
//-------------------------------------------------------------------------//
//                                                                         //
//  This benchmark is a serial C version of the NPB BT code. This C        //
//  version is developed by the Center for Manycore Programming at Seoul   //
//  National University and derived from the serial Fortran versions in    //
//  "NPB3.3-SER" developed by NAS.                                         //
//                                                                         //
//  Permission to use, copy, distribute and modify this software for any   //
//  purpose with or without fee is hereby granted. This software is        //
//  provided "as is" without express or implied warranty.                  //
//                                                                         //
//  Information on NPB 3.3, including the technical report, the original   //
//  specifications, source code, results and information on how to submit  //
//  new results, is available at:                                          //
//                                                                         //
//           http://www.nas.nasa.gov/Software/NPB/                         //
//                                                                         //
//  Send comments or suggestions for this C version to cmp@aces.snu.ac.kr  //
//                                                                         //
//          Center for Manycore Programming                                //
//          School of Computer Science and Engineering                     //
//          Seoul National University                                      //
//          Seoul 151-744, Korea                                           //
//                                                                         //
//          E-mail:  cmp@aces.snu.ac.kr                                    //
//                                                                         //
//-------------------------------------------------------------------------//

//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo,    //
//          and Jaejin Lee                                                 //
//-------------------------------------------------------------------------//

#include "header-brisbane.h"

//---------------------------------------------------------------------
// compute the right hand side based on exact solution
//
// Builds the forcing term so that the analytic solution returned by
// exact_solution() satisfies the discretized equations.  The stencil is
// applied independently in the xi, eta and zeta directions; in each
// direction a 1-D line of solution values (ue), velocity ratios (buf),
// kinetic-energy terms (cuf) and pressure-like terms (q) is precomputed
// and then differenced.  All arrays (forcing, ue, buf, cuf, q) and the
// coefficient constants (tx2, dx1tx1, xxcon*, dssp, ...) come from
// header-brisbane.h.  Finally the result is uploaded to the device via
// the brisbane task below.
//---------------------------------------------------------------------
void exact_rhs()
{
  double dtemp[5], xi, eta, zeta, dtpp;
  int m, i, j, k, ip1, im1, jp1, jm1, km1, kp1;

  //---------------------------------------------------------------------
  // initialize
  //---------------------------------------------------------------------
  // Zero the whole forcing array, including the boundary planes that the
  // flux loops below never touch.
  for (k = 0; k <= grid_points[2]-1; k++) {
    for (j = 0; j <= grid_points[1]-1; j++) {
      for (i = 0; i <= grid_points[0]-1; i++) {
        for (m = 0; m < 5; m++) {
          forcing[k][j][i][m] = 0.0;
        }
      }
    }
  }

  //---------------------------------------------------------------------
  // xi-direction flux differences
  //---------------------------------------------------------------------
  for (k = 1; k <= grid_points[2]-2; k++) {
    zeta = (double)(k) * dnzm1;
    for (j = 1; j <= grid_points[1]-2; j++) {
      eta = (double)(j) * dnym1;

      // Precompute the full i-line of exact-solution quantities.
      for (i = 0; i <= grid_points[0]-1; i++) {
        xi = (double)(i) * dnxm1;
        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[i][m] = dtemp[m];
        }
        // dtemp[0] is the density; buf holds velocity components u/rho.
        dtpp = 1.0 / dtemp[0];
        for (m = 1; m < 5; m++) {
          buf[i][m] = dtpp * dtemp[m];
        }
        cuf[i]    = buf[i][1] * buf[i][1];
        buf[i][0] = cuf[i] + buf[i][2] * buf[i][2] + buf[i][3] * buf[i][3];
        q[i] = 0.5*(buf[i][1]*ue[i][1] + buf[i][2]*ue[i][2] +
                    buf[i][3]*ue[i][3]);
      }

      // Second-order central flux differences in i for the interior.
      for (i = 1; i <= grid_points[0]-2; i++) {
        im1 = i-1;
        ip1 = i+1;

        forcing[k][j][i][0] = forcing[k][j][i][0] -
          tx2*( ue[ip1][1]-ue[im1][1] )+
          dx1tx1*(ue[ip1][0]-2.0*ue[i][0]+ue[im1][0]);

        forcing[k][j][i][1] = forcing[k][j][i][1] - tx2 * (
            (ue[ip1][1]*buf[ip1][1]+c2*(ue[ip1][4]-q[ip1]))-
            (ue[im1][1]*buf[im1][1]+c2*(ue[im1][4]-q[im1])))+
          xxcon1*(buf[ip1][1]-2.0*buf[i][1]+buf[im1][1])+
          dx2tx1*( ue[ip1][1]-2.0* ue[i][1]+ue[im1][1]);

        forcing[k][j][i][2] = forcing[k][j][i][2] - tx2 * (
            ue[ip1][2]*buf[ip1][1]-ue[im1][2]*buf[im1][1])+
          xxcon2*(buf[ip1][2]-2.0*buf[i][2]+buf[im1][2])+
          dx3tx1*( ue[ip1][2]-2.0*ue[i][2] +ue[im1][2]);

        forcing[k][j][i][3] = forcing[k][j][i][3] - tx2*(
            ue[ip1][3]*buf[ip1][1]-ue[im1][3]*buf[im1][1])+
          xxcon2*(buf[ip1][3]-2.0*buf[i][3]+buf[im1][3])+
          dx4tx1*( ue[ip1][3]-2.0* ue[i][3]+ ue[im1][3]);

        forcing[k][j][i][4] = forcing[k][j][i][4] - tx2*(
            buf[ip1][1]*(c1*ue[ip1][4]-c2*q[ip1])-
            buf[im1][1]*(c1*ue[im1][4]-c2*q[im1]))+
          0.5*xxcon3*(buf[ip1][0]-2.0*buf[i][0]+buf[im1][0])+
          xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+
          xxcon5*(buf[ip1][4]-2.0*buf[i][4]+buf[im1][4])+
          dx5tx1*( ue[ip1][4]-2.0* ue[i][4]+ ue[im1][4]);
      }

      //---------------------------------------------------------------------
      // Fourth-order dissipation
      //---------------------------------------------------------------------
      // One-sided stencils at the near boundary (i = 1, 2) ...
      for (m = 0; m < 5; m++) {
        i = 1;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
          (5.0*ue[i][m] - 4.0*ue[i+1][m] +ue[i+2][m]);
        i = 2;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
          (-4.0*ue[i-1][m] + 6.0*ue[i][m] -
            4.0*ue[i+1][m] +     ue[i+2][m]);
      }

      // ... full 5-point stencil in the interior ...
      for (i = 3; i <= grid_points[0]-4; i++) {
        for (m = 0; m < 5; m++) {
          forcing[k][j][i][m] = forcing[k][j][i][m] - dssp*
            (ue[i-2][m] - 4.0*ue[i-1][m] +
             6.0*ue[i][m] - 4.0*ue[i+1][m] + ue[i+2][m]);
        }
      }

      // ... and one-sided stencils at the far boundary.
      for (m = 0; m < 5; m++) {
        i = grid_points[0]-3;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
          (ue[i-2][m] - 4.0*ue[i-1][m] +
           6.0*ue[i][m] - 4.0*ue[i+1][m]);
        i = grid_points[0]-2;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
          (ue[i-2][m] - 4.0*ue[i-1][m] + 5.0*ue[i][m]);
      }
    }
  }

  //---------------------------------------------------------------------
  // eta-direction flux differences
  //---------------------------------------------------------------------
  // Same structure as the xi sweep, with the line index j and the
  // v-velocity component (index 2) playing the convective role.
  for (k = 1; k <= grid_points[2]-2; k++) {
    zeta = (double)(k) * dnzm1;
    for (i = 1; i <= grid_points[0]-2; i++) {
      xi = (double)(i) * dnxm1;

      for (j = 0; j <= grid_points[1]-1; j++) {
        eta = (double)(j) * dnym1;
        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[j][m] = dtemp[m];
        }
        dtpp = 1.0/dtemp[0];
        for (m = 1; m < 5; m++) {
          buf[j][m] = dtpp * dtemp[m];
        }
        cuf[j]    = buf[j][2] * buf[j][2];
        buf[j][0] = cuf[j] + buf[j][1] * buf[j][1] + buf[j][3] * buf[j][3];
        q[j] = 0.5*(buf[j][1]*ue[j][1] + buf[j][2]*ue[j][2] +
                    buf[j][3]*ue[j][3]);
      }

      for (j = 1; j <= grid_points[1]-2; j++) {
        jm1 = j-1;
        jp1 = j+1;

        forcing[k][j][i][0] = forcing[k][j][i][0] -
          ty2*( ue[jp1][2]-ue[jm1][2] )+
          dy1ty1*(ue[jp1][0]-2.0*ue[j][0]+ue[jm1][0]);

        forcing[k][j][i][1] = forcing[k][j][i][1] - ty2*(
            ue[jp1][1]*buf[jp1][2]-ue[jm1][1]*buf[jm1][2])+
          yycon2*(buf[jp1][1]-2.0*buf[j][1]+buf[jm1][1])+
          dy2ty1*( ue[jp1][1]-2.0* ue[j][1]+ ue[jm1][1]);

        forcing[k][j][i][2] = forcing[k][j][i][2] - ty2*(
            (ue[jp1][2]*buf[jp1][2]+c2*(ue[jp1][4]-q[jp1]))-
            (ue[jm1][2]*buf[jm1][2]+c2*(ue[jm1][4]-q[jm1])))+
          yycon1*(buf[jp1][2]-2.0*buf[j][2]+buf[jm1][2])+
          dy3ty1*( ue[jp1][2]-2.0*ue[j][2] +ue[jm1][2]);

        forcing[k][j][i][3] = forcing[k][j][i][3] - ty2*(
            ue[jp1][3]*buf[jp1][2]-ue[jm1][3]*buf[jm1][2])+
          yycon2*(buf[jp1][3]-2.0*buf[j][3]+buf[jm1][3])+
          dy4ty1*( ue[jp1][3]-2.0*ue[j][3]+ ue[jm1][3]);

        forcing[k][j][i][4] = forcing[k][j][i][4] - ty2*(
            buf[jp1][2]*(c1*ue[jp1][4]-c2*q[jp1])-
            buf[jm1][2]*(c1*ue[jm1][4]-c2*q[jm1]))+
          0.5*yycon3*(buf[jp1][0]-2.0*buf[j][0]+ buf[jm1][0])+
          yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+
          yycon5*(buf[jp1][4]-2.0*buf[j][4]+buf[jm1][4])+
          dy5ty1*(ue[jp1][4]-2.0*ue[j][4]+ue[jm1][4]);
      }

      //---------------------------------------------------------------------
      // Fourth-order dissipation
      //---------------------------------------------------------------------
      for (m = 0; m < 5; m++) {
        j = 1;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
          (5.0*ue[j][m] - 4.0*ue[j+1][m] +ue[j+2][m]);
        j = 2;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
          (-4.0*ue[j-1][m] + 6.0*ue[j][m] -
            4.0*ue[j+1][m] +     ue[j+2][m]);
      }

      for (j = 3; j <= grid_points[1]-4; j++) {
        for (m = 0; m < 5; m++) {
          forcing[k][j][i][m] = forcing[k][j][i][m] - dssp*
            (ue[j-2][m] - 4.0*ue[j-1][m] +
             6.0*ue[j][m] - 4.0*ue[j+1][m] + ue[j+2][m]);
        }
      }

      for (m = 0; m < 5; m++) {
        j = grid_points[1]-3;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
          (ue[j-2][m] - 4.0*ue[j-1][m] +
           6.0*ue[j][m] - 4.0*ue[j+1][m]);
        j = grid_points[1]-2;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
          (ue[j-2][m] - 4.0*ue[j-1][m] + 5.0*ue[j][m]);
      }
    }
  }

  //---------------------------------------------------------------------
  // zeta-direction flux differences
  //---------------------------------------------------------------------
  // Same structure again, with line index k and the w-velocity
  // component (index 3) as the convective term.
  for (j = 1; j <= grid_points[1]-2; j++) {
    eta = (double)(j) * dnym1;
    for (i = 1; i <= grid_points[0]-2; i++) {
      xi = (double)(i) * dnxm1;

      for (k = 0; k <= grid_points[2]-1; k++) {
        zeta = (double)(k) * dnzm1;
        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[k][m] = dtemp[m];
        }
        dtpp = 1.0/dtemp[0];
        for (m = 1; m < 5; m++) {
          buf[k][m] = dtpp * dtemp[m];
        }
        cuf[k]    = buf[k][3] * buf[k][3];
        buf[k][0] = cuf[k] + buf[k][1] * buf[k][1] + buf[k][2] * buf[k][2];
        q[k] = 0.5*(buf[k][1]*ue[k][1] + buf[k][2]*ue[k][2] +
                    buf[k][3]*ue[k][3]);
      }

      for (k = 1; k <= grid_points[2]-2; k++) {
        km1 = k-1;
        kp1 = k+1;

        forcing[k][j][i][0] = forcing[k][j][i][0] -
          tz2*( ue[kp1][3]-ue[km1][3] )+
          dz1tz1*(ue[kp1][0]-2.0*ue[k][0]+ue[km1][0]);

        forcing[k][j][i][1] = forcing[k][j][i][1] - tz2 * (
            ue[kp1][1]*buf[kp1][3]-ue[km1][1]*buf[km1][3])+
          zzcon2*(buf[kp1][1]-2.0*buf[k][1]+buf[km1][1])+
          dz2tz1*( ue[kp1][1]-2.0* ue[k][1]+ ue[km1][1]);

        forcing[k][j][i][2] = forcing[k][j][i][2] - tz2 * (
            ue[kp1][2]*buf[kp1][3]-ue[km1][2]*buf[km1][3])+
          zzcon2*(buf[kp1][2]-2.0*buf[k][2]+buf[km1][2])+
          dz3tz1*(ue[kp1][2]-2.0*ue[k][2]+ue[km1][2]);

        forcing[k][j][i][3] = forcing[k][j][i][3] - tz2 * (
            (ue[kp1][3]*buf[kp1][3]+c2*(ue[kp1][4]-q[kp1]))-
            (ue[km1][3]*buf[km1][3]+c2*(ue[km1][4]-q[km1])))+
          zzcon1*(buf[kp1][3]-2.0*buf[k][3]+buf[km1][3])+
          dz4tz1*( ue[kp1][3]-2.0*ue[k][3] +ue[km1][3]);

        forcing[k][j][i][4] = forcing[k][j][i][4] - tz2 * (
            buf[kp1][3]*(c1*ue[kp1][4]-c2*q[kp1])-
            buf[km1][3]*(c1*ue[km1][4]-c2*q[km1]))+
          0.5*zzcon3*(buf[kp1][0]-2.0*buf[k][0] +buf[km1][0])+
          zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+
          zzcon5*(buf[kp1][4]-2.0*buf[k][4]+buf[km1][4])+
          dz5tz1*( ue[kp1][4]-2.0*ue[k][4]+ ue[km1][4]);
      }

      //---------------------------------------------------------------------
      // Fourth-order dissipation
      //---------------------------------------------------------------------
      for (m = 0; m < 5; m++) {
        k = 1;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
          (5.0*ue[k][m] - 4.0*ue[k+1][m] +ue[k+2][m]);
        k = 2;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
          (-4.0*ue[k-1][m] + 6.0*ue[k][m] -
            4.0*ue[k+1][m] +     ue[k+2][m]);
      }

      for (k = 3; k <= grid_points[2]-4; k++) {
        for (m = 0; m < 5; m++) {
          forcing[k][j][i][m] = forcing[k][j][i][m] - dssp*
            (ue[k-2][m] - 4.0*ue[k-1][m] +
             6.0*ue[k][m] - 4.0*ue[k+1][m] + ue[k+2][m]);
        }
      }

      for (m = 0; m < 5; m++) {
        k = grid_points[2]-3;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
          (ue[k-2][m] - 4.0*ue[k-1][m] +
           6.0*ue[k][m] - 4.0*ue[k+1][m]);
        k = grid_points[2]-2;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
          (ue[k-2][m] - 4.0*ue[k-1][m] + 5.0*ue[k][m]);
      }
    }
  }

  //---------------------------------------------------------------------
  // now change the sign of the forcing function,
  //---------------------------------------------------------------------
  for (k = 1; k <= grid_points[2]-2; k++) {
    for (j = 1; j <= grid_points[1]-2; j++) {
      for (i = 1; i <= grid_points[0]-2; i++) {
        for (m = 0; m < 5; m++) {
          forcing[k][j][i][m] = -1.0 * forcing[k][j][i][m];
        }
      }
    }
  }

  // Push the freshly computed forcing array to the device: once via the
  // OpenMP target-update pragma, and once via an explicit Brisbane
  // host-to-device task submitted synchronously (last arg true) to the
  // CPU device.
#pragma omp target update to(forcing)
  brisbane_task task0;
  brisbane_task_create(&task0);
  brisbane_task_h2d_full(task0, mem_forcing, forcing);
  brisbane_task_submit(task0, brisbane_cpu, NULL, true);
}
tree-vectorizer.h
/* Vectorizer Copyright (C) 2003-2019 Free Software Foundation, Inc. Contributed by Dorit Naishlos <dorit@il.ibm.com> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #ifndef GCC_TREE_VECTORIZER_H #define GCC_TREE_VECTORIZER_H typedef struct _stmt_vec_info *stmt_vec_info; #include "tree-data-ref.h" #include "tree-hash-traits.h" #include "target.h" /* Used for naming of new temporaries. */ enum vect_var_kind { vect_simple_var, vect_pointer_var, vect_scalar_var, vect_mask_var }; /* Defines type of operation. */ enum operation_type { unary_op = 1, binary_op, ternary_op }; /* Define type of available alignment support. */ enum dr_alignment_support { dr_unaligned_unsupported, dr_unaligned_supported, dr_explicit_realign, dr_explicit_realign_optimized, dr_aligned }; /* Define type of def-use cross-iteration cycle. */ enum vect_def_type { vect_uninitialized_def = 0, vect_constant_def = 1, vect_external_def, vect_internal_def, vect_induction_def, vect_reduction_def, vect_double_reduction_def, vect_nested_cycle, vect_unknown_def_type }; /* Define type of reduction. */ enum vect_reduction_type { TREE_CODE_REDUCTION, COND_REDUCTION, INTEGER_INDUC_COND_REDUCTION, CONST_COND_REDUCTION, /* Retain a scalar phi and use a FOLD_EXTRACT_LAST within the loop to implement: for (int i = 0; i < VF; ++i) res = cond[i] ? 
val[i] : res; */ EXTRACT_LAST_REDUCTION, /* Use a folding reduction within the loop to implement: for (int i = 0; i < VF; ++i) res = res OP val[i]; (with no reassocation). */ FOLD_LEFT_REDUCTION }; #define VECTORIZABLE_CYCLE_DEF(D) (((D) == vect_reduction_def) \ || ((D) == vect_double_reduction_def) \ || ((D) == vect_nested_cycle)) /* Structure to encapsulate information about a group of like instructions to be presented to the target cost model. */ struct stmt_info_for_cost { int count; enum vect_cost_for_stmt kind; enum vect_cost_model_location where; stmt_vec_info stmt_info; int misalign; }; typedef vec<stmt_info_for_cost> stmt_vector_for_cost; /* Maps base addresses to an innermost_loop_behavior that gives the maximum known alignment for that base. */ typedef hash_map<tree_operand_hash, innermost_loop_behavior *> vec_base_alignments; /************************************************************************ SLP ************************************************************************/ typedef struct _slp_tree *slp_tree; /* A computation tree of an SLP instance. Each node corresponds to a group of stmts to be packed in a SIMD stmt. */ struct _slp_tree { /* Nodes that contain def-stmts of this node statements operands. */ vec<slp_tree> children; /* A group of scalar stmts to be vectorized together. */ vec<stmt_vec_info> stmts; /* Load permutation relative to the stores, NULL if there is no permutation. */ vec<unsigned> load_permutation; /* Vectorized stmt/s. */ vec<stmt_vec_info> vec_stmts; /* Number of vector stmts that are created to replace the group of scalar stmts. It is calculated during the transformation phase as the number of scalar elements in one scalar iteration (GROUP_SIZE) multiplied by VF divided by vector size. */ unsigned int vec_stmts_size; /* Reference count in the SLP graph. */ unsigned int refcnt; /* Whether the scalar computations use two different operators. */ bool two_operators; /* The DEF type of this node. 
*/ enum vect_def_type def_type; }; /* SLP instance is a sequence of stmts in a loop that can be packed into SIMD stmts. */ typedef struct _slp_instance { /* The root of SLP tree. */ slp_tree root; /* Size of groups of scalar stmts that will be replaced by SIMD stmt/s. */ unsigned int group_size; /* The unrolling factor required to vectorized this SLP instance. */ poly_uint64 unrolling_factor; /* The group of nodes that contain loads of this SLP instance. */ vec<slp_tree> loads; /* The SLP node containing the reduction PHIs. */ slp_tree reduc_phis; } *slp_instance; /* Access Functions. */ #define SLP_INSTANCE_TREE(S) (S)->root #define SLP_INSTANCE_GROUP_SIZE(S) (S)->group_size #define SLP_INSTANCE_UNROLLING_FACTOR(S) (S)->unrolling_factor #define SLP_INSTANCE_LOADS(S) (S)->loads #define SLP_TREE_CHILDREN(S) (S)->children #define SLP_TREE_SCALAR_STMTS(S) (S)->stmts #define SLP_TREE_VEC_STMTS(S) (S)->vec_stmts #define SLP_TREE_NUMBER_OF_VEC_STMTS(S) (S)->vec_stmts_size #define SLP_TREE_LOAD_PERMUTATION(S) (S)->load_permutation #define SLP_TREE_TWO_OPERATORS(S) (S)->two_operators #define SLP_TREE_DEF_TYPE(S) (S)->def_type /* Describes two objects whose addresses must be unequal for the vectorized loop to be valid. */ typedef std::pair<tree, tree> vec_object_pair; /* Records that vectorization is only possible if abs (EXPR) >= MIN_VALUE. UNSIGNED_P is true if we can assume that abs (EXPR) == EXPR. */ struct vec_lower_bound { vec_lower_bound () {} vec_lower_bound (tree e, bool u, poly_uint64 m) : expr (e), unsigned_p (u), min_value (m) {} tree expr; bool unsigned_p; poly_uint64 min_value; }; /* Vectorizer state shared between different analyses like vector sizes of the same CFG region. */ struct vec_info_shared { vec_info_shared(); ~vec_info_shared(); void save_datarefs(); void check_datarefs(); /* All data references. Freed by free_data_refs, so not an auto_vec. 
*/
  vec<data_reference_p> datarefs;
  /* Snapshot of DATAREFS taken by save_datarefs and compared by
     check_datarefs — NOTE(review): inferred from the save/check pair
     above; confirm against the implementation.  */
  vec<data_reference> datarefs_copy;

  /* The loop nest in which the data dependences are computed.  */
  auto_vec<loop_p> loop_nest;

  /* All data dependences.  Freed by free_dependence_relations, so not
     an auto_vec.  */
  vec<ddr_p> ddrs;
};

/* Vectorizer state common between loop and basic-block vectorization.  */
struct vec_info {
  enum vec_kind { bb, loop };

  vec_info (vec_kind, void *, vec_info_shared *);
  ~vec_info ();

  stmt_vec_info add_stmt (gimple *);
  stmt_vec_info lookup_stmt (gimple *);
  stmt_vec_info lookup_def (tree);
  stmt_vec_info lookup_single_use (tree);
  struct dr_vec_info *lookup_dr (data_reference *);
  void move_dr (stmt_vec_info, stmt_vec_info);
  void remove_stmt (stmt_vec_info);
  void replace_stmt (gimple_stmt_iterator *, stmt_vec_info, gimple *);

  /* The type of vectorization.  */
  vec_kind kind;

  /* Shared vectorizer state.  */
  vec_info_shared *shared;

  /* The mapping of GIMPLE UID to stmt_vec_info.  */
  vec<stmt_vec_info> stmt_vec_infos;

  /* All SLP instances.  */
  auto_vec<slp_instance> slp_instances;

  /* Maps base addresses to an innermost_loop_behavior that gives the maximum
     known alignment for that base.  */
  vec_base_alignments base_alignments;

  /* All interleaving chains of stores, represented by the first
     stmt in the chain.  */
  auto_vec<stmt_vec_info> grouped_stores;

  /* Cost data used by the target cost model.  */
  void *target_cost_data;

private:
  stmt_vec_info new_stmt_vec_info (gimple *stmt);
  void set_vinfo_for_stmt (gimple *, stmt_vec_info);
  void free_stmt_vec_infos ();
  void free_stmt_vec_info (stmt_vec_info);
};

struct _loop_vec_info;
struct _bb_vec_info;

/* is_a <> dispatch: a vec_info is a loop_vec_info iff its kind is loop.  */
template<>
template<>
inline bool
is_a_helper <_loop_vec_info *>::test (vec_info *i)
{
  return i->kind == vec_info::loop;
}

/* is_a <> dispatch: a vec_info is a bb_vec_info iff its kind is bb.  */
template<>
template<>
inline bool
is_a_helper <_bb_vec_info *>::test (vec_info *i)
{
  return i->kind == vec_info::bb;
}

/* In general, we can divide the vector statements in a vectorized loop
   into related groups ("rgroups") and say that for each rgroup there is
   some nS such that the rgroup operates on nS values from one scalar
   iteration followed by nS values from the next.  That is, if VF is the
   vectorization factor of the loop, the rgroup operates on a sequence:

     (1,1) (1,2) ... (1,nS) (2,1) ... (2,nS) ... (VF,1) ... (VF,nS)

   where (i,j) represents a scalar value with index j in a scalar
   iteration with index i.

   [ We use the term "rgroup" to emphasise that this grouping isn't
     necessarily the same as the grouping of statements used elsewhere.
     For example, if we implement a group of scalar loads using gather
     loads, we'll use a separate gather load for each scalar load, and
     thus each gather load will belong to its own rgroup. ]

   In general this sequence will occupy nV vectors concatenated
   together.  If these vectors have nL lanes each, the total number of
   scalar values N is given by:

       N = nS * VF = nV * nL

   None of nS, VF, nV and nL are required to be a power of 2.  nS and nV
   are compile-time constants but VF and nL can be variable (if the target
   supports variable-length vectors).

   In classical vectorization, each iteration of the vector loop would
   handle exactly VF iterations of the original scalar loop.  However,
   in a fully-masked loop, a particular iteration of the vector loop
   might handle fewer than VF iterations of the scalar loop.  The vector
   lanes that correspond to iterations of the scalar loop are said to be
   "active" and the other lanes are said to be "inactive".

   In a fully-masked loop, many rgroups need to be masked to ensure that
   they have no effect for the inactive lanes.  Each such rgroup needs a
   sequence of booleans in the same order as above, but with each (i,j)
   replaced by a boolean that indicates whether iteration i is active.
   This sequence occupies nV vector masks that again have nL lanes each.
   Thus the mask sequence as a whole consists of VF independent booleans
   that are each repeated nS times.

   We make the simplifying assumption that if a sequence of nV masks is
   suitable for one (nS,nL) pair, we can reuse it for (nS/2,nL/2) by
   VIEW_CONVERTing it.  This holds for all current targets that support
   fully-masked loops.  For example, suppose the scalar loop is:

     float *f;
     double *d;
     for (int i = 0; i < n; ++i)
       {
	 f[i * 2 + 0] += 1.0f;
	 f[i * 2 + 1] += 2.0f;
	 d[i] += 3.0;
       }

   and suppose that vectors have 256 bits.  The vectorized f accesses
   will belong to one rgroup and the vectorized d access to another:

     f rgroup: nS = 2, nV = 1, nL = 8
     d rgroup: nS = 1, nV = 1, nL = 4
	       VF = 4

     [ In this simple example the rgroups do correspond to the normal
       SLP grouping scheme. ]

   If only the first three lanes are active, the masks we need are:

     f rgroup: 1 1 | 1 1 | 1 1 | 0 0
     d rgroup:  1  |  1  |  1  |  0

   Here we can use a mask calculated for f's rgroup for d's, but not
   vice versa.

   Thus for each value of nV, it is enough to provide nV masks, with the
   mask being calculated based on the highest nL (or, equivalently, based
   on the highest nS) required by any rgroup with that nV.  We therefore
   represent the entire collection of masks as a two-level table, with the
   first level being indexed by nV - 1 (since nV == 0 doesn't exist) and
   the second being indexed by the mask index 0 <= i < nV.  */

/* The masks needed by rgroups with nV vectors, according to the
   description above.
*/
struct rgroup_masks {
  /* The largest nS for all rgroups that use these masks.  */
  unsigned int max_nscalars_per_iter;

  /* The type of mask to use, based on the highest nS recorded above.  */
  tree mask_type;

  /* A vector of nV masks, in iteration order.  */
  vec<tree> masks;
};

typedef auto_vec<rgroup_masks> vec_loop_masks;

/*-----------------------------------------------------------------*/
/* Info on vectorized loops.                                       */
/*-----------------------------------------------------------------*/
typedef struct _loop_vec_info : public vec_info {
  _loop_vec_info (struct loop *, vec_info_shared *);
  ~_loop_vec_info ();

  /* The loop to which this info struct refers to.  */
  struct loop *loop;

  /* The loop basic blocks.  */
  basic_block *bbs;

  /* Number of latch executions.  */
  tree num_itersm1;
  /* Number of iterations.  */
  tree num_iters;
  /* Number of iterations of the original loop.  */
  tree num_iters_unchanged;
  /* Condition under which this loop is analyzed and versioned.  */
  tree num_iters_assumptions;

  /* Threshold of number of iterations below which vectorzation will not be
     performed. It is calculated from MIN_PROFITABLE_ITERS and
     PARAM_MIN_VECT_LOOP_BOUND.  */
  unsigned int th;

  /* When applying loop versioning, the vector form should only be used
     if the number of scalar iterations is >= this value, on top of all
     the other requirements.  Ignored when loop versioning is not being
     used.  */
  poly_uint64 versioning_threshold;

  /* Unrolling factor  */
  poly_uint64 vectorization_factor;

  /* Maximum runtime vectorization factor, or MAX_VECTORIZATION_FACTOR
     if there is no particular limit.  */
  unsigned HOST_WIDE_INT max_vectorization_factor;

  /* The masks that a fully-masked loop should use to avoid operating
     on inactive scalars.  */
  vec_loop_masks masks;

  /* If we are using a loop mask to align memory addresses, this variable
     contains the number of vector elements that we should skip in the
     first iteration of the vector loop (i.e. the number of leading
     elements that should be false in the first mask).  */
  tree mask_skip_niters;

  /* Type of the variables to use in the WHILE_ULT call for fully-masked
     loops.  */
  tree mask_compare_type;

  /* For #pragma omp simd if (x) loops the x expression.  If constant 0,
     the loop should not be vectorized, if constant non-zero, simd_if_cond
     shouldn't be set and loop vectorized normally, if SSA_NAME, the loop
     should be versioned on that condition, using scalar loop if the condition
     is false and vectorized loop otherwise.  */
  tree simd_if_cond;

  /* Unknown DRs according to which loop was peeled.  */
  struct dr_vec_info *unaligned_dr;

  /* peeling_for_alignment indicates whether peeling for alignment will take
     place, and what the peeling factor should be:
     peeling_for_alignment = X means:
        If X=0: Peeling for alignment will not be applied.
        If X>0: Peel first X iterations.
        If X=-1: Generate a runtime test to calculate the number of iterations
                 to be peeled, using the dataref recorded in the field
                 unaligned_dr.  */
  int peeling_for_alignment;

  /* The mask used to check the alignment of pointers or arrays.  */
  int ptr_mask;

  /* Data Dependence Relations defining address ranges that are candidates
     for a run-time aliasing check.  */
  auto_vec<ddr_p> may_alias_ddrs;

  /* Data Dependence Relations defining address ranges together with segment
     lengths from which the run-time aliasing check is built.  */
  auto_vec<dr_with_seg_len_pair_t> comp_alias_ddrs;

  /* Check that the addresses of each pair of objects is unequal.  */
  auto_vec<vec_object_pair> check_unequal_addrs;

  /* List of values that are required to be nonzero.  This is used to check
     whether things like "x[i * n] += 1;" are safe and eventually gets added
     to the checks for lower bounds below.  */
  auto_vec<tree> check_nonzero;

  /* List of values that need to be checked for a minimum value.  */
  auto_vec<vec_lower_bound> lower_bounds;

  /* Statements in the loop that have data references that are candidates for a
     runtime (loop versioning) misalignment check.  */
  auto_vec<stmt_vec_info> may_misalign_stmts;

  /* Reduction cycles detected in the loop. Used in loop-aware SLP.  */
  auto_vec<stmt_vec_info> reductions;

  /* All reduction chains in the loop, represented by the first
     stmt in the chain.  */
  auto_vec<stmt_vec_info> reduction_chains;

  /* Cost vector for a single scalar iteration.  */
  auto_vec<stmt_info_for_cost> scalar_cost_vec;

  /* Map of IV base/step expressions to inserted name in the preheader.  */
  hash_map<tree_operand_hash, tree> *ivexpr_map;

  /* The unrolling factor needed to SLP the loop. In case of that pure SLP is
     applied to the loop, i.e., no unrolling is needed, this is 1.  */
  poly_uint64 slp_unrolling_factor;

  /* Cost of a single scalar iteration.  */
  int single_scalar_iteration_cost;

  /* Is the loop vectorizable? */
  bool vectorizable;

  /* Records whether we still have the option of using a fully-masked loop.  */
  bool can_fully_mask_p;

  /* True if have decided to use a fully-masked loop.  */
  bool fully_masked_p;

  /* When we have grouped data accesses with gaps, we may introduce invalid
     memory accesses.  We peel the last iteration of the loop to prevent
     this.  */
  bool peeling_for_gaps;

  /* When the number of iterations is not a multiple of the vector size
     we need to peel off iterations at the end to form an epilogue loop.  */
  bool peeling_for_niter;

  /* Reductions are canonicalized so that the last operand is the reduction
     operand.  If this places a constant into RHS1, this decanonicalizes
     GIMPLE for other phases, so we must track when this has occurred and
     fix it up.  */
  bool operands_swapped;

  /* True if there are no loop carried data dependencies in the loop.
     If loop->safelen <= 1, then this is always true, either the loop
     didn't have any loop carried data dependencies, or the loop is being
     vectorized guarded with some runtime alias checks, or couldn't
     be vectorized at all, but then this field shouldn't be used.
     For loop->safelen >= 2, the user has asserted that there are no
     backward dependencies, but there still could be loop carried forward
     dependencies in such loops.  This flag will be false if normal
     vectorizer data dependency analysis would fail or require versioning
     for alias, but because of loop->safelen >= 2 it has been vectorized
     even without versioning for alias.  E.g. in:
     #pragma omp simd
     for (int i = 0; i < m; i++)
       a[i] = a[i + k] * c;
     (or #pragma simd or #pragma ivdep) we can vectorize this and it will
     DTRT even for k > 0 && k < m, but without safelen we would not
     vectorize this, so this field would be false.  */
  bool no_data_dependencies;

  /* Mark loops having masked stores.  */
  bool has_mask_store;

  /* If if-conversion versioned this loop before conversion, this is the
     loop version without if-conversion.  */
  struct loop *scalar_loop;

  /* For loops being epilogues of already vectorized loops
     this points to the original vectorized loop.  Otherwise NULL.  */
  _loop_vec_info *orig_loop_info;

} *loop_vec_info;

/* Access Functions.  */
#define LOOP_VINFO_LOOP(L)                 (L)->loop
#define LOOP_VINFO_BBS(L)                  (L)->bbs
#define LOOP_VINFO_NITERSM1(L)             (L)->num_itersm1
#define LOOP_VINFO_NITERS(L)               (L)->num_iters
/* Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after
   prologue peeling retain total unchanged scalar loop iterations for
   cost model.
*/
#define LOOP_VINFO_NITERS_UNCHANGED(L)         (L)->num_iters_unchanged
#define LOOP_VINFO_NITERS_ASSUMPTIONS(L)       (L)->num_iters_assumptions
#define LOOP_VINFO_COST_MODEL_THRESHOLD(L)     (L)->th
#define LOOP_VINFO_VERSIONING_THRESHOLD(L)     (L)->versioning_threshold
#define LOOP_VINFO_VECTORIZABLE_P(L)           (L)->vectorizable
#define LOOP_VINFO_CAN_FULLY_MASK_P(L)         (L)->can_fully_mask_p
#define LOOP_VINFO_FULLY_MASKED_P(L)           (L)->fully_masked_p
#define LOOP_VINFO_VECT_FACTOR(L)              (L)->vectorization_factor
#define LOOP_VINFO_MAX_VECT_FACTOR(L)          (L)->max_vectorization_factor
#define LOOP_VINFO_MASKS(L)                    (L)->masks
#define LOOP_VINFO_MASK_SKIP_NITERS(L)         (L)->mask_skip_niters
#define LOOP_VINFO_MASK_COMPARE_TYPE(L)        (L)->mask_compare_type
#define LOOP_VINFO_PTR_MASK(L)                 (L)->ptr_mask
#define LOOP_VINFO_LOOP_NEST(L)                (L)->shared->loop_nest
#define LOOP_VINFO_DATAREFS(L)                 (L)->shared->datarefs
#define LOOP_VINFO_DDRS(L)                     (L)->shared->ddrs
#define LOOP_VINFO_INT_NITERS(L)               (TREE_INT_CST_LOW ((L)->num_iters))
#define LOOP_VINFO_PEELING_FOR_ALIGNMENT(L)    (L)->peeling_for_alignment
#define LOOP_VINFO_UNALIGNED_DR(L)             (L)->unaligned_dr
#define LOOP_VINFO_MAY_MISALIGN_STMTS(L)       (L)->may_misalign_stmts
#define LOOP_VINFO_MAY_ALIAS_DDRS(L)           (L)->may_alias_ddrs
#define LOOP_VINFO_COMP_ALIAS_DDRS(L)          (L)->comp_alias_ddrs
#define LOOP_VINFO_CHECK_UNEQUAL_ADDRS(L)      (L)->check_unequal_addrs
#define LOOP_VINFO_CHECK_NONZERO(L)            (L)->check_nonzero
#define LOOP_VINFO_LOWER_BOUNDS(L)             (L)->lower_bounds
#define LOOP_VINFO_GROUPED_STORES(L)           (L)->grouped_stores
#define LOOP_VINFO_SLP_INSTANCES(L)            (L)->slp_instances
#define LOOP_VINFO_SLP_UNROLLING_FACTOR(L)     (L)->slp_unrolling_factor
#define LOOP_VINFO_REDUCTIONS(L)               (L)->reductions
#define LOOP_VINFO_REDUCTION_CHAINS(L)         (L)->reduction_chains
#define LOOP_VINFO_TARGET_COST_DATA(L)         (L)->target_cost_data
#define LOOP_VINFO_PEELING_FOR_GAPS(L)         (L)->peeling_for_gaps
#define LOOP_VINFO_OPERANDS_SWAPPED(L)         (L)->operands_swapped
#define LOOP_VINFO_PEELING_FOR_NITER(L)        (L)->peeling_for_niter
#define LOOP_VINFO_NO_DATA_DEPENDENCIES(L)     (L)->no_data_dependencies
#define LOOP_VINFO_SCALAR_LOOP(L)              (L)->scalar_loop
#define LOOP_VINFO_HAS_MASK_STORE(L)           (L)->has_mask_store
#define LOOP_VINFO_SCALAR_ITERATION_COST(L)    (L)->scalar_cost_vec
#define LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST(L) (L)->single_scalar_iteration_cost
#define LOOP_VINFO_ORIG_LOOP_INFO(L)           (L)->orig_loop_info
#define LOOP_VINFO_SIMD_IF_COND(L)             (L)->simd_if_cond

/* Predicates describing which run-time checks loop versioning must add.  */
#define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L)	\
  ((L)->may_misalign_stmts.length () > 0)
#define LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L)		\
  ((L)->comp_alias_ddrs.length () > 0			\
   || (L)->check_unequal_addrs.length () > 0		\
   || (L)->lower_bounds.length () > 0)
#define LOOP_REQUIRES_VERSIONING_FOR_NITERS(L)		\
  (LOOP_VINFO_NITERS_ASSUMPTIONS (L))
#define LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND(L)	\
  (LOOP_VINFO_SIMD_IF_COND (L))
#define LOOP_REQUIRES_VERSIONING(L)			\
  (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (L)		\
   || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (L)		\
   || LOOP_REQUIRES_VERSIONING_FOR_NITERS (L)		\
   || LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND (L))

#define LOOP_VINFO_NITERS_KNOWN_P(L)		\
  (tree_fits_shwi_p ((L)->num_iters) && tree_to_shwi ((L)->num_iters) > 0)

#define LOOP_VINFO_EPILOGUE_P(L) \
  (LOOP_VINFO_ORIG_LOOP_INFO (L) != NULL)

#define LOOP_VINFO_ORIG_MAX_VECT_FACTOR(L) \
  (LOOP_VINFO_MAX_VECT_FACTOR (LOOP_VINFO_ORIG_LOOP_INFO (L)))

/* Wrapper for loop_vec_info, for tracking success/failure, where a non-NULL
   value signifies success, and a NULL value signifies failure, supporting
   propagating an opt_problem * describing the failure back up the call
   stack.  */
typedef opt_pointer_wrapper <loop_vec_info> opt_loop_vec_info;

/* Return the loop_vec_info stashed in LOOP's aux field.  */
static inline loop_vec_info
loop_vec_info_for_loop (struct loop *loop)
{
  return (loop_vec_info) loop->aux;
}

typedef struct _bb_vec_info : public vec_info {
  _bb_vec_info (gimple_stmt_iterator, gimple_stmt_iterator, vec_info_shared *);
  ~_bb_vec_info ();

  basic_block bb;
  gimple_stmt_iterator region_begin;
  gimple_stmt_iterator region_end;
} *bb_vec_info;

#define BB_VINFO_BB(B)               (B)->bb
#define BB_VINFO_GROUPED_STORES(B)   (B)->grouped_stores
#define BB_VINFO_SLP_INSTANCES(B)    (B)->slp_instances
#define BB_VINFO_DATAREFS(B)         (B)->shared->datarefs
#define BB_VINFO_DDRS(B)             (B)->shared->ddrs
#define BB_VINFO_TARGET_COST_DATA(B) (B)->target_cost_data

/* Return the bb_vec_info stashed in BB's aux field.  */
static inline bb_vec_info
vec_info_for_bb (basic_block bb)
{
  return (bb_vec_info) bb->aux;
}

/*-----------------------------------------------------------------*/
/* Info on vectorized defs.                                        */
/*-----------------------------------------------------------------*/
enum stmt_vec_info_type {
  undef_vec_info_type = 0,
  load_vec_info_type,
  store_vec_info_type,
  shift_vec_info_type,
  op_vec_info_type,
  call_vec_info_type,
  call_simd_clone_vec_info_type,
  assignment_vec_info_type,
  condition_vec_info_type,
  comparison_vec_info_type,
  reduc_vec_info_type,
  induc_vec_info_type,
  type_promotion_vec_info_type,
  type_demotion_vec_info_type,
  type_conversion_vec_info_type,
  loop_exit_ctrl_vec_info_type
};

/* Indicates whether/how a variable is used in the scope of loop/basic
   block.  */
enum vect_relevant {
  vect_unused_in_scope = 0,

  /* The def is only used outside the loop.  */
  vect_used_only_live,
  /* The def is in the inner loop, and the use is in the outer loop, and the
     use is a reduction stmt.  */
  vect_used_in_outer_by_reduction,
  /* The def is in the inner loop, and the use is in the outer loop (and is
     not part of reduction).  */
  vect_used_in_outer,

  /* defs that feed computations that end up (only) in a reduction. These
     defs may be used by non-reduction stmts, but eventually, any
     computations/values that are affected by these defs are used to compute
     a reduction (i.e. don't get stored to memory, for example). We use this
     to identify computations that we can change the order in which they are
     computed.  */
  vect_used_by_reduction,

  vect_used_in_scope
};

/* The type of vectorization that can be applied to the stmt: regular loop-based
   vectorization; pure SLP - the stmt is a part of SLP instances and does not
   have uses outside SLP instances; or hybrid SLP and loop-based - the stmt is
   a part of SLP instance and also must be loop-based vectorized, since it has
   uses outside SLP sequences.

   In the loop context the meanings of pure and hybrid SLP are slightly
   different. By saying that pure SLP is applied to the loop, we mean that we
   exploit only intra-iteration parallelism in the loop; i.e., the loop can be
   vectorized without doing any conceptual unrolling, cause we don't pack
   together stmts from different iterations, only within a single iteration.
   Loop hybrid SLP means that we exploit both intra-iteration and
   inter-iteration parallelism (e.g., number of elements in the vector is 4
   and the slp-group-size is 2, in which case we don't have
   enough parallelism within an iteration, so we obtain the rest of the
   parallelism from subsequent iterations by unrolling the loop by 2).  */
enum slp_vect_type {
  loop_vect = 0,
  pure_slp,
  hybrid
};

/* Says whether a statement is a load, a store of a vectorized statement
   result, or a store of an invariant value.  */
enum vec_load_store_type {
  VLS_LOAD,
  VLS_STORE,
  VLS_STORE_INVARIANT
};

/* Describes how we're going to vectorize an individual load or store,
   or a group of loads or stores.  */
enum vect_memory_access_type {
  /* An access to an invariant address.  This is used only for loads.  */
  VMAT_INVARIANT,

  /* A simple contiguous access.  */
  VMAT_CONTIGUOUS,

  /* A contiguous access that goes down in memory rather than up,
     with no additional permutation.
This is used only for stores of invariants.  */
  VMAT_CONTIGUOUS_DOWN,

  /* A simple contiguous access in which the elements need to be permuted
     after loading or before storing.  Only used for loop vectorization;
     SLP uses separate permutes.  */
  VMAT_CONTIGUOUS_PERMUTE,

  /* A simple contiguous access in which the elements need to be reversed
     after loading or before storing.  */
  VMAT_CONTIGUOUS_REVERSE,

  /* An access that uses IFN_LOAD_LANES or IFN_STORE_LANES.  */
  VMAT_LOAD_STORE_LANES,

  /* An access in which each scalar element is loaded or stored
     individually.  */
  VMAT_ELEMENTWISE,

  /* A hybrid of VMAT_CONTIGUOUS and VMAT_ELEMENTWISE, used for grouped
     SLP accesses.  Each unrolled iteration uses a contiguous load
     or store for the whole group, but the groups from separate iterations
     are combined in the same way as for VMAT_ELEMENTWISE.  */
  VMAT_STRIDED_SLP,

  /* The access uses gather loads or scatter stores.  */
  VMAT_GATHER_SCATTER
};

/* Vectorizer-specific information about a data reference.  */
struct dr_vec_info {
  /* The data reference itself.  */
  data_reference *dr;
  /* The statement that contains the data reference.  */
  stmt_vec_info stmt;
  /* The misalignment in bytes of the reference, or -1 if not known.  */
  int misalignment;
  /* The byte alignment that we'd ideally like the reference to have,
     and the value that misalignment is measured against.  */
  poly_uint64 target_alignment;
  /* If true the alignment of base_decl needs to be increased.  */
  bool base_misaligned;
  tree base_decl;
};

typedef struct data_reference *dr_p;

struct _stmt_vec_info {

  enum stmt_vec_info_type type;

  /* Indicates whether this stmts is part of a computation whose result is
     used outside the loop.  */
  bool live;

  /* Stmt is part of some pattern (computation idiom)  */
  bool in_pattern_p;

  /* True if the statement was created during pattern recognition as
     part of the replacement for RELATED_STMT.  This implies that the
     statement isn't part of any basic block, although for convenience
     its gimple_bb is the same as for RELATED_STMT.  */
  bool pattern_stmt_p;

  /* Is this statement vectorizable or should it be skipped in (partial)
     vectorization.  */
  bool vectorizable;

  /* The stmt to which this info struct refers to.  */
  gimple *stmt;

  /* The vec_info with respect to which STMT is vectorized.  */
  vec_info *vinfo;

  /* The vector type to be used for the LHS of this statement.  */
  tree vectype;

  /* The vectorized version of the stmt.  */
  stmt_vec_info vectorized_stmt;

  /* The following is relevant only for stmts that contain a non-scalar
     data-ref (array/pointer/struct access). A GIMPLE stmt is expected to have
     at most one such data-ref.  */
  dr_vec_info dr_aux;

  /* Information about the data-ref relative to this loop
     nest (the loop that is being considered for vectorization).  */
  innermost_loop_behavior dr_wrt_vec_loop;

  /* For loop PHI nodes, the base and evolution part of it.  This makes sure
     this information is still available in vect_update_ivs_after_vectorizer
     where we may not be able to re-analyze the PHI nodes evolution as
     peeling for the prologue loop can make it unanalyzable.  The evolution
     part is still correct after peeling, but the base may have changed from
     the version here.  */
  tree loop_phi_evolution_base_unchanged;
  tree loop_phi_evolution_part;

  /* Used for various bookkeeping purposes, generally holding a pointer to
     some other stmt S that is in some way "related" to this stmt.
     Current use of this field is:
	If this stmt is part of a pattern (i.e. the field 'in_pattern_p' is
	true): S is the "pattern stmt" that represents (and replaces) the
	sequence of stmts that constitutes the pattern.  Similarly, the
	related_stmt of the "pattern stmt" points back to this stmt (which is
	the last stmt in the original sequence of stmts that constitutes the
	pattern).  */
  stmt_vec_info related_stmt;

  /* Used to keep a sequence of def stmts of a pattern stmt if such exists.
     The sequence is attached to the original statement rather than the
     pattern statement.  */
  gimple_seq pattern_def_seq;

  /* List of datarefs that are known to have the same alignment as the dataref
     of this stmt.  */
  vec<dr_p> same_align_refs;

  /* Selected SIMD clone's function info.  First vector element
     is SIMD clone's function decl, followed by a pair of trees (base + step)
     for linear arguments (pair of NULLs for other arguments).  */
  vec<tree> simd_clone_info;

  /* Classify the def of this stmt.  */
  enum vect_def_type def_type;

  /*  Whether the stmt is SLPed, loop-based vectorized, or both.  */
  enum slp_vect_type slp_type;

  /* Interleaving and reduction chains info.  */
  /* First element in the group.  */
  stmt_vec_info first_element;
  /* Pointer to the next element in the group.  */
  stmt_vec_info next_element;
  /* The size of the group.  */
  unsigned int size;
  /* For stores, number of stores from this group seen. We vectorize the last
     one.  */
  unsigned int store_count;
  /* For loads only, the gap from the previous load. For consecutive loads, GAP
     is 1.  */
  unsigned int gap;

  /* The minimum negative dependence distance this stmt participates in
     or zero if none.  */
  unsigned int min_neg_dist;

  /* Not all stmts in the loop need to be vectorized. e.g, the increment
     of the loop induction variable and computation of array indexes. relevant
     indicates whether the stmt needs to be vectorized.  */
  enum vect_relevant relevant;

  /* For loads if this is a gather, for stores if this is a scatter.  */
  bool gather_scatter_p;

  /* True if this is an access with loop-invariant stride.  */
  bool strided_p;

  /* For both loads and stores.  */
  bool simd_lane_access_p;

  /* Classifies how the load or store is going to be implemented
     for loop vectorization.  */
  vect_memory_access_type memory_access_type;

  /* For reduction loops, this is the type of reduction.  */
  enum vect_reduction_type v_reduc_type;

  /* For CONST_COND_REDUCTION, record the reduc code.  */
  enum tree_code const_cond_reduc_code;

  /* On a reduction PHI the reduction type as detected by
     vect_force_simple_reduction.  */
  enum vect_reduction_type reduc_type;

  /* On a reduction PHI the def returned by vect_force_simple_reduction.
     On the def returned by vect_force_simple_reduction the
     corresponding PHI.  */
  stmt_vec_info reduc_def;

  /* The number of scalar stmt references from active SLP instances.  */
  unsigned int num_slp_uses;

  /* If nonzero, the lhs of the statement could be truncated to this
     many bits without affecting any users of the result.  */
  unsigned int min_output_precision;

  /* If nonzero, all non-boolean input operands have the same precision,
     and they could each be truncated to this many bits without changing
     the result.  */
  unsigned int min_input_precision;

  /* If OPERATION_BITS is nonzero, the statement could be performed on
     an integer with the sign and number of bits given by OPERATION_SIGN
     and OPERATION_BITS without changing the result.  */
  unsigned int operation_precision;
  signop operation_sign;
};

/* Information about a gather/scatter call.  */
struct gather_scatter_info {
  /* The internal function to use for the gather/scatter operation,
     or IFN_LAST if a built-in function should be used instead.  */
  internal_fn ifn;

  /* The FUNCTION_DECL for the built-in gather/scatter function,
     or null if an internal function should be used instead.  */
  tree decl;

  /* The loop-invariant base value.  */
  tree base;

  /* The original scalar offset, which is a non-loop-invariant SSA_NAME.  */
  tree offset;

  /* Each offset element should be multiplied by this amount before
     being added to the base.  */
  int scale;

  /* The definition type for the vectorized offset.  */
  enum vect_def_type offset_dt;

  /* The type of the vectorized offset.  */
  tree offset_vectype;

  /* The type of the scalar elements after loading or before storing.  */
  tree element_type;

  /* The type of the scalar elements being loaded or stored.  */
  tree memory_type;
};

/* Access Functions.
*/
#define STMT_VINFO_TYPE(S)                 (S)->type
#define STMT_VINFO_STMT(S)                 (S)->stmt

/* Return the loop_vec_info that STMT_VINFO belongs to, or NULL when it
   belongs to basic-block vectorization instead.  */
inline loop_vec_info
STMT_VINFO_LOOP_VINFO (stmt_vec_info stmt_vinfo)
{
  if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (stmt_vinfo->vinfo))
    return loop_vinfo;
  return NULL;
}

/* Return the bb_vec_info that STMT_VINFO belongs to, or NULL when it
   belongs to loop vectorization instead.  */
inline bb_vec_info
STMT_VINFO_BB_VINFO (stmt_vec_info stmt_vinfo)
{
  if (bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (stmt_vinfo->vinfo))
    return bb_vinfo;
  return NULL;
}

#define STMT_VINFO_RELEVANT(S)             (S)->relevant
#define STMT_VINFO_LIVE_P(S)               (S)->live
#define STMT_VINFO_VECTYPE(S)              (S)->vectype
#define STMT_VINFO_VEC_STMT(S)             (S)->vectorized_stmt
#define STMT_VINFO_VECTORIZABLE(S)         (S)->vectorizable
#define STMT_VINFO_DATA_REF(S)             ((S)->dr_aux.dr + 0)
#define STMT_VINFO_GATHER_SCATTER_P(S)     (S)->gather_scatter_p
#define STMT_VINFO_STRIDED_P(S)            (S)->strided_p
#define STMT_VINFO_MEMORY_ACCESS_TYPE(S)   (S)->memory_access_type
#define STMT_VINFO_SIMD_LANE_ACCESS_P(S)   (S)->simd_lane_access_p
#define STMT_VINFO_VEC_REDUCTION_TYPE(S)   (S)->v_reduc_type
#define STMT_VINFO_VEC_CONST_COND_REDUC_CODE(S) (S)->const_cond_reduc_code

#define STMT_VINFO_DR_WRT_VEC_LOOP(S)      (S)->dr_wrt_vec_loop
#define STMT_VINFO_DR_BASE_ADDRESS(S)      (S)->dr_wrt_vec_loop.base_address
#define STMT_VINFO_DR_INIT(S)              (S)->dr_wrt_vec_loop.init
#define STMT_VINFO_DR_OFFSET(S)            (S)->dr_wrt_vec_loop.offset
#define STMT_VINFO_DR_STEP(S)              (S)->dr_wrt_vec_loop.step
#define STMT_VINFO_DR_BASE_ALIGNMENT(S)    (S)->dr_wrt_vec_loop.base_alignment
#define STMT_VINFO_DR_BASE_MISALIGNMENT(S) \
  (S)->dr_wrt_vec_loop.base_misalignment
#define STMT_VINFO_DR_OFFSET_ALIGNMENT(S) \
  (S)->dr_wrt_vec_loop.offset_alignment
#define STMT_VINFO_DR_STEP_ALIGNMENT(S) \
  (S)->dr_wrt_vec_loop.step_alignment

#define STMT_VINFO_DR_INFO(S) \
  (gcc_checking_assert ((S)->dr_aux.stmt == (S)), &(S)->dr_aux)

#define STMT_VINFO_IN_PATTERN_P(S)         (S)->in_pattern_p
#define STMT_VINFO_RELATED_STMT(S)         (S)->related_stmt
#define STMT_VINFO_PATTERN_DEF_SEQ(S)      (S)->pattern_def_seq
#define STMT_VINFO_SAME_ALIGN_REFS(S)      (S)->same_align_refs
#define STMT_VINFO_SIMD_CLONE_INFO(S)	   (S)->simd_clone_info
#define STMT_VINFO_DEF_TYPE(S)             (S)->def_type
#define STMT_VINFO_GROUPED_ACCESS(S) \
  ((S)->dr_aux.dr && DR_GROUP_FIRST_ELEMENT(S))
#define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED(S) (S)->loop_phi_evolution_base_unchanged
#define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S) (S)->loop_phi_evolution_part
#define STMT_VINFO_MIN_NEG_DIST(S)	(S)->min_neg_dist
#define STMT_VINFO_NUM_SLP_USES(S)	(S)->num_slp_uses
#define STMT_VINFO_REDUC_TYPE(S)	(S)->reduc_type
#define STMT_VINFO_REDUC_DEF(S)		(S)->reduc_def

/* The group accessors below assert (in checking builds) that they are
   applied to the right kind of stmt: DR_GROUP_* requires a data
   reference, REDUC_GROUP_* requires the absence of one.  */
#define DR_GROUP_FIRST_ELEMENT(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->first_element)
#define DR_GROUP_NEXT_ELEMENT(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->next_element)
#define DR_GROUP_SIZE(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->size)
#define DR_GROUP_STORE_COUNT(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->store_count)
#define DR_GROUP_GAP(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->gap)

#define REDUC_GROUP_FIRST_ELEMENT(S) \
  (gcc_checking_assert (!(S)->dr_aux.dr), (S)->first_element)
#define REDUC_GROUP_NEXT_ELEMENT(S) \
  (gcc_checking_assert (!(S)->dr_aux.dr), (S)->next_element)
#define REDUC_GROUP_SIZE(S) \
  (gcc_checking_assert (!(S)->dr_aux.dr), (S)->size)

#define STMT_VINFO_RELEVANT_P(S)	  ((S)->relevant != vect_unused_in_scope)

#define HYBRID_SLP_STMT(S)                ((S)->slp_type == hybrid)
#define PURE_SLP_STMT(S)                  ((S)->slp_type == pure_slp)
#define STMT_SLP_TYPE(S)                   (S)->slp_type

#define VECT_MAX_COST 1000

/* The maximum number of intermediate steps required in multi-step type
   conversion.  */
#define MAX_INTERM_CVT_STEPS         3

#define MAX_VECTORIZATION_FACTOR INT_MAX

/* Nonzero if TYPE represents a (scalar) boolean type or type
   in the middle-end compatible with it (unsigned precision 1 integral
   types).  Used to determine which types should be vectorized as
   VECTOR_BOOLEAN_TYPE_P.  */
#define VECT_SCALAR_BOOLEAN_TYPE_P(TYPE) \
  (TREE_CODE (TYPE) == BOOLEAN_TYPE		\
   || ((TREE_CODE (TYPE) == INTEGER_TYPE	\
	|| TREE_CODE (TYPE) == ENUMERAL_TYPE)	\
       && TYPE_PRECISION (TYPE) == 1		\
       && TYPE_UNSIGNED (TYPE)))

/* Return true if STMT_INFO's block lives in LOOP's inner loop, i.e. we
   are doing outer-loop vectorization of a nested stmt.  */
static inline bool
nested_in_vect_loop_p (struct loop *loop, stmt_vec_info stmt_info)
{
  return (loop->inner
	  && (loop->inner == (gimple_bb (stmt_info->stmt))->loop_father));
}

/* Return TRUE if a statement represented by STMT_INFO is a part of a
   pattern.  */
static inline bool
is_pattern_stmt_p (stmt_vec_info stmt_info)
{
  return stmt_info->pattern_stmt_p;
}

/* If STMT_INFO is a pattern statement, return the statement that it
   replaces, otherwise return STMT_INFO itself.  */
inline stmt_vec_info
vect_orig_stmt (stmt_vec_info stmt_info)
{
  if (is_pattern_stmt_p (stmt_info))
    return STMT_VINFO_RELATED_STMT (stmt_info);
  return stmt_info;
}

/* Return the later statement between STMT1_INFO and STMT2_INFO,
   comparing the gimple UIDs of their original (pre-pattern) stmts.  */
static inline stmt_vec_info
get_later_stmt (stmt_vec_info stmt1_info, stmt_vec_info stmt2_info)
{
  if (gimple_uid (vect_orig_stmt (stmt1_info)->stmt)
      > gimple_uid (vect_orig_stmt (stmt2_info)->stmt))
    return stmt1_info;
  else
    return stmt2_info;
}

/* If STMT_INFO has been replaced by a pattern statement, return the
   replacement statement, otherwise return STMT_INFO itself.  */
inline stmt_vec_info
vect_stmt_to_vectorize (stmt_vec_info stmt_info)
{
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    return STMT_VINFO_RELATED_STMT (stmt_info);
  return stmt_info;
}

/* Return true if BB is a loop header.  */
static inline bool
is_loop_header_bb_p (basic_block bb)
{
  if (bb == (bb->loop_father)->header)
    return true;
  gcc_checking_assert (EDGE_COUNT (bb->preds) == 1);
  return false;
}

/* Return pow2 (X).  */
static inline int
vect_pow2 (int x)
{
  int i, res = 1;

  for (i = 0; i < x; i++)
    res *= 2;

  return res;
}

/* Alias targetm.vectorize.builtin_vectorization_cost.  */
static inline int
builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
			    tree vectype, int misalign)
{
  return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
						       vectype, misalign);
}

/* Get cost by calling cost target builtin.  */
static inline int
vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
{
  return builtin_vectorization_cost (type_of_cost, NULL, 0);
}

/* Alias targetm.vectorize.init_cost.  */
static inline void *
init_cost (struct loop *loop_info)
{
  return targetm.vectorize.init_cost (loop_info);
}

extern void dump_stmt_cost (FILE *, void *, int, enum vect_cost_for_stmt,
			    stmt_vec_info, int, unsigned,
			    enum vect_cost_model_location);

/* Alias targetm.vectorize.add_stmt_cost, with optional dumping.  */
static inline unsigned
add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
	       stmt_vec_info stmt_info, int misalign,
	       enum vect_cost_model_location where)
{
  unsigned cost = targetm.vectorize.add_stmt_cost (data, count, kind,
						   stmt_info, misalign, where);
  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_stmt_cost (dump_file, data, count, kind, stmt_info, misalign,
		    cost, where);
  return cost;
}

/* Alias targetm.vectorize.finish_cost.  */
static inline void
finish_cost (void *data, unsigned *prologue_cost,
	     unsigned *body_cost, unsigned *epilogue_cost)
{
  targetm.vectorize.finish_cost (data, prologue_cost, body_cost,
				 epilogue_cost);
}

/* Alias targetm.vectorize.destroy_cost_data.  */
static inline void
destroy_cost_data (void *data)
{
  targetm.vectorize.destroy_cost_data (data);
}

/* Feed every entry of COST_VEC to the target cost model in DATA.  */
inline void
add_stmt_costs (void *data, stmt_vector_for_cost *cost_vec)
{
  stmt_info_for_cost *cost;
  unsigned i;
  FOR_EACH_VEC_ELT (*cost_vec, i, cost)
    add_stmt_cost (data, cost->count, cost->kind, cost->stmt_info,
		   cost->misalign, cost->where);
}

/*-----------------------------------------------------------------*/
/* Info on data references alignment.                              */
/*-----------------------------------------------------------------*/
#define DR_MISALIGNMENT_UNKNOWN (-1)
#define DR_MISALIGNMENT_UNINITIALIZED (-2)

inline void
set_dr_misalignment (dr_vec_info *dr_info, int val)
{
  dr_info->misalignment = val;
}

/* Return DR_INFO's misalignment; asserts it has been initialized.  */
inline int
dr_misalignment (dr_vec_info *dr_info)
{
  int misalign = dr_info->misalignment;
  gcc_assert (misalign != DR_MISALIGNMENT_UNINITIALIZED);
  return misalign;
}

/* Reflects actual alignment of first access in the vectorized loop,
   taking into account peeling/versioning if applied.  */
#define DR_MISALIGNMENT(DR) dr_misalignment (DR)
#define SET_DR_MISALIGNMENT(DR, VAL) set_dr_misalignment (DR, VAL)

/* Only defined once DR_MISALIGNMENT is defined.  */
#define DR_TARGET_ALIGNMENT(DR) ((DR)->target_alignment)

/* Return true if data access DR_INFO is aligned to its target alignment
   (which may be less than a full vector).  */
static inline bool
aligned_access_p (dr_vec_info *dr_info)
{
  return (DR_MISALIGNMENT (dr_info) == 0);
}

/* Return TRUE if the alignment of the data access is known, and FALSE
   otherwise.  */
static inline bool
known_alignment_for_access_p (dr_vec_info *dr_info)
{
  return (DR_MISALIGNMENT (dr_info) != DR_MISALIGNMENT_UNKNOWN);
}

/* Return the minimum alignment in bytes that the vectorized version
   of DR_INFO is guaranteed to have.  The last case extracts the lowest
   set bit of the misalignment, i.e. the largest power of two dividing
   it.  */
static inline unsigned int
vect_known_alignment_in_bytes (dr_vec_info *dr_info)
{
  if (DR_MISALIGNMENT (dr_info) == DR_MISALIGNMENT_UNKNOWN)
    return TYPE_ALIGN_UNIT (TREE_TYPE (DR_REF (dr_info->dr)));
  if (DR_MISALIGNMENT (dr_info) == 0)
    return known_alignment (DR_TARGET_ALIGNMENT (dr_info));
  return DR_MISALIGNMENT (dr_info) & -DR_MISALIGNMENT (dr_info);
}

/* Return the behavior of DR_INFO with respect to the vectorization
   context (which for outer loop vectorization might not be the behavior
   recorded in DR_INFO itself).
*/ static inline innermost_loop_behavior * vect_dr_behavior (dr_vec_info *dr_info) { stmt_vec_info stmt_info = dr_info->stmt; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); if (loop_vinfo == NULL || !nested_in_vect_loop_p (LOOP_VINFO_LOOP (loop_vinfo), stmt_info)) return &DR_INNERMOST (dr_info->dr); else return &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info); } /* Return true if the vect cost model is unlimited. */ static inline bool unlimited_cost_model (loop_p loop) { if (loop != NULL && loop->force_vectorize && flag_simd_cost_model != VECT_COST_MODEL_DEFAULT) return flag_simd_cost_model == VECT_COST_MODEL_UNLIMITED; return (flag_vect_cost_model == VECT_COST_MODEL_UNLIMITED); } /* Return true if the loop described by LOOP_VINFO is fully-masked and if the first iteration should use a partial mask in order to achieve alignment. */ static inline bool vect_use_loop_mask_for_alignment_p (loop_vec_info loop_vinfo) { return (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo) && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)); } /* Return the number of vectors of type VECTYPE that are needed to get NUNITS elements. NUNITS should be based on the vectorization factor, so it is always a known multiple of the number of elements in VECTYPE. */ static inline unsigned int vect_get_num_vectors (poly_uint64 nunits, tree vectype) { return exact_div (nunits, TYPE_VECTOR_SUBPARTS (vectype)).to_constant (); } /* Return the number of copies needed for loop vectorization when a statement operates on vectors of type VECTYPE. This is the vectorization factor divided by the number of elements in VECTYPE and is always known at compile time. */ static inline unsigned int vect_get_num_copies (loop_vec_info loop_vinfo, tree vectype) { return vect_get_num_vectors (LOOP_VINFO_VECT_FACTOR (loop_vinfo), vectype); } /* Update maximum unit count *MAX_NUNITS so that it accounts for the number of units in vector type VECTYPE. *MAX_NUNITS can be 1 if we haven't yet recorded any vector types. 
*/ static inline void vect_update_max_nunits (poly_uint64 *max_nunits, tree vectype) { /* All unit counts have the form current_vector_size * X for some rational X, so two unit sizes must have a common multiple. Everything is a multiple of the initial value of 1. */ poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype); *max_nunits = force_common_multiple (*max_nunits, nunits); } /* Return the vectorization factor that should be used for costing purposes while vectorizing the loop described by LOOP_VINFO. Pick a reasonable estimate if the vectorization factor isn't known at compile time. */ static inline unsigned int vect_vf_for_cost (loop_vec_info loop_vinfo) { return estimated_poly_value (LOOP_VINFO_VECT_FACTOR (loop_vinfo)); } /* Estimate the number of elements in VEC_TYPE for costing purposes. Pick a reasonable estimate if the exact number isn't known at compile time. */ static inline unsigned int vect_nunits_for_cost (tree vec_type) { return estimated_poly_value (TYPE_VECTOR_SUBPARTS (vec_type)); } /* Return the maximum possible vectorization factor for LOOP_VINFO. */ static inline unsigned HOST_WIDE_INT vect_max_vf (loop_vec_info loop_vinfo) { unsigned HOST_WIDE_INT vf; if (LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf)) return vf; return MAX_VECTORIZATION_FACTOR; } /* Return the size of the value accessed by unvectorized data reference DR_INFO. This is only valid once STMT_VINFO_VECTYPE has been calculated for the associated gimple statement, since that guarantees that DR_INFO accesses either a scalar or a scalar equivalent. ("Scalar equivalent" here includes things like V1SI, which can be vectorized in the same way as a plain SI.) */ inline unsigned int vect_get_scalar_dr_size (dr_vec_info *dr_info) { return tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr_info->dr)))); } /* Source location + hotness information. 
*/
extern dump_user_location_t vect_location;

/* A macro for calling:
     dump_begin_scope (MSG, vect_location);
   via an RAII object, thus printing "=== MSG ===\n" to the dumpfile etc,
   and then calling
     dump_end_scope ();
   once the object goes out of scope, thus capturing the nesting of
   the scopes.

   These scopes affect dump messages within them: dump messages at the
   top level implicitly default to MSG_PRIORITY_USER_FACING, whereas
   those in a nested scope implicitly default to
   MSG_PRIORITY_INTERNALS.  */

#define DUMP_VECT_SCOPE(MSG) \
  AUTO_DUMP_SCOPE (MSG, vect_location)

/* A sentinel class for ensuring that the "vect_location" global gets
   reset at the end of a scope.

   The "vect_location" global is used during dumping and contains a
   location_t, which could contain references to a tree block via the
   ad-hoc data.  This data is used for tracking inlining information,
   but it's not a GC root; it's simply assumed that such locations
   never get accessed if the blocks are optimized away.

   Hence we need to ensure that such locations are purged at the end
   of any operations using them (e.g. via this class).  */

class auto_purge_vect_location
{
 public:
  /* Destructor (defined elsewhere) resets vect_location.  */
  ~auto_purge_vect_location ();
};

/*-----------------------------------------------------------------*/
/* Function prototypes.  */
/*-----------------------------------------------------------------*/

/* Simple loop peeling and versioning utilities for vectorizer's purposes -
   in tree-vect-loop-manip.c.
*/ extern void vect_set_loop_condition (struct loop *, loop_vec_info, tree, tree, tree, bool); extern bool slpeel_can_duplicate_loop_p (const struct loop *, const_edge); struct loop *slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *, struct loop *, edge); struct loop *vect_loop_versioning (loop_vec_info, unsigned int, bool, poly_uint64); extern struct loop *vect_do_peeling (loop_vec_info, tree, tree, tree *, tree *, tree *, int, bool, bool); extern void vect_prepare_for_masked_peels (loop_vec_info); extern dump_user_location_t find_loop_location (struct loop *); extern bool vect_can_advance_ivs_p (loop_vec_info); /* In tree-vect-stmts.c. */ extern poly_uint64 current_vector_size; extern tree get_vectype_for_scalar_type (tree); extern tree get_vectype_for_scalar_type_and_size (tree, poly_uint64); extern tree get_mask_type_for_scalar_type (tree); extern tree get_same_sized_vectype (tree, tree); extern bool vect_get_loop_mask_type (loop_vec_info); extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *, stmt_vec_info * = NULL, gimple ** = NULL); extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *, tree *, stmt_vec_info * = NULL, gimple ** = NULL); extern bool supportable_widening_operation (enum tree_code, stmt_vec_info, tree, tree, enum tree_code *, enum tree_code *, int *, vec<tree> *); extern bool supportable_narrowing_operation (enum tree_code, tree, tree, enum tree_code *, int *, vec<tree> *); extern unsigned record_stmt_cost (stmt_vector_for_cost *, int, enum vect_cost_for_stmt, stmt_vec_info, int, enum vect_cost_model_location); extern stmt_vec_info vect_finish_replace_stmt (stmt_vec_info, gimple *); extern stmt_vec_info vect_finish_stmt_generation (stmt_vec_info, gimple *, gimple_stmt_iterator *); extern opt_result vect_mark_stmts_to_be_vectorized (loop_vec_info); extern tree vect_get_store_rhs (stmt_vec_info); extern tree vect_get_vec_def_for_operand_1 (stmt_vec_info, enum vect_def_type); extern tree 
vect_get_vec_def_for_operand (tree, stmt_vec_info, tree = NULL); extern void vect_get_vec_defs (tree, tree, stmt_vec_info, vec<tree> *, vec<tree> *, slp_tree); extern void vect_get_vec_defs_for_stmt_copy (vec_info *, vec<tree> *, vec<tree> *); extern tree vect_init_vector (stmt_vec_info, tree, tree, gimple_stmt_iterator *); extern tree vect_get_vec_def_for_stmt_copy (vec_info *, tree); extern bool vect_transform_stmt (stmt_vec_info, gimple_stmt_iterator *, slp_tree, slp_instance); extern void vect_remove_stores (stmt_vec_info); extern opt_result vect_analyze_stmt (stmt_vec_info, bool *, slp_tree, slp_instance, stmt_vector_for_cost *); extern bool vectorizable_condition (stmt_vec_info, gimple_stmt_iterator *, stmt_vec_info *, bool, slp_tree, stmt_vector_for_cost *); extern bool vectorizable_shift (stmt_vec_info, gimple_stmt_iterator *, stmt_vec_info *, slp_tree, stmt_vector_for_cost *); extern void vect_get_load_cost (stmt_vec_info, int, bool, unsigned int *, unsigned int *, stmt_vector_for_cost *, stmt_vector_for_cost *, bool); extern void vect_get_store_cost (stmt_vec_info, int, unsigned int *, stmt_vector_for_cost *); extern bool vect_supportable_shift (enum tree_code, tree); extern tree vect_gen_perm_mask_any (tree, const vec_perm_indices &); extern tree vect_gen_perm_mask_checked (tree, const vec_perm_indices &); extern void optimize_mask_stores (struct loop*); extern gcall *vect_gen_while (tree, tree, tree); extern tree vect_gen_while_not (gimple_seq *, tree, tree, tree); extern opt_result vect_get_vector_types_for_stmt (stmt_vec_info, tree *, tree *); extern opt_tree vect_get_mask_type_for_stmt (stmt_vec_info); /* In tree-vect-data-refs.c. 
*/ extern bool vect_can_force_dr_alignment_p (const_tree, poly_uint64); extern enum dr_alignment_support vect_supportable_dr_alignment (dr_vec_info *, bool); extern tree vect_get_smallest_scalar_type (stmt_vec_info, HOST_WIDE_INT *, HOST_WIDE_INT *); extern opt_result vect_analyze_data_ref_dependences (loop_vec_info, unsigned int *); extern bool vect_slp_analyze_instance_dependence (slp_instance); extern opt_result vect_enhance_data_refs_alignment (loop_vec_info); extern opt_result vect_analyze_data_refs_alignment (loop_vec_info); extern opt_result vect_verify_datarefs_alignment (loop_vec_info); extern bool vect_slp_analyze_and_verify_instance_alignment (slp_instance); extern opt_result vect_analyze_data_ref_accesses (vec_info *); extern opt_result vect_prune_runtime_alias_test_list (loop_vec_info); extern bool vect_gather_scatter_fn_p (bool, bool, tree, tree, unsigned int, signop, int, internal_fn *, tree *); extern bool vect_check_gather_scatter (stmt_vec_info, loop_vec_info, gather_scatter_info *); extern opt_result vect_find_stmt_data_reference (loop_p, gimple *, vec<data_reference_p> *); extern opt_result vect_analyze_data_refs (vec_info *, poly_uint64 *); extern void vect_record_base_alignments (vec_info *); extern tree vect_create_data_ref_ptr (stmt_vec_info, tree, struct loop *, tree, tree *, gimple_stmt_iterator *, gimple **, bool, tree = NULL_TREE, tree = NULL_TREE); extern tree bump_vector_ptr (tree, gimple *, gimple_stmt_iterator *, stmt_vec_info, tree); extern void vect_copy_ref_info (tree, tree); extern tree vect_create_destination_var (tree, tree); extern bool vect_grouped_store_supported (tree, unsigned HOST_WIDE_INT); extern bool vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT, bool); extern bool vect_grouped_load_supported (tree, bool, unsigned HOST_WIDE_INT); extern bool vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT, bool); extern void vect_permute_store_chain (vec<tree> ,unsigned int, stmt_vec_info, gimple_stmt_iterator *, 
vec<tree> *); extern tree vect_setup_realignment (stmt_vec_info, gimple_stmt_iterator *, tree *, enum dr_alignment_support, tree, struct loop **); extern void vect_transform_grouped_load (stmt_vec_info, vec<tree> , int, gimple_stmt_iterator *); extern void vect_record_grouped_load_vectors (stmt_vec_info, vec<tree>); extern tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *); extern tree vect_get_new_ssa_name (tree, enum vect_var_kind, const char * = NULL); extern tree vect_create_addr_base_for_vector_ref (stmt_vec_info, gimple_seq *, tree, tree = NULL_TREE); /* In tree-vect-loop.c. */ /* FORNOW: Used in tree-parloops.c. */ extern stmt_vec_info vect_force_simple_reduction (loop_vec_info, stmt_vec_info, bool *, bool); /* Used in gimple-loop-interchange.c. */ extern bool check_reduction_path (dump_user_location_t, loop_p, gphi *, tree, enum tree_code); /* Drive for loop analysis stage. */ extern opt_loop_vec_info vect_analyze_loop (struct loop *, loop_vec_info, vec_info_shared *); extern tree vect_build_loop_niters (loop_vec_info, bool * = NULL); extern void vect_gen_vector_loop_niters (loop_vec_info, tree, tree *, tree *, bool); extern tree vect_halve_mask_nunits (tree); extern tree vect_double_mask_nunits (tree); extern void vect_record_loop_mask (loop_vec_info, vec_loop_masks *, unsigned int, tree); extern tree vect_get_loop_mask (gimple_stmt_iterator *, vec_loop_masks *, unsigned int, tree, unsigned int); /* Drive for loop transformation stage. 
*/ extern struct loop *vect_transform_loop (loop_vec_info); extern opt_loop_vec_info vect_analyze_loop_form (struct loop *, vec_info_shared *); extern bool vectorizable_live_operation (stmt_vec_info, gimple_stmt_iterator *, slp_tree, int, stmt_vec_info *, stmt_vector_for_cost *); extern bool vectorizable_reduction (stmt_vec_info, gimple_stmt_iterator *, stmt_vec_info *, slp_tree, slp_instance, stmt_vector_for_cost *); extern bool vectorizable_induction (stmt_vec_info, gimple_stmt_iterator *, stmt_vec_info *, slp_tree, stmt_vector_for_cost *); extern tree get_initial_def_for_reduction (stmt_vec_info, tree, tree *); extern bool vect_worthwhile_without_simd_p (vec_info *, tree_code); extern int vect_get_known_peeling_cost (loop_vec_info, int, int *, stmt_vector_for_cost *, stmt_vector_for_cost *, stmt_vector_for_cost *); extern tree cse_and_gimplify_to_preheader (loop_vec_info, tree); /* In tree-vect-slp.c. */ extern void vect_free_slp_instance (slp_instance, bool); extern bool vect_transform_slp_perm_load (slp_tree, vec<tree> , gimple_stmt_iterator *, poly_uint64, slp_instance, bool, unsigned *); extern bool vect_slp_analyze_operations (vec_info *); extern void vect_schedule_slp (vec_info *); extern opt_result vect_analyze_slp (vec_info *, unsigned); extern bool vect_make_slp_decision (loop_vec_info); extern void vect_detect_hybrid_slp (loop_vec_info); extern void vect_get_slp_defs (vec<tree> , slp_tree, vec<vec<tree> > *); extern bool vect_slp_bb (basic_block); extern stmt_vec_info vect_find_last_scalar_stmt_in_slp (slp_tree); extern bool is_simple_and_all_uses_invariant (stmt_vec_info, loop_vec_info); extern bool can_duplicate_and_interleave_p (unsigned int, machine_mode, unsigned int * = NULL, tree * = NULL, tree * = NULL); extern void duplicate_and_interleave (gimple_seq *, tree, vec<tree>, unsigned int, vec<tree> &); extern int vect_get_place_in_interleaving_chain (stmt_vec_info, stmt_vec_info); /* In tree-vect-patterns.c. */ /* Pattern recognition functions. 
Additional pattern recognition functions can (and will) be added in the future. */ void vect_pattern_recog (vec_info *); /* In tree-vectorizer.c. */ unsigned vectorize_loops (void); void vect_free_loop_info_assumptions (struct loop *); #endif /* GCC_TREE_VECTORIZER_H */
/* ==== Compiler.c — autogenerated Ficus compiler output ==== */
// this is autogenerated file, do not edit it. #include "ficus/ficus.h" struct _fx_Nt6option1N10Ast__typ_t_data_t; static void _fx_free_Nt6option1N10Ast__typ_t(struct _fx_Nt6option1N10Ast__typ_t_data_t** dst); struct _fx_Nt6option1N10Ast__exp_t_data_t; static void _fx_free_Nt6option1N10Ast__exp_t(struct _fx_Nt6option1N10Ast__exp_t_data_t** dst); struct _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t; static void _fx_free_Nt9Dynvec__t1N14Ast__id_info_t(struct _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t** dst); struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t; static void _fx_free_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t( struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t** dst); struct _fx_N10Ast__typ_t_data_t; static void _fx_free_N10Ast__typ_t(struct _fx_N10Ast__typ_t_data_t** dst); struct _fx_N10Ast__exp_t_data_t; static void _fx_free_N10Ast__exp_t(struct _fx_N10Ast__exp_t_data_t** dst); struct _fx_N10Ast__pat_t_data_t; static void _fx_free_N10Ast__pat_t(struct _fx_N10Ast__pat_t_data_t** dst); struct _fx_N16Ast__env_entry_t_data_t; static void _fx_free_N16Ast__env_entry_t(struct _fx_N16Ast__env_entry_t_data_t** dst); struct _fx_N16Ast__defmodule_t_data_t; static void _fx_free_N16Ast__defmodule_t(struct _fx_N16Ast__defmodule_t_data_t** dst); struct _fx_N14K_form__ktyp_t_data_t; static void _fx_free_N14K_form__ktyp_t(struct _fx_N14K_form__ktyp_t_data_t** dst); struct _fx_N14K_form__kexp_t_data_t; static void _fx_free_N14K_form__kexp_t(struct _fx_N14K_form__kexp_t_data_t** dst); struct _fx_N14C_form__ctyp_t_data_t; static void _fx_free_N14C_form__ctyp_t(struct _fx_N14C_form__ctyp_t_data_t** dst); struct _fx_N14C_form__cexp_t_data_t; static void _fx_free_N14C_form__cexp_t(struct _fx_N14C_form__cexp_t_data_t** dst); struct _fx_N15C_form__cstmt_t_data_t; static void _fx_free_N15C_form__cstmt_t(struct _fx_N15C_form__cstmt_t_data_t** dst); typedef struct _fx_Nt6option1N10Ast__typ_t_data_t { int_ rc; union { struct _fx_N10Ast__typ_t_data_t* 
Some; } u; } _fx_Nt6option1N10Ast__typ_t_data_t, *_fx_Nt6option1N10Ast__typ_t; typedef struct _fx_LS_data_t { int_ rc; struct _fx_LS_data_t* tl; fx_str_t hd; } _fx_LS_data_t, *_fx_LS; typedef struct _fx_FPS1B { int (*fp)(bool, fx_str_t*, void*); fx_fcv_t* fcv; } _fx_FPS1B; typedef struct _fx_R18Options__options_t { struct _fx_LS_data_t* app_args; fx_str_t app_filename; bool arch64; bool force_rebuild; fx_str_t build_dir; fx_str_t build_rootdir; fx_str_t cflags; fx_str_t clibs; bool compile_by_cpp; fx_str_t filename; bool gen_c; struct _fx_LS_data_t* include_path; bool debug; int_ optim_iters; int_ inline_thresh; bool enable_openmp; bool relax; bool use_preamble; bool make_app; int_ optimize_level; fx_str_t output_name; bool print_ast0; bool print_ast; bool print_k0; bool print_k; bool print_tokens; bool run_app; bool verbose; bool W_unused; } _fx_R18Options__options_t; typedef struct _fx_Ta2i { int_ t0; int_ t1; } _fx_Ta2i; typedef struct _fx_T2Ta2iS { struct _fx_Ta2i t0; fx_str_t t1; } _fx_T2Ta2iS; typedef struct _fx_R9Ast__id_t { int_ m; int_ i; int_ j; } _fx_R9Ast__id_t; typedef struct _fx_R10Ast__loc_t { int_ m_idx; int_ line0; int_ col0; int_ line1; int_ col1; } _fx_R10Ast__loc_t; typedef struct _fx_T2R9Ast__id_ti { struct _fx_R9Ast__id_t t0; int_ t1; } _fx_T2R9Ast__id_ti; typedef struct _fx_T2Bi { bool t0; int_ t1; } _fx_T2Bi; typedef struct _fx_N12Ast__scope_t { int tag; union { int_ ScBlock; struct _fx_T2Bi ScLoop; int_ ScFold; int_ ScArrMap; int_ ScMap; int_ ScTry; struct _fx_R9Ast__id_t ScFun; struct _fx_R9Ast__id_t ScClass; struct _fx_R9Ast__id_t ScInterface; int_ ScModule; } u; } _fx_N12Ast__scope_t; typedef struct _fx_LN12Ast__scope_t_data_t { int_ rc; struct _fx_LN12Ast__scope_t_data_t* tl; struct _fx_N12Ast__scope_t hd; } _fx_LN12Ast__scope_t_data_t, *_fx_LN12Ast__scope_t; typedef struct _fx_R16Ast__val_flags_t { bool val_flag_arg; bool val_flag_mutable; bool val_flag_temp; bool val_flag_tempref; bool val_flag_private; bool val_flag_subarray; bool 
val_flag_instance; struct _fx_T2R9Ast__id_ti val_flag_method; int_ val_flag_ctor; struct _fx_LN12Ast__scope_t_data_t* val_flag_global; } _fx_R16Ast__val_flags_t; typedef struct _fx_T2R9Ast__id_tN14C_form__ctyp_t { struct _fx_R9Ast__id_t t0; struct _fx_N14C_form__ctyp_t_data_t* t1; } _fx_T2R9Ast__id_tN14C_form__ctyp_t; typedef struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* tl; struct _fx_T2R9Ast__id_tN14C_form__ctyp_t hd; } _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t, *_fx_LT2R9Ast__id_tN14C_form__ctyp_t; typedef struct _fx_R23C_form__cdefinterface_t { struct _fx_R9Ast__id_t ci_name; fx_str_t ci_cname; struct _fx_R9Ast__id_t ci_id; struct _fx_R9Ast__id_t ci_vtbl; struct _fx_R9Ast__id_t ci_base; struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* ci_all_methods; struct _fx_LN12Ast__scope_t_data_t* ci_scope; struct _fx_R10Ast__loc_t ci_loc; } _fx_R23C_form__cdefinterface_t; typedef struct _fx_rR23C_form__cdefinterface_t_data_t { int_ rc; struct _fx_R23C_form__cdefinterface_t data; } _fx_rR23C_form__cdefinterface_t_data_t, *_fx_rR23C_form__cdefinterface_t; typedef struct _fx_N17Ast__fun_constr_t { int tag; union { int_ CtorVariant; struct _fx_R9Ast__id_t CtorFP; struct _fx_R9Ast__id_t CtorExn; } u; } _fx_N17Ast__fun_constr_t; typedef struct _fx_R16Ast__fun_flags_t { int_ fun_flag_pure; bool fun_flag_ccode; bool fun_flag_have_keywords; bool fun_flag_inline; bool fun_flag_nothrow; bool fun_flag_really_nothrow; bool fun_flag_private; struct _fx_N17Ast__fun_constr_t fun_flag_ctor; struct _fx_R9Ast__id_t fun_flag_method_of; bool fun_flag_uses_fv; bool fun_flag_recursive; bool fun_flag_instance; } _fx_R16Ast__fun_flags_t; typedef struct _fx_LN15C_form__cstmt_t_data_t { int_ rc; struct _fx_LN15C_form__cstmt_t_data_t* tl; struct _fx_N15C_form__cstmt_t_data_t* hd; } _fx_LN15C_form__cstmt_t_data_t, *_fx_LN15C_form__cstmt_t; typedef struct _fx_N19C_form__carg_attr_t { int tag; } _fx_N19C_form__carg_attr_t; typedef 
struct _fx_LN19C_form__carg_attr_t_data_t { int_ rc; struct _fx_LN19C_form__carg_attr_t_data_t* tl; struct _fx_N19C_form__carg_attr_t hd; } _fx_LN19C_form__carg_attr_t_data_t, *_fx_LN19C_form__carg_attr_t; typedef struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t { struct _fx_R9Ast__id_t t0; struct _fx_N14C_form__ctyp_t_data_t* t1; struct _fx_LN19C_form__carg_attr_t_data_t* t2; } _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t; typedef struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t { int_ rc; struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t* tl; struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t hd; } _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t, *_fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t; typedef struct _fx_R17C_form__cdeffun_t { struct _fx_R9Ast__id_t cf_name; fx_str_t cf_cname; struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t* cf_args; struct _fx_N14C_form__ctyp_t_data_t* cf_rt; struct _fx_LN15C_form__cstmt_t_data_t* cf_body; struct _fx_R16Ast__fun_flags_t cf_flags; struct _fx_LN12Ast__scope_t_data_t* cf_scope; struct _fx_R10Ast__loc_t cf_loc; } _fx_R17C_form__cdeffun_t; typedef struct _fx_rR17C_form__cdeffun_t_data_t { int_ rc; struct _fx_R17C_form__cdeffun_t data; } _fx_rR17C_form__cdeffun_t_data_t, *_fx_rR17C_form__cdeffun_t; typedef struct _fx_Ta2R9Ast__id_t { struct _fx_R9Ast__id_t t0; struct _fx_R9Ast__id_t t1; } _fx_Ta2R9Ast__id_t; typedef struct _fx_LR9Ast__id_t_data_t { int_ rc; struct _fx_LR9Ast__id_t_data_t* tl; struct _fx_R9Ast__id_t hd; } _fx_LR9Ast__id_t_data_t, *_fx_LR9Ast__id_t; typedef struct _fx_R17C_form__ctprops_t { bool ctp_scalar; bool ctp_complex; bool ctp_ptr; bool ctp_pass_by_ref; struct _fx_LR9Ast__id_t_data_t* ctp_make; struct _fx_Ta2R9Ast__id_t ctp_free; struct _fx_Ta2R9Ast__id_t ctp_copy; } _fx_R17C_form__ctprops_t; typedef struct _fx_R17C_form__cdeftyp_t { struct _fx_R9Ast__id_t 
ct_name; struct _fx_N14C_form__ctyp_t_data_t* ct_typ; fx_str_t ct_cname; struct _fx_R17C_form__ctprops_t ct_props; int_ ct_data_start; struct _fx_R9Ast__id_t ct_enum; struct _fx_LR9Ast__id_t_data_t* ct_ifaces; struct _fx_R9Ast__id_t ct_ifaces_id; struct _fx_LN12Ast__scope_t_data_t* ct_scope; struct _fx_R10Ast__loc_t ct_loc; } _fx_R17C_form__cdeftyp_t; typedef struct _fx_rR17C_form__cdeftyp_t_data_t { int_ rc; struct _fx_R17C_form__cdeftyp_t data; } _fx_rR17C_form__cdeftyp_t_data_t, *_fx_rR17C_form__cdeftyp_t; typedef struct _fx_Nt6option1N14C_form__cexp_t { int tag; union { struct _fx_N14C_form__cexp_t_data_t* Some; } u; } _fx_Nt6option1N14C_form__cexp_t; typedef struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t { struct _fx_R9Ast__id_t t0; struct _fx_Nt6option1N14C_form__cexp_t t1; } _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t; typedef struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t* tl; struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t hd; } _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t, *_fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t; typedef struct _fx_R18C_form__cdefenum_t { struct _fx_R9Ast__id_t cenum_name; struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t* cenum_members; fx_str_t cenum_cname; struct _fx_LN12Ast__scope_t_data_t* cenum_scope; struct _fx_R10Ast__loc_t cenum_loc; } _fx_R18C_form__cdefenum_t; typedef struct _fx_rR18C_form__cdefenum_t_data_t { int_ rc; struct _fx_R18C_form__cdefenum_t data; } _fx_rR18C_form__cdefenum_t_data_t, *_fx_rR18C_form__cdefenum_t; typedef struct _fx_R19C_form__cdefmacro_t { struct _fx_R9Ast__id_t cm_name; fx_str_t cm_cname; struct _fx_LR9Ast__id_t_data_t* cm_args; struct _fx_LN15C_form__cstmt_t_data_t* cm_body; struct _fx_LN12Ast__scope_t_data_t* cm_scope; struct _fx_R10Ast__loc_t cm_loc; } _fx_R19C_form__cdefmacro_t; typedef struct _fx_rR19C_form__cdefmacro_t_data_t { int_ rc; struct _fx_R19C_form__cdefmacro_t data; } 
_fx_rR19C_form__cdefmacro_t_data_t, *_fx_rR19C_form__cdefmacro_t; typedef struct _fx_T2R9Ast__id_tN14K_form__ktyp_t { struct _fx_R9Ast__id_t t0; struct _fx_N14K_form__ktyp_t_data_t* t1; } _fx_T2R9Ast__id_tN14K_form__ktyp_t; typedef struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* tl; struct _fx_T2R9Ast__id_tN14K_form__ktyp_t hd; } _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t, *_fx_LT2R9Ast__id_tN14K_form__ktyp_t; typedef struct _fx_R23K_form__kdefinterface_t { struct _fx_R9Ast__id_t ki_name; struct _fx_R9Ast__id_t ki_base; fx_str_t ki_cname; struct _fx_R9Ast__id_t ki_id; struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* ki_all_methods; struct _fx_LN12Ast__scope_t_data_t* ki_scope; struct _fx_R10Ast__loc_t ki_loc; } _fx_R23K_form__kdefinterface_t; typedef struct _fx_rR23K_form__kdefinterface_t_data_t { int_ rc; struct _fx_R23K_form__kdefinterface_t data; } _fx_rR23K_form__kdefinterface_t_data_t, *_fx_rR23K_form__kdefinterface_t; typedef struct _fx_R25K_form__kdefclosureinfo_t { struct _fx_R9Ast__id_t kci_arg; struct _fx_R9Ast__id_t kci_fcv_t; struct _fx_R9Ast__id_t kci_fp_typ; struct _fx_R9Ast__id_t kci_make_fp; struct _fx_R9Ast__id_t kci_wrap_f; } _fx_R25K_form__kdefclosureinfo_t; typedef struct _fx_R17K_form__kdeffun_t { struct _fx_R9Ast__id_t kf_name; fx_str_t kf_cname; struct _fx_LR9Ast__id_t_data_t* kf_params; struct _fx_N14K_form__ktyp_t_data_t* kf_rt; struct _fx_N14K_form__kexp_t_data_t* kf_body; struct _fx_R16Ast__fun_flags_t kf_flags; struct _fx_R25K_form__kdefclosureinfo_t kf_closure; struct _fx_LN12Ast__scope_t_data_t* kf_scope; struct _fx_R10Ast__loc_t kf_loc; } _fx_R17K_form__kdeffun_t; typedef struct _fx_rR17K_form__kdeffun_t_data_t { int_ rc; struct _fx_R17K_form__kdeffun_t data; } _fx_rR17K_form__kdeffun_t_data_t, *_fx_rR17K_form__kdeffun_t; typedef struct _fx_R17K_form__kdefexn_t { struct _fx_R9Ast__id_t ke_name; fx_str_t ke_cname; fx_str_t ke_base_cname; struct 
_fx_N14K_form__ktyp_t_data_t* ke_typ; bool ke_std; struct _fx_R9Ast__id_t ke_tag; struct _fx_R9Ast__id_t ke_make; struct _fx_LN12Ast__scope_t_data_t* ke_scope; struct _fx_R10Ast__loc_t ke_loc; } _fx_R17K_form__kdefexn_t; typedef struct _fx_rR17K_form__kdefexn_t_data_t { int_ rc; struct _fx_R17K_form__kdefexn_t data; } _fx_rR17K_form__kdefexn_t_data_t, *_fx_rR17K_form__kdefexn_t; typedef struct _fx_R17K_form__ktprops_t { bool ktp_complex; bool ktp_scalar; bool ktp_ptr; bool ktp_pass_by_ref; bool ktp_custom_free; bool ktp_custom_copy; } _fx_R17K_form__ktprops_t; typedef struct _fx_Nt6option1R17K_form__ktprops_t { int tag; union { struct _fx_R17K_form__ktprops_t Some; } u; } _fx_Nt6option1R17K_form__ktprops_t; typedef struct _fx_R16Ast__var_flags_t { int_ var_flag_class_from; bool var_flag_record; bool var_flag_recursive; bool var_flag_have_tag; bool var_flag_have_mutable; bool var_flag_opt; bool var_flag_instance; } _fx_R16Ast__var_flags_t; typedef struct _fx_LN14K_form__ktyp_t_data_t { int_ rc; struct _fx_LN14K_form__ktyp_t_data_t* tl; struct _fx_N14K_form__ktyp_t_data_t* hd; } _fx_LN14K_form__ktyp_t_data_t, *_fx_LN14K_form__ktyp_t; typedef struct _fx_T2R9Ast__id_tLR9Ast__id_t { struct _fx_R9Ast__id_t t0; struct _fx_LR9Ast__id_t_data_t* t1; } _fx_T2R9Ast__id_tLR9Ast__id_t; typedef struct _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t* tl; struct _fx_T2R9Ast__id_tLR9Ast__id_t hd; } _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t, *_fx_LT2R9Ast__id_tLR9Ast__id_t; typedef struct _fx_R21K_form__kdefvariant_t { struct _fx_R9Ast__id_t kvar_name; fx_str_t kvar_cname; struct _fx_R9Ast__id_t kvar_proto; struct _fx_Nt6option1R17K_form__ktprops_t kvar_props; struct _fx_LN14K_form__ktyp_t_data_t* kvar_targs; struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* kvar_cases; struct _fx_LR9Ast__id_t_data_t* kvar_ctors; struct _fx_R16Ast__var_flags_t kvar_flags; struct _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t* kvar_ifaces; struct 
_fx_LN12Ast__scope_t_data_t* kvar_scope; struct _fx_R10Ast__loc_t kvar_loc; } _fx_R21K_form__kdefvariant_t; typedef struct _fx_rR21K_form__kdefvariant_t_data_t { int_ rc; struct _fx_R21K_form__kdefvariant_t data; } _fx_rR21K_form__kdefvariant_t_data_t, *_fx_rR21K_form__kdefvariant_t; typedef struct _fx_R17K_form__kdeftyp_t { struct _fx_R9Ast__id_t kt_name; fx_str_t kt_cname; struct _fx_R9Ast__id_t kt_proto; struct _fx_Nt6option1R17K_form__ktprops_t kt_props; struct _fx_LN14K_form__ktyp_t_data_t* kt_targs; struct _fx_N14K_form__ktyp_t_data_t* kt_typ; struct _fx_LN12Ast__scope_t_data_t* kt_scope; struct _fx_R10Ast__loc_t kt_loc; } _fx_R17K_form__kdeftyp_t; typedef struct _fx_rR17K_form__kdeftyp_t_data_t { int_ rc; struct _fx_R17K_form__kdeftyp_t data; } _fx_rR17K_form__kdeftyp_t_data_t, *_fx_rR17K_form__kdeftyp_t; typedef struct _fx_R25K_form__kdefclosurevars_t { struct _fx_R9Ast__id_t kcv_name; fx_str_t kcv_cname; struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* kcv_freevars; struct _fx_LR9Ast__id_t_data_t* kcv_orig_freevars; struct _fx_LN12Ast__scope_t_data_t* kcv_scope; struct _fx_R10Ast__loc_t kcv_loc; } _fx_R25K_form__kdefclosurevars_t; typedef struct _fx_rR25K_form__kdefclosurevars_t_data_t { int_ rc; struct _fx_R25K_form__kdefclosurevars_t data; } _fx_rR25K_form__kdefclosurevars_t_data_t, *_fx_rR25K_form__kdefclosurevars_t; typedef struct _fx_Nt6option1R9Ast__id_t { int tag; union { struct _fx_R9Ast__id_t Some; } u; } _fx_Nt6option1R9Ast__id_t; typedef struct _fx_Nt6option1N10Ast__exp_t_data_t { int_ rc; union { struct _fx_N10Ast__exp_t_data_t* Some; } u; } _fx_Nt6option1N10Ast__exp_t_data_t, *_fx_Nt6option1N10Ast__exp_t; typedef struct _fx_R13Ast__defval_t { struct _fx_R9Ast__id_t dv_name; struct _fx_N10Ast__typ_t_data_t* dv_typ; struct _fx_R16Ast__val_flags_t dv_flags; struct _fx_LN12Ast__scope_t_data_t* dv_scope; struct _fx_R10Ast__loc_t dv_loc; } _fx_R13Ast__defval_t; typedef struct _fx_FPi2R9Ast__id_tR9Ast__id_t { int (*fp)(struct _fx_R9Ast__id_t*, 
struct _fx_R9Ast__id_t*, int_*, void*); fx_fcv_t* fcv; } _fx_FPi2R9Ast__id_tR9Ast__id_t; typedef struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t { struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t* root; struct _fx_FPi2R9Ast__id_tR9Ast__id_t cmp; } _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t; typedef struct _fx_LN10Ast__pat_t_data_t { int_ rc; struct _fx_LN10Ast__pat_t_data_t* tl; struct _fx_N10Ast__pat_t_data_t* hd; } _fx_LN10Ast__pat_t_data_t, *_fx_LN10Ast__pat_t; typedef struct _fx_rLR9Ast__id_t_data_t { int_ rc; struct _fx_LR9Ast__id_t_data_t* data; } _fx_rLR9Ast__id_t_data_t, *_fx_rLR9Ast__id_t; typedef struct _fx_R13Ast__deffun_t { struct _fx_R9Ast__id_t df_name; struct _fx_LR9Ast__id_t_data_t* df_templ_args; struct _fx_LN10Ast__pat_t_data_t* df_args; struct _fx_N10Ast__typ_t_data_t* df_typ; struct _fx_N10Ast__exp_t_data_t* df_body; struct _fx_R16Ast__fun_flags_t df_flags; struct _fx_LN12Ast__scope_t_data_t* df_scope; struct _fx_R10Ast__loc_t df_loc; struct _fx_rLR9Ast__id_t_data_t* df_templ_inst; struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t df_env; } _fx_R13Ast__deffun_t; typedef struct _fx_rR13Ast__deffun_t_data_t { int_ rc; struct _fx_R13Ast__deffun_t data; } _fx_rR13Ast__deffun_t_data_t, *_fx_rR13Ast__deffun_t; typedef struct _fx_R13Ast__defexn_t { struct _fx_R9Ast__id_t dexn_name; struct _fx_N10Ast__typ_t_data_t* dexn_typ; struct _fx_LN12Ast__scope_t_data_t* dexn_scope; struct _fx_R10Ast__loc_t dexn_loc; } _fx_R13Ast__defexn_t; typedef struct _fx_rR13Ast__defexn_t_data_t { int_ rc; struct _fx_R13Ast__defexn_t data; } _fx_rR13Ast__defexn_t_data_t, *_fx_rR13Ast__defexn_t; typedef struct _fx_R13Ast__deftyp_t { struct _fx_R9Ast__id_t dt_name; struct _fx_LR9Ast__id_t_data_t* dt_templ_args; struct _fx_N10Ast__typ_t_data_t* dt_typ; bool dt_finalized; struct _fx_LN12Ast__scope_t_data_t* dt_scope; struct _fx_R10Ast__loc_t dt_loc; } _fx_R13Ast__deftyp_t; typedef struct _fx_rR13Ast__deftyp_t_data_t { int_ rc; struct 
_fx_R13Ast__deftyp_t data; } _fx_rR13Ast__deftyp_t_data_t, *_fx_rR13Ast__deftyp_t; typedef struct _fx_T2R9Ast__id_tN10Ast__typ_t { struct _fx_R9Ast__id_t t0; struct _fx_N10Ast__typ_t_data_t* t1; } _fx_T2R9Ast__id_tN10Ast__typ_t; typedef struct _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t* tl; struct _fx_T2R9Ast__id_tN10Ast__typ_t hd; } _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t, *_fx_LT2R9Ast__id_tN10Ast__typ_t; typedef struct _fx_LTa2R9Ast__id_t_data_t { int_ rc; struct _fx_LTa2R9Ast__id_t_data_t* tl; struct _fx_Ta2R9Ast__id_t hd; } _fx_LTa2R9Ast__id_t_data_t, *_fx_LTa2R9Ast__id_t; typedef struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t { struct _fx_R9Ast__id_t t0; struct _fx_LTa2R9Ast__id_t_data_t* t1; } _fx_T2R9Ast__id_tLTa2R9Ast__id_t; typedef struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t* tl; struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t hd; } _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t, *_fx_LT2R9Ast__id_tLTa2R9Ast__id_t; typedef struct _fx_R17Ast__defvariant_t { struct _fx_R9Ast__id_t dvar_name; struct _fx_LR9Ast__id_t_data_t* dvar_templ_args; struct _fx_N10Ast__typ_t_data_t* dvar_alias; struct _fx_R16Ast__var_flags_t dvar_flags; struct _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t* dvar_cases; struct _fx_LR9Ast__id_t_data_t* dvar_ctors; struct _fx_rLR9Ast__id_t_data_t* dvar_templ_inst; struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t* dvar_ifaces; struct _fx_LN12Ast__scope_t_data_t* dvar_scope; struct _fx_R10Ast__loc_t dvar_loc; } _fx_R17Ast__defvariant_t; typedef struct _fx_rR17Ast__defvariant_t_data_t { int_ rc; struct _fx_R17Ast__defvariant_t data; } _fx_rR17Ast__defvariant_t_data_t, *_fx_rR17Ast__defvariant_t; typedef struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t { struct _fx_R9Ast__id_t t0; struct _fx_N10Ast__typ_t_data_t* t1; struct _fx_R16Ast__fun_flags_t t2; } _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t; typedef struct 
_fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t { int_ rc; struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t* tl; struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t hd; } _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t, *_fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t; typedef struct _fx_R19Ast__definterface_t { struct _fx_R9Ast__id_t di_name; struct _fx_R9Ast__id_t di_base; struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t* di_new_methods; struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t* di_all_methods; struct _fx_LN12Ast__scope_t_data_t* di_scope; struct _fx_R10Ast__loc_t di_loc; } _fx_R19Ast__definterface_t; typedef struct _fx_rR19Ast__definterface_t_data_t { int_ rc; struct _fx_R19Ast__definterface_t data; } _fx_rR19Ast__definterface_t_data_t, *_fx_rR19Ast__definterface_t; typedef struct _fx_N14Ast__id_info_t { int tag; union { struct _fx_R13Ast__defval_t IdDVal; struct _fx_rR13Ast__deffun_t_data_t* IdFun; struct _fx_rR13Ast__defexn_t_data_t* IdExn; struct _fx_rR13Ast__deftyp_t_data_t* IdTyp; struct _fx_rR17Ast__defvariant_t_data_t* IdVariant; struct _fx_rR19Ast__definterface_t_data_t* IdInterface; int_ IdModule; } u; } _fx_N14Ast__id_info_t; typedef struct _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t { int_ t0; fx_arr_t t1; struct _fx_N14Ast__id_info_t t2; } _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t; typedef struct _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t { int_ rc; union { struct _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t t; } u; } _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t, *_fx_Nt9Dynvec__t1N14Ast__id_info_t; typedef struct _fx_N12Map__color_t { int tag; } _fx_N12Map__color_t; typedef struct _fx_LN16Ast__env_entry_t_data_t { int_ rc; struct _fx_LN16Ast__env_entry_t_data_t* tl; struct _fx_N16Ast__env_entry_t_data_t* hd; } _fx_LN16Ast__env_entry_t_data_t, *_fx_LN16Ast__env_entry_t; typedef struct 
_fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t { struct _fx_N12Map__color_t t0; struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t* t1; struct _fx_R9Ast__id_t t2; struct _fx_LN16Ast__env_entry_t_data_t* t3; struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t* t4; } _fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t; typedef struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t { int_ rc; union { struct _fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t Node; } u; } _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t, *_fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t; typedef struct _fx_T2R10Ast__loc_tS { struct _fx_R10Ast__loc_t t0; fx_str_t t1; } _fx_T2R10Ast__loc_tS; typedef struct _fx_T2il { int_ t0; int64_t t1; } _fx_T2il; typedef struct _fx_T2iq { int_ t0; uint64_t t1; } _fx_T2iq; typedef struct _fx_T2id { int_ t0; double t1; } _fx_T2id; typedef struct _fx_N10Ast__lit_t { int tag; union { int64_t LitInt; struct _fx_T2il LitSInt; struct _fx_T2iq LitUInt; struct _fx_T2id LitFloat; fx_str_t LitString; char_ LitChar; bool LitBool; } u; } _fx_N10Ast__lit_t; typedef struct _fx_rNt6option1N10Ast__typ_t_data_t { int_ rc; struct _fx_Nt6option1N10Ast__typ_t_data_t* data; } _fx_rNt6option1N10Ast__typ_t_data_t, *_fx_rNt6option1N10Ast__typ_t; typedef struct _fx_LN10Ast__typ_t_data_t { int_ rc; struct _fx_LN10Ast__typ_t_data_t* tl; struct _fx_N10Ast__typ_t_data_t* hd; } _fx_LN10Ast__typ_t_data_t, *_fx_LN10Ast__typ_t; typedef struct _fx_T2LN10Ast__typ_tN10Ast__typ_t { struct _fx_LN10Ast__typ_t_data_t* t0; struct _fx_N10Ast__typ_t_data_t* t1; } _fx_T2LN10Ast__typ_tN10Ast__typ_t; typedef struct _fx_T2iN10Ast__typ_t { int_ t0; struct 
_fx_N10Ast__typ_t_data_t* t1; } _fx_T2iN10Ast__typ_t; typedef struct _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t { struct _fx_R16Ast__val_flags_t t0; struct _fx_R9Ast__id_t t1; struct _fx_N10Ast__typ_t_data_t* t2; struct _fx_N10Ast__exp_t_data_t* t3; } _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t; typedef struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t { int_ rc; struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t* tl; struct _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t hd; } _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t, *_fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t; typedef struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB { struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t* t0; bool t1; } _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB; typedef struct _fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB_data_t { int_ rc; struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB data; } _fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB_data_t, *_fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB; typedef struct _fx_T2LN10Ast__typ_tR9Ast__id_t { struct _fx_LN10Ast__typ_t_data_t* t0; struct _fx_R9Ast__id_t t1; } _fx_T2LN10Ast__typ_tR9Ast__id_t; typedef struct _fx_N10Ast__typ_t_data_t { int_ rc; int tag; union { struct _fx_rNt6option1N10Ast__typ_t_data_t* TypVar; struct _fx_Nt6option1N10Ast__typ_t_data_t* TypVarTuple; struct _fx_N10Ast__typ_t_data_t* TypVarArray; int_ TypSInt; int_ TypUInt; int_ TypFloat; struct _fx_T2LN10Ast__typ_tN10Ast__typ_t TypFun; struct _fx_N10Ast__typ_t_data_t* TypList; struct _fx_N10Ast__typ_t_data_t* TypVector; struct _fx_LN10Ast__typ_t_data_t* TypTuple; struct _fx_N10Ast__typ_t_data_t* TypRef; struct _fx_T2iN10Ast__typ_t TypArray; struct 
_fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB_data_t* TypRecord; struct _fx_T2LN10Ast__typ_tR9Ast__id_t TypApp; } u; } _fx_N10Ast__typ_t_data_t, *_fx_N10Ast__typ_t; typedef struct _fx_N12Ast__cmpop_t { int tag; } _fx_N12Ast__cmpop_t; typedef struct _fx_N13Ast__binary_t { int tag; union { struct _fx_N12Ast__cmpop_t OpCmp; struct _fx_N12Ast__cmpop_t OpDotCmp; } u; } _fx_N13Ast__binary_t; typedef struct _fx_N12Ast__unary_t { int tag; } _fx_N12Ast__unary_t; typedef struct _fx_N13Ast__intrin_t { int tag; union { struct _fx_R9Ast__id_t IntrinMath; } u; } _fx_N13Ast__intrin_t; typedef struct _fx_N15Ast__for_make_t { int tag; } _fx_N15Ast__for_make_t; typedef struct _fx_R16Ast__for_flags_t { bool for_flag_parallel; struct _fx_N15Ast__for_make_t for_flag_make; bool for_flag_unzip; bool for_flag_fold; bool for_flag_nested; } _fx_R16Ast__for_flags_t; typedef struct _fx_N13Ast__border_t { int tag; } _fx_N13Ast__border_t; typedef struct _fx_N18Ast__interpolate_t { int tag; } _fx_N18Ast__interpolate_t; typedef struct _fx_T2BR10Ast__loc_t { bool t0; struct _fx_R10Ast__loc_t t1; } _fx_T2BR10Ast__loc_t; typedef struct _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t { struct _fx_Nt6option1N10Ast__exp_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t; typedef struct _fx_T2N10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__typ_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_Nt6option1N10Ast__exp_t_data_t* t0; struct _fx_Nt6option1N10Ast__exp_t_data_t* t1; struct _fx_Nt6option1N10Ast__exp_t_data_t* t2; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t3; } _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__lit_t t0; struct 
_fx_T2N10Ast__typ_tR10Ast__loc_t t1; } _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t1; } _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N13Ast__binary_t t0; struct _fx_N10Ast__exp_t_data_t* t1; struct _fx_N10Ast__exp_t_data_t* t2; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t3; } _fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N12Ast__unary_t t0; struct _fx_N10Ast__exp_t_data_t* t1; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2; } _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_LN10Ast__exp_t_data_t { int_ rc; struct _fx_LN10Ast__exp_t_data_t* tl; struct _fx_N10Ast__exp_t_data_t* hd; } _fx_LN10Ast__exp_t_data_t, *_fx_LN10Ast__exp_t; typedef struct _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N13Ast__intrin_t t0; struct _fx_LN10Ast__exp_t_data_t* t1; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2; } _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T2R9Ast__id_tN10Ast__exp_t { struct _fx_R9Ast__id_t t0; struct _fx_N10Ast__exp_t_data_t* t1; } _fx_T2R9Ast__id_tN10Ast__exp_t; typedef struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_LN10Ast__exp_t_data_t* t0; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t1; } _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_LLN10Ast__exp_t_data_t { int_ rc; struct _fx_LLN10Ast__exp_t_data_t* tl; struct _fx_LN10Ast__exp_t_data_t* hd; } _fx_LLN10Ast__exp_t_data_t, *_fx_LLN10Ast__exp_t; typedef struct _fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_LLN10Ast__exp_t_data_t* t0; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t1; } 
_fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t* tl; struct _fx_T2R9Ast__id_tN10Ast__exp_t hd; } _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t, *_fx_LT2R9Ast__id_tN10Ast__exp_t; typedef struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__exp_t_data_t* t0; struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t* t1; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2; } _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__exp_t_data_t* t0; struct _fx_LN10Ast__exp_t_data_t* t1; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2; } _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__exp_t_data_t* t0; struct _fx_N13Ast__border_t t1; struct _fx_N18Ast__interpolate_t t2; struct _fx_LN10Ast__exp_t_data_t* t3; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t4; } _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t { struct _fx_N10Ast__exp_t_data_t* t0; struct _fx_N10Ast__exp_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t; typedef struct _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__exp_t_data_t* t0; struct _fx_N10Ast__exp_t_data_t* t1; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2; } _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T2N10Ast__exp_tR10Ast__loc_t { struct _fx_N10Ast__exp_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2N10Ast__exp_tR10Ast__loc_t; typedef struct _fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct 
_fx_N10Ast__exp_t_data_t* t0; struct _fx_N10Ast__exp_t_data_t* t1; struct _fx_N10Ast__exp_t_data_t* t2; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t3; } _fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T2N10Ast__pat_tN10Ast__exp_t { struct _fx_N10Ast__pat_t_data_t* t0; struct _fx_N10Ast__exp_t_data_t* t1; } _fx_T2N10Ast__pat_tN10Ast__exp_t; typedef struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t { int_ rc; struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* tl; struct _fx_T2N10Ast__pat_tN10Ast__exp_t hd; } _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t, *_fx_LT2N10Ast__pat_tN10Ast__exp_t; typedef struct _fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t { struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* t0; struct _fx_N10Ast__pat_t_data_t* t1; struct _fx_N10Ast__exp_t_data_t* t2; struct _fx_R16Ast__for_flags_t t3; struct _fx_R10Ast__loc_t t4; } _fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t; typedef struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t { struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* t0; struct _fx_N10Ast__pat_t_data_t* t1; } _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t; typedef struct _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t { int_ rc; struct _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t* tl; struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t hd; } _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t, *_fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t; typedef struct _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t* t0; struct _fx_N10Ast__exp_t_data_t* t1; struct _fx_R16Ast__for_flags_t t2; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t3; } _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t; typedef 
struct _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__exp_t_data_t* t0; struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* t1; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2; } _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__exp_t_data_t* t0; struct _fx_N10Ast__typ_t_data_t* t1; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2; } _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T2ST2N10Ast__typ_tR10Ast__loc_t { fx_str_t t0; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t1; } _fx_T2ST2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T3SST2N10Ast__typ_tR10Ast__loc_t { fx_str_t t0; fx_str_t t1; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2; } _fx_T3SST2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t { struct _fx_N10Ast__pat_t_data_t* t0; struct _fx_N10Ast__exp_t_data_t* t1; struct _fx_R16Ast__val_flags_t t2; struct _fx_R10Ast__loc_t t3; } _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t; typedef struct _fx_T2iR9Ast__id_t { int_ t0; struct _fx_R9Ast__id_t t1; } _fx_T2iR9Ast__id_t; typedef struct _fx_LT2iR9Ast__id_t_data_t { int_ rc; struct _fx_LT2iR9Ast__id_t_data_t* tl; struct _fx_T2iR9Ast__id_t hd; } _fx_LT2iR9Ast__id_t_data_t, *_fx_LT2iR9Ast__id_t; typedef struct _fx_T2LT2iR9Ast__id_tR10Ast__loc_t { struct _fx_LT2iR9Ast__id_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2LT2iR9Ast__id_tR10Ast__loc_t; typedef struct _fx_T3iLR9Ast__id_tR10Ast__loc_t { int_ t0; struct _fx_LR9Ast__id_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3iLR9Ast__id_tR10Ast__loc_t; typedef struct _fx_T2LSR10Ast__loc_t { struct _fx_LS_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2LSR10Ast__loc_t; typedef struct _fx_N10Ast__exp_t_data_t { int_ rc; int tag; union { struct _fx_R10Ast__loc_t ExpNop; struct _fx_T2BR10Ast__loc_t 
ExpBreak; struct _fx_R10Ast__loc_t ExpContinue; struct _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t ExpReturn; struct _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpRange; struct _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t ExpLit; struct _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t ExpIdent; struct _fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpBinary; struct _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpUnary; struct _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpIntrin; struct _fx_T2R9Ast__id_tN10Ast__exp_t ExpSync; struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpSeq; struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpMkTuple; struct _fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpMkArray; struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpMkVector; struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpMkRecord; struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpUpdateRecord; struct _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpCall; struct _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpAt; struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t ExpAssign; struct _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpMem; struct _fx_T2N10Ast__exp_tR10Ast__loc_t ExpThrow; struct _fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpIf; struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t ExpWhile; struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t ExpDoWhile; struct _fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t ExpFor; struct _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t ExpMap; struct 
_fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpTryCatch; struct _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpMatch; struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t ExpCast; struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t ExpTyped; struct _fx_T2ST2N10Ast__typ_tR10Ast__loc_t ExpCCode; struct _fx_T3SST2N10Ast__typ_tR10Ast__loc_t ExpData; struct _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t DefVal; struct _fx_rR13Ast__deffun_t_data_t* DefFun; struct _fx_rR13Ast__defexn_t_data_t* DefExn; struct _fx_rR13Ast__deftyp_t_data_t* DefTyp; struct _fx_rR17Ast__defvariant_t_data_t* DefVariant; struct _fx_rR19Ast__definterface_t_data_t* DefInterface; struct _fx_T2LT2iR9Ast__id_tR10Ast__loc_t DirImport; struct _fx_T3iLR9Ast__id_tR10Ast__loc_t DirImportFrom; struct _fx_T2LSR10Ast__loc_t DirPragma; } u; } _fx_N10Ast__exp_t_data_t, *_fx_N10Ast__exp_t; typedef struct _fx_T2N10Ast__lit_tR10Ast__loc_t { struct _fx_N10Ast__lit_t t0; struct _fx_R10Ast__loc_t t1; } _fx_T2N10Ast__lit_tR10Ast__loc_t; typedef struct _fx_T2R9Ast__id_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; struct _fx_R10Ast__loc_t t1; } _fx_T2R9Ast__id_tR10Ast__loc_t; typedef struct _fx_T2LN10Ast__pat_tR10Ast__loc_t { struct _fx_LN10Ast__pat_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2LN10Ast__pat_tR10Ast__loc_t; typedef struct _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; struct _fx_LN10Ast__pat_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t; typedef struct _fx_T2R9Ast__id_tN10Ast__pat_t { struct _fx_R9Ast__id_t t0; struct _fx_N10Ast__pat_t_data_t* t1; } _fx_T2R9Ast__id_tN10Ast__pat_t; typedef struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t* tl; struct _fx_T2R9Ast__id_tN10Ast__pat_t hd; } _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t, *_fx_LT2R9Ast__id_tN10Ast__pat_t; typedef 
struct _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t { struct _fx_Nt6option1R9Ast__id_t t0; struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t; typedef struct _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t { struct _fx_N10Ast__pat_t_data_t* t0; struct _fx_N10Ast__pat_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t; typedef struct _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t { struct _fx_N10Ast__pat_t_data_t* t0; struct _fx_R9Ast__id_t t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t; typedef struct _fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__pat_t_data_t* t0; struct _fx_N10Ast__typ_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t { struct _fx_N10Ast__pat_t_data_t* t0; struct _fx_N10Ast__exp_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t; typedef struct _fx_T2N10Ast__pat_tR10Ast__loc_t { struct _fx_N10Ast__pat_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2N10Ast__pat_tR10Ast__loc_t; typedef struct _fx_N10Ast__pat_t_data_t { int_ rc; int tag; union { struct _fx_R10Ast__loc_t PatAny; struct _fx_T2N10Ast__lit_tR10Ast__loc_t PatLit; struct _fx_T2R9Ast__id_tR10Ast__loc_t PatIdent; struct _fx_T2LN10Ast__pat_tR10Ast__loc_t PatTuple; struct _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t PatVariant; struct _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t PatRecord; struct _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t PatCons; struct _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t PatAs; struct _fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t PatTyped; struct _fx_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t PatWhen; struct _fx_T2LN10Ast__pat_tR10Ast__loc_t PatAlt; struct _fx_T2N10Ast__pat_tR10Ast__loc_t 
PatRef; } u; } _fx_N10Ast__pat_t_data_t, *_fx_N10Ast__pat_t; typedef struct _fx_N16Ast__env_entry_t_data_t { int_ rc; int tag; union { struct _fx_R9Ast__id_t EnvId; struct _fx_N10Ast__typ_t_data_t* EnvTyp; } u; } _fx_N16Ast__env_entry_t_data_t, *_fx_N16Ast__env_entry_t; typedef struct _fx_T2SR10Ast__loc_t { fx_str_t t0; struct _fx_R10Ast__loc_t t1; } _fx_T2SR10Ast__loc_t; typedef struct _fx_LT2SR10Ast__loc_t_data_t { int_ rc; struct _fx_LT2SR10Ast__loc_t_data_t* tl; struct _fx_T2SR10Ast__loc_t hd; } _fx_LT2SR10Ast__loc_t_data_t, *_fx_LT2SR10Ast__loc_t; typedef struct _fx_Li_data_t { int_ rc; struct _fx_Li_data_t* tl; int_ hd; } _fx_Li_data_t, *_fx_Li; typedef struct _fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t { struct _fx_R9Ast__id_t t0; fx_str_t t1; int_ t2; bool t3; struct _fx_LN10Ast__exp_t_data_t* t4; struct _fx_Li_data_t* t5; struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t t6; bool t7; int_ t8; struct _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t* t9; } _fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t; typedef struct _fx_N16Ast__defmodule_t_data_t { int_ rc; union { struct _fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t defmodule_t; } u; } _fx_N16Ast__defmodule_t_data_t, *_fx_N16Ast__defmodule_t; typedef struct _fx_LE_data_t { int_ rc; struct _fx_LE_data_t* tl; fx_exn_t hd; } _fx_LE_data_t, *_fx_LE; typedef struct _fx_T2BS { bool t0; fx_str_t t1; } _fx_T2BS; typedef struct _fx_N14Lexer__token_t { int tag; union { struct _fx_N10Ast__lit_t LITERAL; struct _fx_T2BS IDENT; fx_str_t TYVAR; fx_str_t DATA; bool FOR; bool IMPORT; bool REF; bool RETURN; bool WHILE; bool LPAREN; bool LSQUARE; bool BACKSLASH; bool MINUS; bool PLUS; bool STAR; bool DOT_PLUS; bool DOT_MINUS; struct _fx_N13Ast__binary_t AUG_BINOP; struct _fx_N12Ast__cmpop_t CMP; struct _fx_N12Ast__cmpop_t DOT_CMP; 
fx_str_t RESERVED; } u; } _fx_N14Lexer__token_t; typedef struct _fx_LN14Lexer__token_t_data_t { int_ rc; struct _fx_LN14Lexer__token_t_data_t* tl; struct _fx_N14Lexer__token_t hd; } _fx_LN14Lexer__token_t_data_t, *_fx_LN14Lexer__token_t; typedef struct _fx_N14K_form__klit_t { int tag; union { int64_t KLitInt; struct _fx_T2il KLitSInt; struct _fx_T2iq KLitUInt; struct _fx_T2id KLitFloat; fx_str_t KLitString; char_ KLitChar; bool KLitBool; struct _fx_N14K_form__ktyp_t_data_t* KLitNil; } u; } _fx_N14K_form__klit_t; typedef struct _fx_N14K_form__atom_t { int tag; union { struct _fx_R9Ast__id_t AtomId; struct _fx_N14K_form__klit_t AtomLit; } u; } _fx_N14K_form__atom_t; typedef struct _fx_Nt6option1N14K_form__atom_t { int tag; union { struct _fx_N14K_form__atom_t Some; } u; } _fx_Nt6option1N14K_form__atom_t; typedef struct _fx_LN14K_form__kexp_t_data_t { int_ rc; struct _fx_LN14K_form__kexp_t_data_t* tl; struct _fx_N14K_form__kexp_t_data_t* hd; } _fx_LN14K_form__kexp_t_data_t, *_fx_LN14K_form__kexp_t; typedef struct _fx_T2BN14K_form__atom_t { bool t0; struct _fx_N14K_form__atom_t t1; } _fx_T2BN14K_form__atom_t; typedef struct _fx_LT2BN14K_form__atom_t_data_t { int_ rc; struct _fx_LT2BN14K_form__atom_t_data_t* tl; struct _fx_T2BN14K_form__atom_t hd; } _fx_LT2BN14K_form__atom_t_data_t, *_fx_LT2BN14K_form__atom_t; typedef struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t { struct _fx_LN14K_form__ktyp_t_data_t* t0; struct _fx_N14K_form__ktyp_t_data_t* t1; } _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t; typedef struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t { struct _fx_R9Ast__id_t t0; struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* t1; } _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t; typedef struct _fx_T2iN14K_form__ktyp_t { int_ t0; struct _fx_N14K_form__ktyp_t_data_t* t1; } _fx_T2iN14K_form__ktyp_t; typedef struct _fx_N14K_form__ktyp_t_data_t { int_ rc; int tag; union { int_ KTypSInt; int_ KTypUInt; int_ KTypFloat; struct 
_fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t KTypFun; struct _fx_LN14K_form__ktyp_t_data_t* KTypTuple; struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t KTypRecord; struct _fx_R9Ast__id_t KTypName; struct _fx_T2iN14K_form__ktyp_t KTypArray; struct _fx_N14K_form__ktyp_t_data_t* KTypVector; struct _fx_N14K_form__ktyp_t_data_t* KTypList; struct _fx_N14K_form__ktyp_t_data_t* KTypRef; } u; } _fx_N14K_form__ktyp_t_data_t, *_fx_N14K_form__ktyp_t; typedef struct _fx_Ta3N14K_form__atom_t { struct _fx_N14K_form__atom_t t0; struct _fx_N14K_form__atom_t t1; struct _fx_N14K_form__atom_t t2; } _fx_Ta3N14K_form__atom_t; typedef struct _fx_N13K_form__dom_t { int tag; union { struct _fx_N14K_form__atom_t DomainElem; struct _fx_N14K_form__atom_t DomainFast; struct _fx_Ta3N14K_form__atom_t DomainRange; } u; } _fx_N13K_form__dom_t; typedef struct _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t { struct _fx_Nt6option1N14K_form__atom_t t0; struct _fx_R10Ast__loc_t t1; } _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t; typedef struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_N14K_form__ktyp_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_N14K_form__atom_t t0; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t1; } _fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_N13Ast__binary_t t0; struct _fx_N14K_form__atom_t t1; struct _fx_N14K_form__atom_t t2; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t3; } _fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_N12Ast__unary_t t0; struct _fx_N14K_form__atom_t t1; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t2; } 
_fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_LN14K_form__atom_t_data_t { int_ rc; struct _fx_LN14K_form__atom_t_data_t* tl; struct _fx_N14K_form__atom_t hd; } _fx_LN14K_form__atom_t_data_t, *_fx_LN14K_form__atom_t; typedef struct _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_N13Ast__intrin_t t0; struct _fx_LN14K_form__atom_t_data_t* t1; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t2; } _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T2R9Ast__id_tN14K_form__kexp_t { struct _fx_R9Ast__id_t t0; struct _fx_N14K_form__kexp_t_data_t* t1; } _fx_T2R9Ast__id_tN14K_form__kexp_t; typedef struct _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_LN14K_form__kexp_t_data_t* t0; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t1; } _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_N14K_form__kexp_t_data_t* t0; struct _fx_N14K_form__kexp_t_data_t* t1; struct _fx_N14K_form__kexp_t_data_t* t2; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t3; } _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; struct _fx_LN14K_form__atom_t_data_t* t1; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t2; } _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; int_ t1; struct _fx_LN14K_form__atom_t_data_t* t2; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t3; } _fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_LN14K_form__atom_t_data_t* t0; struct 
_fx_T2N14K_form__ktyp_tR10Ast__loc_t t1; } _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; struct _fx_R9Ast__id_t t1; struct _fx_LN14K_form__atom_t_data_t* t2; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t3; } _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_LLT2BN14K_form__atom_t_data_t { int_ rc; struct _fx_LLT2BN14K_form__atom_t_data_t* tl; struct _fx_LT2BN14K_form__atom_t_data_t* hd; } _fx_LLT2BN14K_form__atom_t_data_t, *_fx_LLT2BN14K_form__atom_t; typedef struct _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { bool t0; struct _fx_LLT2BN14K_form__atom_t_data_t* t1; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t2; } _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_LT2BN14K_form__atom_t_data_t* t0; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t1; } _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_LN13K_form__dom_t_data_t { int_ rc; struct _fx_LN13K_form__dom_t_data_t* tl; struct _fx_N13K_form__dom_t hd; } _fx_LN13K_form__dom_t_data_t, *_fx_LN13K_form__dom_t; typedef struct _fx_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_N14K_form__atom_t t0; struct _fx_N13Ast__border_t t1; struct _fx_N18Ast__interpolate_t t2; struct _fx_LN13K_form__dom_t_data_t* t3; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t4; } _fx_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; int_ t1; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t2; } _fx_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct 
_fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; struct _fx_N14K_form__atom_t t1; struct _fx_R10Ast__loc_t t2; } _fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t; typedef struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t { struct _fx_LN14K_form__kexp_t_data_t* t0; struct _fx_N14K_form__kexp_t_data_t* t1; } _fx_T2LN14K_form__kexp_tN14K_form__kexp_t; typedef struct _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t { int_ rc; struct _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t* tl; struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t hd; } _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t, *_fx_LT2LN14K_form__kexp_tN14K_form__kexp_t; typedef struct _fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t* t0; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t1; } _fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_N14K_form__kexp_t_data_t* t0; struct _fx_N14K_form__kexp_t_data_t* t1; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t2; } _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T3R9Ast__id_tBR10Ast__loc_t { struct _fx_R9Ast__id_t t0; bool t1; struct _fx_R10Ast__loc_t t2; } _fx_T3R9Ast__id_tBR10Ast__loc_t; typedef struct _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t { struct _fx_N14K_form__atom_t t0; struct _fx_N14K_form__ktyp_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T2R9Ast__id_tN13K_form__dom_t { struct _fx_R9Ast__id_t t0; struct _fx_N13K_form__dom_t t1; } _fx_T2R9Ast__id_tN13K_form__dom_t; typedef struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t* tl; struct _fx_T2R9Ast__id_tN13K_form__dom_t hd; } _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t, 
*_fx_LT2R9Ast__id_tN13K_form__dom_t; typedef struct _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t { struct _fx_N14K_form__kexp_t_data_t* t0; struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t* t1; struct _fx_LR9Ast__id_t_data_t* t2; } _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t; typedef struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t { int_ rc; struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t* tl; struct _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t hd; } _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t, *_fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t; typedef struct _fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t* t0; struct _fx_N14K_form__kexp_t_data_t* t1; struct _fx_R16Ast__for_flags_t t2; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t3; } _fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t { struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t* t0; struct _fx_LR9Ast__id_t_data_t* t1; struct _fx_N14K_form__kexp_t_data_t* t2; struct _fx_R16Ast__for_flags_t t3; struct _fx_R10Ast__loc_t t4; } _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t; typedef struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t { struct _fx_N14K_form__kexp_t_data_t* t0; struct _fx_N14K_form__kexp_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t; typedef struct _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t { fx_str_t t0; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t 
t1; } _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; struct _fx_N14K_form__kexp_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t; typedef struct _fx_N14K_form__kexp_t_data_t { int_ rc; int tag; union { struct _fx_R10Ast__loc_t KExpNop; struct _fx_R10Ast__loc_t KExpBreak; struct _fx_R10Ast__loc_t KExpContinue; struct _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t KExpReturn; struct _fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpAtom; struct _fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpBinary; struct _fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpUnary; struct _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpIntrin; struct _fx_T2R9Ast__id_tN14K_form__kexp_t KExpSync; struct _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t KExpSeq; struct _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t KExpIf; struct _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpCall; struct _fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpICall; struct _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpMkTuple; struct _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpMkRecord; struct _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpMkClosure; struct _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpMkArray; struct _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpMkVector; struct _fx_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpAt; struct _fx_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t KExpMem; struct _fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t KExpAssign; struct 
_fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t KExpMatch; struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t KExpTryCatch; struct _fx_T3R9Ast__id_tBR10Ast__loc_t KExpThrow; struct _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t KExpCast; struct _fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t KExpMap; struct _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t KExpFor; struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t KExpWhile; struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t KExpDoWhile; struct _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t KExpCCode; struct _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t KDefVal; struct _fx_rR17K_form__kdeffun_t_data_t* KDefFun; struct _fx_rR17K_form__kdefexn_t_data_t* KDefExn; struct _fx_rR21K_form__kdefvariant_t_data_t* KDefVariant; struct _fx_rR23K_form__kdefinterface_t_data_t* KDefInterface; struct _fx_rR17K_form__kdeftyp_t_data_t* KDefTyp; struct _fx_rR25K_form__kdefclosurevars_t_data_t* KDefClosureVars; } u; } _fx_N14K_form__kexp_t_data_t, *_fx_N14K_form__kexp_t; typedef struct _fx_R14Ast__pragmas_t { bool pragma_cpp; struct _fx_LT2SR10Ast__loc_t_data_t* pragma_clibs; } _fx_R14Ast__pragmas_t; typedef struct _fx_R17K_form__kmodule_t { struct _fx_R9Ast__id_t km_name; int_ km_idx; int_ km_toposort_idx; fx_str_t km_cname; struct _fx_LN14K_form__kexp_t_data_t* km_top; struct _fx_Li_data_t* km_deps; bool km_skip; bool km_main; struct _fx_R14Ast__pragmas_t km_pragmas; } _fx_R17K_form__kmodule_t; typedef struct _fx_LR17K_form__kmodule_t_data_t { int_ rc; struct _fx_LR17K_form__kmodule_t_data_t* tl; struct _fx_R17K_form__kmodule_t hd; } _fx_LR17K_form__kmodule_t_data_t, *_fx_LR17K_form__kmodule_t; typedef struct _fx_Nt6option1N14C_form__ctyp_t { int tag; union { struct _fx_N14C_form__ctyp_t_data_t* Some; } u; } 
_fx_Nt6option1N14C_form__ctyp_t; typedef struct _fx_N17C_form__cbinary_t { int tag; union { struct _fx_N12Ast__cmpop_t COpCmp; } u; } _fx_N17C_form__cbinary_t; typedef struct _fx_N16C_form__cunary_t { int tag; } _fx_N16C_form__cunary_t; typedef struct _fx_N19C_form__ctyp_attr_t { int tag; } _fx_N19C_form__ctyp_attr_t; typedef struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t { struct _fx_Nt6option1R9Ast__id_t t0; struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* t1; } _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t; typedef struct _fx_LN14C_form__ctyp_t_data_t { int_ rc; struct _fx_LN14C_form__ctyp_t_data_t* tl; struct _fx_N14C_form__ctyp_t_data_t* hd; } _fx_LN14C_form__ctyp_t_data_t, *_fx_LN14C_form__ctyp_t; typedef struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t { struct _fx_LN14C_form__ctyp_t_data_t* t0; struct _fx_N14C_form__ctyp_t_data_t* t1; } _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t; typedef struct _fx_LN19C_form__ctyp_attr_t_data_t { int_ rc; struct _fx_LN19C_form__ctyp_attr_t_data_t* tl; struct _fx_N19C_form__ctyp_attr_t hd; } _fx_LN19C_form__ctyp_attr_t_data_t, *_fx_LN19C_form__ctyp_attr_t; typedef struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t { struct _fx_LN19C_form__ctyp_attr_t_data_t* t0; struct _fx_N14C_form__ctyp_t_data_t* t1; } _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t; typedef struct _fx_T2iN14C_form__ctyp_t { int_ t0; struct _fx_N14C_form__ctyp_t_data_t* t1; } _fx_T2iN14C_form__ctyp_t; typedef struct _fx_N14C_form__ctyp_t_data_t { int_ rc; int tag; union { int_ CTypSInt; int_ CTypUInt; int_ CTypFloat; struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t CTypStruct; struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t CTypUnion; struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t CTypFunRawPtr; struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t CTypRawPtr; struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t CTypRawArray; struct _fx_T2iN14C_form__ctyp_t CTypArray; struct 
_fx_N14C_form__ctyp_t_data_t* CTypVector; struct _fx_R9Ast__id_t CTypName; } u; } _fx_N14C_form__ctyp_t_data_t, *_fx_N14C_form__ctyp_t; typedef struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N14C_form__ctyp_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t1; } _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N14K_form__klit_t t0; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t1; } _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N17C_form__cbinary_t t0; struct _fx_N14C_form__cexp_t_data_t* t1; struct _fx_N14C_form__cexp_t_data_t* t2; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t3; } _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N16C_form__cunary_t t0; struct _fx_N14C_form__cexp_t_data_t* t1; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t2; } _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_R9Ast__id_t t1; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t2; } _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_N14C_form__ctyp_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t { struct 
_fx_N14C_form__cexp_t_data_t* t0; struct _fx_N14C_form__cexp_t_data_t* t1; struct _fx_N14C_form__cexp_t_data_t* t2; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t3; } _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_LN14C_form__cexp_t_data_t { int_ rc; struct _fx_LN14C_form__cexp_t_data_t* tl; struct _fx_N14C_form__cexp_t_data_t* hd; } _fx_LN14C_form__cexp_t_data_t, *_fx_LN14C_form__cexp_t; typedef struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_LN14C_form__cexp_t_data_t* t1; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t2; } _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_LN14C_form__cexp_t_data_t* t0; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t1; } _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_N14C_form__cexp_t_data_t { int_ rc; int tag; union { struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t CExpIdent; struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t CExpLit; struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpBinary; struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpUnary; struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t CExpMem; struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t CExpArrow; struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t CExpCast; struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpTernary; struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpCall; struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpInit; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t CExpTyp; struct _fx_T2SR10Ast__loc_t CExpCCode; 
} u; } _fx_N14C_form__cexp_t_data_t, *_fx_N14C_form__cexp_t; typedef struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t { struct _fx_Nt6option1N14C_form__cexp_t t0; struct _fx_R10Ast__loc_t t1; } _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t; typedef struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t { struct _fx_LN15C_form__cstmt_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2LN15C_form__cstmt_tR10Ast__loc_t; typedef struct _fx_T2R9Ast__id_tN15C_form__cstmt_t { struct _fx_R9Ast__id_t t0; struct _fx_N15C_form__cstmt_t_data_t* t1; } _fx_T2R9Ast__id_tN15C_form__cstmt_t; typedef struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_N15C_form__cstmt_t_data_t* t1; struct _fx_N15C_form__cstmt_t_data_t* t2; struct _fx_R10Ast__loc_t t3; } _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t; typedef struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t { struct _fx_Nt6option1N14C_form__ctyp_t t0; struct _fx_LN14C_form__cexp_t_data_t* t1; struct _fx_Nt6option1N14C_form__cexp_t t2; struct _fx_LN14C_form__cexp_t_data_t* t3; struct _fx_N15C_form__cstmt_t_data_t* t4; struct _fx_R10Ast__loc_t t5; } _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t; typedef struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_N15C_form__cstmt_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t; typedef struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t { struct _fx_N15C_form__cstmt_t_data_t* t0; struct _fx_N14C_form__cexp_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t; typedef struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t { struct _fx_LN14C_form__cexp_t_data_t* 
t0; struct _fx_LN15C_form__cstmt_t_data_t* t1; } _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t; typedef struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t { int_ rc; struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t* tl; struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t hd; } _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t, *_fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t; typedef struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t; typedef struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t { struct _fx_N14C_form__ctyp_t_data_t* t0; struct _fx_R9Ast__id_t t1; struct _fx_Nt6option1N14C_form__cexp_t t2; struct _fx_R10Ast__loc_t t3; } _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t; typedef struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_LN15C_form__cstmt_t_data_t* t1; } _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t; typedef struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t { int_ rc; struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t* tl; struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t hd; } _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t, *_fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t; typedef struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t { struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t* t0; struct _fx_LN15C_form__cstmt_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t; typedef struct _fx_N15C_form__cstmt_t_data_t { int_ rc; int tag; union { struct _fx_R10Ast__loc_t CStmtNop; struct _fx_T2SR10Ast__loc_t CComment; struct _fx_N14C_form__cexp_t_data_t* CExp; 
struct _fx_R10Ast__loc_t CStmtBreak; struct _fx_R10Ast__loc_t CStmtContinue; struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t CStmtReturn; struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t CStmtBlock; struct _fx_T2R9Ast__id_tN15C_form__cstmt_t CStmtSync; struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t CStmtIf; struct _fx_T2R9Ast__id_tR10Ast__loc_t CStmtGoto; struct _fx_T2R9Ast__id_tR10Ast__loc_t CStmtLabel; struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t CStmtFor; struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t CStmtWhile; struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t CStmtDoWhile; struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t CStmtSwitch; struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t CDefVal; struct _fx_rR17C_form__cdeffun_t_data_t* CDefFun; struct _fx_rR17C_form__cdeftyp_t_data_t* CDefTyp; struct _fx_T2R9Ast__id_tR10Ast__loc_t CDefForwardSym; struct _fx_T2R9Ast__id_tR10Ast__loc_t CDefForwardTyp; struct _fx_rR18C_form__cdefenum_t_data_t* CDefEnum; struct _fx_rR23C_form__cdefinterface_t_data_t* CDefInterface; struct _fx_rR19C_form__cdefmacro_t_data_t* CMacroDef; struct _fx_T2R9Ast__id_tR10Ast__loc_t CMacroUndef; struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t CMacroIf; struct _fx_T2SR10Ast__loc_t CMacroInclude; struct _fx_T2SR10Ast__loc_t CMacroPragma; } u; } _fx_N15C_form__cstmt_t_data_t, *_fx_N15C_form__cstmt_t; typedef struct _fx_R17C_form__cmodule_t { struct _fx_R9Ast__id_t cmod_name; fx_str_t cmod_cname; struct _fx_LN15C_form__cstmt_t_data_t* cmod_ccode; bool cmod_main; bool cmod_recompile; bool cmod_skip; struct _fx_R14Ast__pragmas_t cmod_pragmas; } _fx_R17C_form__cmodule_t; typedef struct _fx_LR17C_form__cmodule_t_data_t { int_ rc; struct _fx_LR17C_form__cmodule_t_data_t* tl; struct 
_fx_R17C_form__cmodule_t hd; } _fx_LR17C_form__cmodule_t_data_t, *_fx_LR17C_form__cmodule_t; typedef struct _fx_N20Compiler__msgcolor_t { int tag; } _fx_N20Compiler__msgcolor_t; typedef struct _fx_T2LN14Lexer__token_tB { struct _fx_LN14Lexer__token_t_data_t* t0; bool t1; } _fx_T2LN14Lexer__token_tB; typedef struct _fx_T2SB { fx_str_t t0; bool t1; } _fx_T2SB; typedef struct _fx_LT2SB_data_t { int_ rc; struct _fx_LT2SB_data_t* tl; struct _fx_T2SB hd; } _fx_LT2SB_data_t, *_fx_LT2SB; typedef struct _fx_T2SLS { fx_str_t t0; struct _fx_LS_data_t* t1; } _fx_T2SLS; typedef struct _fx_Ta2LS { struct _fx_LS_data_t* t0; struct _fx_LS_data_t* t1; } _fx_Ta2LS; typedef struct _fx_T2iLi { int_ t0; struct _fx_Li_data_t* t1; } _fx_T2iLi; typedef struct _fx_LT2iLi_data_t { int_ rc; struct _fx_LT2iLi_data_t* tl; struct _fx_T2iLi hd; } _fx_LT2iLi_data_t, *_fx_LT2iLi; typedef struct _fx_rLi_data_t { int_ rc; struct _fx_Li_data_t* data; } _fx_rLi_data_t, *_fx_rLi; typedef struct _fx_T3BBS { bool t0; bool t1; fx_str_t t2; } _fx_T3BBS; typedef struct _fx_T2LR17K_form__kmodule_tB { struct _fx_LR17K_form__kmodule_t_data_t* t0; bool t1; } _fx_T2LR17K_form__kmodule_tB; typedef struct _fx_T2LR17C_form__cmodule_tB { struct _fx_LR17C_form__cmodule_t_data_t* t0; bool t1; } _fx_T2LR17C_form__cmodule_tB; typedef struct _fx_Ta9S { fx_str_t t0; fx_str_t t1; fx_str_t t2; fx_str_t t3; fx_str_t t4; fx_str_t t5; fx_str_t t6; fx_str_t t7; fx_str_t t8; } _fx_Ta9S; typedef struct _fx_Ta2S { fx_str_t t0; fx_str_t t1; } _fx_Ta2S; typedef struct _fx_Ta3S { fx_str_t t0; fx_str_t t1; fx_str_t t2; } _fx_Ta3S; typedef struct _fx_Ta4S { fx_str_t t0; fx_str_t t1; fx_str_t t2; fx_str_t t3; } _fx_Ta4S; typedef struct _fx_T5BBLSBS { bool t0; bool t1; struct _fx_LS_data_t* t2; bool t3; fx_str_t t4; } _fx_T5BBLSBS; typedef struct _fx_T5BBLSBLS { bool t0; bool t1; struct _fx_LS_data_t* t2; bool t3; struct _fx_LS_data_t* t4; } _fx_T5BBLSBLS; typedef struct { int_ rc; int_ data; } _fx_E4Exit_data_t; typedef struct { int_ 
rc; fx_str_t data; } _fx_E4Fail_data_t; typedef struct { int_ rc; struct _fx_T2Ta2iS data; } _fx_E22LexerUtils__LexerError_data_t; typedef struct { int_ rc; struct _fx_T2R10Ast__loc_tS data; } _fx_E17Ast__CompileError_data_t; typedef struct { int_ rc; struct _fx_T2R10Ast__loc_tS data; } _fx_E18Parser__ParseError_data_t; static void _fx_free_Nt6option1N10Ast__typ_t(struct _fx_Nt6option1N10Ast__typ_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { _fx_free_N10Ast__typ_t(&(*dst)->u.Some); fx_free(*dst); } *dst = 0; } static void _fx_free_LS(struct _fx_LS_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LS, fx_free_str); } static int _fx_cons_LS(fx_str_t* hd, struct _fx_LS_data_t* tl, bool addref_tl, struct _fx_LS_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LS, fx_copy_str); } static void _fx_free_R18Options__options_t(struct _fx_R18Options__options_t* dst) { _fx_free_LS(&dst->app_args); fx_free_str(&dst->app_filename); fx_free_str(&dst->build_dir); fx_free_str(&dst->build_rootdir); fx_free_str(&dst->cflags); fx_free_str(&dst->clibs); fx_free_str(&dst->filename); _fx_free_LS(&dst->include_path); fx_free_str(&dst->output_name); } static void _fx_copy_R18Options__options_t(struct _fx_R18Options__options_t* src, struct _fx_R18Options__options_t* dst) { FX_COPY_PTR(src->app_args, &dst->app_args); fx_copy_str(&src->app_filename, &dst->app_filename); dst->arch64 = src->arch64; dst->force_rebuild = src->force_rebuild; fx_copy_str(&src->build_dir, &dst->build_dir); fx_copy_str(&src->build_rootdir, &dst->build_rootdir); fx_copy_str(&src->cflags, &dst->cflags); fx_copy_str(&src->clibs, &dst->clibs); dst->compile_by_cpp = src->compile_by_cpp; fx_copy_str(&src->filename, &dst->filename); dst->gen_c = src->gen_c; FX_COPY_PTR(src->include_path, &dst->include_path); dst->debug = src->debug; dst->optim_iters = src->optim_iters; dst->inline_thresh = src->inline_thresh; dst->enable_openmp = src->enable_openmp; dst->relax = src->relax; dst->use_preamble = src->use_preamble; dst->make_app 
= src->make_app; dst->optimize_level = src->optimize_level; fx_copy_str(&src->output_name, &dst->output_name); dst->print_ast0 = src->print_ast0; dst->print_ast = src->print_ast; dst->print_k0 = src->print_k0; dst->print_k = src->print_k; dst->print_tokens = src->print_tokens; dst->run_app = src->run_app; dst->verbose = src->verbose; dst->W_unused = src->W_unused; } static void _fx_make_R18Options__options_t( struct _fx_LS_data_t* r_app_args, fx_str_t* r_app_filename, bool r_arch64, bool r_force_rebuild, fx_str_t* r_build_dir, fx_str_t* r_build_rootdir, fx_str_t* r_cflags, fx_str_t* r_clibs, bool r_compile_by_cpp, fx_str_t* r_filename, bool r_gen_c, struct _fx_LS_data_t* r_include_path, bool r_debug, int_ r_optim_iters, int_ r_inline_thresh, bool r_enable_openmp, bool r_relax, bool r_use_preamble, bool r_make_app, int_ r_optimize_level, fx_str_t* r_output_name, bool r_print_ast0, bool r_print_ast, bool r_print_k0, bool r_print_k, bool r_print_tokens, bool r_run_app, bool r_verbose, bool r_W_unused, struct _fx_R18Options__options_t* fx_result) { FX_COPY_PTR(r_app_args, &fx_result->app_args); fx_copy_str(r_app_filename, &fx_result->app_filename); fx_result->arch64 = r_arch64; fx_result->force_rebuild = r_force_rebuild; fx_copy_str(r_build_dir, &fx_result->build_dir); fx_copy_str(r_build_rootdir, &fx_result->build_rootdir); fx_copy_str(r_cflags, &fx_result->cflags); fx_copy_str(r_clibs, &fx_result->clibs); fx_result->compile_by_cpp = r_compile_by_cpp; fx_copy_str(r_filename, &fx_result->filename); fx_result->gen_c = r_gen_c; FX_COPY_PTR(r_include_path, &fx_result->include_path); fx_result->debug = r_debug; fx_result->optim_iters = r_optim_iters; fx_result->inline_thresh = r_inline_thresh; fx_result->enable_openmp = r_enable_openmp; fx_result->relax = r_relax; fx_result->use_preamble = r_use_preamble; fx_result->make_app = r_make_app; fx_result->optimize_level = r_optimize_level; fx_copy_str(r_output_name, &fx_result->output_name); fx_result->print_ast0 = r_print_ast0; 
fx_result->print_ast = r_print_ast; fx_result->print_k0 = r_print_k0; fx_result->print_k = r_print_k; fx_result->print_tokens = r_print_tokens; fx_result->run_app = r_run_app; fx_result->verbose = r_verbose; fx_result->W_unused = r_W_unused; } static void _fx_free_T2Ta2iS(struct _fx_T2Ta2iS* dst) { fx_free_str(&dst->t1); } static void _fx_copy_T2Ta2iS(struct _fx_T2Ta2iS* src, struct _fx_T2Ta2iS* dst) { dst->t0 = src->t0; fx_copy_str(&src->t1, &dst->t1); } static void _fx_make_T2Ta2iS(struct _fx_Ta2i* t0, fx_str_t* t1, struct _fx_T2Ta2iS* fx_result) { fx_result->t0 = *t0; fx_copy_str(t1, &fx_result->t1); } static int _fx_cons_LN12Ast__scope_t( struct _fx_N12Ast__scope_t* hd, struct _fx_LN12Ast__scope_t_data_t* tl, bool addref_tl, struct _fx_LN12Ast__scope_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN12Ast__scope_t, FX_COPY_SIMPLE_BY_PTR); } static void _fx_free_R16Ast__val_flags_t(struct _fx_R16Ast__val_flags_t* dst) { fx_free_list_simple(&dst->val_flag_global); } static void _fx_copy_R16Ast__val_flags_t(struct _fx_R16Ast__val_flags_t* src, struct _fx_R16Ast__val_flags_t* dst) { dst->val_flag_arg = src->val_flag_arg; dst->val_flag_mutable = src->val_flag_mutable; dst->val_flag_temp = src->val_flag_temp; dst->val_flag_tempref = src->val_flag_tempref; dst->val_flag_private = src->val_flag_private; dst->val_flag_subarray = src->val_flag_subarray; dst->val_flag_instance = src->val_flag_instance; dst->val_flag_method = src->val_flag_method; dst->val_flag_ctor = src->val_flag_ctor; FX_COPY_PTR(src->val_flag_global, &dst->val_flag_global); } static void _fx_make_R16Ast__val_flags_t( bool r_val_flag_arg, bool r_val_flag_mutable, bool r_val_flag_temp, bool r_val_flag_tempref, bool r_val_flag_private, bool r_val_flag_subarray, bool r_val_flag_instance, struct _fx_T2R9Ast__id_ti* r_val_flag_method, int_ r_val_flag_ctor, struct _fx_LN12Ast__scope_t_data_t* r_val_flag_global, struct _fx_R16Ast__val_flags_t* fx_result) { fx_result->val_flag_arg = r_val_flag_arg; 
/* Auto-generated runtime glue (ficus compiler output): free/copy/make/cons
 * helpers for C-form AST types.  Mangled names encode the ficus type
 * (R..=record, T..=tuple, L..=list, N..=variant, r..=ref) -- presumably
 * produced by the ficus code generator; do not hand-edit (TODO: confirm
 * generator source before any manual change). */

/* Tail of _fx_make_R16Ast__val_flags_t (its head is before this chunk):
 * scalar flags are copied by value; val_flag_global is a ref-counted ptr. */
fx_result->val_flag_mutable = r_val_flag_mutable;
fx_result->val_flag_temp = r_val_flag_temp;
fx_result->val_flag_tempref = r_val_flag_tempref;
fx_result->val_flag_private = r_val_flag_private;
fx_result->val_flag_subarray = r_val_flag_subarray;
fx_result->val_flag_instance = r_val_flag_instance;
fx_result->val_flag_method = *r_val_flag_method;
fx_result->val_flag_ctor = r_val_flag_ctor;
FX_COPY_PTR(r_val_flag_global, &fx_result->val_flag_global);
}

/* Tuple (Ast.id_t, C_form.ctyp_t): only t1 is heap-managed. */
static void _fx_free_T2R9Ast__id_tN14C_form__ctyp_t(struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* dst)
{
    _fx_free_N14C_form__ctyp_t(&dst->t1);
}

static void _fx_copy_T2R9Ast__id_tN14C_form__ctyp_t(
    struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* src,
    struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
}

static void _fx_make_T2R9Ast__id_tN14C_form__ctyp_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_N14C_form__ctyp_t_data_t* t1,
    struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
}

/* List of the tuple above: destructor/constructor bodies are expanded
 * from the FX_FREE_LIST_IMPL / FX_MAKE_LIST_IMPL runtime macros. */
static void _fx_free_LT2R9Ast__id_tN14C_form__ctyp_t(struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN14C_form__ctyp_t, _fx_free_T2R9Ast__id_tN14C_form__ctyp_t);
}

static int _fx_cons_LT2R9Ast__id_tN14C_form__ctyp_t(
    struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* hd,
    struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN14C_form__ctyp_t, _fx_copy_T2R9Ast__id_tN14C_form__ctyp_t);
}

/* Record C_form.cdefinterface_t: release only the heap-backed fields
 * (cname string, method list, scope list). */
static void _fx_free_R23C_form__cdefinterface_t(struct _fx_R23C_form__cdefinterface_t* dst)
{
    fx_free_str(&dst->ci_cname);
    _fx_free_LT2R9Ast__id_tN14C_form__ctyp_t(&dst->ci_all_methods);
    fx_free_list_simple(&dst->ci_scope);
}

static void _fx_copy_R23C_form__cdefinterface_t(
    struct _fx_R23C_form__cdefinterface_t* src,
    struct _fx_R23C_form__cdefinterface_t* dst)
{
    dst->ci_name = src->ci_name;
fx_copy_str(&src->ci_cname, &dst->ci_cname);
    dst->ci_id = src->ci_id;
    dst->ci_vtbl = src->ci_vtbl;
    dst->ci_base = src->ci_base;
    FX_COPY_PTR(src->ci_all_methods, &dst->ci_all_methods);
    FX_COPY_PTR(src->ci_scope, &dst->ci_scope);
    dst->ci_loc = src->ci_loc;
}

/* "make": build a record in *fx_result from per-field arguments; pointer
 * args are passed by address and copied/addref'ed, never stolen. */
static void _fx_make_R23C_form__cdefinterface_t(
    struct _fx_R9Ast__id_t* r_ci_name,
    fx_str_t* r_ci_cname,
    struct _fx_R9Ast__id_t* r_ci_id,
    struct _fx_R9Ast__id_t* r_ci_vtbl,
    struct _fx_R9Ast__id_t* r_ci_base,
    struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* r_ci_all_methods,
    struct _fx_LN12Ast__scope_t_data_t* r_ci_scope,
    struct _fx_R10Ast__loc_t* r_ci_loc,
    struct _fx_R23C_form__cdefinterface_t* fx_result)
{
    fx_result->ci_name = *r_ci_name;
    fx_copy_str(r_ci_cname, &fx_result->ci_cname);
    fx_result->ci_id = *r_ci_id;
    fx_result->ci_vtbl = *r_ci_vtbl;
    fx_result->ci_base = *r_ci_base;
    FX_COPY_PTR(r_ci_all_methods, &fx_result->ci_all_methods);
    FX_COPY_PTR(r_ci_scope, &fx_result->ci_scope);
    fx_result->ci_loc = *r_ci_loc;
}

/* Ref-cell wrappers around cdefinterface_t (FX_*_REF_IMPL macros). */
static void _fx_free_rR23C_form__cdefinterface_t(struct _fx_rR23C_form__cdefinterface_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rR23C_form__cdefinterface_t, _fx_free_R23C_form__cdefinterface_t);
}

static int _fx_make_rR23C_form__cdefinterface_t(
    struct _fx_R23C_form__cdefinterface_t* arg,
    struct _fx_rR23C_form__cdefinterface_t_data_t** fx_result)
{
    FX_MAKE_REF_IMPL(_fx_rR23C_form__cdefinterface_t, _fx_copy_R23C_form__cdefinterface_t);
}

/* List of C_form.cstmt_t variants (elements are pointer-sized, so the
 * cons copies head with FX_COPY_PTR). */
static void _fx_free_LN15C_form__cstmt_t(struct _fx_LN15C_form__cstmt_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LN15C_form__cstmt_t, _fx_free_N15C_form__cstmt_t);
}

static int _fx_cons_LN15C_form__cstmt_t(
    struct _fx_N15C_form__cstmt_t_data_t* hd,
    struct _fx_LN15C_form__cstmt_t_data_t* tl,
    bool addref_tl,
    struct _fx_LN15C_form__cstmt_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LN15C_form__cstmt_t, FX_COPY_PTR);
}

/* List of carg_attr_t: plain values, copied byte-wise via
 * FX_COPY_SIMPLE_BY_PTR (no destructor needed, hence no free fn here). */
static int _fx_cons_LN19C_form__carg_attr_t(
    struct _fx_N19C_form__carg_attr_t* hd,
    struct _fx_LN19C_form__carg_attr_t_data_t* tl,
    bool addref_tl,
    struct
_fx_LN19C_form__carg_attr_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LN19C_form__carg_attr_t, FX_COPY_SIMPLE_BY_PTR);
}

/* Triple (id, ctyp, carg_attr list) describing one C function argument. */
static void _fx_free_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t(
    struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* dst)
{
    _fx_free_N14C_form__ctyp_t(&dst->t1);
    fx_free_list_simple(&dst->t2);
}

static void _fx_copy_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t(
    struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* src,
    struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
    FX_COPY_PTR(src->t2, &dst->t2);
}

static void _fx_make_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_N14C_form__ctyp_t_data_t* t1,
    struct _fx_LN19C_form__carg_attr_t_data_t* t2,
    struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
    FX_COPY_PTR(t2, &fx_result->t2);
}

static void _fx_free_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t(
    struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t,
        _fx_free_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t);
}

static int _fx_cons_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t(
    struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* hd,
    struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t,
        _fx_copy_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t);
}

/* Record C_form.cdeffun_t (a C function definition); continues on the
 * next physical line. */
static void _fx_free_R17C_form__cdeffun_t(struct _fx_R17C_form__cdeffun_t* dst)
{
    fx_free_str(&dst->cf_cname);
    _fx_free_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t(&dst->cf_args);
/* Continuation of _fx_free_R17C_form__cdeffun_t: release return type,
 * body statement list and scope. */
    _fx_free_N14C_form__ctyp_t(&dst->cf_rt);
    _fx_free_LN15C_form__cstmt_t(&dst->cf_body);
    fx_free_list_simple(&dst->cf_scope);
}

static void _fx_copy_R17C_form__cdeffun_t(struct _fx_R17C_form__cdeffun_t* src, struct _fx_R17C_form__cdeffun_t* dst)
{
    dst->cf_name = src->cf_name;
    fx_copy_str(&src->cf_cname, &dst->cf_cname);
    FX_COPY_PTR(src->cf_args, &dst->cf_args);
    FX_COPY_PTR(src->cf_rt, &dst->cf_rt);
    FX_COPY_PTR(src->cf_body, &dst->cf_body);
    dst->cf_flags = src->cf_flags;
    FX_COPY_PTR(src->cf_scope, &dst->cf_scope);
    dst->cf_loc = src->cf_loc;
}

static void _fx_make_R17C_form__cdeffun_t(
    struct _fx_R9Ast__id_t* r_cf_name,
    fx_str_t* r_cf_cname,
    struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t* r_cf_args,
    struct _fx_N14C_form__ctyp_t_data_t* r_cf_rt,
    struct _fx_LN15C_form__cstmt_t_data_t* r_cf_body,
    struct _fx_R16Ast__fun_flags_t* r_cf_flags,
    struct _fx_LN12Ast__scope_t_data_t* r_cf_scope,
    struct _fx_R10Ast__loc_t* r_cf_loc,
    struct _fx_R17C_form__cdeffun_t* fx_result)
{
    fx_result->cf_name = *r_cf_name;
    fx_copy_str(r_cf_cname, &fx_result->cf_cname);
    FX_COPY_PTR(r_cf_args, &fx_result->cf_args);
    FX_COPY_PTR(r_cf_rt, &fx_result->cf_rt);
    FX_COPY_PTR(r_cf_body, &fx_result->cf_body);
    fx_result->cf_flags = *r_cf_flags;
    FX_COPY_PTR(r_cf_scope, &fx_result->cf_scope);
    fx_result->cf_loc = *r_cf_loc;
}

/* Ref-cell wrappers around cdeffun_t. */
static void _fx_free_rR17C_form__cdeffun_t(struct _fx_rR17C_form__cdeffun_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rR17C_form__cdeffun_t, _fx_free_R17C_form__cdeffun_t);
}

static int _fx_make_rR17C_form__cdeffun_t(
    struct _fx_R17C_form__cdeffun_t* arg,
    struct _fx_rR17C_form__cdeffun_t_data_t** fx_result)
{
    FX_MAKE_REF_IMPL(_fx_rR17C_form__cdeffun_t, _fx_copy_R17C_form__cdeffun_t);
}

/* id list cons: elements are simple values (copied byte-wise). */
static int _fx_cons_LR9Ast__id_t(
    struct _fx_R9Ast__id_t* hd,
    struct _fx_LR9Ast__id_t_data_t* tl,
    bool addref_tl,
    struct _fx_LR9Ast__id_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LR9Ast__id_t, FX_COPY_SIMPLE_BY_PTR);
}

/* Record C_form.ctprops_t (per-type properties): only ctp_make is a list. */
static void _fx_free_R17C_form__ctprops_t(struct
_fx_R17C_form__ctprops_t* dst)
{
    fx_free_list_simple(&dst->ctp_make);
}

static void _fx_copy_R17C_form__ctprops_t(struct _fx_R17C_form__ctprops_t* src, struct _fx_R17C_form__ctprops_t* dst)
{
    dst->ctp_scalar = src->ctp_scalar;
    dst->ctp_complex = src->ctp_complex;
    dst->ctp_ptr = src->ctp_ptr;
    dst->ctp_pass_by_ref = src->ctp_pass_by_ref;
    FX_COPY_PTR(src->ctp_make, &dst->ctp_make);
    dst->ctp_free = src->ctp_free;
    dst->ctp_copy = src->ctp_copy;
}

static void _fx_make_R17C_form__ctprops_t(
    bool r_ctp_scalar,
    bool r_ctp_complex,
    bool r_ctp_ptr,
    bool r_ctp_pass_by_ref,
    struct _fx_LR9Ast__id_t_data_t* r_ctp_make,
    struct _fx_Ta2R9Ast__id_t* r_ctp_free,
    struct _fx_Ta2R9Ast__id_t* r_ctp_copy,
    struct _fx_R17C_form__ctprops_t* fx_result)
{
    fx_result->ctp_scalar = r_ctp_scalar;
    fx_result->ctp_complex = r_ctp_complex;
    fx_result->ctp_ptr = r_ctp_ptr;
    fx_result->ctp_pass_by_ref = r_ctp_pass_by_ref;
    FX_COPY_PTR(r_ctp_make, &fx_result->ctp_make);
    fx_result->ctp_free = *r_ctp_free;
    fx_result->ctp_copy = *r_ctp_copy;
}

/* Record C_form.cdeftyp_t (a C type definition). */
static void _fx_free_R17C_form__cdeftyp_t(struct _fx_R17C_form__cdeftyp_t* dst)
{
    _fx_free_N14C_form__ctyp_t(&dst->ct_typ);
    fx_free_str(&dst->ct_cname);
    _fx_free_R17C_form__ctprops_t(&dst->ct_props);
    fx_free_list_simple(&dst->ct_ifaces);
    fx_free_list_simple(&dst->ct_scope);
}

static void _fx_copy_R17C_form__cdeftyp_t(struct _fx_R17C_form__cdeftyp_t* src, struct _fx_R17C_form__cdeftyp_t* dst)
{
    dst->ct_name = src->ct_name;
    FX_COPY_PTR(src->ct_typ, &dst->ct_typ);
    fx_copy_str(&src->ct_cname, &dst->ct_cname);
    _fx_copy_R17C_form__ctprops_t(&src->ct_props, &dst->ct_props);
    dst->ct_data_start = src->ct_data_start;
    dst->ct_enum = src->ct_enum;
    FX_COPY_PTR(src->ct_ifaces, &dst->ct_ifaces);
    dst->ct_ifaces_id = src->ct_ifaces_id;
    FX_COPY_PTR(src->ct_scope, &dst->ct_scope);
    dst->ct_loc = src->ct_loc;
}

static void _fx_make_R17C_form__cdeftyp_t(
    struct _fx_R9Ast__id_t* r_ct_name,
    struct _fx_N14C_form__ctyp_t_data_t* r_ct_typ,
    fx_str_t* r_ct_cname,
    struct _fx_R17C_form__ctprops_t*
r_ct_props,
    int_ r_ct_data_start,
    struct _fx_R9Ast__id_t* r_ct_enum,
    struct _fx_LR9Ast__id_t_data_t* r_ct_ifaces,
    struct _fx_R9Ast__id_t* r_ct_ifaces_id,
    struct _fx_LN12Ast__scope_t_data_t* r_ct_scope,
    struct _fx_R10Ast__loc_t* r_ct_loc,
    struct _fx_R17C_form__cdeftyp_t* fx_result)
{
    fx_result->ct_name = *r_ct_name;
    FX_COPY_PTR(r_ct_typ, &fx_result->ct_typ);
    fx_copy_str(r_ct_cname, &fx_result->ct_cname);
    _fx_copy_R17C_form__ctprops_t(r_ct_props, &fx_result->ct_props);
    fx_result->ct_data_start = r_ct_data_start;
    fx_result->ct_enum = *r_ct_enum;
    FX_COPY_PTR(r_ct_ifaces, &fx_result->ct_ifaces);
    fx_result->ct_ifaces_id = *r_ct_ifaces_id;
    FX_COPY_PTR(r_ct_scope, &fx_result->ct_scope);
    fx_result->ct_loc = *r_ct_loc;
}

/* Ref-cell wrappers around cdeftyp_t. */
static void _fx_free_rR17C_form__cdeftyp_t(struct _fx_rR17C_form__cdeftyp_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rR17C_form__cdeftyp_t, _fx_free_R17C_form__cdeftyp_t);
}

static int _fx_make_rR17C_form__cdeftyp_t(
    struct _fx_R17C_form__cdeftyp_t* arg,
    struct _fx_rR17C_form__cdeftyp_t_data_t** fx_result)
{
    FX_MAKE_REF_IMPL(_fx_rR17C_form__cdeftyp_t, _fx_copy_R17C_form__cdeftyp_t);
}

/* option<cexp> variant: tag 2 ("Some") owns a payload; any other tag
 * carries no heap data.  free resets the tag to 0 after release. */
static void _fx_free_Nt6option1N14C_form__cexp_t(struct _fx_Nt6option1N14C_form__cexp_t* dst)
{
    switch (dst->tag) {
    case 2:
        _fx_free_N14C_form__cexp_t(&dst->u.Some);
        break;
    default:
        ;
    }
    dst->tag = 0;
}

static void _fx_copy_Nt6option1N14C_form__cexp_t(
    struct _fx_Nt6option1N14C_form__cexp_t* src,
    struct _fx_Nt6option1N14C_form__cexp_t* dst)
{
    dst->tag = src->tag;
    switch (src->tag) {
    case 2:
        FX_COPY_PTR(src->u.Some, &dst->u.Some);
        break;
    default:
        /* no pointers in other cases: a plain union copy suffices */
        dst->u = src->u;
    }
}

/* Tuple (id, option<cexp>) -- e.g. an enum member with optional value. */
static void _fx_free_T2R9Ast__id_tNt6option1N14C_form__cexp_t(struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* dst)
{
    _fx_free_Nt6option1N14C_form__cexp_t(&dst->t1);
}

static void _fx_copy_T2R9Ast__id_tNt6option1N14C_form__cexp_t(
    struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* src,
    struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* dst)
{
    dst->t0 = src->t0;
    _fx_copy_Nt6option1N14C_form__cexp_t(&src->t1,
/* Continuation of _fx_copy_T2R9Ast__id_tNt6option1N14C_form__cexp_t. */
        &dst->t1);
}

static void _fx_make_T2R9Ast__id_tNt6option1N14C_form__cexp_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_Nt6option1N14C_form__cexp_t* t1,
    struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* fx_result)
{
    fx_result->t0 = *t0;
    _fx_copy_Nt6option1N14C_form__cexp_t(t1, &fx_result->t1);
}

/* List of (id, option<cexp>) tuples. */
static void _fx_free_LT2R9Ast__id_tNt6option1N14C_form__cexp_t(
    struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t,
        _fx_free_T2R9Ast__id_tNt6option1N14C_form__cexp_t);
}

static int _fx_cons_LT2R9Ast__id_tNt6option1N14C_form__cexp_t(
    struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* hd,
    struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t,
        _fx_copy_T2R9Ast__id_tNt6option1N14C_form__cexp_t);
}

/* Record C_form.cdefenum_t (a C enum definition). */
static void _fx_free_R18C_form__cdefenum_t(struct _fx_R18C_form__cdefenum_t* dst)
{
    _fx_free_LT2R9Ast__id_tNt6option1N14C_form__cexp_t(&dst->cenum_members);
    fx_free_str(&dst->cenum_cname);
    fx_free_list_simple(&dst->cenum_scope);
}

static void _fx_copy_R18C_form__cdefenum_t(struct _fx_R18C_form__cdefenum_t* src, struct _fx_R18C_form__cdefenum_t* dst)
{
    dst->cenum_name = src->cenum_name;
    FX_COPY_PTR(src->cenum_members, &dst->cenum_members);
    fx_copy_str(&src->cenum_cname, &dst->cenum_cname);
    FX_COPY_PTR(src->cenum_scope, &dst->cenum_scope);
    dst->cenum_loc = src->cenum_loc;
}

static void _fx_make_R18C_form__cdefenum_t(
    struct _fx_R9Ast__id_t* r_cenum_name,
    struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t* r_cenum_members,
    fx_str_t* r_cenum_cname,
    struct _fx_LN12Ast__scope_t_data_t* r_cenum_scope,
    struct _fx_R10Ast__loc_t* r_cenum_loc,
    struct _fx_R18C_form__cdefenum_t* fx_result)
{
    fx_result->cenum_name = *r_cenum_name;
    FX_COPY_PTR(r_cenum_members, &fx_result->cenum_members);
    fx_copy_str(r_cenum_cname, &fx_result->cenum_cname);
FX_COPY_PTR(r_cenum_scope, &fx_result->cenum_scope);
    fx_result->cenum_loc = *r_cenum_loc;
}

/* Ref-cell wrappers around cdefenum_t. */
static void _fx_free_rR18C_form__cdefenum_t(struct _fx_rR18C_form__cdefenum_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rR18C_form__cdefenum_t, _fx_free_R18C_form__cdefenum_t);
}

static int _fx_make_rR18C_form__cdefenum_t(
    struct _fx_R18C_form__cdefenum_t* arg,
    struct _fx_rR18C_form__cdefenum_t_data_t** fx_result)
{
    FX_MAKE_REF_IMPL(_fx_rR18C_form__cdefenum_t, _fx_copy_R18C_form__cdefenum_t);
}

/* Record C_form.cdefmacro_t (a C macro definition). */
static void _fx_free_R19C_form__cdefmacro_t(struct _fx_R19C_form__cdefmacro_t* dst)
{
    fx_free_str(&dst->cm_cname);
    fx_free_list_simple(&dst->cm_args);
    _fx_free_LN15C_form__cstmt_t(&dst->cm_body);
    fx_free_list_simple(&dst->cm_scope);
}

static void _fx_copy_R19C_form__cdefmacro_t(struct _fx_R19C_form__cdefmacro_t* src, struct _fx_R19C_form__cdefmacro_t* dst)
{
    dst->cm_name = src->cm_name;
    fx_copy_str(&src->cm_cname, &dst->cm_cname);
    FX_COPY_PTR(src->cm_args, &dst->cm_args);
    FX_COPY_PTR(src->cm_body, &dst->cm_body);
    FX_COPY_PTR(src->cm_scope, &dst->cm_scope);
    dst->cm_loc = src->cm_loc;
}

static void _fx_make_R19C_form__cdefmacro_t(
    struct _fx_R9Ast__id_t* r_cm_name,
    fx_str_t* r_cm_cname,
    struct _fx_LR9Ast__id_t_data_t* r_cm_args,
    struct _fx_LN15C_form__cstmt_t_data_t* r_cm_body,
    struct _fx_LN12Ast__scope_t_data_t* r_cm_scope,
    struct _fx_R10Ast__loc_t* r_cm_loc,
    struct _fx_R19C_form__cdefmacro_t* fx_result)
{
    fx_result->cm_name = *r_cm_name;
    fx_copy_str(r_cm_cname, &fx_result->cm_cname);
    FX_COPY_PTR(r_cm_args, &fx_result->cm_args);
    FX_COPY_PTR(r_cm_body, &fx_result->cm_body);
    FX_COPY_PTR(r_cm_scope, &fx_result->cm_scope);
    fx_result->cm_loc = *r_cm_loc;
}

/* Ref-cell wrappers around cdefmacro_t. */
static void _fx_free_rR19C_form__cdefmacro_t(struct _fx_rR19C_form__cdefmacro_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rR19C_form__cdefmacro_t, _fx_free_R19C_form__cdefmacro_t);
}

static int _fx_make_rR19C_form__cdefmacro_t(
    struct _fx_R19C_form__cdefmacro_t* arg,
    struct _fx_rR19C_form__cdefmacro_t_data_t** fx_result)
{
FX_MAKE_REF_IMPL(_fx_rR19C_form__cdefmacro_t, _fx_copy_R19C_form__cdefmacro_t);
}

/* K-form section starts here: tuple (Ast.id_t, K_form.ktyp_t). */
static void _fx_free_T2R9Ast__id_tN14K_form__ktyp_t(struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* dst)
{
    _fx_free_N14K_form__ktyp_t(&dst->t1);
}

static void _fx_copy_T2R9Ast__id_tN14K_form__ktyp_t(
    struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* src,
    struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
}

static void _fx_make_T2R9Ast__id_tN14K_form__ktyp_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_N14K_form__ktyp_t_data_t* t1,
    struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
}

/* List of (id, ktyp) tuples. */
static void _fx_free_LT2R9Ast__id_tN14K_form__ktyp_t(struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN14K_form__ktyp_t, _fx_free_T2R9Ast__id_tN14K_form__ktyp_t);
}

static int _fx_cons_LT2R9Ast__id_tN14K_form__ktyp_t(
    struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* hd,
    struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN14K_form__ktyp_t, _fx_copy_T2R9Ast__id_tN14K_form__ktyp_t);
}

/* Record K_form.kdefinterface_t. */
static void _fx_free_R23K_form__kdefinterface_t(struct _fx_R23K_form__kdefinterface_t* dst)
{
    fx_free_str(&dst->ki_cname);
    _fx_free_LT2R9Ast__id_tN14K_form__ktyp_t(&dst->ki_all_methods);
    fx_free_list_simple(&dst->ki_scope);
}

static void _fx_copy_R23K_form__kdefinterface_t(
    struct _fx_R23K_form__kdefinterface_t* src,
    struct _fx_R23K_form__kdefinterface_t* dst)
{
    dst->ki_name = src->ki_name;
    dst->ki_base = src->ki_base;
    fx_copy_str(&src->ki_cname, &dst->ki_cname);
    dst->ki_id = src->ki_id;
    FX_COPY_PTR(src->ki_all_methods, &dst->ki_all_methods);
    FX_COPY_PTR(src->ki_scope, &dst->ki_scope);
    dst->ki_loc = src->ki_loc;
}

static void _fx_make_R23K_form__kdefinterface_t(
    struct _fx_R9Ast__id_t* r_ki_name,
    struct _fx_R9Ast__id_t* r_ki_base,
    fx_str_t* r_ki_cname,
    struct _fx_R9Ast__id_t*
/* Continuation of _fx_make_R23K_form__kdefinterface_t's parameter list. */
    r_ki_id,
    struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* r_ki_all_methods,
    struct _fx_LN12Ast__scope_t_data_t* r_ki_scope,
    struct _fx_R10Ast__loc_t* r_ki_loc,
    struct _fx_R23K_form__kdefinterface_t* fx_result)
{
    fx_result->ki_name = *r_ki_name;
    fx_result->ki_base = *r_ki_base;
    fx_copy_str(r_ki_cname, &fx_result->ki_cname);
    fx_result->ki_id = *r_ki_id;
    FX_COPY_PTR(r_ki_all_methods, &fx_result->ki_all_methods);
    FX_COPY_PTR(r_ki_scope, &fx_result->ki_scope);
    fx_result->ki_loc = *r_ki_loc;
}

/* Ref-cell wrappers around kdefinterface_t. */
static void _fx_free_rR23K_form__kdefinterface_t(struct _fx_rR23K_form__kdefinterface_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rR23K_form__kdefinterface_t, _fx_free_R23K_form__kdefinterface_t);
}

static int _fx_make_rR23K_form__kdefinterface_t(
    struct _fx_R23K_form__kdefinterface_t* arg,
    struct _fx_rR23K_form__kdefinterface_t_data_t** fx_result)
{
    FX_MAKE_REF_IMPL(_fx_rR23K_form__kdefinterface_t, _fx_copy_R23K_form__kdefinterface_t);
}

/* Record K_form.kdeffun_t (a K-form function definition). */
static void _fx_free_R17K_form__kdeffun_t(struct _fx_R17K_form__kdeffun_t* dst)
{
    fx_free_str(&dst->kf_cname);
    fx_free_list_simple(&dst->kf_params);
    _fx_free_N14K_form__ktyp_t(&dst->kf_rt);
    _fx_free_N14K_form__kexp_t(&dst->kf_body);
    fx_free_list_simple(&dst->kf_scope);
}

static void _fx_copy_R17K_form__kdeffun_t(struct _fx_R17K_form__kdeffun_t* src, struct _fx_R17K_form__kdeffun_t* dst)
{
    dst->kf_name = src->kf_name;
    fx_copy_str(&src->kf_cname, &dst->kf_cname);
    FX_COPY_PTR(src->kf_params, &dst->kf_params);
    FX_COPY_PTR(src->kf_rt, &dst->kf_rt);
    FX_COPY_PTR(src->kf_body, &dst->kf_body);
    dst->kf_flags = src->kf_flags;
    dst->kf_closure = src->kf_closure;
    FX_COPY_PTR(src->kf_scope, &dst->kf_scope);
    dst->kf_loc = src->kf_loc;
}

static void _fx_make_R17K_form__kdeffun_t(
    struct _fx_R9Ast__id_t* r_kf_name,
    fx_str_t* r_kf_cname,
    struct _fx_LR9Ast__id_t_data_t* r_kf_params,
    struct _fx_N14K_form__ktyp_t_data_t* r_kf_rt,
    struct _fx_N14K_form__kexp_t_data_t* r_kf_body,
    struct _fx_R16Ast__fun_flags_t* r_kf_flags,
    struct _fx_R25K_form__kdefclosureinfo_t*
r_kf_closure,
    struct _fx_LN12Ast__scope_t_data_t* r_kf_scope,
    struct _fx_R10Ast__loc_t* r_kf_loc,
    struct _fx_R17K_form__kdeffun_t* fx_result)
{
    fx_result->kf_name = *r_kf_name;
    fx_copy_str(r_kf_cname, &fx_result->kf_cname);
    FX_COPY_PTR(r_kf_params, &fx_result->kf_params);
    FX_COPY_PTR(r_kf_rt, &fx_result->kf_rt);
    FX_COPY_PTR(r_kf_body, &fx_result->kf_body);
    fx_result->kf_flags = *r_kf_flags;
    fx_result->kf_closure = *r_kf_closure;
    FX_COPY_PTR(r_kf_scope, &fx_result->kf_scope);
    fx_result->kf_loc = *r_kf_loc;
}

/* Ref-cell wrappers around kdeffun_t. */
static void _fx_free_rR17K_form__kdeffun_t(struct _fx_rR17K_form__kdeffun_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rR17K_form__kdeffun_t, _fx_free_R17K_form__kdeffun_t);
}

static int _fx_make_rR17K_form__kdeffun_t(
    struct _fx_R17K_form__kdeffun_t* arg,
    struct _fx_rR17K_form__kdeffun_t_data_t** fx_result)
{
    FX_MAKE_REF_IMPL(_fx_rR17K_form__kdeffun_t, _fx_copy_R17K_form__kdeffun_t);
}

/* Record K_form.kdefexn_t (a K-form exception definition). */
static void _fx_free_R17K_form__kdefexn_t(struct _fx_R17K_form__kdefexn_t* dst)
{
    fx_free_str(&dst->ke_cname);
    fx_free_str(&dst->ke_base_cname);
    _fx_free_N14K_form__ktyp_t(&dst->ke_typ);
    fx_free_list_simple(&dst->ke_scope);
}

static void _fx_copy_R17K_form__kdefexn_t(struct _fx_R17K_form__kdefexn_t* src, struct _fx_R17K_form__kdefexn_t* dst)
{
    dst->ke_name = src->ke_name;
    fx_copy_str(&src->ke_cname, &dst->ke_cname);
    fx_copy_str(&src->ke_base_cname, &dst->ke_base_cname);
    FX_COPY_PTR(src->ke_typ, &dst->ke_typ);
    dst->ke_std = src->ke_std;
    dst->ke_tag = src->ke_tag;
    dst->ke_make = src->ke_make;
    FX_COPY_PTR(src->ke_scope, &dst->ke_scope);
    dst->ke_loc = src->ke_loc;
}

static void _fx_make_R17K_form__kdefexn_t(
    struct _fx_R9Ast__id_t* r_ke_name,
    fx_str_t* r_ke_cname,
    fx_str_t* r_ke_base_cname,
    struct _fx_N14K_form__ktyp_t_data_t* r_ke_typ,
    bool r_ke_std,
    struct _fx_R9Ast__id_t* r_ke_tag,
    struct _fx_R9Ast__id_t* r_ke_make,
    struct _fx_LN12Ast__scope_t_data_t* r_ke_scope,
    struct _fx_R10Ast__loc_t* r_ke_loc,
    struct _fx_R17K_form__kdefexn_t* fx_result)
{
    fx_result->ke_name = *r_ke_name;
fx_copy_str(r_ke_cname, &fx_result->ke_cname);
    fx_copy_str(r_ke_base_cname, &fx_result->ke_base_cname);
    FX_COPY_PTR(r_ke_typ, &fx_result->ke_typ);
    fx_result->ke_std = r_ke_std;
    fx_result->ke_tag = *r_ke_tag;
    fx_result->ke_make = *r_ke_make;
    FX_COPY_PTR(r_ke_scope, &fx_result->ke_scope);
    fx_result->ke_loc = *r_ke_loc;
}

/* Ref-cell wrappers around kdefexn_t. */
static void _fx_free_rR17K_form__kdefexn_t(struct _fx_rR17K_form__kdefexn_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rR17K_form__kdefexn_t, _fx_free_R17K_form__kdefexn_t);
}

static int _fx_make_rR17K_form__kdefexn_t(
    struct _fx_R17K_form__kdefexn_t* arg,
    struct _fx_rR17K_form__kdefexn_t_data_t** fx_result)
{
    FX_MAKE_REF_IMPL(_fx_rR17K_form__kdefexn_t, _fx_copy_R17K_form__kdefexn_t);
}

/* List of ktyp variants. */
static void _fx_free_LN14K_form__ktyp_t(struct _fx_LN14K_form__ktyp_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LN14K_form__ktyp_t, _fx_free_N14K_form__ktyp_t);
}

static int _fx_cons_LN14K_form__ktyp_t(
    struct _fx_N14K_form__ktyp_t_data_t* hd,
    struct _fx_LN14K_form__ktyp_t_data_t* tl,
    bool addref_tl,
    struct _fx_LN14K_form__ktyp_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LN14K_form__ktyp_t, FX_COPY_PTR);
}

/* Tuple (id, id list). */
static void _fx_free_T2R9Ast__id_tLR9Ast__id_t(struct _fx_T2R9Ast__id_tLR9Ast__id_t* dst)
{
    fx_free_list_simple(&dst->t1);
}

static void _fx_copy_T2R9Ast__id_tLR9Ast__id_t(
    struct _fx_T2R9Ast__id_tLR9Ast__id_t* src,
    struct _fx_T2R9Ast__id_tLR9Ast__id_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
}

static void _fx_make_T2R9Ast__id_tLR9Ast__id_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_LR9Ast__id_t_data_t* t1,
    struct _fx_T2R9Ast__id_tLR9Ast__id_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
}

/* List of (id, id list) tuples. */
static void _fx_free_LT2R9Ast__id_tLR9Ast__id_t(struct _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tLR9Ast__id_t, _fx_free_T2R9Ast__id_tLR9Ast__id_t);
}

static int _fx_cons_LT2R9Ast__id_tLR9Ast__id_t(
    struct _fx_T2R9Ast__id_tLR9Ast__id_t* hd,
    struct _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t* tl,
    bool addref_tl,
    struct
/* Continuation of _fx_cons_LT2R9Ast__id_tLR9Ast__id_t. */
_fx_LT2R9Ast__id_tLR9Ast__id_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tLR9Ast__id_t, _fx_copy_T2R9Ast__id_tLR9Ast__id_t);
}

/* Record K_form.kdefvariant_t (a K-form variant/sum-type definition). */
static void _fx_free_R21K_form__kdefvariant_t(struct _fx_R21K_form__kdefvariant_t* dst)
{
    fx_free_str(&dst->kvar_cname);
    _fx_free_LN14K_form__ktyp_t(&dst->kvar_targs);
    _fx_free_LT2R9Ast__id_tN14K_form__ktyp_t(&dst->kvar_cases);
    fx_free_list_simple(&dst->kvar_ctors);
    _fx_free_LT2R9Ast__id_tLR9Ast__id_t(&dst->kvar_ifaces);
    fx_free_list_simple(&dst->kvar_scope);
}

static void _fx_copy_R21K_form__kdefvariant_t(
    struct _fx_R21K_form__kdefvariant_t* src,
    struct _fx_R21K_form__kdefvariant_t* dst)
{
    dst->kvar_name = src->kvar_name;
    fx_copy_str(&src->kvar_cname, &dst->kvar_cname);
    dst->kvar_proto = src->kvar_proto;
    dst->kvar_props = src->kvar_props;
    FX_COPY_PTR(src->kvar_targs, &dst->kvar_targs);
    FX_COPY_PTR(src->kvar_cases, &dst->kvar_cases);
    FX_COPY_PTR(src->kvar_ctors, &dst->kvar_ctors);
    dst->kvar_flags = src->kvar_flags;
    FX_COPY_PTR(src->kvar_ifaces, &dst->kvar_ifaces);
    FX_COPY_PTR(src->kvar_scope, &dst->kvar_scope);
    dst->kvar_loc = src->kvar_loc;
}

static void _fx_make_R21K_form__kdefvariant_t(
    struct _fx_R9Ast__id_t* r_kvar_name,
    fx_str_t* r_kvar_cname,
    struct _fx_R9Ast__id_t* r_kvar_proto,
    struct _fx_Nt6option1R17K_form__ktprops_t* r_kvar_props,
    struct _fx_LN14K_form__ktyp_t_data_t* r_kvar_targs,
    struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* r_kvar_cases,
    struct _fx_LR9Ast__id_t_data_t* r_kvar_ctors,
    struct _fx_R16Ast__var_flags_t* r_kvar_flags,
    struct _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t* r_kvar_ifaces,
    struct _fx_LN12Ast__scope_t_data_t* r_kvar_scope,
    struct _fx_R10Ast__loc_t* r_kvar_loc,
    struct _fx_R21K_form__kdefvariant_t* fx_result)
{
    fx_result->kvar_name = *r_kvar_name;
    fx_copy_str(r_kvar_cname, &fx_result->kvar_cname);
    fx_result->kvar_proto = *r_kvar_proto;
    fx_result->kvar_props = *r_kvar_props;
    FX_COPY_PTR(r_kvar_targs, &fx_result->kvar_targs);
    FX_COPY_PTR(r_kvar_cases, &fx_result->kvar_cases);
FX_COPY_PTR(r_kvar_ctors, &fx_result->kvar_ctors);
    fx_result->kvar_flags = *r_kvar_flags;
    FX_COPY_PTR(r_kvar_ifaces, &fx_result->kvar_ifaces);
    FX_COPY_PTR(r_kvar_scope, &fx_result->kvar_scope);
    fx_result->kvar_loc = *r_kvar_loc;
}

/* Ref-cell wrappers around kdefvariant_t. */
static void _fx_free_rR21K_form__kdefvariant_t(struct _fx_rR21K_form__kdefvariant_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rR21K_form__kdefvariant_t, _fx_free_R21K_form__kdefvariant_t);
}

static int _fx_make_rR21K_form__kdefvariant_t(
    struct _fx_R21K_form__kdefvariant_t* arg,
    struct _fx_rR21K_form__kdefvariant_t_data_t** fx_result)
{
    FX_MAKE_REF_IMPL(_fx_rR21K_form__kdefvariant_t, _fx_copy_R21K_form__kdefvariant_t);
}

/* Record K_form.kdeftyp_t (a K-form type definition). */
static void _fx_free_R17K_form__kdeftyp_t(struct _fx_R17K_form__kdeftyp_t* dst)
{
    fx_free_str(&dst->kt_cname);
    _fx_free_LN14K_form__ktyp_t(&dst->kt_targs);
    _fx_free_N14K_form__ktyp_t(&dst->kt_typ);
    fx_free_list_simple(&dst->kt_scope);
}

static void _fx_copy_R17K_form__kdeftyp_t(struct _fx_R17K_form__kdeftyp_t* src, struct _fx_R17K_form__kdeftyp_t* dst)
{
    dst->kt_name = src->kt_name;
    fx_copy_str(&src->kt_cname, &dst->kt_cname);
    dst->kt_proto = src->kt_proto;
    dst->kt_props = src->kt_props;
    FX_COPY_PTR(src->kt_targs, &dst->kt_targs);
    FX_COPY_PTR(src->kt_typ, &dst->kt_typ);
    FX_COPY_PTR(src->kt_scope, &dst->kt_scope);
    dst->kt_loc = src->kt_loc;
}

static void _fx_make_R17K_form__kdeftyp_t(
    struct _fx_R9Ast__id_t* r_kt_name,
    fx_str_t* r_kt_cname,
    struct _fx_R9Ast__id_t* r_kt_proto,
    struct _fx_Nt6option1R17K_form__ktprops_t* r_kt_props,
    struct _fx_LN14K_form__ktyp_t_data_t* r_kt_targs,
    struct _fx_N14K_form__ktyp_t_data_t* r_kt_typ,
    struct _fx_LN12Ast__scope_t_data_t* r_kt_scope,
    struct _fx_R10Ast__loc_t* r_kt_loc,
    struct _fx_R17K_form__kdeftyp_t* fx_result)
{
    fx_result->kt_name = *r_kt_name;
    fx_copy_str(r_kt_cname, &fx_result->kt_cname);
    fx_result->kt_proto = *r_kt_proto;
    fx_result->kt_props = *r_kt_props;
    FX_COPY_PTR(r_kt_targs, &fx_result->kt_targs);
    FX_COPY_PTR(r_kt_typ, &fx_result->kt_typ);
    FX_COPY_PTR(r_kt_scope,
&fx_result->kt_scope);
    fx_result->kt_loc = *r_kt_loc;
}

/* Ref-cell wrappers around kdeftyp_t. */
static void _fx_free_rR17K_form__kdeftyp_t(struct _fx_rR17K_form__kdeftyp_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rR17K_form__kdeftyp_t, _fx_free_R17K_form__kdeftyp_t);
}

static int _fx_make_rR17K_form__kdeftyp_t(
    struct _fx_R17K_form__kdeftyp_t* arg,
    struct _fx_rR17K_form__kdeftyp_t_data_t** fx_result)
{
    FX_MAKE_REF_IMPL(_fx_rR17K_form__kdeftyp_t, _fx_copy_R17K_form__kdeftyp_t);
}

/* Record K_form.kdefclosurevars_t (captured free variables of a closure). */
static void _fx_free_R25K_form__kdefclosurevars_t(struct _fx_R25K_form__kdefclosurevars_t* dst)
{
    fx_free_str(&dst->kcv_cname);
    _fx_free_LT2R9Ast__id_tN14K_form__ktyp_t(&dst->kcv_freevars);
    fx_free_list_simple(&dst->kcv_orig_freevars);
    fx_free_list_simple(&dst->kcv_scope);
}

static void _fx_copy_R25K_form__kdefclosurevars_t(
    struct _fx_R25K_form__kdefclosurevars_t* src,
    struct _fx_R25K_form__kdefclosurevars_t* dst)
{
    dst->kcv_name = src->kcv_name;
    fx_copy_str(&src->kcv_cname, &dst->kcv_cname);
    FX_COPY_PTR(src->kcv_freevars, &dst->kcv_freevars);
    FX_COPY_PTR(src->kcv_orig_freevars, &dst->kcv_orig_freevars);
    FX_COPY_PTR(src->kcv_scope, &dst->kcv_scope);
    dst->kcv_loc = src->kcv_loc;
}

static void _fx_make_R25K_form__kdefclosurevars_t(
    struct _fx_R9Ast__id_t* r_kcv_name,
    fx_str_t* r_kcv_cname,
    struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* r_kcv_freevars,
    struct _fx_LR9Ast__id_t_data_t* r_kcv_orig_freevars,
    struct _fx_LN12Ast__scope_t_data_t* r_kcv_scope,
    struct _fx_R10Ast__loc_t* r_kcv_loc,
    struct _fx_R25K_form__kdefclosurevars_t* fx_result)
{
    fx_result->kcv_name = *r_kcv_name;
    fx_copy_str(r_kcv_cname, &fx_result->kcv_cname);
    FX_COPY_PTR(r_kcv_freevars, &fx_result->kcv_freevars);
    FX_COPY_PTR(r_kcv_orig_freevars, &fx_result->kcv_orig_freevars);
    FX_COPY_PTR(r_kcv_scope, &fx_result->kcv_scope);
    fx_result->kcv_loc = *r_kcv_loc;
}

/* Ref-cell wrappers around kdefclosurevars_t (continues next line). */
static void _fx_free_rR25K_form__kdefclosurevars_t(struct _fx_rR25K_form__kdefclosurevars_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rR25K_form__kdefclosurevars_t, _fx_free_R25K_form__kdefclosurevars_t);
}

static
/* Continuation: ref-cell constructor for kdefclosurevars_t. */
int _fx_make_rR25K_form__kdefclosurevars_t(
    struct _fx_R25K_form__kdefclosurevars_t* arg,
    struct _fx_rR25K_form__kdefclosurevars_t_data_t** fx_result)
{
    FX_MAKE_REF_IMPL(_fx_rR25K_form__kdefclosurevars_t, _fx_copy_R25K_form__kdefclosurevars_t);
}

/* option<Ast.exp_t>: open-coded (not macro-expanded) destructor.
 * Decrement the ref-count; when we held the last reference, free the
 * "Some" payload and the cell itself, then null the caller's pointer. */
static void _fx_free_Nt6option1N10Ast__exp_t(struct _fx_Nt6option1N10Ast__exp_t_data_t** dst)
{
    if (*dst && FX_DECREF((*dst)->rc) == 1) {
        _fx_free_N10Ast__exp_t(&(*dst)->u.Some);
        fx_free(*dst);
    }
    *dst = 0;
}

/* Record Ast.defval_t (a value/variable definition). */
static void _fx_free_R13Ast__defval_t(struct _fx_R13Ast__defval_t* dst)
{
    _fx_free_N10Ast__typ_t(&dst->dv_typ);
    _fx_free_R16Ast__val_flags_t(&dst->dv_flags);
    fx_free_list_simple(&dst->dv_scope);
}

static void _fx_copy_R13Ast__defval_t(struct _fx_R13Ast__defval_t* src, struct _fx_R13Ast__defval_t* dst)
{
    dst->dv_name = src->dv_name;
    FX_COPY_PTR(src->dv_typ, &dst->dv_typ);
    _fx_copy_R16Ast__val_flags_t(&src->dv_flags, &dst->dv_flags);
    FX_COPY_PTR(src->dv_scope, &dst->dv_scope);
    dst->dv_loc = src->dv_loc;
}

static void _fx_make_R13Ast__defval_t(
    struct _fx_R9Ast__id_t* r_dv_name,
    struct _fx_N10Ast__typ_t_data_t* r_dv_typ,
    struct _fx_R16Ast__val_flags_t* r_dv_flags,
    struct _fx_LN12Ast__scope_t_data_t* r_dv_scope,
    struct _fx_R10Ast__loc_t* r_dv_loc,
    struct _fx_R13Ast__defval_t* fx_result)
{
    fx_result->dv_name = *r_dv_name;
    FX_COPY_PTR(r_dv_typ, &fx_result->dv_typ);
    _fx_copy_R16Ast__val_flags_t(r_dv_flags, &fx_result->dv_flags);
    FX_COPY_PTR(r_dv_scope, &fx_result->dv_scope);
    fx_result->dv_loc = *r_dv_loc;
}

/* Map<id, env_entry list> record: a tree root plus a comparator
 * function pointer (freed/copied with the fx_*_fp helpers). */
static void _fx_free_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t* dst)
{
    _fx_free_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t(&dst->root);
    fx_free_fp(&dst->cmp);
}

static void _fx_copy_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(
    struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t* src,
    struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t* dst)
{
    FX_COPY_PTR(src->root, &dst->root);
    FX_COPY_FP(&src->cmp, &dst->cmp);
}

static void
_fx_make_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(
    struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t* r_root,
    struct _fx_FPi2R9Ast__id_tR9Ast__id_t* r_cmp,
    struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t* fx_result)
{
    FX_COPY_PTR(r_root, &fx_result->root);
    FX_COPY_FP(r_cmp, &fx_result->cmp);
}

/* List of Ast.pat_t variants. */
static void _fx_free_LN10Ast__pat_t(struct _fx_LN10Ast__pat_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LN10Ast__pat_t, _fx_free_N10Ast__pat_t);
}

static int _fx_cons_LN10Ast__pat_t(
    struct _fx_N10Ast__pat_t_data_t* hd,
    struct _fx_LN10Ast__pat_t_data_t* tl,
    bool addref_tl,
    struct _fx_LN10Ast__pat_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LN10Ast__pat_t, FX_COPY_PTR);
}

/* ref<id list>: a mutable cell holding an id list. */
static void _fx_free_rLR9Ast__id_t(struct _fx_rLR9Ast__id_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rLR9Ast__id_t, fx_free_list_simple);
}

static int _fx_make_rLR9Ast__id_t(struct _fx_LR9Ast__id_t_data_t* arg, struct _fx_rLR9Ast__id_t_data_t** fx_result)
{
    FX_MAKE_REF_IMPL(_fx_rLR9Ast__id_t, FX_COPY_PTR);
}

/* Record Ast.deffun_t (an AST-level function definition, including the
 * template-instantiation list and the captured typing environment). */
static void _fx_free_R13Ast__deffun_t(struct _fx_R13Ast__deffun_t* dst)
{
    fx_free_list_simple(&dst->df_templ_args);
    _fx_free_LN10Ast__pat_t(&dst->df_args);
    _fx_free_N10Ast__typ_t(&dst->df_typ);
    _fx_free_N10Ast__exp_t(&dst->df_body);
    fx_free_list_simple(&dst->df_scope);
    _fx_free_rLR9Ast__id_t(&dst->df_templ_inst);
    _fx_free_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(&dst->df_env);
}

static void _fx_copy_R13Ast__deffun_t(struct _fx_R13Ast__deffun_t* src, struct _fx_R13Ast__deffun_t* dst)
{
    dst->df_name = src->df_name;
    FX_COPY_PTR(src->df_templ_args, &dst->df_templ_args);
    FX_COPY_PTR(src->df_args, &dst->df_args);
    FX_COPY_PTR(src->df_typ, &dst->df_typ);
    FX_COPY_PTR(src->df_body, &dst->df_body);
    dst->df_flags = src->df_flags;
    FX_COPY_PTR(src->df_scope, &dst->df_scope);
    dst->df_loc = src->df_loc;
    FX_COPY_PTR(src->df_templ_inst, &dst->df_templ_inst);
    _fx_copy_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(&src->df_env, &dst->df_env);
}

static void _fx_make_R13Ast__deffun_t(
    struct _fx_R9Ast__id_t*
r_df_name,
    struct _fx_LR9Ast__id_t_data_t* r_df_templ_args,
    struct _fx_LN10Ast__pat_t_data_t* r_df_args,
    struct _fx_N10Ast__typ_t_data_t* r_df_typ,
    struct _fx_N10Ast__exp_t_data_t* r_df_body,
    struct _fx_R16Ast__fun_flags_t* r_df_flags,
    struct _fx_LN12Ast__scope_t_data_t* r_df_scope,
    struct _fx_R10Ast__loc_t* r_df_loc,
    struct _fx_rLR9Ast__id_t_data_t* r_df_templ_inst,
    struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t* r_df_env,
    struct _fx_R13Ast__deffun_t* fx_result)
{
    fx_result->df_name = *r_df_name;
    FX_COPY_PTR(r_df_templ_args, &fx_result->df_templ_args);
    FX_COPY_PTR(r_df_args, &fx_result->df_args);
    FX_COPY_PTR(r_df_typ, &fx_result->df_typ);
    FX_COPY_PTR(r_df_body, &fx_result->df_body);
    fx_result->df_flags = *r_df_flags;
    FX_COPY_PTR(r_df_scope, &fx_result->df_scope);
    fx_result->df_loc = *r_df_loc;
    FX_COPY_PTR(r_df_templ_inst, &fx_result->df_templ_inst);
    _fx_copy_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(r_df_env, &fx_result->df_env);
}

/* Ref-cell wrappers around deffun_t. */
static void _fx_free_rR13Ast__deffun_t(struct _fx_rR13Ast__deffun_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rR13Ast__deffun_t, _fx_free_R13Ast__deffun_t);
}

static int _fx_make_rR13Ast__deffun_t(struct _fx_R13Ast__deffun_t* arg, struct _fx_rR13Ast__deffun_t_data_t** fx_result)
{
    FX_MAKE_REF_IMPL(_fx_rR13Ast__deffun_t, _fx_copy_R13Ast__deffun_t);
}

/* Record Ast.defexn_t (an AST-level exception definition). */
static void _fx_free_R13Ast__defexn_t(struct _fx_R13Ast__defexn_t* dst)
{
    _fx_free_N10Ast__typ_t(&dst->dexn_typ);
    fx_free_list_simple(&dst->dexn_scope);
}

static void _fx_copy_R13Ast__defexn_t(struct _fx_R13Ast__defexn_t* src, struct _fx_R13Ast__defexn_t* dst)
{
    dst->dexn_name = src->dexn_name;
    FX_COPY_PTR(src->dexn_typ, &dst->dexn_typ);
    FX_COPY_PTR(src->dexn_scope, &dst->dexn_scope);
    dst->dexn_loc = src->dexn_loc;
}

static void _fx_make_R13Ast__defexn_t(
    struct _fx_R9Ast__id_t* r_dexn_name,
    struct _fx_N10Ast__typ_t_data_t* r_dexn_typ,
    struct _fx_LN12Ast__scope_t_data_t* r_dexn_scope,
    struct _fx_R10Ast__loc_t* r_dexn_loc,
    struct _fx_R13Ast__defexn_t* fx_result)
{
    fx_result->dexn_name =
*r_dexn_name; FX_COPY_PTR(r_dexn_typ, &fx_result->dexn_typ); FX_COPY_PTR(r_dexn_scope, &fx_result->dexn_scope); fx_result->dexn_loc = *r_dexn_loc; } static void _fx_free_rR13Ast__defexn_t(struct _fx_rR13Ast__defexn_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR13Ast__defexn_t, _fx_free_R13Ast__defexn_t); } static int _fx_make_rR13Ast__defexn_t(struct _fx_R13Ast__defexn_t* arg, struct _fx_rR13Ast__defexn_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR13Ast__defexn_t, _fx_copy_R13Ast__defexn_t); } static void _fx_free_R13Ast__deftyp_t(struct _fx_R13Ast__deftyp_t* dst) { fx_free_list_simple(&dst->dt_templ_args); _fx_free_N10Ast__typ_t(&dst->dt_typ); fx_free_list_simple(&dst->dt_scope); } static void _fx_copy_R13Ast__deftyp_t(struct _fx_R13Ast__deftyp_t* src, struct _fx_R13Ast__deftyp_t* dst) { dst->dt_name = src->dt_name; FX_COPY_PTR(src->dt_templ_args, &dst->dt_templ_args); FX_COPY_PTR(src->dt_typ, &dst->dt_typ); dst->dt_finalized = src->dt_finalized; FX_COPY_PTR(src->dt_scope, &dst->dt_scope); dst->dt_loc = src->dt_loc; } static void _fx_make_R13Ast__deftyp_t( struct _fx_R9Ast__id_t* r_dt_name, struct _fx_LR9Ast__id_t_data_t* r_dt_templ_args, struct _fx_N10Ast__typ_t_data_t* r_dt_typ, bool r_dt_finalized, struct _fx_LN12Ast__scope_t_data_t* r_dt_scope, struct _fx_R10Ast__loc_t* r_dt_loc, struct _fx_R13Ast__deftyp_t* fx_result) { fx_result->dt_name = *r_dt_name; FX_COPY_PTR(r_dt_templ_args, &fx_result->dt_templ_args); FX_COPY_PTR(r_dt_typ, &fx_result->dt_typ); fx_result->dt_finalized = r_dt_finalized; FX_COPY_PTR(r_dt_scope, &fx_result->dt_scope); fx_result->dt_loc = *r_dt_loc; } static void _fx_free_rR13Ast__deftyp_t(struct _fx_rR13Ast__deftyp_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR13Ast__deftyp_t, _fx_free_R13Ast__deftyp_t); } static int _fx_make_rR13Ast__deftyp_t(struct _fx_R13Ast__deftyp_t* arg, struct _fx_rR13Ast__deftyp_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR13Ast__deftyp_t, _fx_copy_R13Ast__deftyp_t); } static void 
/* (id_t, typ_t) tuple helpers — generated free/copy/make plus list (L...) free/cons */
_fx_free_T2R9Ast__id_tN10Ast__typ_t(struct _fx_T2R9Ast__id_tN10Ast__typ_t* dst) { _fx_free_N10Ast__typ_t(&dst->t1); }
static void _fx_copy_T2R9Ast__id_tN10Ast__typ_t( struct _fx_T2R9Ast__id_tN10Ast__typ_t* src, struct _fx_T2R9Ast__id_tN10Ast__typ_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); }
static void _fx_make_T2R9Ast__id_tN10Ast__typ_t( struct _fx_R9Ast__id_t* t0, struct _fx_N10Ast__typ_t_data_t* t1, struct _fx_T2R9Ast__id_tN10Ast__typ_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); }
static void _fx_free_LT2R9Ast__id_tN10Ast__typ_t(struct _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN10Ast__typ_t, _fx_free_T2R9Ast__id_tN10Ast__typ_t); }
static int _fx_cons_LT2R9Ast__id_tN10Ast__typ_t( struct _fx_T2R9Ast__id_tN10Ast__typ_t* hd, struct _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t* tl, bool addref_tl, struct _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN10Ast__typ_t, _fx_copy_T2R9Ast__id_tN10Ast__typ_t); }
/* plain (id_t, id_t) pair list: elements need no per-element destructor */
static int _fx_cons_LTa2R9Ast__id_t( struct _fx_Ta2R9Ast__id_t* hd, struct _fx_LTa2R9Ast__id_t_data_t* tl, bool addref_tl, struct _fx_LTa2R9Ast__id_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LTa2R9Ast__id_t, FX_COPY_SIMPLE_BY_PTR); }
/* (id_t, id-pair list) tuple helpers and their list */
static void _fx_free_T2R9Ast__id_tLTa2R9Ast__id_t(struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t* dst) { fx_free_list_simple(&dst->t1); }
static void _fx_copy_T2R9Ast__id_tLTa2R9Ast__id_t( struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t* src, struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); }
static void _fx_make_T2R9Ast__id_tLTa2R9Ast__id_t( struct _fx_R9Ast__id_t* t0, struct _fx_LTa2R9Ast__id_t_data_t* t1, struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); }
static void _fx_free_LT2R9Ast__id_tLTa2R9Ast__id_t(struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tLTa2R9Ast__id_t, _fx_free_T2R9Ast__id_tLTa2R9Ast__id_t); }
static int _fx_cons_LT2R9Ast__id_tLTa2R9Ast__id_t( struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t* hd, struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t* tl, bool addref_tl, struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tLTa2R9Ast__id_t, _fx_copy_T2R9Ast__id_tLTa2R9Ast__id_t); }
/* defvariant_t record: destructor releases every heap-backed member */
static void _fx_free_R17Ast__defvariant_t(struct _fx_R17Ast__defvariant_t* dst) { fx_free_list_simple(&dst->dvar_templ_args); _fx_free_N10Ast__typ_t(&dst->dvar_alias); _fx_free_LT2R9Ast__id_tN10Ast__typ_t(&dst->dvar_cases); fx_free_list_simple(&dst->dvar_ctors); _fx_free_rLR9Ast__id_t(&dst->dvar_templ_inst); _fx_free_LT2R9Ast__id_tLTa2R9Ast__id_t(&dst->dvar_ifaces); fx_free_list_simple(&dst->dvar_scope); }
static void _fx_copy_R17Ast__defvariant_t(struct _fx_R17Ast__defvariant_t* src, struct _fx_R17Ast__defvariant_t* dst) { dst->dvar_name = src->dvar_name; FX_COPY_PTR(src->dvar_templ_args, &dst->dvar_templ_args); FX_COPY_PTR(src->dvar_alias, &dst->dvar_alias); dst->dvar_flags = src->dvar_flags; FX_COPY_PTR(src->dvar_cases, &dst->dvar_cases); FX_COPY_PTR(src->dvar_ctors, &dst->dvar_ctors); FX_COPY_PTR(src->dvar_templ_inst, &dst->dvar_templ_inst); FX_COPY_PTR(src->dvar_ifaces, &dst->dvar_ifaces); FX_COPY_PTR(src->dvar_scope, &dst->dvar_scope); dst->dvar_loc = src->dvar_loc; }
static void _fx_make_R17Ast__defvariant_t( struct _fx_R9Ast__id_t* r_dvar_name, struct _fx_LR9Ast__id_t_data_t* r_dvar_templ_args, struct _fx_N10Ast__typ_t_data_t* r_dvar_alias, struct _fx_R16Ast__var_flags_t* r_dvar_flags, struct _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t* r_dvar_cases, struct _fx_LR9Ast__id_t_data_t* r_dvar_ctors, struct _fx_rLR9Ast__id_t_data_t* r_dvar_templ_inst, struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t* r_dvar_ifaces, struct _fx_LN12Ast__scope_t_data_t* r_dvar_scope, struct _fx_R10Ast__loc_t* r_dvar_loc, struct _fx_R17Ast__defvariant_t* fx_result) { fx_result->dvar_name = *r_dvar_name;
/* remainder of the generated defvariant_t constructor body (header is on the previous line) */ FX_COPY_PTR(r_dvar_templ_args, &fx_result->dvar_templ_args); FX_COPY_PTR(r_dvar_alias, &fx_result->dvar_alias); fx_result->dvar_flags = *r_dvar_flags; FX_COPY_PTR(r_dvar_cases, &fx_result->dvar_cases); FX_COPY_PTR(r_dvar_ctors, &fx_result->dvar_ctors); FX_COPY_PTR(r_dvar_templ_inst, &fx_result->dvar_templ_inst); FX_COPY_PTR(r_dvar_ifaces, &fx_result->dvar_ifaces); FX_COPY_PTR(r_dvar_scope, &fx_result->dvar_scope); fx_result->dvar_loc = *r_dvar_loc; }
/* ref-to-defvariant_t destructor/constructor */
static void _fx_free_rR17Ast__defvariant_t(struct _fx_rR17Ast__defvariant_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR17Ast__defvariant_t, _fx_free_R17Ast__defvariant_t); }
static int _fx_make_rR17Ast__defvariant_t( struct _fx_R17Ast__defvariant_t* arg, struct _fx_rR17Ast__defvariant_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR17Ast__defvariant_t, _fx_copy_R17Ast__defvariant_t); }
/* (id_t, typ_t, fun_flags_t) method-descriptor triple: free/copy/make + list free/cons */
static void _fx_free_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t( struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t* dst) { _fx_free_N10Ast__typ_t(&dst->t1); }
static void _fx_copy_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t( struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t* src, struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; }
static void _fx_make_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t( struct _fx_R9Ast__id_t* t0, struct _fx_N10Ast__typ_t_data_t* t1, struct _fx_R16Ast__fun_flags_t* t2, struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; }
static void _fx_free_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t( struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t, _fx_free_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t); }
static int _fx_cons_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t( struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t* hd, struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t* tl, bool addref_tl, struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t, _fx_copy_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t); }
/* definterface_t record helpers (method lists + scope are heap-backed) */
static void _fx_free_R19Ast__definterface_t(struct _fx_R19Ast__definterface_t* dst) { _fx_free_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t(&dst->di_new_methods); _fx_free_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t(&dst->di_all_methods); fx_free_list_simple(&dst->di_scope); }
static void _fx_copy_R19Ast__definterface_t(struct _fx_R19Ast__definterface_t* src, struct _fx_R19Ast__definterface_t* dst) { dst->di_name = src->di_name; dst->di_base = src->di_base; FX_COPY_PTR(src->di_new_methods, &dst->di_new_methods); FX_COPY_PTR(src->di_all_methods, &dst->di_all_methods); FX_COPY_PTR(src->di_scope, &dst->di_scope); dst->di_loc = src->di_loc; }
static void _fx_make_R19Ast__definterface_t( struct _fx_R9Ast__id_t* r_di_name, struct _fx_R9Ast__id_t* r_di_base, struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t* r_di_new_methods, struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t* r_di_all_methods, struct _fx_LN12Ast__scope_t_data_t* r_di_scope, struct _fx_R10Ast__loc_t* r_di_loc, struct _fx_R19Ast__definterface_t* fx_result) { fx_result->di_name = *r_di_name; fx_result->di_base = *r_di_base; FX_COPY_PTR(r_di_new_methods, &fx_result->di_new_methods); FX_COPY_PTR(r_di_all_methods, &fx_result->di_all_methods); FX_COPY_PTR(r_di_scope, &fx_result->di_scope); fx_result->di_loc = *r_di_loc; }
static void _fx_free_rR19Ast__definterface_t(struct _fx_rR19Ast__definterface_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR19Ast__definterface_t, _fx_free_R19Ast__definterface_t); }
static int _fx_make_rR19Ast__definterface_t( struct _fx_R19Ast__definterface_t* arg, struct _fx_rR19Ast__definterface_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR19Ast__definterface_t,
_fx_copy_R19Ast__definterface_t); }
/* id_info_t tagged-union destructor: dispatches on tag to release the active payload, then
 * resets tag to 0 (the "no payload" state). Tags without heap payload fall through to default. */
static void _fx_free_N14Ast__id_info_t(struct _fx_N14Ast__id_info_t* dst) { switch (dst->tag) { case 2: _fx_free_R13Ast__defval_t(&dst->u.IdDVal); break; case 3: _fx_free_rR13Ast__deffun_t(&dst->u.IdFun); break; case 4: _fx_free_rR13Ast__defexn_t(&dst->u.IdExn); break; case 5: _fx_free_rR13Ast__deftyp_t(&dst->u.IdTyp); break; case 6: _fx_free_rR17Ast__defvariant_t(&dst->u.IdVariant); break; case 7: _fx_free_rR19Ast__definterface_t(&dst->u.IdInterface); break; default: ; } dst->tag = 0; }
/* id_info_t copy: deep-copies the inline record case (tag 2), shares refcounted cases (3..7),
 * and does a plain union copy for payload-free tags */
static void _fx_copy_N14Ast__id_info_t(struct _fx_N14Ast__id_info_t* src, struct _fx_N14Ast__id_info_t* dst) { dst->tag = src->tag; switch (src->tag) { case 2: _fx_copy_R13Ast__defval_t(&src->u.IdDVal, &dst->u.IdDVal); break; case 3: FX_COPY_PTR(src->u.IdFun, &dst->u.IdFun); break; case 4: FX_COPY_PTR(src->u.IdExn, &dst->u.IdExn); break; case 5: FX_COPY_PTR(src->u.IdTyp, &dst->u.IdTyp); break; case 6: FX_COPY_PTR(src->u.IdVariant, &dst->u.IdVariant); break; case 7: FX_COPY_PTR(src->u.IdInterface, &dst->u.IdInterface); break; default: dst->u = src->u; } }
/* (int, id_info array, id_info) triple — backing element of the Dynvec below */
static void _fx_free_T3iA1N14Ast__id_info_tN14Ast__id_info_t(struct _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t* dst) { fx_free_arr(&dst->t1); _fx_free_N14Ast__id_info_t(&dst->t2); }
static void _fx_copy_T3iA1N14Ast__id_info_tN14Ast__id_info_t( struct _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t* src, struct _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t* dst) { dst->t0 = src->t0; fx_copy_arr(&src->t1, &dst->t1); _fx_copy_N14Ast__id_info_t(&src->t2, &dst->t2); }
static void _fx_make_T3iA1N14Ast__id_info_tN14Ast__id_info_t( int_ t0, fx_arr_t* t1, struct _fx_N14Ast__id_info_t* t2, struct _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t* fx_result) { fx_result->t0 = t0; fx_copy_arr(t1, &fx_result->t1); _fx_copy_N14Ast__id_info_t(t2, &fx_result->t2); }
/* Dynvec<id_info_t> node destructor: FX_DECREF == 1 appears to mean "last reference released" */
static void _fx_free_Nt9Dynvec__t1N14Ast__id_info_t(struct _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { _fx_free_T3iA1N14Ast__id_info_tN14Ast__id_info_t(&(*dst)->u.t); fx_free(*dst); } *dst = 0; }
/* env_entry_t list helpers */
static void _fx_free_LN16Ast__env_entry_t(struct _fx_LN16Ast__env_entry_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN16Ast__env_entry_t, _fx_free_N16Ast__env_entry_t); }
static int _fx_cons_LN16Ast__env_entry_t( struct _fx_N16Ast__env_entry_t_data_t* hd, struct _fx_LN16Ast__env_entry_t_data_t* tl, bool addref_tl, struct _fx_LN16Ast__env_entry_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN16Ast__env_entry_t, FX_COPY_PTR); }
/* Map tree node payload: (color, left subtree, key id_t, env_entry list, right subtree) */
static void _fx_free_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t( struct _fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t* dst) { _fx_free_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t(&dst->t1); _fx_free_LN16Ast__env_entry_t(&dst->t3); _fx_free_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t(&dst->t4); }
static void _fx_copy_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t( struct _fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t* src, struct _fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; FX_COPY_PTR(src->t3, &dst->t3); FX_COPY_PTR(src->t4, &dst->t4); }
static void _fx_make_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t( struct _fx_N12Map__color_t* t0, struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t* t1, struct _fx_R9Ast__id_t* t2, struct _fx_LN16Ast__env_entry_t_data_t* t3, struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t* t4, struct _fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; FX_COPY_PTR(t3, &fx_result->t3); FX_COPY_PTR(t4, &fx_result->t4); }
/* Map tree node destructor (recursive via the Node payload destructor above) */
static void _fx_free_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t( struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { _fx_free_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t( &(*dst)->u.Node); fx_free(*dst); } *dst = 0; }
/* (loc_t, string) pair helpers */
static void _fx_free_T2R10Ast__loc_tS(struct _fx_T2R10Ast__loc_tS* dst) { fx_free_str(&dst->t1); }
static void _fx_copy_T2R10Ast__loc_tS(struct _fx_T2R10Ast__loc_tS* src, struct _fx_T2R10Ast__loc_tS* dst) { dst->t0 = src->t0; fx_copy_str(&src->t1, &dst->t1); }
static void _fx_make_T2R10Ast__loc_tS(struct _fx_R10Ast__loc_t* t0, fx_str_t* t1, struct _fx_T2R10Ast__loc_tS* fx_result) { fx_result->t0 = *t0; fx_copy_str(t1, &fx_result->t1); }
/* lit_t variant: only the string literal case (tag 5) owns heap memory */
static void _fx_free_N10Ast__lit_t(struct _fx_N10Ast__lit_t* dst) { switch (dst->tag) { case 5: fx_free_str(&dst->u.LitString); break; default: ; } dst->tag = 0; }
static void _fx_copy_N10Ast__lit_t(struct _fx_N10Ast__lit_t* src, struct _fx_N10Ast__lit_t* dst) { dst->tag = src->tag; switch (src->tag) { case 5: fx_copy_str(&src->u.LitString, &dst->u.LitString); break; default: dst->u = src->u; } }
/* ref-to-option<typ_t> helpers */
static void _fx_free_rNt6option1N10Ast__typ_t(struct _fx_rNt6option1N10Ast__typ_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rNt6option1N10Ast__typ_t, _fx_free_Nt6option1N10Ast__typ_t); }
static int _fx_make_rNt6option1N10Ast__typ_t( struct _fx_Nt6option1N10Ast__typ_t_data_t* arg, struct _fx_rNt6option1N10Ast__typ_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rNt6option1N10Ast__typ_t, FX_COPY_PTR); }
/* typ_t list helpers */
static void _fx_free_LN10Ast__typ_t(struct _fx_LN10Ast__typ_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN10Ast__typ_t, _fx_free_N10Ast__typ_t); }
static int _fx_cons_LN10Ast__typ_t( struct _fx_N10Ast__typ_t_data_t* hd, struct _fx_LN10Ast__typ_t_data_t* tl, bool addref_tl, struct _fx_LN10Ast__typ_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN10Ast__typ_t, FX_COPY_PTR); }
/* (typ list, typ) pair — function type payload (arg types, return type) */
static void _fx_free_T2LN10Ast__typ_tN10Ast__typ_t(struct _fx_T2LN10Ast__typ_tN10Ast__typ_t* dst) { _fx_free_LN10Ast__typ_t(&dst->t0); _fx_free_N10Ast__typ_t(&dst->t1); }
static void _fx_copy_T2LN10Ast__typ_tN10Ast__typ_t( struct _fx_T2LN10Ast__typ_tN10Ast__typ_t* src, struct _fx_T2LN10Ast__typ_tN10Ast__typ_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); }
static void _fx_make_T2LN10Ast__typ_tN10Ast__typ_t( struct _fx_LN10Ast__typ_t_data_t* t0, struct _fx_N10Ast__typ_t_data_t* t1, struct _fx_T2LN10Ast__typ_tN10Ast__typ_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); }
/* (int, typ) pair — array type payload (dimensionality, element type) */
static void _fx_free_T2iN10Ast__typ_t(struct _fx_T2iN10Ast__typ_t* dst) { _fx_free_N10Ast__typ_t(&dst->t1); }
static void _fx_copy_T2iN10Ast__typ_t(struct _fx_T2iN10Ast__typ_t* src, struct _fx_T2iN10Ast__typ_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); }
static void _fx_make_T2iN10Ast__typ_t(int_ t0, struct _fx_N10Ast__typ_t_data_t* t1, struct _fx_T2iN10Ast__typ_t* fx_result) { fx_result->t0 = t0; FX_COPY_PTR(t1, &fx_result->t1); }
/* (val_flags, id, typ, exp) 4-tuple — one record field descriptor */
static void _fx_free_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t( struct _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t* dst) { _fx_free_R16Ast__val_flags_t(&dst->t0); _fx_free_N10Ast__typ_t(&dst->t2); _fx_free_N10Ast__exp_t(&dst->t3); }
static void _fx_copy_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t( struct _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t* src, struct _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t* dst) { _fx_copy_R16Ast__val_flags_t(&src->t0, &dst->t0); dst->t1 = src->t1; FX_COPY_PTR(src->t2, &dst->t2); FX_COPY_PTR(src->t3, &dst->t3); }
static void _fx_make_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t( struct _fx_R16Ast__val_flags_t* t0, struct _fx_R9Ast__id_t* t1, struct _fx_N10Ast__typ_t_data_t* t2, struct _fx_N10Ast__exp_t_data_t* t3, struct _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t* fx_result) { _fx_copy_R16Ast__val_flags_t(t0, &fx_result->t0); fx_result->t1 = *t1; FX_COPY_PTR(t2, &fx_result->t2); FX_COPY_PTR(t3, &fx_result->t3); }
/* list of record field descriptors */
static void _fx_free_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t( struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t, _fx_free_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t); }
static int _fx_cons_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t( struct _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t* hd, struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t* tl, bool addref_tl, struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t, _fx_copy_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t); }
/* (field-descriptor list, bool) pair — record type payload */
static void _fx_free_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB( struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB* dst) { _fx_free_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t(&dst->t0); }
static void _fx_copy_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB( struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB* src, struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; }
static void _fx_make_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB( struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t* t0, bool t1, struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = t1; }
/* ref to the record-type payload above */
static void _fx_free_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB( struct _fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB_data_t** dst) { FX_FREE_REF_IMPL(_fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB, _fx_free_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB); }
static int _fx_make_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB( struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB* arg, struct _fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB, _fx_copy_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB); }
/* (typ list, id) pair — type application payload */
static void _fx_free_T2LN10Ast__typ_tR9Ast__id_t(struct _fx_T2LN10Ast__typ_tR9Ast__id_t* dst) { _fx_free_LN10Ast__typ_t(&dst->t0); }
static void _fx_copy_T2LN10Ast__typ_tR9Ast__id_t( struct _fx_T2LN10Ast__typ_tR9Ast__id_t* src, struct _fx_T2LN10Ast__typ_tR9Ast__id_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; }
static void _fx_make_T2LN10Ast__typ_tR9Ast__id_t( struct _fx_LN10Ast__typ_t_data_t* t0, struct _fx_R9Ast__id_t* t1, struct _fx_T2LN10Ast__typ_tR9Ast__id_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; }
/* typ_t variant destructor (start): tag-dispatched release of the active payload; the switch
 * continues on the next line */
static void _fx_free_N10Ast__typ_t(struct _fx_N10Ast__typ_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 1: _fx_free_rNt6option1N10Ast__typ_t(&(*dst)->u.TypVar); break; case 2: _fx_free_Nt6option1N10Ast__typ_t(&(*dst)->u.TypVarTuple); break; case 3: _fx_free_N10Ast__typ_t(&(*dst)->u.TypVarArray); break; case 13:
/* typ_t destructor continues: cases 13..23 release list/tuple/ref/record/app payloads */ _fx_free_T2LN10Ast__typ_tN10Ast__typ_t(&(*dst)->u.TypFun); break; case 14: _fx_free_N10Ast__typ_t(&(*dst)->u.TypList); break; case 15: _fx_free_N10Ast__typ_t(&(*dst)->u.TypVector); break; case 16: _fx_free_LN10Ast__typ_t(&(*dst)->u.TypTuple); break; case 17: _fx_free_N10Ast__typ_t(&(*dst)->u.TypRef); break; case 18: _fx_free_T2iN10Ast__typ_t(&(*dst)->u.TypArray); break; case 19: _fx_free_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB(&(*dst)->u.TypRecord); break; case 23: _fx_free_T2LN10Ast__typ_tR9Ast__id_t(&(*dst)->u.TypApp); break; default: ; } fx_free(*dst); } *dst = 0; }
/* (option<exp>, loc) pair helpers */
static void _fx_free_T2Nt6option1N10Ast__exp_tR10Ast__loc_t(struct _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t* dst) { _fx_free_Nt6option1N10Ast__exp_t(&dst->t0); }
static void _fx_copy_T2Nt6option1N10Ast__exp_tR10Ast__loc_t( struct _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t* src, struct _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; }
static void _fx_make_T2Nt6option1N10Ast__exp_tR10Ast__loc_t( struct _fx_Nt6option1N10Ast__exp_t_data_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; }
/* (typ, loc) context pair — attached to every expression node below */
static void _fx_free_T2N10Ast__typ_tR10Ast__loc_t(struct _fx_T2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_N10Ast__typ_t(&dst->t0); }
static void _fx_copy_T2N10Ast__typ_tR10Ast__loc_t( struct _fx_T2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; }
static void _fx_make_T2N10Ast__typ_tR10Ast__loc_t( struct _fx_N10Ast__typ_t_data_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; }
/* (option<exp> x3, (typ,loc)) 4-tuple helpers */
static void _fx_free_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_Nt6option1N10Ast__exp_t(&dst->t0); _fx_free_Nt6option1N10Ast__exp_t(&dst->t1); _fx_free_Nt6option1N10Ast__exp_t(&dst->t2); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t3); }
static void _fx_copy_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); FX_COPY_PTR(src->t2, &dst->t2); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t3, &dst->t3); }
static void _fx_make_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_Nt6option1N10Ast__exp_t_data_t* t0, struct _fx_Nt6option1N10Ast__exp_t_data_t* t1, struct _fx_Nt6option1N10Ast__exp_t_data_t* t2, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t3, struct _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); FX_COPY_PTR(t2, &fx_result->t2); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t3, &fx_result->t3); }
/* (lit, (typ,loc)) pair helpers */
static void _fx_free_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t(struct _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_N10Ast__lit_t(&dst->t0); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t1); }
static void _fx_copy_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_copy_N10Ast__lit_t(&src->t0, &dst->t0); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t1, &dst->t1); }
static void _fx_make_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_N10Ast__lit_t* t0, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t1, struct _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { _fx_copy_N10Ast__lit_t(t0, &fx_result->t0); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t1, &fx_result->t1); }
/* (id, (typ,loc)) pair helpers */
static void _fx_free_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t(struct _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t1); }
static void _fx_copy_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t* dst) { dst->t0 = src->t0; _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t1, &dst->t1); }
static void _fx_make_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_R9Ast__id_t* t0, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t1, struct _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t1, &fx_result->t1); }
/* (binary-op, exp, exp, (typ,loc)) 4-tuple helpers; the make signature continues on the next line */
static void _fx_free_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_N10Ast__exp_t(&dst->t1); _fx_free_N10Ast__exp_t(&dst->t2); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t3); }
static void _fx_copy_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); FX_COPY_PTR(src->t2, &dst->t2); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t3, &dst->t3); }
static void _fx_make_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_N13Ast__binary_t* t0, struct _fx_N10Ast__exp_t_data_t* t1, struct _fx_N10Ast__exp_t_data_t* t2, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t3, struct
_fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { /* tail of the binary-expression tuple constructor */ fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); FX_COPY_PTR(t2, &fx_result->t2); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t3, &fx_result->t3); }
/* (unary-op, exp, (typ,loc)) triple helpers */
static void _fx_free_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_N10Ast__exp_t(&dst->t1); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2); }
static void _fx_copy_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2); }
static void _fx_make_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_N12Ast__unary_t* t0, struct _fx_N10Ast__exp_t_data_t* t1, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2, struct _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2); }
/* exp_t list helpers */
static void _fx_free_LN10Ast__exp_t(struct _fx_LN10Ast__exp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN10Ast__exp_t, _fx_free_N10Ast__exp_t); }
static int _fx_cons_LN10Ast__exp_t( struct _fx_N10Ast__exp_t_data_t* hd, struct _fx_LN10Ast__exp_t_data_t* tl, bool addref_tl, struct _fx_LN10Ast__exp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN10Ast__exp_t, FX_COPY_PTR); }
/* (intrinsic, exp list, (typ,loc)) triple helpers */
static void _fx_free_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_LN10Ast__exp_t(&dst->t1); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2); }
static void _fx_copy_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2); }
static void _fx_make_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_N13Ast__intrin_t* t0, struct _fx_LN10Ast__exp_t_data_t* t1, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2, struct _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2); }
/* (id, exp) pair helpers */
static void _fx_free_T2R9Ast__id_tN10Ast__exp_t(struct _fx_T2R9Ast__id_tN10Ast__exp_t* dst) { _fx_free_N10Ast__exp_t(&dst->t1); }
static void _fx_copy_T2R9Ast__id_tN10Ast__exp_t( struct _fx_T2R9Ast__id_tN10Ast__exp_t* src, struct _fx_T2R9Ast__id_tN10Ast__exp_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); }
static void _fx_make_T2R9Ast__id_tN10Ast__exp_t( struct _fx_R9Ast__id_t* t0, struct _fx_N10Ast__exp_t_data_t* t1, struct _fx_T2R9Ast__id_tN10Ast__exp_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); }
/* (exp list, (typ,loc)) pair helpers */
static void _fx_free_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_LN10Ast__exp_t(&dst->t0); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t1); }
static void _fx_copy_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t1, &dst->t1); }
static void _fx_make_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_LN10Ast__exp_t_data_t* t0, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t1, struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t1, &fx_result->t1); }
/* list-of-exp-lists helpers */
static void _fx_free_LLN10Ast__exp_t(struct _fx_LLN10Ast__exp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LLN10Ast__exp_t, _fx_free_LN10Ast__exp_t); }
static int _fx_cons_LLN10Ast__exp_t( struct _fx_LN10Ast__exp_t_data_t* hd, struct _fx_LLN10Ast__exp_t_data_t* tl, bool addref_tl, struct _fx_LLN10Ast__exp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LLN10Ast__exp_t, FX_COPY_PTR); }
/* (exp-list list, (typ,loc)) pair helpers */
static void _fx_free_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_LLN10Ast__exp_t(&dst->t0); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t1); }
static void _fx_copy_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t1, &dst->t1); }
static void _fx_make_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_LLN10Ast__exp_t_data_t* t0, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t1, struct _fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t1, &fx_result->t1); }
/* list of (id, exp) pairs */
static void _fx_free_LT2R9Ast__id_tN10Ast__exp_t(struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN10Ast__exp_t, _fx_free_T2R9Ast__id_tN10Ast__exp_t); }
static int _fx_cons_LT2R9Ast__id_tN10Ast__exp_t( struct _fx_T2R9Ast__id_tN10Ast__exp_t* hd, struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t* tl, bool addref_tl, struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN10Ast__exp_t, _fx_copy_T2R9Ast__id_tN10Ast__exp_t); }
/* (exp, (id,exp) list, (typ,loc)) triple destructor starts here; continues on the next line */
static void _fx_free_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_N10Ast__exp_t(&dst->t0);
/* triple destructor continues: release the (id,exp) list and the (typ,loc) context */ _fx_free_LT2R9Ast__id_tN10Ast__exp_t(&dst->t1); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2); }
static void _fx_copy_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2); }
static void _fx_make_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_N10Ast__exp_t_data_t* t0, struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t* t1, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2, struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2); }
/* (exp, exp list, (typ,loc)) triple helpers */
static void _fx_free_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_N10Ast__exp_t(&dst->t0); _fx_free_LN10Ast__exp_t(&dst->t1); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2); }
static void _fx_copy_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2); }
static void _fx_make_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_N10Ast__exp_t_data_t* t0, struct _fx_LN10Ast__exp_t_data_t* t1, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2, struct _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2); }
/* (exp, border, interpolate, exp list, (typ,loc)) 5-tuple helpers; t1/t2 are plain enums/values */
static void _fx_free_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_N10Ast__exp_t(&dst->t0); _fx_free_LN10Ast__exp_t(&dst->t3); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t4); }
static void _fx_copy_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; dst->t2 = src->t2; FX_COPY_PTR(src->t3, &dst->t3); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t4, &dst->t4); }
static void _fx_make_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_N10Ast__exp_t_data_t* t0, struct _fx_N13Ast__border_t* t1, struct _fx_N18Ast__interpolate_t* t2, struct _fx_LN10Ast__exp_t_data_t* t3, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t4, struct _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; fx_result->t2 = *t2; FX_COPY_PTR(t3, &fx_result->t3); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t4, &fx_result->t4); }
/* (exp, exp, loc) triple helpers */
static void _fx_free_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t(struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t* dst) { _fx_free_N10Ast__exp_t(&dst->t0); _fx_free_N10Ast__exp_t(&dst->t1); }
static void _fx_copy_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t( struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t* src, struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; }
static void _fx_make_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t( struct _fx_N10Ast__exp_t_data_t* t0, struct _fx_N10Ast__exp_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; }
/* (exp, exp, (typ,loc)) triple helpers */
static void _fx_free_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_N10Ast__exp_t(&dst->t0); _fx_free_N10Ast__exp_t(&dst->t1); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2); }
static void _fx_copy_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2); }
static void _fx_make_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_N10Ast__exp_t_data_t* t0, struct _fx_N10Ast__exp_t_data_t* t1, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2, struct _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2); }
/* (exp, loc) pair helpers */
static void _fx_free_T2N10Ast__exp_tR10Ast__loc_t(struct _fx_T2N10Ast__exp_tR10Ast__loc_t* dst) { _fx_free_N10Ast__exp_t(&dst->t0); }
static void _fx_copy_T2N10Ast__exp_tR10Ast__loc_t( struct _fx_T2N10Ast__exp_tR10Ast__loc_t* src, struct _fx_T2N10Ast__exp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; }
static void _fx_make_T2N10Ast__exp_tR10Ast__loc_t( struct _fx_N10Ast__exp_t_data_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2N10Ast__exp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; }
/* next destructor's signature begins here and continues past this chunk */
static void _fx_free_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct
_fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_N10Ast__exp_t(&dst->t0); _fx_free_N10Ast__exp_t(&dst->t1); _fx_free_N10Ast__exp_t(&dst->t2); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t3); } static void _fx_copy_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); FX_COPY_PTR(src->t2, &dst->t2); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t3, &dst->t3); } static void _fx_make_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_N10Ast__exp_t_data_t* t0, struct _fx_N10Ast__exp_t_data_t* t1, struct _fx_N10Ast__exp_t_data_t* t2, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t3, struct _fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); FX_COPY_PTR(t2, &fx_result->t2); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t3, &fx_result->t3); } static void _fx_free_T2N10Ast__pat_tN10Ast__exp_t(struct _fx_T2N10Ast__pat_tN10Ast__exp_t* dst) { _fx_free_N10Ast__pat_t(&dst->t0); _fx_free_N10Ast__exp_t(&dst->t1); } static void _fx_copy_T2N10Ast__pat_tN10Ast__exp_t( struct _fx_T2N10Ast__pat_tN10Ast__exp_t* src, struct _fx_T2N10Ast__pat_tN10Ast__exp_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2N10Ast__pat_tN10Ast__exp_t( struct _fx_N10Ast__pat_t_data_t* t0, struct _fx_N10Ast__exp_t_data_t* t1, struct _fx_T2N10Ast__pat_tN10Ast__exp_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LT2N10Ast__pat_tN10Ast__exp_t(struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2N10Ast__pat_tN10Ast__exp_t, _fx_free_T2N10Ast__pat_tN10Ast__exp_t); } static 
/* cons-cell constructor for the (pat, exp) list ("static" keyword is at
   the end of the previous source line). */
int _fx_cons_LT2N10Ast__pat_tN10Ast__exp_t(
    struct _fx_T2N10Ast__pat_tN10Ast__exp_t* hd,
    struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT2N10Ast__pat_tN10Ast__exp_t, _fx_copy_T2N10Ast__pat_tN10Ast__exp_t);
}

/* free/copy/make for ((pat, exp) list, pat, exp, for_flags, loc);
   t3/t4 are copied by assignment, so they hold no managed data. */
static void
_fx_free_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t(
    struct _fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t* dst)
{
    _fx_free_LT2N10Ast__pat_tN10Ast__exp_t(&dst->t0);
    _fx_free_N10Ast__pat_t(&dst->t1);
    _fx_free_N10Ast__exp_t(&dst->t2);
}

static void
_fx_copy_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t(
    struct _fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t* src,
    struct _fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    FX_COPY_PTR(src->t2, &dst->t2);
    dst->t3 = src->t3;
    dst->t4 = src->t4;
}

static void
_fx_make_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t(
    struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* t0,
    struct _fx_N10Ast__pat_t_data_t* t1,
    struct _fx_N10Ast__exp_t_data_t* t2,
    struct _fx_R16Ast__for_flags_t* t3,
    struct _fx_R10Ast__loc_t* t4,
    struct _fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    FX_COPY_PTR(t2, &fx_result->t2);
    fx_result->t3 = *t3;
    fx_result->t4 = *t4;
}

/* free/copy/make for ((pat, exp) list, pat) */
static void _fx_free_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t(struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t* dst)
{
    _fx_free_LT2N10Ast__pat_tN10Ast__exp_t(&dst->t0);
    _fx_free_N10Ast__pat_t(&dst->t1);
}

static void _fx_copy_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t(
    struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t* src,
    struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
}

static void _fx_make_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t(
    struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* t0,
    struct _fx_N10Ast__pat_t_data_t* t1,
    struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
}

/* list of the tuple above: destructor and cons-cell constructor */
static void _fx_free_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t(
    struct _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t,
        _fx_free_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t);
}

static int _fx_cons_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t(
    struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t* hd,
    struct _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t,
        _fx_copy_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t);
}

/* free/copy/make for (((pat, exp) list, pat) list, exp, for_flags, (typ, loc));
   for_flags (t2) is copied by assignment. */
static void
_fx_free_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    _fx_free_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t(&dst->t0);
    _fx_free_N10Ast__exp_t(&dst->t1);
    _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t3);
}

static void
_fx_copy_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t* src,
    struct _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    dst->t2 = src->t2;
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t3, &dst->t3);
}

static void
_fx_make_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t* t0,
    struct _fx_N10Ast__exp_t_data_t* t1,
    struct _fx_R16Ast__for_flags_t* t2,
    struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t3,
    struct _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    fx_result->t2 = *t2;
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t3, &fx_result->t3);
}

/* free/copy/make for (exp, (pat, exp) list, (typ, loc)) */
static void _fx_free_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    _fx_free_N10Ast__exp_t(&dst->t0);
    _fx_free_LT2N10Ast__pat_tN10Ast__exp_t(&dst->t1);
    _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2);
}

static void _fx_copy_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src,
    struct _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2);
}

static void _fx_make_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_N10Ast__exp_t_data_t* t0,
    struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* t1,
    struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2,
    struct _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2);
}

/* free/copy/make for (exp, typ, (typ, loc)) */
static void _fx_free_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    _fx_free_N10Ast__exp_t(&dst->t0);
    _fx_free_N10Ast__typ_t(&dst->t1);
    _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2);
}

static void _fx_copy_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t* src,
    struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2);
}

static void _fx_make_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_N10Ast__exp_t_data_t* t0,
    struct _fx_N10Ast__typ_t_data_t* t1,
    struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2,
    struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2);
}

/* free/copy/make for (string, (typ, loc)) */
static void _fx_free_T2ST2N10Ast__typ_tR10Ast__loc_t(struct _fx_T2ST2N10Ast__typ_tR10Ast__loc_t* dst)
{
    fx_free_str(&dst->t0);
    _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t1);
}

static void _fx_copy_T2ST2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T2ST2N10Ast__typ_tR10Ast__loc_t* src,
    struct _fx_T2ST2N10Ast__typ_tR10Ast__loc_t* dst)
{
    fx_copy_str(&src->t0, &dst->t0);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t1, &dst->t1);
}

static void _fx_make_T2ST2N10Ast__typ_tR10Ast__loc_t(
    fx_str_t* t0,
    struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t1,
    struct _fx_T2ST2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
    fx_copy_str(t0, &fx_result->t0);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t1, &fx_result->t1);
}

/* free/copy for (string, string, (typ, loc)); copy body continues on the
   next source line. */
static void _fx_free_T3SST2N10Ast__typ_tR10Ast__loc_t(struct _fx_T3SST2N10Ast__typ_tR10Ast__loc_t* dst)
{
    fx_free_str(&dst->t0);
    fx_free_str(&dst->t1);
    _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2);
}

static void _fx_copy_T3SST2N10Ast__typ_tR10Ast__loc_t(
    struct _fx_T3SST2N10Ast__typ_tR10Ast__loc_t* src,
    struct _fx_T3SST2N10Ast__typ_tR10Ast__loc_t* dst)
{
    /* body of _fx_copy_T3SST2N10Ast__typ_tR10Ast__loc_t (header on the
       previous source line): copy two strings and the (typ, loc) pair. */
    fx_copy_str(&src->t0, &dst->t0);
    fx_copy_str(&src->t1, &dst->t1);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2);
}

static void _fx_make_T3SST2N10Ast__typ_tR10Ast__loc_t(
    fx_str_t* t0,
    fx_str_t* t1,
    struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2,
    struct _fx_T3SST2N10Ast__typ_tR10Ast__loc_t* fx_result)
{
    fx_copy_str(t0, &fx_result->t0);
    fx_copy_str(t1, &fx_result->t1);
    _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2);
}

/* free/copy/make for (pat, exp, val_flags, loc); loc (t3) is plain */
static void _fx_free_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t(
    struct _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t* dst)
{
    _fx_free_N10Ast__pat_t(&dst->t0);
    _fx_free_N10Ast__exp_t(&dst->t1);
    _fx_free_R16Ast__val_flags_t(&dst->t2);
}

static void _fx_copy_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t(
    struct _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t* src,
    struct _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    _fx_copy_R16Ast__val_flags_t(&src->t2, &dst->t2);
    dst->t3 = src->t3;
}

static void _fx_make_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t(
    struct _fx_N10Ast__pat_t_data_t* t0,
    struct _fx_N10Ast__exp_t_data_t* t1,
    struct _fx_R16Ast__val_flags_t* t2,
    struct _fx_R10Ast__loc_t* t3,
    struct _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    _fx_copy_R16Ast__val_flags_t(t2, &fx_result->t2);
    fx_result->t3 = *t3;
}

/* cons for list of (int, id) pairs; elements are copied bitwise */
static int _fx_cons_LT2iR9Ast__id_t(
    struct _fx_T2iR9Ast__id_t* hd,
    struct _fx_LT2iR9Ast__id_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT2iR9Ast__id_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT2iR9Ast__id_t, FX_COPY_SIMPLE_BY_PTR);
}

/* free/copy/make for ((int, id) list, loc) */
static void _fx_free_T2LT2iR9Ast__id_tR10Ast__loc_t(struct _fx_T2LT2iR9Ast__id_tR10Ast__loc_t* dst)
{
    fx_free_list_simple(&dst->t0);
}

static void _fx_copy_T2LT2iR9Ast__id_tR10Ast__loc_t(
    struct _fx_T2LT2iR9Ast__id_tR10Ast__loc_t* src,
    struct _fx_T2LT2iR9Ast__id_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    dst->t1 = src->t1;
}

static void _fx_make_T2LT2iR9Ast__id_tR10Ast__loc_t(
    struct _fx_LT2iR9Ast__id_t_data_t* t0,
    struct _fx_R10Ast__loc_t* t1,
    struct _fx_T2LT2iR9Ast__id_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    fx_result->t1 = *t1;
}

/* free/copy/make for (int, id list, loc) */
static void _fx_free_T3iLR9Ast__id_tR10Ast__loc_t(struct _fx_T3iLR9Ast__id_tR10Ast__loc_t* dst)
{
    fx_free_list_simple(&dst->t1);
}

static void _fx_copy_T3iLR9Ast__id_tR10Ast__loc_t(
    struct _fx_T3iLR9Ast__id_tR10Ast__loc_t* src,
    struct _fx_T3iLR9Ast__id_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
    dst->t2 = src->t2;
}

static void _fx_make_T3iLR9Ast__id_tR10Ast__loc_t(
    int_ t0,
    struct _fx_LR9Ast__id_t_data_t* t1,
    struct _fx_R10Ast__loc_t* t2,
    struct _fx_T3iLR9Ast__id_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = t0;
    FX_COPY_PTR(t1, &fx_result->t1);
    fx_result->t2 = *t2;
}

/* free/copy/make for (string list, loc) */
static void _fx_free_T2LSR10Ast__loc_t(struct _fx_T2LSR10Ast__loc_t* dst)
{
    _fx_free_LS(&dst->t0);
}

static void _fx_copy_T2LSR10Ast__loc_t(struct _fx_T2LSR10Ast__loc_t* src, struct _fx_T2LSR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    dst->t1 = src->t1;
}

static void _fx_make_T2LSR10Ast__loc_t(
    struct _fx_LS_data_t* t0,
    struct _fx_R10Ast__loc_t* t1,
    struct _fx_T2LSR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    fx_result->t1 = *t1;
}

/* Destructor for the Ast.exp variant.  Decrements the refcount and, when
 * this was the last reference (FX_DECREF returns the previous count, so
 * == 1 means "now zero" -- TODO confirm against the ficus runtime),
 * dispatches on the tag to release the payload, then frees the node and
 * clears the caller's pointer.  Tags with no managed payload fall through
 * to the empty default. */
static void _fx_free_N10Ast__exp_t(struct _fx_N10Ast__exp_t_data_t** dst)
{
    if (*dst && FX_DECREF((*dst)->rc) == 1) {
        switch ((*dst)->tag) {
        case 4:
            _fx_free_T2Nt6option1N10Ast__exp_tR10Ast__loc_t(&(*dst)->u.ExpReturn);
            break;
        case 5:
            _fx_free_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
                &(*dst)->u.ExpRange);
            break;
        case 6:
            _fx_free_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpLit);
            break;
        case 7:
            _fx_free_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpIdent);
            break;
        case 8:
            _fx_free_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpBinary);
            break;
        case 9:
            _fx_free_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpUnary);
            break;
        case 10:
            _fx_free_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpIntrin);
            break;
        case 11:
            _fx_free_T2R9Ast__id_tN10Ast__exp_t(&(*dst)->u.ExpSync);
            break;
        case 12:
            _fx_free_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpSeq);
            break;
        case 13:
            _fx_free_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpMkTuple);
            break;
        case 14:
            _fx_free_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpMkArray);
            break;
        case 15:
            _fx_free_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpMkVector);
            break;
        case 16:
            _fx_free_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpMkRecord);
            break;
        case 17:
            _fx_free_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpUpdateRecord);
            break;
        case 18:
            _fx_free_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpCall);
            break;
        case 19:
            _fx_free_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(
                &(*dst)->u.ExpAt);
            break;
        case 20:
            _fx_free_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t(&(*dst)->u.ExpAssign);
            break;
        case 21:
            _fx_free_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpMem);
            break;
        case 22:
            _fx_free_T2N10Ast__exp_tR10Ast__loc_t(&(*dst)->u.ExpThrow);
            break;
        case 23:
            _fx_free_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpIf);
            break;
        case 24:
            _fx_free_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t(&(*dst)->u.ExpWhile);
            break;
        case 25:
            _fx_free_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t(&(*dst)->u.ExpDoWhile);
            break;
        case 26:
            _fx_free_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t(&(*dst)->u.ExpFor);
            break;
        case 27:
            _fx_free_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t(
                &(*dst)->u.ExpMap);
            break;
        case 28:
            _fx_free_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpTryCatch);
            break;
        case 29:
            _fx_free_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpMatch);
            break;
        case 30:
            _fx_free_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpCast);
            break;
        case 31:
            _fx_free_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpTyped);
            break;
        case 32:
            _fx_free_T2ST2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpCCode);
            break;
        case 33:
            _fx_free_T3SST2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpData);
            break;
        case 34:
            _fx_free_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t(&(*dst)->u.DefVal);
            break;
        case 35:
            _fx_free_rR13Ast__deffun_t(&(*dst)->u.DefFun);
            break;
        case 36:
            _fx_free_rR13Ast__defexn_t(&(*dst)->u.DefExn);
            break;
        case 37:
            _fx_free_rR13Ast__deftyp_t(&(*dst)->u.DefTyp);
            break;
        case 38:
            _fx_free_rR17Ast__defvariant_t(&(*dst)->u.DefVariant);
            break;
        case 39:
            _fx_free_rR19Ast__definterface_t(&(*dst)->u.DefInterface);
            break;
        case 40:
            _fx_free_T2LT2iR9Ast__id_tR10Ast__loc_t(&(*dst)->u.DirImport);
            break;
        case 41:
            _fx_free_T3iLR9Ast__id_tR10Ast__loc_t(&(*dst)->u.DirImportFrom);
            break;
        case 42:
            _fx_free_T2LSR10Ast__loc_t(&(*dst)->u.DirPragma);
            break;
        default:
            ;
        }
        fx_free(*dst);
    }
    *dst = 0;
}

/* free/copy/make for (lit, loc); the make body continues on the next
   source line. */
static void _fx_free_T2N10Ast__lit_tR10Ast__loc_t(struct _fx_T2N10Ast__lit_tR10Ast__loc_t* dst)
{
    _fx_free_N10Ast__lit_t(&dst->t0);
}

static void _fx_copy_T2N10Ast__lit_tR10Ast__loc_t(
    struct _fx_T2N10Ast__lit_tR10Ast__loc_t* src,
    struct _fx_T2N10Ast__lit_tR10Ast__loc_t* dst)
{
    _fx_copy_N10Ast__lit_t(&src->t0, &dst->t0);
    dst->t1 = src->t1;
}

static void _fx_make_T2N10Ast__lit_tR10Ast__loc_t(
    struct _fx_N10Ast__lit_t* t0,
    struct _fx_R10Ast__loc_t* t1,
    struct _fx_T2N10Ast__lit_tR10Ast__loc_t* fx_result)
{
    _fx_copy_N10Ast__lit_t(t0,
        /* remainder of _fx_make_T2N10Ast__lit_tR10Ast__loc_t (header on
           the previous source line) */
        &fx_result->t0);
    fx_result->t1 = *t1;
}

/* free/copy/make for (pat list, loc) */
static void _fx_free_T2LN10Ast__pat_tR10Ast__loc_t(struct _fx_T2LN10Ast__pat_tR10Ast__loc_t* dst)
{
    _fx_free_LN10Ast__pat_t(&dst->t0);
}

static void _fx_copy_T2LN10Ast__pat_tR10Ast__loc_t(
    struct _fx_T2LN10Ast__pat_tR10Ast__loc_t* src,
    struct _fx_T2LN10Ast__pat_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    dst->t1 = src->t1;
}

static void _fx_make_T2LN10Ast__pat_tR10Ast__loc_t(
    struct _fx_LN10Ast__pat_t_data_t* t0,
    struct _fx_R10Ast__loc_t* t1,
    struct _fx_T2LN10Ast__pat_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    fx_result->t1 = *t1;
}

/* free/copy/make for (id, pat list, loc) */
static void _fx_free_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t(struct _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t* dst)
{
    _fx_free_LN10Ast__pat_t(&dst->t1);
}

static void _fx_copy_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t(
    struct _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t* src,
    struct _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
    dst->t2 = src->t2;
}

static void _fx_make_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_LN10Ast__pat_t_data_t* t1,
    struct _fx_R10Ast__loc_t* t2,
    struct _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
    fx_result->t2 = *t2;
}

/* free/copy/make for the pair (id, pat) */
static void _fx_free_T2R9Ast__id_tN10Ast__pat_t(struct _fx_T2R9Ast__id_tN10Ast__pat_t* dst)
{
    _fx_free_N10Ast__pat_t(&dst->t1);
}

static void _fx_copy_T2R9Ast__id_tN10Ast__pat_t(
    struct _fx_T2R9Ast__id_tN10Ast__pat_t* src,
    struct _fx_T2R9Ast__id_tN10Ast__pat_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
}

static void _fx_make_T2R9Ast__id_tN10Ast__pat_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_N10Ast__pat_t_data_t* t1,
    struct _fx_T2R9Ast__id_tN10Ast__pat_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
}

/* list of (id, pat): destructor and cons */
static void _fx_free_LT2R9Ast__id_tN10Ast__pat_t(struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN10Ast__pat_t, _fx_free_T2R9Ast__id_tN10Ast__pat_t);
}

static int _fx_cons_LT2R9Ast__id_tN10Ast__pat_t(
    struct _fx_T2R9Ast__id_tN10Ast__pat_t* hd,
    struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN10Ast__pat_t, _fx_copy_T2R9Ast__id_tN10Ast__pat_t);
}

/* free/copy/make for (id option, (id, pat) list, loc) */
static void _fx_free_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t(
    struct _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t* dst)
{
    _fx_free_LT2R9Ast__id_tN10Ast__pat_t(&dst->t1);
}

static void _fx_copy_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t(
    struct _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t* src,
    struct _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
    dst->t2 = src->t2;
}

static void _fx_make_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t(
    struct _fx_Nt6option1R9Ast__id_t* t0,
    struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t* t1,
    struct _fx_R10Ast__loc_t* t2,
    struct _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
    fx_result->t2 = *t2;
}

/* free/copy/make for (pat, pat, loc) */
static void _fx_free_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t(struct _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t* dst)
{
    _fx_free_N10Ast__pat_t(&dst->t0);
    _fx_free_N10Ast__pat_t(&dst->t1);
}

static void _fx_copy_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t(
    struct _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t* src,
    struct _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    dst->t2 = src->t2;
}

static void _fx_make_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t(
    struct _fx_N10Ast__pat_t_data_t* t0,
    struct _fx_N10Ast__pat_t_data_t* t1,
    struct _fx_R10Ast__loc_t* t2,
    struct _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t*
        fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    fx_result->t2 = *t2;
}

/* free/copy/make for (pat, id, loc) */
static void _fx_free_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t(struct _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t* dst)
{
    _fx_free_N10Ast__pat_t(&dst->t0);
}

static void _fx_copy_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t(
    struct _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t* src,
    struct _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    dst->t1 = src->t1;
    dst->t2 = src->t2;
}

static void _fx_make_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t(
    struct _fx_N10Ast__pat_t_data_t* t0,
    struct _fx_R9Ast__id_t* t1,
    struct _fx_R10Ast__loc_t* t2,
    struct _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    fx_result->t1 = *t1;
    fx_result->t2 = *t2;
}

/* free/copy/make for (pat, typ, loc) */
static void _fx_free_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t(struct _fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t* dst)
{
    _fx_free_N10Ast__pat_t(&dst->t0);
    _fx_free_N10Ast__typ_t(&dst->t1);
}

static void _fx_copy_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t(
    struct _fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t* src,
    struct _fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    dst->t2 = src->t2;
}

static void _fx_make_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t(
    struct _fx_N10Ast__pat_t_data_t* t0,
    struct _fx_N10Ast__typ_t_data_t* t1,
    struct _fx_R10Ast__loc_t* t2,
    struct _fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    fx_result->t2 = *t2;
}

/* free/copy for (pat, exp, loc); the copy body continues on the next
   source line. */
static void _fx_free_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t(struct _fx_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t* dst)
{
    _fx_free_N10Ast__pat_t(&dst->t0);
    _fx_free_N10Ast__exp_t(&dst->t1);
}

static void _fx_copy_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t(
    struct _fx_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t* src,
    struct _fx_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0,
_fx_free_N16Ast__env_entry_t(struct _fx_N16Ast__env_entry_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 2: _fx_free_N10Ast__typ_t(&(*dst)->u.EnvTyp); break; default: ; } fx_free(*dst); } *dst = 0; } static void _fx_free_T2SR10Ast__loc_t(struct _fx_T2SR10Ast__loc_t* dst) { fx_free_str(&dst->t0); } static void _fx_copy_T2SR10Ast__loc_t(struct _fx_T2SR10Ast__loc_t* src, struct _fx_T2SR10Ast__loc_t* dst) { fx_copy_str(&src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2SR10Ast__loc_t(fx_str_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2SR10Ast__loc_t* fx_result) { fx_copy_str(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_LT2SR10Ast__loc_t(struct _fx_LT2SR10Ast__loc_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2SR10Ast__loc_t, _fx_free_T2SR10Ast__loc_t); } static int _fx_cons_LT2SR10Ast__loc_t( struct _fx_T2SR10Ast__loc_t* hd, struct _fx_LT2SR10Ast__loc_t_data_t* tl, bool addref_tl, struct _fx_LT2SR10Ast__loc_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2SR10Ast__loc_t, _fx_copy_T2SR10Ast__loc_t); } static int _fx_cons_Li(int_ hd, struct _fx_Li_data_t* tl, bool addref_tl, struct _fx_Li_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_Li, FX_COPY_SIMPLE); } static void _fx_free_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t( struct _fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t* dst) { fx_free_str(&dst->t1); _fx_free_LN10Ast__exp_t(&dst->t4); fx_free_list_simple(&dst->t5); _fx_free_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(&dst->t6); _fx_free_Nt9Dynvec__t1N14Ast__id_info_t(&dst->t9); } static void _fx_copy_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t( struct _fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t* src, struct 
_fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t* dst) { dst->t0 = src->t0; fx_copy_str(&src->t1, &dst->t1); dst->t2 = src->t2; dst->t3 = src->t3; FX_COPY_PTR(src->t4, &dst->t4); FX_COPY_PTR(src->t5, &dst->t5); _fx_copy_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(&src->t6, &dst->t6); dst->t7 = src->t7; dst->t8 = src->t8; FX_COPY_PTR(src->t9, &dst->t9); } static void _fx_make_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t( struct _fx_R9Ast__id_t* t0, fx_str_t* t1, int_ t2, bool t3, struct _fx_LN10Ast__exp_t_data_t* t4, struct _fx_Li_data_t* t5, struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t* t6, bool t7, int_ t8, struct _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t* t9, struct _fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t* fx_result) { fx_result->t0 = *t0; fx_copy_str(t1, &fx_result->t1); fx_result->t2 = t2; fx_result->t3 = t3; FX_COPY_PTR(t4, &fx_result->t4); FX_COPY_PTR(t5, &fx_result->t5); _fx_copy_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(t6, &fx_result->t6); fx_result->t7 = t7; fx_result->t8 = t8; FX_COPY_PTR(t9, &fx_result->t9); } static void _fx_free_N16Ast__defmodule_t(struct _fx_N16Ast__defmodule_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { _fx_free_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t( &(*dst)->u.defmodule_t); fx_free(*dst); } *dst = 0; } static void _fx_free_LE(struct _fx_LE_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LE, fx_free_exn); } static int _fx_cons_LE(fx_exn_t* hd, struct _fx_LE_data_t* tl, bool addref_tl, struct _fx_LE_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LE, fx_copy_exn); } static void _fx_free_T2BS(struct _fx_T2BS* dst) { fx_free_str(&dst->t1); } static void _fx_copy_T2BS(struct _fx_T2BS* src, struct _fx_T2BS* dst) { dst->t0 = src->t0; fx_copy_str(&src->t1, &dst->t1); } 
/* NOTE(review): machine-generated Ficus-to-C glue — lexer token and
 * K-form literal/atom/type variant helpers.  Regenerate, don't hand-edit. */

static void _fx_make_T2BS(bool t0, fx_str_t* t1, struct _fx_T2BS* fx_result) {
    fx_result->t0 = t0;
    fx_copy_str(t1, &fx_result->t1);
}

/* Lexer token variant: string-carrying tags (TYVAR/DATA/RESERVED), a literal
 * (tag 1) and an identifier tuple (tag 2) own heap data; other tags are POD. */
static void _fx_free_N14Lexer__token_t(struct _fx_N14Lexer__token_t* dst) {
    switch (dst->tag) {
    case 1: _fx_free_N10Ast__lit_t(&dst->u.LITERAL); break;
    case 2: _fx_free_T2BS(&dst->u.IDENT); break;
    case 3: fx_free_str(&dst->u.TYVAR); break;
    case 13: fx_free_str(&dst->u.DATA); break;
    case 100: fx_free_str(&dst->u.RESERVED); break;
    default: ;
    }
    dst->tag = 0;
}

static void _fx_copy_N14Lexer__token_t(struct _fx_N14Lexer__token_t* src, struct _fx_N14Lexer__token_t* dst) {
    dst->tag = src->tag;
    switch (src->tag) {
    case 1: _fx_copy_N10Ast__lit_t(&src->u.LITERAL, &dst->u.LITERAL); break;
    case 2: _fx_copy_T2BS(&src->u.IDENT, &dst->u.IDENT); break;
    case 3: fx_copy_str(&src->u.TYVAR, &dst->u.TYVAR); break;
    case 13: fx_copy_str(&src->u.DATA, &dst->u.DATA); break;
    case 100: fx_copy_str(&src->u.RESERVED, &dst->u.RESERVED); break;
    default: dst->u = src->u;   /* POD payload: plain union copy */
    }
}

/* List of lexer tokens. */
static void _fx_free_LN14Lexer__token_t(struct _fx_LN14Lexer__token_t_data_t** dst) {
    FX_FREE_LIST_IMPL(_fx_LN14Lexer__token_t, _fx_free_N14Lexer__token_t);
}

static int _fx_cons_LN14Lexer__token_t(
    struct _fx_N14Lexer__token_t* hd,
    struct _fx_LN14Lexer__token_t_data_t* tl,
    bool addref_tl,
    struct _fx_LN14Lexer__token_t_data_t** fx_result) {
    FX_MAKE_LIST_IMPL(_fx_LN14Lexer__token_t, _fx_copy_N14Lexer__token_t);
}

/* K-form literal variant: only KLitString (tag 5) and KLitNil (tag 8, a
 * ref-counted ktyp pointer) own heap data. */
static void _fx_free_N14K_form__klit_t(struct _fx_N14K_form__klit_t* dst) {
    switch (dst->tag) {
    case 5: fx_free_str(&dst->u.KLitString); break;
    case 8: _fx_free_N14K_form__ktyp_t(&dst->u.KLitNil); break;
    default: ;
    }
    dst->tag = 0;
}

static void _fx_copy_N14K_form__klit_t(struct _fx_N14K_form__klit_t* src, struct _fx_N14K_form__klit_t* dst) {
    dst->tag = src->tag;
    switch (src->tag) {
    case 5: fx_copy_str(&src->u.KLitString, &dst->u.KLitString); break;
    case 8: FX_COPY_PTR(src->u.KLitNil, &dst->u.KLitNil); break;
    default: dst->u = src->u;
    }
}

/* K-form atom variant: only AtomLit (tag 2) owns heap data. */
static void _fx_free_N14K_form__atom_t(struct _fx_N14K_form__atom_t* dst) {
    switch (dst->tag) {
    case 2: _fx_free_N14K_form__klit_t(&dst->u.AtomLit); break;
    default: ;
    }
    dst->tag = 0;
}

static void _fx_copy_N14K_form__atom_t(struct _fx_N14K_form__atom_t* src, struct _fx_N14K_form__atom_t* dst) {
    dst->tag = src->tag;
    switch (src->tag) {
    case 2: _fx_copy_N14K_form__klit_t(&src->u.AtomLit, &dst->u.AtomLit); break;
    default: dst->u = src->u;
    }
}

/* atom option (Some = tag 2). */
static void _fx_free_Nt6option1N14K_form__atom_t(struct _fx_Nt6option1N14K_form__atom_t* dst) {
    switch (dst->tag) {
    case 2: _fx_free_N14K_form__atom_t(&dst->u.Some); break;
    default: ;
    }
    dst->tag = 0;
}

static void _fx_copy_Nt6option1N14K_form__atom_t(
    struct _fx_Nt6option1N14K_form__atom_t* src,
    struct _fx_Nt6option1N14K_form__atom_t* dst) {
    dst->tag = src->tag;
    switch (src->tag) {
    case 2: _fx_copy_N14K_form__atom_t(&src->u.Some, &dst->u.Some); break;
    default: dst->u = src->u;
    }
}

/* List of K-form expressions (elements are ref-counted pointers). */
static void _fx_free_LN14K_form__kexp_t(struct _fx_LN14K_form__kexp_t_data_t** dst) {
    FX_FREE_LIST_IMPL(_fx_LN14K_form__kexp_t, _fx_free_N14K_form__kexp_t);
}

static int _fx_cons_LN14K_form__kexp_t(
    struct _fx_N14K_form__kexp_t_data_t* hd,
    struct _fx_LN14K_form__kexp_t_data_t* tl,
    bool addref_tl,
    struct _fx_LN14K_form__kexp_t_data_t** fx_result) {
    FX_MAKE_LIST_IMPL(_fx_LN14K_form__kexp_t, FX_COPY_PTR);
}

/* (bool, atom) tuple. */
static void _fx_free_T2BN14K_form__atom_t(struct _fx_T2BN14K_form__atom_t* dst) {
    _fx_free_N14K_form__atom_t(&dst->t1);
}

static void _fx_copy_T2BN14K_form__atom_t(struct _fx_T2BN14K_form__atom_t* src, struct _fx_T2BN14K_form__atom_t* dst) {
    dst->t0 = src->t0;
    _fx_copy_N14K_form__atom_t(&src->t1, &dst->t1);
}

static void _fx_make_T2BN14K_form__atom_t(bool t0, struct _fx_N14K_form__atom_t* t1, struct _fx_T2BN14K_form__atom_t* fx_result) {
    fx_result->t0 = t0;
    _fx_copy_N14K_form__atom_t(t1, &fx_result->t1);
}

/* List of (bool, atom) tuples. */
static void _fx_free_LT2BN14K_form__atom_t(struct _fx_LT2BN14K_form__atom_t_data_t** dst) {
    FX_FREE_LIST_IMPL(_fx_LT2BN14K_form__atom_t, _fx_free_T2BN14K_form__atom_t);
}

static int _fx_cons_LT2BN14K_form__atom_t(
    struct _fx_T2BN14K_form__atom_t* hd,
    struct _fx_LT2BN14K_form__atom_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT2BN14K_form__atom_t_data_t** fx_result) {
    FX_MAKE_LIST_IMPL(_fx_LT2BN14K_form__atom_t, _fx_copy_T2BN14K_form__atom_t);
}

/* (ktyp list, ktyp) tuple — function type payload. */
static void _fx_free_T2LN14K_form__ktyp_tN14K_form__ktyp_t(struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t* dst) {
    _fx_free_LN14K_form__ktyp_t(&dst->t0);
    _fx_free_N14K_form__ktyp_t(&dst->t1);
}

static void _fx_copy_T2LN14K_form__ktyp_tN14K_form__ktyp_t(
    struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t* src,
    struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t* dst) {
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
}

static void _fx_make_T2LN14K_form__ktyp_tN14K_form__ktyp_t(
    struct _fx_LN14K_form__ktyp_t_data_t* t0,
    struct _fx_N14K_form__ktyp_t_data_t* t1,
    struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t* fx_result) {
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
}

/* (id, (id, ktyp) list) tuple — record type payload. */
static void _fx_free_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t(struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t* dst) {
    _fx_free_LT2R9Ast__id_tN14K_form__ktyp_t(&dst->t1);
}

static void _fx_copy_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t(
    struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t* src,
    struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t* dst) {
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
}

static void _fx_make_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* t1,
    struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t* fx_result) {
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
}

/* (int, ktyp) tuple — array type payload (dimensions, element type). */
static void _fx_free_T2iN14K_form__ktyp_t(struct _fx_T2iN14K_form__ktyp_t* dst) {
    _fx_free_N14K_form__ktyp_t(&dst->t1);
}

static void _fx_copy_T2iN14K_form__ktyp_t(struct _fx_T2iN14K_form__ktyp_t* src, struct _fx_T2iN14K_form__ktyp_t* dst) {
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
}

static void _fx_make_T2iN14K_form__ktyp_t(
    int_ t0,
    struct _fx_N14K_form__ktyp_t_data_t* t1,
    struct _fx_T2iN14K_form__ktyp_t* fx_result) {
    fx_result->t0 = t0;
    FX_COPY_PTR(t1, &fx_result->t1);
}

/* Free a ref-counted K-form type node; tags 11..18 (fun/tuple/record/array/
 * vector/list/ref) carry heap payloads, everything else is POD. */
static void _fx_free_N14K_form__ktyp_t(struct _fx_N14K_form__ktyp_t_data_t** dst) {
    if (*dst && FX_DECREF((*dst)->rc) == 1) {
        switch ((*dst)->tag) {
        case 11: _fx_free_T2LN14K_form__ktyp_tN14K_form__ktyp_t(&(*dst)->u.KTypFun); break;
        case 12: _fx_free_LN14K_form__ktyp_t(&(*dst)->u.KTypTuple); break;
        case 13: _fx_free_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t(&(*dst)->u.KTypRecord); break;
        case 15: _fx_free_T2iN14K_form__ktyp_t(&(*dst)->u.KTypArray); break;
        case 16: _fx_free_N14K_form__ktyp_t(&(*dst)->u.KTypVector); break;
        case 17: _fx_free_N14K_form__ktyp_t(&(*dst)->u.KTypList); break;
        case 18: _fx_free_N14K_form__ktyp_t(&(*dst)->u.KTypRef); break;
        default: ;
        }
        fx_free(*dst);
    }
    *dst = 0;
}

/* Triple of atoms — used for range domains (start, end, step). */
static void _fx_free_Ta3N14K_form__atom_t(struct _fx_Ta3N14K_form__atom_t* dst) {
    _fx_free_N14K_form__atom_t(&dst->t0);
    _fx_free_N14K_form__atom_t(&dst->t1);
    _fx_free_N14K_form__atom_t(&dst->t2);
}

static void _fx_copy_Ta3N14K_form__atom_t(struct _fx_Ta3N14K_form__atom_t* src, struct _fx_Ta3N14K_form__atom_t* dst) {
    _fx_copy_N14K_form__atom_t(&src->t0, &dst->t0);
    _fx_copy_N14K_form__atom_t(&src->t1, &dst->t1);
    _fx_copy_N14K_form__atom_t(&src->t2, &dst->t2);
}

static void _fx_make_Ta3N14K_form__atom_t(
    struct _fx_N14K_form__atom_t* t0,
    struct _fx_N14K_form__atom_t* t1,
    struct _fx_N14K_form__atom_t* t2,
    struct _fx_Ta3N14K_form__atom_t* fx_result) {
    _fx_copy_N14K_form__atom_t(t0, &fx_result->t0);
    _fx_copy_N14K_form__atom_t(t1, &fx_result->t1);
    _fx_copy_N14K_form__atom_t(t2, &fx_result->t2);
}

/* Loop-domain variant: single element, "fast" element, or (start,end,step) range. */
static void _fx_free_N13K_form__dom_t(struct _fx_N13K_form__dom_t* dst) {
    switch (dst->tag) {
    case 1: _fx_free_N14K_form__atom_t(&dst->u.DomainElem); break;
    case 2: _fx_free_N14K_form__atom_t(&dst->u.DomainFast); break;
    case 3: _fx_free_Ta3N14K_form__atom_t(&dst->u.DomainRange); break;
    default: ;
    }
    dst->tag = 0;
}
/* NOTE(review): machine-generated Ficus-to-C glue — loop-domain copy and
 * (payload, loc) tuple helpers.  Regenerate, don't hand-edit. */

static void _fx_copy_N13K_form__dom_t(struct _fx_N13K_form__dom_t* src, struct _fx_N13K_form__dom_t* dst) {
    dst->tag = src->tag;
    switch (src->tag) {
    case 1: _fx_copy_N14K_form__atom_t(&src->u.DomainElem, &dst->u.DomainElem); break;
    case 2: _fx_copy_N14K_form__atom_t(&src->u.DomainFast, &dst->u.DomainFast); break;
    case 3: _fx_copy_Ta3N14K_form__atom_t(&src->u.DomainRange, &dst->u.DomainRange); break;
    default: dst->u = src->u;   /* POD payload: plain union copy */
    }
}

/* (atom option, loc) tuple: only the option (t0) owns heap data. */
static void _fx_free_T2Nt6option1N14K_form__atom_tR10Ast__loc_t(struct _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t* dst) {
    _fx_free_Nt6option1N14K_form__atom_t(&dst->t0);
}

static void _fx_copy_T2Nt6option1N14K_form__atom_tR10Ast__loc_t(
    struct _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t* src,
    struct _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t* dst) {
    _fx_copy_Nt6option1N14K_form__atom_t(&src->t0, &dst->t0);
    dst->t1 = src->t1;
}

static void _fx_make_T2Nt6option1N14K_form__atom_tR10Ast__loc_t(
    struct _fx_Nt6option1N14K_form__atom_t* t0,
    struct _fx_R10Ast__loc_t* t1,
    struct _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t* fx_result) {
    _fx_copy_Nt6option1N14K_form__atom_t(t0, &fx_result->t0);
    fx_result->t1 = *t1;
}

/* (ktyp, loc) tuple — the ubiquitous "typed location" annotation on kexps. */
static void _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    _fx_free_N14K_form__ktyp_t(&dst->t0);
}

static void _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    FX_COPY_PTR(src->t0, &dst->t0);
    dst->t1 = src->t1;
}

static void _fx_make_T2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_N14K_form__ktyp_t_data_t* t0,
    struct _fx_R10Ast__loc_t* t1,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* fx_result) {
    FX_COPY_PTR(t0, &fx_result->t0);
    fx_result->t1 = *t1;
}

/* (atom, (ktyp, loc)) tuple. */
static void _fx_free_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    _fx_free_N14K_form__atom_t(&dst->t0);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t1);
}
/* NOTE(review): machine-generated Ficus-to-C glue — tuple/list helpers for
 * the K-form IR (binary/unary/intrinsic ops, calls, control flow, for-loop
 * clauses).  Every group follows the same free/copy/make pattern; only
 * heap-backed members (lists, ref-counted pointers, strings) are managed,
 * plain members are copied by value.  Regenerate, don't hand-edit. */

static void _fx_copy_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    _fx_copy_N14K_form__atom_t(&src->t0, &dst->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t1, &dst->t1);
}

static void _fx_make_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_N14K_form__atom_t* t0,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t1,
    struct _fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) {
    _fx_copy_N14K_form__atom_t(t0, &fx_result->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t1, &fx_result->t1);
}

/* (binary op, atom, atom, (ktyp, loc)) — binary-operation payload. */
static void _fx_free_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    _fx_free_N14K_form__atom_t(&dst->t1);
    _fx_free_N14K_form__atom_t(&dst->t2);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t3);
}

static void _fx_copy_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    dst->t0 = src->t0;
    _fx_copy_N14K_form__atom_t(&src->t1, &dst->t1);
    _fx_copy_N14K_form__atom_t(&src->t2, &dst->t2);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t3, &dst->t3);
}

static void _fx_make_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_N13Ast__binary_t* t0,
    struct _fx_N14K_form__atom_t* t1,
    struct _fx_N14K_form__atom_t* t2,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t3,
    struct _fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) {
    fx_result->t0 = *t0;
    _fx_copy_N14K_form__atom_t(t1, &fx_result->t1);
    _fx_copy_N14K_form__atom_t(t2, &fx_result->t2);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t3, &fx_result->t3);
}

/* (unary op, atom, (ktyp, loc)) — unary-operation payload. */
static void _fx_free_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    _fx_free_N14K_form__atom_t(&dst->t1);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t2);
}

static void _fx_copy_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    dst->t0 = src->t0;
    _fx_copy_N14K_form__atom_t(&src->t1, &dst->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t2, &dst->t2);
}

static void _fx_make_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_N12Ast__unary_t* t0,
    struct _fx_N14K_form__atom_t* t1,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t2,
    struct _fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) {
    fx_result->t0 = *t0;
    _fx_copy_N14K_form__atom_t(t1, &fx_result->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t2, &fx_result->t2);
}

/* List of atoms. */
static void _fx_free_LN14K_form__atom_t(struct _fx_LN14K_form__atom_t_data_t** dst) {
    FX_FREE_LIST_IMPL(_fx_LN14K_form__atom_t, _fx_free_N14K_form__atom_t);
}

static int _fx_cons_LN14K_form__atom_t(
    struct _fx_N14K_form__atom_t* hd,
    struct _fx_LN14K_form__atom_t_data_t* tl,
    bool addref_tl,
    struct _fx_LN14K_form__atom_t_data_t** fx_result) {
    FX_MAKE_LIST_IMPL(_fx_LN14K_form__atom_t, _fx_copy_N14K_form__atom_t);
}

/* (intrinsic, atom list, (ktyp, loc)) — intrinsic-call payload. */
static void _fx_free_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    _fx_free_LN14K_form__atom_t(&dst->t1);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t2);
}

static void _fx_copy_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t2, &dst->t2);
}

static void _fx_make_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_N13Ast__intrin_t* t0,
    struct _fx_LN14K_form__atom_t_data_t* t1,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t2,
    struct _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) {
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t2, &fx_result->t2);
}

/* (id, kexp) tuple. */
static void _fx_free_T2R9Ast__id_tN14K_form__kexp_t(struct _fx_T2R9Ast__id_tN14K_form__kexp_t* dst) {
    _fx_free_N14K_form__kexp_t(&dst->t1);
}

static void _fx_copy_T2R9Ast__id_tN14K_form__kexp_t(
    struct _fx_T2R9Ast__id_tN14K_form__kexp_t* src,
    struct _fx_T2R9Ast__id_tN14K_form__kexp_t* dst) {
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
}

static void _fx_make_T2R9Ast__id_tN14K_form__kexp_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_N14K_form__kexp_t_data_t* t1,
    struct _fx_T2R9Ast__id_tN14K_form__kexp_t* fx_result) {
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
}

/* (kexp list, (ktyp, loc)) — sequence/block payload. */
static void _fx_free_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    _fx_free_LN14K_form__kexp_t(&dst->t0);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t1);
}

static void _fx_copy_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    FX_COPY_PTR(src->t0, &dst->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t1, &dst->t1);
}

static void _fx_make_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_LN14K_form__kexp_t_data_t* t0,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t1,
    struct _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) {
    FX_COPY_PTR(t0, &fx_result->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t1, &fx_result->t1);
}

/* (kexp, kexp, kexp, (ktyp, loc)) — if/then/else payload. */
static void _fx_free_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    _fx_free_N14K_form__kexp_t(&dst->t0);
    _fx_free_N14K_form__kexp_t(&dst->t1);
    _fx_free_N14K_form__kexp_t(&dst->t2);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t3);
}

static void _fx_copy_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    FX_COPY_PTR(src->t2, &dst->t2);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t3, &dst->t3);
}

static void _fx_make_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_N14K_form__kexp_t_data_t* t0,
    struct _fx_N14K_form__kexp_t_data_t* t1,
    struct _fx_N14K_form__kexp_t_data_t* t2,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t3,
    struct _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) {
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    FX_COPY_PTR(t2, &fx_result->t2);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t3, &fx_result->t3);
}

/* (id, atom list, (ktyp, loc)). */
static void _fx_free_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    _fx_free_LN14K_form__atom_t(&dst->t1);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t2);
}

static void _fx_copy_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t2, &dst->t2);
}

static void _fx_make_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_LN14K_form__atom_t_data_t* t1,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t2,
    struct _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) {
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t2, &fx_result->t2);
}

/* (id, int, atom list, (ktyp, loc)). */
static void _fx_free_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    _fx_free_LN14K_form__atom_t(&dst->t2);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t3);
}

static void _fx_copy_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    dst->t0 = src->t0;
    dst->t1 = src->t1;
    FX_COPY_PTR(src->t2, &dst->t2);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t3, &dst->t3);
}

static void _fx_make_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_R9Ast__id_t* t0,
    int_ t1,
    struct _fx_LN14K_form__atom_t_data_t* t2,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t3,
    struct _fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) {
    fx_result->t0 = *t0;
    fx_result->t1 = t1;
    FX_COPY_PTR(t2, &fx_result->t2);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t3, &fx_result->t3);
}

/* (atom list, (ktyp, loc)). */
static void _fx_free_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    _fx_free_LN14K_form__atom_t(&dst->t0);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t1);
}

static void _fx_copy_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    FX_COPY_PTR(src->t0, &dst->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t1, &dst->t1);
}

static void _fx_make_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_LN14K_form__atom_t_data_t* t0,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t1,
    struct _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) {
    FX_COPY_PTR(t0, &fx_result->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t1, &fx_result->t1);
}

/* (id, id, atom list, (ktyp, loc)). */
static void _fx_free_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    _fx_free_LN14K_form__atom_t(&dst->t2);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t3);
}

static void _fx_copy_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    dst->t0 = src->t0;
    dst->t1 = src->t1;
    FX_COPY_PTR(src->t2, &dst->t2);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t3, &dst->t3);
}

static void _fx_make_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_R9Ast__id_t* t1,
    struct _fx_LN14K_form__atom_t_data_t* t2,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t3,
    struct _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) {
    fx_result->t0 = *t0;
    fx_result->t1 = *t1;
    FX_COPY_PTR(t2, &fx_result->t2);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t3, &fx_result->t3);
}

/* List of lists of (bool, atom) tuples. */
static void _fx_free_LLT2BN14K_form__atom_t(struct _fx_LLT2BN14K_form__atom_t_data_t** dst) {
    FX_FREE_LIST_IMPL(_fx_LLT2BN14K_form__atom_t, _fx_free_LT2BN14K_form__atom_t);
}

static int _fx_cons_LLT2BN14K_form__atom_t(
    struct _fx_LT2BN14K_form__atom_t_data_t* hd,
    struct _fx_LLT2BN14K_form__atom_t_data_t* tl,
    bool addref_tl,
    struct _fx_LLT2BN14K_form__atom_t_data_t** fx_result) {
    FX_MAKE_LIST_IMPL(_fx_LLT2BN14K_form__atom_t, FX_COPY_PTR);
}

/* (bool, (bool, atom) list list, (ktyp, loc)). */
static void _fx_free_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    _fx_free_LLT2BN14K_form__atom_t(&dst->t1);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t2);
}

static void _fx_copy_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t2, &dst->t2);
}

static void _fx_make_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    bool t0,
    struct _fx_LLT2BN14K_form__atom_t_data_t* t1,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t2,
    struct _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) {
    fx_result->t0 = t0;
    FX_COPY_PTR(t1, &fx_result->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t2, &fx_result->t2);
}

/* ((bool, atom) list, (ktyp, loc)). */
static void _fx_free_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    _fx_free_LT2BN14K_form__atom_t(&dst->t0);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t1);
}

static void _fx_copy_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    FX_COPY_PTR(src->t0, &dst->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t1, &dst->t1);
}

static void _fx_make_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_LT2BN14K_form__atom_t_data_t* t0,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t1,
    struct _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) {
    FX_COPY_PTR(t0, &fx_result->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t1, &fx_result->t1);
}

/* List of loop domains. */
static void _fx_free_LN13K_form__dom_t(struct _fx_LN13K_form__dom_t_data_t** dst) {
    FX_FREE_LIST_IMPL(_fx_LN13K_form__dom_t, _fx_free_N13K_form__dom_t);
}

static int _fx_cons_LN13K_form__dom_t(
    struct _fx_N13K_form__dom_t* hd,
    struct _fx_LN13K_form__dom_t_data_t* tl,
    bool addref_tl,
    struct _fx_LN13K_form__dom_t_data_t** fx_result) {
    FX_MAKE_LIST_IMPL(_fx_LN13K_form__dom_t, _fx_copy_N13K_form__dom_t);
}

/* (atom, border, interpolate, dom list, (ktyp, loc)). */
static void _fx_free_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    _fx_free_N14K_form__atom_t(&dst->t0);
    _fx_free_LN13K_form__dom_t(&dst->t3);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t4);
}

static void _fx_copy_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    _fx_copy_N14K_form__atom_t(&src->t0, &dst->t0);
    dst->t1 = src->t1;
    dst->t2 = src->t2;
    FX_COPY_PTR(src->t3, &dst->t3);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t4, &dst->t4);
}

static void _fx_make_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_N14K_form__atom_t* t0,
    struct _fx_N13Ast__border_t* t1,
    struct _fx_N18Ast__interpolate_t* t2,
    struct _fx_LN13K_form__dom_t_data_t* t3,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t4,
    struct _fx_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) {
    _fx_copy_N14K_form__atom_t(t0, &fx_result->t0);
    fx_result->t1 = *t1;
    fx_result->t2 = *t2;
    FX_COPY_PTR(t3, &fx_result->t3);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t4, &fx_result->t4);
}

/* (id, int, (ktyp, loc)). */
static void _fx_free_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t2);
}

static void _fx_copy_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    dst->t0 = src->t0;
    dst->t1 = src->t1;
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t2, &dst->t2);
}

static void _fx_make_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_R9Ast__id_t* t0,
    int_ t1,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t2,
    struct _fx_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) {
    fx_result->t0 = *t0;
    fx_result->t1 = t1;
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t2, &fx_result->t2);
}

/* (id, atom, loc). */
static void _fx_free_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t(struct _fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t* dst) {
    _fx_free_N14K_form__atom_t(&dst->t1);
}

static void _fx_copy_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t(
    struct _fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t* src,
    struct _fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t* dst) {
    dst->t0 = src->t0;
    _fx_copy_N14K_form__atom_t(&src->t1, &dst->t1);
    dst->t2 = src->t2;
}

static void _fx_make_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_N14K_form__atom_t* t1,
    struct _fx_R10Ast__loc_t* t2,
    struct _fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t* fx_result) {
    fx_result->t0 = *t0;
    _fx_copy_N14K_form__atom_t(t1, &fx_result->t1);
    fx_result->t2 = *t2;
}

/* (kexp list, kexp) tuple — e.g. match-case (patterns, body). */
static void _fx_free_T2LN14K_form__kexp_tN14K_form__kexp_t(struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t* dst) {
    _fx_free_LN14K_form__kexp_t(&dst->t0);
    _fx_free_N14K_form__kexp_t(&dst->t1);
}

static void _fx_copy_T2LN14K_form__kexp_tN14K_form__kexp_t(
    struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t* src,
    struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t* dst) {
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
}

static void _fx_make_T2LN14K_form__kexp_tN14K_form__kexp_t(
    struct _fx_LN14K_form__kexp_t_data_t* t0,
    struct _fx_N14K_form__kexp_t_data_t* t1,
    struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t* fx_result) {
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
}

/* List of (kexp list, kexp) tuples. */
static void _fx_free_LT2LN14K_form__kexp_tN14K_form__kexp_t(struct _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t** dst) {
    FX_FREE_LIST_IMPL(_fx_LT2LN14K_form__kexp_tN14K_form__kexp_t, _fx_free_T2LN14K_form__kexp_tN14K_form__kexp_t);
}

static int _fx_cons_LT2LN14K_form__kexp_tN14K_form__kexp_t(
    struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t* hd,
    struct _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t** fx_result) {
    FX_MAKE_LIST_IMPL(_fx_LT2LN14K_form__kexp_tN14K_form__kexp_t, _fx_copy_T2LN14K_form__kexp_tN14K_form__kexp_t);
}

/* ((kexp list, kexp) list, (ktyp, loc)). */
static void _fx_free_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    _fx_free_LT2LN14K_form__kexp_tN14K_form__kexp_t(&dst->t0);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t1);
}

static void _fx_copy_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    FX_COPY_PTR(src->t0, &dst->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t1, &dst->t1);
}

static void _fx_make_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t* t0,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t1,
    struct _fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) {
    FX_COPY_PTR(t0, &fx_result->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t1, &fx_result->t1);
}

/* (kexp, kexp, (ktyp, loc)). */
static void _fx_free_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    _fx_free_N14K_form__kexp_t(&dst->t0);
    _fx_free_N14K_form__kexp_t(&dst->t1);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t2);
}

static void _fx_copy_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t2, &dst->t2);
}

static void _fx_make_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_N14K_form__kexp_t_data_t* t0,
    struct _fx_N14K_form__kexp_t_data_t* t1,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t2,
    struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) {
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t2, &fx_result->t2);
}

/* (atom, ktyp, loc). */
static void _fx_free_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t* dst) {
    _fx_free_N14K_form__atom_t(&dst->t0);
    _fx_free_N14K_form__ktyp_t(&dst->t1);
}

static void _fx_copy_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t* dst) {
    _fx_copy_N14K_form__atom_t(&src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    dst->t2 = src->t2;
}

static void _fx_make_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_N14K_form__atom_t* t0,
    struct _fx_N14K_form__ktyp_t_data_t* t1,
    struct _fx_R10Ast__loc_t* t2,
    struct _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t* fx_result) {
    _fx_copy_N14K_form__atom_t(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    fx_result->t2 = *t2;
}

/* (id, dom) tuple — a single for-loop iterator binding. */
static void _fx_free_T2R9Ast__id_tN13K_form__dom_t(struct _fx_T2R9Ast__id_tN13K_form__dom_t* dst) {
    _fx_free_N13K_form__dom_t(&dst->t1);
}

static void _fx_copy_T2R9Ast__id_tN13K_form__dom_t(
    struct _fx_T2R9Ast__id_tN13K_form__dom_t* src,
    struct _fx_T2R9Ast__id_tN13K_form__dom_t* dst) {
    dst->t0 = src->t0;
    _fx_copy_N13K_form__dom_t(&src->t1, &dst->t1);
}

static void _fx_make_T2R9Ast__id_tN13K_form__dom_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_N13K_form__dom_t* t1,
    struct _fx_T2R9Ast__id_tN13K_form__dom_t* fx_result) {
    fx_result->t0 = *t0;
    _fx_copy_N13K_form__dom_t(t1, &fx_result->t1);
}

/* List of (id, dom) bindings. */
static void _fx_free_LT2R9Ast__id_tN13K_form__dom_t(struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t** dst) {
    FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN13K_form__dom_t, _fx_free_T2R9Ast__id_tN13K_form__dom_t);
}

static int _fx_cons_LT2R9Ast__id_tN13K_form__dom_t(
    struct _fx_T2R9Ast__id_tN13K_form__dom_t* hd,
    struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t** fx_result) {
    FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN13K_form__dom_t, _fx_copy_T2R9Ast__id_tN13K_form__dom_t);
}

/* (kexp, (id, dom) list, id list) — one for-loop clause. */
static void _fx_free_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t(
    struct _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t* dst) {
    _fx_free_N14K_form__kexp_t(&dst->t0);
    _fx_free_LT2R9Ast__id_tN13K_form__dom_t(&dst->t1);
    fx_free_list_simple(&dst->t2);
}

static void _fx_copy_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t(
    struct _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t* src,
    struct _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t* dst) {
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    FX_COPY_PTR(src->t2, &dst->t2);
}

static void _fx_make_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t(
    struct _fx_N14K_form__kexp_t_data_t* t0,
    struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t* t1,
    struct _fx_LR9Ast__id_t_data_t* t2,
    struct _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t* fx_result) {
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    FX_COPY_PTR(t2, &fx_result->t2);
}

/* List of for-loop clauses (nested comprehension levels). */
static void _fx_free_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t(
    struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t** dst) {
    FX_FREE_LIST_IMPL(_fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t,
        _fx_free_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t);
}

static int _fx_cons_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t(
    struct _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t* hd,
    struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t** fx_result) {
    FX_MAKE_LIST_IMPL(_fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t,
        _fx_copy_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t);
}

/* (clause list, body kexp, for-flags, (ktyp, loc)) — full for-loop payload. */
static void _fx_free_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) {
    _fx_free_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t(&dst->t0);
    _fx_free_N14K_form__kexp_t(&dst->t1);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t3);
}
static void _fx_copy_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t* src, struct _fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t3, &dst->t3); } static void _fx_make_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t* t0, struct _fx_N14K_form__kexp_t_data_t* t1, struct _fx_R16Ast__for_flags_t* t2, struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t3, struct _fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t3, &fx_result->t3); } static void _fx_free_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t( struct _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t* dst) { _fx_free_LT2R9Ast__id_tN13K_form__dom_t(&dst->t0); fx_free_list_simple(&dst->t1); _fx_free_N14K_form__kexp_t(&dst->t2); } static void _fx_copy_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t( struct _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t* src, struct _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, 
&dst->t0); FX_COPY_PTR(src->t1, &dst->t1); FX_COPY_PTR(src->t2, &dst->t2); dst->t3 = src->t3; dst->t4 = src->t4; } static void _fx_make_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t( struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t* t0, struct _fx_LR9Ast__id_t_data_t* t1, struct _fx_N14K_form__kexp_t_data_t* t2, struct _fx_R16Ast__for_flags_t* t3, struct _fx_R10Ast__loc_t* t4, struct _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); FX_COPY_PTR(t2, &fx_result->t2); fx_result->t3 = *t3; fx_result->t4 = *t4; } static void _fx_free_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t( struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t* dst) { _fx_free_N14K_form__kexp_t(&dst->t0); _fx_free_N14K_form__kexp_t(&dst->t1); } static void _fx_copy_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t( struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t* src, struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t( struct _fx_N14K_form__kexp_t_data_t* t0, struct _fx_N14K_form__kexp_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T2ST2N14K_form__ktyp_tR10Ast__loc_t(struct _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t* dst) { fx_free_str(&dst->t0); _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t1); } static void _fx_copy_T2ST2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t* src, struct _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t* dst) { fx_copy_str(&src->t0, &dst->t0); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t1, 
&dst->t1); } static void _fx_make_T2ST2N14K_form__ktyp_tR10Ast__loc_t( fx_str_t* t0, struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t1, struct _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t* fx_result) { fx_copy_str(t0, &fx_result->t0); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t1, &fx_result->t1); } static void _fx_free_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t(struct _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t* dst) { _fx_free_N14K_form__kexp_t(&dst->t1); } static void _fx_copy_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t( struct _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t* src, struct _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t( struct _fx_R9Ast__id_t* t0, struct _fx_N14K_form__kexp_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_N14K_form__kexp_t(struct _fx_N14K_form__kexp_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 4: _fx_free_T2Nt6option1N14K_form__atom_tR10Ast__loc_t(&(*dst)->u.KExpReturn); break; case 5: _fx_free_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpAtom); break; case 6: _fx_free_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpBinary); break; case 7: _fx_free_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpUnary); break; case 8: _fx_free_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpIntrin); break; case 9: _fx_free_T2R9Ast__id_tN14K_form__kexp_t(&(*dst)->u.KExpSync); break; case 10: _fx_free_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpSeq); break; case 11: 
_fx_free_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpIf); break; case 12: _fx_free_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpCall); break; case 13: _fx_free_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpICall); break; case 14: _fx_free_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMkTuple); break; case 15: _fx_free_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMkRecord); break; case 16: _fx_free_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMkClosure); break; case 17: _fx_free_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMkArray); break; case 18: _fx_free_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMkVector); break; case 19: _fx_free_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t( &(*dst)->u.KExpAt); break; case 20: _fx_free_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMem); break; case 21: _fx_free_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t(&(*dst)->u.KExpAssign); break; case 22: _fx_free_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMatch); break; case 23: _fx_free_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpTryCatch); break; case 25: _fx_free_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpCast); break; case 26: _fx_free_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t( &(*dst)->u.KExpMap); break; case 27: _fx_free_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t( &(*dst)->u.KExpFor); break; case 28: _fx_free_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t(&(*dst)->u.KExpWhile); break; case 29: 
_fx_free_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t(&(*dst)->u.KExpDoWhile); break; case 30: _fx_free_T2ST2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpCCode); break; case 31: _fx_free_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t(&(*dst)->u.KDefVal); break; case 32: _fx_free_rR17K_form__kdeffun_t(&(*dst)->u.KDefFun); break; case 33: _fx_free_rR17K_form__kdefexn_t(&(*dst)->u.KDefExn); break; case 34: _fx_free_rR21K_form__kdefvariant_t(&(*dst)->u.KDefVariant); break; case 35: _fx_free_rR23K_form__kdefinterface_t(&(*dst)->u.KDefInterface); break; case 36: _fx_free_rR17K_form__kdeftyp_t(&(*dst)->u.KDefTyp); break; case 37: _fx_free_rR25K_form__kdefclosurevars_t(&(*dst)->u.KDefClosureVars); break; default: ; } fx_free(*dst); } *dst = 0; } static void _fx_free_R14Ast__pragmas_t(struct _fx_R14Ast__pragmas_t* dst) { _fx_free_LT2SR10Ast__loc_t(&dst->pragma_clibs); } static void _fx_copy_R14Ast__pragmas_t(struct _fx_R14Ast__pragmas_t* src, struct _fx_R14Ast__pragmas_t* dst) { dst->pragma_cpp = src->pragma_cpp; FX_COPY_PTR(src->pragma_clibs, &dst->pragma_clibs); } static void _fx_make_R14Ast__pragmas_t( bool r_pragma_cpp, struct _fx_LT2SR10Ast__loc_t_data_t* r_pragma_clibs, struct _fx_R14Ast__pragmas_t* fx_result) { fx_result->pragma_cpp = r_pragma_cpp; FX_COPY_PTR(r_pragma_clibs, &fx_result->pragma_clibs); } static void _fx_free_R17K_form__kmodule_t(struct _fx_R17K_form__kmodule_t* dst) { fx_free_str(&dst->km_cname); _fx_free_LN14K_form__kexp_t(&dst->km_top); fx_free_list_simple(&dst->km_deps); _fx_free_R14Ast__pragmas_t(&dst->km_pragmas); } static void _fx_copy_R17K_form__kmodule_t(struct _fx_R17K_form__kmodule_t* src, struct _fx_R17K_form__kmodule_t* dst) { dst->km_name = src->km_name; dst->km_idx = src->km_idx; dst->km_toposort_idx = src->km_toposort_idx; fx_copy_str(&src->km_cname, &dst->km_cname); FX_COPY_PTR(src->km_top, &dst->km_top); FX_COPY_PTR(src->km_deps, &dst->km_deps); dst->km_skip = src->km_skip; dst->km_main = src->km_main; 
_fx_copy_R14Ast__pragmas_t(&src->km_pragmas, &dst->km_pragmas); } static void _fx_make_R17K_form__kmodule_t( struct _fx_R9Ast__id_t* r_km_name, int_ r_km_idx, int_ r_km_toposort_idx, fx_str_t* r_km_cname, struct _fx_LN14K_form__kexp_t_data_t* r_km_top, struct _fx_Li_data_t* r_km_deps, bool r_km_skip, bool r_km_main, struct _fx_R14Ast__pragmas_t* r_km_pragmas, struct _fx_R17K_form__kmodule_t* fx_result) { fx_result->km_name = *r_km_name; fx_result->km_idx = r_km_idx; fx_result->km_toposort_idx = r_km_toposort_idx; fx_copy_str(r_km_cname, &fx_result->km_cname); FX_COPY_PTR(r_km_top, &fx_result->km_top); FX_COPY_PTR(r_km_deps, &fx_result->km_deps); fx_result->km_skip = r_km_skip; fx_result->km_main = r_km_main; _fx_copy_R14Ast__pragmas_t(r_km_pragmas, &fx_result->km_pragmas); } static void _fx_free_LR17K_form__kmodule_t(struct _fx_LR17K_form__kmodule_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LR17K_form__kmodule_t, _fx_free_R17K_form__kmodule_t); } static int _fx_cons_LR17K_form__kmodule_t( struct _fx_R17K_form__kmodule_t* hd, struct _fx_LR17K_form__kmodule_t_data_t* tl, bool addref_tl, struct _fx_LR17K_form__kmodule_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LR17K_form__kmodule_t, _fx_copy_R17K_form__kmodule_t); } static void _fx_free_Nt6option1N14C_form__ctyp_t(struct _fx_Nt6option1N14C_form__ctyp_t* dst) { switch (dst->tag) { case 2: _fx_free_N14C_form__ctyp_t(&dst->u.Some); break; default: ; } dst->tag = 0; } static void _fx_copy_Nt6option1N14C_form__ctyp_t( struct _fx_Nt6option1N14C_form__ctyp_t* src, struct _fx_Nt6option1N14C_form__ctyp_t* dst) { dst->tag = src->tag; switch (src->tag) { case 2: FX_COPY_PTR(src->u.Some, &dst->u.Some); break; default: dst->u = src->u; } } static void _fx_free_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t( struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* dst) { _fx_free_LT2R9Ast__id_tN14C_form__ctyp_t(&dst->t1); } static void _fx_copy_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t( struct 
_fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* src, struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t( struct _fx_Nt6option1R9Ast__id_t* t0, struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* t1, struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LN14C_form__ctyp_t(struct _fx_LN14C_form__ctyp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN14C_form__ctyp_t, _fx_free_N14C_form__ctyp_t); } static int _fx_cons_LN14C_form__ctyp_t( struct _fx_N14C_form__ctyp_t_data_t* hd, struct _fx_LN14C_form__ctyp_t_data_t* tl, bool addref_tl, struct _fx_LN14C_form__ctyp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN14C_form__ctyp_t, FX_COPY_PTR); } static void _fx_free_T2LN14C_form__ctyp_tN14C_form__ctyp_t(struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t* dst) { _fx_free_LN14C_form__ctyp_t(&dst->t0); _fx_free_N14C_form__ctyp_t(&dst->t1); } static void _fx_copy_T2LN14C_form__ctyp_tN14C_form__ctyp_t( struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t* src, struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2LN14C_form__ctyp_tN14C_form__ctyp_t( struct _fx_LN14C_form__ctyp_t_data_t* t0, struct _fx_N14C_form__ctyp_t_data_t* t1, struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static int _fx_cons_LN19C_form__ctyp_attr_t( struct _fx_N19C_form__ctyp_attr_t* hd, struct _fx_LN19C_form__ctyp_attr_t_data_t* tl, bool addref_tl, struct _fx_LN19C_form__ctyp_attr_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN19C_form__ctyp_attr_t, FX_COPY_SIMPLE_BY_PTR); } static void _fx_free_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t(struct 
_fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* dst) { fx_free_list_simple(&dst->t0); _fx_free_N14C_form__ctyp_t(&dst->t1); } static void _fx_copy_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t( struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* src, struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t( struct _fx_LN19C_form__ctyp_attr_t_data_t* t0, struct _fx_N14C_form__ctyp_t_data_t* t1, struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_T2iN14C_form__ctyp_t(struct _fx_T2iN14C_form__ctyp_t* dst) { _fx_free_N14C_form__ctyp_t(&dst->t1); } static void _fx_copy_T2iN14C_form__ctyp_t(struct _fx_T2iN14C_form__ctyp_t* src, struct _fx_T2iN14C_form__ctyp_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2iN14C_form__ctyp_t( int_ t0, struct _fx_N14C_form__ctyp_t_data_t* t1, struct _fx_T2iN14C_form__ctyp_t* fx_result) { fx_result->t0 = t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_N14C_form__ctyp_t(struct _fx_N14C_form__ctyp_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 13: _fx_free_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t(&(*dst)->u.CTypStruct); break; case 14: _fx_free_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t(&(*dst)->u.CTypUnion); break; case 15: _fx_free_T2LN14C_form__ctyp_tN14C_form__ctyp_t(&(*dst)->u.CTypFunRawPtr); break; case 16: _fx_free_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t(&(*dst)->u.CTypRawPtr); break; case 17: _fx_free_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t(&(*dst)->u.CTypRawArray); break; case 18: _fx_free_T2iN14C_form__ctyp_t(&(*dst)->u.CTypArray); break; case 19: _fx_free_N14C_form__ctyp_t(&(*dst)->u.CTypVector); break; default: ; } fx_free(*dst); } *dst = 0; } static void 
_fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__ctyp_t(&dst->t0); } static void _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N14C_form__ctyp_t_data_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t1); } static void _fx_copy_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { dst->t0 = src->t0; _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t1, &dst->t1); } static void _fx_make_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_R9Ast__id_t* t0, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t1, struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t1, &fx_result->t1); } static void _fx_free_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14K_form__klit_t(&dst->t0); _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t1); } static void _fx_copy_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_copy_N14K_form__klit_t(&src->t0, &dst->t0); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t1, &dst->t1); } static void _fx_make_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t( struct 
_fx_N14K_form__klit_t* t0, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t1, struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { _fx_copy_N14K_form__klit_t(t0, &fx_result->t0); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t1, &fx_result->t1); } static void _fx_free_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t1); _fx_free_N14C_form__cexp_t(&dst->t2); _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t3); } static void _fx_copy_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); FX_COPY_PTR(src->t2, &dst->t2); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t3, &dst->t3); } static void _fx_make_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N17C_form__cbinary_t* t0, struct _fx_N14C_form__cexp_t_data_t* t1, struct _fx_N14C_form__cexp_t_data_t* t2, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t3, struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); FX_COPY_PTR(t2, &fx_result->t2); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t3, &fx_result->t3); } static void _fx_free_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t1); _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t2); } static void 
_fx_copy_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t2, &dst->t2); } static void _fx_make_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N16C_form__cunary_t* t0, struct _fx_N14C_form__cexp_t_data_t* t1, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t2, struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t2); } static void _fx_copy_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t2, &dst->t2); } static void _fx_make_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_R9Ast__id_t* t1, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t2, struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t* dst) { 
_fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_N14C_form__ctyp_t(&dst->t1); } static void _fx_copy_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_N14C_form__ctyp_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_N14C_form__cexp_t(&dst->t1); _fx_free_N14C_form__cexp_t(&dst->t2); _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t3); } static void _fx_copy_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); FX_COPY_PTR(src->t2, &dst->t2); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t3, &dst->t3); } static void _fx_make_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_N14C_form__cexp_t_data_t* t1, struct _fx_N14C_form__cexp_t_data_t* t2, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t3, struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, 
&fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); FX_COPY_PTR(t2, &fx_result->t2); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t3, &fx_result->t3); } static void _fx_free_LN14C_form__cexp_t(struct _fx_LN14C_form__cexp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN14C_form__cexp_t, _fx_free_N14C_form__cexp_t); } static int _fx_cons_LN14C_form__cexp_t( struct _fx_N14C_form__cexp_t_data_t* hd, struct _fx_LN14C_form__cexp_t_data_t* tl, bool addref_tl, struct _fx_LN14C_form__cexp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN14C_form__cexp_t, FX_COPY_PTR); } static void _fx_free_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_LN14C_form__cexp_t(&dst->t1); _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t2); } static void _fx_copy_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t2, &dst->t2); } static void _fx_make_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_LN14C_form__cexp_t_data_t* t1, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t2, struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { _fx_free_LN14C_form__cexp_t(&dst->t0); _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t1); } static void 
_fx_copy_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t1, &dst->t1); } static void _fx_make_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( struct _fx_LN14C_form__cexp_t_data_t* t0, struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t1, struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t1, &fx_result->t1); } static void _fx_free_N14C_form__cexp_t(struct _fx_N14C_form__cexp_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 1: _fx_free_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpIdent); break; case 2: _fx_free_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpLit); break; case 3: _fx_free_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t( &(*dst)->u.CExpBinary); break; case 4: _fx_free_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpUnary); break; case 5: _fx_free_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpMem); break; case 6: _fx_free_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpArrow); break; case 7: _fx_free_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpCast); break; case 8: _fx_free_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpTernary); break; case 9: _fx_free_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpCall); break; case 10: _fx_free_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpInit); break; case 11: _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpTyp); break; case 12: 
_fx_free_T2SR10Ast__loc_t(&(*dst)->u.CExpCCode); break; default: ; } fx_free(*dst); } *dst = 0; } static void _fx_free_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t(struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t* dst) { _fx_free_Nt6option1N14C_form__cexp_t(&dst->t0); } static void _fx_copy_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t( struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t* src, struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t* dst) { _fx_copy_Nt6option1N14C_form__cexp_t(&src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t( struct _fx_Nt6option1N14C_form__cexp_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t* fx_result) { _fx_copy_Nt6option1N14C_form__cexp_t(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_T2LN15C_form__cstmt_tR10Ast__loc_t(struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_free_LN15C_form__cstmt_t(&dst->t0); } static void _fx_copy_T2LN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t* src, struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2LN15C_form__cstmt_tR10Ast__loc_t( struct _fx_LN15C_form__cstmt_t_data_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_T2R9Ast__id_tN15C_form__cstmt_t(struct _fx_T2R9Ast__id_tN15C_form__cstmt_t* dst) { _fx_free_N15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T2R9Ast__id_tN15C_form__cstmt_t( struct _fx_T2R9Ast__id_tN15C_form__cstmt_t* src, struct _fx_T2R9Ast__id_tN15C_form__cstmt_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2R9Ast__id_tN15C_form__cstmt_t( struct _fx_R9Ast__id_t* t0, struct _fx_N15C_form__cstmt_t_data_t* t1, struct _fx_T2R9Ast__id_tN15C_form__cstmt_t* fx_result) { fx_result->t0 = *t0; 
FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_N15C_form__cstmt_t(&dst->t1); _fx_free_N15C_form__cstmt_t(&dst->t2); } static void _fx_copy_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* src, struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); FX_COPY_PTR(src->t2, &dst->t2); dst->t3 = src->t3; } static void _fx_make_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_N15C_form__cstmt_t_data_t* t1, struct _fx_N15C_form__cstmt_t_data_t* t2, struct _fx_R10Ast__loc_t* t3, struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); FX_COPY_PTR(t2, &fx_result->t2); fx_result->t3 = *t3; } static void _fx_free_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_free_Nt6option1N14C_form__ctyp_t(&dst->t0); _fx_free_LN14C_form__cexp_t(&dst->t1); _fx_free_Nt6option1N14C_form__cexp_t(&dst->t2); _fx_free_LN14C_form__cexp_t(&dst->t3); _fx_free_N15C_form__cstmt_t(&dst->t4); } static void _fx_copy_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* src, struct 
_fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_copy_Nt6option1N14C_form__ctyp_t(&src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_Nt6option1N14C_form__cexp_t(&src->t2, &dst->t2); FX_COPY_PTR(src->t3, &dst->t3); FX_COPY_PTR(src->t4, &dst->t4); dst->t5 = src->t5; } static void _fx_make_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_Nt6option1N14C_form__ctyp_t* t0, struct _fx_LN14C_form__cexp_t_data_t* t1, struct _fx_Nt6option1N14C_form__cexp_t* t2, struct _fx_LN14C_form__cexp_t_data_t* t3, struct _fx_N15C_form__cstmt_t_data_t* t4, struct _fx_R10Ast__loc_t* t5, struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* fx_result) { _fx_copy_Nt6option1N14C_form__ctyp_t(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_Nt6option1N14C_form__cexp_t(t2, &fx_result->t2); FX_COPY_PTR(t3, &fx_result->t3); FX_COPY_PTR(t4, &fx_result->t4); fx_result->t5 = *t5; } static void _fx_free_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_N15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* src, struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_N15C_form__cstmt_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); 
fx_result->t2 = *t2; } static void _fx_free_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t( struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t* dst) { _fx_free_N15C_form__cstmt_t(&dst->t0); _fx_free_N14C_form__cexp_t(&dst->t1); } static void _fx_copy_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t( struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t* src, struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t( struct _fx_N15C_form__cstmt_t_data_t* t0, struct _fx_N14C_form__cexp_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T2LN14C_form__cexp_tLN15C_form__cstmt_t(struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* dst) { _fx_free_LN14C_form__cexp_t(&dst->t0); _fx_free_LN15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T2LN14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* src, struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2LN14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_LN14C_form__cexp_t_data_t* t0, struct _fx_LN15C_form__cstmt_t_data_t* t1, struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LT2LN14C_form__cexp_tLN15C_form__cstmt_t(struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t, _fx_free_T2LN14C_form__cexp_tLN15C_form__cstmt_t); } static int _fx_cons_LT2LN14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* hd, struct 
_fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t* tl, bool addref_tl, struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t, _fx_copy_T2LN14C_form__cexp_tLN15C_form__cstmt_t); } static void _fx_free_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_LT2LN14C_form__cexp_tLN15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t* src, struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t( struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__ctyp_t(&dst->t0); _fx_free_Nt6option1N14C_form__cexp_t(&dst->t2); } static void _fx_copy_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t( struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t* src, struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; _fx_copy_Nt6option1N14C_form__cexp_t(&src->t2, &dst->t2); dst->t3 = 
src->t3; } static void _fx_make_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t( struct _fx_N14C_form__ctyp_t_data_t* t0, struct _fx_R9Ast__id_t* t1, struct _fx_Nt6option1N14C_form__cexp_t* t2, struct _fx_R10Ast__loc_t* t3, struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; _fx_copy_Nt6option1N14C_form__cexp_t(t2, &fx_result->t2); fx_result->t3 = *t3; } static void _fx_free_T2N14C_form__cexp_tLN15C_form__cstmt_t(struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_LN15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T2N14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* src, struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2N14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_LN15C_form__cstmt_t_data_t* t1, struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LT2N14C_form__cexp_tLN15C_form__cstmt_t(struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t, _fx_free_T2N14C_form__cexp_tLN15C_form__cstmt_t); } static int _fx_cons_LT2N14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* hd, struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t* tl, bool addref_tl, struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t, _fx_copy_T2N14C_form__cexp_tLN15C_form__cstmt_t); } static void _fx_free_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t( struct 
_fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_free_LT2N14C_form__cexp_tLN15C_form__cstmt_t(&dst->t0); _fx_free_LN15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t* src, struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t* t0, struct _fx_LN15C_form__cstmt_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_N15C_form__cstmt_t(struct _fx_N15C_form__cstmt_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 2: _fx_free_T2SR10Ast__loc_t(&(*dst)->u.CComment); break; case 3: _fx_free_N14C_form__cexp_t(&(*dst)->u.CExp); break; case 6: _fx_free_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t(&(*dst)->u.CStmtReturn); break; case 7: _fx_free_T2LN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CStmtBlock); break; case 8: _fx_free_T2R9Ast__id_tN15C_form__cstmt_t(&(*dst)->u.CStmtSync); break; case 9: _fx_free_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CStmtIf); break; case 12: _fx_free_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( &(*dst)->u.CStmtFor); break; case 13: _fx_free_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CStmtWhile); break; case 14: _fx_free_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t(&(*dst)->u.CStmtDoWhile); break; case 15: 
_fx_free_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CStmtSwitch); break; case 16: _fx_free_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t(&(*dst)->u.CDefVal); break; case 17: _fx_free_rR17C_form__cdeffun_t(&(*dst)->u.CDefFun); break; case 18: _fx_free_rR17C_form__cdeftyp_t(&(*dst)->u.CDefTyp); break; case 21: _fx_free_rR18C_form__cdefenum_t(&(*dst)->u.CDefEnum); break; case 22: _fx_free_rR23C_form__cdefinterface_t(&(*dst)->u.CDefInterface); break; case 23: _fx_free_rR19C_form__cdefmacro_t(&(*dst)->u.CMacroDef); break; case 25: _fx_free_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CMacroIf); break; case 26: _fx_free_T2SR10Ast__loc_t(&(*dst)->u.CMacroInclude); break; case 27: _fx_free_T2SR10Ast__loc_t(&(*dst)->u.CMacroPragma); break; default: ; } fx_free(*dst); } *dst = 0; } static void _fx_free_R17C_form__cmodule_t(struct _fx_R17C_form__cmodule_t* dst) { fx_free_str(&dst->cmod_cname); _fx_free_LN15C_form__cstmt_t(&dst->cmod_ccode); _fx_free_R14Ast__pragmas_t(&dst->cmod_pragmas); } static void _fx_copy_R17C_form__cmodule_t(struct _fx_R17C_form__cmodule_t* src, struct _fx_R17C_form__cmodule_t* dst) { dst->cmod_name = src->cmod_name; fx_copy_str(&src->cmod_cname, &dst->cmod_cname); FX_COPY_PTR(src->cmod_ccode, &dst->cmod_ccode); dst->cmod_main = src->cmod_main; dst->cmod_recompile = src->cmod_recompile; dst->cmod_skip = src->cmod_skip; _fx_copy_R14Ast__pragmas_t(&src->cmod_pragmas, &dst->cmod_pragmas); } static void _fx_make_R17C_form__cmodule_t( struct _fx_R9Ast__id_t* r_cmod_name, fx_str_t* r_cmod_cname, struct _fx_LN15C_form__cstmt_t_data_t* r_cmod_ccode, bool r_cmod_main, bool r_cmod_recompile, bool r_cmod_skip, struct _fx_R14Ast__pragmas_t* r_cmod_pragmas, struct _fx_R17C_form__cmodule_t* fx_result) { fx_result->cmod_name = *r_cmod_name; fx_copy_str(r_cmod_cname, &fx_result->cmod_cname); FX_COPY_PTR(r_cmod_ccode, &fx_result->cmod_ccode); fx_result->cmod_main = 
r_cmod_main; fx_result->cmod_recompile = r_cmod_recompile; fx_result->cmod_skip = r_cmod_skip; _fx_copy_R14Ast__pragmas_t(r_cmod_pragmas, &fx_result->cmod_pragmas); } static void _fx_free_LR17C_form__cmodule_t(struct _fx_LR17C_form__cmodule_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LR17C_form__cmodule_t, _fx_free_R17C_form__cmodule_t); } static int _fx_cons_LR17C_form__cmodule_t( struct _fx_R17C_form__cmodule_t* hd, struct _fx_LR17C_form__cmodule_t_data_t* tl, bool addref_tl, struct _fx_LR17C_form__cmodule_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LR17C_form__cmodule_t, _fx_copy_R17C_form__cmodule_t); } static void _fx_free_T2LN14Lexer__token_tB(struct _fx_T2LN14Lexer__token_tB* dst) { _fx_free_LN14Lexer__token_t(&dst->t0); } static void _fx_copy_T2LN14Lexer__token_tB(struct _fx_T2LN14Lexer__token_tB* src, struct _fx_T2LN14Lexer__token_tB* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2LN14Lexer__token_tB( struct _fx_LN14Lexer__token_t_data_t* t0, bool t1, struct _fx_T2LN14Lexer__token_tB* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = t1; } static void _fx_free_T2SB(struct _fx_T2SB* dst) { fx_free_str(&dst->t0); } static void _fx_copy_T2SB(struct _fx_T2SB* src, struct _fx_T2SB* dst) { fx_copy_str(&src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2SB(fx_str_t* t0, bool t1, struct _fx_T2SB* fx_result) { fx_copy_str(t0, &fx_result->t0); fx_result->t1 = t1; } static void _fx_free_LT2SB(struct _fx_LT2SB_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2SB, _fx_free_T2SB); } static int _fx_cons_LT2SB(struct _fx_T2SB* hd, struct _fx_LT2SB_data_t* tl, bool addref_tl, struct _fx_LT2SB_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2SB, _fx_copy_T2SB); } static void _fx_free_T2SLS(struct _fx_T2SLS* dst) { fx_free_str(&dst->t0); _fx_free_LS(&dst->t1); } static void _fx_copy_T2SLS(struct _fx_T2SLS* src, struct _fx_T2SLS* dst) { fx_copy_str(&src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void 
_fx_make_T2SLS(fx_str_t* t0, struct _fx_LS_data_t* t1, struct _fx_T2SLS* fx_result) { fx_copy_str(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_Ta2LS(struct _fx_Ta2LS* dst) { _fx_free_LS(&dst->t0); _fx_free_LS(&dst->t1); } static void _fx_copy_Ta2LS(struct _fx_Ta2LS* src, struct _fx_Ta2LS* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_Ta2LS(struct _fx_LS_data_t* t0, struct _fx_LS_data_t* t1, struct _fx_Ta2LS* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_T2iLi(struct _fx_T2iLi* dst) { fx_free_list_simple(&dst->t1); } static void _fx_copy_T2iLi(struct _fx_T2iLi* src, struct _fx_T2iLi* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2iLi(int_ t0, struct _fx_Li_data_t* t1, struct _fx_T2iLi* fx_result) { fx_result->t0 = t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LT2iLi(struct _fx_LT2iLi_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2iLi, _fx_free_T2iLi); } static int _fx_cons_LT2iLi( struct _fx_T2iLi* hd, struct _fx_LT2iLi_data_t* tl, bool addref_tl, struct _fx_LT2iLi_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2iLi, _fx_copy_T2iLi); } static void _fx_free_rLi(struct _fx_rLi_data_t** dst) { FX_FREE_REF_IMPL(_fx_rLi, fx_free_list_simple); } static int _fx_make_rLi(struct _fx_Li_data_t* arg, struct _fx_rLi_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rLi, FX_COPY_PTR); } static void _fx_free_T3BBS(struct _fx_T3BBS* dst) { fx_free_str(&dst->t2); } static void _fx_copy_T3BBS(struct _fx_T3BBS* src, struct _fx_T3BBS* dst) { dst->t0 = src->t0; dst->t1 = src->t1; fx_copy_str(&src->t2, &dst->t2); } static void _fx_make_T3BBS(bool t0, bool t1, fx_str_t* t2, struct _fx_T3BBS* fx_result) { fx_result->t0 = t0; fx_result->t1 = t1; fx_copy_str(t2, &fx_result->t2); } static void _fx_free_T2LR17K_form__kmodule_tB(struct _fx_T2LR17K_form__kmodule_tB* dst) { _fx_free_LR17K_form__kmodule_t(&dst->t0); } static 
void _fx_copy_T2LR17K_form__kmodule_tB( struct _fx_T2LR17K_form__kmodule_tB* src, struct _fx_T2LR17K_form__kmodule_tB* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2LR17K_form__kmodule_tB( struct _fx_LR17K_form__kmodule_t_data_t* t0, bool t1, struct _fx_T2LR17K_form__kmodule_tB* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = t1; } static void _fx_free_T2LR17C_form__cmodule_tB(struct _fx_T2LR17C_form__cmodule_tB* dst) { _fx_free_LR17C_form__cmodule_t(&dst->t0); } static void _fx_copy_T2LR17C_form__cmodule_tB( struct _fx_T2LR17C_form__cmodule_tB* src, struct _fx_T2LR17C_form__cmodule_tB* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2LR17C_form__cmodule_tB( struct _fx_LR17C_form__cmodule_t_data_t* t0, bool t1, struct _fx_T2LR17C_form__cmodule_tB* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = t1; } static void _fx_free_Ta9S(struct _fx_Ta9S* dst) { fx_free_str(&dst->t0); fx_free_str(&dst->t1); fx_free_str(&dst->t2); fx_free_str(&dst->t3); fx_free_str(&dst->t4); fx_free_str(&dst->t5); fx_free_str(&dst->t6); fx_free_str(&dst->t7); fx_free_str(&dst->t8); } static void _fx_copy_Ta9S(struct _fx_Ta9S* src, struct _fx_Ta9S* dst) { fx_copy_str(&src->t0, &dst->t0); fx_copy_str(&src->t1, &dst->t1); fx_copy_str(&src->t2, &dst->t2); fx_copy_str(&src->t3, &dst->t3); fx_copy_str(&src->t4, &dst->t4); fx_copy_str(&src->t5, &dst->t5); fx_copy_str(&src->t6, &dst->t6); fx_copy_str(&src->t7, &dst->t7); fx_copy_str(&src->t8, &dst->t8); } static void _fx_make_Ta9S( fx_str_t* t0, fx_str_t* t1, fx_str_t* t2, fx_str_t* t3, fx_str_t* t4, fx_str_t* t5, fx_str_t* t6, fx_str_t* t7, fx_str_t* t8, struct _fx_Ta9S* fx_result) { fx_copy_str(t0, &fx_result->t0); fx_copy_str(t1, &fx_result->t1); fx_copy_str(t2, &fx_result->t2); fx_copy_str(t3, &fx_result->t3); fx_copy_str(t4, &fx_result->t4); fx_copy_str(t5, &fx_result->t5); fx_copy_str(t6, &fx_result->t6); fx_copy_str(t7, &fx_result->t7); 
fx_copy_str(t8, &fx_result->t8); } static void _fx_free_Ta2S(struct _fx_Ta2S* dst) { fx_free_str(&dst->t0); fx_free_str(&dst->t1); } static void _fx_copy_Ta2S(struct _fx_Ta2S* src, struct _fx_Ta2S* dst) { fx_copy_str(&src->t0, &dst->t0); fx_copy_str(&src->t1, &dst->t1); } static void _fx_make_Ta2S(fx_str_t* t0, fx_str_t* t1, struct _fx_Ta2S* fx_result) { fx_copy_str(t0, &fx_result->t0); fx_copy_str(t1, &fx_result->t1); } static void _fx_free_Ta3S(struct _fx_Ta3S* dst) { fx_free_str(&dst->t0); fx_free_str(&dst->t1); fx_free_str(&dst->t2); } static void _fx_copy_Ta3S(struct _fx_Ta3S* src, struct _fx_Ta3S* dst) { fx_copy_str(&src->t0, &dst->t0); fx_copy_str(&src->t1, &dst->t1); fx_copy_str(&src->t2, &dst->t2); } static void _fx_make_Ta3S(fx_str_t* t0, fx_str_t* t1, fx_str_t* t2, struct _fx_Ta3S* fx_result) { fx_copy_str(t0, &fx_result->t0); fx_copy_str(t1, &fx_result->t1); fx_copy_str(t2, &fx_result->t2); } static void _fx_free_Ta4S(struct _fx_Ta4S* dst) { fx_free_str(&dst->t0); fx_free_str(&dst->t1); fx_free_str(&dst->t2); fx_free_str(&dst->t3); } static void _fx_copy_Ta4S(struct _fx_Ta4S* src, struct _fx_Ta4S* dst) { fx_copy_str(&src->t0, &dst->t0); fx_copy_str(&src->t1, &dst->t1); fx_copy_str(&src->t2, &dst->t2); fx_copy_str(&src->t3, &dst->t3); } static void _fx_make_Ta4S(fx_str_t* t0, fx_str_t* t1, fx_str_t* t2, fx_str_t* t3, struct _fx_Ta4S* fx_result) { fx_copy_str(t0, &fx_result->t0); fx_copy_str(t1, &fx_result->t1); fx_copy_str(t2, &fx_result->t2); fx_copy_str(t3, &fx_result->t3); } static void _fx_free_T5BBLSBS(struct _fx_T5BBLSBS* dst) { _fx_free_LS(&dst->t2); fx_free_str(&dst->t4); } static void _fx_copy_T5BBLSBS(struct _fx_T5BBLSBS* src, struct _fx_T5BBLSBS* dst) { dst->t0 = src->t0; dst->t1 = src->t1; FX_COPY_PTR(src->t2, &dst->t2); dst->t3 = src->t3; fx_copy_str(&src->t4, &dst->t4); } static void _fx_make_T5BBLSBS(bool t0, bool t1, struct _fx_LS_data_t* t2, bool t3, fx_str_t* t4, struct _fx_T5BBLSBS* fx_result) { fx_result->t0 = t0; fx_result->t1 = t1; 
FX_COPY_PTR(t2, &fx_result->t2); fx_result->t3 = t3; fx_copy_str(t4, &fx_result->t4); } static void _fx_free_T5BBLSBLS(struct _fx_T5BBLSBLS* dst) { _fx_free_LS(&dst->t2); _fx_free_LS(&dst->t4); } static void _fx_copy_T5BBLSBLS(struct _fx_T5BBLSBLS* src, struct _fx_T5BBLSBLS* dst) { dst->t0 = src->t0; dst->t1 = src->t1; FX_COPY_PTR(src->t2, &dst->t2); dst->t3 = src->t3; FX_COPY_PTR(src->t4, &dst->t4); } static void _fx_make_T5BBLSBLS( bool t0, bool t1, struct _fx_LS_data_t* t2, bool t3, struct _fx_LS_data_t* t4, struct _fx_T5BBLSBLS* fx_result) { fx_result->t0 = t0; fx_result->t1 = t1; FX_COPY_PTR(t2, &fx_result->t2); fx_result->t3 = t3; FX_COPY_PTR(t4, &fx_result->t4); } _fx_N14Lexer__token_t _fx_g14Compiler__FROM = { 20 }; _fx_N14Lexer__token_t _fx_g19Compiler__SEMICOLON = { 59 }; int _FX_EXN_E30Compiler__CumulativeParseError = 0; _fx_N20Compiler__msgcolor_t _fx_g16Compiler__MsgRed = { 1 }; _fx_N20Compiler__msgcolor_t _fx_g18Compiler__MsgGreen = { 2 }; _fx_N20Compiler__msgcolor_t _fx_g17Compiler__MsgBlue = { 3 }; bool _fx_g21Compiler__iscolorterm; fx_str_t _fx_g15Compiler__error = {0}; FX_EXTERN_C int _fx_F4joinS2SLS(fx_str_t* sep_0, struct _fx_LS_data_t* strs_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int_ _fx_M6StringFM4findi3SSi(fx_str_t* s, fx_str_t* part, int_ from_pos, void* fx_fv); FX_EXTERN_C int _fx_M3SysFM9colortermB0(bool* fx_result, void* fx_fv); FX_EXTERN_C int _fx_F6stringS1S(fx_str_t* a_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C_VAL(struct _fx_R18Options__options_t _fx_g12Options__opt) FX_EXTERN_C int _fx_M8FilenameFM8basenameS1S(fx_str_t* path_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M8FilenameFM16remove_extensionS1S(fx_str_t* path_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C bool _fx_F6__eq__B2SS(fx_str_t* a, fx_str_t* b, void* fx_fv); FX_EXTERN_C void _fx_M5LexerFM5IDENTN14Lexer__token_t2BS(bool arg0, fx_str_t* arg1, struct _fx_N14Lexer__token_t* fx_result); FX_EXTERN_C void 
_fx_M5LexerFM6IMPORTN14Lexer__token_t1B(bool arg0, struct _fx_N14Lexer__token_t* fx_result); FX_EXTERN_C void _fx_M5LexerFM4STARN14Lexer__token_t1B(bool arg0, struct _fx_N14Lexer__token_t* fx_result); FX_EXTERN_C int _fx_M3SysFM7getpathLS1S(fx_str_t* name_0, struct _fx_LS_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M8FilenameFM6getcwdS0(fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C_VAL(struct _fx_LS_data_t* _fx_g9Sys__argv) FX_EXTERN_C int _fx_M8FilenameFM9normalizeS2SS(fx_str_t* dir_0, fx_str_t* fname_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M8FilenameFM7dirnameS1S(fx_str_t* path_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C_VAL(int_ _fx_g15__ficus_major__) FX_EXTERN_C int _fx_F6stringS1i(int_ a, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C_VAL(int_ _fx_g15__ficus_minor__) FX_EXTERN_C int _fx_M8FilenameFM6existsB1S(fx_str_t* name, bool* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M3AstFM6get_idRM4id_t1S(fx_str_t* s_0, struct _fx_R9Ast__id_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M3AstFM11find_modulei2RM4id_tS( struct _fx_R9Ast__id_t* mname_0, fx_str_t* mfname_0, int_* fx_result, void* fx_fv); FX_EXTERN_C_VAL(fx_arr_t _fx_g16Ast__all_modules) FX_EXTERN_C int _fx_M6ParserFM5parseB3iLN14Lexer__token_tLS( int_ m_idx_0, struct _fx_LN14Lexer__token_t_data_t* preamble_0, struct _fx_LS_data_t* inc_dirs_0, bool* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M3AstFM10get_moduleN16Ast__defmodule_t1i( int_ m_0, struct _fx_N16Ast__defmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C_VAL(int _FX_EXN_E22LexerUtils__LexerError) FX_EXTERN_C void _fx_F12print_stringv1S(fx_str_t* a, void* fx_fv); FX_EXTERN_C_VAL(int _FX_EXN_E18Parser__ParseError) FX_EXTERN_C int _fx_M3AstFM6stringS1RM5loc_t(struct _fx_R10Ast__loc_t* loc_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_F6stringS1E(fx_exn_t* a, fx_str_t* fx_result, void* fx_fv); static int _fx_M8CompilerFM3dfsv5iLiA1LiA1BrLi( int_ i_0, struct _fx_Li_data_t* visited_0, fx_arr_t* 
graph_0, fx_arr_t* processed_0, struct _fx_rLi_data_t* result_ref_0, void* fx_fv); FX_EXTERN_C int _fx_M3AstFM15get_module_nameRM4id_t1i(int_ m_0, struct _fx_R9Ast__id_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M3AstFM2ppS1RM4id_t(struct _fx_R9Ast__id_t* i_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_F9make_FailE1S(fx_str_t* arg0, fx_exn_t* fx_result); FX_EXTERN_C int _fx_M3SysFM5mkdirB2Si(fx_str_t* name, int_ permissions, bool* fx_result, void* fx_fv); FX_EXTERN_C_VAL(bool _fx_g10Sys__win32) FX_EXTERN_C int _fx_M8K_mangleFM12mangle_mnameS1S(fx_str_t* m_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M4K_ppFM16pp_top_to_stringS1LN14K_form__kexp_t( struct _fx_LN14K_form__kexp_t_data_t* code_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M4FileFM9read_utf8S1S(fx_str_t* fname, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C_VAL(int FX_EXN_IOError) FX_EXTERN_C_VAL(int FX_EXN_FileOpenError) FX_EXTERN_C int _fx_M4FileFM10write_utf8v2SS(fx_str_t* fname, fx_str_t* text, void* fx_fv); FX_EXTERN_C int _fx_M3SysFM6removev1S(fx_str_t* name, void* fx_fv); FX_EXTERN_C int _fx_M3AstFM10pr_verbosev1S(fx_str_t* str_0, void* fx_fv); FX_EXTERN_C int _fx_M6K_formFM9KExpCCodeN14K_form__kexp_t2ST2N14K_form__ktyp_tR10Ast__loc_t( fx_str_t* arg0, struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* arg1, struct _fx_N14K_form__kexp_t_data_t** fx_result); FX_EXTERN_C_VAL(struct _fx_LE_data_t* _fx_g21Ast__all_compile_errs) FX_EXTERN_C int _fx_M15K_remove_unusedFM13remove_unusedLR17K_form__kmodule_t2LR17K_form__kmodule_tB( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, bool initial_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M10K_annotateFM14annotate_typesLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M13K_copy_n_skipFM9copy_someLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct 
_fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M15K_remove_unusedFM21remove_unused_by_mainLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M8K_mangleFM10mangle_allLR17K_form__kmodule_t2LR17K_form__kmodule_tB( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, bool final_mode_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M8K_mangleFM13mangle_localsLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M8K_mangleFM12demangle_allLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M13K_lift_simpleFM4liftLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M9K_tailrecFM17tailrec2loops_allLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M10K_loop_invFM18move_loop_invs_allLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M8K_inlineFM11inline_someLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M9K_flattenFM11flatten_allLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct 
_fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M12K_fuse_loopsFM14fuse_loops_allLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M10K_fast_idxFM23optimize_idx_checks_allLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M15K_cfold_dealiasFM13cfold_dealiasLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M18K_nothrow_wrappersFM25make_wrappers_for_nothrowLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M10K_freevarsFM21mutable_freevars2refsLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M11K_declosureFM13declosure_allLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M6K_liftFM8lift_allLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M8K_inlineFM24find_recursive_funcs_allLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M6C_formFM13init_all_idcsv0(void* fx_fv); FX_EXTERN_C int _fx_M9C_gen_stdFM14init_std_namesv0(void* fx_fv); FX_EXTERN_C int 
_fx_M10C_gen_codeFM13gen_ccode_allLR17C_form__cmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17C_form__cmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M20C_post_rename_localsFM13rename_localsLR17C_form__cmodule_t1LR17C_form__cmodule_t( struct _fx_LR17C_form__cmodule_t_data_t* cmods_0, struct _fx_LR17C_form__cmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M19C_post_adjust_declsFM12adjust_declsR17C_form__cmodule_t1R17C_form__cmodule_t( struct _fx_R17C_form__cmodule_t* cmod_0, struct _fx_R17C_form__cmodule_t* fx_result, void* fx_fv); FX_EXTERN_C_VAL(struct _fx_FPS1B _fx_g11Sys__osname) FX_EXTERN_C_VAL(bool _fx_g9Sys__unix) FX_EXTERN_C int _fx_M3SysFM6getenvS1S(fx_str_t* name, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C_VAL(struct _fx_R9Ast__id_t _fx_g9Ast__noid) FX_EXTERN_C int _fx_M4C_ppFM20pprint_top_to_stringS1LN15C_form__cstmt_t( struct _fx_LN15C_form__cstmt_t_data_t* code_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M3SysFM7commandi1S(fx_str_t* cmd, int_* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M3AstFM8init_allv0(void* fx_fv); FX_EXTERN_C_VAL(fx_exn_t _fx_E30Compiler__CumulativeParseErrorv) FX_EXTERN_C_VAL(struct _fx_Li_data_t* _fx_g23Ast__all_modules_sorted) FX_EXTERN_C int _fx_M6Ast_ppFM10pprint_modv1N16Ast__defmodule_t(struct _fx_N16Ast__defmodule_t_data_t* dm_0, void* fx_fv); FX_EXTERN_C int _fx_M13Ast_typecheckFM9check_modv1i(int_ m_idx_0, void* fx_fv); FX_EXTERN_C int _fx_M6K_formFM13init_all_idksv0(void* fx_fv); FX_EXTERN_C int _fx_M11K_normalizeFM21normalize_all_modulesLR17K_form__kmodule_t1Li( struct _fx_Li_data_t* modules_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M4K_ppFM8pp_kmodsv1LR17K_form__kmodule_t(struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, void* fx_fv); FX_EXTERN_C int _fx_M3AstFM17print_compile_errv1E(fx_exn_t* err_0, void* fx_fv); FX_EXTERN_C_VAL(int _FX_EXN_E4Fail) FX_EXTERN_C_VAL(int 
_FX_EXN_E17Ast__CompileError) fx_exn_info_t _fx_E30Compiler__CumulativeParseError_info = {0}; fx_exn_t _fx_E30Compiler__CumulativeParseErrorv = {0}; FX_EXTERN_C int_ _fx_M8CompilerFM6lengthi1LE(struct _fx_LE_data_t* l, void* fx_fv) { return fx_list_length(l); } FX_EXTERN_C int_ _fx_M8CompilerFM6lengthi1LS(struct _fx_LS_data_t* l, void* fx_fv) { return fx_list_length(l); } FX_EXTERN_C void _fx_M8CompilerFM5link2LN14Lexer__token_t2LN14Lexer__token_tLN14Lexer__token_t( struct _fx_LN14Lexer__token_t_data_t* l1, struct _fx_LN14Lexer__token_t_data_t* l2, struct _fx_LN14Lexer__token_t_data_t** fx_result, void* fx_fv) { fx_link_lists(l1, l2, fx_result); } FX_EXTERN_C void _fx_M8CompilerFM5link2LS2LSLS( struct _fx_LS_data_t* l1, struct _fx_LS_data_t* l2, struct _fx_LS_data_t** fx_result, void* fx_fv) { fx_link_lists(l1, l2, fx_result); } FX_EXTERN_C int _fx_M8CompilerFM7__add__LS2LSLS( struct _fx_LS_data_t* l1_0, struct _fx_LS_data_t* l2_0, struct _fx_LS_data_t** fx_result, void* fx_fv) { int fx_status = 0; if (l1_0 == 0) { FX_COPY_PTR(l2_0, fx_result); } else if (l2_0 == 0) { FX_COPY_PTR(l1_0, fx_result); } else { _fx_LS v_0 = 0; _fx_LS lstend_0 = 0; _fx_LS lst_0 = l1_0; for (; lst_0; lst_0 = lst_0->tl) { fx_str_t* x_0 = &lst_0->hd; _fx_LS node_0 = 0; FX_CALL(_fx_cons_LS(x_0, 0, false, &node_0), _fx_catch_0); FX_LIST_APPEND(v_0, lstend_0, node_0); _fx_catch_0: ; FX_CHECK_EXN(_fx_catch_1); } _fx_M8CompilerFM5link2LS2LSLS(v_0, l2_0, fx_result, 0); _fx_catch_1: ; if (v_0) { _fx_free_LS(&v_0); } } return fx_status; } FX_EXTERN_C int _fx_M8CompilerFM5arrayA1R17C_form__cmodule_t1LR17C_form__cmodule_t( struct _fx_LR17C_form__cmodule_t_data_t* l_0, fx_arr_t* fx_result, void* fx_fv) { int fx_status = 0; _fx_R17C_form__cmodule_t* dstptr_0 = 0; _fx_LR17C_form__cmodule_t lst_0 = l_0; int_ len_0 = fx_list_length(lst_0); { const int_ shape_0[] = { len_0 }; FX_CALL( fx_make_arr(1, shape_0, sizeof(_fx_R17C_form__cmodule_t), (fx_free_t)_fx_free_R17C_form__cmodule_t, 
(fx_copy_t)_fx_copy_R17C_form__cmodule_t, 0, fx_result), _fx_cleanup); } dstptr_0 = (_fx_R17C_form__cmodule_t*)fx_result->data; for (; lst_0; lst_0 = lst_0->tl, dstptr_0++) { _fx_R17C_form__cmodule_t* x_0 = &lst_0->hd; _fx_copy_R17C_form__cmodule_t(x_0, dstptr_0); } _fx_cleanup: ; return fx_status; } FX_EXTERN_C int _fx_M8CompilerFM3revLS1LS(struct _fx_LS_data_t* l_0, struct _fx_LS_data_t** fx_result, void* fx_fv) { _fx_LS __fold_result___0 = 0; int fx_status = 0; _fx_LS lst_0 = l_0; for (; lst_0; lst_0 = lst_0->tl) { _fx_LS r_0 = 0; fx_str_t* a_0 = &lst_0->hd; FX_COPY_PTR(__fold_result___0, &r_0); FX_CALL(_fx_cons_LS(a_0, r_0, false, &r_0), _fx_catch_0); _fx_free_LS(&__fold_result___0); FX_COPY_PTR(r_0, &__fold_result___0); _fx_catch_0: ; if (r_0) { _fx_free_LS(&r_0); } FX_CHECK_EXN(_fx_cleanup); } FX_COPY_PTR(__fold_result___0, fx_result); _fx_cleanup: ; if (__fold_result___0) { _fx_free_LS(&__fold_result___0); } return fx_status; } FX_EXTERN_C int _fx_M8CompilerFM4joinS2SLS(fx_str_t* sep_0, struct _fx_LS_data_t* strs_0, fx_str_t* fx_result, void* fx_fv) { int fx_status = 0; FX_CALL(_fx_F4joinS2SLS(sep_0, strs_0, fx_result, 0), _fx_cleanup); _fx_cleanup: ; return fx_status; } FX_EXTERN_C int _fx_M8CompilerFM8containsB2SS(fx_str_t* s_0, fx_str_t* substr_0, bool* fx_result, void* fx_fv) { int fx_status = 0; int_ v_0 = _fx_M6StringFM4findi3SSi(s_0, substr_0, 0, 0); *fx_result = v_0 >= 0; return fx_status; } FX_EXTERN_C int _fx_M8CompilerFM6clrmsgS2N20Compiler__msgcolor_tS( struct _fx_N20Compiler__msgcolor_t* clr_0, fx_str_t* msg_0, fx_str_t* fx_result, void* fx_fv) { fx_str_t esc_0 = {0}; fx_str_t v_0 = {0}; fx_str_t v_1 = {0}; int fx_status = 0; if (_fx_g21Compiler__iscolorterm) { int tag_0 = clr_0->tag; if (tag_0 == 1) { fx_str_t slit_0 = FX_MAKE_STR(""); fx_copy_str(&slit_0, &esc_0); } else if (tag_0 == 2) { fx_str_t slit_1 = FX_MAKE_STR(""); fx_copy_str(&slit_1, &esc_0); } else if (tag_0 == 3) { fx_str_t slit_2 = FX_MAKE_STR(""); fx_copy_str(&slit_2, &esc_0); } 
/* (continuation of Compiler.clrmsg: default color branch + string assembly) */
else { fx_str_t slit_3 = FX_MAKE_STR(""); fx_copy_str(&slit_3, &esc_0); } FX_CHECK_EXN(_fx_cleanup); FX_CALL(_fx_F6stringS1S(&esc_0, &v_0, 0), _fx_cleanup); FX_CALL(_fx_F6stringS1S(msg_0, &v_1, 0), _fx_cleanup); fx_str_t slit_4 = FX_MAKE_STR(""); { const fx_str_t strs_0[] = { v_0, v_1, slit_4 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 3, fx_result), _fx_cleanup); } } else { fx_copy_str(msg_0, fx_result); } _fx_cleanup: ; FX_FREE_STR(&esc_0); FX_FREE_STR(&v_0); FX_FREE_STR(&v_1); return fx_status; }
/* Compiler.get_preamble(mfname): when Options.opt.use_preamble is set, builds
 * the implicit token preamble for module file `mfname`. It folds over a fixed
 * module list — ("Builtins",true), ("Math",true), ("Array",true), ("List",false),
 * ("Vector",false), ("Char",false), ("String",false) — and for each entry that
 * is not the module being compiled (compared by bare file name) appends either
 * "FROM <ident> IMPORT * ;" tokens (bool flag true) or "IMPORT <ident> ;"
 * tokens (flag false) to the accumulated preamble; once the fold's `found`
 * flag is set, remaining entries are passed through unchanged.
 * Returns the accumulated token list (empty when use_preamble is off). */
FX_EXTERN_C int _fx_M8CompilerFM12get_preambleLN14Lexer__token_t1S( fx_str_t* mfname_0, struct _fx_LN14Lexer__token_t_data_t** fx_result, void* fx_fv) { fx_str_t v_0 = {0}; fx_str_t bare_name_0 = {0}; _fx_T2LN14Lexer__token_tB __fold_result___0 = {0}; _fx_T2SB v_1 = {0}; _fx_T2SB v_2 = {0}; _fx_T2SB v_3 = {0}; _fx_T2SB v_4 = {0}; _fx_T2SB v_5 = {0}; _fx_T2SB v_6 = {0}; _fx_T2SB v_7 = {0}; _fx_LT2SB v_8 = 0; _fx_T2LN14Lexer__token_tB v_9 = {0}; int fx_status = 0; if (_fx_g12Options__opt.use_preamble) { FX_CALL(_fx_M8FilenameFM8basenameS1S(mfname_0, &v_0, 0), _fx_cleanup); FX_CALL(_fx_M8FilenameFM16remove_extensionS1S(&v_0, &bare_name_0, 0), _fx_cleanup); _fx_make_T2LN14Lexer__token_tB(0, false, &__fold_result___0); fx_str_t slit_0 = FX_MAKE_STR("Builtins"); _fx_make_T2SB(&slit_0, true, &v_1); fx_str_t slit_1 = FX_MAKE_STR("Math"); _fx_make_T2SB(&slit_1, true, &v_2); fx_str_t slit_2 = FX_MAKE_STR("Array"); _fx_make_T2SB(&slit_2, true, &v_3); fx_str_t slit_3 = FX_MAKE_STR("List"); _fx_make_T2SB(&slit_3, false, &v_4); fx_str_t slit_4 = FX_MAKE_STR("Vector"); _fx_make_T2SB(&slit_4, false, &v_5); fx_str_t slit_5 = FX_MAKE_STR("Char"); _fx_make_T2SB(&slit_5, false, &v_6); fx_str_t slit_6 = FX_MAKE_STR("String"); _fx_make_T2SB(&slit_6, false, &v_7); FX_CALL(_fx_cons_LT2SB(&v_7, 0, true, &v_8), _fx_cleanup); FX_CALL(_fx_cons_LT2SB(&v_6, v_8, false, &v_8), _fx_cleanup); FX_CALL(_fx_cons_LT2SB(&v_5, v_8, false, &v_8), _fx_cleanup); FX_CALL(_fx_cons_LT2SB(&v_4, v_8, false, &v_8),
_fx_cleanup); FX_CALL(_fx_cons_LT2SB(&v_3, v_8, false, &v_8), _fx_cleanup); FX_CALL(_fx_cons_LT2SB(&v_2, v_8, false, &v_8), _fx_cleanup); FX_CALL(_fx_cons_LT2SB(&v_1, v_8, false, &v_8), _fx_cleanup); _fx_LT2SB lst_0 = v_8; for (; lst_0; lst_0 = lst_0->tl) { fx_str_t mname_0 = {0}; _fx_T2LN14Lexer__token_tB v_10 = {0}; _fx_LN14Lexer__token_t preamble_0 = 0; _fx_T2LN14Lexer__token_tB v_11 = {0}; _fx_N14Lexer__token_t v_12 = {0}; _fx_N14Lexer__token_t v_13 = {0}; _fx_N14Lexer__token_t v_14 = {0}; _fx_LN14Lexer__token_t v_15 = 0; _fx_LN14Lexer__token_t v_16 = 0; _fx_N14Lexer__token_t v_17 = {0}; _fx_N14Lexer__token_t v_18 = {0}; _fx_LN14Lexer__token_t v_19 = 0; _fx_LN14Lexer__token_t v_20 = 0; _fx_T2SB* __pat___0 = &lst_0->hd; fx_copy_str(&__pat___0->t0, &mname_0); _fx_copy_T2LN14Lexer__token_tB(&__fold_result___0, &v_10); FX_COPY_PTR(v_10.t0, &preamble_0); bool found_0 = v_10.t1; if (found_0) { _fx_make_T2LN14Lexer__token_tB(preamble_0, found_0, &v_11); } else { bool v_21 = _fx_F6__eq__B2SS(&bare_name_0, &mname_0, 0); if (v_21) { _fx_make_T2LN14Lexer__token_tB(preamble_0, true, &v_11); } else if (__pat___0->t1) { _fx_M5LexerFM5IDENTN14Lexer__token_t2BS(true, &mname_0, &v_12); _fx_M5LexerFM6IMPORTN14Lexer__token_t1B(false, &v_13); _fx_M5LexerFM4STARN14Lexer__token_t1B(true, &v_14); FX_CALL(_fx_cons_LN14Lexer__token_t(&_fx_g19Compiler__SEMICOLON, 0, true, &v_15), _fx_catch_4); FX_CALL(_fx_cons_LN14Lexer__token_t(&v_14, v_15, false, &v_15), _fx_catch_4); FX_CALL(_fx_cons_LN14Lexer__token_t(&v_13, v_15, false, &v_15), _fx_catch_4); FX_CALL(_fx_cons_LN14Lexer__token_t(&v_12, v_15, false, &v_15), _fx_catch_4); FX_CALL(_fx_cons_LN14Lexer__token_t(&_fx_g14Compiler__FROM, v_15, false, &v_15), _fx_catch_4); if (preamble_0 == 0) { FX_COPY_PTR(v_15, &v_16); } else if (v_15 == 0) { FX_COPY_PTR(preamble_0, &v_16); } else { _fx_LN14Lexer__token_t v_22 = 0; _fx_LN14Lexer__token_t lstend_0 = 0; _fx_LN14Lexer__token_t lst_1 = preamble_0; for (; lst_1; lst_1 = lst_1->tl) {
_fx_N14Lexer__token_t* x_0 = &lst_1->hd; _fx_LN14Lexer__token_t node_0 = 0; FX_CALL(_fx_cons_LN14Lexer__token_t(x_0, 0, false, &node_0), _fx_catch_0); FX_LIST_APPEND(v_22, lstend_0, node_0); _fx_catch_0: ; FX_CHECK_EXN(_fx_catch_1); } _fx_M8CompilerFM5link2LN14Lexer__token_t2LN14Lexer__token_tLN14Lexer__token_t(v_22, v_15, &v_16, 0); _fx_catch_1: ; if (v_22) { _fx_free_LN14Lexer__token_t(&v_22); } } FX_CHECK_EXN(_fx_catch_4); _fx_make_T2LN14Lexer__token_tB(v_16, false, &v_11); } else { _fx_M5LexerFM6IMPORTN14Lexer__token_t1B(true, &v_17); _fx_M5LexerFM5IDENTN14Lexer__token_t2BS(true, &mname_0, &v_18); FX_CALL(_fx_cons_LN14Lexer__token_t(&_fx_g19Compiler__SEMICOLON, 0, true, &v_19), _fx_catch_4); FX_CALL(_fx_cons_LN14Lexer__token_t(&v_18, v_19, false, &v_19), _fx_catch_4); FX_CALL(_fx_cons_LN14Lexer__token_t(&v_17, v_19, false, &v_19), _fx_catch_4); if (preamble_0 == 0) { FX_COPY_PTR(v_19, &v_20); } else if (v_19 == 0) { FX_COPY_PTR(preamble_0, &v_20); } else { _fx_LN14Lexer__token_t v_23 = 0; _fx_LN14Lexer__token_t lstend_1 = 0; _fx_LN14Lexer__token_t lst_2 = preamble_0; for (; lst_2; lst_2 = lst_2->tl) { _fx_N14Lexer__token_t* x_1 = &lst_2->hd; _fx_LN14Lexer__token_t node_1 = 0; FX_CALL(_fx_cons_LN14Lexer__token_t(x_1, 0, false, &node_1), _fx_catch_2); FX_LIST_APPEND(v_23, lstend_1, node_1); _fx_catch_2: ; FX_CHECK_EXN(_fx_catch_3); } _fx_M8CompilerFM5link2LN14Lexer__token_t2LN14Lexer__token_tLN14Lexer__token_t(v_23, v_19, &v_20, 0); _fx_catch_3: ; if (v_23) { _fx_free_LN14Lexer__token_t(&v_23); } } FX_CHECK_EXN(_fx_catch_4); _fx_make_T2LN14Lexer__token_tB(v_20, false, &v_11); } } _fx_free_T2LN14Lexer__token_tB(&__fold_result___0); _fx_copy_T2LN14Lexer__token_tB(&v_11, &__fold_result___0); _fx_catch_4: ; if (v_20) { _fx_free_LN14Lexer__token_t(&v_20); } if (v_19) { _fx_free_LN14Lexer__token_t(&v_19); } _fx_free_N14Lexer__token_t(&v_18); _fx_free_N14Lexer__token_t(&v_17); if (v_16) { _fx_free_LN14Lexer__token_t(&v_16); } if (v_15) {
/* (continuation of Compiler.get_preamble: per-iteration cleanup + result copy) */
_fx_free_LN14Lexer__token_t(&v_15); } _fx_free_N14Lexer__token_t(&v_14); _fx_free_N14Lexer__token_t(&v_13); _fx_free_N14Lexer__token_t(&v_12); _fx_free_T2LN14Lexer__token_tB(&v_11); if (preamble_0) { _fx_free_LN14Lexer__token_t(&preamble_0); } _fx_free_T2LN14Lexer__token_tB(&v_10); FX_FREE_STR(&mname_0); FX_CHECK_EXN(_fx_cleanup); } _fx_copy_T2LN14Lexer__token_tB(&__fold_result___0, &v_9); FX_COPY_PTR(v_9.t0, fx_result); } _fx_cleanup: ; FX_FREE_STR(&v_0); FX_FREE_STR(&bare_name_0); _fx_free_T2LN14Lexer__token_tB(&__fold_result___0); _fx_free_T2SB(&v_1); _fx_free_T2SB(&v_2); _fx_free_T2SB(&v_3); _fx_free_T2SB(&v_4); _fx_free_T2SB(&v_5); _fx_free_T2SB(&v_6); _fx_free_T2SB(&v_7); if (v_8) { _fx_free_LT2SB(&v_8); } _fx_free_T2LN14Lexer__token_tB(&v_9); return fx_status; }
/* Compiler.find_ficus_dirs(): locates the Ficus standard-library root.
 * Builds a search path from (a) the FICUS_PATH environment variable
 * (Sys.getpath) and (b) standard locations derived from the running binary's
 * directory: <app>/../../lib, <app>/../lib/ficus-<major>.<minor>/lib, and
 * <app>/../lib; then scans each candidate dir for both "Builtins.fx" and
 * "../runtime/ficus/ficus.h". On a hit, stores the candidate's parent dir as
 * `found` and, if the hit was a standard (non-FICUS_PATH) candidate, prepends
 * it to ficus_path. Returns the tuple (found, ficus_path). */
FX_EXTERN_C int _fx_M8CompilerFM15find_ficus_dirsT2SLS0(struct _fx_T2SLS* fx_result, void* fx_fv) { _fx_LS ficus_path_0 = 0; fx_str_t v_0 = {0}; fx_str_t v_1 = {0}; fx_str_t v_2 = {0}; fx_str_t ficus_app_path_0 = {0}; fx_str_t v_3 = {0}; fx_str_t ficus_pp_path_0 = {0}; fx_str_t v_4 = {0}; fx_str_t v_5 = {0}; fx_str_t v_6 = {0}; fx_str_t v_7 = {0}; fx_str_t ficus_inst_path_0 = {0}; fx_str_t v_8 = {0}; fx_str_t v_9 = {0}; fx_str_t v_10 = {0}; fx_str_t v_11 = {0}; _fx_LS v_12 = 0; _fx_LS std_ficus_path_0 = 0; _fx_Ta2LS v_13 = {0}; _fx_LS search_path_0 = 0; fx_str_t found_0 = {0}; int fx_status = 0; fx_str_t slit_0 = FX_MAKE_STR("FICUS_PATH"); FX_CALL(_fx_M3SysFM7getpathLS1S(&slit_0, &ficus_path_0, 0), _fx_cleanup); FX_CALL(_fx_M8FilenameFM6getcwdS0(&v_0, 0), _fx_cleanup); if (_fx_g9Sys__argv != 0) { fx_copy_str(&_fx_g9Sys__argv->hd, &v_1); } else { FX_FAST_THROW(FX_EXN_NullListError, _fx_cleanup); } FX_CHECK_EXN(_fx_cleanup); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&v_0, &v_1, &v_2, 0), _fx_cleanup); FX_CALL(_fx_M8FilenameFM7dirnameS1S(&v_2, &ficus_app_path_0, 0), _fx_cleanup); FX_CALL(_fx_M8FilenameFM7dirnameS1S(&ficus_app_path_0, &v_3, 0), _fx_cleanup); FX_CALL(_fx_M8FilenameFM7dirnameS1S(&v_3,
&ficus_pp_path_0, 0), _fx_cleanup); FX_CALL(_fx_M8FilenameFM7dirnameS1S(&ficus_app_path_0, &v_4, 0), _fx_cleanup); FX_CALL(_fx_F6stringS1i(_fx_g15__ficus_major__, &v_5, 0), _fx_cleanup); FX_CALL(_fx_F6stringS1i(_fx_g15__ficus_minor__, &v_6, 0), _fx_cleanup); fx_str_t slit_1 = FX_MAKE_STR("lib/ficus-"); fx_str_t slit_2 = FX_MAKE_STR("."); { const fx_str_t strs_0[] = { slit_1, v_5, slit_2, v_6 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 4, &v_7), _fx_cleanup); } FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&v_4, &v_7, &ficus_inst_path_0, 0), _fx_cleanup); FX_CALL(_fx_M8FilenameFM7dirnameS1S(&ficus_app_path_0, &v_8, 0), _fx_cleanup); fx_str_t slit_3 = FX_MAKE_STR("lib"); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&v_8, &slit_3, &v_9, 0), _fx_cleanup); fx_str_t slit_4 = FX_MAKE_STR("lib"); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&ficus_pp_path_0, &slit_4, &v_10, 0), _fx_cleanup); fx_str_t slit_5 = FX_MAKE_STR("lib"); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&ficus_inst_path_0, &slit_5, &v_11, 0), _fx_cleanup); FX_CALL(_fx_cons_LS(&v_11, 0, true, &v_12), _fx_cleanup); FX_CALL(_fx_cons_LS(&v_10, v_12, false, &v_12), _fx_cleanup); FX_CALL(_fx_cons_LS(&v_9, v_12, true, &std_ficus_path_0), _fx_cleanup); int_ std_ficus_path_len_0 = _fx_M8CompilerFM6lengthi1LS(std_ficus_path_0, 0); _fx_make_Ta2LS(std_ficus_path_0, ficus_path_0, &v_13); if (v_13.t0 == 0) { FX_COPY_PTR(ficus_path_0, &search_path_0); } else if (v_13.t1 == 0) { FX_COPY_PTR(std_ficus_path_0, &search_path_0); } else { _fx_LS v_14 = 0; _fx_LS lstend_0 = 0; _fx_LS lst_0 = std_ficus_path_0; for (; lst_0; lst_0 = lst_0->tl) { fx_str_t* x_0 = &lst_0->hd; _fx_LS node_0 = 0; FX_CALL(_fx_cons_LS(x_0, 0, false, &node_0), _fx_catch_0); FX_LIST_APPEND(v_14, lstend_0, node_0); _fx_catch_0: ; FX_CHECK_EXN(_fx_catch_1); } _fx_M8CompilerFM5link2LS2LSLS(v_14, ficus_path_0, &search_path_0, 0); _fx_catch_1: ; if (v_14) { _fx_free_LS(&v_14); } } FX_CHECK_EXN(_fx_cleanup); fx_str_t slit_6 = FX_MAKE_STR(""); fx_copy_str(&slit_6, &found_0); int_ i_0 = 0;
_fx_LS lst_1 = search_path_0; for (; lst_1; lst_1 = lst_1->tl, i_0 += 1) { fx_str_t builtins_fx_0 = {0}; fx_str_t ficus_h_0 = {0}; fx_str_t v_15 = {0}; _fx_LS v_16 = 0; _fx_Ta2LS v_17 = {0}; _fx_LS v_18 = 0; fx_str_t* d_0 = &lst_1->hd; fx_str_t slit_7 = FX_MAKE_STR("Builtins.fx"); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(d_0, &slit_7, &builtins_fx_0, 0), _fx_catch_4); fx_str_t slit_8 = FX_MAKE_STR("../runtime/ficus/ficus.h"); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(d_0, &slit_8, &ficus_h_0, 0), _fx_catch_4); bool v_19; bool res_0; FX_CALL(_fx_M8FilenameFM6existsB1S(&builtins_fx_0, &res_0, 0), _fx_catch_4); if (res_0) { FX_CALL(_fx_M8FilenameFM6existsB1S(&ficus_h_0, &v_19, 0), _fx_catch_4); } else { v_19 = false; } if (v_19) { FX_CALL(_fx_M8FilenameFM7dirnameS1S(d_0, &v_15, 0), _fx_catch_4); FX_FREE_STR(&found_0); fx_copy_str(&v_15, &found_0); if (i_0 < std_ficus_path_len_0) { FX_CALL(_fx_cons_LS(d_0, 0, true, &v_16), _fx_catch_4); _fx_make_Ta2LS(ficus_path_0, v_16, &v_17); if (v_17.t0 == 0) { FX_COPY_PTR(v_16, &v_18); } else if (v_17.t1 == 0) { FX_COPY_PTR(ficus_path_0, &v_18); } else { _fx_LS v_20 = 0; _fx_LS lstend_1 = 0; _fx_LS lst_2 = ficus_path_0; for (; lst_2; lst_2 = lst_2->tl) { fx_str_t* x_1 = &lst_2->hd; _fx_LS node_1 = 0; FX_CALL(_fx_cons_LS(x_1, 0, false, &node_1), _fx_catch_2); FX_LIST_APPEND(v_20, lstend_1, node_1); _fx_catch_2: ; FX_CHECK_EXN(_fx_catch_3); } _fx_M8CompilerFM5link2LS2LSLS(v_20, v_16, &v_18, 0); _fx_catch_3: ; if (v_20) { _fx_free_LS(&v_20); } } FX_CHECK_EXN(_fx_catch_4); _fx_free_LS(&ficus_path_0); FX_COPY_PTR(v_18, &ficus_path_0); } FX_BREAK(_fx_catch_4); } _fx_catch_4: ; if (v_18) { _fx_free_LS(&v_18); } _fx_free_Ta2LS(&v_17); if (v_16) { _fx_free_LS(&v_16); } FX_FREE_STR(&v_15); FX_FREE_STR(&ficus_h_0); FX_FREE_STR(&builtins_fx_0); FX_CHECK_BREAK(); FX_CHECK_EXN(_fx_cleanup); } _fx_make_T2SLS(&found_0, ficus_path_0, fx_result); _fx_cleanup: ; if (ficus_path_0) { _fx_free_LS(&ficus_path_0); } FX_FREE_STR(&v_0); FX_FREE_STR(&v_1);
/* (continuation of Compiler.find_ficus_dirs: final cleanup of temporaries) */
FX_FREE_STR(&v_2); FX_FREE_STR(&ficus_app_path_0); FX_FREE_STR(&v_3); FX_FREE_STR(&ficus_pp_path_0); FX_FREE_STR(&v_4); FX_FREE_STR(&v_5); FX_FREE_STR(&v_6); FX_FREE_STR(&v_7); FX_FREE_STR(&ficus_inst_path_0); FX_FREE_STR(&v_8); FX_FREE_STR(&v_9); FX_FREE_STR(&v_10); FX_FREE_STR(&v_11); if (v_12) { _fx_free_LS(&v_12); } if (std_ficus_path_0) { _fx_free_LS(&std_ficus_path_0); } _fx_free_Ta2LS(&v_13); if (search_path_0) { _fx_free_LS(&search_path_0); } FX_FREE_STR(&found_0); return fx_status; }
/* Compiler.parse_all(fname0, ficus_path): parses the root module and,
 * transitively, every module it depends on. Include dirs = {cwd, dir of
 * fname0} + Options.opt.include_path + ficus_path, each normalized against
 * cwd. A worklist (`queue_0`) of module indices is drained: each unparsed
 * module is marked parsed, gets its preamble via get_preamble, is handed to
 * Parser.parse, and its (reversed) dependency list is pushed onto the queue.
 * Lexer/parser errors are caught, printed with location info, and recorded by
 * clearing `ok_0` rather than aborting. Returns whether all parses succeeded. */
FX_EXTERN_C int _fx_M8CompilerFM9parse_allB2SLS( fx_str_t* fname0_0, struct _fx_LS_data_t* ficus_path_0, bool* fx_result, void* fx_fv) { fx_str_t cwd_0 = {0}; fx_str_t fname0_1 = {0}; fx_str_t dir0_0 = {0}; _fx_LS inc_dirs0_0 = 0; _fx_LS v_0 = 0; _fx_LS v_1 = 0; _fx_LS inc_dirs0_1 = 0; _fx_LS inc_dirs0_2 = 0; _fx_LS inc_dirs0_3 = 0; fx_str_t v_2 = {0}; fx_str_t v_3 = {0}; _fx_Li queue_0 = 0; int fx_status = 0; FX_CALL(_fx_M8FilenameFM6getcwdS0(&cwd_0, 0), _fx_cleanup); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&cwd_0, fname0_0, &fname0_1, 0), _fx_cleanup); FX_CALL(_fx_M8FilenameFM7dirnameS1S(&fname0_1, &dir0_0, 0), _fx_cleanup); bool v_4 = _fx_F6__eq__B2SS(&dir0_0, &cwd_0, 0); if (v_4) { FX_CALL(_fx_cons_LS(&cwd_0, 0, true, &inc_dirs0_0), _fx_cleanup); } else { FX_CALL(_fx_cons_LS(&cwd_0, 0, true, &v_0), _fx_cleanup); FX_CALL(_fx_cons_LS(&dir0_0, v_0, true, &inc_dirs0_0), _fx_cleanup); } FX_COPY_PTR(_fx_g12Options__opt.include_path, &v_1); if (inc_dirs0_0 == 0) { FX_COPY_PTR(v_1, &inc_dirs0_1); } else if (v_1 == 0) { FX_COPY_PTR(inc_dirs0_0, &inc_dirs0_1); } else { _fx_LS v_5 = 0; _fx_LS lstend_0 = 0; _fx_LS lst_0 = inc_dirs0_0; for (; lst_0; lst_0 = lst_0->tl) { fx_str_t* x_0 = &lst_0->hd; _fx_LS node_0 = 0; FX_CALL(_fx_cons_LS(x_0, 0, false, &node_0), _fx_catch_0); FX_LIST_APPEND(v_5, lstend_0, node_0); _fx_catch_0: ; FX_CHECK_EXN(_fx_catch_1); } _fx_M8CompilerFM5link2LS2LSLS(v_5, v_1, &inc_dirs0_1, 0); _fx_catch_1: ; if (v_5) { _fx_free_LS(&v_5); } } FX_CHECK_EXN(_fx_cleanup);
if (inc_dirs0_1 == 0) { FX_COPY_PTR(ficus_path_0, &inc_dirs0_2); } else if (ficus_path_0 == 0) { FX_COPY_PTR(inc_dirs0_1, &inc_dirs0_2); } else { _fx_LS v_6 = 0; _fx_LS lstend_1 = 0; _fx_LS lst_1 = inc_dirs0_1; for (; lst_1; lst_1 = lst_1->tl) { fx_str_t* x_1 = &lst_1->hd; _fx_LS node_1 = 0; FX_CALL(_fx_cons_LS(x_1, 0, false, &node_1), _fx_catch_2); FX_LIST_APPEND(v_6, lstend_1, node_1); _fx_catch_2: ; FX_CHECK_EXN(_fx_catch_3); } _fx_M8CompilerFM5link2LS2LSLS(v_6, ficus_path_0, &inc_dirs0_2, 0); _fx_catch_3: ; if (v_6) { _fx_free_LS(&v_6); } } FX_CHECK_EXN(_fx_cleanup); _fx_LS lstend_2 = 0; _fx_LS lst_2 = inc_dirs0_2; for (; lst_2; lst_2 = lst_2->tl) { fx_str_t res_0 = {0}; fx_str_t* d_0 = &lst_2->hd; FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&cwd_0, d_0, &res_0, 0), _fx_catch_4); _fx_LS node_2 = 0; FX_CALL(_fx_cons_LS(&res_0, 0, false, &node_2), _fx_catch_4); FX_LIST_APPEND(inc_dirs0_3, lstend_2, node_2); _fx_catch_4: ; FX_FREE_STR(&res_0); FX_CHECK_EXN(_fx_cleanup); } FX_CALL(_fx_M8FilenameFM8basenameS1S(&fname0_1, &v_2, 0), _fx_cleanup); FX_CALL(_fx_M8FilenameFM16remove_extensionS1S(&v_2, &v_3, 0), _fx_cleanup); _fx_R9Ast__id_t name0_id_0; FX_CALL(_fx_M3AstFM6get_idRM4id_t1S(&v_3, &name0_id_0, 0), _fx_cleanup); int_ m_idx_0; FX_CALL(_fx_M3AstFM11find_modulei2RM4id_tS(&name0_id_0, &fname0_1, &m_idx_0, 0), _fx_cleanup); FX_CALL(_fx_cons_Li(m_idx_0, 0, true, &queue_0), _fx_cleanup); bool ok_0 = true; while (queue_0 != 0) { _fx_Li v_7 = 0; _fx_N16Ast__defmodule_t minfo_0 = 0; fx_str_t mfname_0 = {0}; fx_exn_t exn_0 = {0}; int_ m_idx_1; if (queue_0 != 0) { m_idx_1 = queue_0->hd; } else { FX_FAST_THROW(FX_EXN_NullListError, _fx_catch_13); } FX_CHECK_EXN(_fx_catch_13); if (queue_0 != 0) { FX_COPY_PTR(queue_0->tl, &v_7); } else { FX_FAST_THROW(FX_EXN_NullListError, _fx_catch_13); } FX_CHECK_EXN(_fx_catch_13); FX_FREE_LIST_SIMPLE(&queue_0); FX_COPY_PTR(v_7, &queue_0); FX_CHKIDX(FX_CHKIDX1(_fx_g16Ast__all_modules, 0, m_idx_1), _fx_catch_13);
FX_COPY_PTR(*FX_PTR_1D(_fx_N16Ast__defmodule_t, _fx_g16Ast__all_modules, m_idx_1), &minfo_0); fx_copy_str(&minfo_0->u.defmodule_t.t1, &mfname_0); if (!minfo_0->u.defmodule_t.t7) { fx_str_t dir1_0 = {0}; _fx_LS v_8 = 0; _fx_LS inc_dirs_0 = 0; _fx_LN14Lexer__token_t preamble_0 = 0; _fx_Li v_9 = 0; _fx_Li __fold_result___0 = 0; _fx_Li v_10 = 0; FX_CHKIDX(FX_CHKIDX1(_fx_g16Ast__all_modules, 0, m_idx_1), _fx_catch_9); (*FX_PTR_1D(_fx_N16Ast__defmodule_t, _fx_g16Ast__all_modules, m_idx_1))->u.defmodule_t.t7 = true; FX_CALL(_fx_M8FilenameFM7dirnameS1S(&mfname_0, &dir1_0, 0), _fx_catch_9); bool v_11 = _fx_F6__eq__B2SS(&dir1_0, &dir0_0, 0); if (!v_11) { FX_CALL(_fx_cons_LS(&dir1_0, 0, true, &v_8), _fx_catch_9); } if (v_8 == 0) { FX_COPY_PTR(inc_dirs0_3, &inc_dirs_0); } else if (inc_dirs0_3 == 0) { FX_COPY_PTR(v_8, &inc_dirs_0); } else { _fx_LS v_12 = 0; _fx_LS lstend_3 = 0; _fx_LS lst_3 = v_8; for (; lst_3; lst_3 = lst_3->tl) { fx_str_t* x_2 = &lst_3->hd; _fx_LS node_3 = 0; FX_CALL(_fx_cons_LS(x_2, 0, false, &node_3), _fx_catch_5); FX_LIST_APPEND(v_12, lstend_3, node_3); _fx_catch_5: ; FX_CHECK_EXN(_fx_catch_6); } _fx_M8CompilerFM5link2LS2LSLS(v_12, inc_dirs0_3, &inc_dirs_0, 0); _fx_catch_6: ; if (v_12) { _fx_free_LS(&v_12); } } FX_CHECK_EXN(_fx_catch_9); FX_CALL(_fx_M8CompilerFM12get_preambleLN14Lexer__token_t1S(&mfname_0, &preamble_0, 0), _fx_catch_9); bool v_13; FX_CALL(_fx_M6ParserFM5parseB3iLN14Lexer__token_tLS(m_idx_1, preamble_0, inc_dirs_0, &v_13, 0), _fx_catch_9); ok_0 = ok_0 && v_13; FX_CHKIDX(FX_CHKIDX1(_fx_g16Ast__all_modules, 0, m_idx_1), _fx_catch_9); FX_COPY_PTR((*FX_PTR_1D(_fx_N16Ast__defmodule_t, _fx_g16Ast__all_modules, m_idx_1))->u.defmodule_t.t5, &v_9); _fx_Li lst_4 = v_9; for (; lst_4; lst_4 = lst_4->tl) { _fx_Li r_0 = 0; int_ a_0 = lst_4->hd; FX_COPY_PTR(__fold_result___0, &r_0); FX_CALL(_fx_cons_Li(a_0, r_0, false, &r_0), _fx_catch_7); FX_FREE_LIST_SIMPLE(&__fold_result___0); FX_COPY_PTR(r_0, &__fold_result___0); _fx_catch_7: ;
FX_FREE_LIST_SIMPLE(&r_0); FX_CHECK_EXN(_fx_catch_9); } FX_COPY_PTR(__fold_result___0, &v_10); _fx_Li lst_5 = v_10; for (; lst_5; lst_5 = lst_5->tl) { _fx_N16Ast__defmodule_t dep_minfo_0 = 0; _fx_Li v_14 = 0; int_ dep_0 = lst_5->hd; FX_CALL(_fx_M3AstFM10get_moduleN16Ast__defmodule_t1i(dep_0, &dep_minfo_0, 0), _fx_catch_8); if (!dep_minfo_0->u.defmodule_t.t7) { FX_CALL(_fx_cons_Li(dep_0, queue_0, true, &v_14), _fx_catch_8); FX_FREE_LIST_SIMPLE(&queue_0); FX_COPY_PTR(v_14, &queue_0); } _fx_catch_8: ; FX_FREE_LIST_SIMPLE(&v_14); if (dep_minfo_0) { _fx_free_N16Ast__defmodule_t(&dep_minfo_0); } FX_CHECK_EXN(_fx_catch_9); } _fx_catch_9: ; FX_FREE_STR(&dir1_0); if (v_8) { _fx_free_LS(&v_8); } if (inc_dirs_0) { _fx_free_LS(&inc_dirs_0); } if (preamble_0) { _fx_free_LN14Lexer__token_t(&preamble_0); } FX_FREE_LIST_SIMPLE(&v_9); FX_FREE_LIST_SIMPLE(&__fold_result___0); FX_FREE_LIST_SIMPLE(&v_10); if (fx_status < 0) { fx_exn_get_and_reset(fx_status, &exn_0); fx_status = 0; int tag_0 = exn_0.tag; if (tag_0 == _FX_EXN_E22LexerUtils__LexerError) { fx_str_t v_15 = {0}; fx_str_t v_16 = {0}; fx_str_t v_17 = {0}; fx_str_t v_18 = {0}; fx_str_t v_19 = {0}; _fx_T2Ta2iS* vcase_0 = &FX_EXN_DATA(_fx_E22LexerUtils__LexerError_data_t, exn_0.data); _fx_Ta2i* v_20 = &vcase_0->t0; FX_CALL(_fx_F6stringS1S(&mfname_0, &v_15, 0), _fx_catch_10); FX_CALL(_fx_F6stringS1i(v_20->t0, &v_16, 0), _fx_catch_10); FX_CALL(_fx_F6stringS1i(v_20->t1, &v_17, 0), _fx_catch_10); FX_CALL(_fx_F6stringS1S(&vcase_0->t1, &v_18, 0), _fx_catch_10); fx_str_t slit_0 = FX_MAKE_STR(":"); fx_str_t slit_1 = FX_MAKE_STR(":"); fx_str_t slit_2 = FX_MAKE_STR(": error: "); fx_str_t slit_3 = FX_MAKE_STR("\n"); { const fx_str_t strs_0[] = { v_15, slit_0, v_16, slit_1, v_17, slit_2, v_18, slit_3 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 8, &v_19), _fx_catch_10); } _fx_F12print_stringv1S(&v_19, 0); fx_str_t slit_4 = FX_MAKE_STR("\n"); _fx_F12print_stringv1S(&slit_4, 0); ok_0 = false; _fx_catch_10: ; FX_FREE_STR(&v_19); FX_FREE_STR(&v_18);
FX_FREE_STR(&v_17); FX_FREE_STR(&v_16); FX_FREE_STR(&v_15); } else if (tag_0 == _FX_EXN_E18Parser__ParseError) { fx_str_t v_21 = {0}; fx_str_t v_22 = {0}; fx_str_t v_23 = {0}; _fx_T2R10Ast__loc_tS* vcase_1 = &FX_EXN_DATA(_fx_E18Parser__ParseError_data_t, exn_0.data); FX_CALL(_fx_M3AstFM6stringS1RM5loc_t(&vcase_1->t0, &v_21, 0), _fx_catch_11); FX_CALL(_fx_F6stringS1S(&vcase_1->t1, &v_22, 0), _fx_catch_11); fx_str_t slit_5 = FX_MAKE_STR(": error: "); fx_str_t slit_6 = FX_MAKE_STR("\n"); { const fx_str_t strs_1[] = { v_21, slit_5, v_22, slit_6 }; FX_CALL(fx_strjoin(0, 0, 0, strs_1, 4, &v_23), _fx_catch_11); } _fx_F12print_stringv1S(&v_23, 0); fx_str_t slit_7 = FX_MAKE_STR("\n"); _fx_F12print_stringv1S(&slit_7, 0); ok_0 = false; _fx_catch_11: ; FX_FREE_STR(&v_23); FX_FREE_STR(&v_22); FX_FREE_STR(&v_21); } else { fx_str_t v_24 = {0}; fx_str_t v_25 = {0}; fx_str_t v_26 = {0}; FX_CALL(_fx_F6stringS1S(&mfname_0, &v_24, 0), _fx_catch_12); FX_CALL(_fx_F6stringS1E(&exn_0, &v_25, 0), _fx_catch_12);
/* FIXME(review): the runtime message below says " occured" (sic, should be
 * "occurred"); the typo originates in the Ficus source — this doc-only edit
 * must keep runtime strings byte-identical, so it is flagged, not fixed. */
fx_str_t slit_8 = FX_MAKE_STR(": exception "); fx_str_t slit_9 = FX_MAKE_STR(" occured"); { const fx_str_t strs_2[] = { v_24, slit_8, v_25, slit_9 }; FX_CALL(fx_strjoin(0, 0, 0, strs_2, 4, &v_26), _fx_catch_12); } _fx_F12print_stringv1S(&v_26, 0); fx_str_t slit_10 = FX_MAKE_STR("\n"); _fx_F12print_stringv1S(&slit_10, 0); ok_0 = false; _fx_catch_12: ; FX_FREE_STR(&v_26); FX_FREE_STR(&v_25); FX_FREE_STR(&v_24); } FX_CHECK_EXN(_fx_catch_13); } } _fx_catch_13: ; fx_free_exn(&exn_0); FX_FREE_STR(&mfname_0); if (minfo_0) { _fx_free_N16Ast__defmodule_t(&minfo_0); } FX_FREE_LIST_SIMPLE(&v_7); FX_CHECK_EXN(_fx_cleanup); } *fx_result = ok_0; _fx_cleanup: ; FX_FREE_STR(&cwd_0); FX_FREE_STR(&fname0_1); FX_FREE_STR(&dir0_0); if (inc_dirs0_0) { _fx_free_LS(&inc_dirs0_0); } if (v_0) { _fx_free_LS(&v_0); } if (v_1) { _fx_free_LS(&v_1); } if (inc_dirs0_1) { _fx_free_LS(&inc_dirs0_1); } if (inc_dirs0_2) { _fx_free_LS(&inc_dirs0_2); } if (inc_dirs0_3) { _fx_free_LS(&inc_dirs0_3); } FX_FREE_STR(&v_2);
/* (continuation of Compiler.parse_all: final cleanup) */
FX_FREE_STR(&v_3); FX_FREE_LIST_SIMPLE(&queue_0); return fx_status; }
/* Compiler.toposort(graph: (int, int list) list): topological sort of the
 * module dependency graph. Converts the adjacency lists into an array, runs
 * `dfs` from every not-yet-processed vertex (accumulating into result_ref in
 * reverse finish order), then reverses the accumulated list for the result. */
FX_EXTERN_C int _fx_M8CompilerFM8toposortLi1LT2iLi( struct _fx_LT2iLi_data_t* graph_0, struct _fx_Li_data_t** fx_result, void* fx_fv) { fx_arr_t graph_1 = {0}; fx_arr_t processed_0 = {0}; _fx_rLi result_ref_0 = 0; _fx_Li __fold_result___0 = 0; _fx_Li result_0 = 0; int fx_status = 0; _fx_Li* dstptr_0 = 0; _fx_LT2iLi lst_0 = graph_0; int_ len_0 = fx_list_length(lst_0); { const int_ shape_0[] = { len_0 }; FX_CALL(fx_make_arr(1, shape_0, sizeof(_fx_Li), (fx_free_t)fx_free_list_simple, (fx_copy_t)fx_copy_ptr, 0, &graph_1), _fx_cleanup); } dstptr_0 = (_fx_Li*)graph_1.data; for (; lst_0; lst_0 = lst_0->tl, dstptr_0++) { _fx_T2iLi* __pat___0 = &lst_0->hd; FX_COPY_PTR(__pat___0->t1, dstptr_0); } int_ nvtx_0 = FX_ARR_SIZE(graph_1, 0); bool* dstptr_1 = 0; { const int_ shape_1[] = { nvtx_0 }; FX_CALL(fx_make_arr(1, shape_1, sizeof(bool), 0, 0, 0, &processed_0), _fx_cleanup); } dstptr_1 = (bool*)processed_0.data; for (int_ i_0 = 0; i_0 < nvtx_0; i_0++, dstptr_1++) { *dstptr_1 = false; } FX_CALL(_fx_make_rLi(0, &result_ref_0), _fx_cleanup); FX_CHKIDX_RANGE(FX_ARR_SIZE(processed_0, 0), 0, nvtx_0, 1, 1, 0, _fx_cleanup); for (int_ i_1 = 0; i_1 < nvtx_0; i_1++) { if (*FX_PTR_1D(bool, processed_0, i_1)) { FX_CONTINUE(_fx_catch_0); } FX_CALL(_fx_M8CompilerFM3dfsv5iLiA1LiA1BrLi(i_1, 0, &graph_1, &processed_0, result_ref_0, 0), _fx_catch_0); _fx_catch_0: ; FX_CHECK_CONTINUE(); FX_CHECK_EXN(_fx_cleanup); } FX_COPY_PTR(result_ref_0->data, &result_0); _fx_Li lst_1 = result_0; for (; lst_1; lst_1 = lst_1->tl) { _fx_Li r_0 = 0; int_ a_0 = lst_1->hd; FX_COPY_PTR(__fold_result___0, &r_0); FX_CALL(_fx_cons_Li(a_0, r_0, false, &r_0), _fx_catch_1); FX_FREE_LIST_SIMPLE(&__fold_result___0); FX_COPY_PTR(r_0, &__fold_result___0); _fx_catch_1: ; FX_FREE_LIST_SIMPLE(&r_0); FX_CHECK_EXN(_fx_cleanup); } FX_COPY_PTR(__fold_result___0, fx_result); _fx_cleanup: ; FX_FREE_ARR(&graph_1); FX_FREE_ARR(&processed_0); if (result_ref_0) {
_fx_free_rLi(&result_ref_0); } FX_FREE_LIST_SIMPLE(&__fold_result___0); FX_FREE_LIST_SIMPLE(&result_0); return fx_status; }
/* Compiler.dfs(i, visited, graph, processed, result_ref): recursive DFS used
 * by toposort. Detects a cycle by scanning the `visited` path list for vertex
 * i; on a cycle, formats the module names along the path and throws Fail.
 * Otherwise recurses into unprocessed dependencies, then prepends i to
 * *result_ref and marks it processed. Checks recursion depth via
 * fx_check_stack() on entry.
 * FIXME(review): the error literal says "cyclib dependency" (sic, should be
 * "cyclic"); the typo originates in the Ficus source — this doc-only edit must
 * keep runtime strings byte-identical, so it is flagged, not fixed. */
static int _fx_M8CompilerFM3dfsv5iLiA1LiA1BrLi( int_ i_0, struct _fx_Li_data_t* visited_0, fx_arr_t* graph_0, fx_arr_t* processed_0, struct _fx_rLi_data_t* result_ref_0, void* fx_fv) { _fx_Li deps_0 = 0; _fx_LS v_0 = 0; fx_str_t vlist_0 = {0}; fx_str_t v_1 = {0}; fx_str_t v_2 = {0}; fx_exn_t v_3 = {0}; _fx_Li visited_1 = 0; _fx_Li v_4 = 0; int fx_status = 0; FX_CALL(fx_check_stack(), _fx_cleanup); _fx_Li* result_0 = &result_ref_0->data; FX_CHKIDX(FX_CHKIDX1(*graph_0, 0, i_0), _fx_cleanup); FX_COPY_PTR(*FX_PTR_1D(_fx_Li, *graph_0, i_0), &deps_0); bool __fold_result___0 = false; _fx_Li lst_0 = visited_0; for (; lst_0; lst_0 = lst_0->tl) { int_ b_0 = lst_0->hd; if (i_0 == b_0) { __fold_result___0 = true; FX_BREAK(_fx_catch_0); } _fx_catch_0: ; FX_CHECK_BREAK(); FX_CHECK_EXN(_fx_cleanup); } if (__fold_result___0) { _fx_LS lstend_0 = 0; _fx_Li lst_1 = visited_0; for (; lst_1; lst_1 = lst_1->tl) { fx_str_t res_0 = {0}; int_ j_0 = lst_1->hd; _fx_R9Ast__id_t v_5; FX_CALL(_fx_M3AstFM15get_module_nameRM4id_t1i(j_0, &v_5, 0), _fx_catch_1); FX_CALL(_fx_M3AstFM2ppS1RM4id_t(&v_5, &res_0, 0), _fx_catch_1); _fx_LS node_0 = 0; FX_CALL(_fx_cons_LS(&res_0, 0, false, &node_0), _fx_catch_1); FX_LIST_APPEND(v_0, lstend_0, node_0); _fx_catch_1: ; FX_FREE_STR(&res_0); FX_CHECK_EXN(_fx_cleanup); } fx_str_t slit_0 = FX_MAKE_STR(", "); FX_CALL(_fx_F4joinS2SLS(&slit_0, v_0, &vlist_0, 0), _fx_cleanup); FX_CALL(_fx_F6stringS1S(&vlist_0, &v_1, 0), _fx_cleanup); fx_str_t slit_1 = FX_MAKE_STR("error: cyclib dependency between the modules: "); { const fx_str_t strs_0[] = { slit_1, v_1 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 2, &v_2), _fx_cleanup); } FX_CALL(_fx_F9make_FailE1S(&v_2, &v_3), _fx_cleanup); FX_THROW(&v_3, true, _fx_cleanup); } FX_CALL(_fx_cons_Li(i_0, visited_0, true, &visited_1), _fx_cleanup); _fx_Li lst_2 = deps_0; for (; lst_2; lst_2 = lst_2->tl) { int_ j_1 =
lst_2->hd; FX_CHKIDX(FX_CHKIDX1(*processed_0, 0, j_1), _fx_catch_2); if (*FX_PTR_1D(bool, *processed_0, j_1)) { FX_CONTINUE(_fx_catch_2); } FX_CALL(_fx_M8CompilerFM3dfsv5iLiA1LiA1BrLi(j_1, visited_1, graph_0, processed_0, result_ref_0, 0), _fx_catch_2); _fx_catch_2: ; FX_CHECK_CONTINUE(); FX_CHECK_EXN(_fx_cleanup); } FX_CALL(_fx_cons_Li(i_0, *result_0, true, &v_4), _fx_cleanup); FX_FREE_LIST_SIMPLE(result_0); FX_COPY_PTR(v_4, result_0); FX_CHKIDX(FX_CHKIDX1(*processed_0, 0, i_0), _fx_cleanup); *FX_PTR_1D(bool, *processed_0, i_0) = true; _fx_cleanup: ; FX_FREE_LIST_SIMPLE(&deps_0); if (v_0) { _fx_free_LS(&v_0); } FX_FREE_STR(&vlist_0); FX_FREE_STR(&v_1); FX_FREE_STR(&v_2); fx_free_exn(&v_3); FX_FREE_LIST_SIMPLE(&visited_1); FX_FREE_LIST_SIMPLE(&v_4); return fx_status; }
/* Compiler.k_skip_some(kmods): incremental-build filter over K-form modules.
 * Creates the build directories, then for each module: pretty-prints its
 * K-form, compares it with the previously saved <name>.k in the build dir,
 * and marks the module "skip" when the K-form is unchanged AND all of its
 * dependencies are also skipped. When the K-form changed, stale .c and .o/.obj
 * files are removed and the new .k is written. (Body continues below.) */
FX_EXTERN_C int _fx_M8CompilerFM11k_skip_someLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv) { fx_arr_t skip_flags_0 = {0}; fx_str_t build_root_dir_0 = {0}; fx_str_t build_dir_0 = {0}; fx_str_t obj_ext_0 = {0}; _fx_LR17K_form__kmodule_t kmods_1 = 0; fx_exn_t v_0 = {0}; int fx_status = 0; bool* dstptr_0 = 0; int_ v_1 = FX_ARR_SIZE(_fx_g16Ast__all_modules, 0); { const int_ shape_0[] = { v_1 }; FX_CALL(fx_make_arr(1, shape_0, sizeof(bool), 0, 0, 0, &skip_flags_0), _fx_cleanup); } dstptr_0 = (bool*)skip_flags_0.data; for (int_ i_0 = 0; i_0 < v_1; i_0++, dstptr_0++) { *dstptr_0 = false; } fx_copy_str(&_fx_g12Options__opt.build_rootdir, &build_root_dir_0); bool ok_0; FX_CALL(_fx_M3SysFM5mkdirB2Si(&build_root_dir_0, 493, &ok_0, 0), _fx_cleanup); fx_copy_str(&_fx_g12Options__opt.build_dir, &build_dir_0); bool ok_1; if (ok_0) { FX_CALL(_fx_M3SysFM5mkdirB2Si(&build_dir_0, 493, &ok_1, 0), _fx_cleanup); } else { ok_1 = false; } if (_fx_g10Sys__win32) { fx_str_t slit_0 = FX_MAKE_STR(".obj"); fx_copy_str(&slit_0, &obj_ext_0); } else { fx_str_t slit_1 = FX_MAKE_STR(".o"); fx_copy_str(&slit_1,
/* (continuation of Compiler.k_skip_some: per-module loop) */
&obj_ext_0); } _fx_LR17K_form__kmodule_t lstend_0 = 0; _fx_LR17K_form__kmodule_t lst_0 = kmods_0; for (; lst_0; lst_0 = lst_0->tl) { _fx_R14Ast__pragmas_t km_pragmas_0 = {0}; _fx_Li km_deps_0 = 0; _fx_LN14K_form__kexp_t km_top_0 = 0; fx_str_t km_cname_0 = {0}; fx_str_t ext_0 = {0}; fx_str_t mname_0 = {0}; fx_str_t cname_0 = {0}; fx_str_t k_filename_0 = {0}; fx_str_t c_filename_0 = {0}; fx_str_t o_filename_0 = {0}; fx_str_t new_kform_0 = {0}; fx_str_t old_kform_0 = {0}; fx_exn_t exn_0 = {0}; _fx_T3BBS v_2 = {0}; fx_exn_t exn_1 = {0}; fx_str_t v_3 = {0}; fx_str_t v_4 = {0}; fx_str_t v_5 = {0}; fx_str_t status_j_0 = {0}; fx_str_t status_j_1 = {0}; fx_str_t v_6 = {0}; fx_str_t v_7 = {0}; fx_str_t v_8 = {0}; fx_str_t v_9 = {0}; fx_str_t v_10 = {0}; _fx_R17K_form__kmodule_t rec_0 = {0}; _fx_R17K_form__kmodule_t* km_0 = &lst_0->hd; _fx_copy_R14Ast__pragmas_t(&km_0->km_pragmas, &km_pragmas_0); FX_COPY_PTR(km_0->km_deps, &km_deps_0); FX_COPY_PTR(km_0->km_top, &km_top_0); fx_copy_str(&km_0->km_cname, &km_cname_0); int_ km_idx_0 = km_0->km_idx; bool is_cpp_0; if (_fx_g12Options__opt.compile_by_cpp) { is_cpp_0 = true; } else { is_cpp_0 = km_pragmas_0.pragma_cpp; } if (is_cpp_0) { fx_str_t slit_2 = FX_MAKE_STR(".cpp"); fx_copy_str(&slit_2, &ext_0); } else { fx_str_t slit_3 = FX_MAKE_STR(".c"); fx_copy_str(&slit_3, &ext_0); } FX_CALL(_fx_M8K_mangleFM12mangle_mnameS1S(&km_cname_0, &mname_0, 0), _fx_catch_5); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&build_dir_0, &mname_0, &cname_0, 0), _fx_catch_5); fx_str_t slit_4 = FX_MAKE_STR(".k"); { const fx_str_t strs_0[] = { cname_0, slit_4 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 2, &k_filename_0), _fx_catch_5); } { const fx_str_t strs_1[] = { cname_0, ext_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_1, 2, &c_filename_0), _fx_catch_5); } { const fx_str_t strs_2[] = { cname_0, obj_ext_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_2, 2, &o_filename_0), _fx_catch_5); } FX_CALL(_fx_M4K_ppFM16pp_top_to_stringS1LN14K_form__kexp_t(km_top_0, &new_kform_0, 0),
_fx_catch_5); bool have_k_0; FX_CALL(_fx_M8FilenameFM6existsB1S(&k_filename_0, &have_k_0, 0), _fx_catch_5); bool have_c_0; FX_CALL(_fx_M8FilenameFM6existsB1S(&c_filename_0, &have_c_0, 0), _fx_catch_5); bool have_o_0; FX_CALL(_fx_M8FilenameFM6existsB1S(&o_filename_0, &have_o_0, 0), _fx_catch_5); bool have_all_0 = have_k_0 && have_c_0 && have_o_0; bool t_0; if (_fx_g12Options__opt.force_rebuild) { t_0 = true; } else { t_0 = !have_all_0; } if (t_0) { fx_str_t slit_5 = FX_MAKE_STR(""); fx_copy_str(&slit_5, &old_kform_0); } else { FX_CALL(_fx_M4FileFM9read_utf8S1S(&k_filename_0, &old_kform_0, 0), _fx_catch_0); _fx_catch_0: ; if (fx_status < 0) { fx_exn_get_and_reset(fx_status, &exn_0); fx_status = 0; FX_FREE_STR(&old_kform_0); int tag_0 = exn_0.tag; bool res_0; if (tag_0 == FX_EXN_IOError) { res_0 = true; } else if (tag_0 == FX_EXN_FileOpenError) { res_0 = true; } else { res_0 = false; } FX_CHECK_EXN(_fx_catch_5); if (res_0) { fx_str_t slit_6 = FX_MAKE_STR(""); fx_copy_str(&slit_6, &old_kform_0); goto _fx_endmatch_0; } FX_RETHROW(&exn_0, _fx_catch_5); _fx_endmatch_0: ; FX_CHECK_EXN(_fx_catch_5); } } bool v_11 = _fx_F6__eq__B2SS(&new_kform_0, &old_kform_0, 0); if (v_11) { fx_str_t slit_7 = FX_MAKE_STR(""); _fx_make_T3BBS(true, true, &slit_7, &v_2); } else { bool well_written_0; FX_CALL(_fx_M4FileFM10write_utf8v2SS(&k_filename_0, &new_kform_0, 0), _fx_catch_1); well_written_0 = true; _fx_catch_1: ; if (fx_status < 0) { fx_exn_get_and_reset(fx_status, &exn_1); fx_status = 0; int tag_1 = exn_1.tag; bool res_1; if (tag_1 == FX_EXN_IOError) { res_1 = true; } else if (tag_1 == FX_EXN_FileOpenError) { res_1 = true; } else { res_1 = false; } FX_CHECK_EXN(_fx_catch_5); if (res_1) { well_written_0 = false; goto _fx_endmatch_1; } FX_RETHROW(&exn_1, _fx_catch_5); _fx_endmatch_1: ; FX_CHECK_EXN(_fx_catch_5); } if (well_written_0) { fx_str_t slit_8 = FX_MAKE_STR(""); fx_copy_str(&slit_8, &v_3); } else if (_fx_g21Compiler__iscolorterm) { fx_str_t slit_9 = FX_MAKE_STR("");
FX_CALL(_fx_F6stringS1S(&slit_9, &v_4, 0), _fx_catch_5); fx_str_t slit_10 = FX_MAKE_STR("failed to write .k"); FX_CALL(_fx_F6stringS1S(&slit_10, &v_5, 0), _fx_catch_5); fx_str_t slit_11 = FX_MAKE_STR(""); { const fx_str_t strs_3[] = { v_4, v_5, slit_11 }; FX_CALL(fx_strjoin(0, 0, 0, strs_3, 3, &v_3), _fx_catch_5); } } else { fx_str_t slit_12 = FX_MAKE_STR("failed to write .k"); fx_copy_str(&slit_12, &v_3); } _fx_make_T3BBS(well_written_0, false, &v_3, &v_2); } bool ok_j_0 = v_2.t0; bool same_kform_0 = v_2.t1; fx_copy_str(&v_2.t2, &status_j_0); ok_1 = ok_1 && ok_j_0; if (!same_kform_0) { if (have_c_0) { FX_CALL(_fx_M3SysFM6removev1S(&c_filename_0, 0), _fx_catch_5); } if (have_o_0) { FX_CALL(_fx_M3SysFM6removev1S(&o_filename_0, 0), _fx_catch_5); } } bool skip_module_0; if (same_kform_0) { bool __fold_result___0 = true; _fx_Li lst_1 = km_deps_0; for (; lst_1; lst_1 = lst_1->tl) { int_ d_0 = lst_1->hd; FX_CHKIDX(FX_CHKIDX1(skip_flags_0, 0, d_0), _fx_catch_2); if (!*FX_PTR_1D(bool, skip_flags_0, d_0)) { __fold_result___0 = false; FX_BREAK(_fx_catch_2); } _fx_catch_2: ; FX_CHECK_BREAK(); FX_CHECK_EXN(_fx_catch_5); } skip_module_0 = __fold_result___0; } else { skip_module_0 = false; } if (FX_STR_LENGTH(status_j_0) != 0) { fx_copy_str(&status_j_0, &status_j_1); } else if (skip_module_0) { fx_str_t slit_13 = FX_MAKE_STR("skip"); fx_copy_str(&slit_13, &status_j_1); } else if (_fx_g21Compiler__iscolorterm) { fx_str_t slit_14 = FX_MAKE_STR(""); FX_CALL(_fx_F6stringS1S(&slit_14, &v_6, 0), _fx_catch_5); fx_str_t slit_15 = FX_MAKE_STR("process"); FX_CALL(_fx_F6stringS1S(&slit_15, &v_7, 0), _fx_catch_5); fx_str_t slit_16 = FX_MAKE_STR(""); { const fx_str_t strs_4[] = { v_6, v_7, slit_16 }; FX_CALL(fx_strjoin(0, 0, 0, strs_4, 3, &status_j_1), _fx_catch_5); } } else { fx_str_t slit_17 = FX_MAKE_STR("process"); fx_copy_str(&slit_17, &status_j_1); } FX_CALL(_fx_F6stringS1S(&km_cname_0, &v_8, 0), _fx_catch_5); FX_CALL(_fx_F6stringS1S(&status_j_1, &v_9, 0), _fx_catch_5); fx_str_t
slit_18 = FX_MAKE_STR("K "); fx_str_t slit_19 = FX_MAKE_STR(": "); { const fx_str_t strs_5[] = { slit_18, v_8, slit_19, v_9 }; FX_CALL(fx_strjoin(0, 0, 0, strs_5, 4, &v_10), _fx_catch_5); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_10, 0), _fx_catch_5); if (skip_module_0) { _fx_LN14K_form__kexp_t lst_2 = km_top_0; for (; lst_2; lst_2 = lst_2->tl) { _fx_N14K_form__kexp_t e_0 = lst_2->hd; if (FX_REC_VARIANT_TAG(e_0) == 32) { _fx_rR17K_form__kdeffun_t kf_0 = e_0->u.KDefFun; _fx_N17Ast__fun_constr_t v_12 = kf_0->data.kf_flags.fun_flag_ctor; if (v_12.tag == 1) { _fx_N14K_form__ktyp_t kf_rt_0 = 0; _fx_T2N14K_form__ktyp_tR10Ast__loc_t v_13 = {0}; _fx_N14K_form__kexp_t v_14 = 0; _fx_R17K_form__kdeffun_t v_15 = {0}; _fx_R17K_form__kdeffun_t* v_16 = &kf_0->data; _fx_R10Ast__loc_t kf_loc_0 = v_16->kf_loc; FX_COPY_PTR(v_16->kf_rt, &kf_rt_0); _fx_R16Ast__fun_flags_t kf_flags_0 = v_16->kf_flags; _fx_R17K_form__kdeffun_t* v_17 = &kf_0->data; _fx_make_T2N14K_form__ktyp_tR10Ast__loc_t(kf_rt_0, &kf_loc_0, &v_13); fx_str_t slit_20 = FX_MAKE_STR(""); FX_CALL(_fx_M6K_formFM9KExpCCodeN14K_form__kexp_t2ST2N14K_form__ktyp_tR10Ast__loc_t(&slit_20, &v_13, &v_14), _fx_catch_3); _fx_R16Ast__fun_flags_t v_18 = { kf_flags_0.fun_flag_pure, true, kf_flags_0.fun_flag_have_keywords, false, kf_flags_0.fun_flag_nothrow, kf_flags_0.fun_flag_really_nothrow, kf_flags_0.fun_flag_private, kf_flags_0.fun_flag_ctor, kf_flags_0.fun_flag_method_of, kf_flags_0.fun_flag_uses_fv, kf_flags_0.fun_flag_recursive, kf_flags_0.fun_flag_instance }; _fx_make_R17K_form__kdeffun_t(&v_17->kf_name, &v_17->kf_cname, v_17->kf_params, v_17->kf_rt, v_14, &v_18, &v_17->kf_closure, v_17->kf_scope, &v_17->kf_loc, &v_15); _fx_R17K_form__kdeffun_t* v_19 = &kf_0->data; _fx_free_R17K_form__kdeffun_t(v_19); _fx_copy_R17K_form__kdeffun_t(&v_15, v_19); _fx_catch_3: ; _fx_free_R17K_form__kdeffun_t(&v_15); if (v_14) { _fx_free_N14K_form__kexp_t(&v_14); } _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&v_13); if (kf_rt_0) {
_fx_free_N14K_form__ktyp_t(&kf_rt_0); } goto _fx_endmatch_2; } } _fx_endmatch_2: ; FX_CHECK_EXN(_fx_catch_4); _fx_catch_4: ; FX_CHECK_EXN(_fx_catch_5); } } FX_CHKIDX(FX_CHKIDX1(skip_flags_0, 0, km_idx_0), _fx_catch_5); *FX_PTR_1D(bool, skip_flags_0, km_idx_0) = skip_module_0; _fx_make_R17K_form__kmodule_t(&km_0->km_name, km_0->km_idx, km_0->km_toposort_idx, &km_0->km_cname, km_0->km_top, km_0->km_deps, skip_module_0, km_0->km_main, &km_0->km_pragmas, &rec_0); _fx_LR17K_form__kmodule_t node_0 = 0; FX_CALL(_fx_cons_LR17K_form__kmodule_t(&rec_0, 0, false, &node_0), _fx_catch_5); FX_LIST_APPEND(kmods_1, lstend_0, node_0); _fx_catch_5: ; _fx_free_R17K_form__kmodule_t(&rec_0); FX_FREE_STR(&v_10); FX_FREE_STR(&v_9); FX_FREE_STR(&v_8); FX_FREE_STR(&v_7); FX_FREE_STR(&v_6); FX_FREE_STR(&status_j_1); FX_FREE_STR(&status_j_0); FX_FREE_STR(&v_5); FX_FREE_STR(&v_4); FX_FREE_STR(&v_3); fx_free_exn(&exn_1); _fx_free_T3BBS(&v_2); fx_free_exn(&exn_0); FX_FREE_STR(&old_kform_0); FX_FREE_STR(&new_kform_0); FX_FREE_STR(&o_filename_0); FX_FREE_STR(&c_filename_0); FX_FREE_STR(&k_filename_0); FX_FREE_STR(&cname_0); FX_FREE_STR(&mname_0); FX_FREE_STR(&ext_0); FX_FREE_STR(&km_cname_0); if (km_top_0) { _fx_free_LN14K_form__kexp_t(&km_top_0); } FX_FREE_LIST_SIMPLE(&km_deps_0); _fx_free_R14Ast__pragmas_t(&km_pragmas_0); FX_CHECK_EXN(_fx_cleanup); } if (!ok_1) { fx_str_t slit_21 = FX_MAKE_STR("failed to write some k-forms"); FX_CALL(_fx_F9make_FailE1S(&slit_21, &v_0), _fx_cleanup); FX_THROW(&v_0, true, _fx_cleanup); } FX_COPY_PTR(kmods_1, fx_result); _fx_cleanup: ; FX_FREE_ARR(&skip_flags_0); FX_FREE_STR(&build_root_dir_0); FX_FREE_STR(&build_dir_0); FX_FREE_STR(&obj_ext_0); if (kmods_1) { _fx_free_LR17K_form__kmodule_t(&kmods_1); } fx_free_exn(&v_0); return fx_status; }
/* Compiler.k_optimize_all(kmods): only the signature is visible in this
 * chunk; the body continues past the end of this excerpt. */
FX_EXTERN_C int _fx_M8CompilerFM14k_optimize_allT2LR17K_form__kmodule_tB1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_T2LR17K_form__kmodule_tB* fx_result, void* fx_fv) {
/*
 * NOTE(review): this is auto-generated code — by the look of the _fx_/FX_
 * naming scheme, emitted by the Ficus compiler's C backend (confirm before
 * hand-editing; changes will likely be overwritten on regeneration).
 *
 * Body of Compiler.k_optimize_all: runs the K-form optimization pipeline
 * over the list of K-modules.  `temp_kmods_0` threads the module list
 * through the passes: each pass produces a new list, the previous one is
 * freed, and the new one is copied back.  On success the result tuple is
 * (optimized modules, _fx_g21Ast__all_compile_errs == 0).
 * Locals v_0..v_49 hold per-pass verbose-message strings and intermediate
 * module lists; the global compile-error list is reset before the pipeline.
 */
_fx_LR17K_form__kmodule_t temp_kmods_0 = 0; fx_str_t v_0 = {0}; fx_str_t v_1 = {0}; _fx_LR17K_form__kmodule_t v_2 = 0; fx_str_t v_3 = {0}; fx_str_t v_4 = {0}; _fx_LR17K_form__kmodule_t v_5 = 0; fx_str_t v_6 = {0}; fx_str_t v_7 = {0}; _fx_LR17K_form__kmodule_t v_8 = 0; fx_str_t v_9 = {0}; fx_str_t v_10 = {0}; _fx_LR17K_form__kmodule_t v_11 = 0; fx_str_t v_12 = {0}; fx_str_t v_13 = {0}; _fx_LR17K_form__kmodule_t v_14 = 0; _fx_LR17K_form__kmodule_t v_15 = 0; _fx_LR17K_form__kmodule_t v_16 = 0; fx_str_t v_17 = {0}; fx_str_t v_18 = {0}; _fx_LR17K_form__kmodule_t v_19 = 0; fx_str_t v_20 = {0}; fx_str_t v_21 = {0}; _fx_LR17K_form__kmodule_t v_22 = 0; fx_str_t v_23 = {0}; fx_str_t v_24 = {0}; _fx_LR17K_form__kmodule_t v_25 = 0; fx_str_t v_26 = {0}; fx_str_t v_27 = {0}; _fx_LR17K_form__kmodule_t v_28 = 0; fx_str_t v_29 = {0}; fx_str_t v_30 = {0}; _fx_LR17K_form__kmodule_t v_31 = 0; fx_str_t v_32 = {0}; fx_str_t v_33 = {0}; _fx_LR17K_form__kmodule_t v_34 = 0; fx_str_t v_35 = {0}; fx_str_t v_36 = {0}; _fx_LR17K_form__kmodule_t v_37 = 0; fx_str_t v_38 = {0}; fx_str_t v_39 = {0}; _fx_LR17K_form__kmodule_t v_40 = 0; fx_str_t v_41 = {0}; fx_str_t v_42 = {0}; _fx_LR17K_form__kmodule_t v_43 = 0; fx_str_t v_44 = {0}; fx_str_t v_45 = {0}; _fx_LR17K_form__kmodule_t v_46 = 0; fx_str_t v_47 = {0}; fx_str_t v_48 = {0}; _fx_LR17K_form__kmodule_t v_49 = 0; int fx_status = 0; _fx_free_LE(&_fx_g21Ast__all_compile_errs); _fx_g21Ast__all_compile_errs = 0; int_ niters_0 = _fx_g12Options__opt.optim_iters; FX_COPY_PTR(kmods_0, &temp_kmods_0); fx_str_t slit_0 = FX_MAKE_STR("remove unused"); FX_CALL(_fx_F6stringS1S(&slit_0, &v_0, 0), _fx_cleanup); fx_str_t slit_1 = FX_MAKE_STR("\t"); { const fx_str_t strs_0[] = { slit_1, v_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 2, &v_1), _fx_cleanup); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_1, 0), _fx_cleanup); FX_CALL(_fx_M15K_remove_unusedFM13remove_unusedLR17K_form__kmodule_t2LR17K_form__kmodule_tB(temp_kmods_0, true, &v_2, 0), _fx_cleanup);
/*
 * Pre-optimization passes (each logs a "\t<name>" verbose line first):
 * "annotate types" (K_annotate.annotate_types), "copy generic/inline
 * functions" (K_copy_n_skip.copy_some), "remove unused by main", then
 * "mangle & dump intermediate K-forms".
 */
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_2, &temp_kmods_0); fx_str_t slit_2 = FX_MAKE_STR("annotate types"); FX_CALL(_fx_F6stringS1S(&slit_2, &v_3, 0), _fx_cleanup); fx_str_t slit_3 = FX_MAKE_STR("\t"); { const fx_str_t strs_1[] = { slit_3, v_3 }; FX_CALL(fx_strjoin(0, 0, 0, strs_1, 2, &v_4), _fx_cleanup); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_4, 0), _fx_cleanup); FX_CALL(_fx_M10K_annotateFM14annotate_typesLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_5, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_5, &temp_kmods_0); fx_str_t slit_4 = FX_MAKE_STR("copy generic/inline functions"); FX_CALL(_fx_F6stringS1S(&slit_4, &v_6, 0), _fx_cleanup); fx_str_t slit_5 = FX_MAKE_STR("\t"); { const fx_str_t strs_2[] = { slit_5, v_6 }; FX_CALL(fx_strjoin(0, 0, 0, strs_2, 2, &v_7), _fx_cleanup); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_7, 0), _fx_cleanup); FX_CALL(_fx_M13K_copy_n_skipFM9copy_someLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_8, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_8, &temp_kmods_0); fx_str_t slit_6 = FX_MAKE_STR("remove unused by main"); FX_CALL(_fx_F6stringS1S(&slit_6, &v_9, 0), _fx_cleanup); fx_str_t slit_7 = FX_MAKE_STR("\t"); { const fx_str_t strs_3[] = { slit_7, v_9 }; FX_CALL(fx_strjoin(0, 0, 0, strs_3, 2, &v_10), _fx_cleanup); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_10, 0), _fx_cleanup); FX_CALL(_fx_M15K_remove_unusedFM21remove_unused_by_mainLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_11, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_11, &temp_kmods_0); fx_str_t slit_8 = FX_MAKE_STR("mangle & dump intermediate K-forms"); FX_CALL(_fx_F6stringS1S(&slit_8, &v_12, 0), _fx_cleanup); fx_str_t slit_9 = FX_MAKE_STR("\t"); { const fx_str_t strs_4[] = { slit_9, v_12 }; FX_CALL(fx_strjoin(0, 0, 0, strs_4, 2, &v_13), _fx_cleanup); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_13, 0), _fx_cleanup);
/*
 * mangle_all (dump=false) + mangle_locals + Compiler.k_skip_some +
 * "demangle" (K_mangle.demangle_all), then the main optimization loop:
 * iterations run for i_1 = 1..niters_0 (FX_LOOP_COUNT(1, niters_0+1, 1));
 * per-iteration temporaries v_51..v_79 are released at _fx_catch_0 below.
 */
FX_CALL(_fx_M8K_mangleFM10mangle_allLR17K_form__kmodule_t2LR17K_form__kmodule_tB(temp_kmods_0, false, &v_14, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_14, &temp_kmods_0); FX_CALL(_fx_M8K_mangleFM13mangle_localsLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_15, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_15, &temp_kmods_0); FX_CALL(_fx_M8CompilerFM11k_skip_someLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_16, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_16, &temp_kmods_0); fx_str_t slit_10 = FX_MAKE_STR("demangle"); FX_CALL(_fx_F6stringS1S(&slit_10, &v_17, 0), _fx_cleanup); fx_str_t slit_11 = FX_MAKE_STR("\t"); { const fx_str_t strs_5[] = { slit_11, v_17 }; FX_CALL(fx_strjoin(0, 0, 0, strs_5, 2, &v_18), _fx_cleanup); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_18, 0), _fx_cleanup); FX_CALL(_fx_M8K_mangleFM12demangle_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_19, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_19, &temp_kmods_0); int_ v_50 = niters_0 + 1; int_ n_0 = FX_LOOP_COUNT(1, v_50, 1); for (int_ i_0 = 0; i_0 < n_0; i_0++) { fx_str_t v_51 = {0}; fx_str_t v_52 = {0}; fx_str_t v_53 = {0}; fx_str_t v_54 = {0}; _fx_LR17K_form__kmodule_t v_55 = 0; fx_str_t v_56 = {0}; fx_str_t v_57 = {0}; _fx_LR17K_form__kmodule_t v_58 = 0; fx_str_t v_59 = {0}; fx_str_t v_60 = {0}; _fx_LR17K_form__kmodule_t v_61 = 0; fx_str_t v_62 = {0}; fx_str_t v_63 = {0}; _fx_LR17K_form__kmodule_t v_64 = 0; fx_str_t v_65 = {0}; fx_str_t v_66 = {0}; _fx_LR17K_form__kmodule_t v_67 = 0; fx_str_t v_68 = {0}; fx_str_t v_69 = {0}; _fx_LR17K_form__kmodule_t v_70 = 0; fx_str_t v_71 = {0}; fx_str_t v_72 = {0}; _fx_LR17K_form__kmodule_t v_73 = 0; fx_str_t v_74 = {0}; fx_str_t v_75 = {0}; _fx_LR17K_form__kmodule_t v_76 = 0; fx_str_t v_77 = {0}; fx_str_t v_78 = {0}; _fx_LR17K_form__kmodule_t v_79 = 0; int_ i_1 = 1 + i_0 * 1;
/*
 * Iteration body: logs "Optimization pass #<i>:"; "simple lambda lifting"
 * (K_lift_simple.lift) runs only on the first two iterations (i_1 <= 2),
 * followed by the "tailrec" (tailrec2loops_all) and "loop inv"
 * (move_loop_invs_all) passes.
 */
FX_CALL(_fx_F6stringS1i(i_1, &v_51, 0), _fx_catch_0); fx_str_t slit_12 = FX_MAKE_STR("Optimization pass #"); fx_str_t slit_13 = FX_MAKE_STR(":"); { const fx_str_t strs_6[] = { slit_12, v_51, slit_13 }; FX_CALL(fx_strjoin(0, 0, 0, strs_6, 3, &v_52), _fx_catch_0); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_52, 0), _fx_catch_0); if (i_1 <= 2) { fx_str_t slit_14 = FX_MAKE_STR("simple lambda lifting"); FX_CALL(_fx_F6stringS1S(&slit_14, &v_53, 0), _fx_catch_0); fx_str_t slit_15 = FX_MAKE_STR("\t"); { const fx_str_t strs_7[] = { slit_15, v_53 }; FX_CALL(fx_strjoin(0, 0, 0, strs_7, 2, &v_54), _fx_catch_0); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_54, 0), _fx_catch_0); FX_CALL(_fx_M13K_lift_simpleFM4liftLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_55, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_55, &temp_kmods_0); } fx_str_t slit_16 = FX_MAKE_STR("tailrec"); FX_CALL(_fx_F6stringS1S(&slit_16, &v_56, 0), _fx_catch_0); fx_str_t slit_17 = FX_MAKE_STR("\t"); { const fx_str_t strs_8[] = { slit_17, v_56 }; FX_CALL(fx_strjoin(0, 0, 0, strs_8, 2, &v_57), _fx_catch_0); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_57, 0), _fx_catch_0); FX_CALL(_fx_M9K_tailrecFM17tailrec2loops_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_58, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_58, &temp_kmods_0); fx_str_t slit_18 = FX_MAKE_STR("loop inv"); FX_CALL(_fx_F6stringS1S(&slit_18, &v_59, 0), _fx_catch_0); fx_str_t slit_19 = FX_MAKE_STR("\t"); { const fx_str_t strs_9[] = { slit_19, v_59 }; FX_CALL(fx_strjoin(0, 0, 0, strs_9, 2, &v_60), _fx_catch_0); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_60, 0), _fx_catch_0); FX_CALL(_fx_M10K_loop_invFM18move_loop_invs_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_61, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_61, &temp_kmods_0); fx_str_t slit_20 = FX_MAKE_STR("inline"); FX_CALL(_fx_F6stringS1S(&slit_20, &v_62, 0),
/*
 * The "inline" pass (K_inline.inline_some) runs only when
 * Options.opt.inline_thresh > 0; then "flatten" (flatten_all),
 * "fuse loops" (fuse_loops_all) and "fast idx"
 * (K_fast_idx.optimize_idx_checks_all).
 */
_fx_catch_0); fx_str_t slit_21 = FX_MAKE_STR("\t"); { const fx_str_t strs_10[] = { slit_21, v_62 }; FX_CALL(fx_strjoin(0, 0, 0, strs_10, 2, &v_63), _fx_catch_0); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_63, 0), _fx_catch_0); if (_fx_g12Options__opt.inline_thresh > 0) { FX_CALL(_fx_M8K_inlineFM11inline_someLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_64, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_64, &temp_kmods_0); } fx_str_t slit_22 = FX_MAKE_STR("flatten"); FX_CALL(_fx_F6stringS1S(&slit_22, &v_65, 0), _fx_catch_0); fx_str_t slit_23 = FX_MAKE_STR("\t"); { const fx_str_t strs_11[] = { slit_23, v_65 }; FX_CALL(fx_strjoin(0, 0, 0, strs_11, 2, &v_66), _fx_catch_0); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_66, 0), _fx_catch_0); FX_CALL(_fx_M9K_flattenFM11flatten_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_67, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_67, &temp_kmods_0); fx_str_t slit_24 = FX_MAKE_STR("fuse loops"); FX_CALL(_fx_F6stringS1S(&slit_24, &v_68, 0), _fx_catch_0); fx_str_t slit_25 = FX_MAKE_STR("\t"); { const fx_str_t strs_12[] = { slit_25, v_68 }; FX_CALL(fx_strjoin(0, 0, 0, strs_12, 2, &v_69), _fx_catch_0); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_69, 0), _fx_catch_0); FX_CALL(_fx_M12K_fuse_loopsFM14fuse_loops_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_70, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_70, &temp_kmods_0); fx_str_t slit_26 = FX_MAKE_STR("fast idx"); FX_CALL(_fx_F6stringS1S(&slit_26, &v_71, 0), _fx_catch_0); fx_str_t slit_27 = FX_MAKE_STR("\t"); { const fx_str_t strs_13[] = { slit_27, v_71 }; FX_CALL(fx_strjoin(0, 0, 0, strs_13, 2, &v_72), _fx_catch_0); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_72, 0), _fx_catch_0); FX_CALL(_fx_M10K_fast_idxFM23optimize_idx_checks_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_73, 0), _fx_catch_0);
/*
 * "const folding" (K_cfold_dealias.cfold_dealias) and a per-iteration
 * "remove unused"; _fx_catch_0 is the iteration cleanup label that frees
 * v_51..v_79 whether or not an error occurred (FX_CHECK_EXN at the loop
 * end then propagates any pending error to _fx_cleanup).
 */
_fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_73, &temp_kmods_0); fx_str_t slit_28 = FX_MAKE_STR("const folding"); FX_CALL(_fx_F6stringS1S(&slit_28, &v_74, 0), _fx_catch_0); fx_str_t slit_29 = FX_MAKE_STR("\t"); { const fx_str_t strs_14[] = { slit_29, v_74 }; FX_CALL(fx_strjoin(0, 0, 0, strs_14, 2, &v_75), _fx_catch_0); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_75, 0), _fx_catch_0); FX_CALL(_fx_M15K_cfold_dealiasFM13cfold_dealiasLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_76, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_76, &temp_kmods_0); fx_str_t slit_30 = FX_MAKE_STR("remove unused"); FX_CALL(_fx_F6stringS1S(&slit_30, &v_77, 0), _fx_catch_0); fx_str_t slit_31 = FX_MAKE_STR("\t"); { const fx_str_t strs_15[] = { slit_31, v_77 }; FX_CALL(fx_strjoin(0, 0, 0, strs_15, 2, &v_78), _fx_catch_0); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_78, 0), _fx_catch_0); FX_CALL( _fx_M15K_remove_unusedFM13remove_unusedLR17K_form__kmodule_t2LR17K_form__kmodule_tB(temp_kmods_0, false, &v_79, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_79, &temp_kmods_0); _fx_catch_0: ; if (v_79) { _fx_free_LR17K_form__kmodule_t(&v_79); } FX_FREE_STR(&v_78); FX_FREE_STR(&v_77); if (v_76) { _fx_free_LR17K_form__kmodule_t(&v_76); } FX_FREE_STR(&v_75); FX_FREE_STR(&v_74); if (v_73) { _fx_free_LR17K_form__kmodule_t(&v_73); } FX_FREE_STR(&v_72); FX_FREE_STR(&v_71); if (v_70) { _fx_free_LR17K_form__kmodule_t(&v_70); } FX_FREE_STR(&v_69); FX_FREE_STR(&v_68); if (v_67) { _fx_free_LR17K_form__kmodule_t(&v_67); } FX_FREE_STR(&v_66); FX_FREE_STR(&v_65); if (v_64) { _fx_free_LR17K_form__kmodule_t(&v_64); } FX_FREE_STR(&v_63); FX_FREE_STR(&v_62); if (v_61) { _fx_free_LR17K_form__kmodule_t(&v_61); } FX_FREE_STR(&v_60); FX_FREE_STR(&v_59); if (v_58) { _fx_free_LR17K_form__kmodule_t(&v_58); } FX_FREE_STR(&v_57); FX_FREE_STR(&v_56); if (v_55) { _fx_free_LR17K_form__kmodule_t(&v_55); } FX_FREE_STR(&v_54); FX_FREE_STR(&v_53);
/*
 * End of the optimization loop, then the finalization sequence
 * ("Finalizing K-form:"): nothrow wrappers (K_nothrow_wrappers),
 * mutable-freevars-to-refs (K_freevars), and "declosuring" (K_declosure).
 */
FX_FREE_STR(&v_52); FX_FREE_STR(&v_51); FX_CHECK_EXN(_fx_cleanup); } fx_str_t slit_32 = FX_MAKE_STR("Finalizing K-form:"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_32, 0), _fx_cleanup); fx_str_t slit_33 = FX_MAKE_STR("making wrappers for nothrow functions"); FX_CALL(_fx_F6stringS1S(&slit_33, &v_20, 0), _fx_cleanup); fx_str_t slit_34 = FX_MAKE_STR("\t"); { const fx_str_t strs_16[] = { slit_34, v_20 }; FX_CALL(fx_strjoin(0, 0, 0, strs_16, 2, &v_21), _fx_cleanup); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_21, 0), _fx_cleanup); FX_CALL( _fx_M18K_nothrow_wrappersFM25make_wrappers_for_nothrowLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_22, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_22, &temp_kmods_0); fx_str_t slit_35 = FX_MAKE_STR("mutable freevars referencing"); FX_CALL(_fx_F6stringS1S(&slit_35, &v_23, 0), _fx_cleanup); fx_str_t slit_36 = FX_MAKE_STR("\t"); { const fx_str_t strs_17[] = { slit_36, v_23 }; FX_CALL(fx_strjoin(0, 0, 0, strs_17, 2, &v_24), _fx_cleanup); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_24, 0), _fx_cleanup); FX_CALL(_fx_M10K_freevarsFM21mutable_freevars2refsLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_25, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_25, &temp_kmods_0); fx_str_t slit_37 = FX_MAKE_STR("declosuring"); FX_CALL(_fx_F6stringS1S(&slit_37, &v_26, 0), _fx_cleanup); fx_str_t slit_38 = FX_MAKE_STR("\t"); { const fx_str_t strs_18[] = { slit_38, v_26 }; FX_CALL(fx_strjoin(0, 0, 0, strs_18, 2, &v_27), _fx_cleanup); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_27, 0), _fx_cleanup); FX_CALL(_fx_M11K_declosureFM13declosure_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_28, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_28, &temp_kmods_0); fx_str_t slit_39 = FX_MAKE_STR("lambda lifting"); FX_CALL(_fx_F6stringS1S(&slit_39, &v_29, 0), _fx_cleanup); fx_str_t slit_40 = FX_MAKE_STR("\t"); { const fx_str_t strs_19[]
/*
 * Finalization continues: full "lambda lifting" (K_lift.lift_all),
 * "flatten", "remove unused", and the final "mangle"
 * (K_mangle.mangle_all with its second argument true this time).
 */
= { slit_40, v_29 }; FX_CALL(fx_strjoin(0, 0, 0, strs_19, 2, &v_30), _fx_cleanup); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_30, 0), _fx_cleanup); FX_CALL(_fx_M6K_liftFM8lift_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_31, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_31, &temp_kmods_0); fx_str_t slit_41 = FX_MAKE_STR("flatten"); FX_CALL(_fx_F6stringS1S(&slit_41, &v_32, 0), _fx_cleanup); fx_str_t slit_42 = FX_MAKE_STR("\t"); { const fx_str_t strs_20[] = { slit_42, v_32 }; FX_CALL(fx_strjoin(0, 0, 0, strs_20, 2, &v_33), _fx_cleanup); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_33, 0), _fx_cleanup); FX_CALL(_fx_M9K_flattenFM11flatten_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_34, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_34, &temp_kmods_0); fx_str_t slit_43 = FX_MAKE_STR("remove unused"); FX_CALL(_fx_F6stringS1S(&slit_43, &v_35, 0), _fx_cleanup); fx_str_t slit_44 = FX_MAKE_STR("\t"); { const fx_str_t strs_21[] = { slit_44, v_35 }; FX_CALL(fx_strjoin(0, 0, 0, strs_21, 2, &v_36), _fx_cleanup); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_36, 0), _fx_cleanup); FX_CALL(_fx_M15K_remove_unusedFM13remove_unusedLR17K_form__kmodule_t2LR17K_form__kmodule_tB(temp_kmods_0, false, &v_37, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_37, &temp_kmods_0); fx_str_t slit_45 = FX_MAKE_STR("mangle"); FX_CALL(_fx_F6stringS1S(&slit_45, &v_38, 0), _fx_cleanup); fx_str_t slit_46 = FX_MAKE_STR("\t"); { const fx_str_t strs_22[] = { slit_46, v_38 }; FX_CALL(fx_strjoin(0, 0, 0, strs_22, 2, &v_39), _fx_cleanup); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_39, 0), _fx_cleanup); FX_CALL(_fx_M8K_mangleFM10mangle_allLR17K_form__kmodule_t2LR17K_form__kmodule_tB(temp_kmods_0, true, &v_40, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_40, &temp_kmods_0); fx_str_t slit_47 = FX_MAKE_STR("remove unused"); FX_CALL(_fx_F6stringS1S(&slit_47, &v_41,
/*
 * Last passes: "remove unused", "mark recursive"
 * (K_inline.find_recursive_funcs_all), "annotate types"; then the result
 * tuple (modules, all_compile_errs == 0) is built and _fx_cleanup releases
 * every remaining temporary before returning fx_status.
 */
0), _fx_cleanup); fx_str_t slit_48 = FX_MAKE_STR("\t"); { const fx_str_t strs_23[] = { slit_48, v_41 }; FX_CALL(fx_strjoin(0, 0, 0, strs_23, 2, &v_42), _fx_cleanup); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_42, 0), _fx_cleanup); FX_CALL(_fx_M15K_remove_unusedFM13remove_unusedLR17K_form__kmodule_t2LR17K_form__kmodule_tB(temp_kmods_0, false, &v_43, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_43, &temp_kmods_0); fx_str_t slit_49 = FX_MAKE_STR("mark recursive"); FX_CALL(_fx_F6stringS1S(&slit_49, &v_44, 0), _fx_cleanup); fx_str_t slit_50 = FX_MAKE_STR("\t"); { const fx_str_t strs_24[] = { slit_50, v_44 }; FX_CALL(fx_strjoin(0, 0, 0, strs_24, 2, &v_45), _fx_cleanup); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_45, 0), _fx_cleanup); FX_CALL(_fx_M8K_inlineFM24find_recursive_funcs_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_46, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_46, &temp_kmods_0); fx_str_t slit_51 = FX_MAKE_STR("annotate types"); FX_CALL(_fx_F6stringS1S(&slit_51, &v_47, 0), _fx_cleanup); fx_str_t slit_52 = FX_MAKE_STR("\t"); { const fx_str_t strs_25[] = { slit_52, v_47 }; FX_CALL(fx_strjoin(0, 0, 0, strs_25, 2, &v_48), _fx_cleanup); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_48, 0), _fx_cleanup); FX_CALL(_fx_M10K_annotateFM14annotate_typesLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_49, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_49, &temp_kmods_0); _fx_make_T2LR17K_form__kmodule_tB(temp_kmods_0, _fx_g21Ast__all_compile_errs == 0, fx_result); _fx_cleanup: ; if (temp_kmods_0) { _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); } FX_FREE_STR(&v_0); FX_FREE_STR(&v_1); if (v_2) { _fx_free_LR17K_form__kmodule_t(&v_2); } FX_FREE_STR(&v_3); FX_FREE_STR(&v_4); if (v_5) { _fx_free_LR17K_form__kmodule_t(&v_5); } FX_FREE_STR(&v_6); FX_FREE_STR(&v_7); if (v_8) { _fx_free_LR17K_form__kmodule_t(&v_8); } FX_FREE_STR(&v_9); FX_FREE_STR(&v_10); if
(v_11) { _fx_free_LR17K_form__kmodule_t(&v_11); } FX_FREE_STR(&v_12); FX_FREE_STR(&v_13); if (v_14) { _fx_free_LR17K_form__kmodule_t(&v_14); } if (v_15) { _fx_free_LR17K_form__kmodule_t(&v_15); } if (v_16) { _fx_free_LR17K_form__kmodule_t(&v_16); } FX_FREE_STR(&v_17); FX_FREE_STR(&v_18); if (v_19) { _fx_free_LR17K_form__kmodule_t(&v_19); } FX_FREE_STR(&v_20); FX_FREE_STR(&v_21); if (v_22) { _fx_free_LR17K_form__kmodule_t(&v_22); } FX_FREE_STR(&v_23); FX_FREE_STR(&v_24); if (v_25) { _fx_free_LR17K_form__kmodule_t(&v_25); } FX_FREE_STR(&v_26); FX_FREE_STR(&v_27); if (v_28) { _fx_free_LR17K_form__kmodule_t(&v_28); } FX_FREE_STR(&v_29); FX_FREE_STR(&v_30); if (v_31) { _fx_free_LR17K_form__kmodule_t(&v_31); } FX_FREE_STR(&v_32); FX_FREE_STR(&v_33); if (v_34) { _fx_free_LR17K_form__kmodule_t(&v_34); } FX_FREE_STR(&v_35); FX_FREE_STR(&v_36); if (v_37) { _fx_free_LR17K_form__kmodule_t(&v_37); } FX_FREE_STR(&v_38); FX_FREE_STR(&v_39); if (v_40) { _fx_free_LR17K_form__kmodule_t(&v_40); } FX_FREE_STR(&v_41); FX_FREE_STR(&v_42); if (v_43) { _fx_free_LR17K_form__kmodule_t(&v_43); } FX_FREE_STR(&v_44); FX_FREE_STR(&v_45); if (v_46) { _fx_free_LR17K_form__kmodule_t(&v_46); } FX_FREE_STR(&v_47); FX_FREE_STR(&v_48); if (v_49) { _fx_free_LR17K_form__kmodule_t(&v_49); } return fx_status; } FX_EXTERN_C int _fx_M8CompilerFM7k2c_allT2LR17C_form__cmodule_tB1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_T2LR17C_form__cmodule_tB* fx_result, void* fx_fv) { fx_str_t v_0 = {0}; fx_str_t v_1 = {0}; fx_str_t v_2 = {0}; _fx_LR17C_form__cmodule_t cmods_0 = 0; fx_str_t v_3 = {0}; fx_str_t v_4 = {0}; fx_str_t v_5 = {0}; _fx_LR17C_form__cmodule_t cmods_1 = 0; _fx_LR17C_form__cmodule_t cmods_2 = 0; int fx_status = 0; if (_fx_g21Compiler__iscolorterm) { fx_str_t slit_0 = FX_MAKE_STR(""); FX_CALL(_fx_F6stringS1S(&slit_0, &v_1, 0), _fx_cleanup); fx_str_t slit_1 = FX_MAKE_STR("Generating C code"); FX_CALL(_fx_F6stringS1S(&slit_1, &v_2, 0), _fx_cleanup); fx_str_t 
/*
 * NOTE(review): auto-generated code (Ficus compiler C backend, presumably —
 * do not hand-edit).  This line continues the `fx_str_t slit_2` declaration
 * begun on the previous line.  Interior of Compiler.k2c_all: finishes the
 * "Generating C code" verbose message (colorized when Compiler.iscolorterm),
 * resets the global compile-error list, initializes C-form id tables and
 * standard names, generates C code for all K-modules
 * (C_gen_code.gen_ccode_all), logs "C code generated", renames locals
 * (C_post_rename_locals.rename_locals), and starts a per-module loop that
 * runs C_post_adjust_decls.adjust_decls on modules compiled as C++ — i.e.
 * when Options.opt.compile_by_cpp is set or the module's pragma_cpp flag is.
 */
slit_2 = FX_MAKE_STR(""); { const fx_str_t strs_0[] = { v_1, v_2, slit_2 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 3, &v_0), _fx_cleanup); } } else { fx_str_t slit_3 = FX_MAKE_STR("Generating C code"); fx_copy_str(&slit_3, &v_0); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_0, 0), _fx_cleanup); _fx_free_LE(&_fx_g21Ast__all_compile_errs); _fx_g21Ast__all_compile_errs = 0; FX_CALL(_fx_M6C_formFM13init_all_idcsv0(0), _fx_cleanup); FX_CALL(_fx_M9C_gen_stdFM14init_std_namesv0(0), _fx_cleanup); FX_CALL(_fx_M10C_gen_codeFM13gen_ccode_allLR17C_form__cmodule_t1LR17K_form__kmodule_t(kmods_0, &cmods_0, 0), _fx_cleanup); if (_fx_g21Compiler__iscolorterm) { fx_str_t slit_4 = FX_MAKE_STR(""); FX_CALL(_fx_F6stringS1S(&slit_4, &v_4, 0), _fx_cleanup); fx_str_t slit_5 = FX_MAKE_STR("C code generated"); FX_CALL(_fx_F6stringS1S(&slit_5, &v_5, 0), _fx_cleanup); fx_str_t slit_6 = FX_MAKE_STR(""); { const fx_str_t strs_1[] = { v_4, v_5, slit_6 }; FX_CALL(fx_strjoin(0, 0, 0, strs_1, 3, &v_3), _fx_cleanup); } } else { fx_str_t slit_7 = FX_MAKE_STR("C code generated"); fx_copy_str(&slit_7, &v_3); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_3, 0), _fx_cleanup); FX_CALL(_fx_M20C_post_rename_localsFM13rename_localsLR17C_form__cmodule_t1LR17C_form__cmodule_t(cmods_0, &cmods_1, 0), _fx_cleanup); _fx_LR17C_form__cmodule_t lstend_0 = 0; _fx_LR17C_form__cmodule_t lst_0 = cmods_1; for (; lst_0; lst_0 = lst_0->tl) { _fx_R17C_form__cmodule_t t_0 = {0}; _fx_R17C_form__cmodule_t* cmod_0 = &lst_0->hd; bool is_cpp_0; if (_fx_g12Options__opt.compile_by_cpp) { is_cpp_0 = true; } else { is_cpp_0 = cmod_0->cmod_pragmas.pragma_cpp; } if (is_cpp_0) { FX_CALL(_fx_M19C_post_adjust_declsFM12adjust_declsR17C_form__cmodule_t1R17C_form__cmodule_t(cmod_0, &t_0, 0), _fx_catch_0); } else { _fx_copy_R17C_form__cmodule_t(cmod_0, &t_0); } _fx_LR17C_form__cmodule_t node_0 = 0; FX_CALL(_fx_cons_LR17C_form__cmodule_t(&t_0, 0, false, &node_0), _fx_catch_0); FX_LIST_APPEND(cmods_2, lstend_0, node_0); _fx_catch_0: ;
_fx_free_R17C_form__cmodule_t(&t_0); FX_CHECK_EXN(_fx_cleanup); } fx_str_t slit_8 = FX_MAKE_STR("\tConversion to C-form complete"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_8, 0), _fx_cleanup); _fx_make_T2LR17C_form__cmodule_tB(cmods_2, _fx_g21Ast__all_compile_errs == 0, fx_result); _fx_cleanup: ; FX_FREE_STR(&v_0); FX_FREE_STR(&v_1); FX_FREE_STR(&v_2); if (cmods_0) { _fx_free_LR17C_form__cmodule_t(&cmods_0); } FX_FREE_STR(&v_3); FX_FREE_STR(&v_4); FX_FREE_STR(&v_5); if (cmods_1) { _fx_free_LR17C_form__cmodule_t(&cmods_1); } if (cmods_2) { _fx_free_LR17C_form__cmodule_t(&cmods_2); } return fx_status; } FX_EXTERN_C int _fx_M8CompilerFM6run_ccB2LR17C_form__cmodule_tS( struct _fx_LR17C_form__cmodule_t_data_t* cmods_0, fx_str_t* ficus_root_0, bool* fx_result, void* fx_fv) { fx_str_t osinfo_0 = {0}; fx_str_t runtime_include_path_0 = {0}; fx_str_t runtime_lib_path_0 = {0}; fx_str_t runtime_impl_0 = {0}; fx_str_t build_root_dir_0 = {0}; fx_str_t build_dir_0 = {0}; _fx_Ta9S v_0 = {0}; fx_str_t opt_flags_0 = {0}; fx_str_t v_1 = {0}; fx_str_t v_2 = {0}; fx_str_t v_3 = {0}; fx_str_t v_4 = {0}; fx_str_t cflags_0 = {0}; _fx_Ta4S v_5 = {0}; _fx_Ta2S v_6 = {0}; fx_str_t omp_cflags_0 = {0}; fx_str_t omp_lib_0 = {0}; _fx_Ta3S v_7 = {0}; fx_str_t v_8 = {0}; fx_str_t v_9 = {0}; fx_str_t libpath_0 = {0}; fx_str_t cflags_1 = {0}; fx_str_t clibs_0 = {0}; fx_str_t omp_flags_0 = {0}; fx_str_t os_0 = {0}; fx_str_t libpath_1 = {0}; fx_str_t cflags_2 = {0}; fx_str_t clibs_1 = {0}; fx_str_t ggdb_opt_0 = {0}; fx_str_t v_10 = {0}; fx_str_t v_11 = {0}; fx_str_t v_12 = {0}; fx_str_t v_13 = {0}; fx_str_t v_14 = {0}; fx_str_t cflags_3 = {0}; fx_str_t v_15 = {0}; fx_str_t v_16 = {0}; fx_str_t v_17 = {0}; fx_str_t v_18 = {0}; fx_str_t clibs_2 = {0}; fx_str_t c_comp_0 = {0}; fx_str_t cpp_comp_0 = {0}; fx_str_t obj_ext_0 = {0}; fx_str_t obj_opt_0 = {0}; fx_str_t appname_opt_0 = {0}; fx_str_t link_lib_opt_0 = {0}; fx_str_t cflags_4 = {0}; fx_str_t clibs_3 = {0}; fx_str_t custom_cflags_0 = {0}; fx_str_t 
v_19 = {0}; fx_str_t custom_cflags_1 = {0}; fx_str_t v_20 = {0}; fx_str_t cflags_5 = {0}; fx_str_t v_21 = {0}; fx_str_t v_22 = {0}; fx_str_t v_23 = {0}; _fx_R14Ast__pragmas_t v_24 = {0}; _fx_R17C_form__cmodule_t runtime_pseudo_cmod_0 = {0}; _fx_LR17C_form__cmodule_t cmods_1 = 0; fx_arr_t v_25 = {0}; fx_arr_t results_0 = {0}; _fx_T5BBLSBLS __fold_result___0 = {0}; _fx_T5BBLSBLS v_26 = {0}; _fx_LS all_clibs_0 = 0; _fx_LS objs_0 = 0; fx_str_t v_27 = {0}; fx_str_t v_28 = {0}; fx_str_t v_29 = {0}; fx_str_t v_30 = {0}; fx_str_t custom_clibs_0 = {0}; fx_str_t v_31 = {0}; fx_str_t custom_clibs_1 = {0}; fx_str_t v_32 = {0}; fx_str_t custom_clibs_2 = {0}; _fx_LS v_33 = 0; _fx_LS v_34 = 0; fx_str_t v_35 = {0}; fx_str_t clibs_4 = {0}; fx_str_t v_36 = {0}; fx_str_t v_37 = {0}; fx_str_t v_38 = {0}; fx_str_t v_39 = {0}; fx_str_t cmd_0 = {0}; fx_str_t v_40 = {0}; fx_str_t cmd_1 = {0}; int fx_status = 0; FX_CALL(_fx_g11Sys__osname.fp(true, &osinfo_0, _fx_g11Sys__osname.fcv), _fx_cleanup); int_ opt_level_0 = _fx_g12Options__opt.optimize_level; bool enable_openmp_0 = _fx_g12Options__opt.enable_openmp; fx_str_t slit_0 = FX_MAKE_STR("runtime"); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(ficus_root_0, &slit_0, &runtime_include_path_0, 0), _fx_cleanup); fx_str_t slit_1 = FX_MAKE_STR("runtime/lib"); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(ficus_root_0, &slit_1, &runtime_lib_path_0, 0), _fx_cleanup); fx_str_t slit_2 = FX_MAKE_STR("runtime/ficus/impl/libficus"); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(ficus_root_0, &slit_2, &runtime_impl_0, 0), _fx_cleanup); fx_copy_str(&_fx_g12Options__opt.build_rootdir, &build_root_dir_0); bool ok_0; FX_CALL(_fx_M3SysFM5mkdirB2Si(&build_root_dir_0, 493, &ok_0, 0), _fx_cleanup); fx_copy_str(&_fx_g12Options__opt.build_dir, &build_dir_0); bool ok_1; if (ok_0) { FX_CALL(_fx_M3SysFM5mkdirB2Si(&build_dir_0, 493, &ok_1, 0), _fx_cleanup); } else { ok_1 = false; } if (_fx_g10Sys__win32) { if (opt_level_0 == 0) { fx_str_t slit_3 = FX_MAKE_STR(" /MTd /Od /GF"); 
fx_copy_str(&slit_3, &opt_flags_0); } else { if (opt_level_0 == 1) { fx_str_t slit_4 = FX_MAKE_STR("/O1"); fx_copy_str(&slit_4, &v_1); } else { fx_str_t slit_5 = FX_MAKE_STR("/O2"); fx_copy_str(&slit_5, &v_1); } fx_str_t slit_6 = FX_MAKE_STR(" /MT "); { const fx_str_t strs_0[] = { slit_6, v_1 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 2, &opt_flags_0), _fx_cleanup); } } FX_CALL(_fx_F6stringS1S(&opt_flags_0, &v_2, 0), _fx_cleanup); fx_str_t slit_7 = FX_MAKE_STR(""); FX_CALL(_fx_F6stringS1S(&slit_7, &v_3, 0), _fx_cleanup); FX_CALL(_fx_F6stringS1S(&runtime_include_path_0, &v_4, 0), _fx_cleanup); fx_str_t slit_8 = FX_MAKE_STR("/nologo"); fx_str_t slit_9 = FX_MAKE_STR(" /I"); { const fx_str_t strs_1[] = { slit_8, v_2, v_3, slit_9, v_4 }; FX_CALL(fx_strjoin(0, 0, 0, strs_1, 5, &cflags_0), _fx_cleanup); } fx_str_t slit_10 = FX_MAKE_STR("win"); fx_str_t slit_11 = FX_MAKE_STR("cl"); fx_str_t slit_12 = FX_MAKE_STR("cl"); fx_str_t slit_13 = FX_MAKE_STR(".obj"); fx_str_t slit_14 = FX_MAKE_STR("/c /Fo"); fx_str_t slit_15 = FX_MAKE_STR("/Fe"); fx_str_t slit_16 = FX_MAKE_STR(""); fx_str_t slit_17 = FX_MAKE_STR("kernel32.lib advapi32.lib"); _fx_make_Ta9S(&slit_10, &slit_11, &slit_12, &slit_13, &slit_14, &slit_15, &slit_16, &cflags_0, &slit_17, &v_0); } else { bool v_41; fx_str_t slit_18 = FX_MAKE_STR("Darwin"); FX_CALL(_fx_M8CompilerFM8containsB2SS(&osinfo_0, &slit_18, &v_41, 0), _fx_cleanup); if (v_41) { if (enable_openmp_0) { fx_str_t slit_19 = FX_MAKE_STR("-Xclang -fopenmp"); fx_str_t slit_20 = FX_MAKE_STR(" -lomp"); _fx_make_Ta2S(&slit_19, &slit_20, &v_6); } else { fx_str_t slit_21 = FX_MAKE_STR(""); fx_str_t slit_22 = FX_MAKE_STR(""); _fx_make_Ta2S(&slit_21, &slit_22, &v_6); } fx_copy_str(&v_6.t0, &omp_cflags_0); fx_copy_str(&v_6.t1, &omp_lib_0); bool v_42; fx_str_t slit_23 = FX_MAKE_STR("x86_64"); FX_CALL(_fx_M8CompilerFM8containsB2SS(&osinfo_0, &slit_23, &v_42, 0), _fx_cleanup); if (v_42) { fx_str_t slit_24 = FX_MAKE_STR(" "); { const fx_str_t strs_2[] = { slit_24, 
omp_cflags_0, omp_lib_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_2, 3, &v_8), _fx_cleanup); } fx_str_t slit_25 = FX_MAKE_STR("macos_x64"); _fx_make_Ta3S(&slit_25, &omp_cflags_0, &v_8, &v_7); } else { bool v_43; fx_str_t slit_26 = FX_MAKE_STR("arm64"); FX_CALL(_fx_M8CompilerFM8containsB2SS(&osinfo_0, &slit_26, &v_43, 0), _fx_cleanup); if (v_43) { fx_str_t slit_27 = FX_MAKE_STR(" "); { const fx_str_t strs_3[] = { slit_27, omp_cflags_0, omp_lib_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_3, 3, &v_9), _fx_cleanup); } fx_str_t slit_28 = FX_MAKE_STR("macos_arm64"); _fx_make_Ta3S(&slit_28, &omp_cflags_0, &v_9, &v_7); } else { fx_str_t slit_29 = FX_MAKE_STR(""); fx_str_t slit_30 = FX_MAKE_STR(""); fx_str_t slit_31 = FX_MAKE_STR(""); _fx_make_Ta3S(&slit_29, &slit_30, &slit_31, &v_7); } } fx_copy_str(&v_7.t0, &libpath_0); fx_copy_str(&v_7.t1, &cflags_1); fx_copy_str(&v_7.t2, &clibs_0); fx_str_t slit_32 = FX_MAKE_STR("macos"); _fx_make_Ta4S(&slit_32, &libpath_0, &cflags_1, &clibs_0, &v_5); } else { bool v_44; fx_str_t slit_33 = FX_MAKE_STR("Linux"); FX_CALL(_fx_M8CompilerFM8containsB2SS(&osinfo_0, &slit_33, &v_44, 0), _fx_cleanup); if (v_44) { if (enable_openmp_0) { fx_str_t slit_34 = FX_MAKE_STR(" -fopenmp"); fx_copy_str(&slit_34, &omp_flags_0); } else { fx_str_t slit_35 = FX_MAKE_STR(""); fx_copy_str(&slit_35, &omp_flags_0); } fx_str_t slit_36 = FX_MAKE_STR("linux"); fx_str_t slit_37 = FX_MAKE_STR(""); _fx_make_Ta4S(&slit_36, &slit_37, &omp_flags_0, &omp_flags_0, &v_5); } else if (_fx_g9Sys__unix) { fx_str_t slit_38 = FX_MAKE_STR("unix"); fx_str_t slit_39 = FX_MAKE_STR(""); fx_str_t slit_40 = FX_MAKE_STR(""); fx_str_t slit_41 = FX_MAKE_STR(""); _fx_make_Ta4S(&slit_38, &slit_39, &slit_40, &slit_41, &v_5); } else { fx_str_t slit_42 = FX_MAKE_STR(""); fx_str_t slit_43 = FX_MAKE_STR(""); fx_str_t slit_44 = FX_MAKE_STR(""); fx_str_t slit_45 = FX_MAKE_STR(""); _fx_make_Ta4S(&slit_42, &slit_43, &slit_44, &slit_45, &v_5); } } fx_copy_str(&v_5.t0, &os_0); fx_copy_str(&v_5.t1, &libpath_1); 
fx_copy_str(&v_5.t2, &cflags_2); fx_copy_str(&v_5.t3, &clibs_1); if (opt_level_0 == 0) { fx_str_t slit_46 = FX_MAKE_STR(" -ggdb"); fx_copy_str(&slit_46, &ggdb_opt_0); } else { fx_str_t slit_47 = FX_MAKE_STR(""); fx_copy_str(&slit_47, &ggdb_opt_0); } FX_CALL(_fx_F6stringS1i(opt_level_0, &v_10, 0), _fx_cleanup); FX_CALL(_fx_F6stringS1S(&ggdb_opt_0, &v_11, 0), _fx_cleanup); FX_CALL(_fx_F6stringS1S(&cflags_2, &v_12, 0), _fx_cleanup); fx_str_t slit_48 = FX_MAKE_STR("-Wno-unknown-warning-option -Wno-dangling-else -Wno-static-in-inline"); FX_CALL(_fx_F6stringS1S(&slit_48, &v_13, 0), _fx_cleanup); FX_CALL(_fx_F6stringS1S(&runtime_include_path_0, &v_14, 0), _fx_cleanup); fx_str_t slit_49 = FX_MAKE_STR("-O"); fx_str_t slit_50 = FX_MAKE_STR(" "); fx_str_t slit_51 = FX_MAKE_STR(" "); fx_str_t slit_52 = FX_MAKE_STR(" -I"); { const fx_str_t strs_4[] = { slit_49, v_10, v_11, slit_50, v_12, slit_51, v_13, slit_52, v_14 }; FX_CALL(fx_strjoin(0, 0, 0, strs_4, 9, &cflags_3), _fx_cleanup); } if (FX_STR_LENGTH(libpath_1) != 0) { FX_CALL(_fx_F6stringS1S(&runtime_lib_path_0, &v_16, 0), _fx_cleanup); FX_CALL(_fx_F6stringS1S(&libpath_1, &v_17, 0), _fx_cleanup); fx_str_t slit_53 = FX_MAKE_STR("-L"); fx_str_t slit_54 = FX_MAKE_STR("/"); fx_str_t slit_55 = FX_MAKE_STR(" "); { const fx_str_t strs_5[] = { slit_53, v_16, slit_54, v_17, slit_55 }; FX_CALL(fx_strjoin(0, 0, 0, strs_5, 5, &v_15), _fx_cleanup); } } else { fx_str_t slit_56 = FX_MAKE_STR(""); fx_copy_str(&slit_56, &v_15); } FX_CALL(_fx_F6stringS1S(&clibs_1, &v_18, 0), _fx_cleanup); fx_str_t slit_57 = FX_MAKE_STR("-lm "); { const fx_str_t strs_6[] = { v_15, slit_57, v_18 }; FX_CALL(fx_strjoin(0, 0, 0, strs_6, 3, &clibs_2), _fx_cleanup); } fx_str_t slit_58 = FX_MAKE_STR("cc"); fx_str_t slit_59 = FX_MAKE_STR("c++ -std=c++11"); fx_str_t slit_60 = FX_MAKE_STR(".o"); fx_str_t slit_61 = FX_MAKE_STR("-c -o "); fx_str_t slit_62 = FX_MAKE_STR("-o "); fx_str_t slit_63 = FX_MAKE_STR("-l"); _fx_make_Ta9S(&os_0, &slit_58, &slit_59, &slit_60, 
&slit_61, &slit_62, &slit_63, &cflags_3, &clibs_2, &v_0); } fx_copy_str(&v_0.t1, &c_comp_0); fx_copy_str(&v_0.t2, &cpp_comp_0); fx_copy_str(&v_0.t3, &obj_ext_0); fx_copy_str(&v_0.t4, &obj_opt_0); fx_copy_str(&v_0.t5, &appname_opt_0); fx_copy_str(&v_0.t6, &link_lib_opt_0); fx_copy_str(&v_0.t7, &cflags_4); fx_copy_str(&v_0.t8, &clibs_3); fx_str_t slit_64 = FX_MAKE_STR("FICUS_CFLAGS"); FX_CALL(_fx_M3SysFM6getenvS1S(&slit_64, &custom_cflags_0, 0), _fx_cleanup); fx_copy_str(&_fx_g12Options__opt.cflags, &v_19); if (FX_STR_LENGTH(v_19) == 0) { fx_copy_str(&custom_cflags_0, &custom_cflags_1); } else { fx_copy_str(&_fx_g12Options__opt.cflags, &v_20); fx_str_t slit_65 = FX_MAKE_STR(" "); { const fx_str_t strs_7[] = { v_20, slit_65, custom_cflags_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_7, 3, &custom_cflags_1), _fx_cleanup); } } fx_str_t slit_66 = FX_MAKE_STR(" "); { const fx_str_t strs_8[] = { cflags_4, slit_66, custom_cflags_1 }; FX_CALL(fx_strjoin(0, 0, 0, strs_8, 3, &cflags_5), _fx_cleanup); } FX_CALL(_fx_F6stringS1S(&cflags_5, &v_21, 0), _fx_cleanup); fx_str_t slit_67 = FX_MAKE_STR("Compiling .c/.cpp files with cflags="); { const fx_str_t strs_9[] = { slit_67, v_21 }; FX_CALL(fx_strjoin(0, 0, 0, strs_9, 2, &v_22), _fx_cleanup); } FX_CALL(_fx_M8CompilerFM6clrmsgS2N20Compiler__msgcolor_tS(&_fx_g17Compiler__MsgBlue, &v_22, &v_23, 0), _fx_cleanup); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_23, 0), _fx_cleanup); _fx_make_R14Ast__pragmas_t(false, 0, &v_24); _fx_make_R17C_form__cmodule_t(&_fx_g9Ast__noid, &runtime_impl_0, 0, false, true, false, &v_24, &runtime_pseudo_cmod_0); FX_CALL(_fx_cons_LR17C_form__cmodule_t(&runtime_pseudo_cmod_0, cmods_0, true, &cmods_1), _fx_cleanup); FX_CALL(_fx_M8CompilerFM5arrayA1R17C_form__cmodule_t1LR17C_form__cmodule_t(cmods_1, &v_25, 0), _fx_cleanup); int par_status_0 = 0; int_ ni_0 = FX_ARR_SIZE(v_25, 0); _fx_R17C_form__cmodule_t* ptr_v_0 = FX_PTR_1D(_fx_R17C_form__cmodule_t, v_25, 0); { const int_ shape_0[] = { ni_0 }; FX_CALL( fx_make_arr(1, 
shape_0, sizeof(_fx_T5BBLSBS), (fx_free_t)_fx_free_T5BBLSBS, (fx_copy_t)_fx_copy_T5BBLSBS, 0, &results_0), _fx_cleanup); } #pragma omp parallel for for (int_ i_0 = 0; i_0 < ni_0; i_0++) { int fx_status = 0; _fx_R17C_form__cmodule_t __pat___0 = {0}; _fx_LT2SR10Ast__loc_t pragma_clibs_0 = 0; _fx_LN15C_form__cstmt_t cmod_ccode_0 = 0; fx_str_t cmod_cname_0 = {0}; fx_str_t output_fname_0 = {0}; _fx_Ta2S v_45 = {0}; fx_str_t comp_0 = {0}; fx_str_t ext_0 = {0}; fx_str_t output_fname_1 = {0}; fx_str_t output_fname_c_0 = {0}; _fx_T3BBS v_46 = {0}; fx_str_t str_new_0 = {0}; fx_str_t str_old_0 = {0}; fx_exn_t exn_0 = {0}; fx_exn_t exn_1 = {0}; fx_str_t v_47 = {0}; fx_str_t v_48 = {0}; fx_str_t v_49 = {0}; fx_str_t status_j_0 = {0}; fx_str_t c_filename_0 = {0}; fx_str_t obj_filename_0 = {0}; _fx_T3BBS v_50 = {0}; fx_str_t v_51 = {0}; fx_str_t v_52 = {0}; fx_str_t v_53 = {0}; fx_str_t v_54 = {0}; fx_str_t v_55 = {0}; fx_str_t cmd_2 = {0}; fx_str_t status_0 = {0}; fx_str_t status_j_1 = {0}; fx_str_t v_56 = {0}; fx_str_t v_57 = {0}; fx_str_t v_58 = {0}; _fx_LS v_59 = 0; _fx_LS clibs_5 = 0; _fx_T5BBLSBS tup_0 = {0}; _fx_copy_R17C_form__cmodule_t(ptr_v_0 + i_0, &__pat___0); _fx_T5BBLSBS* dstptr_0 = FX_PTR_1D(_fx_T5BBLSBS, results_0, i_0); _fx_R14Ast__pragmas_t* i_1 = &__pat___0.cmod_pragmas; FX_COPY_PTR(i_1->pragma_clibs, &pragma_clibs_0); FX_COPY_PTR(__pat___0.cmod_ccode, &cmod_ccode_0); fx_copy_str(&__pat___0.cmod_cname, &cmod_cname_0); FX_CALL(_fx_M8FilenameFM8basenameS1S(&cmod_cname_0, &output_fname_0, 0), _fx_catch_3); bool is_runtime_0 = _fx_F6__eq__B2SS(&cmod_cname_0, &runtime_impl_0, 0); bool is_cpp_0; if (!is_runtime_0) { if (_fx_g12Options__opt.compile_by_cpp) { is_cpp_0 = true; } else { is_cpp_0 = i_1->pragma_cpp; } } else { is_cpp_0 = false; } if (is_cpp_0) { fx_str_t slit_68 = FX_MAKE_STR(".cpp"); _fx_make_Ta2S(&cpp_comp_0, &slit_68, &v_45); } else { fx_str_t slit_69 = FX_MAKE_STR(".c"); _fx_make_Ta2S(&c_comp_0, &slit_69, &v_45); } fx_copy_str(&v_45.t0, &comp_0); 
fx_copy_str(&v_45.t1, &ext_0); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&build_dir_0, &output_fname_0, &output_fname_1, 0), _fx_catch_3); { const fx_str_t strs_10[] = { output_fname_1, ext_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_10, 2, &output_fname_c_0), _fx_catch_3); } if (__pat___0.cmod_skip) { fx_str_t slit_70 = FX_MAKE_STR("skipped"); _fx_make_T3BBS(true, false, &slit_70, &v_46); } else if (is_runtime_0) { fx_str_t slit_71 = FX_MAKE_STR(""); _fx_make_T3BBS(true, true, &slit_71, &v_46); } else { FX_CALL(_fx_M4C_ppFM20pprint_top_to_stringS1LN15C_form__cstmt_t(cmod_ccode_0, &str_new_0, 0), _fx_catch_3); if (_fx_g12Options__opt.force_rebuild) { fx_str_t slit_72 = FX_MAKE_STR(""); fx_copy_str(&slit_72, &str_old_0); } else { FX_CALL(_fx_M4FileFM9read_utf8S1S(&output_fname_c_0, &str_old_0, 0), _fx_catch_0); _fx_catch_0: ; if (fx_status < 0) { fx_exn_get_and_reset(fx_status, &exn_0); fx_status = 0; FX_FREE_STR(&str_old_0); int tag_0 = exn_0.tag; bool res_0; if (tag_0 == FX_EXN_IOError) { res_0 = true; } else if (tag_0 == FX_EXN_FileOpenError) { res_0 = true; } else { res_0 = false; } FX_CHECK_EXN(_fx_catch_3); if (res_0) { fx_str_t slit_73 = FX_MAKE_STR(""); fx_copy_str(&slit_73, &str_old_0); goto _fx_endmatch_0; } FX_RETHROW(&exn_0, _fx_catch_3); _fx_endmatch_0: ; FX_CHECK_EXN(_fx_catch_3); } } bool v_60 = _fx_F6__eq__B2SS(&str_new_0, &str_old_0, 0); if (v_60) { fx_str_t slit_74 = FX_MAKE_STR("skipped"); _fx_make_T3BBS(ok_1, false, &slit_74, &v_46); } else { bool well_written_0; FX_CALL(_fx_M4FileFM10write_utf8v2SS(&output_fname_c_0, &str_new_0, 0), _fx_catch_1); well_written_0 = true; _fx_catch_1: ; if (fx_status < 0) { fx_exn_get_and_reset(fx_status, &exn_1); fx_status = 0; int tag_1 = exn_1.tag; bool res_1; if (tag_1 == FX_EXN_IOError) { res_1 = true; } else if (tag_1 == FX_EXN_FileOpenError) { res_1 = true; } else { res_1 = false; } FX_CHECK_EXN(_fx_catch_3); if (res_1) { well_written_0 = false; goto _fx_endmatch_1; } FX_RETHROW(&exn_1, _fx_catch_3); _fx_endmatch_1: 
; FX_CHECK_EXN(_fx_catch_3); } if (well_written_0) { fx_str_t slit_75 = FX_MAKE_STR(""); fx_copy_str(&slit_75, &v_47); } else { FX_CALL(_fx_F6stringS1S(&output_fname_c_0, &v_48, 0), _fx_catch_3); fx_str_t slit_76 = FX_MAKE_STR("failed to write "); { const fx_str_t strs_11[] = { slit_76, v_48 }; FX_CALL(fx_strjoin(0, 0, 0, strs_11, 2, &v_49), _fx_catch_3); } FX_CALL(_fx_M8CompilerFM6clrmsgS2N20Compiler__msgcolor_tS(&_fx_g16Compiler__MsgRed, &v_49, &v_47, 0), _fx_catch_3); } _fx_make_T3BBS(well_written_0, well_written_0, &v_47, &v_46); } } bool ok_j_0 = v_46.t0; bool reprocess_0 = v_46.t1; fx_copy_str(&v_46.t2, &status_j_0); if (is_runtime_0) { fx_str_t slit_77 = FX_MAKE_STR(".c"); { const fx_str_t strs_12[] = { runtime_impl_0, slit_77 }; FX_CALL(fx_strjoin(0, 0, 0, strs_12, 2, &c_filename_0), _fx_catch_3); } } else { fx_copy_str(&output_fname_c_0, &c_filename_0); } { const fx_str_t strs_13[] = { output_fname_1, obj_ext_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_13, 2, &obj_filename_0), _fx_catch_3); } bool v_61; if (ok_j_0) { if (reprocess_0) { v_61 = true; } else { bool v_62; FX_CALL(_fx_M8FilenameFM6existsB1S(&obj_filename_0, &v_62, 0), _fx_catch_3); v_61 = !v_62; } } else { v_61 = false; } if (v_61) { FX_CALL(_fx_F6stringS1S(&comp_0, &v_51, 0), _fx_catch_3); FX_CALL(_fx_F6stringS1S(&cflags_5, &v_52, 0), _fx_catch_3); FX_CALL(_fx_F6stringS1S(&obj_opt_0, &v_53, 0), _fx_catch_3); FX_CALL(_fx_F6stringS1S(&obj_filename_0, &v_54, 0), _fx_catch_3); FX_CALL(_fx_F6stringS1S(&c_filename_0, &v_55, 0), _fx_catch_3); fx_str_t slit_78 = FX_MAKE_STR(" "); fx_str_t slit_79 = FX_MAKE_STR(" "); fx_str_t slit_80 = FX_MAKE_STR(" "); { const fx_str_t strs_14[] = { v_51, slit_78, v_52, slit_79, v_53, v_54, slit_80, v_55 }; FX_CALL(fx_strjoin(0, 0, 0, strs_14, 8, &cmd_2), _fx_catch_3); } int_ v_63; FX_CALL(_fx_M3SysFM7commandi1S(&cmd_2, &v_63, 0), _fx_catch_3); bool result_0 = v_63 == 0; if (result_0) { fx_str_t slit_81 = FX_MAKE_STR("ok"); 
FX_CALL(_fx_M8CompilerFM6clrmsgS2N20Compiler__msgcolor_tS(&_fx_g18Compiler__MsgGreen, &slit_81, &status_0, 0), _fx_catch_3); } else { fx_str_t slit_82 = FX_MAKE_STR("fail"); FX_CALL(_fx_M8CompilerFM6clrmsgS2N20Compiler__msgcolor_tS(&_fx_g16Compiler__MsgRed, &slit_82, &status_0, 0), _fx_catch_3); } _fx_make_T3BBS(result_0, true, &status_0, &v_50); } else { _fx_make_T3BBS(ok_j_0, false, &status_j_0, &v_50); } bool ok_j_1 = v_50.t0; bool recompiled_0 = v_50.t1; fx_copy_str(&v_50.t2, &status_j_1); FX_CALL(_fx_F6stringS1S(&c_filename_0, &v_56, 0), _fx_catch_3); FX_CALL(_fx_F6stringS1S(&status_j_1, &v_57, 0), _fx_catch_3); fx_str_t slit_83 = FX_MAKE_STR("CC "); fx_str_t slit_84 = FX_MAKE_STR(": "); { const fx_str_t strs_15[] = { slit_83, v_56, slit_84, v_57 }; FX_CALL(fx_strjoin(0, 0, 0, strs_15, 4, &v_58), _fx_catch_3); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_58, 0), _fx_catch_3); _fx_LS lstend_0 = 0; _fx_LT2SR10Ast__loc_t lst_0 = pragma_clibs_0; for (; lst_0; lst_0 = lst_0->tl) { _fx_T2SR10Ast__loc_t* __pat___1 = &lst_0->hd; _fx_LS node_0 = 0; FX_CALL(_fx_cons_LS(&__pat___1->t0, 0, false, &node_0), _fx_catch_2); FX_LIST_APPEND(v_59, lstend_0, node_0); _fx_catch_2: ; FX_CHECK_EXN(_fx_catch_3); } FX_CALL(_fx_M8CompilerFM3revLS1LS(v_59, &clibs_5, 0), _fx_catch_3); _fx_make_T5BBLSBS(is_cpp_0, recompiled_0, clibs_5, ok_j_1, &obj_filename_0, &tup_0); _fx_copy_T5BBLSBS(&tup_0, dstptr_0); _fx_catch_3: ; _fx_free_T5BBLSBS(&tup_0); if (clibs_5) { _fx_free_LS(&clibs_5); } if (v_59) { _fx_free_LS(&v_59); } FX_FREE_STR(&v_58); FX_FREE_STR(&v_57); FX_FREE_STR(&v_56); FX_FREE_STR(&status_j_1); FX_FREE_STR(&status_0); FX_FREE_STR(&cmd_2); FX_FREE_STR(&v_55); FX_FREE_STR(&v_54); FX_FREE_STR(&v_53); FX_FREE_STR(&v_52); FX_FREE_STR(&v_51); _fx_free_T3BBS(&v_50); FX_FREE_STR(&obj_filename_0); FX_FREE_STR(&c_filename_0); FX_FREE_STR(&status_j_0); FX_FREE_STR(&v_49); FX_FREE_STR(&v_48); FX_FREE_STR(&v_47); fx_free_exn(&exn_1); fx_free_exn(&exn_0); FX_FREE_STR(&str_old_0); 
FX_FREE_STR(&str_new_0); _fx_free_T3BBS(&v_46); FX_FREE_STR(&output_fname_c_0); FX_FREE_STR(&output_fname_1); FX_FREE_STR(&ext_0); FX_FREE_STR(&comp_0); _fx_free_Ta2S(&v_45); FX_FREE_STR(&output_fname_0); FX_FREE_STR(&cmod_cname_0); if (cmod_ccode_0) { _fx_free_LN15C_form__cstmt_t(&cmod_ccode_0); } if (pragma_clibs_0) { _fx_free_LT2SR10Ast__loc_t(&pragma_clibs_0); } _fx_free_R17C_form__cmodule_t(&__pat___0); FX_CHECK_EXN_PARALLEL(fx_status, par_status_0); } FX_UPDATE_EXN_PARALLEL(par_status_0, _fx_cleanup); _fx_make_T5BBLSBLS(false, false, 0, ok_1, 0, &__fold_result___0); int_ ni_1 = FX_ARR_SIZE(results_0, 0); _fx_T5BBLSBS* ptr_results_0 = FX_PTR_1D(_fx_T5BBLSBS, results_0, 0); for (int_ i_2 = 0; i_2 < ni_1; i_2++) { _fx_T5BBLSBS __pat___2 = {0}; _fx_LS clibs_j_0 = 0; fx_str_t obj_0 = {0}; _fx_T5BBLSBLS v_64 = {0}; _fx_LS all_clibs_1 = 0; _fx_LS objs_1 = 0; _fx_LS v_65 = 0; _fx_T5BBLSBLS v_66 = {0}; _fx_copy_T5BBLSBS(ptr_results_0 + i_2, &__pat___2); FX_COPY_PTR(__pat___2.t2, &clibs_j_0); fx_copy_str(&__pat___2.t4, &obj_0); _fx_copy_T5BBLSBLS(&__fold_result___0, &v_64); FX_COPY_PTR(v_64.t2, &all_clibs_1); FX_COPY_PTR(v_64.t4, &objs_1); FX_CALL(_fx_M8CompilerFM7__add__LS2LSLS(clibs_j_0, all_clibs_1, &v_65, 0), _fx_catch_4); FX_CALL(_fx_cons_LS(&obj_0, objs_1, false, &objs_1), _fx_catch_4); _fx_make_T5BBLSBLS(v_64.t0 || __pat___2.t0, v_64.t1 || __pat___2.t1, v_65, v_64.t3 && __pat___2.t3, objs_1, &v_66); _fx_free_T5BBLSBLS(&__fold_result___0); _fx_copy_T5BBLSBLS(&v_66, &__fold_result___0); _fx_catch_4: ; _fx_free_T5BBLSBLS(&v_66); if (v_65) { _fx_free_LS(&v_65); } if (objs_1) { _fx_free_LS(&objs_1); } if (all_clibs_1) { _fx_free_LS(&all_clibs_1); } _fx_free_T5BBLSBLS(&v_64); FX_FREE_STR(&obj_0); if (clibs_j_0) { _fx_free_LS(&clibs_j_0); } _fx_free_T5BBLSBS(&__pat___2); FX_CHECK_EXN(_fx_cleanup); } _fx_copy_T5BBLSBLS(&__fold_result___0, &v_26); bool any_cpp_0 = v_26.t0; bool any_recompiled_0 = v_26.t1; FX_COPY_PTR(v_26.t2, &all_clibs_0); bool ok_2 = v_26.t3; 
FX_COPY_PTR(v_26.t4, &objs_0); bool v_67; bool t_0; if (ok_2) { t_0 = !any_recompiled_0; } else { t_0 = false; } if (t_0) { fx_copy_str(&_fx_g12Options__opt.app_filename, &v_27); FX_CALL(_fx_M8FilenameFM6existsB1S(&v_27, &v_67, 0), _fx_cleanup); } else { v_67 = false; } if (v_67) { fx_copy_str(&_fx_g12Options__opt.app_filename, &v_28); FX_CALL(_fx_F6stringS1S(&v_28, &v_29, 0), _fx_cleanup); fx_str_t slit_85 = FX_MAKE_STR(" is up-to-date\n"); { const fx_str_t strs_16[] = { v_29, slit_85 }; FX_CALL(fx_strjoin(0, 0, 0, strs_16, 2, &v_30), _fx_cleanup); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_30, 0), _fx_cleanup); *fx_result = ok_2; } else if (!ok_2) { *fx_result = ok_2; } else { fx_str_t slit_86 = FX_MAKE_STR("FICUS_LINK_LIBRARIES"); FX_CALL(_fx_M3SysFM6getenvS1S(&slit_86, &custom_clibs_0, 0), _fx_cleanup); fx_copy_str(&_fx_g12Options__opt.clibs, &v_31); if (FX_STR_LENGTH(v_31) == 0) { fx_copy_str(&custom_clibs_0, &custom_clibs_1); } else { fx_copy_str(&_fx_g12Options__opt.clibs, &v_32); fx_str_t slit_87 = FX_MAKE_STR(" "); { const fx_str_t strs_17[] = { custom_clibs_0, slit_87, v_32 }; FX_CALL(fx_strjoin(0, 0, 0, strs_17, 3, &custom_clibs_1), _fx_cleanup); } } if (all_clibs_0 == 0) { fx_copy_str(&custom_clibs_1, &custom_clibs_2); } else { FX_CALL(_fx_M8CompilerFM3revLS1LS(all_clibs_0, &v_33, 0), _fx_cleanup); _fx_LS lstend_1 = 0; _fx_LS lst_1 = v_33; for (; lst_1; lst_1 = lst_1->tl) { fx_str_t concat_str_0 = {0}; fx_str_t* l_0 = &lst_1->hd; { const fx_str_t strs_18[] = { link_lib_opt_0, *l_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_18, 2, &concat_str_0), _fx_catch_5); } _fx_LS node_1 = 0; FX_CALL(_fx_cons_LS(&concat_str_0, 0, false, &node_1), _fx_catch_5); FX_LIST_APPEND(v_34, lstend_1, node_1); _fx_catch_5: ; FX_FREE_STR(&concat_str_0); FX_CHECK_EXN(_fx_cleanup); } fx_str_t slit_88 = FX_MAKE_STR(" "); FX_CALL(_fx_M8CompilerFM4joinS2SLS(&slit_88, v_34, &v_35, 0), _fx_cleanup); fx_str_t slit_89 = FX_MAKE_STR(" "); { const fx_str_t strs_19[] = { custom_clibs_1, slit_89, 
v_35 }; FX_CALL(fx_strjoin(0, 0, 0, strs_19, 3, &custom_clibs_2), _fx_cleanup); } } fx_str_t slit_90 = FX_MAKE_STR(" "); { const fx_str_t strs_20[] = { clibs_3, slit_90, custom_clibs_2 }; FX_CALL(fx_strjoin(0, 0, 0, strs_20, 3, &clibs_4), _fx_cleanup); } FX_CALL(_fx_F6stringS1S(&clibs_4, &v_36, 0), _fx_cleanup); fx_str_t slit_91 = FX_MAKE_STR("Linking the app with flags="); { const fx_str_t strs_21[] = { slit_91, v_36 }; FX_CALL(fx_strjoin(0, 0, 0, strs_21, 2, &v_37), _fx_cleanup); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_37, 0), _fx_cleanup); if (any_cpp_0) { fx_copy_str(&cpp_comp_0, &v_38); } else { fx_copy_str(&c_comp_0, &v_38); } fx_copy_str(&_fx_g12Options__opt.app_filename, &v_39); fx_str_t slit_92 = FX_MAKE_STR(" "); { const fx_str_t strs_22[] = { v_38, slit_92, appname_opt_0, v_39 }; FX_CALL(fx_strjoin(0, 0, 0, strs_22, 4, &cmd_0), _fx_cleanup); } fx_str_t slit_93 = FX_MAKE_STR(" "); FX_CALL(_fx_M8CompilerFM4joinS2SLS(&slit_93, objs_0, &v_40, 0), _fx_cleanup); fx_str_t slit_94 = FX_MAKE_STR(" "); fx_str_t slit_95 = FX_MAKE_STR(" "); { const fx_str_t strs_23[] = { cmd_0, slit_94, v_40, slit_95, clibs_4 }; FX_CALL(fx_strjoin(0, 0, 0, strs_23, 5, &cmd_1), _fx_cleanup); } int_ v_68; FX_CALL(_fx_M3SysFM7commandi1S(&cmd_1, &v_68, 0), _fx_cleanup); *fx_result = v_68 == 0; } _fx_cleanup: ; FX_FREE_STR(&osinfo_0); FX_FREE_STR(&runtime_include_path_0); FX_FREE_STR(&runtime_lib_path_0); FX_FREE_STR(&runtime_impl_0); FX_FREE_STR(&build_root_dir_0); FX_FREE_STR(&build_dir_0); _fx_free_Ta9S(&v_0); FX_FREE_STR(&opt_flags_0); FX_FREE_STR(&v_1); FX_FREE_STR(&v_2); FX_FREE_STR(&v_3); FX_FREE_STR(&v_4); FX_FREE_STR(&cflags_0); _fx_free_Ta4S(&v_5); _fx_free_Ta2S(&v_6); FX_FREE_STR(&omp_cflags_0); FX_FREE_STR(&omp_lib_0); _fx_free_Ta3S(&v_7); FX_FREE_STR(&v_8); FX_FREE_STR(&v_9); FX_FREE_STR(&libpath_0); FX_FREE_STR(&cflags_1); FX_FREE_STR(&clibs_0); FX_FREE_STR(&omp_flags_0); FX_FREE_STR(&os_0); FX_FREE_STR(&libpath_1); FX_FREE_STR(&cflags_2); FX_FREE_STR(&clibs_1); 
/* NOTE(review): everything below appears to be C code auto-generated by the
 * Ficus compiler (mangled symbols such as _fx_M8CompilerFM11process_allB1S,
 * runtime macros FX_CALL/FX_THROW/FX_FREE_STR). It does not match the file
 * header (ViennaRNA subopt.c) -- presumably a wrong metadata join; verify
 * provenance. Do not hand-edit the logic here: fix the .fx sources and
 * regenerate instead. */
/* Cleanup tail of the preceding (partially visible) function: its _fx_cleanup
 * section releases every temporary string/list/tuple in reverse-ish
 * declaration order, then returns the accumulated status code. */
FX_FREE_STR(&ggdb_opt_0); FX_FREE_STR(&v_10); FX_FREE_STR(&v_11); FX_FREE_STR(&v_12); FX_FREE_STR(&v_13); FX_FREE_STR(&v_14); FX_FREE_STR(&cflags_3); FX_FREE_STR(&v_15); FX_FREE_STR(&v_16); FX_FREE_STR(&v_17); FX_FREE_STR(&v_18); FX_FREE_STR(&clibs_2); FX_FREE_STR(&c_comp_0); FX_FREE_STR(&cpp_comp_0); FX_FREE_STR(&obj_ext_0); FX_FREE_STR(&obj_opt_0); FX_FREE_STR(&appname_opt_0); FX_FREE_STR(&link_lib_opt_0); FX_FREE_STR(&cflags_4); FX_FREE_STR(&clibs_3); FX_FREE_STR(&custom_cflags_0); FX_FREE_STR(&v_19); FX_FREE_STR(&custom_cflags_1); FX_FREE_STR(&v_20); FX_FREE_STR(&cflags_5); FX_FREE_STR(&v_21); FX_FREE_STR(&v_22); FX_FREE_STR(&v_23); _fx_free_R14Ast__pragmas_t(&v_24); _fx_free_R17C_form__cmodule_t(&runtime_pseudo_cmod_0); if (cmods_1) { _fx_free_LR17C_form__cmodule_t(&cmods_1); } FX_FREE_ARR(&v_25); FX_FREE_ARR(&results_0); _fx_free_T5BBLSBLS(&__fold_result___0); _fx_free_T5BBLSBLS(&v_26); if (all_clibs_0) { _fx_free_LS(&all_clibs_0); } if (objs_0) { _fx_free_LS(&objs_0); } FX_FREE_STR(&v_27); FX_FREE_STR(&v_28); FX_FREE_STR(&v_29); FX_FREE_STR(&v_30); FX_FREE_STR(&custom_clibs_0); FX_FREE_STR(&v_31); FX_FREE_STR(&custom_clibs_1); FX_FREE_STR(&v_32); FX_FREE_STR(&custom_clibs_2); if (v_33) { _fx_free_LS(&v_33); } if (v_34) { _fx_free_LS(&v_34); } FX_FREE_STR(&v_35); FX_FREE_STR(&clibs_4); FX_FREE_STR(&v_36); FX_FREE_STR(&v_37); FX_FREE_STR(&v_38); FX_FREE_STR(&v_39); FX_FREE_STR(&cmd_0); FX_FREE_STR(&v_40); FX_FREE_STR(&cmd_1); return fx_status; }
/* process_all(fname0) -> bool: top-level driver of the Ficus compilation
 * pipeline. Visible stages, each gated on the previous stage's ok flag:
 *  1) locate the Ficus root/lib directories (find_ficus_dirs); throw Fail
 *     with a detailed help message when the root cannot be found;
 *  2) parse all modules (parse_all) and topologically sort the module
 *     dependency graph (toposort) into Ast__all_modules_sorted;
 *  3) type-check every module (Ast_typecheck.check_mod), collecting errors
 *     in the global Ast__all_compile_errs list;
 *  4) K-normalize (normalize_all_modules) and optimize the K-form
 *     (k_optimize_all), with optional pretty-printing per Options flags
 *     (print_ast0/print_ast/print_k0/print_k);
 *  5) when Options.opt.gen_c is set: translate to C (k2c_all), optionally
 *     build (run_cc) and run the produced app (Sys.command), per
 *     make_app/run_app flags;
 *  6) on failure, replay the accumulated compile errors and print a summary;
 *     the trailing handler additionally formats Fail / Ast.CompileError /
 *     other exceptions (CumulativeParseError is silent -- its errors were
 *     already reported).
 * The boolean outcome goes to *fx_result; fx_status carries the runtime
 * error code (negative on exception). fx_fv is the unused closure pointer. */
FX_EXTERN_C int _fx_M8CompilerFM11process_allB1S(fx_str_t* fname0_0, bool* fx_result, void* fx_fv) { fx_exn_t exn_0 = {0}; _fx_LE __fold_result___0 = 0; _fx_LE v_0 = 0; fx_str_t v_1 = {0}; fx_str_t v_2 = {0}; int fx_status = 0; FX_CALL(_fx_M3AstFM8init_allv0(0), _fx_cleanup); _fx_T2SLS v_3 = {0}; fx_str_t ficus_root_0 = {0}; _fx_LS ficus_path_0 = 0; fx_str_t v_4 = {0}; fx_str_t v_5 = {0}; fx_str_t v_6 = {0}; fx_exn_t v_7 = {0}; _fx_LT2iLi graph_0 = 0; _fx_Li v_8 = 0; _fx_Li v_9 = 0; _fx_Li v_10 = 0; _fx_LS v_11 = 0;
/* remaining temporaries for the later pipeline stages, then stage 1:
 * locate the Ficus root directory and search path */
fx_str_t modules_used_0 = {0}; fx_str_t parsing_complete_0 = {0}; fx_str_t v_12 = {0}; fx_str_t v_13 = {0}; fx_str_t v_14 = {0}; fx_str_t v_15 = {0}; fx_str_t v_16 = {0}; fx_str_t v_17 = {0}; fx_str_t v_18 = {0}; fx_str_t v_19 = {0}; _fx_T2LR17K_form__kmodule_tB v_20 = {0}; _fx_LR17K_form__kmodule_t kmods_0 = 0; _fx_LR17K_form__kmodule_t kmods_1 = 0; fx_str_t v_21 = {0}; fx_str_t v_22 = {0}; fx_str_t v_23 = {0}; _fx_T2LR17K_form__kmodule_tB v_24 = {0}; fx_str_t v_25 = {0}; fx_str_t v_26 = {0}; fx_str_t v_27 = {0}; _fx_LR17K_form__kmodule_t kmods_2 = 0; fx_str_t v_28 = {0}; fx_str_t v_29 = {0}; fx_str_t v_30 = {0}; _fx_T2LR17C_form__cmodule_tB v_31 = {0}; _fx_LR17C_form__cmodule_t cmods_0 = 0; fx_str_t appname_0 = {0}; fx_str_t v_32 = {0}; fx_str_t appname_1 = {0}; _fx_LS v_33 = 0; fx_str_t cmd_0 = {0}; _fx_LE __fold_result___1 = 0; _fx_LE v_34 = 0; fx_str_t v_35 = {0}; fx_str_t v_36 = {0}; FX_CALL(_fx_M8CompilerFM15find_ficus_dirsT2SLS0(&v_3, 0), _fx_catch_7); fx_copy_str(&v_3.t0, &ficus_root_0); FX_COPY_PTR(v_3.t1, &ficus_path_0); if (FX_STR_LENGTH(ficus_root_0) == 0) { FX_CALL(_fx_F6stringS1i(_fx_g15__ficus_major__, &v_4, 0), _fx_catch_7); FX_CALL(_fx_F6stringS1i(_fx_g15__ficus_minor__, &v_5, 0), _fx_catch_7); fx_str_t slit_0 = FX_MAKE_STR("Ficus root directory is not found.\n" U"Please, add the directory \'lib\' containing Builtins.fx to\n" U"\'FICUS_PATH\' environment variable or make sure that either\n" U"1. \'ficus\' executable is put in a directory <ficus_root>/bin\n" U"and there are <ficus_root>/runtime and <ficus_root>/lib.\n" U"2. 
or \'ficus\' executable is in (/usr|/usr/local|/opt|...)/bin and\n" U" there are (/usr|...)/lib/ficus-"); fx_str_t slit_1 = FX_MAKE_STR("."); fx_str_t slit_2 = FX_MAKE_STR("/{runtime, lib}"); { const fx_str_t strs_0[] = { slit_0, v_4, slit_1, v_5, slit_2 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 5, &v_6), _fx_catch_7); } FX_CALL(_fx_F9make_FailE1S(&v_6, &v_7), _fx_catch_7); FX_THROW(&v_7, true, _fx_catch_7); } bool ok_0; FX_CALL(_fx_M8CompilerFM9parse_allB2SLS(fname0_0, ficus_path_0, &ok_0, 0), _fx_catch_7); if (!ok_0) { FX_THROW(&_fx_E30Compiler__CumulativeParseErrorv, false, _fx_catch_7); } _fx_LT2iLi lstend_0 = 0; int_ ni_0 = FX_ARR_SIZE(_fx_g16Ast__all_modules, 0); _fx_N16Ast__defmodule_t* ptr_all_modules_0 = FX_PTR_1D(_fx_N16Ast__defmodule_t, _fx_g16Ast__all_modules, 0); for (int_ i_0 = 0; i_0 < ni_0; i_0++) { _fx_N16Ast__defmodule_t minfo_0 = 0; _fx_Li v_37 = 0; _fx_T2iLi tup_0 = {0}; FX_COPY_PTR(ptr_all_modules_0[i_0], &minfo_0); FX_COPY_PTR(minfo_0->u.defmodule_t.t5, &v_37); _fx_make_T2iLi(minfo_0->u.defmodule_t.t2, v_37, &tup_0); _fx_LT2iLi node_0 = 0; FX_CALL(_fx_cons_LT2iLi(&tup_0, 0, false, &node_0), _fx_catch_0); FX_LIST_APPEND(graph_0, lstend_0, node_0); _fx_catch_0: ; _fx_free_T2iLi(&tup_0); FX_FREE_LIST_SIMPLE(&v_37); if (minfo_0) { _fx_free_N16Ast__defmodule_t(&minfo_0); } FX_CHECK_EXN(_fx_catch_7); } FX_CALL(_fx_M8CompilerFM8toposortLi1LT2iLi(graph_0, &v_8, 0), _fx_catch_7); if (v_8 != 0) { FX_COPY_PTR(v_8->tl, &v_9); } else { FX_FAST_THROW(FX_EXN_NullListError, _fx_catch_7); } FX_CHECK_EXN(_fx_catch_7); if (v_9 != 0) { FX_COPY_PTR(v_9->tl, &v_10); } else { FX_FAST_THROW(FX_EXN_NullListError, _fx_catch_7); } FX_CHECK_EXN(_fx_catch_7); FX_FREE_LIST_SIMPLE(&_fx_g23Ast__all_modules_sorted); FX_COPY_PTR(v_10, &_fx_g23Ast__all_modules_sorted); if (_fx_g12Options__opt.print_ast0) { _fx_Li lst_0 = _fx_g23Ast__all_modules_sorted; for (; lst_0; lst_0 = lst_0->tl) { _fx_N16Ast__defmodule_t minfo_1 = 0; int_ m_0 = lst_0->hd;
FX_CALL(_fx_M3AstFM10get_moduleN16Ast__defmodule_t1i(m_0, &minfo_1, 0), _fx_catch_1); FX_CALL(_fx_M6Ast_ppFM10pprint_modv1N16Ast__defmodule_t(minfo_1, 0), _fx_catch_1); _fx_catch_1: ; if (minfo_1) { _fx_free_N16Ast__defmodule_t(&minfo_1); } FX_CHECK_EXN(_fx_catch_7); } } _fx_LS lstend_1 = 0; _fx_Li lst_1 = _fx_g23Ast__all_modules_sorted; for (; lst_1; lst_1 = lst_1->tl) { fx_str_t res_0 = {0}; int_ m_idx_0 = lst_1->hd; _fx_R9Ast__id_t v_38; FX_CALL(_fx_M3AstFM15get_module_nameRM4id_t1i(m_idx_0, &v_38, 0), _fx_catch_2); FX_CALL(_fx_M3AstFM2ppS1RM4id_t(&v_38, &res_0, 0), _fx_catch_2); _fx_LS node_1 = 0; FX_CALL(_fx_cons_LS(&res_0, 0, false, &node_1), _fx_catch_2); FX_LIST_APPEND(v_11, lstend_1, node_1); _fx_catch_2: ; FX_FREE_STR(&res_0); FX_CHECK_EXN(_fx_catch_7); } fx_str_t slit_3 = FX_MAKE_STR(", "); FX_CALL(_fx_F4joinS2SLS(&slit_3, v_11, &modules_used_0, 0), _fx_catch_7); if (_fx_g21Compiler__iscolorterm) { fx_str_t slit_4 = FX_MAKE_STR(""); FX_CALL(_fx_F6stringS1S(&slit_4, &v_12, 0), _fx_catch_7); fx_str_t slit_5 = FX_MAKE_STR("Parsing complete"); FX_CALL(_fx_F6stringS1S(&slit_5, &v_13, 0), _fx_catch_7); fx_str_t slit_6 = FX_MAKE_STR(""); { const fx_str_t strs_1[] = { v_12, v_13, slit_6 }; FX_CALL(fx_strjoin(0, 0, 0, strs_1, 3, &parsing_complete_0), _fx_catch_7); } } else { fx_str_t slit_7 = FX_MAKE_STR("Parsing complete"); fx_copy_str(&slit_7, &parsing_complete_0); } FX_CALL(_fx_F6stringS1S(&parsing_complete_0, &v_14, 0), _fx_catch_7); FX_CALL(_fx_F6stringS1S(&modules_used_0, &v_15, 0), _fx_catch_7); fx_str_t slit_8 = FX_MAKE_STR(". 
Modules used: "); { const fx_str_t strs_2[] = { v_14, slit_8, v_15 }; FX_CALL(fx_strjoin(0, 0, 0, strs_2, 3, &v_16), _fx_catch_7); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_16, 0), _fx_catch_7); _fx_free_LE(&_fx_g21Ast__all_compile_errs); _fx_g21Ast__all_compile_errs = 0; _fx_Li lst_2 = _fx_g23Ast__all_modules_sorted; for (; lst_2; lst_2 = lst_2->tl) { int_ m_1 = lst_2->hd; FX_CALL(_fx_M13Ast_typecheckFM9check_modv1i(m_1, 0), _fx_catch_3); _fx_catch_3: ; FX_CHECK_EXN(_fx_catch_7); } bool ok_1 = _fx_g21Ast__all_compile_errs == 0; if (ok_1) { if (_fx_g21Compiler__iscolorterm) { fx_str_t slit_9 = FX_MAKE_STR(""); FX_CALL(_fx_F6stringS1S(&slit_9, &v_18, 0), _fx_catch_7); fx_str_t slit_10 = FX_MAKE_STR("Type checking complete"); FX_CALL(_fx_F6stringS1S(&slit_10, &v_19, 0), _fx_catch_7); fx_str_t slit_11 = FX_MAKE_STR(""); { const fx_str_t strs_3[] = { v_18, v_19, slit_11 }; FX_CALL(fx_strjoin(0, 0, 0, strs_3, 3, &v_17), _fx_catch_7); } } else { fx_str_t slit_12 = FX_MAKE_STR("Type checking complete"); fx_copy_str(&slit_12, &v_17); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_17, 0), _fx_catch_7); if (_fx_g12Options__opt.print_ast) { _fx_Li lst_3 = _fx_g23Ast__all_modules_sorted; for (; lst_3; lst_3 = lst_3->tl) { _fx_N16Ast__defmodule_t minfo_2 = 0; int_ m_2 = lst_3->hd; FX_CALL(_fx_M3AstFM10get_moduleN16Ast__defmodule_t1i(m_2, &minfo_2, 0), _fx_catch_4); FX_CALL(_fx_M6Ast_ppFM10pprint_modv1N16Ast__defmodule_t(minfo_2, 0), _fx_catch_4); _fx_catch_4: ; if (minfo_2) { _fx_free_N16Ast__defmodule_t(&minfo_2); } FX_CHECK_EXN(_fx_catch_7); } } } if (ok_1) { _fx_free_LE(&_fx_g21Ast__all_compile_errs); _fx_g21Ast__all_compile_errs = 0; FX_CALL(_fx_M6K_formFM13init_all_idksv0(0), _fx_catch_7); FX_CALL(_fx_M11K_normalizeFM21normalize_all_modulesLR17K_form__kmodule_t1Li(_fx_g23Ast__all_modules_sorted, &kmods_0, 0), _fx_catch_7); _fx_make_T2LR17K_form__kmodule_tB(kmods_0, _fx_g21Ast__all_compile_errs == 0, &v_20); } else { _fx_make_T2LR17K_form__kmodule_tB(0, false, &v_20); } 
/* unpack K-normalization result (kmods, ok); announce and run K-form optimization */
FX_COPY_PTR(v_20.t0, &kmods_1); bool ok_2 = v_20.t1; if (ok_2) { if (_fx_g21Compiler__iscolorterm) { fx_str_t slit_13 = FX_MAKE_STR(""); FX_CALL(_fx_F6stringS1S(&slit_13, &v_22, 0), _fx_catch_7); fx_str_t slit_14 = FX_MAKE_STR("K-normalization complete"); FX_CALL(_fx_F6stringS1S(&slit_14, &v_23, 0), _fx_catch_7); fx_str_t slit_15 = FX_MAKE_STR(""); { const fx_str_t strs_4[] = { v_22, v_23, slit_15 }; FX_CALL(fx_strjoin(0, 0, 0, strs_4, 3, &v_21), _fx_catch_7); } } else { fx_str_t slit_16 = FX_MAKE_STR("K-normalization complete"); fx_copy_str(&slit_16, &v_21); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_21, 0), _fx_catch_7); if (_fx_g12Options__opt.print_k0) { FX_CALL(_fx_M4K_ppFM8pp_kmodsv1LR17K_form__kmodule_t(kmods_1, 0), _fx_catch_7); } } if (ok_2) { if (_fx_g21Compiler__iscolorterm) { fx_str_t slit_17 = FX_MAKE_STR(""); FX_CALL(_fx_F6stringS1S(&slit_17, &v_26, 0), _fx_catch_7); fx_str_t slit_18 = FX_MAKE_STR("K-form optimization started"); FX_CALL(_fx_F6stringS1S(&slit_18, &v_27, 0), _fx_catch_7); fx_str_t slit_19 = FX_MAKE_STR(""); { const fx_str_t strs_5[] = { v_26, v_27, slit_19 }; FX_CALL(fx_strjoin(0, 0, 0, strs_5, 3, &v_25), _fx_catch_7); } } else { fx_str_t slit_20 = FX_MAKE_STR("K-form optimization started"); fx_copy_str(&slit_20, &v_25); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_25, 0), _fx_catch_7); FX_CALL(_fx_M8CompilerFM14k_optimize_allT2LR17K_form__kmodule_tB1LR17K_form__kmodule_t(kmods_1, &v_24, 0), _fx_catch_7); } else { _fx_make_T2LR17K_form__kmodule_tB(0, false, &v_24); } FX_COPY_PTR(v_24.t0, &kmods_2); bool ok_3 = v_24.t1; if (ok_3) { if (_fx_g21Compiler__iscolorterm) { fx_str_t slit_21 = FX_MAKE_STR(""); FX_CALL(_fx_F6stringS1S(&slit_21, &v_29, 0), _fx_catch_7); fx_str_t slit_22 = FX_MAKE_STR("K-form optimization complete"); FX_CALL(_fx_F6stringS1S(&slit_22, &v_30, 0), _fx_catch_7); fx_str_t slit_23 = FX_MAKE_STR(""); { const fx_str_t strs_6[] = { v_29, v_30, slit_23 }; FX_CALL(fx_strjoin(0, 0, 0, strs_6, 3, &v_28), _fx_catch_7); } } else { 
fx_str_t slit_24 = FX_MAKE_STR("K-form optimization complete"); fx_copy_str(&slit_24, &v_28); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_28, 0), _fx_catch_7); if (_fx_g12Options__opt.print_k) { FX_CALL(_fx_M4K_ppFM8pp_kmodsv1LR17K_form__kmodule_t(kmods_2, 0), _fx_catch_7); } } bool ok_4; if (!_fx_g12Options__opt.gen_c) { ok_4 = ok_3; } else { if (ok_3) { FX_CALL(_fx_M8CompilerFM7k2c_allT2LR17C_form__cmodule_tB1LR17K_form__kmodule_t(kmods_2, &v_31, 0), _fx_catch_7); } else { _fx_make_T2LR17C_form__cmodule_tB(0, false, &v_31); } FX_COPY_PTR(v_31.t0, &cmods_0); bool ok_5 = v_31.t1; bool t_0; if (ok_5) { if (_fx_g12Options__opt.make_app) { t_0 = true; } else { t_0 = _fx_g12Options__opt.run_app; } } else { t_0 = false; } bool ok_6; if (t_0) { FX_CALL(_fx_M8CompilerFM6run_ccB2LR17C_form__cmodule_tS(cmods_0, &ficus_root_0, &ok_6, 0), _fx_catch_7); } else { ok_6 = ok_5; } bool t_1; if (ok_6) { t_1 = _fx_g12Options__opt.run_app; } else { t_1 = false; } if (t_1) { fx_copy_str(&_fx_g12Options__opt.app_filename, &appname_0); FX_CALL(_fx_M8FilenameFM6getcwdS0(&v_32, 0), _fx_catch_7); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&v_32, &appname_0, &appname_1, 0), _fx_catch_7); FX_COPY_PTR(_fx_g12Options__opt.app_args, &v_33); FX_CALL(_fx_cons_LS(&appname_1, v_33, false, &v_33), _fx_catch_7); fx_str_t slit_25 = FX_MAKE_STR(" "); FX_CALL(_fx_F4joinS2SLS(&slit_25, v_33, &cmd_0, 0), _fx_catch_7); int_ v_39; FX_CALL(_fx_M3SysFM7commandi1S(&cmd_0, &v_39, 0), _fx_catch_7); ok_4 = v_39 == 0; } else { ok_4 = ok_6; } } if (!ok_4) { int_ nerrs_0 = _fx_M8CompilerFM6lengthi1LE(_fx_g21Ast__all_compile_errs, 0); if (nerrs_0 != 0) { _fx_LE lst_4 = _fx_g21Ast__all_compile_errs; for (; lst_4; lst_4 = lst_4->tl) { _fx_LE r_0 = 0; fx_exn_t* a_0 = &lst_4->hd; FX_COPY_PTR(__fold_result___1, &r_0); FX_CALL(_fx_cons_LE(a_0, r_0, false, &r_0), _fx_catch_5); _fx_free_LE(&__fold_result___1); FX_COPY_PTR(r_0, &__fold_result___1); _fx_catch_5: ; if (r_0) { _fx_free_LE(&r_0); } FX_CHECK_EXN(_fx_catch_7); } 
/* replay each collected compile error (list was reversed by the fold above),
 * then print a summary "<n> errors occured during type checking." line */
FX_COPY_PTR(__fold_result___1, &v_34); _fx_LE lst_5 = v_34; for (; lst_5; lst_5 = lst_5->tl) { fx_exn_t* x_0 = &lst_5->hd; FX_CALL(_fx_M3AstFM17print_compile_errv1E(x_0, 0), _fx_catch_6); _fx_catch_6: ; FX_CHECK_EXN(_fx_catch_7); } FX_CALL(_fx_F6stringS1i(nerrs_0, &v_35, 0), _fx_catch_7); fx_str_t slit_26 = FX_MAKE_STR("\n"); fx_str_t slit_27 = FX_MAKE_STR(" errors occured during type checking."); { const fx_str_t strs_7[] = { slit_26, v_35, slit_27 }; FX_CALL(fx_strjoin(0, 0, 0, strs_7, 3, &v_36), _fx_catch_7); } _fx_F12print_stringv1S(&v_36, 0); fx_str_t slit_28 = FX_MAKE_STR("\n"); _fx_F12print_stringv1S(&slit_28, 0); } } *fx_result = ok_4; _fx_catch_7: ; _fx_free_T2SLS(&v_3); FX_FREE_STR(&ficus_root_0); if (ficus_path_0) { _fx_free_LS(&ficus_path_0); } FX_FREE_STR(&v_4); FX_FREE_STR(&v_5); FX_FREE_STR(&v_6); fx_free_exn(&v_7); if (graph_0) { _fx_free_LT2iLi(&graph_0); } FX_FREE_LIST_SIMPLE(&v_8); FX_FREE_LIST_SIMPLE(&v_9); FX_FREE_LIST_SIMPLE(&v_10); if (v_11) { _fx_free_LS(&v_11); } FX_FREE_STR(&modules_used_0); FX_FREE_STR(&parsing_complete_0); FX_FREE_STR(&v_12); FX_FREE_STR(&v_13); FX_FREE_STR(&v_14); FX_FREE_STR(&v_15); FX_FREE_STR(&v_16); FX_FREE_STR(&v_17); FX_FREE_STR(&v_18); FX_FREE_STR(&v_19); _fx_free_T2LR17K_form__kmodule_tB(&v_20); if (kmods_0) { _fx_free_LR17K_form__kmodule_t(&kmods_0); } if (kmods_1) { _fx_free_LR17K_form__kmodule_t(&kmods_1); } FX_FREE_STR(&v_21); FX_FREE_STR(&v_22); FX_FREE_STR(&v_23); _fx_free_T2LR17K_form__kmodule_tB(&v_24); FX_FREE_STR(&v_25); FX_FREE_STR(&v_26); FX_FREE_STR(&v_27); if (kmods_2) { _fx_free_LR17K_form__kmodule_t(&kmods_2); } FX_FREE_STR(&v_28); FX_FREE_STR(&v_29); FX_FREE_STR(&v_30); _fx_free_T2LR17C_form__cmodule_tB(&v_31); if (cmods_0) { _fx_free_LR17C_form__cmodule_t(&cmods_0); } FX_FREE_STR(&appname_0); FX_FREE_STR(&v_32); FX_FREE_STR(&appname_1); if (v_33) { _fx_free_LS(&v_33); } FX_FREE_STR(&cmd_0); if (__fold_result___1) { _fx_free_LE(&__fold_result___1); } if (v_34) { _fx_free_LE(&v_34); } 
/* below: exception handler -- on a negative status, reset it, re-report any
 * accumulated compile errors, then format the exception by tag
 * (Fail / Ast.CompileError / anything but CumulativeParseError) */
FX_FREE_STR(&v_35); FX_FREE_STR(&v_36); if (fx_status < 0) { fx_exn_get_and_reset(fx_status, &exn_0); fx_status = 0; int_ nerrs_1 = _fx_M8CompilerFM6lengthi1LE(_fx_g21Ast__all_compile_errs, 0); if (nerrs_1 != 0) { _fx_LE lst_6 = _fx_g21Ast__all_compile_errs; for (; lst_6; lst_6 = lst_6->tl) { _fx_LE r_1 = 0; fx_exn_t* a_1 = &lst_6->hd; FX_COPY_PTR(__fold_result___0, &r_1); FX_CALL(_fx_cons_LE(a_1, r_1, false, &r_1), _fx_catch_8); _fx_free_LE(&__fold_result___0); FX_COPY_PTR(r_1, &__fold_result___0); _fx_catch_8: ; if (r_1) { _fx_free_LE(&r_1); } FX_CHECK_EXN(_fx_cleanup); } FX_COPY_PTR(__fold_result___0, &v_0); _fx_LE lst_7 = v_0; for (; lst_7; lst_7 = lst_7->tl) { fx_exn_t* x_1 = &lst_7->hd; FX_CALL(_fx_M3AstFM17print_compile_errv1E(x_1, 0), _fx_catch_9); _fx_catch_9: ; FX_CHECK_EXN(_fx_cleanup); } FX_CALL(_fx_F6stringS1i(nerrs_1, &v_1, 0), _fx_cleanup); fx_str_t slit_29 = FX_MAKE_STR("\n"); fx_str_t slit_30 = FX_MAKE_STR(" errors occured during type checking."); { const fx_str_t strs_8[] = { slit_29, v_1, slit_30 }; FX_CALL(fx_strjoin(0, 0, 0, strs_8, 3, &v_2), _fx_cleanup); } _fx_F12print_stringv1S(&v_2, 0); fx_str_t slit_31 = FX_MAKE_STR("\n"); _fx_F12print_stringv1S(&slit_31, 0); } int tag_0 = exn_0.tag; if (tag_0 == _FX_EXN_E4Fail) { fx_str_t v_40 = {0}; fx_str_t v_41 = {0}; fx_str_t v_42 = {0}; FX_CALL(_fx_F6stringS1S(&_fx_g15Compiler__error, &v_40, 0), _fx_catch_10); FX_CALL(_fx_F6stringS1S(&FX_EXN_DATA(_fx_E4Fail_data_t, exn_0.data), &v_41, 0), _fx_catch_10); fx_str_t slit_32 = FX_MAKE_STR(": "); { const fx_str_t strs_9[] = { v_40, slit_32, v_41 }; FX_CALL(fx_strjoin(0, 0, 0, strs_9, 3, &v_42), _fx_catch_10); } _fx_F12print_stringv1S(&v_42, 0); fx_str_t slit_33 = FX_MAKE_STR("\n"); _fx_F12print_stringv1S(&slit_33, 0); _fx_catch_10: ; FX_FREE_STR(&v_42); FX_FREE_STR(&v_41); FX_FREE_STR(&v_40); } else if (tag_0 == _FX_EXN_E17Ast__CompileError) { FX_CALL(_fx_M3AstFM17print_compile_errv1E(&exn_0, 0), _fx_catch_11); _fx_catch_11: ; } else if (tag_0 != 
_FX_EXN_E30Compiler__CumulativeParseError) { fx_str_t v_43 = {0}; fx_str_t v_44 = {0}; fx_str_t v_45 = {0}; FX_CALL(_fx_F6stringS1S(&_fx_g15Compiler__error, &v_43, 0), _fx_catch_12); FX_CALL(_fx_F6stringS1E(&exn_0, &v_44, 0), _fx_catch_12); fx_str_t slit_34 = FX_MAKE_STR("\n" U"\n"); fx_str_t slit_35 = FX_MAKE_STR(": Exception "); fx_str_t slit_36 = FX_MAKE_STR(" occured"); { const fx_str_t strs_10[] = { slit_34, v_43, slit_35, v_44, slit_36 }; FX_CALL(fx_strjoin(0, 0, 0, strs_10, 5, &v_45), _fx_catch_12); } _fx_F12print_stringv1S(&v_45, 0); fx_str_t slit_37 = FX_MAKE_STR("\n"); _fx_F12print_stringv1S(&slit_37, 0); _fx_catch_12: ; FX_FREE_STR(&v_45); FX_FREE_STR(&v_44); FX_FREE_STR(&v_43); } FX_CHECK_EXN(_fx_cleanup); *fx_result = false; } _fx_cleanup: ; fx_free_exn(&exn_0); if (__fold_result___0) { _fx_free_LE(&__fold_result___0); } if (v_0) { _fx_free_LE(&v_0); } FX_FREE_STR(&v_1); FX_FREE_STR(&v_2); return fx_status; }
/* Module initializer: registers the Compiler.CumulativeParseError exception,
 * detects color-terminal support (Sys.colorterm) and pre-renders the colored
 * "error" prefix string used by the error reporters above. */
FX_EXTERN_C int fx_init_Compiler(void) { FX_REG_SIMPLE_EXN("Compiler.CumulativeParseError", _FX_EXN_E30Compiler__CumulativeParseError, _fx_E30Compiler__CumulativeParseError_info, _fx_E30Compiler__CumulativeParseErrorv); int fx_status = 0; FX_CALL(_fx_M3SysFM9colortermB0(&_fx_g21Compiler__iscolorterm, 0), _fx_cleanup); fx_str_t slit_0 = FX_MAKE_STR("error"); FX_CALL(_fx_M8CompilerFM6clrmsgS2N20Compiler__msgcolor_tS(&_fx_g16Compiler__MsgRed, &slit_0, &_fx_g15Compiler__error, 0), _fx_cleanup); _fx_cleanup: ; return fx_status; }
/* Module deinitializer: releases the global error-prefix string. */
FX_EXTERN_C void fx_deinit_Compiler(void) { FX_FREE_STR(&_fx_g15Compiler__error); }
optimizer.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdlib.h> #include <string.h> #include <math.h> #include <assert.h> #include "cint.h" #include "cvhf.h" #include "optimizer.h" #define MAX(I,J) ((I) > (J) ? (I) : (J)) int int2e_sph(); int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter, int *atm, int natm, int *bas, int nbas, double *env); void CVHFinit_optimizer(CVHFOpt **opt, int *atm, int natm, int *bas, int nbas, double *env) { CVHFOpt *opt0 = (CVHFOpt *)malloc(sizeof(CVHFOpt)); opt0->nbas = nbas; opt0->direct_scf_cutoff = 1e-14; opt0->q_cond = NULL; opt0->dm_cond = NULL; opt0->fprescreen = &CVHFnoscreen; opt0->r_vkscreen = &CVHFr_vknoscreen; *opt = opt0; } void CVHFdel_optimizer(CVHFOpt **opt) { CVHFOpt *opt0 = *opt; if (!opt0) { return; } if (!opt0->q_cond) { free(opt0->q_cond); } if (!opt0->dm_cond) { free(opt0->dm_cond); } free(opt0); *opt = NULL; } int CVHFnoscreen(int *shls, CVHFOpt *opt, int *atm, int *bas, double *env) { return 1; } int CVHFnr_schwarz_cond(int *shls, CVHFOpt *opt, int *atm, int *bas, double *env) { if (!opt) { return 1; } int i = shls[0]; int j = shls[1]; int k = shls[2]; int l = shls[3]; size_t n = opt->nbas; assert(opt->q_cond); assert(i < n); assert(j < n); assert(k < n); assert(l < n); double qijkl = opt->q_cond[i*n+j] * opt->q_cond[k*n+l]; return qijkl > opt->direct_scf_cutoff; } int CVHFnrs8_prescreen(int *shls, CVHFOpt *opt, int 
*atm, int *bas, double *env) { if (!opt) { return 1; // no screen } int i = shls[0]; int j = shls[1]; int k = shls[2]; int l = shls[3]; size_t n = opt->nbas; double *q_cond = opt->q_cond; double *dm_cond = opt->dm_cond; assert(q_cond); assert(dm_cond); assert(i < n); assert(j < n); assert(k < n); assert(l < n); double qijkl = q_cond[i*n+j] * q_cond[k*n+l]; double direct_scf_cutoff = opt->direct_scf_cutoff; return qijkl > direct_scf_cutoff &&((4*dm_cond[j*n+i]*qijkl > direct_scf_cutoff) || (4*dm_cond[l*n+k]*qijkl > direct_scf_cutoff) || ( dm_cond[j*n+k]*qijkl > direct_scf_cutoff) || ( dm_cond[j*n+l]*qijkl > direct_scf_cutoff) || ( dm_cond[i*n+k]*qijkl > direct_scf_cutoff) || ( dm_cond[i*n+l]*qijkl > direct_scf_cutoff)); } int CVHFnrs8_vj_prescreen(int *shls, CVHFOpt *opt, int *atm, int *bas, double *env) { if (!opt) { return 1; // no screen } int i = shls[0]; int j = shls[1]; int k = shls[2]; int l = shls[3]; size_t n = opt->nbas; assert(opt->q_cond); assert(opt->dm_cond); assert(i < n); assert(j < n); assert(k < n); assert(l < n); double direct_scf_cutoff = opt->direct_scf_cutoff; double qijkl = opt->q_cond[i*n+j] * opt->q_cond[k*n+l]; return qijkl > direct_scf_cutoff &&((4*qijkl*opt->dm_cond[j*n+i] > direct_scf_cutoff) || (4*qijkl*opt->dm_cond[l*n+k] > direct_scf_cutoff)); } int CVHFnrs8_vk_prescreen(int *shls, CVHFOpt *opt, int *atm, int *bas, double *env) { if (!opt) { return 1; // no screen } int i = shls[0]; int j = shls[1]; int k = shls[2]; int l = shls[3]; size_t n = opt->nbas; double *q_cond = opt->q_cond; double *dm_cond = opt->dm_cond; assert(q_cond); assert(dm_cond); assert(i < n); assert(j < n); assert(k < n); assert(l < n); double qijkl = q_cond[i*n+j] * q_cond[k*n+l]; double direct_scf_cutoff = opt->direct_scf_cutoff; return qijkl > direct_scf_cutoff &&(( dm_cond[j*n+k]*qijkl > direct_scf_cutoff) || ( dm_cond[j*n+l]*qijkl > direct_scf_cutoff) || ( dm_cond[i*n+k]*qijkl > direct_scf_cutoff) || ( dm_cond[i*n+l]*qijkl > direct_scf_cutoff)); } // return 
flag to decide whether transpose01324 int CVHFr_vknoscreen(int *shls, CVHFOpt *opt, double **dms_cond, int n_dm, double *dm_atleast, int *atm, int *bas, double *env) { int idm; for (idm = 0; idm < n_dm; idm++) { dms_cond[idm] = NULL; } *dm_atleast = 0; return 1; } int CVHFnr3c2e_vj_pass1_prescreen(int *shls, CVHFOpt *opt, int *atm, int *bas, double *env) { if (!opt) { return 1; // no screen } size_t n = opt->nbas; int i = shls[0]; int j = shls[1]; // Be careful with the range of basis k, which is between nbas and // nbas+nauxbas. See shls_slice in df_jk.get_j function. int k = shls[2] - n; assert(opt->q_cond); assert(opt->dm_cond); assert(i < n); assert(j < n); assert(k < n); double direct_scf_cutoff = opt->direct_scf_cutoff; double qijkl = opt->q_cond[i*n+j] * opt->q_cond[n*n+k]; return qijkl > direct_scf_cutoff && (4*qijkl*opt->dm_cond[j*n+i] > direct_scf_cutoff); } int CVHFnr3c2e_vj_pass2_prescreen(int *shls, CVHFOpt *opt, int *atm, int *bas, double *env) { if (!opt) { return 1; // no screen } size_t n = opt->nbas; int i = shls[0]; int j = shls[1]; // Be careful with the range of basis k, which is between nbas and // nbas+nauxbas. See shls_slice in df_jk.get_j function. int k = shls[2] - n; assert(opt->q_cond); assert(opt->dm_cond); assert(i < n); assert(j < n); assert(k < n); double direct_scf_cutoff = opt->direct_scf_cutoff; double qijkl = opt->q_cond[i*n+j] * opt->q_cond[n*n+k]; return qijkl > direct_scf_cutoff && (4*qijkl*opt->dm_cond[k] > direct_scf_cutoff); } int CVHFnr3c2e_schwarz_cond(int *shls, CVHFOpt *opt, int *atm, int *bas, double *env) { if (!opt) { return 1; // no screen } size_t n = opt->nbas; int i = shls[0]; int j = shls[1]; // Be careful with the range of basis k, which is between nbas and // nbas+nauxbas. See shls_slice in df_jk.get_j function. 
int k = shls[2] - n; assert(opt->q_cond); assert(opt->dm_cond); assert(i < n); assert(j < n); assert(k < n); double qijkl = opt->q_cond[i*n+j] * opt->q_cond[n*n+k]; return qijkl > opt->direct_scf_cutoff; } void CVHFset_direct_scf_cutoff(CVHFOpt *opt, double cutoff) { opt->direct_scf_cutoff = cutoff; } double CVHFget_direct_scf_cutoff(CVHFOpt *opt) { return opt->direct_scf_cutoff; } void CVHFsetnr_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt, int *ao_loc, int *atm, int natm, int *bas, int nbas, double *env) { /* This memory is released in void CVHFdel_optimizer, Don't know * why valgrind raises memory leak here */ if (opt->q_cond) { free(opt->q_cond); } // nbas in the input arguments may different to opt->nbas. // Use opt->nbas because it is used in the prescreen function nbas = opt->nbas; opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas); CVHFset_int2e_q_cond(intor, cintopt, opt->q_cond, ao_loc, atm, natm, bas, nbas, env); } /* * Non-relativistic 2-electron integrals */ void CVHFset_int2e_q_cond(int (*intor)(), CINTOpt *cintopt, double *q_cond, int *ao_loc, int *atm, int natm, int *bas, int nbas, double *env) { int shls_slice[] = {0, nbas}; const int cache_size = GTOmax_cache_size(intor, shls_slice, 1, atm, natm, bas, nbas, env); #pragma omp parallel { double qtmp, tmp; size_t ij, i, j, di, dj, ish, jsh; size_t Nbas = nbas; int shls[4]; double *cache = malloc(sizeof(double) * cache_size); di = 0; for (ish = 0; ish < nbas; ish++) { dj = ao_loc[ish+1] - ao_loc[ish]; di = MAX(di, dj); } double *buf = malloc(sizeof(double) * di*di*di*di); #pragma omp for schedule(dynamic, 4) for (ij = 0; ij < Nbas*(Nbas+1)/2; ij++) { ish = (size_t)(sqrt(2*ij+.25) - .5 + 1e-7); jsh = ij - ish*(ish+1)/2; di = ao_loc[ish+1] - ao_loc[ish]; dj = ao_loc[jsh+1] - ao_loc[jsh]; shls[0] = ish; shls[1] = jsh; shls[2] = ish; shls[3] = jsh; qtmp = 1e-100; if (0 != (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, cintopt, cache)) { for (i = 0; i < di; i++) { for (j = 0; j < 
dj; j++) { tmp = fabs(buf[i+di*j+di*dj*i+di*dj*di*j]); qtmp = MAX(qtmp, tmp); } } qtmp = sqrt(qtmp); } q_cond[ish*nbas+jsh] = qtmp; q_cond[jsh*nbas+ish] = qtmp; } free(buf); free(cache); } } void CVHFset_q_cond(CVHFOpt *opt, double *q_cond, int len) { if (opt->q_cond) { free(opt->q_cond); } opt->q_cond = (double *)malloc(sizeof(double) * len); memcpy(opt->q_cond, q_cond, sizeof(double) * len); } void CVHFsetnr_direct_scf_dm(CVHFOpt *opt, double *dm, int nset, int *ao_loc, int *atm, int natm, int *bas, int nbas, double *env) { if (opt->dm_cond) { // NOT reuse opt->dm_cond because nset may be diff in different call free(opt->dm_cond); } // nbas in the input arguments may different to opt->nbas. // Use opt->nbas because it is used in the prescreen function nbas = opt->nbas; opt->dm_cond = (double *)malloc(sizeof(double) * nbas*nbas); memset(opt->dm_cond, 0, sizeof(double)*nbas*nbas); const size_t nao = ao_loc[nbas]; double dmax, tmp; size_t i, j, ish, jsh, iset; double *pdm; for (ish = 0; ish < nbas; ish++) { for (jsh = 0; jsh <= ish; jsh++) { dmax = 0; for (iset = 0; iset < nset; iset++) { pdm = dm + nao*nao*iset; for (i = ao_loc[ish]; i < ao_loc[ish+1]; i++) { for (j = ao_loc[jsh]; j < ao_loc[jsh+1]; j++) { // symmetrize dm_cond because nrs8_prescreen only tests the lower (or upper) // triangular part of dm_cond. Without the symmetrization, some integrals may be // incorrectly skipped. 
tmp = .5 * (fabs(pdm[i*nao+j]) + fabs(pdm[j*nao+i])); dmax = MAX(dmax, tmp); } } } opt->dm_cond[ish*nbas+jsh] = dmax; opt->dm_cond[jsh*nbas+ish] = dmax; } } } void CVHFset_dm_cond(CVHFOpt *opt, double *dm_cond, int len) { if (opt->dm_cond) { free(opt->dm_cond); } opt->dm_cond = (double *)malloc(sizeof(double) * len); memcpy(opt->dm_cond, dm_cond, sizeof(double) * len); } /* ************************************************* */ void CVHFnr_optimizer(CVHFOpt **vhfopt, int (*intor)(), CINTOpt *cintopt, int *ao_loc, int *atm, int natm, int *bas, int nbas, double *env) { CVHFinit_optimizer(vhfopt, atm, natm, bas, nbas, env); (*vhfopt)->fprescreen = &CVHFnrs8_prescreen; CVHFsetnr_direct_scf(*vhfopt, intor, cintopt, ao_loc, atm, natm, bas, nbas, env); }
2Dpfold.c
/* * minimum free energy * RNA secondary structure with * basepair distance d_1 to reference structure 1 and distance d_2 to reference structure 2 * */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <stdlib.h> #include <math.h> #include <ctype.h> #include <string.h> #include <float.h> /* #defines FLT_MAX ... */ #include "ViennaRNA/utils/basic.h" #include "ViennaRNA/fold_vars.h" #include "ViennaRNA/params/basic.h" #include "ViennaRNA/params/default.h" #include "ViennaRNA/loops/all.h" #include "ViennaRNA/2Dpfold.h" /* ################################# # GLOBAL VARIABLES # ################################# */ /* ################################# # PRIVATE VARIABLES # ################################# */ /* ################################# # PRIVATE FUNCTION DECLARATIONS # ################################# */ PRIVATE void crosslink(TwoDpfold_vars *vars); PRIVATE void pf2D_linear(vrna_fold_compound_t *vc); PRIVATE void pf2D_circ(vrna_fold_compound_t *vc); PRIVATE char *pbacktrack_circ(vrna_fold_compound_t *vc, int d1, int d2); PRIVATE void backtrack(vrna_fold_compound_t *vc, char *pstruc, int d1, int d2, unsigned int i, unsigned int j); PRIVATE void backtrack_qm(vrna_fold_compound_t *vc, char *pstruc, int d1, int d2, unsigned int i, unsigned int j); PRIVATE void backtrack_qm1(vrna_fold_compound_t *vc, char *pstruc, int d1, int d2, unsigned int i, unsigned int j); PRIVATE void backtrack_qm2(vrna_fold_compound_t *vc, char *pstruc, int d1, int d2, unsigned int k); PRIVATE void backtrack_qcH(vrna_fold_compound_t *vc, char *pstruc, int d1, int d2); PRIVATE void backtrack_qcI(vrna_fold_compound_t *vc, char *pstruc, int d1, int d2); PRIVATE void backtrack_qcM(vrna_fold_compound_t *vc, char *pstruc, int d1, int d2); PRIVATE void adjustArrayBoundaries(FLT_OR_DBL ***array, int *k_min, int *k_max, int **l_min, int **l_max, int k_min_real, int k_max_real, int *l_min_real, int *l_max_real); INLINE PRIVATE void preparePosteriorBoundaries(int size, int 
shift, int *min_k, int *max_k, int **min_l, int **max_l); INLINE PRIVATE void updatePosteriorBoundaries(int d1, int d2, int *min_k, int *max_k, int **min_l, int **max_l); INLINE PRIVATE void prepareBoundaries(int min_k_pre, int max_k_pre, int min_l_pre, int max_l_pre, int bpdist, int *min_k, int *max_k, int **min_l, int **max_l); INLINE PRIVATE void prepareArray(FLT_OR_DBL ***array, int min_k, int max_k, int *min_l, int *max_l); /* ################################# # BEGIN OF FUNCTION DEFINITIONS # ################################# */ PUBLIC vrna_sol_TwoD_pf_t * vrna_pf_TwoD(vrna_fold_compound_t *vc, int distance1, int distance2) { unsigned int maxD1 = 0, maxD2 = 0, counter = 0; int cnt1, cnt2, k_min, k_max, l_min, l_max, ndx; FLT_OR_DBL q = 0.; vrna_sol_TwoD_pf_t *output; vrna_md_t *md; vrna_mx_pf_t *matrices; maxD1 = vc->maxD1; maxD2 = vc->maxD2; matrices = vc->exp_matrices; md = &(vc->exp_params->model_details); if (distance1 >= 0) { if ((unsigned int)distance1 > maxD1) vrna_message_warning("vrna_pf_TwoD@2Dpfold.c: limiting maximum basepair distance 1 to %u\n", maxD1); else maxD1 = (unsigned int)distance1; } if (distance2 >= 0) { if ((unsigned int)distance2 > maxD2) vrna_message_warning("vrna_pf_TwoD@2Dpfold.c: limiting maximum basepair distance 2 to %u\n", maxD2); else maxD2 = (unsigned int)distance2; } vc->maxD1 = maxD1; vc->maxD2 = maxD2; output = (vrna_sol_TwoD_pf_t *)vrna_alloc((((maxD1 + 1) * (maxD2 + 2)) / 2 + 2) * sizeof(vrna_sol_TwoD_pf_t)); pf2D_linear(vc); if (md->circ) pf2D_circ(vc); ndx = vc->iindx[1] - vc->length; k_min = (md->circ) ? matrices->k_min_Q_c : matrices->k_min_Q[ndx]; k_max = (md->circ) ? matrices->k_max_Q_c : matrices->k_max_Q[ndx]; for (cnt1 = k_min; cnt1 <= k_max; cnt1++) { l_min = (md->circ) ? matrices->l_min_Q_c[cnt1] : matrices->l_min_Q[ndx][cnt1]; l_max = (md->circ) ? matrices->l_max_Q_c[cnt1] : matrices->l_max_Q[ndx][cnt1]; for (cnt2 = l_min; cnt2 <= l_max; cnt2 += 2) { q = (md->circ) ? 
matrices->Q_c[cnt1][cnt2 / 2] : matrices->Q[ndx][cnt1][cnt2 / 2]; if (q == 0.) continue; output[counter].k = cnt1; output[counter].l = cnt2; output[counter].q = q; counter++; } } /* store entry for remaining partition if it exists */ q = (md->circ) ? matrices->Q_c_rem : matrices->Q_rem[ndx]; if (q != 0.) { output[counter].k = -1; output[counter].l = -1; output[counter].q = q; counter++; } /* insert end-marker entry */ output[counter].k = output[counter].l = INF; counter++; /* resize to actual dataset amount */ output = (vrna_sol_TwoD_pf_t *)vrna_realloc(output, sizeof(vrna_sol_TwoD_pf_t) * counter); return output; } #if 0 PUBLIC FLT_OR_DBL ** TwoDpfold(TwoDpfold_vars *vars, int distance1, int distance2) { unsigned int i; unsigned int maxD1 = 0; unsigned int maxD2 = 0; unsigned int mm; int cnt1, cnt2; FLT_OR_DBL **output; initialize_TwoDpfold_vars(vars); vars->S = encode_sequence(vars->sequence, 0); vars->S1 = encode_sequence(vars->sequence, 1); make_ptypes2(vars); for (i = 1; i <= (unsigned int)vars->reference_pt1[0]; i++) if (i < (unsigned int)vars->reference_pt1[i]) maxD1++; for (i = 1; i <= (unsigned int)vars->reference_pt2[0]; i++) if (i < (unsigned int)vars->reference_pt2[i]) maxD2++; mm = maximumMatching(vars->sequence); maxD1 += mm; maxD2 += mm; if (distance1 >= 0) { if ((unsigned int)distance1 > maxD1) fprintf(stderr, "limiting maximum basepair distance 1 to %u\n", maxD1); maxD1 = (unsigned int)distance1; } if (distance2 >= 0) { if ((unsigned int)distance2 > maxD2) fprintf(stderr, "limiting maximum basepair distance 2 to %u\n", maxD2); maxD2 = (unsigned int)distance2; } vars->maxD1 = maxD1; vars->maxD2 = maxD2; output = (FLT_OR_DBL **)vrna_alloc(sizeof(FLT_OR_DBL *) * (maxD1 + 1)); pf2D_linear(vars); int ndx = vars->my_iindx[1] - vars->seq_length; for (cnt1 = vars->k_min_values[ndx]; cnt1 <= MIN2(vars->k_max_values[ndx], vars->maxD1); cnt1++) { output[cnt1] = (FLT_OR_DBL *)vrna_alloc((vars->maxD2 + 1) * sizeof(FLT_OR_DBL)); for (cnt2 = 
vars->l_min_values[ndx][cnt1]; cnt2 <= MIN2(vars->l_max_values[ndx][cnt1], vars->maxD2); cnt2 += 2) output[cnt1][cnt2] = vars->Q[ndx][cnt1][cnt2 / 2]; } return output; } PUBLIC FLT_OR_DBL ** TwoDpfold_circ(TwoDpfold_vars *vars, int distance1, int distance2) { unsigned int i; unsigned int maxD1 = 0; unsigned int maxD2 = 0; unsigned int mm; int cnt1, cnt2; FLT_OR_DBL **output; initialize_TwoDpfold_vars(vars); vars->S = encode_sequence(vars->sequence, 0); vars->S1 = encode_sequence(vars->sequence, 1); make_ptypes2(vars); for (i = 1; i <= (unsigned int)vars->reference_pt1[0]; i++) if (i < (unsigned int)vars->reference_pt1[i]) maxD1++; for (i = 1; i <= (unsigned int)vars->reference_pt2[0]; i++) if (i < (unsigned int)vars->reference_pt2[i]) maxD2++; mm = maximumMatching(vars->sequence); maxD1 += mm; maxD2 += mm; if (distance1 >= 0) { if ((unsigned int)distance1 > maxD1) fprintf(stderr, "limiting maximum basepair distance 1 to %u\n", maxD1); maxD1 = (unsigned int)distance1; } if (distance2 >= 0) { if ((unsigned int)distance2 > maxD2) fprintf(stderr, "limiting maximum basepair distance 2 to %u\n", maxD2); maxD2 = (unsigned int)distance2; } vars->maxD1 = maxD1; vars->maxD2 = maxD2; output = (FLT_OR_DBL **)vrna_alloc(sizeof(FLT_OR_DBL *) * (maxD1 + 1)); pf2D_linear(vars); pf2D_circ(vars); for (cnt1 = vars->k_min_values_qc; cnt1 <= MIN2(vars->k_max_values_qc, vars->maxD1); cnt1++) { output[cnt1] = (FLT_OR_DBL *)vrna_alloc((vars->maxD2 + 1) * sizeof(FLT_OR_DBL)); for (cnt2 = vars->l_min_values_qc[cnt1]; cnt2 <= MIN2(vars->l_max_values_qc[cnt1], vars->maxD2); cnt2 += 2) output[cnt1][cnt2] = vars->Q_c[cnt1][cnt2 / 2]; } return output; } #endif PRIVATE void pf2D_linear(vrna_fold_compound_t *vc) { char *sequence, *ptype; short *S1, *reference_pt1, *reference_pt2; unsigned int *referenceBPs1, *referenceBPs2, d, i, j, ij, seq_length, maxD1, maxD2, *mm1, *mm2, *bpdist; int *my_iindx, *jindx, circ, cnt1, cnt2, cnt3, cnt4, *rtype; double max_real; FLT_OR_DBL *scale, Qmax; 
vrna_exp_param_t *pf_params; vrna_mx_pf_t *matrices; vrna_md_t *md; max_real = (sizeof(FLT_OR_DBL) == sizeof(float)) ? FLT_MAX : DBL_MAX; pf_params = vc->exp_params; md = &(pf_params->model_details); matrices = vc->exp_matrices; sequence = vc->sequence; seq_length = vc->length; maxD1 = vc->maxD1; maxD2 = vc->maxD2; S1 = vc->sequence_encoding; ptype = vc->ptype; rtype = &(md->rtype[0]); scale = matrices->scale; reference_pt1 = vc->reference_pt1; reference_pt2 = vc->reference_pt2; my_iindx = vc->iindx; jindx = vc->jindx; referenceBPs1 = vc->referenceBPs1; referenceBPs2 = vc->referenceBPs2; dangles = md->dangles; circ = md->circ; mm1 = vc->mm1; mm2 = vc->mm2; bpdist = vc->bpdist; Qmax = 0.; /*array initialization ; qb,qm,q * qb,qm,q (i,j) are stored as ((n+1-i)*(n-i) div 2 + n+1-j */ for (j = 1; j <= seq_length; j++) for (i = (j > TURN ? (j - TURN) : 1); i <= j; i++) { ij = my_iindx[i] - j; matrices->k_min_Q[ij] = 0; matrices->k_max_Q[ij] = 0; matrices->l_min_Q[ij] = (int *)vrna_alloc(sizeof(int)); matrices->l_max_Q[ij] = (int *)vrna_alloc(sizeof(int)); matrices->l_min_Q[ij][0] = 0; matrices->l_max_Q[ij][0] = 0; matrices->Q[ij] = (FLT_OR_DBL **)vrna_alloc(sizeof(FLT_OR_DBL *)); matrices->Q[ij][0] = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL)); matrices->Q[ij][0][0] = 1.0 * scale[j - i + 1]; } for (d = TURN + 2; d <= seq_length; d++) { /* i,j in [1..seq_length] */ #ifdef _OPENMP #pragma omp parallel for private(i, j, ij, cnt1, cnt2, cnt3, cnt4) #endif for (j = d; j <= seq_length; j++) { unsigned int k, l, kl, u, ii, dij; int no_close, type, type_2, tt, da, db, base_da, base_db; FLT_OR_DBL temp2, aux_en; i = j - d + 1; ij = my_iindx[i] - j; dij = j - i - 1; type = ptype[jindx[j] + i]; no_close = (((type == 3) || (type == 4)) && no_closingGU); if (type) { /* we have a pair */ int k_min_Q_B, k_max_Q_B, l_min_Q_B, l_max_Q_B; int k_min_post_b, k_max_post_b, *l_min_post_b, *l_max_post_b; int update_b = 0; if (!matrices->Q_B[ij]) { update_b = 1; k_min_Q_B = l_min_Q_B = 0; 
k_max_Q_B = mm1[ij] + referenceBPs1[ij]; l_max_Q_B = mm2[ij] + referenceBPs2[ij]; prepareBoundaries(k_min_Q_B, k_max_Q_B, l_min_Q_B, l_max_Q_B, bpdist[ij], &matrices->k_min_Q_B[ij], &matrices->k_max_Q_B[ij], &matrices->l_min_Q_B[ij], &matrices->l_max_Q_B[ij] ); preparePosteriorBoundaries(matrices->k_max_Q_B[ij] - matrices->k_min_Q_B[ij] + 1, matrices->k_min_Q_B[ij], &k_min_post_b, &k_max_post_b, &l_min_post_b, &l_max_post_b ); prepareArray(&matrices->Q_B[ij], matrices->k_min_Q_B[ij], matrices->k_max_Q_B[ij], matrices->l_min_Q_B[ij], matrices->l_max_Q_B[ij] ); } /* hairpin ----------------------------------------------*/ /* get distance to reference if closing the hairpin * d1a = dbp(T1_{i,j}, {i,j}) */ base_da = ((unsigned int)reference_pt1[i] != j) ? 1 : -1; base_db = ((unsigned int)reference_pt2[i] != j) ? 1 : -1; da = base_da + referenceBPs1[ij]; db = base_db + referenceBPs2[ij]; if (!no_close) { if ((da >= 0) && (db >= 0)) { if (((unsigned int)da <= maxD1) && ((unsigned int)db <= maxD2)) { matrices->Q_B[ij][da][db / 2] = exp_E_Hairpin(dij, type, S1[i + 1], S1[j - 1], sequence + i - 1, pf_params) * scale[dij + 2]; if (update_b) { updatePosteriorBoundaries(da, db, &k_min_post_b, &k_max_post_b, &l_min_post_b, &l_max_post_b ); } } else { matrices->Q_B_rem[ij] = exp_E_Hairpin(dij, type, S1[i + 1], S1[j - 1], sequence + i - 1, pf_params) * scale[dij + 2]; } } } /*-------------------------------------------------------- * check for elementary structures involving more than one * closing pair. 
* --------------------------------------------------------*/ for (k = i + 1; k <= MIN2(j - 2 - TURN, i + MAXLOOP + 1); k++) { unsigned int minl, ln_pre; minl = k + TURN + 1; ln_pre = dij + k; if (ln_pre > minl + MAXLOOP) minl = ln_pre - MAXLOOP - 1; for (l = minl; l < j; l++) { kl = my_iindx[k] - l; type_2 = ptype[jindx[l] + k]; if (type_2 == 0) continue; type_2 = rtype[type_2]; aux_en = exp_E_IntLoop(k - i - 1, j - l - 1, type, type_2, S1[i + 1], S1[j - 1], S1[k - 1], S1[l + 1], pf_params) * scale[k - i + j - l]; /* get distance to reference if closing the interior loop * d2 = dbp(S_{i,j}, S_{k,l} + {i,j}) */ da = base_da + referenceBPs1[ij] - referenceBPs1[kl]; db = base_db + referenceBPs2[ij] - referenceBPs2[kl]; if (matrices->Q_B_rem[kl]) matrices->Q_B_rem[ij] += matrices->Q_B_rem[kl] * aux_en; if (!matrices->Q_B[kl]) continue; for (cnt1 = matrices->k_min_Q_B[kl]; cnt1 <= matrices->k_max_Q_B[kl]; cnt1++) for (cnt2 = matrices->l_min_Q_B[kl][cnt1]; cnt2 <= matrices->l_max_Q_B[kl][cnt1]; cnt2 += 2) { if (((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)) { matrices->Q_B[ij][cnt1 + da][(cnt2 + db) / 2] += matrices->Q_B[kl][cnt1][cnt2 / 2] * aux_en; if (update_b) { updatePosteriorBoundaries(da + cnt1, db + cnt2, &k_min_post_b, &k_max_post_b, &l_min_post_b, &l_max_post_b ); } } else { matrices->Q_B_rem[ij] += matrices->Q_B[kl][cnt1][cnt2 / 2] * aux_en; } } } /* end l-loop */ } /* end k-loop */ /* multi-loop contribution ------------------------*/ if (!no_close) { for (u = i + TURN + 2; u < j - TURN - 2; u++) { tt = rtype[type]; temp2 = pf_params->expMLclosing * exp_E_MLstem(tt, S1[j - 1], S1[i + 1], pf_params) * scale[2]; if (matrices->Q_M_rem[my_iindx[i + 1] - u]) { if (matrices->Q_M1[jindx[j - 1] + u + 1]) { for (cnt1 = matrices->k_min_Q_M1[jindx[j - 1] + u + 1]; cnt1 <= matrices->k_max_Q_M1[jindx[j - 1] + u + 1]; cnt1++) for (cnt2 = matrices->l_min_Q_M1[jindx[j - 1] + u + 1][cnt1]; cnt2 <= matrices->l_max_Q_M1[jindx[j - 1] + u + 1][cnt1]; cnt2 += 2) 
matrices->Q_B_rem[ij] += matrices->Q_M_rem[my_iindx[i + 1] - u] * matrices->Q_M1[jindx[j - 1] + u + 1][cnt1][cnt2 / 2] * temp2; } if (matrices->Q_M1_rem[jindx[j - 1] + u + 1]) matrices->Q_B_rem[ij] += matrices->Q_M_rem[my_iindx[i + 1] - u] * matrices->Q_M1_rem[jindx[j - 1] + u + 1] * temp2; } if (matrices->Q_M1_rem[jindx[j - 1] + u + 1]) { if (matrices->Q_M[my_iindx[i + 1] - u]) { for (cnt1 = matrices->k_min_Q_M[my_iindx[i + 1] - u]; cnt1 <= matrices->k_max_Q_M[my_iindx[i + 1] - u]; cnt1++) for (cnt2 = matrices->l_min_Q_M[my_iindx[i + 1] - u][cnt1]; cnt2 <= matrices->l_max_Q_M[my_iindx[i + 1] - u][cnt1]; cnt2 += 2) matrices->Q_B_rem[ij] += matrices->Q_M[my_iindx[i + 1] - u][cnt1][cnt2 / 2] * matrices->Q_M1_rem[jindx[j - 1] + u + 1] * temp2; } } /* get distance to reference if closing the multiloop * dist3 = dbp(S_{i,j}, {i,j} + S_{i+1,u} + S_{u+1,j-1}) */ da = base_da + referenceBPs1[ij] - referenceBPs1[my_iindx[i + 1] - u] - referenceBPs1[my_iindx[u + 1] - j + 1]; db = base_db + referenceBPs2[ij] - referenceBPs2[my_iindx[i + 1] - u] - referenceBPs2[my_iindx[u + 1] - j + 1]; if (!matrices->Q_M[my_iindx[i + 1] - u]) continue; if (!matrices->Q_M1[jindx[j - 1] + u + 1]) continue; for (cnt1 = matrices->k_min_Q_M[my_iindx[i + 1] - u]; cnt1 <= matrices->k_max_Q_M[my_iindx[i + 1] - u]; cnt1++) for (cnt2 = matrices->l_min_Q_M[my_iindx[i + 1] - u][cnt1]; cnt2 <= matrices->l_max_Q_M[my_iindx[i + 1] - u][cnt1]; cnt2 += 2) { for (cnt3 = matrices->k_min_Q_M1[jindx[j - 1] + u + 1]; cnt3 <= matrices->k_max_Q_M1[jindx[j - 1] + u + 1]; cnt3++) for (cnt4 = matrices->l_min_Q_M1[jindx[j - 1] + u + 1][cnt3]; cnt4 <= matrices->l_max_Q_M1[jindx[j - 1] + u + 1][cnt3]; cnt4 += 2) { if (((cnt1 + cnt3 + da) <= maxD1) && ((cnt2 + cnt4 + db) <= maxD2)) { matrices->Q_B[ij][cnt1 + cnt3 + da][(cnt2 + cnt4 + db) / 2] += matrices->Q_M[my_iindx[i + 1] - u][cnt1][cnt2 / 2] * matrices->Q_M1[jindx[j - 1] + u + 1][cnt3][cnt4 / 2] * temp2; if (update_b) { updatePosteriorBoundaries(cnt1 + cnt3 + da, cnt2 
+ cnt4 + db, &k_min_post_b, &k_max_post_b, &l_min_post_b, &l_max_post_b ); } } else { matrices->Q_B_rem[ij] += matrices->Q_M[my_iindx[i + 1] - u][cnt1][cnt2 / 2] * matrices->Q_M1[jindx[j - 1] + u + 1][cnt3][cnt4 / 2] * temp2; } } } } } if (update_b) { adjustArrayBoundaries(&matrices->Q_B[ij], &matrices->k_min_Q_B[ij], &matrices->k_max_Q_B[ij], &matrices->l_min_Q_B[ij], &matrices->l_max_Q_B[ij], k_min_post_b, k_max_post_b, l_min_post_b, l_max_post_b ); } } /* end >> if (pair) << */ /* free ends ? -----------------------------------------*/ int k_min_Q_M, k_max_Q_M, l_min_Q_M, l_max_Q_M; int k_min_post_m, k_max_post_m, *l_min_post_m, *l_max_post_m; int update_m = 0; int k_min_Q_M1, k_max_Q_M1, l_min_Q_M1, l_max_Q_M1; int k_min_post_m1, k_max_post_m1, *l_min_post_m1, *l_max_post_m1; int update_m1 = 0; if (!matrices->Q_M[ij]) { update_m = 1; k_min_Q_M = l_min_Q_M = 0; k_max_Q_M = mm1[ij] + referenceBPs1[ij]; l_max_Q_M = mm2[ij] + referenceBPs2[ij]; prepareBoundaries(k_min_Q_M, k_max_Q_M, l_min_Q_M, l_max_Q_M, bpdist[ij], &matrices->k_min_Q_M[ij], &matrices->k_max_Q_M[ij], &matrices->l_min_Q_M[ij], &matrices->l_max_Q_M[ij] ); preparePosteriorBoundaries(matrices->k_max_Q_M[ij] - matrices->k_min_Q_M[ij] + 1, matrices->k_min_Q_M[ij], &k_min_post_m, &k_max_post_m, &l_min_post_m, &l_max_post_m ); prepareArray(&matrices->Q_M[ij], matrices->k_min_Q_M[ij], matrices->k_max_Q_M[ij], matrices->l_min_Q_M[ij], matrices->l_max_Q_M[ij] ); } if (!matrices->Q_M1[jindx[j] + i]) { update_m1 = 1; k_min_Q_M1 = l_min_Q_M1 = 0; k_max_Q_M1 = mm1[ij] + referenceBPs1[ij]; l_max_Q_M1 = mm2[ij] + referenceBPs2[ij]; prepareBoundaries(k_min_Q_M1, k_max_Q_M1, l_min_Q_M1, l_max_Q_M1, bpdist[ij], &matrices->k_min_Q_M1[jindx[j] + i], &matrices->k_max_Q_M1[jindx[j] + i], &matrices->l_min_Q_M1[jindx[j] + i], &matrices->l_max_Q_M1[jindx[j] + i] ); preparePosteriorBoundaries(matrices->k_max_Q_M1[jindx[j] + i] - matrices->k_min_Q_M1[jindx[j] + i] + 1, matrices->k_min_Q_M1[jindx[j] + i], &k_min_post_m1, 
&k_max_post_m1, &l_min_post_m1, &l_max_post_m1 ); prepareArray(&matrices->Q_M1[jindx[j] + i], matrices->k_min_Q_M1[jindx[j] + i], matrices->k_max_Q_M1[jindx[j] + i], matrices->l_min_Q_M1[jindx[j] + i], matrices->l_max_Q_M1[jindx[j] + i] ); } /* j is unpaired */ da = referenceBPs1[ij] - referenceBPs1[ij + 1]; db = referenceBPs2[ij] - referenceBPs2[ij + 1]; if (matrices->Q_M_rem[ij + 1]) matrices->Q_M_rem[ij] += matrices->Q_M_rem[ij + 1] * pf_params->expMLbase * scale[1]; if (matrices->Q_M[ij + 1]) { for (cnt1 = matrices->k_min_Q_M[ij + 1]; cnt1 <= matrices->k_max_Q_M[ij + 1]; cnt1++) { for (cnt2 = matrices->l_min_Q_M[ij + 1][cnt1]; cnt2 <= matrices->l_max_Q_M[ij + 1][cnt1]; cnt2 += 2) { if (((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)) { matrices->Q_M[ij][cnt1 + da][(cnt2 + db) / 2] += matrices->Q_M[ij + 1][cnt1][cnt2 / 2] * pf_params->expMLbase * scale[1]; if (update_m) { updatePosteriorBoundaries(cnt1 + da, cnt2 + db, &k_min_post_m, &k_max_post_m, &l_min_post_m, &l_max_post_m ); } } else { matrices->Q_M_rem[ij] += matrices->Q_M[ij + 1][cnt1][cnt2 / 2] * pf_params->expMLbase * scale[1]; } } } } if (matrices->Q_M1_rem[jindx[j - 1] + i]) matrices->Q_M1_rem[jindx[j] + i] += matrices->Q_M1_rem[jindx[j - 1] + i] * pf_params->expMLbase * scale[1]; if (matrices->Q_M1[jindx[j - 1] + i]) { for (cnt1 = matrices->k_min_Q_M1[jindx[j - 1] + i]; cnt1 <= matrices->k_max_Q_M1[jindx[j - 1] + i]; cnt1++) for (cnt2 = matrices->l_min_Q_M1[jindx[j - 1] + i][cnt1]; cnt2 <= matrices->l_max_Q_M1[jindx[j - 1] + i][cnt1]; cnt2 += 2) { if (((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)) { matrices->Q_M1[jindx[j] + i][cnt1 + da][(cnt2 + db) / 2] += matrices->Q_M1[jindx[j - 1] + i][cnt1][cnt2 / 2] * pf_params->expMLbase * scale[1]; if (update_m1) { updatePosteriorBoundaries(cnt1 + da, cnt2 + db, &k_min_post_m1, &k_max_post_m1, &l_min_post_m1, &l_max_post_m1 ); } } else { matrices->Q_M1_rem[jindx[j] + i] += matrices->Q_M1[jindx[j - 1] + i][cnt1][cnt2 / 2] * pf_params->expMLbase * 
scale[1]; } } } /* j pairs with i */ if ((!no_close) && type) { FLT_OR_DBL aux_en = exp_E_MLstem(type, (i > 1) || circ ? S1[i - 1] : -1, (j < seq_length) || circ ? S1[j + 1] : -1, pf_params); if (matrices->Q_B_rem[ij]) { matrices->Q_M_rem[ij] += matrices->Q_B_rem[ij] * aux_en; matrices->Q_M1_rem[jindx[j] + i] += matrices->Q_B_rem[ij] * aux_en; } if (matrices->Q_B[ij]) { for (cnt1 = matrices->k_min_Q_B[ij]; cnt1 <= matrices->k_max_Q_B[ij]; cnt1++) for (cnt2 = matrices->l_min_Q_B[ij][cnt1]; cnt2 <= matrices->l_max_Q_B[ij][cnt1]; cnt2 += 2) { matrices->Q_M[ij][cnt1][cnt2 / 2] += matrices->Q_B[ij][cnt1][cnt2 / 2] * aux_en; if (update_m) { updatePosteriorBoundaries(cnt1, cnt2, &k_min_post_m, &k_max_post_m, &l_min_post_m, &l_max_post_m ); } matrices->Q_M1[jindx[j] + i][cnt1][cnt2 / 2] += matrices->Q_B[ij][cnt1][cnt2 / 2] * aux_en; if (update_m1) { updatePosteriorBoundaries(cnt1, cnt2, &k_min_post_m1, &k_max_post_m1, &l_min_post_m1, &l_max_post_m1 ); } } } } /* j pairs with k: i<k<j */ ii = my_iindx[i]; for (k = i + 1; k <= j; k++) { tt = ptype[jindx[j] + k]; temp2 = exp_E_MLstem(tt, S1[k - 1], (j < seq_length) || circ ? 
S1[j + 1] : -1, pf_params); if (matrices->Q_B_rem[my_iindx[k] - j]) { matrices->Q_M_rem[ij] += matrices->Q_B_rem[my_iindx[k] - j] * pow(pf_params->expMLbase, (double)(k - i)) * scale[k - i] * temp2; if (matrices->Q_M[ii - k + 1]) { for (cnt1 = matrices->k_min_Q_M[ii - k + 1]; cnt1 <= matrices->k_max_Q_M[ii - k + 1]; cnt1++) for (cnt2 = matrices->l_min_Q_M[ii - k + 1][cnt1]; cnt2 <= matrices->l_max_Q_M[ii - k + 1][cnt1]; cnt2 += 2) matrices->Q_M_rem[ij] += matrices->Q_M[ii - k + 1][cnt1][cnt2 / 2] * matrices->Q_B_rem[my_iindx[k] - j] * temp2; } if (matrices->Q_M_rem[ii - k + 1]) matrices->Q_M_rem[ij] += matrices->Q_M_rem[ii - k + 1] * matrices->Q_B_rem[my_iindx[k] - j] * temp2; } if (matrices->Q_M_rem[ii - k + 1]) { if (matrices->Q_B[my_iindx[k] - j]) { for (cnt1 = matrices->k_min_Q_B[my_iindx[k] - j]; cnt1 <= matrices->k_max_Q_B[my_iindx[k] - j]; cnt1++) for (cnt2 = matrices->l_min_Q_B[my_iindx[k] - j][cnt1]; cnt2 <= matrices->l_max_Q_B[my_iindx[k] - j][cnt1]; cnt2 += 2) matrices->Q_M_rem[ij] += matrices->Q_M_rem[my_iindx[k] - j] * matrices->Q_B[my_iindx[k] - j][cnt1][cnt2 / 2] * temp2; } } /* add contributions of QM(i,k-1)*QB(k,j)*e^b and * e^((k-i) * c) * QB(k,j) * e^b * therefor we need d1a = dbp(T1_{i,j}, T1_{i,k-1} + T1_{k,j}), * d1b = dbp(T2_{i,j}, T2_{i,k-1} + T2_{k,j}) * d1c = dbp(T1_{i,j}, T1_{k,j})circ = 0; * d1d = dbp(T2_{i,j}, T2_{k,j}) */ da = referenceBPs1[ij] - referenceBPs1[my_iindx[k] - j]; db = referenceBPs2[ij] - referenceBPs2[my_iindx[k] - j]; if (!matrices->Q_B[my_iindx[k] - j]) continue; for (cnt1 = matrices->k_min_Q_B[my_iindx[k] - j]; cnt1 <= matrices->k_max_Q_B[my_iindx[k] - j]; cnt1++) for (cnt2 = matrices->l_min_Q_B[my_iindx[k] - j][cnt1]; cnt2 <= matrices->l_max_Q_B[my_iindx[k] - j][cnt1]; cnt2 += 2) { if (((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)) { matrices->Q_M[ij][cnt1 + da][(cnt2 + db) / 2] += matrices->Q_B[my_iindx[k] - j][cnt1][cnt2 / 2] * pow(pf_params->expMLbase, (double)(k - i)) * scale[k - i] * temp2; if (update_m) { 
updatePosteriorBoundaries(cnt1 + da, cnt2 + db, &k_min_post_m, &k_max_post_m, &l_min_post_m, &l_max_post_m ); } } else { matrices->Q_M_rem[ij] += matrices->Q_B[my_iindx[k] - j][cnt1][cnt2 / 2] * pow(pf_params->expMLbase, (double)(k - i)) * scale[k - i] * temp2; } } if (!matrices->Q_M[ii - k + 1]) continue; da -= referenceBPs1[ii - k + 1]; db -= referenceBPs2[ii - k + 1]; for (cnt1 = matrices->k_min_Q_M[ii - k + 1]; cnt1 <= matrices->k_max_Q_M[ii - k + 1]; cnt1++) for (cnt2 = matrices->l_min_Q_M[ii - k + 1][cnt1]; cnt2 <= matrices->l_max_Q_M[ii - k + 1][cnt1]; cnt2 += 2) for (cnt3 = matrices->k_min_Q_B[my_iindx[k] - j]; cnt3 <= matrices->k_max_Q_B[my_iindx[k] - j]; cnt3++) for (cnt4 = matrices->l_min_Q_B[my_iindx[k] - j][cnt3]; cnt4 <= matrices->l_max_Q_B[my_iindx[k] - j][cnt3]; cnt4 += 2) { if (((cnt1 + cnt3 + da) <= maxD1) && ((cnt2 + cnt4 + db) <= maxD2)) { matrices->Q_M[ij][cnt1 + cnt3 + da][(cnt2 + cnt4 + db) / 2] += matrices->Q_M[ii - k + 1][cnt1][cnt2 / 2] * matrices->Q_B[my_iindx[k] - j][cnt3][cnt4 / 2] * temp2; if (update_m) { updatePosteriorBoundaries(cnt1 + cnt3 + da, cnt2 + cnt4 + db, &k_min_post_m, &k_max_post_m, &l_min_post_m, &l_max_post_m ); } } else { matrices->Q_M_rem[ij] += matrices->Q_M[ii - k + 1][cnt1][cnt2 / 2] * matrices->Q_B[my_iindx[k] - j][cnt3][cnt4 / 2] * temp2; } } } if (update_m) { adjustArrayBoundaries(&matrices->Q_M[ij], &matrices->k_min_Q_M[ij], &matrices->k_max_Q_M[ij], &matrices->l_min_Q_M[ij], &matrices->l_max_Q_M[ij], k_min_post_m, k_max_post_m, l_min_post_m, l_max_post_m ); } if (update_m1) { adjustArrayBoundaries(&matrices->Q_M1[jindx[j] + i], &matrices->k_min_Q_M1[jindx[j] + i], &matrices->k_max_Q_M1[jindx[j] + i], &matrices->l_min_Q_M1[jindx[j] + i], &matrices->l_max_Q_M1[jindx[j] + i], k_min_post_m1, k_max_post_m1, l_min_post_m1, l_max_post_m1 ); } /* compute contributions for Q(i,j) */ int k_min, k_max, l_min, l_max; int k_min_post, k_max_post, *l_min_post, *l_max_post; int update_q = 0; if (!matrices->Q[ij]) { update_q = 
1; k_min = l_min = 0; k_max = mm1[ij] + referenceBPs1[ij]; l_max = mm2[ij] + referenceBPs2[ij]; prepareBoundaries(k_min, k_max, l_min, l_max, bpdist[ij], &matrices->k_min_Q[ij], &matrices->k_max_Q[ij], &matrices->l_min_Q[ij], &matrices->l_max_Q[ij] ); preparePosteriorBoundaries(matrices->k_max_Q[ij] - matrices->k_min_Q[ij] + 1, matrices->k_min_Q[ij], &k_min_post, &k_max_post, &l_min_post, &l_max_post ); prepareArray(&matrices->Q[ij], matrices->k_min_Q[ij], matrices->k_max_Q[ij], matrices->l_min_Q[ij], matrices->l_max_Q[ij] ); } if (type) { aux_en = exp_E_ExtLoop(type, (i > 1) || circ ? S1[i - 1] : -1, (j < seq_length) || circ ? S1[j + 1] : -1, pf_params); if (matrices->Q_B_rem[ij]) matrices->Q_rem[ij] += matrices->Q_B_rem[ij] * aux_en; if (matrices->Q_B[ij]) { for (cnt1 = matrices->k_min_Q_B[ij]; cnt1 <= matrices->k_max_Q_B[ij]; cnt1++) for (cnt2 = matrices->l_min_Q_B[ij][cnt1]; cnt2 <= matrices->l_max_Q_B[ij][cnt1]; cnt2 += 2) { matrices->Q[ij][cnt1][cnt2 / 2] += matrices->Q_B[ij][cnt1][cnt2 / 2] * aux_en; if (update_q) { updatePosteriorBoundaries(cnt1, cnt2, &k_min_post, &k_max_post, &l_min_post, &l_max_post ); } } } } /* j is unpaired */ if (matrices->Q_rem[ij + 1]) matrices->Q_rem[ij] += matrices->Q_rem[ij + 1] * scale[1]; /* da = dbp(T1_{i,j}, T1_{i,j-1}) * db = dbp(T2_{i,j}, T2_{i,j-1}) */ da = referenceBPs1[ij] - referenceBPs1[ij + 1]; db = referenceBPs2[ij] - referenceBPs2[ij + 1]; if (matrices->Q[ij + 1]) { for (cnt1 = matrices->k_min_Q[ij + 1]; cnt1 <= matrices->k_max_Q[ij + 1]; cnt1++) for (cnt2 = matrices->l_min_Q[ij + 1][cnt1]; cnt2 <= matrices->l_max_Q[ij + 1][cnt1]; cnt2 += 2) { if (((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)) { matrices->Q[ij][cnt1 + da][(cnt2 + db) / 2] += matrices->Q[ij + 1][cnt1][cnt2 / 2] * scale[1]; if (update_q) { updatePosteriorBoundaries(cnt1 + da, cnt2 + db, &k_min_post, &k_max_post, &l_min_post, &l_max_post ); } } else { matrices->Q_rem[ij] += matrices->Q[ij + 1][cnt1][cnt2 / 2] * scale[1]; } } } for (k = j - TURN - 
1; k > i; k--) { tt = ptype[jindx[j] + k]; temp2 = exp_E_ExtLoop(tt, S1[k - 1], (j < seq_length) || circ ? S1[j + 1] : -1, pf_params); if (matrices->Q_rem[my_iindx[i] - k + 1]) { if (matrices->Q_B[my_iindx[k] - j]) { for (cnt1 = matrices->k_min_Q_B[my_iindx[k] - j]; cnt1 <= matrices->k_max_Q_B[my_iindx[k] - j]; cnt1++) for (cnt2 = matrices->l_min_Q_B[my_iindx[k] - j][cnt1]; cnt2 <= matrices->l_max_Q_B[my_iindx[k] - j][cnt1]; cnt2 += 2) matrices->Q_rem[ij] += matrices->Q_rem[my_iindx[i] - k + 1] * matrices->Q_B[my_iindx[k] - j][cnt1][cnt2 / 2] * temp2; } if (matrices->Q_B_rem[my_iindx[k] - j]) matrices->Q_rem[ij] += matrices->Q_rem[my_iindx[i] - k + 1] * matrices->Q_B_rem[my_iindx[k] - j] * temp2; } if (matrices->Q_B_rem[my_iindx[k] - j]) { if (matrices->Q[my_iindx[i] - k + 1]) { for (cnt1 = matrices->k_min_Q[my_iindx[i] - k + 1]; cnt1 <= matrices->k_max_Q[my_iindx[i] - k + 1]; cnt1++) for (cnt2 = matrices->l_min_Q[my_iindx[i] - k + 1][cnt1]; cnt2 <= matrices->l_max_Q[my_iindx[i] - k + 1][cnt1]; cnt2 += 2) matrices->Q_rem[ij] += matrices->Q[my_iindx[i] - k + 1][cnt1][cnt2 / 2] * matrices->Q_B_rem[my_iindx[k] - j] * temp2; } } /* da = dbp{T1_{i,j}, T1_{k,j} * db = dbp{T2_{i,j}, T2_{k,j}} */ da = referenceBPs1[ij] - referenceBPs1[my_iindx[k] - j] - referenceBPs1[my_iindx[i] - k + 1]; db = referenceBPs2[ij] - referenceBPs2[my_iindx[k] - j] - referenceBPs2[my_iindx[i] - k + 1]; if (!matrices->Q[my_iindx[i] - k + 1]) continue; if (!matrices->Q_B[my_iindx[k] - j]) continue; for (cnt1 = matrices->k_min_Q[my_iindx[i] - k + 1]; cnt1 <= matrices->k_max_Q[my_iindx[i] - k + 1]; cnt1++) for (cnt2 = matrices->l_min_Q[my_iindx[i] - k + 1][cnt1]; cnt2 <= matrices->l_max_Q[my_iindx[i] - k + 1][cnt1]; cnt2 += 2) for (cnt3 = matrices->k_min_Q_B[my_iindx[k] - j]; cnt3 <= matrices->k_max_Q_B[my_iindx[k] - j]; cnt3++) for (cnt4 = matrices->l_min_Q_B[my_iindx[k] - j][cnt3]; cnt4 <= matrices->l_max_Q_B[my_iindx[k] - j][cnt3]; cnt4 += 2) { if (((cnt1 + cnt3 + da) <= maxD1) && ((cnt2 + cnt4 
+ db) <= maxD2)) { matrices->Q[ij][cnt1 + cnt3 + da][(cnt2 + cnt4 + db) / 2] += matrices->Q[my_iindx[i] - k + 1][cnt1][cnt2 / 2] * matrices->Q_B[my_iindx[k] - j][cnt3][cnt4 / 2] * temp2; if (update_q) { updatePosteriorBoundaries(cnt1 + cnt3 + da, cnt2 + cnt4 + db, &k_min_post, &k_max_post, &l_min_post, &l_max_post ); } } else { matrices->Q_rem[ij] += matrices->Q[my_iindx[i] - k + 1][cnt1][cnt2 / 2] * matrices->Q_B[my_iindx[k] - j][cnt3][cnt4 / 2] * temp2; } } } if (update_q) { adjustArrayBoundaries(&matrices->Q[ij], &matrices->k_min_Q[ij], &matrices->k_max_Q[ij], &matrices->l_min_Q[ij], &matrices->l_max_Q[ij], k_min_post, k_max_post, l_min_post, l_max_post ); } #if 1 for (cnt1 = matrices->k_min_Q[ij]; cnt1 <= matrices->k_max_Q[ij]; cnt1++) { for (cnt2 = matrices->l_min_Q[ij][cnt1]; cnt2 <= matrices->l_max_Q[ij][cnt1]; cnt2 += 2) { if (matrices->Q[ij][cnt1][cnt2 / 2] > Qmax) { Qmax = matrices->Q[ij][cnt1][cnt2 / 2]; if (Qmax > max_real / 10.) vrna_message_warning("Q close to overflow: %u %u %g\n", i, j, matrices->Q[ij][cnt1][cnt2 / 2]); } if (matrices->Q[ij][cnt1][cnt2 / 2] >= max_real) vrna_message_error("overflow in pf_fold while calculating q[%u,%u]\n" "use larger pf_scale", i, j); } } #endif } /* end of j-loop */ } } /* calculate partition function for circular case */ /* NOTE: this is the postprocessing step ONLY */ /* You have to call pf2D_linear first to calculate */ /* complete circular case!!! 
*/
/**
 * Post-processing step for the partition function of circular RNAs,
 * resolved into distance classes (k, l) w.r.t. two reference structures.
 *
 * NOTE: pf2D_linear() must have been called beforehand; this routine only
 * adds the contributions of the exterior loop that closes the circle
 * (exterior hairpin, exterior interior loop, exterior multi loop) and
 * collects them into the Q_c* matrices. Distance classes exceeding
 * (maxD1, maxD2) are accumulated in the scalar *_rem counterparts.
 *
 * @param vc  fold compound holding sequence, model details, 2D matrices
 */
PRIVATE void
pf2D_circ(vrna_fold_compound_t *vc)
{
  unsigned int      d, p, q, pq, k, l, kl, u, da, db, seq_length, maxD1, maxD2,
                    base_d1, base_d2, *mm1, *mm2, *bpdist;
  int               *my_iindx, *jindx, type, cnt1, cnt2, cnt3, cnt4, *rtype;
  short             *S1;
  unsigned int      *referenceBPs1, *referenceBPs2;
  char              *sequence, *ptype;
  FLT_OR_DBL        *scale;
  vrna_exp_param_t  *pf_params;  /* holds all [unscaled] pf parameters */
  vrna_md_t         *md;
  vrna_mx_pf_t      *matrices;

  /* unpack the fold compound into local aliases */
  pf_params     = vc->exp_params;
  md            = &(pf_params->model_details);
  matrices      = vc->exp_matrices;
  sequence      = vc->sequence;
  seq_length    = vc->length;
  maxD1         = vc->maxD1;
  maxD2         = vc->maxD2;
  S1            = vc->sequence_encoding;
  ptype         = vc->ptype;
  rtype         = &(md->rtype[0]);
  scale         = matrices->scale;
  my_iindx      = vc->iindx;
  jindx         = vc->jindx;
  referenceBPs1 = vc->referenceBPs1;
  referenceBPs2 = vc->referenceBPs2;
  dangles       = md->dangles;  /* NOTE(review): writes the global `dangles` (fold_vars) — side effect */
  mm1           = vc->mm1;
  mm2           = vc->mm2;
  bpdist        = vc->bpdist;

  /* local aliases for the distance-class resolved DP matrices and their
   * boundary index arrays; *_rem holds classes beyond (maxD1, maxD2) */
  FLT_OR_DBL ***Q_B, ***Q_M, ***Q_M1;
  FLT_OR_DBL *Q_B_rem, *Q_M_rem, *Q_M1_rem;
  int **l_min_Q_B, **l_max_Q_B, **l_min_Q_M, **l_max_Q_M, **l_min_Q_M1, **l_max_Q_M1;
  int *k_min_Q_B, *k_max_Q_B, *k_min_Q_M, *k_max_Q_M, *k_min_Q_M1, *k_max_Q_M1;

  Q_B         = matrices->Q_B;
  l_min_Q_B   = matrices->l_min_Q_B;
  l_max_Q_B   = matrices->l_max_Q_B;
  k_min_Q_B   = matrices->k_min_Q_B;
  k_max_Q_B   = matrices->k_max_Q_B;
  Q_M         = matrices->Q_M;
  l_min_Q_M   = matrices->l_min_Q_M;
  l_max_Q_M   = matrices->l_max_Q_M;
  k_min_Q_M   = matrices->k_min_Q_M;
  k_max_Q_M   = matrices->k_max_Q_M;
  Q_M1        = matrices->Q_M1;
  l_min_Q_M1  = matrices->l_min_Q_M1;
  l_max_Q_M1  = matrices->l_max_Q_M1;
  k_min_Q_M1  = matrices->k_min_Q_M1;
  k_max_Q_M1  = matrices->k_max_Q_M1;
  Q_B_rem     = matrices->Q_B_rem;
  Q_M_rem     = matrices->Q_M_rem;
  Q_M1_rem    = matrices->Q_M1_rem;

  /* reset the overflow/remainder accumulators for the circular matrices */
  matrices->Q_c_rem   = 0.;
  matrices->Q_cH_rem  = 0.;
  matrices->Q_cI_rem  = 0.;
  matrices->Q_cM_rem  = 0.;

  /* construct qm2 matrix from qm1 entries  */
#ifdef _OPENMP
#pragma omp parallel for private(d, k, l, da, db, cnt1, cnt2, cnt3, cnt4)
#endif
  for (k = 1; k < seq_length - TURN - 1; k++) {
    int k_min_Q_M2, k_max_Q_M2, l_min_Q_M2, l_max_Q_M2;
    int k_min_post_m2, k_max_post_m2, *l_min_post_m2, *l_max_post_m2;
    int update_m2 = 0;

    l_min_post_m2 = l_max_post_m2 = NULL;

    /* allocate Q_M2[k] lazily; boundary estimates derive from the maximum
     * base pair counts (mm*) and distances to the two references */
    if (!matrices->Q_M2[k]) {
      update_m2   = 1;
      k_min_Q_M2  = l_min_Q_M2 = 0;
      k_max_Q_M2  = mm1[my_iindx[k] - seq_length] + referenceBPs1[my_iindx[k] - seq_length];
      l_max_Q_M2  = mm2[my_iindx[k] - seq_length] + referenceBPs2[my_iindx[k] - seq_length];
      prepareBoundaries(k_min_Q_M2,
                        k_max_Q_M2,
                        l_min_Q_M2,
                        l_max_Q_M2,
                        bpdist[my_iindx[k] - seq_length],
                        &matrices->k_min_Q_M2[k],
                        &matrices->k_max_Q_M2[k],
                        &matrices->l_min_Q_M2[k],
                        &matrices->l_max_Q_M2[k]
                        );
      preparePosteriorBoundaries(matrices->k_max_Q_M2[k] - matrices->k_min_Q_M2[k] + 1,
                                 matrices->k_min_Q_M2[k],
                                 &k_min_post_m2,
                                 &k_max_post_m2,
                                 &l_min_post_m2,
                                 &l_max_post_m2
                                 );
      prepareArray(&matrices->Q_M2[k],
                   matrices->k_min_Q_M2[k],
                   matrices->k_max_Q_M2[k],
                   matrices->l_min_Q_M2[k],
                   matrices->l_max_Q_M2[k]
                   );
    }

    /* construct Q_M2: Q_M2[k] = sum_l QM1(k,l) * QM1(l+1,n) */
    for (l = k + TURN + 1; l < seq_length - TURN - 1; l++) {
      /* remainder x (regular | remainder) contributions go to Q_M2_rem */
      if (Q_M1_rem[jindx[l] + k]) {
        if (Q_M1[jindx[seq_length] + l + 1]) {
          for (cnt1 = k_min_Q_M1[jindx[seq_length] + l + 1];
               cnt1 <= k_max_Q_M1[jindx[seq_length] + l + 1];
               cnt1++)
            for (cnt2 = l_min_Q_M1[jindx[seq_length] + l + 1][cnt1];
                 cnt2 <= l_max_Q_M1[jindx[seq_length] + l + 1][cnt1];
                 cnt2 += 2)
              matrices->Q_M2_rem[k] += Q_M1_rem[jindx[l] + k] *
                                       Q_M1[jindx[seq_length] + l + 1][cnt1][cnt2 / 2];
        }

        if (Q_M1_rem[jindx[seq_length] + l + 1])
          matrices->Q_M2_rem[k] += Q_M1_rem[jindx[l] + k] * Q_M1_rem[jindx[seq_length] + l + 1];
      }

      if (Q_M1_rem[jindx[seq_length] + l + 1]) {
        if (Q_M1[jindx[l] + k]) {
          for (cnt1 = k_min_Q_M1[jindx[l] + k]; cnt1 <= k_max_Q_M1[jindx[l] + k]; cnt1++)
            for (cnt2 = l_min_Q_M1[jindx[l] + k][cnt1];
                 cnt2 <= l_max_Q_M1[jindx[l] + k][cnt1];
                 cnt2 += 2)
              matrices->Q_M2_rem[k] += Q_M1[jindx[l] + k][cnt1][cnt2 / 2] *
                                       Q_M1_rem[jindx[seq_length] + l + 1];
        }
      }

      /* regular x regular: distance class of the concatenation shifts by
       * da/db = d(T_{k,n}) - d(T_{k,l}) - d(T_{l+1,n}) for both references */
      if (matrices->Q_M1[jindx[l] + k] && matrices->Q_M1[jindx[seq_length] + l + 1]) {
        da = referenceBPs1[my_iindx[k] - seq_length] - referenceBPs1[my_iindx[k] - l] -
             referenceBPs1[my_iindx[l + 1] - seq_length];
        db = referenceBPs2[my_iindx[k] - seq_length] - referenceBPs2[my_iindx[k] - l] -
             referenceBPs2[my_iindx[l + 1] - seq_length];
        for (cnt1 = k_min_Q_M1[jindx[l] + k]; cnt1 <= k_max_Q_M1[jindx[l] + k]; cnt1++)
          for (cnt2 = l_min_Q_M1[jindx[l] + k][cnt1];
               cnt2 <= l_max_Q_M1[jindx[l] + k][cnt1];
               cnt2 += 2) {
            for (cnt3 = k_min_Q_M1[jindx[seq_length] + l + 1];
                 cnt3 <= k_max_Q_M1[jindx[seq_length] + l + 1];
                 cnt3++)
              for (cnt4 = l_min_Q_M1[jindx[seq_length] + l + 1][cnt3];
                   cnt4 <= l_max_Q_M1[jindx[seq_length] + l + 1][cnt3];
                   cnt4 += 2) {
                if (((cnt1 + cnt3 + da) <= maxD1) && ((cnt2 + cnt4 + db) <= maxD2)) {
                  matrices->Q_M2[k][cnt1 + cnt3 + da][(cnt2 + cnt4 + db) / 2] +=
                    Q_M1[jindx[l] + k][cnt1][cnt2 / 2] *
                    Q_M1[jindx[seq_length] + l + 1][cnt3][cnt4 / 2];
                  if (update_m2) {
                    updatePosteriorBoundaries(cnt1 + cnt3 + da,
                                              cnt2 + cnt4 + db,
                                              &k_min_post_m2,
                                              &k_max_post_m2,
                                              &l_min_post_m2,
                                              &l_max_post_m2
                                              );
                  }
                } else {
                  /* class out of scope -> remainder */
                  matrices->Q_M2_rem[k] += Q_M1[jindx[l] + k][cnt1][cnt2 / 2] *
                                           Q_M1[jindx[seq_length] + l + 1][cnt3][cnt4 / 2];
                }
              }
          }
      }
    }
    /* shrink Q_M2[k] to the distance classes actually populated */
    if (update_m2) {
      adjustArrayBoundaries(&matrices->Q_M2[k],
                            &matrices->k_min_Q_M2[k],
                            &matrices->k_max_Q_M2[k],
                            &matrices->l_min_Q_M2[k],
                            &matrices->l_max_Q_M2[k],
                            k_min_post_m2,
                            k_max_post_m2,
                            l_min_post_m2,
                            l_max_post_m2
                            );
    }
  }

  /* distances of the open chain [1..n] to both references */
  base_d1 = referenceBPs1[my_iindx[1] - seq_length];
  base_d2 = referenceBPs2[my_iindx[1] - seq_length];

  int min_k, max_k, max_l, min_l;
  int min_k_real, max_k_real, min_k_real_qcH, max_k_real_qcH, min_k_real_qcI, max_k_real_qcI,
      min_k_real_qcM, max_k_real_qcM;
  int *min_l_real, *max_l_real, *min_l_real_qcH, *max_l_real_qcH, *min_l_real_qcI,
      *max_l_real_qcI, *min_l_real_qcM, *max_l_real_qcM;
  int update_c, update_cH, update_cI, update_cM;

  max_l_real_qcM  = min_l_real_qcM = NULL;
  max_l_real_qcI  = min_l_real_qcI = NULL;
  max_l_real_qcH  = min_l_real_qcH = NULL;
  max_l_real      = min_l_real = NULL;
  update_c        = update_cH = update_cI = update_cM = 0;

  min_k = min_l = 0;
  max_k = mm1[my_iindx[1] - seq_length] + referenceBPs1[my_iindx[1] - seq_length];
  max_l = mm2[my_iindx[1] - seq_length] + referenceBPs2[my_iindx[1] - seq_length];

  /* lazily allocate the four circular matrices Q_c, Q_cH, Q_cI, Q_cM;
   * each section is independent, hence the OpenMP sections construct */
#ifdef _OPENMP
#pragma omp sections
  {
#pragma omp section
    {
#endif
  if (!matrices->Q_c) {
    update_c = 1;
    prepareBoundaries(min_k,
                      max_k,
                      min_l,
                      max_l,
                      bpdist[my_iindx[1] - seq_length],
                      &matrices->k_min_Q_c,
                      &matrices->k_max_Q_c,
                      &matrices->l_min_Q_c,
                      &matrices->l_max_Q_c
                      );
    prepareArray(&matrices->Q_c,
                 matrices->k_min_Q_c,
                 matrices->k_max_Q_c,
                 matrices->l_min_Q_c,
                 matrices->l_max_Q_c
                 );
    preparePosteriorBoundaries(max_k - min_k + 1,
                               min_k,
                               &min_k_real,
                               &max_k_real,
                               &min_l_real,
                               &max_l_real
                               );
  }

#ifdef _OPENMP
    }
#pragma omp section
    {
#endif
  if (!matrices->Q_cH) {
    update_cH = 1;
    prepareBoundaries(min_k,
                      max_k,
                      min_l,
                      max_l,
                      bpdist[my_iindx[1] - seq_length],
                      &matrices->k_min_Q_cH,
                      &matrices->k_max_Q_cH,
                      &matrices->l_min_Q_cH,
                      &matrices->l_max_Q_cH
                      );
    prepareArray(&matrices->Q_cH,
                 matrices->k_min_Q_cH,
                 matrices->k_max_Q_cH,
                 matrices->l_min_Q_cH,
                 matrices->l_max_Q_cH
                 );
    preparePosteriorBoundaries(max_k - min_k + 1,
                               min_k,
                               &min_k_real_qcH,
                               &max_k_real_qcH,
                               &min_l_real_qcH,
                               &max_l_real_qcH
                               );
  }

#ifdef _OPENMP
    }
#pragma omp section
    {
#endif
  if (!matrices->Q_cI) {
    update_cI = 1;
    prepareBoundaries(min_k,
                      max_k,
                      min_l,
                      max_l,
                      bpdist[my_iindx[1] - seq_length],
                      &matrices->k_min_Q_cI,
                      &matrices->k_max_Q_cI,
                      &matrices->l_min_Q_cI,
                      &matrices->l_max_Q_cI
                      );
    prepareArray(&matrices->Q_cI,
                 matrices->k_min_Q_cI,
                 matrices->k_max_Q_cI,
                 matrices->l_min_Q_cI,
                 matrices->l_max_Q_cI
                 );
    preparePosteriorBoundaries(max_k - min_k + 1,
                               min_k,
                               &min_k_real_qcI,
                               &max_k_real_qcI,
                               &min_l_real_qcI,
                               &max_l_real_qcI
                               );
  }

#ifdef _OPENMP
    }
#pragma omp section
    {
#endif
  if (!matrices->Q_cM) {
    update_cM = 1;
    prepareBoundaries(min_k,
                      max_k,
                      min_l,
                      max_l,
                      bpdist[my_iindx[1] - seq_length],
                      &matrices->k_min_Q_cM,
                      &matrices->k_max_Q_cM,
                      &matrices->l_min_Q_cM,
                      &matrices->l_max_Q_cM
                      );
    prepareArray(&matrices->Q_cM,
                 matrices->k_min_Q_cM,
                 matrices->k_max_Q_cM,
                 matrices->l_min_Q_cM,
                 matrices->l_max_Q_cM
                 );
    preparePosteriorBoundaries(max_k - min_k + 1,
                               min_k,
                               &min_k_real_qcM,
                               &max_k_real_qcM,
                               &min_l_real_qcM,
                               &max_l_real_qcM
                               );
  }

#ifdef _OPENMP
    }
  }
#endif

  for (d = TURN + 2; d <= seq_length; d++) /* i,j in [1..length] */
#ifdef _OPENMP
#pragma omp parallel for private(p, q, pq, k, l, kl, u, da, db, type, cnt1, cnt2, cnt3, cnt4)
#endif
    for (q = d; q <= seq_length; q++) {
      FLT_OR_DBL  qot;
      char        loopseq[10];
      p   = q - d + 1;
      pq  = my_iindx[p] - q;
      /* 1. get exterior hairpin contribution  */
      u = seq_length - q + p - 1;  /* length of the exterior "hairpin" loop */
      if (u < TURN)
        continue;

      type = ptype[jindx[q] + p];
      if (!type)
        continue;

      if (((type == 3) || (type == 4)) && no_closingGU)
        continue;

      /* cause we want to calc the exterior loops, we need the reversed pair type from now on  */
      type = rtype[type];

      /* loopseq is only needed (and only filled) for short exterior loops;
       * NOTE(review): presumably exp_E_Hairpin() ignores it for u >= 7 — confirm */
      if (u < 7) {
        strcpy(loopseq, sequence + q - 1);
        strncat(loopseq, sequence, p);
      }

      /* get distance to reference if closing the hairpin
       *  da = dbp(T1_[1,n}, T1_{p,q})
       *  db = dbp(T2_{1,n}, T2_{p,q})
       */
      da  = base_d1 - referenceBPs1[pq];
      db  = base_d2 - referenceBPs2[pq];
      qot = exp_E_Hairpin(u, type, S1[q + 1], S1[p - 1], loopseq, pf_params) * scale[u];

      if (Q_B_rem[pq])
        matrices->Q_cH_rem += Q_B_rem[pq] * qot;

      if (Q_B[pq]) {
        for (cnt1 = k_min_Q_B[pq]; cnt1 <= k_max_Q_B[pq]; cnt1++)
          for (cnt2 = l_min_Q_B[pq][cnt1]; cnt2 <= l_max_Q_B[pq][cnt1]; cnt2 += 2) {
            if (((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)) {
              matrices->Q_cH[cnt1 + da][(cnt2 + db) / 2] += Q_B[pq][cnt1][cnt2 / 2] * qot;
              if (update_cH) {
                updatePosteriorBoundaries(cnt1 + da,
                                          cnt2 + db,
                                          &min_k_real_qcH,
                                          &max_k_real_qcH,
                                          &min_l_real_qcH,
                                          &max_l_real_qcH
                                          );
              }
            } else {
              matrices->Q_cH_rem += Q_B[pq][cnt1][cnt2 / 2] * qot;
            }
          }
      }

      /* 2. exterior interior loops, i "define" the (k,l) pair as "outer pair" */
      /* so "outer type" is rtype[type[k,l]] and inner type is type[p,q] */
      if (Q_B_rem[pq]) {
        for (k = q + 1; k < seq_length; k++) {
          unsigned int ln1, lstart, ln_pre;
          ln1 = k - q - 1;  /* unpaired stretch between q and k */
          if (ln1 + p - 1 > MAXLOOP)
            break;

          lstart  = k + TURN + 1;
          ln_pre  = ln1 + p + seq_length;
          if (ln_pre > lstart + MAXLOOP)
            lstart = ln_pre - MAXLOOP - 1;

          for (l = lstart; l <= seq_length; l++) {
            unsigned int  ln2;
            int           type2;
            kl  = my_iindx[k] - l;
            ln2 = (p - 1) + (seq_length - l);  /* unpaired stretch wrapping the origin */
            if ((ln1 + ln2) > MAXLOOP)
              continue;

            type2 = ptype[jindx[l] + k];
            if (!type2)
              continue;

            qot = exp_E_IntLoop(ln2,
                                ln1,
                                rtype[type2],
                                type,
                                S1[l + 1],
                                S1[k - 1],
                                S1[p - 1],
                                S1[q + 1],
                                pf_params) * scale[ln1 + ln2];
            if (Q_B_rem[kl])
              matrices->Q_cI_rem += Q_B_rem[pq] * Q_B_rem[kl] * qot;

            if (Q_B[kl]) {
              for (cnt1 = k_min_Q_B[kl]; cnt1 <= k_max_Q_B[kl]; cnt1++)
                for (cnt2 = l_min_Q_B[kl][cnt1]; cnt2 <= l_max_Q_B[kl][cnt1]; cnt2 += 2)
                  matrices->Q_cI_rem += Q_B_rem[pq] * Q_B[kl][cnt1][cnt2 / 2] * qot;
            }
          }
        }
      }

      if (Q_B[pq]) {
        for (k = q + 1; k < seq_length; k++) {
          unsigned int ln1, lstart, ln_pre;
          ln1 = k - q - 1;
          if (ln1 + p - 1 > MAXLOOP)
            break;

          lstart  = k + TURN + 1;
          ln_pre  = ln1 + p + seq_length;
          if (ln_pre > lstart + MAXLOOP)
            lstart = ln_pre - MAXLOOP - 1;

          for (l = lstart; l <= seq_length; l++) {
            unsigned int  ln2;
            int           type2;
            kl  = my_iindx[k] - l;
            ln2 = (p - 1) + (seq_length - l);
            if ((ln1 + ln2) > MAXLOOP)
              continue;

            type2 = ptype[jindx[l] + k];
            if (!type2)
              continue;

            qot = exp_E_IntLoop(ln2,
                                ln1,
                                rtype[type2],
                                type,
                                S1[l + 1],
                                S1[k - 1],
                                S1[p - 1],
                                S1[q + 1],
                                pf_params) * scale[ln1 + ln2];
            if (Q_B_rem[kl]) {
              for (cnt1 = k_min_Q_B[pq]; cnt1 <= k_max_Q_B[pq]; cnt1++)
                for (cnt2 = l_min_Q_B[pq][cnt1]; cnt2 <= l_max_Q_B[pq][cnt1]; cnt2 += 2)
                  matrices->Q_cI_rem += Q_B[pq][cnt1][cnt2 / 2] * Q_B_rem[kl] * qot;
            }

            if (!Q_B[kl])
              continue;

            /* get distance to reference if closing the interior loop
             *  d2a = dbp(T1_[1,n}, T1_{p,q} + T1_{k,l})
             *  d2b = dbp(T2_[1,n}, T2_{p,q} + T2_{k,l})
             */
            da  = base_d1 - referenceBPs1[pq] - referenceBPs1[kl];
            db  = base_d2 - referenceBPs2[pq] - referenceBPs2[kl];

            for (cnt1 = k_min_Q_B[pq]; cnt1 <= k_max_Q_B[pq]; cnt1++)
              for (cnt2 = l_min_Q_B[pq][cnt1]; cnt2 <= l_max_Q_B[pq][cnt1]; cnt2 += 2)
                for (cnt3 = k_min_Q_B[kl]; cnt3 <= k_max_Q_B[kl]; cnt3++)
                  for (cnt4 = l_min_Q_B[kl][cnt3]; cnt4 <= l_max_Q_B[kl][cnt3]; cnt4 += 2) {
                    if (((cnt1 + cnt3 + da) <= maxD1) && ((cnt2 + cnt4 + db) <= maxD2)) {
                      matrices->Q_cI[cnt1 + cnt3 + da][(cnt2 + cnt4 + db) / 2] +=
                        Q_B[pq][cnt1][cnt2 / 2] * Q_B[kl][cnt3][cnt4 / 2] * qot;
                      if (update_cI) {
                        updatePosteriorBoundaries(cnt1 + cnt3 + da,
                                                  cnt2 + cnt4 + db,
                                                  &min_k_real_qcI,
                                                  &max_k_real_qcI,
                                                  &min_l_real_qcI,
                                                  &max_l_real_qcI
                                                  );
                      }
                    } else {
                      matrices->Q_cI_rem += Q_B[pq][cnt1][cnt2 / 2] *
                                            Q_B[kl][cnt3][cnt4 / 2] * qot;
                    }
                  }
          }
        }
      }
    }

  /* shrink Q_cH / Q_cI to the populated distance classes */
  if (update_cH) {
    adjustArrayBoundaries(&matrices->Q_cH,
                          &matrices->k_min_Q_cH,
                          &matrices->k_max_Q_cH,
                          &matrices->l_min_Q_cH,
                          &matrices->l_max_Q_cH,
                          min_k_real_qcH,
                          max_k_real_qcH,
                          min_l_real_qcH,
                          max_l_real_qcH
                          );
  }

  if (update_cI) {
    adjustArrayBoundaries(&matrices->Q_cI,
                          &matrices->k_min_Q_cI,
                          &matrices->k_max_Q_cI,
                          &matrices->l_min_Q_cI,
                          &matrices->l_max_Q_cI,
                          min_k_real_qcI,
                          max_k_real_qcI,
                          min_l_real_qcI,
                          max_l_real_qcI
                          );
  }

  /* 3. Multiloops  */
  if (seq_length > 2 * TURN - 3) {
#ifdef _OPENMP
#pragma omp parallel for private(k, da, db, cnt1, cnt2, cnt3, cnt4)
#endif
    /* exterior multiloop: split circle as QM(1,k) * QM2(k+1,n) * expMLclosing */
    for (k = TURN + 2; k < seq_length - 2 * TURN - 3; k++) {
      /* remainder x (regular | remainder) contributions */
      if (Q_M_rem[my_iindx[1] - k]) {
        if (matrices->Q_M2[k + 1]) {
          for (cnt1 = matrices->k_min_Q_M2[k + 1]; cnt1 <= matrices->k_max_Q_M2[k + 1]; cnt1++)
            for (cnt2 = matrices->l_min_Q_M2[k + 1][cnt1];
                 cnt2 <= matrices->l_max_Q_M2[k + 1][cnt1];
                 cnt2 += 2)
              matrices->Q_cM_rem += Q_M_rem[my_iindx[1] - k] *
                                    matrices->Q_M2[k + 1][cnt1][cnt2 / 2] *
                                    pf_params->expMLclosing;
        }

        if (matrices->Q_M2_rem[k + 1])
          matrices->Q_cM_rem += Q_M_rem[my_iindx[1] - k] * matrices->Q_M2_rem[k + 1] *
                                pf_params->expMLclosing;
      }

      if (matrices->Q_M2_rem[k + 1]) {
        if (Q_M[my_iindx[1] - k]) {
          for (cnt1 = k_min_Q_M[my_iindx[1] - k]; cnt1 <= k_max_Q_M[my_iindx[1] - k]; cnt1++)
            for (cnt2 = l_min_Q_M[my_iindx[1] - k][cnt1];
                 cnt2 <= l_max_Q_M[my_iindx[1] - k][cnt1];
                 cnt2 += 2)
              matrices->Q_cM_rem += Q_M[my_iindx[1] - k][cnt1][cnt2 / 2] *
                                    matrices->Q_M2_rem[k + 1] * pf_params->expMLclosing;
        }
      }

      /* get distancies to references
       * d3a = dbp(T1_[1,n}, T1_{1,k} + T1_{k+1, n})
       * d3b = dbp(T2_[1,n}, T2_{1,k} + T2_{k+1, n})
       */
      da  = base_d1 - referenceBPs1[my_iindx[1] - k] - referenceBPs1[my_iindx[k + 1] - seq_length];
      db  = base_d2 - referenceBPs2[my_iindx[1] - k] - referenceBPs2[my_iindx[k + 1] - seq_length];
      if (Q_M[my_iindx[1] - k] && matrices->Q_M2[k + 1]) {
        for (cnt1 = k_min_Q_M[my_iindx[1] - k]; cnt1 <= k_max_Q_M[my_iindx[1] - k]; cnt1++)
          for (cnt2 = l_min_Q_M[my_iindx[1] - k][cnt1];
               cnt2 <= l_max_Q_M[my_iindx[1] - k][cnt1];
               cnt2 += 2)
            for (cnt3 = matrices->k_min_Q_M2[k + 1]; cnt3 <= matrices->k_max_Q_M2[k + 1]; cnt3++)
              for (cnt4 = matrices->l_min_Q_M2[k + 1][cnt3];
                   cnt4 <= matrices->l_max_Q_M2[k + 1][cnt3];
                   cnt4 += 2) {
                if (((cnt1 + cnt3 + da) <= maxD1) && ((cnt2 + cnt4 + db) <= maxD2)) {
                  matrices->Q_cM[cnt1 + cnt3 + da][(cnt2 + cnt4 + db) / 2] +=
                    Q_M[my_iindx[1] - k][cnt1][cnt2 / 2] *
                    matrices->Q_M2[k + 1][cnt3][cnt4 / 2] *
                    pf_params->expMLclosing;
                  if (update_cM) {
                    updatePosteriorBoundaries(cnt1 + cnt3 + da,
                                              cnt2 + cnt4 + db,
                                              &min_k_real_qcM,
                                              &max_k_real_qcM,
                                              &min_l_real_qcM,
                                              &max_l_real_qcM
                                              );
                  }
                } else {
                  matrices->Q_cM_rem += Q_M[my_iindx[1] - k][cnt1][cnt2 / 2] *
                                        matrices->Q_M2[k + 1][cnt3][cnt4 / 2] *
                                        pf_params->expMLclosing;
                }
              }
      }
    }
  }

  if (update_cM) {
    adjustArrayBoundaries(&matrices->Q_cM,
                          &matrices->k_min_Q_cM,
                          &matrices->k_max_Q_cM,
                          &matrices->l_min_Q_cM,
                          &matrices->l_max_Q_cM,
                          min_k_real_qcM,
                          max_k_real_qcM,
                          min_l_real_qcM,
                          max_l_real_qcM
                          );
  }

  /* collect Q_c = Q_cH + Q_cI + Q_cM per distance class */
  for (cnt1 = matrices->k_min_Q_cH; cnt1 <= matrices->k_max_Q_cH; cnt1++)
    for (cnt2 = matrices->l_min_Q_cH[cnt1]; cnt2 <= matrices->l_max_Q_cH[cnt1]; cnt2 += 2) {
      matrices->Q_c[cnt1][cnt2 / 2] += matrices->Q_cH[cnt1][cnt2 / 2];
      if (update_c) {
        updatePosteriorBoundaries(cnt1,
                                  cnt2,
                                  &min_k_real,
                                  &max_k_real,
                                  &min_l_real,
                                  &max_l_real
                                  );
      }
    }

  for (cnt1 = matrices->k_min_Q_cI; cnt1 <= matrices->k_max_Q_cI; cnt1++)
    for (cnt2 = matrices->l_min_Q_cI[cnt1]; cnt2 <= matrices->l_max_Q_cI[cnt1]; cnt2 += 2) {
      matrices->Q_c[cnt1][cnt2 / 2] += matrices->Q_cI[cnt1][cnt2 / 2];
      if (update_c) {
        updatePosteriorBoundaries(cnt1,
                                  cnt2,
                                  &min_k_real,
                                  &max_k_real,
                                  &min_l_real,
                                  &max_l_real
                                  );
      }
    }

  for (cnt1 = matrices->k_min_Q_cM; cnt1 <= matrices->k_max_Q_cM; cnt1++)
    for (cnt2 = matrices->l_min_Q_cM[cnt1]; cnt2 <= matrices->l_max_Q_cM[cnt1]; cnt2 += 2) {
      matrices->Q_c[cnt1][cnt2 / 2] += matrices->Q_cM[cnt1][cnt2 / 2];
      if (update_c) {
        updatePosteriorBoundaries(cnt1,
                                  cnt2,
                                  &min_k_real,
                                  &max_k_real,
                                  &min_l_real,
                                  &max_l_real
                                  );
      }
    }

  matrices->Q_c_rem = matrices->Q_cH_rem + matrices->Q_cI_rem + matrices->Q_cM_rem;

  /* add the case where the structure is the unfolded chain */
  if ((referenceBPs1[my_iindx[1] - seq_length] <= maxD1) &&
      (referenceBPs2[my_iindx[1] - seq_length] <= maxD2)) {
    matrices->Q_c[referenceBPs1[my_iindx[1] - seq_length]][referenceBPs2[my_iindx[1] - seq_length] / 2] += 1.0 * scale[seq_length];
    if (update_c) {
      updatePosteriorBoundaries(referenceBPs1[my_iindx[1] - seq_length],
                                referenceBPs2[my_iindx[1] - seq_length],
                                &min_k_real,
                                &max_k_real,
                                &min_l_real,
                                &max_l_real
                                );
    }
  } else {
    matrices->Q_c_rem += 1.0 * scale[seq_length];
  }

  adjustArrayBoundaries(&matrices->Q_c,
                        &matrices->k_min_Q_c,
                        &matrices->k_max_Q_c,
                        &matrices->l_min_Q_c,
                        &matrices->l_max_Q_c,
                        min_k_real,
                        max_k_real,
                        min_l_real,
                        max_l_real
                        );
}


/*
 * ###################################################
 * stochastic backtracking
 * ###################################################
 */

/**
 * Sample one secondary structure from the 2D partition function of the
 * full-length sequence; convenience wrapper around vrna_pbacktrack5_TwoD().
 *
 * @param vc  fold compound with filled 2D pf matrices
 * @param d1  distance class w.r.t. reference 1 (-1 selects the remainder classes)
 * @param d2  distance class w.r.t. reference 2
 * @return    sampled structure in dot-bracket notation (caller frees)
 */
PUBLIC char *
vrna_pbacktrack_TwoD(vrna_fold_compound_t *vc,
                     int                  d1,
                     int                  d2)
{
  return vrna_pbacktrack5_TwoD(vc, d1, d2, vc->length);
}


/*
 * Stochastic backtracking restricted to the first `length` nucleotides
 * (continues on the following lines).
 */
PUBLIC char *
vrna_pbacktrack5_TwoD(vrna_fold_compound_t  *vc,
                      int                   d1,
                      int                   d2,
                      unsigned int          length)
{
  char              *pstruc, *ptype;
  short             *S1;
  unsigned int      i, j, n, start, maxD1, maxD2, da, db, *referenceBPs1, *referenceBPs2;
  int               *my_iindx, *jindx, ij, cnt1, cnt2, cnt3, cnt4, type,
                    **l_min_Q, **l_max_Q, **l_min_Q_B, **l_max_Q_B,
                    *k_min_Q, *k_max_Q, *k_min_Q_B, *k_max_Q_B;
  FLT_OR_DBL        r, qt, *scale, ***Q, ***Q_B, *Q_rem, *Q_B_rem;
  vrna_exp_param_t  *pf_params;
  vrna_md_t         *md;
  vrna_mx_pf_t      *matrices;

  n             = vc->length;
  pf_params     = vc->exp_params;
  md            = &(pf_params->model_details);
  matrices      = vc->exp_matrices;
  maxD1         = vc->maxD1;
  maxD2         = vc->maxD2;
  my_iindx      = vc->iindx;
  jindx         = vc->jindx;
  scale         = matrices->scale;
  ptype         = vc->ptype;
  S1            = vc->sequence_encoding;
  referenceBPs1 = vc->referenceBPs1;
  referenceBPs2 = vc->referenceBPs2;
  Q             = matrices->Q;
  l_min_Q       = matrices->l_min_Q;
  l_max_Q       = matrices->l_max_Q;
  k_min_Q       = matrices->k_min_Q;
  k_max_Q       = matrices->k_max_Q;
  Q_B           = matrices->Q_B;
  l_min_Q_B     = matrices->l_min_Q_B;
  l_max_Q_B     = matrices->l_max_Q_B;
  k_min_Q_B     = matrices->k_min_Q_B;
  k_max_Q_B     = matrices->k_max_Q_B;
  Q_rem         = matrices->Q_rem;
  Q_B_rem       = matrices->Q_B_rem;
  cnt1          = cnt2 = cnt3 = cnt4 = -1;

  /* circular RNAs are delegated to the dedicated backtracker */
  if (md->circ) {
    if (n != length)
      vrna_message_error("vrna_pbacktrack_TwoD@2Dfold.c: cotranscriptional backtracking for circular RNAs not supported!");

    return pbacktrack_circ(vc, d1, d2);
} if (length > n) vrna_message_error("vrna_pbacktrack_TwoD@2Dpfold.c: requested transcript length exceeds sequence length!"); #if 0 if (d1 > maxD1) vrna_message_error("pbacktrack@2Dpfold.c: distance to 1st reference structure to high!"); if (d2 > maxD2) vrna_message_error("pbacktrack@2Dpfold.c: distance to 2nd reference structure to high!"); #endif /* check whether the chosen neighborhood exists at all */ int dumb = 1; ij = my_iindx[1] - length; if ((d1 == -1) && (Q_rem[ij] != 0.)) { dumb = 0; } else { if ((k_min_Q[ij] <= d1) && (k_max_Q[ij] >= d1)) { int l_min = l_min_Q[ij][d1]; if ((d2 % 2) == (l_min % 2)) if ((l_min <= d2) && (l_max_Q[ij][d1] >= d2)) dumb = 0; } } if (dumb) { vrna_message_error("neighborhood %d:%d is not in scope of calculated partition function!\n" "pbacktrack@2Dpfold.c: exiting...", d1, d2); } pstruc = vrna_alloc((length + 1) * sizeof(char)); for (i = 0; i < length; i++) pstruc[i] = '.'; pstruc[i] = '\0'; start = 1; while (start < length) { int sn = my_iindx[start] - length; /* find i position of first pair */ FLT_OR_DBL qln_i = 0, qln_i1 = 0; if (d1 == -1) { qln_i = Q_rem[sn]; /* open chain ? 
*/ if ((maxD1 > referenceBPs1[sn]) && (maxD2 > referenceBPs2[sn])) { r = vrna_urn() * qln_i; if (scale[length - start + 1] > r) return pstruc; } /* lets see if we find a base pair with i involved */ for (i = start; i < length; i++) { r = vrna_urn() * qln_i; qln_i1 = Q_rem[my_iindx[i + 1] - length]; da = referenceBPs1[sn] - referenceBPs1[my_iindx[i + 1] - length]; db = referenceBPs2[sn] - referenceBPs2[my_iindx[i + 1] - length]; for (cnt1 = k_min_Q[my_iindx[i + 1] - length]; cnt1 <= k_max_Q[my_iindx[i + 1] - length]; cnt1++) for (cnt2 = l_min_Q[my_iindx[i + 1] - length][cnt1]; cnt2 <= l_max_Q[my_iindx[i + 1] - length][cnt1]; cnt2 += 2) if (((cnt1 + da) > maxD1) || ((cnt2 + db) > maxD2)) qln_i1 += Q[my_iindx[i + 1] - length][cnt1][cnt2 / 2]; if (r > qln_i1 * scale[1]) break; qln_i = qln_i1; } if (i >= length) break; /* no more pairs */ /* i is paired, find pairing partner j */ r = vrna_urn() * (qln_i - qln_i1 * scale[1]); for (qt = 0, j = i + TURN + 1; j < length; j++) { ij = my_iindx[i] - j; type = ptype[jindx[j] + i]; if (type) { cnt1 = cnt2 = cnt3 = cnt4 = -1; double qkl = exp_E_ExtLoop(type, (i > 1) ? S1[i - 1] : -1, S1[j + 1], pf_params); if (Q_B_rem[ij] != 0.) { if (Q_rem[my_iindx[j + 1] - length] != 0.) { qt += qkl * Q_B_rem[ij] * Q_rem[my_iindx[j + 1] - length]; if (qt >= r) goto pbacktrack_ext_loop_early_escape_rem; } if (Q[my_iindx[j + 1] - length]) { for (cnt3 = k_min_Q[my_iindx[j + 1] - length]; cnt3 <= k_max_Q[my_iindx[j + 1] - length]; cnt3++) for (cnt4 = l_min_Q[my_iindx[j + 1] - length][cnt3]; cnt4 <= l_max_Q[my_iindx[j + 1] - length][cnt3]; cnt4 += 2) { qt += qkl * Q_B_rem[ij] * Q[my_iindx[j + 1] - length][cnt3][cnt4 / 2]; if (qt >= r) goto pbacktrack_ext_loop_early_escape_rem; } } } if (Q_rem[my_iindx[j + 1] - length] != 0.) 
{ cnt3 = cnt4 = -1; if (Q_B[ij]) { for (cnt1 = k_min_Q_B[ij]; cnt1 <= k_max_Q_B[ij]; cnt1++) for (cnt2 = l_min_Q_B[ij][cnt1]; cnt2 <= l_max_Q_B[ij][cnt1]; cnt2 += 2) { qt += qkl * Q_B[ij][cnt1][cnt2 / 2] * Q_rem[my_iindx[j + 1] - length]; if (qt >= r) goto pbacktrack_ext_loop_early_escape_rem; } } } /* if we still search for pairing partner j, we go on here... */ if (Q_B[ij] && Q[my_iindx[j + 1] - length]) { da = referenceBPs1[sn] - referenceBPs1[ij] - referenceBPs1[my_iindx[j + 1] - length]; db = referenceBPs2[sn] - referenceBPs2[ij] - referenceBPs2[my_iindx[j + 1] - length]; for (cnt1 = k_min_Q_B[ij]; cnt1 <= k_max_Q_B[ij]; cnt1++) for (cnt2 = l_min_Q_B[ij][cnt1]; cnt2 <= l_max_Q_B[ij][cnt1]; cnt2 += 2) for (cnt3 = k_min_Q[my_iindx[j + 1] - length]; cnt3 <= k_max_Q[my_iindx[j + 1] - length]; cnt3++) for (cnt4 = l_min_Q[my_iindx[j + 1] - length][cnt3]; cnt4 <= l_max_Q[my_iindx[j + 1] - length][cnt3]; cnt4 += 2) if (((cnt1 + cnt3 + da) > maxD1) || ((cnt2 + cnt4 + db) > maxD2)) { qt += qkl * Q_B[ij][cnt1][cnt2 / 2] * Q[my_iindx[j + 1] - length][cnt3][cnt4 / 2]; if (qt >= r) goto pbacktrack_ext_loop_early_escape_rem; } } } /* end if(type) */ } /* end for(j) */ cnt1 = cnt2 = cnt3 = cnt4 = -1; /* dont forget the case where i pairs with n */ j = length; ij = my_iindx[i] - j; type = ptype[jindx[j] + i]; if (type) { double qkl = exp_E_ExtLoop(type, (i > 1) ? S1[i - 1] : -1, (j < n) ? S1[j + 1] : -1, pf_params); if (Q_B_rem[ij] != 0.) { qt += qkl * Q_B_rem[ij]; if (qt >= r) goto pbacktrack_ext_loop_early_escape_rem; } /* if we still search for pairing partner j, we go on here... 
*/ if (Q_B[ij]) { da = referenceBPs1[sn] - referenceBPs1[ij]; db = referenceBPs2[sn] - referenceBPs2[ij]; for (cnt1 = k_min_Q_B[ij]; cnt1 <= k_max_Q_B[ij]; cnt1++) for (cnt2 = l_min_Q_B[ij][cnt1]; cnt2 <= l_max_Q_B[ij][cnt1]; cnt2 += 2) if (((cnt1 + da) > maxD1) || ((cnt2 + db) > maxD2)) { qt += qkl * Q_B[ij][cnt1][cnt2 / 2]; if (qt >= r) goto pbacktrack_ext_loop_early_escape_rem; } } } /* end if(type) */ j++; pbacktrack_ext_loop_early_escape_rem: if (j == length + 1) vrna_message_error("pbacktrack@2Dpfold.c: backtracking failed in ext loop (rem)"); /* finally start backtracking the first exterior stem */ backtrack(vc, pstruc, cnt1, cnt2, i, j); if (j == length) break; start = j + 1; d1 = cnt3; d2 = cnt4; } /* end if d1 ==-1 */ else { qln_i = Q[sn][d1][d2 / 2]; /* open chain ? */ if ((d1 == referenceBPs1[sn]) && (d2 == referenceBPs2[sn])) { r = vrna_urn() * qln_i; if (scale[length - start + 1] > r) return pstruc; } for (i = start; i < length; i++) { r = vrna_urn() * qln_i; da = referenceBPs1[sn] - referenceBPs1[my_iindx[i + 1] - length]; db = referenceBPs2[sn] - referenceBPs2[my_iindx[i + 1] - length]; qln_i1 = 0; if (d1 >= da && d2 >= db) { if ( (d1 - da >= k_min_Q[my_iindx[i + 1] - length]) && (d1 - da <= k_max_Q[my_iindx[i + 1] - length])) { if ( (d2 - db >= l_min_Q[my_iindx[i + 1] - length][d1 - da]) && (d2 - db <= l_max_Q[my_iindx[i + 1] - length][d1 - da])) qln_i1 += Q[my_iindx[i + 1] - length][d1 - da][(d2 - db) / 2]; } } if (r > qln_i1 * scale[1]) break; /* i is paired */ qln_i = qln_i1; } if (i >= length) break; /* no more pairs */ /* now find the pairing partner j */ r = vrna_urn() * (qln_i - qln_i1 * scale[1]); for (qt = 0, j = i + 1; j < length; j++) { int type; ij = my_iindx[i] - j; type = ptype[jindx[j] + i]; if (type) { double qkl = 1.0; qkl *= exp_E_ExtLoop(type, (i > 1) ? 
S1[i - 1] : -1, S1[j + 1], pf_params); da = referenceBPs1[sn] - referenceBPs1[ij] - referenceBPs1[my_iindx[j + 1] - length]; db = referenceBPs2[sn] - referenceBPs2[ij] - referenceBPs2[my_iindx[j + 1] - length]; if ((d1 >= da) && (d2 >= db) && Q_B[ij] && Q[my_iindx[j + 1] - length]) { for (cnt1 = k_min_Q_B[ij]; cnt1 <= MIN2(k_max_Q_B[ij], d1 - da); cnt1++) for (cnt2 = l_min_Q_B[ij][cnt1]; cnt2 <= MIN2(l_max_Q_B[ij][cnt1], d2 - db); cnt2 += 2) if ((d1 - da - cnt1 >= k_min_Q[my_iindx[j + 1] - length]) && (d1 - da - cnt1 <= k_max_Q[my_iindx[j + 1] - length])) { if ((d2 - db - cnt2 >= l_min_Q[my_iindx[j + 1] - length][d1 - da - cnt1]) && (d2 - db - cnt2 <= l_max_Q[my_iindx[j + 1] - length][d1 - da - cnt1])) { qt += qkl * Q_B[ij][cnt1][cnt2 / 2] * Q[my_iindx[j + 1] - length][d1 - da - cnt1][(d2 - db - cnt2) / 2]; if (qt >= r) goto pbacktrack_ext_loop_early_escape; } } } } } /* now dont forget the case j==n */ j = length; ij = my_iindx[i] - j; int type = ptype[jindx[j] + i]; if (type) { double qkl = 1.0; qkl *= exp_E_ExtLoop(type, (i > 1) ? S1[i - 1] : -1, (j < n) ? 
S1[j + 1] : -1, pf_params);
        /* bp-distance increments to both references introduced by pairing i with n */
        da = referenceBPs1[sn] - referenceBPs1[ij];
        db = referenceBPs2[sn] - referenceBPs2[ij];
        if (d1 >= da && d2 >= db) {
          cnt1 = d1 - da;
          cnt2 = d2 - db;
          if ((cnt1 >= k_min_Q_B[ij]) && (cnt1 <= k_max_Q_B[ij])) {
            if ((cnt2 >= l_min_Q_B[ij][cnt1]) && (cnt2 <= l_max_Q_B[ij][cnt1])) {
              qt += qkl * Q_B[ij][cnt1][cnt2 / 2];
              if (qt >= r)
                goto pbacktrack_ext_loop_early_escape; /* j is paired */
            }
          }
        }
      }

      j++;

pbacktrack_ext_loop_early_escape:

      if (j == length + 1)
        vrna_message_error("pbacktrack@2Dpfold.c: backtracking failed in ext loop");

      /* backtrack the stem enclosed by (i,j), then continue with the segment right of j */
      backtrack(vc, pstruc, cnt1, cnt2, i, j);

      if (j == length)
        break;

      start = j + 1;
      d1 -= cnt1 + da;
      d2 -= cnt2 + db;
    } /* end if d1!=-1 */
  }
  return pstruc;
}


/*
 * Stochastically backtrack one secondary structure of a circular RNA from
 * the distance-class partition functions in vc->exp_matrices.
 *
 * d1/d2 select the distance class (base pair distance to reference 1 and 2);
 * d1 == -1 selects the "remainder" partition (all classes exceeding
 * maxD1/maxD2), in which case d2 is ignored.
 *
 * Returns a dot-bracket string allocated with vrna_alloc(); the caller owns
 * and must free it.  Aborts via vrna_message_error() if the requested
 * neighborhood was not computed or backtracking fails.
 */
PRIVATE char *
pbacktrack_circ(vrna_fold_compound_t *vc,
                int d1,
                int d2)
{
  char *pstruc;
  unsigned int i, n, maxD1, maxD2, *referenceBPs1, *referenceBPs2;
  int *my_iindx, k_min_Q_c, k_max_Q_c, k_min_Q_cH, k_max_Q_cH,
      k_min_Q_cI, k_max_Q_cI, k_min_Q_cM, k_max_Q_cM,
      *l_min_Q_c, *l_max_Q_c, *l_min_Q_cH, *l_max_Q_cH,
      *l_min_Q_cI, *l_max_Q_cI, *l_min_Q_cM, *l_max_Q_cM;
  FLT_OR_DBL r, *scale, qot, **Q_c, **Q_cH, **Q_cI, **Q_cM,
             Q_c_rem, Q_cH_rem, Q_cI_rem, Q_cM_rem;
  vrna_mx_pf_t *matrices;

  /* shortcuts into the fold compound and its pf matrices */
  matrices = vc->exp_matrices;
  n = vc->length;
  maxD1 = vc->maxD1;
  maxD2 = vc->maxD2;
  my_iindx = vc->iindx;
  scale = matrices->scale;
  referenceBPs1 = vc->referenceBPs1;
  referenceBPs2 = vc->referenceBPs2;
  Q_c = matrices->Q_c;
  l_min_Q_c = matrices->l_min_Q_c;
  l_max_Q_c = matrices->l_max_Q_c;
  k_min_Q_c = matrices->k_min_Q_c;
  k_max_Q_c = matrices->k_max_Q_c;
  Q_cH = matrices->Q_cH;
  l_min_Q_cH = matrices->l_min_Q_cH;
  l_max_Q_cH = matrices->l_max_Q_cH;
  k_min_Q_cH = matrices->k_min_Q_cH;
  k_max_Q_cH = matrices->k_max_Q_cH;
  Q_cI = matrices->Q_cI;
  l_min_Q_cI = matrices->l_min_Q_cI;
  l_max_Q_cI = matrices->l_max_Q_cI;
  k_min_Q_cI = matrices->k_min_Q_cI;
  k_max_Q_cI = matrices->k_max_Q_cI;
  Q_cM = matrices->Q_cM;
  l_min_Q_cM = matrices->l_min_Q_cM;
  l_max_Q_cM = matrices->l_max_Q_cM;
  k_min_Q_cM = matrices->k_min_Q_cM;
  k_max_Q_cM = matrices->k_max_Q_cM;
  Q_c_rem = matrices->Q_c_rem;
  Q_cH_rem = matrices->Q_cH_rem;
  Q_cI_rem = matrices->Q_cI_rem;
  Q_cM_rem = matrices->Q_cM_rem;

  /* check whether the chosen neighborhood exists at all */
  int dumb = 1;
  if ((d1 == -1) && (Q_c_rem != 0.)) {
    dumb = 0;
  } else {
    if ((k_min_Q_c <= d1) && (k_max_Q_c >= d1)) {
      int l_min = l_min_Q_c[d1];
      /* the l-dimension only stores every second entry; d2 must share its parity */
      if ((d2 % 2) == (l_min % 2))
        if ((l_min <= d2) && (l_max_Q_c[d1] >= d2))
          dumb = 0;
    }
  }

  if (dumb) {
    vrna_message_error("neighborhood %d:%d is not in scope of calculated partition function!\n"
                       "pbacktrack_circ@2Dpfold.c: exiting cheerless...", d1, d2);
  }

  /* start from the completely unpaired structure */
  pstruc = vrna_alloc((n + 1) * sizeof(char));
  for (i = 0; i < n; i++)
    pstruc[i] = '.';
  pstruc[i] = '\0';

  /* now we come to the actual backtracking process */
  qot = 0.;

  /* backtrack in rest-partition (all distance classes beyond maxD1/maxD2) */
  if (d1 == -1) {
    r = vrna_urn() * Q_c_rem;
    /* open chain ? */
    if ((referenceBPs1[my_iindx[1] - n] > maxD1) || (referenceBPs2[my_iindx[1] - n] > maxD2)) {
      qot = 1.0 * scale[n];
      if (qot >= r)
        goto pbacktrack_circ_escape;
    }

    /* exterior loop closed by a hairpin? */
    qot += Q_cH_rem;
    if (qot >= r) {
      backtrack_qcH(vc, pstruc, d1, d2);
      goto pbacktrack_circ_escape;
    }

    /* exterior loop is an interior loop? */
    qot += Q_cI_rem;
    if (qot >= r) {
      backtrack_qcI(vc, pstruc, d1, d2);
      goto pbacktrack_circ_escape;
    }

    /* exterior loop is a multibranch loop? */
    qot += Q_cM_rem;
    if (qot >= r) {
      backtrack_qcM(vc, pstruc, d1, d2);
      goto pbacktrack_circ_escape;
    }

    vrna_message_error("pbacktrack_circ@2Dpfold.c: backtracking failed in exterior loop! Exiting cheerless...");
  }
  /* normal backtracking within distance class (d1,d2) */
  else {
    r = vrna_urn() * Q_c[d1][d2 / 2];

    /* open chain ? */
    if ((referenceBPs1[my_iindx[1] - n] == d1) && (referenceBPs2[my_iindx[1] - n] == d2)) {
      qot += 1.0 * scale[n];
      if (qot >= r)
        goto pbacktrack_circ_escape;
    }

    /* exterior hairpin loop ?
*/
    if ((k_min_Q_cH <= d1) && (k_max_Q_cH >= d1)) {
      int l_min = l_min_Q_cH[d1];
      if ((d2 % 2) == (l_min % 2)) {
        if ((l_min <= d2) && (l_max_Q_cH[d1] >= d2)) {
          qot += Q_cH[d1][d2 / 2];
          if (qot >= r) {
            backtrack_qcH(vc, pstruc, d1, d2);
            goto pbacktrack_circ_escape;
          }
        }
      }
    }

    /* exterior interior loop ? */
    if ((k_min_Q_cI <= d1) && (k_max_Q_cI >= d1)) {
      int l_min = l_min_Q_cI[d1];
      if ((d2 % 2) == (l_min % 2)) {
        if ((l_min <= d2) && (l_max_Q_cI[d1] >= d2)) {
          qot += Q_cI[d1][d2 / 2];
          if (qot >= r) {
            backtrack_qcI(vc, pstruc, d1, d2);
            goto pbacktrack_circ_escape;
          }
        }
      }
    }

    /* exterior multibranch loop ? */
    if ((k_min_Q_cM <= d1) && (k_max_Q_cM >= d1)) {
      int l_min = l_min_Q_cM[d1];
      if ((d2 % 2) == (l_min % 2)) {
        if ((l_min <= d2) && (l_max_Q_cM[d1] >= d2)) {
          qot += Q_cM[d1][d2 / 2];
          if (qot >= r) {
            backtrack_qcM(vc, pstruc, d1, d2);
            goto pbacktrack_circ_escape;
          }
        }
      }
    }
  }

pbacktrack_circ_escape:
  return pstruc;
}


/*
 * Backtrack a circular structure whose exterior loop is a hairpin, i.e.
 * exactly one stem (i,j) closes the molecule and the backbone segment
 * wrapping around the n->1 junction forms the hairpin loop.
 *
 * d1 == -1 samples from the "remainder" classes (beyond maxD1/maxD2);
 * otherwise only contributions whose total distances equal (d1,d2) count.
 * Writes the sampled pair(s) into pstruc via backtrack().
 */
PRIVATE void
backtrack_qcH(vrna_fold_compound_t *vc,
              char *pstruc,
              int d1,
              int d2)
{
  char *ptype, *sequence;
  short *S1;
  unsigned int i, j, n, maxD1, maxD2, base_d1, base_d2, da, db,
               *referenceBPs1, *referenceBPs2;
  int u, *my_iindx, *jindx, ij, cnt1, cnt2, type,
      **l_min_Q_B, **l_max_Q_B, *k_min_Q_B, *k_max_Q_B, *rtype;
  FLT_OR_DBL r, qt, *scale, qot, ***Q_B, **Q_cH, *Q_B_rem, Q_cH_rem;
  vrna_exp_param_t *pf_params;
  vrna_md_t *md;
  vrna_mx_pf_t *matrices;

  pf_params = vc->exp_params;
  md = &(pf_params->model_details);
  matrices = vc->exp_matrices;
  sequence = vc->sequence;
  n = vc->length;
  my_iindx = vc->iindx;
  jindx = vc->jindx;
  scale = matrices->scale;
  ptype = vc->ptype;
  rtype = &(md->rtype[0]);
  S1 = vc->sequence_encoding;
  referenceBPs1 = vc->referenceBPs1;
  referenceBPs2 = vc->referenceBPs2;
  maxD1 = vc->maxD1;
  maxD2 = vc->maxD2;
  Q_B_rem = matrices->Q_B_rem;
  Q_B = matrices->Q_B;
  l_min_Q_B = matrices->l_min_Q_B;
  l_max_Q_B = matrices->l_max_Q_B;
  k_min_Q_B = matrices->k_min_Q_B;
  k_max_Q_B = matrices->k_max_Q_B;
  Q_cH_rem = matrices->Q_cH_rem;
  Q_cH = matrices->Q_cH;

  qot = qt = 0.;
  /* bp distances of the full (open-chain) sequence interval to both references */
  base_d1 = referenceBPs1[my_iindx[1] - n];
  base_d2 = referenceBPs2[my_iindx[1] - n];

  if (d1 == -1) {
    r = vrna_urn() * Q_cH_rem;
    for (i = 1; i < n; i++)
      for (j = i + TURN + 1; j <= n; j++) {
        char loopseq[10];
        ij = my_iindx[i] - j;
        /* u = length of the exterior hairpin loop (spanning the n->1 junction) */
        u = n - j + i - 1;
        if (u < TURN)
          continue;

        type = ptype[jindx[j] + i];
        if (!type)
          continue;

        if (((type == 3) || (type == 4)) && no_closingGU)
          continue;

        /* pair is viewed from the outside -> reverse the pair type */
        type = rtype[type];

        /* NOTE(review): loopseq is only filled for u < 7; presumably
         * exp_E_Hairpin() ignores the sequence for larger loops -- confirm */
        if (u < 7) {
          strcpy(loopseq, sequence + j - 1);
          strncat(loopseq, sequence, i);
        }

        qt = exp_E_Hairpin(u, type, S1[j + 1], S1[i - 1], loopseq, pf_params) * scale[u];
        if (Q_B_rem[ij]) {
          qot += Q_B_rem[ij] * qt;
          if (qot >= r) {
            backtrack(vc, pstruc, d1, d2, i, j);
            return;
          }
        }

        da = base_d1 - referenceBPs1[ij];
        db = base_d2 - referenceBPs2[ij];
        if (Q_B[ij]) {
          /* regular classes of Q_B whose total distance exceeds maxD1/maxD2 */
          for (cnt1 = k_min_Q_B[ij]; cnt1 <= k_max_Q_B[ij]; cnt1++)
            for (cnt2 = l_min_Q_B[ij][cnt1]; cnt2 <= l_max_Q_B[ij][cnt1]; cnt2 += 2) {
              if (((cnt1 + da) > maxD1) || ((cnt2 + db) > maxD2)) {
                qot += Q_B[ij][cnt1][cnt2 / 2] * qt;
                if (qot >= r) {
                  backtrack(vc, pstruc, cnt1, cnt2, i, j);
                  return;
                }
              }
            }
        }
      }
  } else {
    r = vrna_urn() * Q_cH[d1][d2 / 2];
    for (i = 1; i < n; i++)
      for (j = i + TURN + 1; j <= n; j++) {
        char loopseq[10];
        ij = my_iindx[i] - j;
        if (!Q_B[ij])
          continue;

        u = n - j + i - 1;
        if (u < TURN)
          continue;

        type = ptype[jindx[j] + i];
        if (!type)
          continue;

        if (((type == 3) || (type == 4)) && no_closingGU)
          continue;

        type = rtype[type];

        if (u < 7) {
          strcpy(loopseq, sequence + j - 1);
          strncat(loopseq, sequence, i);
        }

        qt = exp_E_Hairpin(u, type, S1[j + 1], S1[i - 1], loopseq, pf_params) * scale[u];
        da = base_d1 - referenceBPs1[ij];
        db = base_d2 - referenceBPs2[ij];
        /* only classes whose total distance matches the requested (d1,d2) */
        for (cnt1 = k_min_Q_B[ij]; cnt1 <= k_max_Q_B[ij]; cnt1++)
          for (cnt2 = l_min_Q_B[ij][cnt1]; cnt2 <= l_max_Q_B[ij][cnt1]; cnt2 += 2) {
            if (((cnt1 + da) == d1) && ((cnt2 + db) == d2)) {
              qot += Q_B[ij][cnt1][cnt2 / 2] * qt;
              if (qot >= r) {
                backtrack(vc, pstruc, cnt1, cnt2, i, j);
                return;
              }
            }
          }
      }
  }
  vrna_message_error("backtrack_qcH@2Dpfold.c: failed to find closing pair!");
}


/*
 * Backtrack a circular structure whose exterior loop is an interior loop:
 * two stems (i,j) and (p,q) close the molecule, separated by the two
 * unpaired stretches j+1..p-1 and q+1..n,1..i-1 (the latter wraps the
 * n->1 junction).
 *
 * d1 == -1 samples from the "remainder" classes (beyond maxD1/maxD2);
 * otherwise only contributions whose total distances equal (d1,d2) count.
 * Delegates both enclosed stems to backtrack().
 * NOTE(review): unlike backtrack_qcH/backtrack_qcM this function has no
 * trailing error call if no candidate is hit -- it silently returns.
 */
PRIVATE void
backtrack_qcI(vrna_fold_compound_t *vc,
              char *pstruc,
              int d1,
              int d2)
{
  char *ptype;
  short *S1;
  unsigned int i, j, ij, p, q, pq, n, maxD1, maxD2, base_d1, base_d2, da, db,
               *referenceBPs1, *referenceBPs2;
  int *my_iindx, *jindx, cnt1, cnt2, cnt3, cnt4, type,
      **l_min_Q_B, **l_max_Q_B, *k_min_Q_B, *k_max_Q_B, *rtype;
  FLT_OR_DBL r, qt, *scale, qot, ***Q_B, *Q_B_rem, **Q_cI, Q_cI_rem;
  vrna_exp_param_t *pf_params;
  vrna_md_t *md;
  vrna_mx_pf_t *matrices;

  pf_params = vc->exp_params;
  md = &(pf_params->model_details);
  matrices = vc->exp_matrices;
  n = vc->length;
  my_iindx = vc->iindx;
  jindx = vc->jindx;
  scale = matrices->scale;
  ptype = vc->ptype;
  rtype = &(md->rtype[0]);
  S1 = vc->sequence_encoding;
  referenceBPs1 = vc->referenceBPs1;
  referenceBPs2 = vc->referenceBPs2;
  maxD1 = vc->maxD1;
  maxD2 = vc->maxD2;
  Q_B = matrices->Q_B;
  l_min_Q_B = matrices->l_min_Q_B;
  l_max_Q_B = matrices->l_max_Q_B;
  k_min_Q_B = matrices->k_min_Q_B;
  k_max_Q_B = matrices->k_max_Q_B;
  Q_cI = matrices->Q_cI;
  Q_B_rem = matrices->Q_B_rem;
  Q_cI_rem = matrices->Q_cI_rem;

  qot = qt = 0.;
  /* bp distances of the full sequence interval to both references */
  base_d1 = referenceBPs1[my_iindx[1] - n];
  base_d2 = referenceBPs2[my_iindx[1] - n];

  if (d1 == -1) {
    r = vrna_urn() * Q_cI_rem;
    for (i = 1; i < n; i++)
      for (j = i + TURN + 1; j <= n; j++) {
        ij = my_iindx[i] - j;
        /* closing pair viewed from outside -> reversed pair type */
        type = rtype[(unsigned int)ptype[jindx[j] + i]];
        if (!type)
          continue;

        /* first stem taken from the remainder partition of Q_B */
        if (Q_B_rem[ij]) {
          for (p = j + 1; p < n; p++) {
            unsigned int ln1, qstart, ln_pre;
            /* ln1 = unpaired bases between the two stems (j+1..p-1) */
            ln1 = p - j - 1;
            if (ln1 + i - 1 > MAXLOOP)
              break;

            qstart = p + TURN + 1;
            ln_pre = ln1 + i + n;
            if (ln_pre > qstart + MAXLOOP)
              qstart = ln_pre - MAXLOOP - 1;

            for (q = qstart; q <= n; q++) {
              unsigned int ln2;
              int type2;
              pq = my_iindx[p] - q;
              /* ln2 = unpaired bases wrapping the n->1 junction */
              ln2 = (i - 1) + (n - q);
              if ((ln1 + ln2) > MAXLOOP)
                continue;

              type2 = ptype[jindx[q] + p];
              if (!type2)
                continue;

              qt = exp_E_IntLoop(ln2, ln1, rtype[type2], type,
                                 S1[q + 1], S1[p - 1], S1[i - 1], S1[j + 1],
                                 pf_params) * scale[ln1 + ln2];
              if (Q_B_rem[pq]) {
                qot += Q_B_rem[ij] * Q_B_rem[pq] * qt;
                if (qot > r) {
                  backtrack(vc, pstruc, d1, d2, i, j);
                  backtrack(vc, pstruc, d1, d2, p, q);
                  return;
                }
              }

              if (Q_B[pq]) {
                for (cnt1 = k_min_Q_B[pq]; cnt1 <= k_max_Q_B[pq]; cnt1++)
                  for (cnt2 = l_min_Q_B[pq][cnt1]; cnt2 <= l_max_Q_B[pq][cnt1]; cnt2 += 2) {
                    qot += Q_B_rem[ij] * Q_B[pq][cnt1][cnt2 / 2] * qt;
                    if (qot > r) {
                      backtrack(vc, pstruc, d1, d2, i, j);
                      backtrack(vc, pstruc, cnt1, cnt2, p, q);
                      return;
                    }
                  }
              }
            }
          }
        }

        /* first stem taken from the regular distance classes of Q_B */
        if (Q_B[ij]) {
          for (p = j + 1; p < n; p++) {
            unsigned int ln1, qstart, ln_pre;
            ln1 = p - j - 1;
            if (ln1 + i - 1 > MAXLOOP)
              break;

            qstart = p + TURN + 1;
            ln_pre = ln1 + i + n;
            if (ln_pre > qstart + MAXLOOP)
              qstart = ln_pre - MAXLOOP - 1;

            for (q = qstart; q <= n; q++) {
              unsigned int ln2;
              int type2;
              pq = my_iindx[p] - q;
              ln2 = (i - 1) + (n - q);
              if ((ln1 + ln2) > MAXLOOP)
                continue;

              type2 = ptype[jindx[q] + p];
              if (!type2)
                continue;

              qt = exp_E_IntLoop(ln2, ln1, rtype[type2], type,
                                 S1[q + 1], S1[p - 1], S1[i - 1], S1[j + 1],
                                 pf_params) * scale[ln1 + ln2];
              if (Q_B_rem[pq]) {
                for (cnt1 = k_min_Q_B[ij]; cnt1 <= k_max_Q_B[ij]; cnt1++)
                  for (cnt2 = l_min_Q_B[ij][cnt1]; cnt2 <= l_max_Q_B[ij][cnt1]; cnt2 += 2) {
                    qot += Q_B[ij][cnt1][cnt2 / 2] * Q_B_rem[pq] * qt;
                    if (qot > r) {
                      backtrack(vc, pstruc, cnt1, cnt2, i, j);
                      backtrack(vc, pstruc, d1, d2, p, q);
                      return;
                    }
                  }
              }

              if (Q_B[pq]) {
                /* distance increments of the exterior interior loop itself */
                da = base_d1 - referenceBPs1[ij] - referenceBPs1[pq];
                db = base_d2 - referenceBPs2[ij] - referenceBPs2[pq];
                for (cnt1 = k_min_Q_B[ij]; cnt1 <= k_max_Q_B[ij]; cnt1++)
                  for (cnt2 = l_min_Q_B[ij][cnt1]; cnt2 <= l_max_Q_B[ij][cnt1]; cnt2 += 2)
                    for (cnt3 = k_min_Q_B[pq]; cnt3 <= k_max_Q_B[pq]; cnt3++)
                      for (cnt4 = l_min_Q_B[pq][cnt3]; cnt4 <= l_max_Q_B[pq][cnt3]; cnt4 += 2) {
                        /* only class combinations that overflow into the remainder */
                        if (((cnt1 + cnt3 + da) > maxD1) || ((cnt2 + cnt4 + db) > maxD2)) {
                          qot += Q_B[ij][cnt1][cnt2 / 2] * Q_B[pq][cnt3][cnt4 / 2] * qt;
                          if (qot > r) {
                            backtrack(vc, pstruc, cnt1, cnt2, i, j);
                            backtrack(vc, pstruc, cnt3, cnt4, p, q);
                            return;
                          }
                        }
                      }
              }
            }
          }
        }
      }
  } else {
    r = vrna_urn() * Q_cI[d1][d2 / 2];
    for (i = 1; i < n; i++)
      for (j = i + TURN + 1; j <= n; j++) {
        ij
= my_iindx[i] - j;
        type = rtype[(unsigned int)ptype[jindx[j] + i]];
        if (!type)
          continue;

        if (!Q_B[ij])
          continue;

        for (p = j + 1; p < n; p++) {
          unsigned int ln1, qstart, ln_pre;
          /* ln1 = unpaired bases between the two exterior stems */
          ln1 = p - j - 1;
          if (ln1 + i - 1 > MAXLOOP)
            break;

          qstart = p + TURN + 1;
          ln_pre = ln1 + i + n;
          if (ln_pre > qstart + MAXLOOP)
            qstart = ln_pre - MAXLOOP - 1;

          for (q = qstart; q <= n; q++) {
            unsigned int ln2;
            int type2;
            pq = my_iindx[p] - q;
            if (!Q_B[pq])
              continue;

            /* ln2 = unpaired bases wrapping the n->1 junction */
            ln2 = (i - 1) + (n - q);
            if ((ln1 + ln2) > MAXLOOP)
              continue;

            type2 = ptype[jindx[q] + p];
            if (!type2)
              continue;

            qt = exp_E_IntLoop(ln2, ln1, rtype[type2], type,
                               S1[q + 1], S1[p - 1], S1[i - 1], S1[j + 1],
                               pf_params) * scale[ln1 + ln2];
            da = base_d1 - referenceBPs1[ij] - referenceBPs1[pq];
            db = base_d2 - referenceBPs2[ij] - referenceBPs2[pq];
            /* only class combinations with exact total distance (d1,d2) */
            for (cnt1 = k_min_Q_B[ij]; cnt1 <= k_max_Q_B[ij]; cnt1++)
              for (cnt2 = l_min_Q_B[ij][cnt1]; cnt2 <= l_max_Q_B[ij][cnt1]; cnt2 += 2)
                for (cnt3 = k_min_Q_B[pq]; cnt3 <= k_max_Q_B[pq]; cnt3++)
                  for (cnt4 = l_min_Q_B[pq][cnt3]; cnt4 <= l_max_Q_B[pq][cnt3]; cnt4 += 2) {
                    if (((cnt1 + cnt3 + da) == d1) && ((cnt2 + cnt4 + db) == d2)) {
                      qot += Q_B[ij][cnt1][cnt2 / 2] * Q_B[pq][cnt3][cnt4 / 2] * qt;
                      if (qot > r) {
                        backtrack(vc, pstruc, cnt1, cnt2, i, j);
                        backtrack(vc, pstruc, cnt3, cnt4, p, q);
                        return;
                      }
                    }
                  }
          }
        }
      }
  }
}


/*
 * Backtrack a circular structure whose exterior loop is a multibranch loop:
 * split the circle at position k into a Q_M part [1,k] (>= 1 stem) and a
 * Q_M2 part [k+1,n] (exactly 2 stems), weighted by expMLclosing.
 *
 * d1 == -1 samples from the "remainder" classes (beyond maxD1/maxD2);
 * otherwise only combinations with exact total distance (d1,d2) count.
 * Delegates to backtrack_qm() / backtrack_qm2().
 */
PRIVATE void
backtrack_qcM(vrna_fold_compound_t *vc,
              char *pstruc,
              int d1,
              int d2)
{
  unsigned int k, n, maxD1, maxD2, base_d1, base_d2, da, db,
               *referenceBPs1, *referenceBPs2;
  int *my_iindx, cnt1, cnt2, cnt3, cnt4,
      **l_min_Q_M, **l_max_Q_M, **l_min_Q_M2, **l_max_Q_M2,
      *k_min_Q_M, *k_max_Q_M, *k_min_Q_M2, *k_max_Q_M2;
  FLT_OR_DBL r, qt, qot, ***Q_M, ***Q_M2, **Q_cM, *Q_M_rem, *Q_M2_rem, Q_cM_rem;
  vrna_exp_param_t *pf_params;
  vrna_mx_pf_t *matrices;

  pf_params = vc->exp_params;
  matrices = vc->exp_matrices;
  n = vc->length;
  my_iindx = vc->iindx;
  referenceBPs1 = vc->referenceBPs1;
  referenceBPs2 = vc->referenceBPs2;
  maxD1 = vc->maxD1;
  maxD2 = vc->maxD2;
  Q_cM = matrices->Q_cM;
  Q_M = matrices->Q_M;
  l_min_Q_M = matrices->l_min_Q_M;
  l_max_Q_M = matrices->l_max_Q_M;
  k_min_Q_M = matrices->k_min_Q_M;
  k_max_Q_M = matrices->k_max_Q_M;
  Q_M2 = matrices->Q_M2;
  l_min_Q_M2 = matrices->l_min_Q_M2;
  l_max_Q_M2 = matrices->l_max_Q_M2;
  k_min_Q_M2 = matrices->k_min_Q_M2;
  k_max_Q_M2 = matrices->k_max_Q_M2;
  Q_cM_rem = matrices->Q_cM_rem;
  Q_M_rem = matrices->Q_M_rem;
  Q_M2_rem = matrices->Q_M2_rem;

  /* bp distances of the full sequence interval to both references */
  base_d1 = referenceBPs1[my_iindx[1] - n];
  base_d2 = referenceBPs2[my_iindx[1] - n];

  qot = qt = 0.;

  if (d1 == -1) {
    r = vrna_urn() * Q_cM_rem;
    for (k = TURN + 2; k < n - 2 * TURN - 3; k++) {
      /* remainder Q_M combined with regular or remainder Q_M2 */
      if (Q_M_rem[my_iindx[1] - k]) {
        if (Q_M2[k + 1]) {
          for (cnt1 = k_min_Q_M2[k + 1]; cnt1 <= k_max_Q_M2[k + 1]; cnt1++)
            for (cnt2 = l_min_Q_M2[k + 1][cnt1]; cnt2 <= l_max_Q_M2[k + 1][cnt1]; cnt2 += 2) {
              qot += Q_M_rem[my_iindx[1] - k] * Q_M2[k + 1][cnt1][cnt2 / 2] * pf_params->expMLclosing;
              if (qot > r) {
                backtrack_qm(vc, pstruc, d1, d2, 1, k);
                backtrack_qm2(vc, pstruc, cnt1, cnt2, k + 1);
                return;
              }
            }
        }

        if (Q_M2_rem[k + 1]) {
          qot += Q_M_rem[my_iindx[1] - k] * Q_M2_rem[k + 1] * pf_params->expMLclosing;
          if (qot > r) {
            backtrack_qm(vc, pstruc, d1, d2, 1, k);
            backtrack_qm2(vc, pstruc, d1, d2, k + 1);
            return;
          }
        }
      }

      /* regular Q_M combined with remainder Q_M2 */
      if (Q_M2_rem[k + 1]) {
        if (Q_M[my_iindx[1] - k]) {
          for (cnt1 = k_min_Q_M[my_iindx[1] - k]; cnt1 <= k_max_Q_M[my_iindx[1] - k]; cnt1++)
            for (cnt2 = l_min_Q_M[my_iindx[1] - k][cnt1]; cnt2 <= l_max_Q_M[my_iindx[1] - k][cnt1]; cnt2 += 2) {
              qot += Q_M[my_iindx[1] - k][cnt1][cnt2 / 2] * Q_M2_rem[k + 1] * pf_params->expMLclosing;
              if (qot > r) {
                backtrack_qm(vc, pstruc, cnt1, cnt2, 1, k);
                backtrack_qm2(vc, pstruc, d1, d2, k + 1);
                return;
              }
            }
        }
      }

      /* regular/regular combinations whose total distance overflows maxD1/maxD2 */
      da = base_d1 - referenceBPs1[my_iindx[1] - k] - referenceBPs1[my_iindx[k + 1] - n];
      db = base_d2 - referenceBPs2[my_iindx[1] - k] - referenceBPs2[my_iindx[k + 1] - n];
      if (Q_M[my_iindx[1] - k] && Q_M2[k + 1]) {
        for (cnt1 = k_min_Q_M[my_iindx[1] - k]; cnt1 <= k_max_Q_M[my_iindx[1] - k]; cnt1++)
          for (cnt2 = l_min_Q_M[my_iindx[1] - k][cnt1]; cnt2 <=
l_max_Q_M[my_iindx[1] - k][cnt1]; cnt2 += 2)
            for (cnt3 = k_min_Q_M2[k + 1]; cnt3 <= k_max_Q_M2[k + 1]; cnt3++)
              for (cnt4 = l_min_Q_M2[k + 1][cnt3]; cnt4 <= l_max_Q_M2[k + 1][cnt3]; cnt4 += 2) {
                if (((cnt1 + cnt3 + da) > maxD1) || ((cnt2 + cnt4 + db) > maxD2)) {
                  qot += Q_M[my_iindx[1] - k][cnt1][cnt2 / 2] * Q_M2[k + 1][cnt3][cnt4 / 2] * pf_params->expMLclosing;
                  if (qot > r) {
                    backtrack_qm(vc, pstruc, cnt1, cnt2, 1, k);
                    backtrack_qm2(vc, pstruc, cnt3, cnt4, k + 1);
                    return;
                  }
                }
              }
      }
    }
  } else {
    r = vrna_urn() * Q_cM[d1][d2 / 2];
    for (k = TURN + 2; k < n - 2 * TURN - 3; k++) {
      da = base_d1 - referenceBPs1[my_iindx[1] - k] - referenceBPs1[my_iindx[k + 1] - n];
      db = base_d2 - referenceBPs2[my_iindx[1] - k] - referenceBPs2[my_iindx[k + 1] - n];
      if (Q_M[my_iindx[1] - k] && Q_M2[k + 1]) {
        /* only class combinations with exact total distance (d1,d2) */
        for (cnt1 = k_min_Q_M[my_iindx[1] - k]; cnt1 <= k_max_Q_M[my_iindx[1] - k]; cnt1++)
          for (cnt2 = l_min_Q_M[my_iindx[1] - k][cnt1]; cnt2 <= l_max_Q_M[my_iindx[1] - k][cnt1]; cnt2 += 2)
            for (cnt3 = k_min_Q_M2[k + 1]; cnt3 <= k_max_Q_M2[k + 1]; cnt3++)
              for (cnt4 = l_min_Q_M2[k + 1][cnt3]; cnt4 <= l_max_Q_M2[k + 1][cnt3]; cnt4 += 2)
                if (((cnt1 + cnt3 + da) == d1) && ((cnt2 + cnt4 + db) == d2)) {
                  qot += Q_M[my_iindx[1] - k][cnt1][cnt2 / 2] * Q_M2[k + 1][cnt3][cnt4 / 2] * pf_params->expMLclosing;
                  if (qot > r) {
                    backtrack_qm(vc, pstruc, cnt1, cnt2, 1, k);
                    backtrack_qm2(vc, pstruc, cnt3, cnt4, k + 1);
                    return;
                  }
                }
      }
    }
  }
  vrna_message_error("backtrack_qcM@2Dpfold.c: backtracking failed");
}


/*
 * Backtrack a Q_M2 contribution: the segment [k,n] decomposed into exactly
 * two multiloop stems, Q_M1 on [k,l] and Q_M1 on [l+1,n].
 *
 * d1 == -1 samples from the "remainder" classes (beyond maxD1/maxD2);
 * otherwise only combinations with exact total distance (d1,d2) count.
 * Delegates both parts to backtrack_qm1().
 */
PRIVATE void
backtrack_qm2(vrna_fold_compound_t *vc,
              char *pstruc,
              int d1,
              int d2,
              unsigned int k)
{
  unsigned int l, n, maxD1, maxD2, da, db, *referenceBPs1, *referenceBPs2;
  int *my_iindx, *jindx, cnt1, cnt2, cnt3, cnt4,
      *k_min_Q_M1, *k_max_Q_M1, **l_min_Q_M1, **l_max_Q_M1;
  FLT_OR_DBL r, qt, qot, ***Q_M2, ***Q_M1, *Q_M2_rem, *Q_M1_rem;
  vrna_mx_pf_t *matrices;

  matrices = vc->exp_matrices;
  n = vc->length;
  my_iindx = vc->iindx;
  jindx = vc->jindx;
  referenceBPs1 = vc->referenceBPs1;
  referenceBPs2 = vc->referenceBPs2;
  maxD1 = vc->maxD1;
  maxD2 = vc->maxD2;
  Q_M1_rem = matrices->Q_M1_rem;
  Q_M1 = matrices->Q_M1;
  l_min_Q_M1 = matrices->l_min_Q_M1;
  l_max_Q_M1 = matrices->l_max_Q_M1;
  k_min_Q_M1 = matrices->k_min_Q_M1;
  k_max_Q_M1 = matrices->k_max_Q_M1;
  Q_M2_rem = matrices->Q_M2_rem;
  Q_M2 = matrices->Q_M2;

  qot = qt = 0.;

  if (d1 == -1) {
    r = vrna_urn() * Q_M2_rem[k];
    for (l = k + TURN + 1; l < n - TURN - 1; l++) {
      /* remainder [k,l] part combined with regular or remainder [l+1,n] part */
      if (Q_M1_rem[jindx[l] + k]) {
        if (Q_M1[jindx[n] + l + 1]) {
          for (cnt1 = k_min_Q_M1[jindx[n] + l + 1]; cnt1 <= k_max_Q_M1[jindx[n] + l + 1]; cnt1++)
            for (cnt2 = l_min_Q_M1[jindx[n] + l + 1][cnt1]; cnt2 <= l_max_Q_M1[jindx[n] + l + 1][cnt1]; cnt2 += 2) {
              qot += Q_M1_rem[jindx[l] + k] * Q_M1[jindx[n] + l + 1][cnt1][cnt2 / 2];
              if (qot > r) {
                backtrack_qm1(vc, pstruc, d1, d2, k, l);
                backtrack_qm1(vc, pstruc, cnt1, cnt2, l + 1, n);
                return;
              }
            }
        }

        if (Q_M1_rem[jindx[n] + l + 1]) {
          qot += Q_M1_rem[jindx[l] + k] * Q_M1_rem[jindx[n] + l + 1];
          if (qot > r) {
            backtrack_qm1(vc, pstruc, d1, d2, k, l);
            backtrack_qm1(vc, pstruc, d1, d2, l + 1, n);
            return;
          }
        }
      }

      /* regular [k,l] part combined with remainder [l+1,n] part */
      if (Q_M1_rem[jindx[n] + l + 1]) {
        if (Q_M1[jindx[l] + k]) {
          for (cnt1 = k_min_Q_M1[jindx[l] + k]; cnt1 <= k_max_Q_M1[jindx[l] + k]; cnt1++)
            for (cnt2 = l_min_Q_M1[jindx[l] + k][cnt1]; cnt2 <= l_max_Q_M1[jindx[l] + k][cnt1]; cnt2 += 2) {
              qot += Q_M1[jindx[l] + k][cnt1][cnt2 / 2] * Q_M1_rem[jindx[n] + l + 1];
              if (qot > r) {
                backtrack_qm1(vc, pstruc, cnt1, cnt2, k, l);
                backtrack_qm1(vc, pstruc, d1, d2, l + 1, n);
                return;
              }
            }
        }
      }

      /* regular/regular combinations overflowing into the remainder */
      if (!Q_M1[jindx[l] + k])
        continue;

      if (!Q_M1[jindx[n] + l + 1])
        continue;

      da = referenceBPs1[my_iindx[k] - n] - referenceBPs1[my_iindx[k] - l] - referenceBPs1[my_iindx[l + 1] - n];
      db = referenceBPs2[my_iindx[k] - n] - referenceBPs2[my_iindx[k] - l] - referenceBPs2[my_iindx[l + 1] - n];
      for (cnt1 = k_min_Q_M1[jindx[l] + k]; cnt1 <= k_max_Q_M1[jindx[l] + k]; cnt1++)
        for (cnt2 = l_min_Q_M1[jindx[l] + k][cnt1]; cnt2 <= l_max_Q_M1[jindx[l] + k][cnt1]; cnt2 += 2) {
          for (cnt3 = k_min_Q_M1[jindx[n] + l + 1]; cnt3 <=
k_max_Q_M1[jindx[n] + l + 1]; cnt3++)
            for (cnt4 = l_min_Q_M1[jindx[n] + l + 1][cnt3]; cnt4 <= l_max_Q_M1[jindx[n] + l + 1][cnt3]; cnt4 += 2) {
              if (((cnt1 + cnt3 + da) > maxD1) || ((cnt2 + cnt4 + db) > maxD2)) {
                qot += Q_M1[jindx[l] + k][cnt1][cnt2 / 2] * Q_M1[jindx[n] + l + 1][cnt3][cnt4 / 2];
                if (qot > r) {
                  backtrack_qm1(vc, pstruc, cnt1, cnt2, k, l);
                  backtrack_qm1(vc, pstruc, cnt3, cnt4, l + 1, n);
                  return;
                }
              }
            }
        }
    }
  } else {
    r = vrna_urn() * Q_M2[k][d1][d2 / 2];
    for (l = k + TURN + 1; l < n - TURN - 1; l++) {
      if (!Q_M1[jindx[l] + k])
        continue;

      if (!Q_M1[jindx[n] + l + 1])
        continue;

      /* distance increments introduced by splitting [k,n] at l */
      da = referenceBPs1[my_iindx[k] - n] - referenceBPs1[my_iindx[k] - l] - referenceBPs1[my_iindx[l + 1] - n];
      db = referenceBPs2[my_iindx[k] - n] - referenceBPs2[my_iindx[k] - l] - referenceBPs2[my_iindx[l + 1] - n];
      for (cnt1 = k_min_Q_M1[jindx[l] + k]; cnt1 <= k_max_Q_M1[jindx[l] + k]; cnt1++)
        for (cnt2 = l_min_Q_M1[jindx[l] + k][cnt1]; cnt2 <= l_max_Q_M1[jindx[l] + k][cnt1]; cnt2 += 2) {
          for (cnt3 = k_min_Q_M1[jindx[n] + l + 1]; cnt3 <= k_max_Q_M1[jindx[n] + l + 1]; cnt3++)
            for (cnt4 = l_min_Q_M1[jindx[n] + l + 1][cnt3]; cnt4 <= l_max_Q_M1[jindx[n] + l + 1][cnt3]; cnt4 += 2) {
              /* only combinations with exact total distance (d1,d2) */
              if (((cnt1 + cnt3 + da) == d1) && ((cnt2 + cnt4 + db) == d2)) {
                qot += Q_M1[jindx[l] + k][cnt1][cnt2 / 2] * Q_M1[jindx[n] + l + 1][cnt3][cnt4 / 2];
                if (qot > r) {
                  backtrack_qm1(vc, pstruc, cnt1, cnt2, k, l);
                  backtrack_qm1(vc, pstruc, cnt3, cnt4, l + 1, n);
                  return;
                }
              }
            }
        }
    }
  }
  vrna_message_error("backtrack_qm2@2Dpfold.c: backtracking failed");
}


/*
 * Backtrack the substructure enclosed by the base pair (i,j) within
 * distance class (d1,d2); d1 == -1 selects the remainder classes.
 * Writes '(' and ')' for the sampled pairs into pstruc (1-based positions
 * map to pstruc[pos-1]).
 */
PRIVATE void
backtrack(vrna_fold_compound_t *vc,
          char *pstruc,
          int d1,
          int d2,
          unsigned int i,
          unsigned int j)
{
  FLT_OR_DBL *scale;
  unsigned int maxD1, maxD2, base_d1, base_d2, da, db;
  unsigned int *referenceBPs1, *referenceBPs2;
  char *ptype, *sequence;
  short *S1, *reference_pt1, *reference_pt2;
  int *my_iindx, *jindx, ij, cnt1, cnt2, cnt3, cnt4, *rtype;
  vrna_exp_param_t *pf_params; /* holds all [unscaled] pf parameters */
  vrna_md_t *md;
  vrna_mx_pf_t *matrices;

  pf_params =
vc->exp_params; md = &(pf_params->model_details); matrices = vc->exp_matrices; sequence = vc->sequence; maxD1 = vc->maxD1; maxD2 = vc->maxD2; my_iindx = vc->iindx; jindx = vc->jindx; scale = matrices->scale; ptype = vc->ptype; rtype = &(md->rtype[0]); S1 = vc->sequence_encoding; reference_pt1 = vc->reference_pt1; reference_pt2 = vc->reference_pt2; referenceBPs1 = vc->referenceBPs1; referenceBPs2 = vc->referenceBPs2; FLT_OR_DBL ***Q_B, ***Q_M, ***Q_M1, *Q_B_rem, *Q_M_rem, *Q_M1_rem; int *k_min_Q_M, *k_max_Q_M, *k_min_Q_M1, *k_max_Q_M1, *k_min_Q_B, *k_max_Q_B; int **l_min_Q_M, **l_max_Q_M, **l_min_Q_M1, **l_max_Q_M1, **l_min_Q_B, **l_max_Q_B; Q_B = matrices->Q_B; k_min_Q_B = matrices->k_min_Q_B; k_max_Q_B = matrices->k_max_Q_B; l_min_Q_B = matrices->l_min_Q_B; l_max_Q_B = matrices->l_max_Q_B; Q_M = matrices->Q_M; k_min_Q_M = matrices->k_min_Q_M; k_max_Q_M = matrices->k_max_Q_M; l_min_Q_M = matrices->l_min_Q_M; l_max_Q_M = matrices->l_max_Q_M; Q_M1 = matrices->Q_M1; k_min_Q_M1 = matrices->k_min_Q_M1; k_max_Q_M1 = matrices->k_max_Q_M1; l_min_Q_M1 = matrices->l_min_Q_M1; l_max_Q_M1 = matrices->l_max_Q_M1; Q_B_rem = matrices->Q_B_rem; Q_M_rem = matrices->Q_M_rem; Q_M1_rem = matrices->Q_M1_rem; cnt1 = cnt2 = cnt3 = cnt4 = -1; do { double r, qbt1 = 0.; unsigned int k, l, u, u1; int type; pstruc[i - 1] = '('; pstruc[j - 1] = ')'; r = 0.; ij = my_iindx[i] - j; l = INF; if (d1 == -1) { r = vrna_urn() * Q_B_rem[ij]; if (r == 0.) vrna_message_error("backtrack@2Dpfold.c: backtracking failed\n"); type = ptype[jindx[j] + i]; u = j - i - 1; base_d1 = ((unsigned int)reference_pt1[i] != j) ? 1 : -1; base_d2 = ((unsigned int)reference_pt2[i] != j) ? 1 : -1; da = base_d1 + referenceBPs1[ij]; db = base_d2 + referenceBPs2[ij]; /* hairpin ? 
*/ if ((da > maxD1) || (db > maxD2)) if (!(((type == 3) || (type == 4)) && no_closingGU)) qbt1 = exp_E_Hairpin(u, type, S1[i + 1], S1[j - 1], sequence + i - 1, pf_params) * scale[u + 2]; if (qbt1 >= r) return; /* found the hairpin we're done */ /* lets see if we form an interior loop */ for (k = i + 1; k <= MIN2(i + MAXLOOP + 1, j - TURN - 2); k++) { unsigned int u_pre, lmin; u1 = k - i - 1; lmin = k + TURN + 1; u_pre = u1 + j; /* lmin = MAX2(k + TURN + 1, u1 + j - 1 - MAXLOOP) */ if (u_pre > lmin + MAXLOOP) lmin = u_pre - 1 - MAXLOOP; for (l = lmin; l < j; l++) { int type_2; type_2 = ptype[jindx[l] + k]; if (type_2) { cnt1 = cnt2 = -1; da = base_d1 + referenceBPs1[my_iindx[i] - j] - referenceBPs1[my_iindx[k] - l]; db = base_d2 + referenceBPs2[my_iindx[i] - j] - referenceBPs2[my_iindx[k] - l]; type_2 = rtype[type_2]; FLT_OR_DBL tmp_en = exp_E_IntLoop(u1, j - l - 1, type, type_2, S1[i + 1], S1[j - 1], S1[k - 1], S1[l + 1], pf_params) * scale[u1 + j - l + 1]; if (Q_B_rem[my_iindx[k] - l] != 0.) { qbt1 += Q_B_rem[my_iindx[k] - l] * tmp_en; if (qbt1 > r) goto backtrack_int_early_escape_rem; } if (Q_B[my_iindx[k] - l]) { for (cnt1 = k_min_Q_B[my_iindx[k] - l]; cnt1 <= k_max_Q_B[my_iindx[k] - l]; cnt1++) for (cnt2 = l_min_Q_B[my_iindx[k] - l][cnt1]; cnt2 <= l_max_Q_B[my_iindx[k] - l][cnt1]; cnt2 += 2) if (((cnt1 + da) > maxD1) || ((cnt2 + db) > maxD2)) { qbt1 += Q_B[my_iindx[k] - l][cnt1][cnt2 / 2] * tmp_en; if (qbt1 > r) goto backtrack_int_early_escape_rem; } } } } } backtrack_int_early_escape_rem: if (l < j) { i = k; j = l; d1 = cnt1; d2 = cnt2; } else { break; } } else { if ((d1 >= k_min_Q_B[ij]) && (d1 <= k_max_Q_B[ij])) if ((d2 >= l_min_Q_B[ij][d1]) && (d2 <= l_max_Q_B[ij][d1])) r = vrna_urn() * Q_B[ij][d1][d2 / 2]; if (r == 0.) vrna_message_error("backtrack@2Dpfold.c: backtracking failed\n"); type = ptype[jindx[j] + i]; u = j - i - 1; base_d1 = ((unsigned int)reference_pt1[i] != j) ? 1 : -1; base_d2 = ((unsigned int)reference_pt2[i] != j) ? 
1 : -1; da = base_d1 + referenceBPs1[ij]; db = base_d2 + referenceBPs2[ij]; /*hairpin contribution*/ if ((da == d1) && (db == d2)) if (!(((type == 3) || (type == 4)) && no_closingGU)) qbt1 = exp_E_Hairpin(u, type, S1[i + 1], S1[j - 1], sequence + i - 1, pf_params) * scale[u + 2]; if (qbt1 >= r) return; /* found the hairpin we're done */ for (k = i + 1; k <= MIN2(i + MAXLOOP + 1, j - TURN - 2); k++) { unsigned int u_pre, lmin; u1 = k - i - 1; lmin = k + TURN + 1; u_pre = u1 + j; /* lmin = MAX2(k + TURN + 1, u1 + j - 1 - MAXLOOP) */ if (u_pre > lmin + MAXLOOP) lmin = u_pre - 1 - MAXLOOP; for (l = lmin; l < j; l++) { int type_2; type_2 = ptype[jindx[l] + k]; if (type_2) { da = base_d1 + referenceBPs1[my_iindx[i] - j] - referenceBPs1[my_iindx[k] - l]; db = base_d2 + referenceBPs2[my_iindx[i] - j] - referenceBPs2[my_iindx[k] - l]; type_2 = rtype[type_2]; FLT_OR_DBL tmp_en = exp_E_IntLoop(u1, j - l - 1, type, type_2, S1[i + 1], S1[j - 1], S1[k - 1], S1[l + 1], pf_params) * scale[u1 + j - l + 1]; if (d1 >= da && d2 >= db) { if ((d1 - da >= k_min_Q_B[my_iindx[k] - l]) && (d1 - da <= k_max_Q_B[my_iindx[k] - l])) { if ((d2 - db >= l_min_Q_B[my_iindx[k] - l][d1 - da]) && (d2 - db <= l_max_Q_B[my_iindx[k] - l][d1 - da])) { cnt1 = d1 - da; cnt2 = d2 - db; qbt1 += Q_B[my_iindx[k] - l][cnt1][cnt2 / 2] * tmp_en; if (qbt1 > r) goto backtrack_int_early_escape; } } } } } } backtrack_int_early_escape: if (l < j) { i = k; j = l; d1 = cnt1; d2 = cnt2; } else { break; } } } while (1); /* backtrack in multi-loop */ { double r, qt; unsigned int k, ii, jj; base_d1 = ((unsigned int)reference_pt1[i] != j) ? 1 : -1; base_d2 = ((unsigned int)reference_pt2[i] != j) ? 
1 : -1; base_d1 += referenceBPs1[my_iindx[i] - j]; base_d2 += referenceBPs2[my_iindx[i] - j]; i++; j--; /* find the first split index */ ii = my_iindx[i]; /* ii-j=[i,j] */ jj = jindx[j]; /* jj+i=[j,i] */ if (d1 == -1) { /* get total contribution for current part */ for (qt = 0., k = i + 1; k < j; k++) { if (Q_M_rem[ii - k + 1] != 0.) { if (Q_M1[jj + k]) { for (cnt1 = k_min_Q_M1[jj + k]; cnt1 <= k_max_Q_M1[jj + k]; cnt1++) for (cnt2 = l_min_Q_M1[jj + k][cnt1]; cnt2 <= l_max_Q_M1[jj + k][cnt1]; cnt2 += 2) qt += Q_M_rem[ii - k + 1] * Q_M1[jj + k][cnt1][cnt2 / 2]; } if (Q_M1_rem[jj + k] != 0.) qt += Q_M_rem[ii - k + 1] * Q_M1_rem[jj + k]; } if (Q_M1_rem[jj + k] != 0.) { if (Q_M[ii - k + 1]) { for (cnt1 = k_min_Q_M[ii - k + 1]; cnt1 <= k_max_Q_M[ii - k + 1]; cnt1++) for (cnt2 = l_min_Q_M[ii - k + 1][cnt1]; cnt2 <= l_max_Q_M[ii - k + 1][cnt1]; cnt2 += 2) qt += Q_M[ii - k + 1][cnt1][cnt2 / 2] * Q_M1_rem[jj + k]; } } /* calculate introduced distance to reference structures */ if (!Q_M[ii - k + 1]) continue; if (!Q_M1[jj + k]) continue; da = base_d1 - referenceBPs1[my_iindx[i] - k + 1] - referenceBPs1[my_iindx[k] - j]; db = base_d2 - referenceBPs2[my_iindx[i] - k + 1] - referenceBPs2[my_iindx[k] - j]; /* collect all contributing energies */ for (cnt1 = k_min_Q_M[ii - k + 1]; cnt1 <= k_max_Q_M[ii - k + 1]; cnt1++) for (cnt2 = l_min_Q_M[ii - k + 1][cnt1]; cnt2 <= l_max_Q_M[ii - k + 1][cnt1]; cnt2 += 2) for (cnt3 = k_min_Q_M1[jj + k]; cnt3 <= k_max_Q_M1[jj + k]; cnt3++) for (cnt4 = l_min_Q_M1[jj + k][cnt3]; cnt4 <= l_max_Q_M1[jj + k][cnt3]; cnt4 += 2) if (((cnt1 + cnt3 + da) > maxD1) || ((cnt2 + cnt4 + db) > maxD2)) qt += Q_M[ii - k + 1][cnt1][cnt2 / 2] * Q_M1[jj + k][cnt3][cnt4 / 2]; } /* throw the dice */ r = vrna_urn() * qt; for (qt = 0., k = i + 1; k < j; k++) { cnt1 = cnt2 = cnt3 = cnt4 = -1; if (Q_M_rem[ii - k + 1] != 0.) 
{ if (Q_M1_rem[jj + k] != 0) { qt += Q_M_rem[ii - k + 1] * Q_M1_rem[jj + k]; if (qt >= r) goto backtrack_ml_early_escape; } if (Q_M1[jj + k]) { for (cnt3 = k_min_Q_M1[jj + k]; cnt3 <= k_max_Q_M1[jj + k]; cnt3++) for (cnt4 = l_min_Q_M1[jj + k][cnt3]; cnt4 <= l_max_Q_M1[jj + k][cnt3]; cnt4 += 2) { qt += Q_M_rem[ii - k + 1] * Q_M1[jj + k][cnt3][cnt4 / 2]; if (qt >= r) goto backtrack_ml_early_escape; } } } if (Q_M1_rem[jj + k] != 0.) { cnt3 = cnt4 = -1; if (Q_M[ii - k + 1]) { for (cnt1 = k_min_Q_M[ii - k + 1]; cnt1 <= k_max_Q_M[ii - k + 1]; cnt1++) for (cnt2 = l_min_Q_M[ii - k + 1][cnt1]; cnt2 <= l_max_Q_M[ii - k + 1][cnt1]; cnt2 += 2) { qt += Q_M[ii - k + 1][cnt1][cnt2 / 2] * Q_M1_rem[jj + k]; if (qt >= r) goto backtrack_ml_early_escape; } } } /* calculate introduced distance to reference structures */ da = base_d1 - referenceBPs1[my_iindx[i] - k + 1] - referenceBPs1[my_iindx[k] - j]; db = base_d2 - referenceBPs2[my_iindx[i] - k + 1] - referenceBPs2[my_iindx[k] - j]; /* collect all contributing energies */ if (!Q_M[ii - k + 1]) continue; if (!Q_M1[jj + k]) continue; for (cnt1 = k_min_Q_M[ii - k + 1]; cnt1 <= k_max_Q_M[ii - k + 1]; cnt1++) for (cnt2 = l_min_Q_M[ii - k + 1][cnt1]; cnt2 <= l_max_Q_M[ii - k + 1][cnt1]; cnt2 += 2) for (cnt3 = k_min_Q_M1[jj + k]; cnt3 <= k_max_Q_M1[jj + k]; cnt3++) for (cnt4 = l_min_Q_M1[jj + k][cnt3]; cnt4 <= l_max_Q_M1[jj + k][cnt3]; cnt4 += 2) if (((cnt1 + cnt3 + da) > maxD1) || ((cnt2 + cnt4 + db) > maxD2)) { qt += Q_M[ii - k + 1][cnt1][cnt2 / 2] * Q_M1[jj + k][cnt3][cnt4 / 2]; if (qt >= r) goto backtrack_ml_early_escape; } } } else { /* get total contribution */ for (qt = 0., k = i + 1; k < j; k++) { /* calculate introduced distance to reference structures */ da = base_d1 - referenceBPs1[my_iindx[i] - k + 1] - referenceBPs1[my_iindx[k] - j]; db = base_d2 - referenceBPs2[my_iindx[i] - k + 1] - referenceBPs2[my_iindx[k] - j]; /* collect all contributing energies */ if (d1 >= da && d2 >= db && Q_M[ii - k + 1] && Q_M1[jj + k]) { for (cnt1 
= k_min_Q_M[ii - k + 1]; cnt1 <= MIN2(k_max_Q_M[ii - k + 1], d1 - da); cnt1++) for (cnt2 = l_min_Q_M[ii - k + 1][cnt1]; cnt2 <= MIN2(l_max_Q_M[ii - k + 1][cnt1], d2 - db); cnt2 += 2) if ((d1 - cnt1 - da >= k_min_Q_M1[jj + k]) && (d1 - cnt1 - da <= k_max_Q_M1[jj + k])) if ((d2 - cnt2 - db >= l_min_Q_M1[jj + k][d1 - da - cnt1]) && (d2 - cnt2 - db <= l_max_Q_M1[jj + k][d1 - cnt1 - da])) qt += Q_M[ii - k + 1][cnt1][cnt2 / 2] * Q_M1[jj + k][d1 - da - cnt1][(d2 - db - cnt2) / 2]; } } r = vrna_urn() * qt; for (qt = 0., k = i + 1; k < j; k++) { /* calculate introduced distance to reference structures */ da = base_d1 - referenceBPs1[my_iindx[i] - k + 1] - referenceBPs1[my_iindx[k] - j]; db = base_d2 - referenceBPs2[my_iindx[i] - k + 1] - referenceBPs2[my_iindx[k] - j]; /* collect all contributing energies */ if (d1 >= da && d2 >= db && Q_M[ii - k + 1] && Q_M1[jj + k]) { for (cnt1 = k_min_Q_M[ii - k + 1]; cnt1 <= MIN2(k_max_Q_M[ii - k + 1], d1 - da); cnt1++) for (cnt2 = l_min_Q_M[ii - k + 1][cnt1]; cnt2 <= MIN2(l_max_Q_M[ii - k + 1][cnt1], d2 - db); cnt2 += 2) if ((d1 - cnt1 - da >= k_min_Q_M1[jj + k]) && (d1 - cnt1 - da <= k_max_Q_M1[jj + k])) { if ((d2 - cnt2 - db >= l_min_Q_M1[jj + k][d1 - da - cnt1]) && (d2 - cnt2 - db <= l_max_Q_M1[jj + k][d1 - cnt1 - da])) { cnt3 = d1 - da - cnt1; cnt4 = d2 - db - cnt2; qt += Q_M[ii - k + 1][cnt1][cnt2 / 2] * Q_M1[jj + k][cnt3][cnt4 / 2]; if (qt >= r) goto backtrack_ml_early_escape; } } } } } if (k >= j) vrna_message_error("backtrack failed, can't find split index "); backtrack_ml_early_escape: backtrack_qm1(vc, pstruc, cnt3, cnt4, k, j); j = k - 1; backtrack_qm(vc, pstruc, cnt1, cnt2, i, j); } } PRIVATE void backtrack_qm1(vrna_fold_compound_t *vc, char *pstruc, int d1, int d2, unsigned int i, unsigned int j) { /* i is paired to l, i<l<j; backtrack in qm1 to find l */ FLT_OR_DBL r, qt, *scale; unsigned int maxD1, maxD2, da, db; unsigned int *referenceBPs1, *referenceBPs2; char *ptype; short *S1; int *my_iindx, *jindx, cnt1, cnt2; 
vrna_exp_param_t *pf_params; /* holds all [unscaled] pf parameters */ vrna_mx_pf_t *matrices; pf_params = vc->exp_params; matrices = vc->exp_matrices; maxD1 = vc->maxD1; maxD2 = vc->maxD2; my_iindx = vc->iindx; jindx = vc->jindx; scale = matrices->scale; ptype = vc->ptype; S1 = vc->sequence_encoding; referenceBPs1 = vc->referenceBPs1; referenceBPs2 = vc->referenceBPs2; FLT_OR_DBL ***Q_B, ***Q_M1, *Q_B_rem, *Q_M1_rem; int *k_min_Q_M1, *k_max_Q_M1, *k_min_Q_B, *k_max_Q_B; int **l_min_Q_M1, **l_max_Q_M1, **l_min_Q_B, **l_max_Q_B; Q_B = matrices->Q_B; k_min_Q_B = matrices->k_min_Q_B; k_max_Q_B = matrices->k_max_Q_B; l_min_Q_B = matrices->l_min_Q_B; l_max_Q_B = matrices->l_max_Q_B; Q_M1 = matrices->Q_M1; k_min_Q_M1 = matrices->k_min_Q_M1; k_max_Q_M1 = matrices->k_max_Q_M1; l_min_Q_M1 = matrices->l_min_Q_M1; l_max_Q_M1 = matrices->l_max_Q_M1; Q_B_rem = matrices->Q_B_rem; Q_M1_rem = matrices->Q_M1_rem; unsigned int ii, l; int type; r = 0.; cnt1 = cnt2 = -1; /* find qm1 contribution */ if (d1 == -1) { r = vrna_urn() * Q_M1_rem[jindx[j] + i]; } else { if ((d1 >= k_min_Q_M1[jindx[j] + i]) && (d1 <= k_max_Q_M1[jindx[j] + i])) if ((d2 >= l_min_Q_M1[jindx[j] + i][d1]) && (d2 <= l_max_Q_M1[jindx[j] + i][d1])) r = vrna_urn() * Q_M1[jindx[j] + i][d1][d2 / 2]; } if (r == 0.) vrna_message_error("backtrack_qm1@2Dpfold.c: backtracking failed\n"); ii = my_iindx[i]; for (qt = 0., l = i + TURN + 1; l <= j; l++) { type = ptype[jindx[l] + i]; if (type) { FLT_OR_DBL tmp = exp_E_MLstem(type, S1[i - 1], S1[l + 1], pf_params) * pow(pf_params->expMLbase, j - l) * scale[j - l]; /* compute the introduced distance to reference structures */ da = referenceBPs1[my_iindx[i] - j] - referenceBPs1[my_iindx[i] - l]; db = referenceBPs2[my_iindx[i] - j] - referenceBPs2[my_iindx[i] - l]; cnt1 = cnt2 = -1; if (d1 == -1) { if (Q_B_rem[ii - l] != 0.) 
{ qt += Q_B_rem[ii - l] * tmp; if (qt >= r) goto backtrack_qm1_early_escape; } if (Q_B[ii - l]) { for (cnt1 = k_min_Q_B[ii - l]; cnt1 <= k_max_Q_B[ii - l]; cnt1++) for (cnt2 = l_min_Q_B[ii - l][cnt1]; cnt2 <= l_max_Q_B[ii - l][cnt1]; cnt2 += 2) if (((cnt1 + da) > maxD1) || ((cnt2 + db) > maxD2)) { qt += Q_B[ii - l][cnt1][cnt2 / 2] * tmp; if (qt >= r) goto backtrack_qm1_early_escape; } } } else { /* get energy contributions */ if (d1 >= da && d2 >= db) { if ((d1 - da >= k_min_Q_B[ii - l]) && (d1 - da <= k_max_Q_B[ii - l])) { if ((d2 - db >= l_min_Q_B[ii - l][d1 - da]) && (d2 - db <= l_max_Q_B[ii - l][d1 - da])) { cnt1 = d1 - da; cnt2 = d2 - db; qt += Q_B[ii - l][cnt1][cnt2 / 2] * tmp; if (qt >= r) goto backtrack_qm1_early_escape; } } } } } } if (l > j) vrna_message_error("backtrack failed in qm1"); backtrack_qm1_early_escape: backtrack(vc, pstruc, cnt1, cnt2, i, l); } PRIVATE void backtrack_qm(vrna_fold_compound_t *vc, char *pstruc, int d1, int d2, unsigned int i, unsigned int j) { /* divide multiloop into qm and qm1 */ FLT_OR_DBL r, *scale; unsigned int maxD1, maxD2, da, db, da2, db2; unsigned int *referenceBPs1, *referenceBPs2; int *my_iindx, *jindx, cnt1, cnt2, cnt3, cnt4; vrna_exp_param_t *pf_params; /* holds all [unscaled] pf parameters */ vrna_mx_pf_t *matrices; pf_params = vc->exp_params; matrices = vc->exp_matrices; maxD1 = vc->maxD1; maxD2 = vc->maxD2; my_iindx = vc->iindx; jindx = vc->jindx; scale = matrices->scale; referenceBPs1 = vc->referenceBPs1; referenceBPs2 = vc->referenceBPs2; FLT_OR_DBL ***Q_M, ***Q_M1, *Q_M_rem, *Q_M1_rem; int *k_min_Q_M, *k_max_Q_M, *k_min_Q_M1, *k_max_Q_M1; int **l_min_Q_M, **l_max_Q_M, **l_min_Q_M1, **l_max_Q_M1; Q_M = matrices->Q_M; k_min_Q_M = matrices->k_min_Q_M; k_max_Q_M = matrices->k_max_Q_M; l_min_Q_M = matrices->l_min_Q_M; l_max_Q_M = matrices->l_max_Q_M; Q_M1 = matrices->Q_M1; k_min_Q_M1 = matrices->k_min_Q_M1; k_max_Q_M1 = matrices->k_max_Q_M1; l_min_Q_M1 = matrices->l_min_Q_M1; l_max_Q_M1 = matrices->l_max_Q_M1; 
Q_M_rem = matrices->Q_M_rem; Q_M1_rem = matrices->Q_M1_rem; double qmt = 0; unsigned int k; cnt1 = cnt2 = cnt3 = cnt4 = -1; r = 0.; while (j > i) { /* now backtrack [i ... j] in qm[] */ /* find qm contribution */ if (d1 == -1) { r = vrna_urn() * Q_M_rem[my_iindx[i] - j]; } else { if (Q_M[my_iindx[i] - j]) if ((d1 >= k_min_Q_M[my_iindx[i] - j]) && (d1 <= k_max_Q_M[my_iindx[i] - j])) if ((d2 >= l_min_Q_M[my_iindx[i] - j][d1]) && (d2 <= l_max_Q_M[my_iindx[i] - j][d1])) r = vrna_urn() * Q_M[my_iindx[i] - j][d1][d2 / 2]; } if (r == 0.) vrna_message_error("backtrack_qm@2Dpfold.c: backtracking failed in finding qm contribution\n"); qmt = 0.; if (d1 == -1) { if (Q_M1_rem[jindx[j] + i] != 0.) { qmt += Q_M1_rem[jindx[j] + i]; if (qmt >= r) { backtrack_qm1(vc, pstruc, d1, d2, i, j); return; } } for (k = i + 1; k <= j; k++) { FLT_OR_DBL tmp = pow(pf_params->expMLbase, k - i) * scale[k - i]; if (Q_M1_rem[jindx[j] + k] != 0.) { qmt += Q_M1_rem[jindx[j] + k] * tmp; if (qmt >= r) { backtrack_qm1(vc, pstruc, d1, d2, k, j); return; } } da2 = referenceBPs1[my_iindx[i] - j] - referenceBPs1[my_iindx[k] - j]; db2 = referenceBPs2[my_iindx[i] - j] - referenceBPs2[my_iindx[k] - j]; if (Q_M1[jindx[j] + k]) { for (cnt1 = k_min_Q_M1[jindx[j] + k]; cnt1 <= k_max_Q_M1[jindx[j] + k]; cnt1++) for (cnt2 = l_min_Q_M1[jindx[j] + k][cnt1]; cnt2 <= l_max_Q_M1[jindx[j] + k][cnt1]; cnt2 += 2) if (((cnt1 + da2) > maxD1) || ((cnt2 + db2) > maxD2)) { qmt += Q_M1[jindx[j] + k][cnt1][cnt2 / 2] * tmp; if (qmt >= r) { backtrack_qm1(vc, pstruc, cnt1, cnt2, k, j); return; } } } da = da2 - referenceBPs1[my_iindx[i] - k + 1]; db = db2 - referenceBPs2[my_iindx[i] - k + 1]; cnt1 = cnt2 = cnt3 = cnt4 = -1; if (Q_M_rem[my_iindx[i] - k + 1] != 0.) { if (Q_M1_rem[jindx[j] + k] != 0.) 
{ qmt += Q_M_rem[my_iindx[i] - k + 1] * Q_M1_rem[jindx[j] + k]; if (qmt >= r) goto backtrack_qm_early_escape; } if (Q_M1[jindx[j] + k]) { for (cnt3 = k_min_Q_M1[jindx[j] + k]; cnt3 <= k_max_Q_M1[jindx[j] + k]; cnt3++) for (cnt4 = l_min_Q_M1[jindx[j] + k][cnt3]; cnt4 <= l_max_Q_M1[jindx[j] + k][cnt3]; cnt4 += 2) { qmt += Q_M_rem[my_iindx[i] - k + 1] * Q_M1[jindx[j] + k][cnt3][cnt4 / 2]; if (qmt >= r) goto backtrack_qm_early_escape; } } } if (Q_M1_rem[jindx[j] + k] != 0.) { cnt3 = cnt4 = -1; if (Q_M[my_iindx[i] - k + 1]) { for (cnt1 = k_min_Q_M[my_iindx[i] - k + 1]; cnt1 <= k_max_Q_M[my_iindx[i] - k + 1]; cnt1++) for (cnt2 = l_min_Q_M[my_iindx[i] - k + 1][cnt1]; cnt2 <= l_max_Q_M[my_iindx[i] - k + 1][cnt1]; cnt2 += 2) { qmt += Q_M[my_iindx[i] - k + 1][cnt1][cnt2 / 2] * Q_M1_rem[jindx[j] + k]; if (qmt >= r) goto backtrack_qm_early_escape; } } } if (!Q_M[my_iindx[i] - k + 1]) continue; if (!Q_M1[jindx[j] + k]) continue; for (cnt1 = k_min_Q_M[my_iindx[i] - k + 1]; cnt1 <= k_max_Q_M[my_iindx[i] - k + 1]; cnt1++) for (cnt2 = l_min_Q_M[my_iindx[i] - k + 1][cnt1]; cnt2 <= l_max_Q_M[my_iindx[i] - k + 1][cnt1]; cnt2 += 2) for (cnt3 = k_min_Q_M1[jindx[j] + k]; cnt3 <= k_max_Q_M1[jindx[j] + k]; cnt3++) for (cnt4 = l_min_Q_M1[jindx[j] + k][cnt3]; cnt4 <= l_max_Q_M1[jindx[j] + k][cnt3]; cnt4 += 2) if (((cnt1 + cnt3 + da) > maxD1) || ((cnt2 + cnt4 + db) > maxD2)) { qmt += Q_M[my_iindx[i] - k + 1][cnt1][cnt2 / 2] * Q_M1[jindx[j] + k][cnt3][cnt4 / 2]; if (qmt >= r) goto backtrack_qm_early_escape; } } } else { /* find corresponding qm1 contribution */ if (Q_M1[jindx[j] + i]) { if ((d1 >= k_min_Q_M1[jindx[j] + i]) && (d1 <= k_max_Q_M1[jindx[j] + i])) if ((d2 >= l_min_Q_M1[jindx[j] + i][d1]) && (d2 <= l_max_Q_M1[jindx[j] + i][d1])) qmt = Q_M1[jindx[j] + i][d1][d2 / 2]; } k = i; if (qmt < r) { for (k = i + 1; k <= j; k++) { /* calculate introduced distancies to reference structures */ da2 = referenceBPs1[my_iindx[i] - j] - referenceBPs1[my_iindx[k] - j]; db2 = referenceBPs2[my_iindx[i] 
- j] - referenceBPs2[my_iindx[k] - j]; da = da2 - referenceBPs1[my_iindx[i] - k + 1]; db = db2 - referenceBPs2[my_iindx[i] - k + 1]; FLT_OR_DBL tmp = pow(pf_params->expMLbase, k - i) * scale[k - i]; /* collect unpaired + qm1 contributions */ if (d1 >= da2 && d2 >= db2) { if ((d1 - da2 >= k_min_Q_M1[jindx[j] + k]) && (d1 - da2 <= k_max_Q_M1[jindx[j] + k])) { if ((d2 - db2 >= l_min_Q_M1[jindx[j] + k][d1 - da2]) && (d2 - db2 <= l_max_Q_M1[jindx[j] + k][d1 - da2])) { cnt3 = d1 - da2; cnt4 = d2 - db2; qmt += Q_M1[jindx[j] + k][cnt3][cnt4 / 2] * tmp; if (qmt >= r) { backtrack_qm1(vc, pstruc, cnt3, cnt4, k, j); return; } } } } /* collect qm + qm1 contributions */ if (d1 >= da && d2 >= db && Q_M[my_iindx[i] - k + 1] && Q_M1[jindx[j] + k]) { for (cnt1 = k_min_Q_M[my_iindx[i] - k + 1]; cnt1 <= MIN2(k_max_Q_M[my_iindx[i] - k + 1], d1 - da); cnt1++) for (cnt2 = l_min_Q_M[my_iindx[i] - k + 1][cnt1]; cnt2 <= MIN2(l_max_Q_M[my_iindx[i] - k + 1][cnt1], d2 - db); cnt2 += 2) if ((d1 - da - cnt1 >= k_min_Q_M1[jindx[j] + k]) && (d1 - da - cnt1 <= k_max_Q_M1[jindx[j] + k])) { if ((d2 - db - cnt2 >= l_min_Q_M1[jindx[j] + k][d1 - da - cnt1]) && (d2 - db - cnt2 <= l_max_Q_M1[jindx[j] + k][d1 - da - cnt1])) { cnt3 = d1 - da - cnt1; cnt4 = d2 - db - cnt2; qmt += Q_M[my_iindx[i] - k + 1][cnt1][cnt2 / 2] * Q_M1[jindx[j] + k][cnt3][cnt4 / 2]; if (qmt >= r) goto backtrack_qm_early_escape; } } } } } else { backtrack_qm1(vc, pstruc, d1, d2, k, j); return; } } if (k > j) vrna_message_error("backtrack_qm@2Dpfold.c: backtrack failed in qm"); backtrack_qm_early_escape: backtrack_qm1(vc, pstruc, cnt3, cnt4, k, j); if (k < i + TURN) break; /* no more pairs */ d1 = cnt1; d2 = cnt2; if (d1 == referenceBPs1[my_iindx[i] - k + 1] && d2 == referenceBPs2[my_iindx[i] - k + 1]) { /* is interval [i,k] totally unpaired? 
*/ FLT_OR_DBL tmp = pow(pf_params->expMLbase, k - i) * scale[k - i]; r = vrna_urn() * (Q_M[my_iindx[i] - k + 1][d1][d2 / 2] + tmp); if (tmp >= r) return; /* no more pairs */ } j = k - 1; } } PRIVATE void adjustArrayBoundaries(FLT_OR_DBL ***array, int *k_min, int *k_max, int **l_min, int **l_max, int k_min_post, int k_max_post, int *l_min_post, int *l_max_post) { int cnt1; int k_diff_pre = k_min_post - *k_min; int mem_size = k_max_post - k_min_post + 1; if (k_min_post < INF) { /* free all the unused memory behind actual data */ for (cnt1 = k_max_post + 1; cnt1 <= *k_max; cnt1++) { (*array)[cnt1] += (*l_min)[cnt1] / 2; free((*array)[cnt1]); } /* free unused memory before actual data */ for (cnt1 = *k_min; cnt1 < k_min_post; cnt1++) { (*array)[cnt1] += (*l_min)[cnt1] / 2; free((*array)[cnt1]); } /* move data to front and thereby eliminating unused memory in front of actual data */ if (k_diff_pre > 0) { memmove((FLT_OR_DBL **)(*array), ((FLT_OR_DBL **)(*array)) + k_diff_pre, sizeof(FLT_OR_DBL *) * mem_size); memmove((int *)(*l_min), ((int *)(*l_min)) + k_diff_pre, sizeof(int) * mem_size); memmove((int *)(*l_max), ((int *)(*l_max)) + k_diff_pre, sizeof(int) * mem_size); } /* reallocating memory to actual size used */ *array += *k_min; *array = (FLT_OR_DBL **)realloc(*array, sizeof(FLT_OR_DBL *) * mem_size); *array -= k_min_post; *l_min += *k_min; *l_min = (int *)realloc(*l_min, sizeof(int) * mem_size); *l_min -= k_min_post; *l_max += *k_min; *l_max = (int *)realloc(*l_max, sizeof(int) * mem_size); *l_max -= k_min_post; for (cnt1 = k_min_post; cnt1 <= k_max_post; cnt1++) { if (l_min_post[cnt1] < INF) { /* new memsize */ mem_size = (l_max_post[cnt1] - l_min_post[cnt1] + 1) / 2 + 1; /* reshift the pointer */ (*array)[cnt1] += (*l_min)[cnt1] / 2; int shift = (l_min_post[cnt1] % 2 == (*l_min)[cnt1] % 2) ? 
0 : 1; /* eliminate unused memory in front of actual data */ unsigned int start = (l_min_post[cnt1] - (*l_min)[cnt1]) / 2 + shift; if (start > 0) memmove((FLT_OR_DBL *)((*array)[cnt1]), (FLT_OR_DBL *)((*array)[cnt1]) + start, sizeof(FLT_OR_DBL) * mem_size); (*array)[cnt1] = (FLT_OR_DBL *)realloc((*array)[cnt1], sizeof(FLT_OR_DBL) * mem_size); (*array)[cnt1] -= l_min_post[cnt1] / 2; } else { /* free according memory */ (*array)[cnt1] += (*l_min)[cnt1] / 2; free((*array)[cnt1]); } (*l_min)[cnt1] = l_min_post[cnt1]; (*l_max)[cnt1] = l_max_post[cnt1]; } } else { /* we have to free all unused memory */ for (cnt1 = *k_min; cnt1 <= *k_max; cnt1++) { (*array)[cnt1] += (*l_min)[cnt1] / 2; free((*array)[cnt1]); } (*l_min) += *k_min; (*l_max) += *k_min; free(*l_min); free(*l_max); (*array) += *k_min; free(*array); *array = NULL; } l_min_post += *k_min; l_max_post += *k_min; *k_min = k_min_post; *k_max = k_max_post; free(l_min_post); free(l_max_post); } PRIVATE INLINE void preparePosteriorBoundaries(int size, int shift, int *min_k, int *max_k, int **min_l, int **max_l) { int i; *min_k = INF; *max_k = 0; *min_l = (int *)vrna_alloc(sizeof(int) * size); *max_l = (int *)vrna_alloc(sizeof(int) * size); for (i = 0; i < size; i++) { (*min_l)[i] = INF; (*max_l)[i] = 0; } *min_l -= shift; *max_l -= shift; } PRIVATE INLINE void updatePosteriorBoundaries(int d1, int d2, int *min_k, int *max_k, int **min_l, int **max_l) { (*min_l)[d1] = MIN2((*min_l)[d1], d2); (*max_l)[d1] = MAX2((*max_l)[d1], d2); *min_k = MIN2(*min_k, d1); *max_k = MAX2(*max_k, d1); } PRIVATE INLINE void prepareBoundaries(int min_k_pre, int max_k_pre, int min_l_pre, int max_l_pre, int bpdist, int *min_k, int *max_k, int **min_l, int **max_l) { int cnt; int mem = max_k_pre - min_k_pre + 1; *min_k = min_k_pre; *max_k = max_k_pre; *min_l = (int *)vrna_alloc(sizeof(int) * mem); *max_l = (int *)vrna_alloc(sizeof(int) * mem); *min_l -= min_k_pre; *max_l -= min_k_pre; /* for each k guess the according minimum l*/ for (cnt = 
min_k_pre; cnt <= max_k_pre; cnt++) { (*min_l)[cnt] = min_l_pre; (*max_l)[cnt] = max_l_pre; while ((*min_l)[cnt] + cnt < bpdist) (*min_l)[cnt]++; if ((bpdist % 2) != (((*min_l)[cnt] + cnt) % 2)) (*min_l)[cnt]++; } } PRIVATE INLINE void prepareArray(FLT_OR_DBL ***array, int min_k, int max_k, int *min_l, int *max_l) { int i, mem; *array = (FLT_OR_DBL **)vrna_alloc(sizeof(FLT_OR_DBL *) * (max_k - min_k + 1)); *array -= min_k; for (i = min_k; i <= max_k; i++) { mem = (max_l[i] - min_l[i] + 1) / 2 + 1; (*array)[i] = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * mem); (*array)[i] -= min_l[i] / 2; } } /* ################################# # DEPRECATED FUNCTIONS BELOW # ################################# */ PRIVATE void crosslink(TwoDpfold_vars *vars) { vrna_fold_compound_t *c; vrna_mx_pf_t *m; c = vars->compatibility; m = c->exp_matrices; vars->sequence = c->sequence; vars->seq_length = c->length; vars->reference_pt1 = c->reference_pt1; vars->reference_pt2 = c->reference_pt2; vars->referenceBPs1 = c->referenceBPs1; vars->referenceBPs2 = c->referenceBPs2; vars->mm1 = c->mm1; vars->mm2 = c->mm2; vars->bpdist = c->bpdist; vars->dangles = c->exp_params->model_details.dangles; vars->circ = c->exp_params->model_details.circ; vars->temperature = c->exp_params->model_details.temperature; vars->init_temp = c->exp_params->model_details.temperature; vars->pf_scale = c->exp_params->pf_scale; vars->pf_params = c->exp_params; vars->scale = m->scale; vars->ptype = c->ptype_pf_compat; vars->S = c->sequence_encoding2; vars->S1 = c->sequence_encoding; vars->jindx = c->jindx; vars->my_iindx = c->iindx; vars->maxD1 = c->maxD1; vars->maxD2 = c->maxD2; vars->Q = m->Q; vars->l_min_values = m->l_min_Q; vars->l_max_values = m->l_max_Q; vars->k_min_values = m->k_min_Q; vars->k_max_values = m->k_max_Q; vars->Q_B = m->Q_B; vars->l_min_values_b = m->l_min_Q_B; vars->l_max_values_b = m->l_max_Q_B; vars->k_min_values_b = m->k_min_Q_B; vars->k_max_values_b = m->k_max_Q_B; vars->Q_M = m->Q_M; 
vars->l_min_values_m = m->l_min_Q_M; vars->l_max_values_m = m->l_max_Q_M; vars->k_min_values_m = m->k_min_Q_M; vars->k_max_values_m = m->k_max_Q_M; vars->Q_M1 = m->Q_M1; vars->l_min_values_m1 = m->l_min_Q_M1; vars->l_max_values_m1 = m->l_max_Q_M1; vars->k_min_values_m1 = m->k_min_Q_M1; vars->k_max_values_m1 = m->k_max_Q_M1; vars->Q_M2_rem = m->Q_M2_rem; vars->Q_M2 = m->Q_M2; vars->l_min_values_m2 = m->l_min_Q_M2; vars->l_max_values_m2 = m->l_max_Q_M2; vars->k_min_values_m2 = m->k_min_Q_M2; vars->k_max_values_m2 = m->k_max_Q_M2; vars->Q_c = m->Q_c; vars->Q_cH = m->Q_cH; vars->Q_cI = m->Q_cI; vars->Q_cM = m->Q_cM; vars->Q_c_rem = m->Q_c_rem; vars->Q_cH_rem = m->Q_cH_rem; vars->Q_cI_rem = m->Q_cI_rem; vars->Q_cM_rem = m->Q_cM_rem; vars->Q_rem = m->Q_rem; vars->Q_B_rem = m->Q_B_rem; vars->Q_M_rem = m->Q_M_rem; vars->Q_M1_rem = m->Q_M1_rem; } PUBLIC char * TwoDpfold_pbacktrack(TwoDpfold_vars *vars, int d1, int d2) { return vrna_pbacktrack_TwoD(vars->compatibility, d1, d2); } PUBLIC char * TwoDpfold_pbacktrack5(TwoDpfold_vars *vars, int d1, int d2, unsigned int length) { return vrna_pbacktrack5_TwoD(vars->compatibility, d1, d2, length); } PUBLIC TwoDpfold_vars * get_TwoDpfold_variables(const char *seq, const char *structure1, char *structure2, int circ) { vrna_md_t md; TwoDpfold_vars *vars; set_model_details(&md); md.circ = circ; vars = (TwoDpfold_vars *)malloc(sizeof(TwoDpfold_vars)); vars->compatibility = vrna_fold_compound_TwoD(seq, structure1, structure2, &md, VRNA_OPTION_PF); crosslink(vars); return vars; } PUBLIC void destroy_TwoDpfold_variables(TwoDpfold_vars *vars) { if (vars == NULL) return; vrna_fold_compound_free(vars->compatibility); free(vars); } vrna_sol_TwoD_pf_t * TwoDpfoldList(TwoDpfold_vars *vars, int distance1, int distance2) { vrna_sol_TwoD_pf_t *sol; sol = vrna_pf_TwoD(vars->compatibility, distance1, distance2); crosslink(vars); return sol; }
AlloyVector.h
/* * Copyright(C) 2015, Blake C. Lucas, Ph.D. (img.science@gmail.com) * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ #ifndef ALLOYLINEARALGEBRA_H_ #define ALLOYLINEARALGEBRA_H_ #include "AlloyMath.h" #include <vector> #include <functional> #include <iomanip> #include <limits> #include <algorithm> #include <fstream> #include "cereal/types/vector.hpp" #include "cereal/types/string.hpp" namespace aly { bool SANITY_CHECK_LINALG(); template<class T, int C> struct Vector { public: std::vector<vec<T, C>> data; const int channels = C; typedef vec<T, C> ValueType; typedef typename std::vector<ValueType>::iterator iterator; typedef typename std::vector<ValueType>::const_iterator const_iterator; typedef typename std::vector<ValueType>::reverse_iterator reverse_iterator; iterator begin() { return data.begin(); } iterator end() { return data.end(); } const_iterator cbegin() const { return data.cbegin(); } const_iterator cend() const { return data.cend(); } reverse_iterator rbegin() { return data.rbegin(); } reverse_iterator rend() { return data.rend(); } reverse_iterator rbegin() const { return data.rbegin(); } reverse_iterator rend() const { return data.rend(); } template<class Archive> void save(Archive & archive) const { archive(cereal::make_nvp(MakeString() << "vector" << C, data)); } template<class Archive> void load(Archive & archive) { archive(cereal::make_nvp(MakeString() << "vector" << C, data)); } void set(const T& val) { data.assign(data.size(), vec<T, C>(val)); } void set(const vec<T, C>& val) { data.assign(data.size(), val); } void set(const std::vector<vec<T, C>>& val) { data = val; } void set(T* val) { if (val == nullptr) return; size_t offset = 0; for (vec<T, C>& x : data) { for (int c = 0; c < C; c++) { x[c] = val[offset++]; } } } void set(vec<T, C>* val) { if (val == nullptr) return; size_t offset = 0; for (vec<T, C>& x : data) { x = val[offset++]; } } template<class F> void apply(F f) { size_t sz = size(); #pragma omp parallel for for (int offset = 0; offset < (int) sz; offset++) { f(offset, data[offset]); } } Vector(size_t sz) : data(sz) { } Vector(const Vector<T, C>& 
img) : Vector(img.size()) { set(img.data); } Vector<T, C>& operator=(const Vector<T, C>& rhs) { if (this == &rhs) return *this; if (rhs.size() > 0) { this->set(rhs.data); } else { this->clear(); } return *this; } Vector() { } Vector(T* ptr, size_t sz) : Vector(sz) { set(ptr); } Vector(vec<T, C>* ptr, size_t sz) : Vector(sz) { set(ptr); } Vector(const std::vector<vec<T, C>>& ref) : data(ref) { } size_t size() const { return data.size(); } size_t typeSize() const { return sizeof(vec<T, C> ); } void resize(size_t sz) { data.resize(sz); data.shrink_to_fit(); } void resize(size_t sz, const vec<T, C>& val) { data.resize(sz, val); data.shrink_to_fit(); } void append(const vec<T, C>& val) { data.push_back(val); } void push_back(const vec<T, C>& val) { data.push_back(val); } T* ptr() { if (data.size() == 0) return nullptr; return &(data.front()[0]); } const T* ptr() const { if (data.size() == 0) return nullptr; return &(data.front()[0]); } void setZero() { data.assign(data.size(), vec<T, C>((T) 0)); } const vec<T, C>& operator[](const size_t i) const { if (i >= data.size()) throw std::runtime_error( MakeString() << "Vector index out of bounds " << i << "/" << data.size()); return data[i]; } vec<T, C>& operator[](const size_t i) { if (i >= data.size()) throw std::runtime_error( MakeString() << "Vector index out of bounds " << i << "/" << data.size()); return data[i]; } inline void clear() { data.clear(); data.shrink_to_fit(); } vec<T, C> min() const { vec<T, C> minVal(std::numeric_limits<T>::max()); for (const vec<T, C>& val : data) { minVal = aly::minVec(val, minVal); } return minVal; } vec<T, C> max() const { vec<T, C> maxVal(std::numeric_limits<T>::min()); for (const vec<T, C>& val : data) { maxVal = aly::maxVec(val, maxVal); } return maxVal; } std::pair<vec<T, C>, vec<T, C>> range() const { vec<T, C> maxVal(std::numeric_limits<T>::min()); vec<T, C> minVal(std::numeric_limits<T>::max()); for (const vec<T, C>& val : data) { maxVal = aly::maxVec(val, maxVal); minVal = 
aly::minVec(val, minVal); } return std::pair<vec<T, C>, vec<T, C>>(minVal, maxVal); } vec<T, C> mean() const { vec<double, C> mean(0.0); for (const vec<T, C>& val : data) { mean += vec<double, C>(val); } mean = mean / (double) data.size(); return vec<T, C>(mean); } vec<T, C> median() const { std::vector<T> bands[C]; for (int c = 0; c < C; c++) { bands[c].resize(data.size()); } size_t index = 0; for (const vec<T, C>& val : data) { for (int c = 0; c < C; c++) { bands[c][index] = val[c]; } index++; } #pragma omp parallel for for (int c = 0; c < C; c++) { std::sort(bands[c].begin(), bands[c].end()); } vec<T, C> med; if (data.size() % 2 == 0) { for (int c = 0; c < C; c++) { med[c] = T( ((double) bands[c][data.size() / 2] + (double) bands[c][data.size() / 2 - 1]) * 0.5f); } } else { for (int c = 0; c < C; c++) { med[c] = bands[c][data.size() / 2]; } } return med; } vec<T, C> mad() const { if (data.size() <= 2) return vec<T, C>(T(0)); vec<T, C> med = median(); std::vector<T> bands[C]; for (int c = 0; c < C; c++) { bands[c].resize(data.size()); } size_t index = 0; for (const vec<T, C>& val : data) { vec<T, C> e = aly::abs(val - med); for (int c = 0; c < C; c++) { bands[c][index] = e[c]; } index++; } #pragma omp parallel for for (int c = 0; c < C; c++) { std::sort(bands[c].begin(), bands[c].end()); } vec<T, C> mad; if (data.size() % 2 == 0) { for (int c = 0; c < C; c++) { mad[c] = T( ((double) bands[c][data.size() / 2] + (double) bands[c][data.size() / 2 - 1]) * 0.5f); } } else { for (int c = 0; c < C; c++) { mad[c] = bands[c][data.size() / 2]; } } return mad; } vec<T, C> madStdDev() const { return vec<T, C>(1.4826 * vec<double, C>(mad())); } vec<T, C> stdDev() const { if (data.size() < 2) { return vec<T, C>(T(0)); } vec<T, C> avg = mean(); vec<double, C> var(0.0); for (const vec<T, C>& val : data) { vec<double, C> e = vec<double, C>(val - avg); var += e * e; } var = var / (double) (data.size() - 1); return vec<T, C>(aly::sqrt(var)); } }; template<class T, int C> void 
Transform(Vector<T, C>& im1, Vector<T, C>& im2, const std::function<void(vec<T, C>&, vec<T, C>&)>& func) { if (im1.size() != im2.size()) throw std::runtime_error( MakeString() << "Vector dimensions do not match. " << im1.size() << "!=" << im2.size()); size_t sz = im1.size(); #pragma omp parallel for for (size_t offset = 0; offset < sz; offset++) { func(im1.data[offset], im2.data[offset]); } } template<class T, int C> void Transform(Vector<T, C>& im1, const std::function<void(vec<T, C>&)>& func) { size_t sz = im1.size(); #pragma omp parallel for for (int offset = 0; offset < (int) sz; offset++) { func(im1.data[offset]); } } template<class T, int C> void Transform(Vector<T, C>& im1, const Vector<T, C>& im2, const std::function<void(vec<T, C>&, const vec<T, C>&)>& func) { if (im1.size() != im2.size()) throw std::runtime_error( MakeString() << "Vector dimensions do not match. " << im1.size() << "!=" << im2.size()); size_t sz = im1.size(); #pragma omp parallel for for (int offset = 0; offset < (int) sz; offset++) { func(im1.data[offset], im2.data[offset]); } } template<class T, int C> void Transform(Vector<T, C>& im1, const Vector<T, C>& im2, const Vector<T, C>& im3, const Vector<T, C>& im4, const std::function< void(vec<T, C>&, const vec<T, C>&, const vec<T, C>&, const vec<T, C>&)>& func) { if (im1.size() != im2.size()) throw std::runtime_error( MakeString() << "Vector dimensions do not match. " << im1.size() << "!=" << im2.size()); size_t sz = im1.size(); #pragma omp parallel for for (int offset = 0; offset < (int) sz; offset++) { func(im1.data[offset], im2.data[offset], im3.data[offset], im4.data[offset]); } } template<class T, int C> void Transform(Vector<T, C>& im1, const Vector<T, C>& im2, const Vector<T, C>& im3, const std::function<void(vec<T, C>&, const vec<T, C>&, const vec<T, C>&)>& func) { if (im1.size() != im2.size()) throw std::runtime_error( MakeString() << "Vector dimensions do not match. 
" << im1.size() << "!=" << im2.size()); size_t sz = im1.size(); #pragma omp parallel for for (int offset = 0; offset < (int) sz; offset++) { func(im1.data[offset], im2.data[offset], im3.data[offset]); } } template<class T, int C> void Transform(Vector<T, C>& im1, Vector<T, C>& im2, const std::function< void(size_t offset, vec<T, C>& val1, vec<T, C>& val2)>& func) { if (im1.size() != im2.size()) throw std::runtime_error( MakeString() << "Vector dimensions do not match. " << im1.size() << "!=" << im2.size()); size_t sz = im1.size(); #pragma omp parallel for for (size_t offset = 0; offset < sz; offset++) { func(offset, im1.data[offset], im2.data[offset]); } } template<class T, class L, class R, int C> std::basic_ostream<L, R> & operator <<( std::basic_ostream<L, R> & ss, const Vector<T, C> & A) { size_t index = 0; for (const vec<T, C>& val : A.data) { ss << std::setw(5) << index++ << ": " << val << std::endl; } return ss; } template<class T, int C> Vector<T, C> operator+(const vec<T, C>& scalar, const Vector<T, C>& img) { Vector<T, C> out(img.size()); std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 = scalar + val2;}; Transform(out, img, f); return out; } template<class T, int C> void ScaleAdd(Vector<T, C>& out, const vec<T, C>& scalar, const Vector<T, C>& in) { out.resize(in.size()); std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 += scalar * val2;}; Transform(out, in, f); } template<class T, int C> void ScaleAdd(Vector<T, C>& out, const T& scalar, const Vector<T, C>& in) { out.resize(in.size()); std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 += scalar * val2;}; Transform(out, in, f); } template<class T, int C> void ScaleAdd(Vector<T, C>& out, const Vector<T, C>& in1, const vec<T, C>& scalar, const Vector<T, C>& in2) { out.resize(in1.size()); std::function<void(vec<T, C>&, const vec<T, C>&, const 
vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2, const vec<T, C>& val3) {val1 = val2+scalar * val3;}; Transform(out, in1, in2, f); } template<class T, int C> void ScaleAdd(Vector<T, C>& out, const Vector<T, C>& in1, const vec<T, C>& scalar2, const Vector<T, C>& in2, const vec<T, C>& scalar3, const Vector<T, C>& in3) { out.resize(in1.size()); std::function< void(vec<T, C>&, const vec<T, C>&, const vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& out, const vec<T, C>& val1, const vec<T, C>& val2, const vec<T, C>& val3) { out = val1+scalar2*val2+scalar3 * val3;}; Transform(out, in1, in2, in3, f); } template<class T, int C> void ScaleSubtract(Vector<T, C>& out, const vec<T, C>& scalar, const Vector<T, C>& in) { out.resize(in.size()); std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 -= scalar * val2;}; Transform(out, in, f); } template<class T, int C> void ScaleSubtract(Vector<T, C>& out, const Vector<T, C>& in1, const vec<T, C>& scalar, const Vector<T, C>& in2) { out.resize(in1.size()); std::function<void(vec<T, C>&, const vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2, const vec<T, C>& val3) {val1 = val2 - scalar * val3;}; Transform(out, in1, in2, f); } template<class T, int C> void Subtract(Vector<T, C>& out, const Vector<T, C>& v1, const Vector<T, C>& v2) { out.resize(v1.size()); std::function<void(vec<T, C>&, const vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2, const vec<T, C>& val3) {val1 = val2-val3;}; Transform(out, v1, v2, f); } template<class T, int C> void Add(Vector<T, C>& out, const Vector<T, C>& v1, const Vector<T, C>& v2) { out.resize(v1.size()); std::function<void(vec<T, C>&, const vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2, const vec<T, C>& val3) {val1 = val2 + val3;}; Transform(out, v1, v2, f); } template<class T, int C> Vector<T, C> operator-(const vec<T, C>& scalar, const Vector<T, C>& 
img) { Vector<T, C> out(img.size()); std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 = scalar - val2;}; Transform(out, img, f); return out; } template<class T, int C> Vector<T, C> operator*(const vec<T, C>& scalar, const Vector<T, C>& img) { Vector<T, C> out(img.size()); std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 = scalar*val2;}; Transform(out, img, f); return out; } template<class T, int C> Vector<T, C> operator*(const T& scalar, const Vector<T, C>& img) { Vector<T, C> out(img.size()); std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 = scalar*val2;}; Transform(out, img, f); return out; } template<class T, int C> Vector<T, C> operator/(const vec<T, C>& scalar, const Vector<T, C>& img) { Vector<T, C> out(img.size()); std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 = scalar / val2;}; Transform(out, img, f); return out; } template<class T, int C> Vector<T, C> operator+(const Vector<T, C>& img, const vec<T, C>& scalar) { Vector<T, C> out(img.size()); std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 = val2 + scalar;}; Transform(out, img, f); return out; } template<class T, int C> Vector<T, C> operator-(const Vector<T, C>& img, const vec<T, C>& scalar) { Vector<T, C> out(img.size()); std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 = val2 - scalar;}; Transform(out, img, f); return out; } template<class T, int C> Vector<T, C> operator*(const Vector<T, C>& img, const vec<T, C>& scalar) { Vector<T, C> out(img.size()); std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 = val2*scalar;}; Transform(out, img, f); return out; } template<class T, int C> Vector<T, C> operator/(const Vector<T, C>& 
img, const vec<T, C>& scalar) { Vector<T, C> out(img.size()); std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 = val2 / scalar;}; Transform(out, img, f); return out; } template<class T, int C> Vector<T, C> operator-(const Vector<T, C>& img) { Vector<T, C> out(img.size()); std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 = -val2;}; Transform(out, img, f); return out; } template<class T, int C> Vector<T, C> operator+=(Vector<T, C>& out, const Vector<T, C>& img) { std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 += val2;}; Transform(out, img, f); return out; } template<class T, int C> Vector<T, C> operator-=(Vector<T, C>& out, const Vector<T, C>& img) { std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 -= val2;}; Transform(out, img, f); return out; } template<class T, int C> Vector<T, C> operator*=(Vector<T, C>& out, const Vector<T, C>& img) { std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 *= val2;}; Transform(out, img, f); return out; } template<class T, int C> Vector<T, C> operator/=(Vector<T, C>& out, const Vector<T, C>& img) { std::function<void(vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2) {val1 /= val2;}; Transform(out, img, f); return out; } template<class T, int C> Vector<T, C> operator+=(Vector<T, C>& out, const vec<T, C>& scalar) { std::function<void(vec<T, C>&)> f = [=](vec<T, C>& val1) {val1 += scalar;}; Transform(out, f); return out; } template<class T, int C> Vector<T, C> operator-=(Vector<T, C>& out, const vec<T, C>& scalar) { std::function<void(vec<T, C>&)> f = [=](vec<T, C>& val1) {val1 -= scalar;}; Transform(out, f); return out; } template<class T, int C> Vector<T, C> operator*=(Vector<T, C>& out, const vec<T, C>& scalar) { 
std::function<void(vec<T, C>&)> f = [=](vec<T, C>& val1) {val1 *= scalar;}; Transform(out, f); return out; } template<class T, int C> Vector<T, C> operator/=(Vector<T, C>& out, const vec<T, C>& scalar) { std::function<void(vec<T, C>&)> f = [=](vec<T, C>& val1) {val1 /= scalar;}; Transform(out, f); return out; } template<class T, int C> Vector<T, C> operator+(const Vector<T, C>& img1, const Vector<T, C>& img2) { Vector<T, C> out(img1.size()); std::function<void(vec<T, C>&, const vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2, const vec<T, C>& val3) {val1 = val2 + val3;}; Transform(out, img1, img2, f); return out; } template<class T, int C> Vector<T, C> operator-(const Vector<T, C>& img1, const Vector<T, C>& img2) { Vector<T, C> out(img1.size()); std::function<void(vec<T, C>&, const vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2, const vec<T, C>& val3) {val1 = val2 - val3;}; Transform(out, img1, img2, f); return out; } template<class T, int C> Vector<T, C> operator*(const Vector<T, C>& img1, const Vector<T, C>& img2) { Vector<T, C> out(img1.size()); std::function<void(vec<T, C>&, const vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2, const vec<T, C>& val3) {val1 = val2*val3;}; Transform(out, img1, img2, f); return out; } template<class T, int C> Vector<T, C> operator/(const Vector<T, C>& img1, const Vector<T, C>& img2) { Vector<T, C> out(img1.size()); std::function<void(vec<T, C>&, const vec<T, C>&, const vec<T, C>&)> f = [=](vec<T, C>& val1, const vec<T, C>& val2, const vec<T, C>& val3) {val1 = val2 / val3;}; Transform(out, img1, img2, f); return out; } template<class T, int C> vec<double, C> dotVec(const Vector<T, C>& a, const Vector<T, C>& b) { vec<double, C> ans(0.0); if (a.size() != b.size()) throw std::runtime_error( MakeString() << "Vector dimensions do not match. 
" << a.size() << "!=" << b.size()); size_t sz = a.size(); #pragma omp parallel for for (int c = 0; c < C; c++) { double cans = 0; #pragma omp parallel for reduction(+:cans) for (int i = 0; i < (int) sz; i++) { cans += (double) a[i][c] * (double) b[i][c]; } ans[c] = cans; } return ans; } template<class T, int C> double dot(const Vector<T, C>& a, const Vector<T, C>& b) { double ans = 0.0; if (a.size() != b.size()) throw std::runtime_error( MakeString() << "Vector dimensions do not match. " << a.size() << "!=" << b.size()); size_t sz = a.size(); #pragma omp parallel for reduction(+:ans) for (int i = 0; i < (int) sz; i++) { ans += dot(vec<double, C>(a[i]), vec<double, C>(b[i])); } return ans; } template<class T, int C> T lengthSqr(const Vector<T, C>& a) { T ans(0); size_t sz = a.size(); #pragma omp parallel for reduction(+:ans) for (int i = 0; i < (int) sz; i++) { ans += dot(a[i], a[i]); } return ans; } template<class T, int C> T lengthL1(const Vector<T, C>& a) { T ans(0); size_t sz = a.size(); #pragma omp parallel for reduction(+:ans) for (int i = 0; i < (int) sz; i++) { for (int c = 0; c < C; c++) { ans += std::abs(a[i][c]); } } return ans; } template<class T, int C> vec<T, C> lengthVecL1(const Vector<T, C>& a) { vec<T, C> ans((T) 0); size_t sz = a.size(); #pragma omp parallel for for (int c = 0; c < C; c++) { T cans = 0; #pragma omp parallel for reduction(+:cans) for (int i = 0; i < (int) sz; i++) { cans += std::abs(a[i][c]); } ans[c] = cans; } return ans; } template<class T, int C> vec<T, C> maxVec(const Vector<T, C>& a) { vec<T, C> ans((T) 0); size_t sz = a.size(); #pragma omp parallel for for (int c = 0; c < C; c++) { T tmp(std::numeric_limits<T>::min()); //#pragma omp parallel for reduction(max:tmp) for (int i = 0; i < (int) sz; i++) { if (a[i][c] > tmp) tmp = a[i][c]; } ans[c] = tmp; } return ans; } template<class T, int C> vec<T, C> minVec(const Vector<T, C>& a) { vec<T, C> ans((T) 0); size_t sz = a.size(); #pragma omp parallel for for (int c = 0; c < C; c++) 
{ T tmp(std::numeric_limits<T>::max()); //#pragma omp parallel for reduction(min:tmp) for (int i = 0; i < (int) sz; i++) { if (a[i][c] < tmp) tmp = a[i][c]; } ans[c] = tmp; } return ans; } template<class T, int C> T max(const Vector<T, C>& a) { size_t sz = a.size(); T tmp(std::numeric_limits<T>::min()); //#pragma omp parallel for reduction(max:tmp) for (int i = 0; i < (int) sz; i++) { for (int c = 0; c < C; c++) { if (a[i][c] > tmp) tmp = a[i][c]; } } return tmp; } template<class T, int C> T min(const Vector<T, C>& a) { size_t sz = a.size(); T tmp(std::numeric_limits<T>::max()); //#pragma omp parallel for reduction(min:tmp) for (int i = 0; i < (int) sz; i++) { for (int c = 0; c < C; c++) { if (a[i][c] < tmp) tmp = a[i][c]; } } return tmp; } template<class T, int C> T length(const Vector<T, C>& a) { return std::sqrt(lengthSqr(a)); } template<class T, int C> vec<double, C> lengthVecSqr(const Vector<T, C>& a) { vec<double, C> ans(0.0); size_t sz = a.size(); #pragma omp parallel for for (int c = 0; c < C; c++) { double cans = 0; #pragma omp parallel for reduction(+:cans) for (int i = 0; i < (int) sz; i++) { double val = a[i][c]; cans += val * val; } ans[c] = cans; } return ans; } template<class T, int C> vec<double, C> lengthVec(const Vector<T, C>& a) { return aly::sqrt(lengthVecSqr(a)); } template<class T, int C> void WriteVectorToFile(const std::string& file,const Vector<T,C>& vector) { uint64_t sz = vector.size(); std::ofstream os(file, std::ios::binary); os.write((const char*)&sz, sizeof(uint64_t)); os.write((const char*)vector.ptr(), (std::streamsize)(sz*vector.typeSize())); } template<class T, int C> void ReadVectorFromFile(const std::string& file, Vector<T, C>& vector) { std::ifstream os(file, std::ios::binary); uint64_t sz=0; os.read((char*)&sz,sizeof(uint64_t)); vector.resize(sz); os.read((char*)vector.ptr(),(std::streamsize)sz); } typedef Vector<uint8_t, 4> VectorRGBA; typedef Vector<int, 4> VectorRGBAi; typedef Vector<float, 4> VectorRGBAf; typedef 
Vector<uint8_t, 3> VectorRGB; typedef Vector<int, 3> VectorRGBi; typedef Vector<float, 3> VectorRGBf; typedef Vector<uint8_t, 1> VectorA; typedef Vector<int, 1> VectorAi; typedef Vector<float, 1> VectorAf; typedef Vector<uint8_t, 4> Vector4b; typedef Vector<uint16_t, 4> Vector4us; typedef Vector<int16_t, 4> Vector4s; typedef Vector<int, 4> Vector4i; typedef Vector<uint32_t, 4> Vector4ui; typedef Vector<float, 4> Vector4f; typedef Vector<double, 4> Vector4d; typedef Vector<uint8_t, 3> Vector3b; typedef Vector<uint16_t, 3> Vector3us; typedef Vector<int16_t, 3> Vector3s; typedef Vector<int, 3> Vector3i; typedef Vector<uint32_t, 3> Vector3ui; typedef Vector<float, 3> Vector3f; typedef Vector<double, 3> Vector3d; typedef Vector<uint8_t, 2> Vector2b; typedef Vector<uint16_t, 2> Vector2us; typedef Vector<int16_t, 2> Vector2s; typedef Vector<int, 2> Vector2i; typedef Vector<uint32_t, 2> Vector2ui; typedef Vector<float, 2> Vector2f; typedef Vector<double, 2> Vector2d; typedef Vector<uint8_t, 1> Vector1b; typedef Vector<uint16_t, 1> Vector1us; typedef Vector<int16_t, 1> Vector1s; typedef Vector<int, 1> Vector1i; typedef Vector<uint32_t, 1> Vector1ui; typedef Vector<float, 1> Vector1f; typedef Vector<double, 1> Vector1d; } ; #endif /* ALLOYLINEARALGEBRA_H_ */
ast-dump-openmp-begin-declare-variant_2.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s // expected-no-diagnostics #pragma omp begin declare variant match(device={kind(cpu)}) int also_before(void) { return 0; } #pragma omp end declare variant #pragma omp begin declare variant match(implementation={vendor(score(100):llvm)}) int also_after(void) { return 0; } #pragma omp end declare variant #pragma omp begin declare variant match(implementation={vendor(score(0):llvm)}) int also_before(void) { return 1; } #pragma omp end declare variant int also_after(void) { return 2; } int test(void) { // Should return 0. return also_after() + also_before(); } // Make sure: // - we do see the ast nodes for the cpu kind // - we do see the ast nodes for the llvm vendor // - we pick the right callees // CHECK: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, col:21> col:5 implicit used also_before 'int ({{.*}})' // CHECK-NEXT: | |-OMPDeclareVariantAttr [[ADDR_1:0x[a-z0-9]*]] <<invalid sloc>> Implicit device={kind(cpu)} // CHECK-NEXT: | | `-DeclRefExpr [[ADDR_2:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_3:0x[a-z0-9]*]] 'also_before[device={kind(cpu)}]' 'int ({{.*}})' // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(score(0): llvm)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:17:1> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_3]] <line:6:1, line:8:1> line:6:1 also_before[device={kind(cpu)}] 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_7:0x[a-z0-9]*]] <col:23, line:8:1> // CHECK-NEXT: | `-ReturnStmt [[ADDR_8:0x[a-z0-9]*]] <line:7:3, col:10> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_9:0x[a-z0-9]*]] <col:10> 'int' 0 // CHECK-NEXT: |-FunctionDecl [[ADDR_10:0x[a-z0-9]*]] <line:12:1, col:20> 
col:5 implicit used also_after 'int ({{.*}})' // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_11:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(score(100): llvm)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_12:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_13:0x[a-z0-9]*]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_13]] <col:1, line:14:1> line:12:1 also_after[implementation={vendor(llvm)}] 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_14:0x[a-z0-9]*]] <col:22, line:14:1> // CHECK-NEXT: | `-ReturnStmt [[ADDR_15:0x[a-z0-9]*]] <line:13:3, col:10> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int' 0 // CHECK-NEXT: |-FunctionDecl [[ADDR_6]] <line:17:1, line:19:1> line:17:1 also_before[implementation={vendor(llvm)}] 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_17:0x[a-z0-9]*]] <col:23, line:19:1> // CHECK-NEXT: | `-ReturnStmt [[ADDR_18:0x[a-z0-9]*]] <line:18:3, col:10> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_19:0x[a-z0-9]*]] <col:10> 'int' 1 // CHECK-NEXT: |-FunctionDecl [[ADDR_20:0x[a-z0-9]*]] prev [[ADDR_10]] <line:22:1, line:24:1> line:22:5 used also_after 'int ({{.*}})' // CHECK-NEXT: | |-CompoundStmt [[ADDR_21:0x[a-z0-9]*]] <col:22, line:24:1> // CHECK-NEXT: | | `-ReturnStmt [[ADDR_22:0x[a-z0-9]*]] <line:23:3, col:10> // CHECK-NEXT: | | `-IntegerLiteral [[ADDR_23:0x[a-z0-9]*]] <col:10> 'int' 2 // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_24:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(score(100): llvm)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_12]] <line:12:1> 'int ({{.*}})' Function [[ADDR_13]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // CHECK-NEXT: `-FunctionDecl [[ADDR_25:0x[a-z0-9]*]] <line:26:1, line:29:1> line:26:5 test 'int ({{.*}})' // CHECK-NEXT: `-CompoundStmt [[ADDR_26:0x[a-z0-9]*]] <col:16, line:29:1> // CHECK-NEXT: `-ReturnStmt [[ADDR_27:0x[a-z0-9]*]] <line:28:3, col:37> // CHECK-NEXT: 
`-BinaryOperator [[ADDR_28:0x[a-z0-9]*]] <col:10, col:37> 'int' '+' // CHECK-NEXT: |-PseudoObjectExpr [[ADDR_29:0x[a-z0-9]*]] <col:10, col:21> 'int' // CHECK-NEXT: | |-CallExpr [[ADDR_30:0x[a-z0-9]*]] <col:10, col:21> 'int' // CHECK-NEXT: | | `-ImplicitCastExpr [[ADDR_31:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: | | `-DeclRefExpr [[ADDR_32:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_20]] 'also_after' 'int ({{.*}})' // CHECK-NEXT: | `-CallExpr [[ADDR_33:0x[a-z0-9]*]] <line:12:1, line:28:21> 'int' // CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_34:0x[a-z0-9]*]] <line:12:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: | `-DeclRefExpr [[ADDR_12]] <col:1> 'int ({{.*}})' Function [[ADDR_13]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // CHECK-NEXT: `-PseudoObjectExpr [[ADDR_35:0x[a-z0-9]*]] <line:28:25, col:37> 'int' // CHECK-NEXT: |-CallExpr [[ADDR_36:0x[a-z0-9]*]] <col:25, col:37> 'int' // CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_37:0x[a-z0-9]*]] <col:25> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: | `-DeclRefExpr [[ADDR_38:0x[a-z0-9]*]] <col:25> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})' // CHECK-NEXT: `-CallExpr [[ADDR_39:0x[a-z0-9]*]] <line:6:1, line:28:37> 'int' // CHECK-NEXT: `-ImplicitCastExpr [[ADDR_40:0x[a-z0-9]*]] <line:6:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: `-DeclRefExpr [[ADDR_2]] <col:1> 'int ({{.*}})' Function [[ADDR_3]] 'also_before[device={kind(cpu)}]' 'int ({{.*}})'
GB_binop__times_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__times_int16) // A.*B function (eWiseMult): GB (_AemultB_08__times_int16) // A.*B function (eWiseMult): GB (_AemultB_02__times_int16) // A.*B function (eWiseMult): GB (_AemultB_04__times_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__times_int16) // A*D function (colscale): GB (_AxD__times_int16) // D*A function (rowscale): GB (_DxB__times_int16) // C+=B function (dense accum): GB (_Cdense_accumB__times_int16) // C+=b function (dense accum): GB (_Cdense_accumb__times_int16) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_int16) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_int16) // C=scalar+B GB (_bind1st__times_int16) // C=scalar+B' GB (_bind1st_tran__times_int16) // C=A+scalar GB (_bind2nd__times_int16) // C=A'+scalar GB (_bind2nd_tran__times_int16) // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = (aij * bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // 
true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x * y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TIMES || GxB_NO_INT16 || GxB_NO_TIMES_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__times_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__times_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__times_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__times_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__times_int16) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool 
D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__times_int16) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__times_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__times_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool 
Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__times_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
// NOTE(review): auto-generated SuiteSparse:GraphBLAS kernels specialized for
// the TIMES operator on int16 (z = x * y).  The function bodies are supplied
// by #include'd template files; GBB/GBX are presumably the bitmap-test and
// value-load macros from the GraphBLAS internals -- confirm against GB.h.

// Tail of the preceding eWiseMult kernel (its opening lines are outside this
// excerpt): selects the non-flipped variant of the GB_emult_02 template.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__times_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body comes from the shared template, specialized by the macros above
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__times_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Computes Cx [p] = x * Bx [p] for every entry present in B's bitmap.
GrB_Info GB (_bind1st__times_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip positions not present in B (bitmap test)
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x * bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Computes Cx [p] = Ax [p] * y for every entry present in A's bitmap.
GrB_Info GB (_bind2nd__times_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip positions not present in A (bitmap test)
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij * y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x * aij) ; \
}

GrB_Info GB (_bind1st_tran__times_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent template expansions
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij * y) ; \
}

GrB_Info GB (_bind2nd_tran__times_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ===== file boundary marker (concatenation artifact): conv_kernel_x86.c ===== */
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2020, OPEN AI LAB
 * Author: quanwang@openailab.com
 */

// x86 convolution kernels: im2col lowering (fp32/uint8/int8), input packing,
// and register-blocked GEMM with optional AVX/FMA paths.
// NOTE(review): memcpy/memset/printf are used below but <string.h>/<stdio.h>
// are not included in this excerpt -- presumably pulled in via
// conv_kernel_x86.h; confirm.

#include <stdint.h>
#include <stdlib.h>
#include <math.h>

#include "conv_kernel_x86.h"
#include "wino_conv_kernel_x86.h"

#if __AVX__
#include <immintrin.h>
#endif

#ifndef _MSC_VER
#include <sys/time.h>

// Classic max/min macros: arguments are evaluated more than once, so callers
// must not pass expressions with side effects.
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))

// Wall-clock time in milliseconds (POSIX only; excluded under MSVC).
static double get_current_time()
{
    struct timeval tv;
    gettimeofday(&tv, NULL);

    return tv.tv_sec * 1000.0 + tv.tv_usec / 1000.0;
}
#endif

// Size in bytes of the interleave buffer for the filter tensor.
// uint8 weights are dequantized to fp32 (4x larger) for simulated inference.
static int get_private_mem_size(struct ir_tensor* filter)
{
    if (filter->data_type == TENGINE_DT_UINT8)    // simulator uint8 inference with fp32
        return filter->elem_num * filter->elem_size * 4;
    else
        return filter->elem_num * filter->elem_size;    // caution
}

// Copy the filter weights into the private interleave buffer unchanged.
static void interleave(struct ir_tensor* filter, struct conv_priv_info* priv_info)
{
    /* simply copy the data */
    memcpy(priv_info->interleave_buffer, filter->data, filter->elem_num * filter->elem_size);
}

// Dequantize uint8 weights to fp32 into the interleave buffer:
// w_fp32 = (w_u8 - zero_point) * scale.
static void interleave_uint8(struct ir_tensor* filter, struct conv_priv_info* priv_info)
{
    /* dequant uint8 weight to fp32 for simulator */
    float* weight_fp32 = (float* )priv_info->interleave_buffer;
    uint8_t* weight_uint8 = (uint8_t*)filter->data;
    float scale = filter->scale;
    int zero_point = filter->zero_point;

    for (int i = 0; i < filter->elem_num; i++)
    {
        weight_fp32[i] = ((float)weight_uint8[i] - (float)zero_point) * scale;
    }
}

// im2col for fp32: lower the (inc, inh, inw) input image to a
// (ksize_h*ksize_w*inc) x (outh*outw) column matrix.
// w_low/w_high bound the output columns whose input column lies inside the
// image; columns outside [w_low, w_high) are zero-filled (padding).
void im2col_fp32(float* data_img, float* data_col, int inh, int inw, int inc, int outh, int outw, int ksize_h,
                 int ksize_w, int sh, int sw, int ph, int pw, int dh, int dw)
{
    const int channels_col = ksize_h * ksize_w * inc;

    for (int c = 0; c < channels_col; ++c)
    {
        // decompose the row index into (kernel x, kernel y, input channel)
        const int kw = c % ksize_w;
        int c_ = c / ksize_w;
        const int kh = c_ % ksize_h;
        c_ = c_ / ksize_h;
        const int im_col = kw * dw - pw;
        const int w_low = max(0, -im_col / sw + (-im_col % sw > 0));
        const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0));

        for (int h = 0; h < outh; ++h)
        {
            const int im_row = kh * dh + h * sh - ph;
            float* out = data_col + (c * outh + h) * outw;
            const float* end = out + w_high;

            if (im_row >= 0 && im_row < inh)
            {
                // start one stride before the first sample; the loop below
                // pre-increments before each read
                float* in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw;

                memset(out, 0, w_low * sizeof(float));
                out += w_low;
                while (out < end)
                {
                    in += sw;
                    *(out++) = *in;
                }
                memset(out, 0, (outw - w_high) * sizeof(float));
            }
            else
            {
                // whole row falls in vertical padding
                memset(out, 0, outw * sizeof(float));
            }
        }
    }
}

// im2col for uint8 input: same lowering as im2col_fp32, but each sample is
// dequantized to fp32 on the fly (simulated uint8 inference).
void im2col_uint8(uint8_t* data_img, float* data_col, struct ir_tensor* input_tensor, struct ir_tensor* output_tensor,
                  struct conv_param* param)
{
    int ksize_h = param->kernel_h;
    int ksize_w = param->kernel_w;
    int inc = param->input_channel / param->group;
    int sh = param->stride_h;
    int sw = param->stride_w;
    int ph = param->pad_h0;
    int pw = param->pad_w0;
    int dh = param->dilation_h;
    int dw = param->dilation_w;
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];
    float scale = input_tensor->scale;
    int zero_point = input_tensor->zero_point;

    const int channels_col = ksize_h * ksize_w * inc;

    for (int c = 0; c < channels_col; ++c)
    {
        const int kw = c % ksize_w;
        int c_ = c / ksize_w;
        const int kh = c_ % ksize_h;
        c_ = c_ / ksize_h;
        const int im_col = kw * dw - pw;
        const int w_low = max(0, -im_col / sw + (-im_col % sw > 0));
        const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0));

        for (int h = 0; h < outh; ++h)
        {
            const int im_row = kh * dh + h * sh - ph;
            float* out = data_col + (c * outh + h) * outw;
            const float* end = out + w_high;

            if (im_row >= 0 && im_row < inh)
            {
                uint8_t * in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw;

                memset(out, 0, w_low * sizeof(float));
                out += w_low;
                while (out < end)
                {
                    in += sw;
                    // dequantize: (u8 - zero_point) * scale
                    float in_fp32 = ((float)in[0] - (float)zero_point) * scale;
                    out[0] = in_fp32;
                    out++;
                }
                memset(out, 0, (outw - w_high) * sizeof(float));
            }
            else
            {
                memset(out, 0, outw * sizeof(float));
            }
        }
    }
}

// im2col for int8 input: same lowering, values copied as raw int8.
void im2col_int8(int8_t* data_img, int8_t* data_col, struct ir_tensor* input_tensor, struct ir_tensor* output_tensor,
                 struct conv_param* param)
{
    int ksize_h = param->kernel_h;
    int ksize_w = param->kernel_w;
    int inc = param->input_channel / param->group;
    int sh = param->stride_h;
    int sw = param->stride_w;
    int ph = param->pad_h0;
    int pw = param->pad_w0;
    int dh = param->dilation_h;
    int dw = param->dilation_w;
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];

    const int channels_col = ksize_h * ksize_w * inc;

    for (int c = 0; c < channels_col; ++c)
    {
        const int kw = c % ksize_w;
        int c_ = c / ksize_w;
        const int kh = c_ % ksize_h;
        c_ = c_ / ksize_h;
        const int im_col = kw * dw - pw;
        const int w_low = max(0, -im_col / sw + (-im_col % sw > 0));
        const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0));

        for (int h = 0; h < outh; ++h)
        {
            const int im_row = kh * dh + h * sh - ph;
            int8_t * out = data_col + (c * outh + h) * outw;
            const int8_t * end = out + w_high;

            if (im_row >= 0 && im_row < inh)
            {
                int8_t * in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw;

                memset(out, 0, w_low * sizeof(int8_t));
                out += w_low;
                while (out < end)
                {
                    in += sw;
                    out[0] = in[0];
                    out++;
                }
                memset(out, 0, (outw - w_high) * sizeof(int8_t));
            }
            else
            {
                memset(out, 0, outw * sizeof(int8_t));
            }
        }
    }
}

// Dispatch im2col for one (batch n, group) slice based on the input dtype.
// Unsupported dtypes are only reported via printf; no error is returned.
static void im2col_ir(struct ir_tensor* input, struct ir_tensor* output, struct conv_priv_info* priv_info,
                      struct conv_param* param, int n, int group)
{
    int input_chan = param->input_channel / param->group;
    int image_size = input->dims[1] * input->dims[2] * input->dims[3];
    int group_size = input_chan * input->dims[2] * input->dims[3];

    // offset to the start of this batch image / channel group
    void* input_base = (void*)((uint8_t*)input->data + (n * image_size + group * group_size) * input->elem_size);
    void* im2col_buf = (void*)priv_info->im2col_buffer;

    if (input->data_type == TENGINE_DT_FP32)
    {
        im2col_fp32(input_base, im2col_buf, input->dims[2], input->dims[3], input_chan, output->dims[2],
                    output->dims[3], param->kernel_h, param->kernel_w, param->stride_h, param->stride_w,
                    param->pad_h0, param->pad_w0, param->dilation_h, param->dilation_w);
    }
    else if (input->data_type == TENGINE_DT_UINT8)
    {
        im2col_uint8(input_base, im2col_buf, input, output, param);
    }
    else if (input->data_type == TENGINE_DT_INT8)
    {
        im2col_int8(input_base, im2col_buf, input, output, param);
    }
    else
    {
        printf("Input data type %d not to be supported.\n", input->data_type);
    }
}

// Repack the KxN im2col matrix pB into 8-column panels (pB_t) so sgemm_fp can
// stream contiguous 8-wide tiles.  Leftover columns (N % 8) are stored one
// column per panel.  "remian" is a typo preserved from the original (sic).
void input_pack4_fp32(int K, int N, float* pB, float* pB_t, int num_thread)
{
    int nn_size = N >> 3;
    int remian_size_start = nn_size << 3;

    // [ch00, ch10, ch20, ch30, ch01, ch11, ch21, ch31, ch02, ch12, ch22, ch32, ch03, ch13, ch23, ch33 ....]
#pragma omp parallel for num_threads(num_thread)
    for (int ii = 0; ii < nn_size; ii++)
    {
        int i = ii * 8;
        const float* img = pB + i;
        float* tmp = pB_t + (i / 8) * 8 * K;

        for (int j = 0; j < K; j++)
        {
#if __AVX__
            _mm256_storeu_ps(tmp, _mm256_loadu_ps(img));
#else
            tmp[0] = img[0];
            tmp[1] = img[1];
            tmp[2] = img[2];
            tmp[3] = img[3];
            tmp[4] = img[4];
            tmp[5] = img[5];
            tmp[6] = img[6];
            tmp[7] = img[7];
#endif    // __SSE__
            tmp += 8;
            img += N;
        }
    }

    // [ch00, ch01, ch02, ch03 ....]
#pragma omp parallel for num_threads(num_thread)
    for (int i = remian_size_start; i < N; i++)
    {
        const float* img = pB + i;
        float* tmp = pB_t + (i / 8 + i % 8) * 8 * K;

        for (int j = 0; j < K; j++)
        {
            tmp[0] = img[0];
            tmp += 1;
            img += N;
        }
    }
}

// fp32 GEMM: pC(MxN) = pA_t(MxK, packed kernel) * pB_t(KxN, packed input).
// Register-blocked 8x8 / 4x8 / 1x8 tiles with AVX FMA fast paths and scalar
// fallbacks; K is unrolled by 4 in the hot loops.
static void sgemm_fp(int M, int N, int K, float* pA_t, float* pB_t, float* pC, int num_thread)
{
    int nn_outch = 0;
    int remain_outch_start = 0;

    // 8 output rows at a time
    nn_outch = M >> 3;
    remain_outch_start = nn_outch << 3;

#pragma omp parallel for num_threads(num_thread)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int i = pp * 8;

        float* output0 = pC + ( i )*N;
        float* output1 = pC + (i + 1) * N;
        float* output2 = pC + (i + 2) * N;
        float* output3 = pC + (i + 3) * N;
        float* output4 = pC + (i + 4) * N;
        float* output5 = pC + (i + 5) * N;
        float* output6 = pC + (i + 6) * N;
        float* output7 = pC + (i + 7) * N;

        int j = 0;
        for (; j + 7 < N; j += 8)
        {
            float* va = pA_t + (i / 8) * 8 * K;
            float* vb = pB_t + (j / 8) * 8 * K;
#if __AVX__
            __m256 _sum0 = _mm256_set1_ps(0.0);
            __m256 _sum1 = _mm256_set1_ps(0.0);
            __m256 _sum2 = _mm256_set1_ps(0.0);
            __m256 _sum3 = _mm256_set1_ps(0.0);
            __m256 _sum4 = _mm256_set1_ps(0.0);
            __m256 _sum5 = _mm256_set1_ps(0.0);
            __m256 _sum6 = _mm256_set1_ps(0.0);
            __m256 _sum7 = _mm256_set1_ps(0.0);

            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _va1 = _mm256_broadcast_ss(va + 1);
                __m256 _va2 = _mm256_broadcast_ss(va + 2);
                __m256 _va3 = _mm256_broadcast_ss(va + 3);
                __m256 _vb0 = _mm256_loadu_ps(vb);
                __m256 _vb1 = _mm256_loadu_ps(vb + 8);
                __m256 _vb2 = _mm256_loadu_ps(vb + 16);
                __m256 _vb3 = _mm256_loadu_ps(vb + 24);

                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0);    // sum0 = (a00-a07) * k00
                _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1);    // sum1 = (a00-a07) * k10
                _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2);    // sum2 = (a00-a07) * k20
                _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3);    // sum3 = (a00-a07) * k30

                _va0 = _mm256_broadcast_ss(va + 4);
                _va1 = _mm256_broadcast_ss(va + 5);
                _va2 = _mm256_broadcast_ss(va + 6);
                _va3 = _mm256_broadcast_ss(va + 7);

                _sum4 = _mm256_fmadd_ps(_vb0, _va0, _sum4);    // sum4 = (a00-a07) * k40
                _sum5 = _mm256_fmadd_ps(_vb0, _va1, _sum5);    // sum5 = (a00-a07) * k50
                _sum6 = _mm256_fmadd_ps(_vb0, _va2, _sum6);    // sum6 = (a00-a07) * k60
                _sum7 = _mm256_fmadd_ps(_vb0, _va3, _sum7);    // sum7 = (a00-a07) * k70

                va += 8;

                // k1
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);

                _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0);    // sum0 += (a10-a17) * k01
                _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1);    // sum1 += (a10-a17) * k11
                _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2);    // sum2 += (a10-a17) * k21
                _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3);    // sum3 += (a10-a17) * k31

                _va0 = _mm256_broadcast_ss(va + 4);
                _va1 = _mm256_broadcast_ss(va + 5);
                _va2 = _mm256_broadcast_ss(va + 6);
                _va3 = _mm256_broadcast_ss(va + 7);

                _sum4 = _mm256_fmadd_ps(_vb1, _va0, _sum4);    // sum4 += (a10-a17) * k41
                _sum5 = _mm256_fmadd_ps(_vb1, _va1, _sum5);    // sum5 += (a10-a17) * k51
                _sum6 = _mm256_fmadd_ps(_vb1, _va2, _sum6);    // sum6 += (a10-a17) * k61
                _sum7 = _mm256_fmadd_ps(_vb1, _va3, _sum7);    // sum7 += (a10-a17) * k71

                va += 8;

                // k2
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);

                _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0);    // sum0 += (a20-a27) * k02
                _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1);    // sum1 += (a20-a27) * k12
                _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2);    // sum2 += (a20-a27) * k22
                _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3);    // sum3 += (a20-a27) * k32

                _va0 = _mm256_broadcast_ss(va + 4);
                _va1 = _mm256_broadcast_ss(va + 5);
                _va2 = _mm256_broadcast_ss(va + 6);
                _va3 = _mm256_broadcast_ss(va + 7);

                _sum4 = _mm256_fmadd_ps(_vb2, _va0, _sum4);    // sum4 += (a20-a27) * k42
                _sum5 = _mm256_fmadd_ps(_vb2, _va1, _sum5);    // sum5 += (a20-a27) * k52
                _sum6 = _mm256_fmadd_ps(_vb2, _va2, _sum6);    // sum6 += (a20-a27) * k62
                _sum7 = _mm256_fmadd_ps(_vb2, _va3, _sum7);    // sum7 += (a20-a27) * k72

                va += 8;

                // k3
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);

                _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0);    // sum0 += (a30-a37) * k03
                _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1);    // sum1 += (a30-a37) * k13
                _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2);    // sum2 += (a30-a37) * k23
                _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3);    // sum3 += (a30-a37) * k33

                _va0 = _mm256_broadcast_ss(va + 4);
                _va1 = _mm256_broadcast_ss(va + 5);
                _va2 = _mm256_broadcast_ss(va + 6);
                _va3 = _mm256_broadcast_ss(va + 7);

                _sum4 = _mm256_fmadd_ps(_vb3, _va0, _sum4);    // sum4 += (a30-a37) * k43
                _sum5 = _mm256_fmadd_ps(_vb3, _va1, _sum5);    // sum5 += (a30-a37) * k53
                _sum6 = _mm256_fmadd_ps(_vb3, _va2, _sum6);    // sum6 += (a30-a37) * k63
                _sum7 = _mm256_fmadd_ps(_vb3, _va3, _sum7);    // sum7 += (a30-a37) * k73

                va += 8;
                vb += 32;
            }

            for (; k < K; k++)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _va1 = _mm256_broadcast_ss(va + 1);
                __m256 _va2 = _mm256_broadcast_ss(va + 2);
                __m256 _va3 = _mm256_broadcast_ss(va + 3);
                __m256 _va4 = _mm256_broadcast_ss(va + 4);
                __m256 _va5 = _mm256_broadcast_ss(va + 5);
                __m256 _va6 = _mm256_broadcast_ss(va + 6);
                __m256 _va7 = _mm256_broadcast_ss(va + 7);
                __m256 _vb0 = _mm256_loadu_ps(vb);

                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0);    // sum0 = (a00-a07) * k00
                _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1);    // sum1 = (a00-a07) * k10
                _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2);    // sum2 = (a00-a07) * k20
                _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3);    // sum3 = (a00-a07) * k30
                _sum4 = _mm256_fmadd_ps(_vb0, _va4, _sum4);    // sum4 = (a00-a07) * k40
                _sum5 = _mm256_fmadd_ps(_vb0, _va5, _sum5);    // sum5 = (a00-a07) * k50
                _sum6 = _mm256_fmadd_ps(_vb0, _va6, _sum6);    // sum6 = (a00-a07) * k60
                _sum7 = _mm256_fmadd_ps(_vb0, _va7, _sum7);    // sum7 = (a00-a07) * k70

                va += 8;
                vb += 8;
            }

            _mm256_storeu_ps(output0, _sum0);
            _mm256_storeu_ps(output1, _sum1);
            _mm256_storeu_ps(output2, _sum2);
            _mm256_storeu_ps(output3, _sum3);
            _mm256_storeu_ps(output4, _sum4);
            _mm256_storeu_ps(output5, _sum5);
            _mm256_storeu_ps(output6, _sum6);
            _mm256_storeu_ps(output7, _sum7);
#else
            float sum0[8] = {0};
            float sum1[8] = {0};
            float sum2[8] = {0};
            float sum3[8] = {0};
            float sum4[8] = {0};
            float sum5[8] = {0};
            float sum6[8] = {0};
            float sum7[8] = {0};

            for (int k = 0; k < K; k++)
            {
                for (int n = 0; n < 8; n++)
                {
                    sum0[n] += va[0] * vb[n];
                    sum1[n] += va[1] * vb[n];
                    sum2[n] += va[2] * vb[n];
                    sum3[n] += va[3] * vb[n];
                    sum4[n] += va[4] * vb[n];
                    sum5[n] += va[5] * vb[n];
                    sum6[n] += va[6] * vb[n];
                    sum7[n] += va[7] * vb[n];
                }
                va += 8;
                vb += 8;
            }

            for (int n = 0; n < 8; n++)
            {
                output0[n] = sum0[n];
                output1[n] = sum1[n];
                output2[n] = sum2[n];
                output3[n] = sum3[n];
                output4[n] = sum4[n];
                output5[n] = sum5[n];
                output6[n] = sum6[n];
                output7[n] = sum7[n];
            }
#endif    // __AVX__
            output0 += 8;
            output1 += 8;
            output2 += 8;
            output3 += 8;
            output4 += 8;
            output5 += 8;
            output6 += 8;
            output7 += 8;
        }

        // leftover columns: one output column at a time (8x1 tile)
        for (; j < N; j++)
        {
            float* va = pA_t + (i / 8) * 8 * K;
            float* vb = pB_t + (j / 8 + j % 8) * 8 * K;
#if __AVX__
            __m256 _sum0_7 = _mm256_set1_ps(0.0);
            __m256 _sum0 = _mm256_set1_ps(0.0);
            __m256 _sum1 = _mm256_set1_ps(0.0);
            __m256 _sum2 = _mm256_set1_ps(0.0);
            __m256 _sum3 = _mm256_set1_ps(0.0);

            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                __m256 _vb0 = _mm256_broadcast_ss(vb);
                __m256 _vb1 = _mm256_broadcast_ss(vb + 1);
                __m256 _vb2 = _mm256_broadcast_ss(vb + 2);
                __m256 _vb3 = _mm256_broadcast_ss(vb + 3);
                __m256 _va0 = _mm256_loadu_ps(va);
                __m256 _va1 = _mm256_loadu_ps(va + 8);
                __m256 _va2 = _mm256_loadu_ps(va + 16);
                __m256 _va3 = _mm256_loadu_ps(va + 24);

                _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);    // sum0 += (k00-k70) * a00
                _sum1 = _mm256_fmadd_ps(_va1, _vb1, _sum1);    // sum1 += (k01-k71) * a10
                _sum2 = _mm256_fmadd_ps(_va2, _vb2, _sum2);    // sum2 += (k02-k72) * a20
                _sum3 = _mm256_fmadd_ps(_va3, _vb3, _sum3);    // sum3 += (k03-k73) * a30

                va += 32;
                vb += 4;
            }

            _sum0 = _mm256_add_ps(_sum0, _sum1);
            _sum2 = _mm256_add_ps(_sum2, _sum3);
            _sum0_7 = _mm256_add_ps(_sum0_7, _sum0);
            _sum0_7 = _mm256_add_ps(_sum0_7, _sum2);

            for (; k < K; k++)
            {
                __m256 _vb0 = _mm256_broadcast_ss(vb);
                __m256 _va = _mm256_loadu_ps(va);

                _sum0_7 = _mm256_fmadd_ps(_va, _vb0, _sum0_7);    // sum0 += (k00-k70) * a00

                va += 8;
                vb += 1;
            }

            float output_sum0_7[8] = {0.f};
            _mm256_storeu_ps(output_sum0_7, _sum0_7);
            // scatter the 8 accumulated dot products, one per output row
            output0[0] = output_sum0_7[0];
            output1[0] = output_sum0_7[1];
            output2[0] = output_sum0_7[2];
            output3[0] = output_sum0_7[3];
            output4[0] = output_sum0_7[4];
            output5[0] = output_sum0_7[5];
            output6[0] = output_sum0_7[6];
            output7[0] = output_sum0_7[7];
#else
            float sum0 = 0;
            float sum1 = 0;
            float sum2 = 0;
            float sum3 = 0;
            float sum4 = 0;
            float sum5 = 0;
            float sum6 = 0;
            float sum7 = 0;

            for (int k = 0; k < K; k++)
            {
                sum0 += va[0] * vb[0];
                sum1 += va[1] * vb[0];
                sum2 += va[2] * vb[0];
                sum3 += va[3] * vb[0];
                sum4 += va[4] * vb[0];
                sum5 += va[5] * vb[0];
                sum6 += va[6] * vb[0];
                sum7 += va[7] * vb[0];

                va += 8;
                vb += 1;
            }

            output0[0] = sum0;
            output1[0] = sum1;
            output2[0] = sum2;
            output3[0] = sum3;
            output4[0] = sum4;
            output5[0] = sum5;
            output6[0] = sum6;
            output7[0] = sum7;
#endif    // __AVX__
            output0++;
            output1++;
            output2++;
            output3++;
            output4++;
            output5++;
            output6++;
            output7++;
        }
    }

    // 4 output rows at a time
    nn_outch = (M - remain_outch_start) >> 2;

    for (int pp = 0; pp < nn_outch; pp++)
    {
        int i = remain_outch_start + pp * 4;

        float* output0 = pC + ( i )*N;
        float* output1 = pC + (i + 1) * N;
        float* output2 = pC + (i + 2) * N;
        float* output3 = pC + (i + 3) * N;

        int j = 0;
        for (; j + 7 < N; j += 8)
        {
            float* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K;
            float* vb = pB_t + (j / 8) * 8 * K;
#if __AVX__
            __m256 _sum0 = _mm256_set1_ps(0.0);
            __m256 _sum1 = _mm256_set1_ps(0.0);
            __m256 _sum2 = _mm256_set1_ps(0.0);
            __m256 _sum3 = _mm256_set1_ps(0.0);

            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _va1 = _mm256_broadcast_ss(va + 1);
                __m256 _va2 = _mm256_broadcast_ss(va + 2);
                __m256 _va3 = _mm256_broadcast_ss(va + 3);
                __m256 _vb0 = _mm256_loadu_ps(vb);
                __m256 _vb1 = _mm256_loadu_ps(vb + 8);
                __m256 _vb2 = _mm256_loadu_ps(vb + 16);
                __m256 _vb3 = _mm256_loadu_ps(vb + 24);

                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0);    // sum0 = (a00-a07) * k00
                _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1);    // sum1 = (a00-a07) * k10
                _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2);    // sum2 = (a00-a07) * k20
                _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3);    // sum3 = (a00-a07) * k30

                va += 4;

                // k1
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);

                _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0);    // sum0 += (a10-a17) * k01
                _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1);    // sum1 += (a10-a17) * k11
                _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2);    // sum2 += (a10-a17) * k21
                _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3);    // sum3 += (a10-a17) * k31

                va += 4;

                // k2
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);

                _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0);    // sum0 += (a20-a27) * k02
                _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1);    // sum1 += (a20-a27) * k12
                _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2);    // sum2 += (a20-a27) * k22
                _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3);    // sum3 += (a20-a27) * k32

                va += 4;

                // k3
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);

                _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0);    // sum0 += (a30-a37) * k03
                _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1);    // sum1 += (a30-a37) * k13
                _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2);    // sum2 += (a30-a37) * k23
                _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3);    // sum3 += (a30-a37) * k33

                va += 4;
                vb += 32;
            }

            for (; k < K; k++)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _va1 = _mm256_broadcast_ss(va + 1);
                __m256 _va2 = _mm256_broadcast_ss(va + 2);
                __m256 _va3 = _mm256_broadcast_ss(va + 3);
                __m256 _vb0 = _mm256_loadu_ps(vb);

                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0);    // sum0 = (a00-a07) * k00
                _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1);    // sum1 = (a00-a07) * k10
                _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2);    // sum2 = (a00-a07) * k20
                _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3);    // sum3 = (a00-a07) * k30

                va += 4;
                vb += 8;
            }

            _mm256_storeu_ps(output0, _sum0);
            _mm256_storeu_ps(output1, _sum1);
            _mm256_storeu_ps(output2, _sum2);
            _mm256_storeu_ps(output3, _sum3);
#else
            float sum0[8] = {0};
            float sum1[8] = {0};
            float sum2[8] = {0};
            float sum3[8] = {0};

            for (int k = 0; k < K; k++)
            {
                for (int n = 0; n < 8; n++)
                {
                    sum0[n] += va[0] * vb[n];
                    sum1[n] += va[1] * vb[n];
                    sum2[n] += va[2] * vb[n];
                    sum3[n] += va[3] * vb[n];
                }
                va += 4;
                vb += 8;
            }

            for (int n = 0; n < 8; n++)
            {
                output0[n] = sum0[n];
                output1[n] = sum1[n];
                output2[n] = sum2[n];
                output3[n] = sum3[n];
            }
#endif    // __AVX__
            output0 += 8;
            output1 += 8;
            output2 += 8;
            output3 += 8;
        }

        for (; j < N; j++)
        {
            float* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K;
            float* vb = pB_t + (j / 8 + j % 8) * 8 * K;
#if __AVX__
            __m128 _sum0_3 = _mm_set1_ps(0.0);
            __m128 _sum0 = _mm_set1_ps(0.0);
            __m128 _sum1 = _mm_set1_ps(0.0);
            __m128 _sum2 = _mm_set1_ps(0.0);
            __m128 _sum3 = _mm_set1_ps(0.0);

            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                __m128 _vb0 = _mm_set1_ps(vb[0]);
                __m128 _vb1 = _mm_set1_ps(vb[1]);
                __m128 _vb2 = _mm_set1_ps(vb[2]);
                __m128 _vb3 = _mm_set1_ps(vb[3]);
                __m128 _va0 = _mm_loadu_ps(va);
                __m128 _va1 = _mm_loadu_ps(va + 4);
                __m128 _va2 = _mm_loadu_ps(va + 8);
                __m128 _va3 = _mm_loadu_ps(va + 12);

                _sum0 = _mm_fmadd_ps(_va0, _vb0, _sum0);    // sum0 += (k00-k30) * a00
                _sum1 = _mm_fmadd_ps(_va1, _vb1, _sum1);    // sum1 += (k01-k31) * a10
                _sum2 = _mm_fmadd_ps(_va2, _vb2, _sum2);    // sum2 += (k02-k32) * a20
                _sum3 = _mm_fmadd_ps(_va3, _vb3, _sum3);    // sum3 += (k03-k33) * a30

                va += 16;
                vb += 4;
            }

            _sum0 = _mm_add_ps(_sum0, _sum1);
            _sum2 = _mm_add_ps(_sum2, _sum3);
            _sum0_3 = _mm_add_ps(_sum0_3, _sum0);
            _sum0_3 = _mm_add_ps(_sum0_3, _sum2);

            for (; k < K; k++)
            {
                __m128 _vb0 = _mm_set1_ps(vb[0]);
                __m128 _va = _mm_loadu_ps(va);

                _sum0_3 = _mm_fmadd_ps(_va, _vb0, _sum0_3);    // sum0 += (k00-k30) * a00

                va += 4;
                vb += 1;
            }

            float output_sum0_3[4] = {0.f};
            _mm_storeu_ps(output_sum0_3, _sum0_3);
            output0[0] = output_sum0_3[0];
            output1[0] = output_sum0_3[1];
            output2[0] = output_sum0_3[2];
            output3[0] = output_sum0_3[3];
#else
            float sum0 = 0;
            float sum1 = 0;
            float sum2 = 0;
            float sum3 = 0;

            for (int k = 0; k < K; k++)
            {
                sum0 += va[0] * vb[0];
                sum1 += va[1] * vb[0];
                sum2 += va[2] * vb[0];
                sum3 += va[3] * vb[0];

                va += 4;
                vb += 1;
            }

            output0[0] = sum0;
            output1[0] = sum1;
            output2[0] = sum2;
            output3[0] = sum3;
#endif    // __AVX__
            output0++;
            output1++;
            output2++;
            output3++;
        }
    }

    remain_outch_start += nn_outch << 2;

    // output ch0 -- remaining single rows
    for (int i = remain_outch_start; i < M; i++)
    {
        float* output = pC + i * N;

        int j = 0;
        for (; j + 7 < N; j += 8)
        {
            float* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K;
            float* vb = pB_t + (j / 8) * 8 * K;
#if __AVX__
            __m256 _sum0 = _mm256_set1_ps(0.0);

            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _va1 = _mm256_broadcast_ss(va + 1);
                __m256 _va2 = _mm256_broadcast_ss(va + 2);
                __m256 _va3 = _mm256_broadcast_ss(va + 3);
                __m256 _vb0 = _mm256_loadu_ps(vb);
                __m256 _vb1 = _mm256_loadu_ps(vb + 8);
                __m256 _vb2 = _mm256_loadu_ps(vb + 16);
                __m256 _vb3 = _mm256_loadu_ps(vb + 24);

                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0);    // sum0 = (a00-a07) * k00
                _sum0 = _mm256_fmadd_ps(_vb1, _va1, _sum0);    // sum0 += (a10-a17) * k01
                _sum0 = _mm256_fmadd_ps(_vb2, _va2, _sum0);    // sum0 += (a20-a27) * k02
                _sum0 = _mm256_fmadd_ps(_vb3, _va3, _sum0);    // sum0 += (a30-a37) * k03

                va += 4;
                vb += 32;
            }

            for (; k < K; k++)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _vb0 = _mm256_loadu_ps(vb);

                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0);    // sum0 = (a00-a07) * k00

                va += 1;
                vb += 8;
            }

            _mm256_storeu_ps(output, _sum0);
#else
            float sum[8] = {0};

            for (int k = 0; k < K; k++)
            {
                for (int n = 0; n < 8; n++)
                {
                    sum[n] += va[0] * vb[n];
                }
                va += 1;
                vb += 8;
            }

            for (int n = 0; n < 8; n++)
            {
                output[n] = sum[n];
            }
#endif    // __AVX__
            output += 8;
        }

        for (; j < N; j++)
        {
            float* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K;
            float* vb = pB_t + (j / 8 + j % 8) * 8 * K;

            int k = 0;
#if __AVX__
            __m128 _sum0 = _mm_set1_ps(0.f);

            for (; k + 3 < K; k += 4)
            {
                __m128 _p0 = _mm_loadu_ps(vb);
                __m128 _k0 = _mm_loadu_ps(va);
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_p0, _k0));

                va += 4;
                vb += 4;
            }
#ifdef _WIN32
            // MSVC exposes __m128 lanes via the m128_f32 member
            float sum0 = _sum0.m128_f32[0] + _sum0.m128_f32[1] + _sum0.m128_f32[2] + _sum0.m128_f32[3];
#else
            float sum0 = _sum0[0] + _sum0[1] + _sum0[2] + _sum0[3];
#endif
#else
            float sum0 = 0.f;
#endif    // __AVX__
            for (; k < K; k++)
            {
                sum0 += va[0] * vb[0];

                va += 1;
                vb += 1;
            }

            output[0] = sum0;
            output++;
        }
    }
}

// int8 variant of input_pack4_fp32: repack KxN int8 matrix into 8-column
// panels; leftover columns stored one per panel.  ("remian" typo kept, sic.)
void input_pack4_int8(int K, int N, int8_t* pB, int8_t* pB_t, int num_thread)
{
    int nn_size = N >> 3;
    int remian_size_start = nn_size << 3;

    // [ch00, ch10, ch20, ch30, ch01, ch11, ch21, ch31, ch02, ch12, ch22, ch32, ch03, ch13, ch23, ch33 ....]
#pragma omp parallel for num_threads(num_thread)
    for (int ii = 0; ii < nn_size; ii++)
    {
        int i = ii * 8;
        const int8_t* img = pB + i;
        int8_t* tmp = pB_t + (i / 8) * 8 * K;

        for (int j = 0; j < K; j++)
        {
            tmp[0] = img[0];
            tmp[1] = img[1];
            tmp[2] = img[2];
            tmp[3] = img[3];
            tmp[4] = img[4];
            tmp[5] = img[5];
            tmp[6] = img[6];
            tmp[7] = img[7];

            tmp += 8;
            img += N;
        }
    }

    // [ch00, ch01, ch02, ch03 ....]
#pragma omp parallel for num_threads(num_thread)
    for (int i = remian_size_start; i < N; i++)
    {
        const int8_t* img = pB + i;
        int8_t* tmp = pB_t + (i / 8 + i % 8) * 8 * K;

        for (int j = 0; j < K; j++)
        {
            tmp[0] = img[0];
            tmp += 1;
            img += N;
        }
    }
}

// int8 GEMM accumulating into int32: pC(MxN) = pA_t * pB_t, 8x8 row blocks.
// NOTE(review): the AVX path widens 8 int8 values with
// _mm256_cvtepi8_epi32(_mm_loadu_si128(...)), which issues a 16-byte load for
// 8 used bytes -- may read past the packed buffer's end; confirm padding.
// The OpenMP pragma over the row-block loop is commented out in the original.
static void sgemm_i8(int M, int N, int K, int8_t* pA_t, int8_t* pB_t, int32_t* pC, int num_thread)
{
    int nn_outch = 0;
    int remain_outch_start = 0;

    nn_outch = M >> 3;
    remain_outch_start = nn_outch << 3;

    //#pragma omp parallel for num_threads(num_thread)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int i = pp * 8;

        int32_t* output0 = pC + ( i )*N;
        int32_t* output1 = pC + (i + 1) * N;
        int32_t* output2 = pC + (i + 2) * N;
        int32_t* output3 = pC + (i + 3) * N;
        int32_t* output4 = pC + (i + 4) * N;
        int32_t* output5 = pC + (i + 5) * N;
        int32_t* output6 = pC + (i + 6) * N;
        int32_t* output7 = pC + (i + 7) * N;

        int j = 0;
        for (; j + 7 < N; j += 8)
        {
            int8_t* va = pA_t + (i / 8) * 8 * K;
            int8_t* vb = pB_t + (j / 8) * 8 * K;
#if __AVX__
            __m256i _sum0 = _mm256_set1_epi32(0);
            __m256i _sum1 = _mm256_set1_epi32(0);
            __m256i _sum2 = _mm256_set1_epi32(0);
            __m256i _sum3 = _mm256_set1_epi32(0);
            __m256i _sum4 = _mm256_set1_epi32(0);
            __m256i _sum5 = _mm256_set1_epi32(0);
            __m256i _sum6 = _mm256_set1_epi32(0);
            __m256i _sum7 = _mm256_set1_epi32(0);

            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                // k0
                __m256i _va0 = _mm256_set1_epi32(*va);
                __m256i _va1 = _mm256_set1_epi32(*(va + 1));
                __m256i _va2 = _mm256_set1_epi32(*(va + 2));
                __m256i _va3 = _mm256_set1_epi32(*(va + 3));
                __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb));
                __m256i _vb1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 8)));
                __m256i _vb2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 16)));
                __m256i _vb3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 24)));

                _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0);
                _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1);
                _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2);
                _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3);

                _va0 = _mm256_set1_epi32(*(va + 4));
                _va1 = _mm256_set1_epi32(*(va + 5));
                _va2 = _mm256_set1_epi32(*(va + 6));
                _va3 = _mm256_set1_epi32(*(va + 7));

                _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum4);
                _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum5);
                _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum6);
                _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum7);

                va += 8;

                // k1
                _va0 = _mm256_set1_epi32(*va);
                _va1 = _mm256_set1_epi32(*(va + 1));
                _va2 = _mm256_set1_epi32(*(va + 2));
                _va3 = _mm256_set1_epi32(*(va + 3));

                _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va0), _sum0);
                _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum1);
                _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va2), _sum2);
                _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va3), _sum3);

                _va0 = _mm256_set1_epi32(*(va + 4));
                _va1 = _mm256_set1_epi32(*(va + 5));
                _va2 = _mm256_set1_epi32(*(va + 6));
                _va3 = _mm256_set1_epi32(*(va + 7));

                _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va0), _sum4);
                _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum5);
                _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va2), _sum6);
                _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va3), _sum7);

                va += 8;

                // k2
                _va0 = _mm256_set1_epi32(*va);
                _va1 = _mm256_set1_epi32(*(va + 1));
                _va2 = _mm256_set1_epi32(*(va + 2));
                _va3 = _mm256_set1_epi32(*(va + 3));

                _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va0), _sum0);
                _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va1), _sum1);
                _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum2);
                _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va3), _sum3);

                _va0 = _mm256_set1_epi32(*(va + 4));
                _va1 = _mm256_set1_epi32(*(va + 5));
                _va2 = _mm256_set1_epi32(*(va + 6));
                _va3 = _mm256_set1_epi32(*(va + 7));

                _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va0), _sum4);
                _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va1), _sum5);
                _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum6);
                _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va3), _sum7);

                va += 8;

                // k3
                _va0 = _mm256_set1_epi32(*va);
                _va1 = _mm256_set1_epi32(*(va + 1));
                _va2 = _mm256_set1_epi32(*(va + 2));
                _va3 = _mm256_set1_epi32(*(va + 3));

                _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va0), _sum0);
                _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va1), _sum1);
                _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va2), _sum2);
                _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum3);

                _va0 = _mm256_set1_epi32(*(va + 4));
                _va1 = _mm256_set1_epi32(*(va + 5));
                _va2 = _mm256_set1_epi32(*(va + 6));
                _va3 = _mm256_set1_epi32(*(va + 7));

                _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va0), _sum4);
                _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va1), _sum5);
                _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va2), _sum6);
                _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum7);

                va += 8;
                vb += 32;
            }

            for (; k < K; k++)
            {
                __m256i _va0 = _mm256_set1_epi32(*va);
                __m256i _va1 = _mm256_set1_epi32(*(va + 1));
                __m256i _va2 = _mm256_set1_epi32(*(va + 2));
                __m256i _va3 = _mm256_set1_epi32(*(va + 3));
                __m256i _va4 = _mm256_set1_epi32(*(va + 4));
                __m256i _va5 = _mm256_set1_epi32(*(va + 5));
                __m256i _va6 = _mm256_set1_epi32(*(va + 6));
                __m256i _va7 = _mm256_set1_epi32(*(va + 7));
                __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb));

                _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0);
                _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1);
                _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2);
                _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3);
                _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va4), _sum4);
                _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va5), _sum5);
                _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va6), _sum6);
                _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va7), _sum7);

                va += 8;
                vb += 8;
            }

            _mm256_storeu_si256((__m256i* )output0, _sum0);
            _mm256_storeu_si256((__m256i* )output1, _sum1);
            _mm256_storeu_si256((__m256i* )output2, _sum2);
            _mm256_storeu_si256((__m256i* )output3, _sum3);
            _mm256_storeu_si256((__m256i* )output4, _sum4);
            _mm256_storeu_si256((__m256i* )output5, _sum5);
            _mm256_storeu_si256((__m256i* )output6, _sum6);
            _mm256_storeu_si256((__m256i* )output7, _sum7);
#else
            int32_t sum0[8] = {0};
            int32_t sum1[8] = {0};
            int32_t sum2[8] = {0};
            int32_t sum3[8] = {0};
            int32_t sum4[8] = {0};
            int32_t sum5[8] = {0};
            int32_t sum6[8] = {0};
            int32_t sum7[8] = {0};

            for (int k = 0; k < K; k++)
            {
                for (int n = 0; n < 8; n++)
                {
                    sum0[n] += va[0] * vb[n];
                    sum1[n] += va[1] * vb[n];
                    sum2[n] += va[2] * vb[n];
                    sum3[n] += va[3] * vb[n];
                    sum4[n] += va[4] * vb[n];
                    sum5[n] += va[5] * vb[n];
                    sum6[n] += va[6] * vb[n];
                    sum7[n] += va[7] * vb[n];
                }
                va += 8;
                vb += 8;
            }

            for (int n = 0; n < 8; n++)
            {
                output0[n] = sum0[n];
                output1[n] = sum1[n];
                output2[n] = sum2[n];
                output3[n] = sum3[n];
                output4[n] = sum4[n];
                output5[n] = sum5[n];
                output6[n] = sum6[n];
                output7[n] = sum7[n];
            }
#endif
            output0 += 8;
            output1 += 8;
            output2 += 8;
            output3 += 8;
            output4 += 8;
            output5 += 8;
            output6 += 8;
            output7 += 8;
        }

        for (; j < N; j++)
        {
            int8_t* va = pA_t + (i / 8) * 8 * K;
            int8_t* vb = pB_t + (j / 8 + j % 8) * 8 * K;
#if __AVX__
            __m256i _sum0_7 = _mm256_set1_epi32(0);
            __m256i _sum0 = _mm256_set1_epi32(0);
            __m256i _sum1 = _mm256_set1_epi32(0);
            __m256i _sum2 = _mm256_set1_epi32(0);
            __m256i _sum3 = _mm256_set1_epi32(0);

            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                __m256i _vb0 = _mm256_set1_epi32(*vb);
                __m256i _vb1 = _mm256_set1_epi32(*(vb + 1));
                __m256i _vb2 = _mm256_set1_epi32(*(vb + 2));
                __m256i _vb3 = _mm256_set1_epi32(*(vb + 3));
                __m256i _va0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va));
                __m256i _va1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 8)));
                __m256i _va2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 16)));
                __m256i _va3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 24)));

                _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_va0, _vb0), _sum0);
                _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_va1, _vb1), _sum1);
                _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_va2, _vb2), _sum2);
                _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_va3, _vb3), _sum3);

                va += 32;
                vb += 4;
            }

            _sum0 = _mm256_add_epi32(_sum0, _sum1);
            _sum2 = _mm256_add_epi32(_sum2, _sum3);
            _sum0_7 = _mm256_add_epi32(_sum0_7, _sum0);
            _sum0_7 = _mm256_add_epi32(_sum0_7, _sum2);

            for (; k < K; k++)
            {
                __m256i _vb0 = _mm256_set1_epi32(*vb);
                __m256i _va = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va));

                _sum0_7 = _mm256_add_epi32(_mm256_mullo_epi32(_va, _vb0), _sum0_7);

                va += 8;
                vb += 1;
            }

            int32_t output_sum0_7[8] = {0};
            _mm256_storeu_si256((__m256i* )output_sum0_7, _sum0_7);
            output0[0] = output_sum0_7[0];
            output1[0] = output_sum0_7[1];
            output2[0] = output_sum0_7[2];
            output3[0] = output_sum0_7[3];
            output4[0] = output_sum0_7[4];
            output5[0] = output_sum0_7[5];
            output6[0] = output_sum0_7[6];
            output7[0] = output_sum0_7[7];
#else
            int32_t sum0 = 0;
            int32_t sum1 = 0;
            int32_t sum2 = 0;
            int32_t sum3 = 0;
            int32_t sum4 = 0;
            int32_t sum5 = 0;
            int32_t sum6 = 0;
            int32_t sum7 = 0;

            for (int k = 0; k < K; k++)
            {
                sum0 += va[0] * vb[0];
                sum1 += va[1] * vb[0];
                sum2 += va[2] * vb[0];
                sum3 += va[3] * vb[0];
                sum4 += va[4] * vb[0];
                sum5 += va[5] * vb[0];
                sum6 += va[6] * vb[0];
                sum7 += va[7] * vb[0];

                va += 8;
                vb += 1;
            }

            output0[0] = sum0;
            output1[0] = sum1;
            output2[0] = sum2;
            output3[0] = sum3;
            output4[0] = sum4;
            output5[0] = sum5;
            output6[0] = sum6;
            output7[0] = sum7;
#endif
            output0++;
            output1++;
            output2++;
            output3++;
            output4++;
            output5++;
            output6++;
            output7++;
} } nn_outch = (M - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int i = remain_outch_start + pp * 4; int32_t* output0 = pC + ( i )*N; int32_t* output1 = pC + (i + 1) * N; int32_t* output2 = pC + (i + 2) * N; int32_t* output3 = pC + (i + 3) * N; int j = 0; for (; j + 7 < N; j += 8) { int8_t* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; int8_t* vb = pB_t + (j / 8) * 8 * K; #if __AVX__ __m256i _sum0 = _mm256_set1_epi32(0); __m256i _sum1 = _mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = K + 4) { // k0 __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); __m256i _vb1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 8))); __m256i _vb2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 16))); __m256i _vb3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 24))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); va += 4; // k1 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va3), _sum3); va += 4; // k2 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va0), _sum0); _sum1 = 
_mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va3), _sum3); va += 4; // k3 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum3); va += 4; vb += 32; } for (; k < K; k++) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); va += 4; vb += 8; } _mm256_storeu_si256((__m256i* )output0, _sum0); _mm256_storeu_si256((__m256i* )output1, _sum1); _mm256_storeu_si256((__m256i* )output2, _sum2); _mm256_storeu_si256((__m256i* )output3, _sum3); #else int32_t sum0[8] = {0}; int32_t sum1[8] = {0}; int32_t sum2[8] = {0}; int32_t sum3[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; } va += 4; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; } #endif output0 += 8; output1 += 8; output2 += 8; output3 += 8; } for (; j < N; j++) { int8_t* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; int8_t* vb = pB_t + (j / 8 + j % 8) * 8 * K; #if __AVX__ __m256i _sum0_3 = _mm256_set1_epi32(0); 
__m256i _sum0 = _mm256_set1_epi32(0); __m256i _sum1 = _mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); int k=0; for (; k + 3 < K; k = k + 4) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _vb1 = _mm256_set1_epi32(*(vb + 1)); __m256i _vb2 = _mm256_set1_epi32(*(vb + 2)); __m256i _vb3 = _mm256_set1_epi32(*(vb + 3)); __m256i _va0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va)); __m256i _va1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 4))); __m256i _va2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 8))); __m256i _va3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 12))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_va0, _vb0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_va1, _vb1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_va2, _vb2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_va3, _vb3), _sum3); va+=16; vb+=4; } _sum0 = _mm256_add_epi32(_sum0, _sum1); _sum2 = _mm256_add_epi32(_sum2, _sum3); _sum0_3 = _mm256_add_epi32(_sum0_3, _sum0); _sum0_3 = _mm256_add_epi32(_sum0_3, _sum2); for (; k < K; k++) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _va = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va)); _sum0_3 = _mm256_add_epi32(_mm256_mullo_epi32(_va, _vb0), _sum0_3); va += 4; vb += 1; } //drop last 4 value int32_t output_sum0_3[4] = {0}; _mm256_storeu_si256((__m256i* )output_sum0_3, _sum0_3); output0[0] = output_sum0_3[0]; output1[0] = output_sum0_3[1]; output2[0] = output_sum0_3[2]; output3[0] = output_sum0_3[3]; #else int32_t sum0 = 0; int32_t sum1 = 0; int32_t sum2 = 0; int32_t sum3 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; #endif output0++; output1++; output2++; output3++; } } remain_outch_start += nn_outch << 2; // output ch0 for (int i = remain_outch_start; i < M; i++) { 
int32_t* output = pC + i * N; int j = 0; for (; j + 7 < N; j += 8) { int8_t* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K; int8_t* vb = pB_t + (j / 8) * 8 * K; #if __AVX__ __m256i _sum0 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = k + 4) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); __m256i _vb1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 8))); __m256i _vb2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 16))); __m256i _vb3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 24))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum0); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum0); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum0); va += 4; vb += 32; } for (; k < K; k++) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); va += 1; vb += 8; } _mm256_storeu_si256((__m256i* )output, _sum0); #else int32_t sum[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum[n] += va[0] * vb[n]; } va += 1; vb += 8; } for (int n = 0; n < 8; n++) { output[n] = sum[n]; } #endif output += 8; } for (; j < N; j++) { int8_t* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K; int8_t* vb = pB_t + (j / 8 + j % 8) * 8 * K; int k = 0; int32_t sum0 = 0.f; for (; k < K; k++) { sum0 += va[0] * vb[0]; va += 1; vb += 1; } output[0] = sum0; output++; } } } static void sgemm_fp32(struct ir_tensor* input, struct ir_tensor* filter, struct ir_tensor* bias, struct ir_tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n, int group, int num_thread) { int kernel_size = param->kernel_h * param->kernel_w 
* param->input_channel / param->group; int outchan_g = param->output_channel / param->group; int out_h = output->dims[2]; int out_w = output->dims[3]; int out_image_size = output->dims[1] * output->dims[2] * output->dims[3]; float* interleave_fp32 = ( float* )priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size; float* im2col_pack4_fp32 = priv_info->im2col_buffer_pack4; float* output_fp32 = ( float* )output->data + n * out_image_size + outchan_g * group * out_h * out_w; float* bias_fp32 = NULL; if (bias) bias_fp32 = ( float* )bias->data + outchan_g * group; float* filter_sgemm = interleave_fp32; float* input_sgemm_pack4 = im2col_pack4_fp32; float* output_sgemm = output_fp32; sgemm_fp(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm, num_thread); // process bias if (bias) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; output_fp32[output_off] += bias_fp32[i]; } } } // process activation relu if (param->activation == 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_fp32[output_off] < 0) output_fp32[output_off] = 0; } } } // process activation relu6 if (param->activation > 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_fp32[output_off] < 0) output_fp32[output_off] = 0; if (output_fp32[output_off] > 6) output_fp32[output_off] = 6; } } } } static void sgemm_uint8(struct ir_tensor* input, struct ir_tensor* filter, struct ir_tensor* bias, struct ir_tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n, int group, int num_thread) { int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group; int outchan_g = param->output_channel / param->group; int out_h = output->dims[2]; int out_w = output->dims[3]; int 
out_image_size = output->dims[1] * output->dims[2] * output->dims[3]; float* interleave_fp32 = ( float* )priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size; float* im2col_pack4_fp32 = priv_info->im2col_buffer_pack4; uint8_t * output_uint8 = ( uint8_t* )output->data + n * out_image_size + outchan_g * group * out_h * out_w; int* bias_int32 = NULL; float bias_scale = 0.f; if (bias) { bias_int32 = ( int* )bias->data + outchan_g * group; bias_scale = input->scale * filter->scale; } float* filter_sgemm = interleave_fp32; float* input_sgemm_pack4 = im2col_pack4_fp32; float* output_sgemm = (float*)sys_malloc(outchan_g * out_h * out_w * sizeof(float)); sgemm_fp(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm, num_thread); /* process bias */ if (bias) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; output_sgemm[output_off] += (float )bias_int32[i] * bias_scale; } } } /* process activation relu */ if (param->activation == 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_sgemm[output_off] < 0) output_sgemm[output_off] = 0; } } } /* process activation relu6 */ if (param->activation > 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_sgemm[output_off] < 0) output_sgemm[output_off] = 0; if (output_sgemm[output_off] > 6) output_sgemm[output_off] = 6; } } } /* quant from fp32 to uint8 */ for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; int udata = ( int )(round(output_sgemm[output_off] / output->scale) + output->zero_point); if (udata > 255) udata = 255; else if (udata < 0) udata = 0; output_uint8[output_off] = udata; } } sys_free(output_sgemm); } static void sgemm_int8(struct ir_tensor* input, struct 
ir_tensor* filter, struct ir_tensor* bias, struct ir_tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n, int group, int num_thread) { int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group; int outchan_g = param->output_channel / param->group; int out_h = output->dims[2]; int out_w = output->dims[3]; int out_image_size = output->dims[1] * output->dims[2] * output->dims[3]; int8_t* interleave_int8 = ( int8_t* )priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size; int8_t* im2col_pack4_int8 = priv_info->im2col_buffer_pack4; int8_t * output_int8 = ( int8_t* )output->data + n * out_image_size + outchan_g * group * out_h * out_w; int32_t * bias_int32 = NULL; if (bias) bias_int32 = ( int* )bias->data + outchan_g * group; float input_scale = input->scale; float* kernel_scales = filter->scale_list; float output_scale = output->scale; int8_t* filter_sgemm = interleave_int8; int8_t* input_sgemm_pack4 = im2col_pack4_int8; int32_t* output_sgemm_int32 = (int32_t*)sys_malloc(outchan_g * out_h * out_w * sizeof(int32_t)); float* output_sgemm_fp32 = (float*)sys_malloc(outchan_g * out_h * out_w * sizeof(float)); sgemm_i8(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm_int32, num_thread); /* process bias and dequant output from int32 to fp32 */ for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (bias) output_sgemm_fp32[output_off] = (float )(output_sgemm_int32[output_off] + bias_int32[i]) * input_scale * kernel_scales[i]; else output_sgemm_fp32[output_off] = (float )output_sgemm_int32[output_off] * input_scale * kernel_scales[i]; } } /* process activation relu */ if (param->activation == 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_sgemm_fp32[output_off] < 0) output_sgemm_fp32[output_off] = 0; } } } /* 
process activation relu6 */
    if (param->activation > 0)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                /* clamp to [0, 6] */
                if (output_sgemm_fp32[output_off] < 0)
                    output_sgemm_fp32[output_off] = 0;
                if (output_sgemm_fp32[output_off] > 6)
                    output_sgemm_fp32[output_off] = 6;
            }
        }
    }

    /* quant from fp32 to int8: round(value / scale), saturated to [-127, 127] */
    for (int i = 0; i < outchan_g; i++)
    {
        for (int j = 0; j < out_h * out_w; j++)
        {
            int output_off = i * (out_h * out_w) + j;
            int32_t data_i32 = ( int32_t )(round(output_sgemm_fp32[output_off] / output_scale));
            if (data_i32 > 127)
                data_i32 = 127;
            else if (data_i32 < -127)
                data_i32 = -127;
            output_int8[output_off] = (int8_t)data_i32;
        }
    }

    /* release the per-call int32 accumulator and fp32 staging buffers */
    sys_free(output_sgemm_int32);
    sys_free(output_sgemm_fp32);
}

/* Check whether the convolution should use the winograd implementation
 * (only for 3x3 stride-1 non-grouped convs with sufficiently large maps
 * and channel counts that are a multiple of 16). */
static int winograd_support(struct conv_param* param, int in_h, int in_w)
{
    int kernel_h = param->kernel_h;
    int kernel_w = param->kernel_w;
    int stride_h = param->stride_h;
    int stride_w = param->stride_w;
    int dilation_h = param->dilation_h;
    int dilation_w = param->dilation_w;
    int input_chan = param->input_channel;
    int output_chan = param->output_channel;
    int group = param->group;

    /* small feature maps do not amortize the winograd transforms */
    if (in_h <= 10 && in_w <= 10)
        return 0;

    if (group != 1 || kernel_h != 3 || kernel_w != 3 || stride_h != 1 || stride_w != 1 || dilation_h != 1 ||
        dilation_w != 1 || input_chan < 16 || output_chan < 16 || output_chan % 16)
        return 0;

    return 1;
}

/* Size (in bytes) of the shared im2col buffer for one group:
 * one column of length kernel_size per output pixel. */
int conv_hcl_get_shared_mem_size(struct ir_tensor* input, struct ir_tensor* output, struct conv_param* param)
{
    int group = param->group;
    int input_chan = param->input_channel / group;
    int kernel_size = input_chan * param->kernel_h * param->kernel_w;
    int output_xy = output->dims[2] * output->dims[3];
    int elem_size = input->elem_size;

    // simulator uint8 inference with fp32
    if (input->data_type == TENGINE_DT_UINT8)
        elem_size = 4;

    return elem_size * output_xy * kernel_size;
}

int conv_hcl_get_shared_pack4_mem_size(struct ir_tensor* filter, struct ir_tensor*
output, struct conv_param* param) { int K = filter->elem_num / filter->dims[0]; int N = output->dims[2] * output->dims[3]; int elem_size = filter->elem_size; // simulator uint8 inference with fp32 if (filter->data_type == TENGINE_DT_UINT8) elem_size = 4; return (8 * K * (N / 8 + N % 8)) * elem_size; } int conv_hcl_get_interleave_pack4_size(int M, int K, struct ir_tensor* filter) { int elem_size = filter->elem_size; // simulator uint8 inference with fp32 if (filter->data_type == TENGINE_DT_UINT8) elem_size = 4; int size = 8 * K * (M / 8 + (M % 8) / 4 + M % 4) * elem_size; return size; } void conv_hcl_interleave_pack4_fp32(int M, int K, struct conv_priv_info* priv_info) { float* pA = ( float* )priv_info->interleave_buffer; float* pA_t = ( float* )priv_info->interleave_buffer_pack4; int nn_outch = M >> 3; int remain_outch_start = nn_outch << 3; for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; const float* k0 = pA + (p + 0) * K; const float* k1 = pA + (p + 1) * K; const float* k2 = pA + (p + 2) * K; const float* k3 = pA + (p + 3) * K; const float* k4 = pA + (p + 4) * K; const float* k5 = pA + (p + 5) * K; const float* k6 = pA + (p + 6) * K; const float* k7 = pA + (p + 7) * K; float* ktmp = pA_t + (p / 8) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp[4] = k4[0]; ktmp[5] = k5[0]; ktmp[6] = k6[0]; ktmp[7] = k7[0]; ktmp += 8; k0 += 1; k1 += 1; k2 += 1; k3 += 1; k4 += 1; k5 += 1; k6 += 1; k7 += 1; } } nn_outch = (M - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; const float* k0 = pA + (p + 0) * K; const float* k1 = pA + (p + 1) * K; const float* k2 = pA + (p + 2) * K; const float* k3 = pA + (p + 3) * K; float* ktmp = pA_t + (p / 8 + (p % 8) / 4) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } remain_outch_start += nn_outch << 2; for (int 
p = remain_outch_start; p < M; p++) { const float* k0 = pA + (p + 0) * K; float* ktmp = pA_t + (p / 8 + (p % 8) / 4 + p % 4) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } void conv_hcl_interleave_pack4_int8(int M, int K, struct conv_priv_info* priv_info) { int8_t* pA = ( int8_t * )priv_info->interleave_buffer; int8_t* pA_t = ( int8_t* )priv_info->interleave_buffer_pack4; int nn_outch = M >> 3; int remain_outch_start = nn_outch << 3; for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; const int8_t* k0 = pA + (p + 0) * K; const int8_t* k1 = pA + (p + 1) * K; const int8_t* k2 = pA + (p + 2) * K; const int8_t* k3 = pA + (p + 3) * K; const int8_t* k4 = pA + (p + 4) * K; const int8_t* k5 = pA + (p + 5) * K; const int8_t* k6 = pA + (p + 6) * K; const int8_t* k7 = pA + (p + 7) * K; int8_t* ktmp = pA_t + (p / 8) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp[4] = k4[0]; ktmp[5] = k5[0]; ktmp[6] = k6[0]; ktmp[7] = k7[0]; ktmp += 8; k0 += 1; k1 += 1; k2 += 1; k3 += 1; k4 += 1; k5 += 1; k6 += 1; k7 += 1; } } nn_outch = (M - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; const int8_t* k0 = pA + (p + 0) * K; const int8_t* k1 = pA + (p + 1) * K; const int8_t* k2 = pA + (p + 2) * K; const int8_t* k3 = pA + (p + 3) * K; int8_t* ktmp = pA_t + (p / 8 + (p % 8) / 4) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } remain_outch_start += nn_outch << 2; for (int p = remain_outch_start; p < M; p++) { const int8_t* k0 = pA + (p + 0) * K; int8_t* ktmp = pA_t + (p / 8 + (p % 8) / 4 + p % 4) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } int conv_hcl_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* 
param) { int in_h = input_tensor->dims[2]; int in_w = input_tensor->dims[3]; /* check winograd implement, only for conv3x3s1 */ if (input_tensor->data_type == TENGINE_DT_FP32) { priv_info->winograd = winograd_support(param, in_h, in_w); if (priv_info->winograd) { return wino_conv_hcl_prerun(input_tensor, filter_tensor, output_tensor, priv_info, param); } } if (!priv_info->external_im2col_mem) { int mem_size = conv_hcl_get_shared_mem_size(input_tensor, output_tensor, param); void* mem = sys_malloc(mem_size); priv_info->im2col_buffer = mem; priv_info->im2col_buffer_size = mem_size; } if (!priv_info->external_im2col_pack4_mem) { int mem_size = conv_hcl_get_shared_pack4_mem_size(filter_tensor, output_tensor, param); void* mem = sys_malloc(mem_size); priv_info->im2col_buffer_pack4 = mem; priv_info->im2col_buffer_pack4_size = mem_size; } if (!priv_info->external_interleave_mem) { int mem_size = get_private_mem_size(filter_tensor); void* mem = sys_malloc(mem_size); priv_info->interleave_buffer = mem; priv_info->interleave_buffer_size = mem_size; } if (input_tensor->data_type == TENGINE_DT_UINT8) interleave_uint8(filter_tensor, priv_info); else interleave(filter_tensor, priv_info); if (priv_info->external_interleave_pack4_mem) { int M = filter_tensor->dims[0]; int K = filter_tensor->elem_num / filter_tensor->dims[0]; int mem_size = conv_hcl_get_interleave_pack4_size(M, K, filter_tensor); void* mem = sys_malloc(mem_size); priv_info->interleave_buffer_pack4 = mem; priv_info->interleave_buffer_pack4_size = mem_size; if (input_tensor->data_type == TENGINE_DT_FP32 || input_tensor->data_type == TENGINE_DT_UINT8) conv_hcl_interleave_pack4_fp32(M, K, priv_info); else conv_hcl_interleave_pack4_int8(M, K, priv_info); if (!priv_info->external_interleave_mem && priv_info->interleave_buffer) { sys_free(priv_info->interleave_buffer); priv_info->interleave_buffer = NULL; } } else { priv_info->interleave_buffer_pack4 = priv_info->interleave_buffer; priv_info->interleave_buffer_pack4_size 
= priv_info->interleave_buffer_size; } return 0; } int conv_hcl_postrun(struct conv_priv_info* priv_info) { if (priv_info->winograd) { return wino_conv_hcl_postrun(priv_info); } if (priv_info->external_interleave_pack4_mem && !priv_info->external_interleave_mem && priv_info->interleave_buffer != NULL) { sys_free(priv_info->interleave_buffer_pack4); priv_info->interleave_buffer_pack4 = NULL; } if (!priv_info->external_im2col_mem && priv_info->im2col_buffer != NULL) { sys_free(priv_info->im2col_buffer); priv_info->im2col_buffer = NULL; } if (!priv_info->external_im2col_pack4_mem && priv_info->im2col_buffer_pack4 != NULL) { sys_free(priv_info->im2col_buffer_pack4); priv_info->im2col_buffer_pack4 = NULL; } if (priv_info->external_interleave_pack4_mem && priv_info->interleave_buffer_pack4 != NULL) { sys_free(priv_info->interleave_buffer_pack4); priv_info->interleave_buffer_pack4 = NULL; } return 0; } int conv_hcl_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor, struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param, int num_thread, int cpu_affinity) { int group = param->group; int type = input_tensor->data_type; if (priv_info->winograd) { return wino_conv_hcl_run(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, num_thread, cpu_affinity); } for (int i = 0; i < input_tensor->dims[0]; i++) // batch size { for (int j = 0; j < group; j++) { im2col_ir(input_tensor, output_tensor, priv_info, param, i, j); int K = filter_tensor->elem_num / filter_tensor->dims[0]; int N = output_tensor->dims[2] * output_tensor->dims[3]; void* im2col_buffer = priv_info->im2col_buffer; if (priv_info->external_interleave_pack4_mem) { if (type == TENGINE_DT_FP32 || type == TENGINE_DT_UINT8) input_pack4_fp32(K, N, im2col_buffer, priv_info->im2col_buffer_pack4, num_thread); else input_pack4_int8(K, N, im2col_buffer, priv_info->im2col_buffer_pack4, num_thread); } else { 
priv_info->im2col_buffer_pack4 = im2col_buffer; } if (type == TENGINE_DT_FP32) sgemm_fp32(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread); else if (type == TENGINE_DT_UINT8) sgemm_uint8(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread); else if (type == TENGINE_DT_INT8) sgemm_int8(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread); else { printf("Input data type %d not to be supported.\n", input_tensor->data_type); return -1; } } } return 0; } int conv_hcl_set_shared_mem(struct conv_priv_info* priv_info, void* mem, int mem_size) { priv_info->external_im2col_mem = 1; priv_info->im2col_buffer = mem; priv_info->im2col_buffer_size = mem_size; return 0; } int conv_hcl_set_shared_pack4_mem(struct conv_priv_info* priv_info, void* mem, int mem_size) { priv_info->external_im2col_pack4_mem = 1; priv_info->im2col_buffer_pack4 = mem; priv_info->im2col_buffer_pack4_size = mem_size; return 0; }
/* ==== phpassMD5_fmt_plug.c ==== */
/* * This software was written by Jim Fougeron jfoug AT cox dot net in 2009. * No copyright is claimed, and the software is hereby placed in the public * domain. In case this attempt to disclaim copyright and place the software in * the public domain is deemed null and void, then the software is Copyright * (c) 2009 Jim Fougeron and it is hereby released to the general public under * the following terms: * * This software may be modified, redistributed, and used for any purpose, * in source and binary forms, with or without modification. * * Cracks phpass 'portable' hashes, and phpBBv3 hashes, which are simply phpass * portable, with a slightly different signature. These are 8 byte salted * hashes, with a 1 byte 'salt' that defines the number of loops to compute. * Internally we work with 8 byte salt (the 'real' salt), but let john track * it as 9 byte salts to also pass in the loop count. Code works even if * multiple loop count values within the input. PHPv5 kicked up the loop * count, Wordpress uses same format, but even higher loop count. The loop * count can be used to 'tune' the format, by asking to process only * only hashes of a specific count. * * uses openSSL's MD5 and SIMD MD5. * * Code was pretty much rewritten to re-enable this format, and to deprecate * dynamic_17. It required ported to use the new intrisic SIMD code, including * AVX2, AVX2-512, and others, and the overall starting point for this older * code was pretty bad. This port done August 2015, Jim Fougeron. 
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_phpassmd5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_phpassmd5);
#else

#include <string.h>

#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "md5.h"
#include "phpass_common.h"

//#undef _OPENMP
//#undef SIMD_COEF_32
//#undef SIMD_PARA_MD5

#ifdef _OPENMP
#define OMP_SCALE 32
#include <omp.h>
#endif

#include "simd-intrinsics.h"
#include "memdbg.h"

#define FORMAT_LABEL "phpass"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "phpass ($P$ or $H$) " MD5_ALGORITHM_NAME

#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD5)
#endif

#define BENCHMARK_COMMENT " ($P$9)"
#ifndef MD5_BUF_SIZ
#define MD5_BUF_SIZ 16
#endif
#define DIGEST_SIZE 16
#define SALT_SIZE 8
// NOTE salts are only 8 bytes, but we tell john they are 9.
// We then take the 8 bytes of salt, and append the 1 byte of
// loop count data, making it 9.
#ifdef SIMD_COEF_32
// GETPOS maps (byte offset i, candidate index) to the interleaved SIMD MD5
// input-buffer layout: lanes of SIMD_COEF_32 keys, 4-byte words interleaved
// across lanes, MD5_BUF_SIZ words per key.
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*MD5_BUF_SIZ*4*SIMD_COEF_32 )
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif

#ifdef SIMD_COEF_32
// hash with key appended (used on all steps other than first)
static uint32_t (*hash_key)[MD5_BUF_SIZ*NBKEYS];
// salt with key appended (only used in 1st step).
static uint32_t (*cursalt)[MD5_BUF_SIZ*NBKEYS];
// final MD5 digests, one DIGEST_SIZE block per key, SIMD-interleaved
static uint32_t (*crypt_key)[DIGEST_SIZE/4*NBKEYS];
// number of candidate keys the SIMD buffers were sized for
static unsigned max_keys;
#else
// scalar fallback: digest followed in-place by the plaintext key, so the
// iterated MD5(hash||key) rounds can re-hash one contiguous buffer
static char (*crypt_key)[PHPASS_CPU_PLAINTEXT_LENGTH+1+PHPASS_BINARY_SIZE];
static char (*saved_key)[PHPASS_CPU_PLAINTEXT_LENGTH + 1];
static unsigned (*saved_len);
static unsigned char cursalt[SALT_SIZE];
#endif
// iteration count decoded from the salt's 9th byte (2^count loops)
static unsigned loopCnt;

// Allocate per-candidate buffers; OpenMP scales the key count first.
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_32
	// one SIMD-aligned block per NBKEYS-sized group of candidates
	crypt_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS, sizeof(*crypt_key), MEM_ALIGN_SIMD);
	hash_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS, sizeof(*hash_key), MEM_ALIGN_SIMD);
	cursalt = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS, sizeof(*cursalt), MEM_ALIGN_SIMD);
	max_keys = self->params.max_keys_per_crypt;
#else
	saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len));
	crypt_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_key));
	saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
#endif
}

// Release everything allocated in init().
static void done(void)
{
	MEM_FREE(crypt_key);
#ifndef SIMD_COEF_32
	MEM_FREE(saved_len);
	MEM_FREE(saved_key);
#else
	MEM_FREE(hash_key);
	MEM_FREE(cursalt);
#endif
}

// Install the current salt: broadcast the 8 salt bytes into every SIMD
// lane's input buffer (offset 0..7) and decode the loop count.
static void set_salt(void *salt)
{
#ifdef SIMD_COEF_32
	int i;
	uint32_t *p;

	p = cursalt[0];
	for (i = 0; i < max_keys; ++i) {
		// after each group of SIMD_COEF_32 keys, skip the remaining
		// 15 words of their MD5 buffers to reach the next group
		if (i && (i&(SIMD_COEF_32-1)) == 0)
			p += 15*SIMD_COEF_32;
		p[0] = ((uint32_t *)salt)[0];
		p[SIMD_COEF_32] = ((uint32_t *)salt)[1];
		++p;
	}
#else	// !SIMD_COEF_32
	memcpy(cursalt, salt, 8);
#endif
	// compute the loop count for this salt (9th byte is base-64 log2 count)
	loopCnt = (1 << (atoi64[ARCH_INDEX(((char*)salt)[8])]));
}

static void set_key(char *key, int index)
{
#ifdef SIMD_COEF_32
	// in SIMD, we put the key into the cursalt (at offset 8),
	// and into hash_key (at offset 16). We also clean both
	// buffers, and put the 0x80, and the length into them.
	int len = strlen(key), i, j;
	unsigned char *co1 = (unsigned char*)cursalt;
	unsigned char *co2 = (unsigned char*)hash_key;

	for (i = 0; i < len; ++i) {
		// byte by byte. Slow but easy to follow, and the
		// speed here does not really matter.
		co1[GETPOS(i+8,index)] = key[i];
		co2[GETPOS(i+16,index)] = key[i];
	}
	// Place the end of string marker (MD5 padding byte)
	co1[GETPOS(i+8,index)] = 0x80;
	co2[GETPOS(i+16,index)] = 0x80;
	// clean out both buffers top parts.
	for (j = i+9; j < 56; ++j)
		co1[GETPOS(j,index)] = 0;
	for (j = i+17; j < 56; ++j)
		co2[GETPOS(j,index)] = 0;
	// set the length in bits of salt and hash (MD5 length field,
	// little-endian, bytes 56/57; higher bytes stay zero)
	co1[GETPOS(56,index)] = ((len+8)<<3)&0xFF;
	co2[GETPOS(56,index)] = ((len+16)<<3)&0xFF;
	co1[GETPOS(57,index)] = ((len+8)<<3)>>8;
	co2[GETPOS(57,index)] = ((len+16)<<3)>>8;
#else
	int len= strlen(key);

	saved_len[index]=len;
	strcpy(saved_key[index], key);
#endif
}

// Recover the plaintext for `index' (SIMD build re-reads it out of the
// interleaved salt+key buffer using the stored bit length).
static char *get_key(int index)
{
#ifdef SIMD_COEF_32
	unsigned char *saltb8 = (unsigned char*)cursalt;
	static char out[PHPASS_CPU_PLAINTEXT_LENGTH+1];
	int len, i;

	// get salt length (in bits)
	len = saltb8[GETPOS(57,index)];
	len <<= 8;
	len |= saltb8[GETPOS(56,index)];
	// convert to bytes.
	len >>= 3;
	// we skip the 8 bytes of salt (to get to password).
	len -= 8;
	// now grab the password.
	for (i = 0; i < len; ++i)
		out[i] = saltb8[GETPOS(8+i,index)];
	out[i] = 0;
	return out;
#else
	return saved_key[index];
#endif
}

// Quick reject: does ANY computed hash match the first 32 bits of `binary'?
static int cmp_all(void *binary, int count)
{
	unsigned i = 0;
#ifdef SIMD_COEF_32
	uint32_t *p;
	uint32_t bin = *(uint32_t *)binary;

	p = crypt_key[0];
	for (i = 0; i < count; ++i) {
		// step over the other 3 digest words of each SIMD group
		if (i && (i&(SIMD_COEF_32-1)) == 0)
			p += 3*SIMD_COEF_32;
		if (bin == *p++)
			return 1;
	}
	return 0;
#else
	for (i = 0; i < count; i++)
		if (!memcmp(binary, crypt_key[i], PHPASS_BINARY_SIZE))
			return 1;
	return 0;
#endif
}

// Full binary already compared in cmp_one(); nothing further to verify.
static int cmp_exact(char *source, int index)
{
	return 1;
}

// Compare all 4 digest words of candidate `index' against `binary'.
static int cmp_one(void * binary, int index)
{
#ifdef SIMD_COEF_32
	int idx = index&(SIMD_COEF_32-1);
	int off = (index/SIMD_COEF_32)*(4*SIMD_COEF_32);
	return((((uint32_t *)binary)[0] == ((uint32_t *)crypt_key)[off+0*SIMD_COEF_32+idx]) &&
	       (((uint32_t *)binary)[1] == ((uint32_t *)crypt_key)[off+1*SIMD_COEF_32+idx]) &&
	       (((uint32_t *)binary)[2] == ((uint32_t *)crypt_key)[off+2*SIMD_COEF_32+idx]) &&
	       (((uint32_t *)binary)[3] == ((uint32_t *)crypt_key)[off+3*SIMD_COEF_32+idx]));
#else
	return !memcmp(binary, crypt_key[index], PHPASS_BINARY_SIZE);
#endif
}

// phpass core: h = MD5(salt||key); then loopCnt times h = MD5(h||key).
// SIMD path runs whole NBKEYS groups per iteration of the outer loop.
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int loops = 1, index;

#ifdef _OPENMP
	loops = (count + MAX_KEYS_PER_CRYPT - 1) / MAX_KEYS_PER_CRYPT;
#pragma omp parallel for
#endif
	for (index = 0; index < loops; index++) {
		unsigned Lcount;
#ifdef SIMD_COEF_32
		// first step: MD5(salt||key) -> hash_key (keys already at offset 16)
		SIMDmd5body(cursalt[index], hash_key[index], NULL, SSEi_OUTPUT_AS_INP_FMT);
		Lcount = loopCnt-1;
		do {
			SIMDmd5body(hash_key[index], hash_key[index], NULL, SSEi_OUTPUT_AS_INP_FMT);
		} while (--Lcount);
		// last hash goes into crypt_key
		SIMDmd5body(hash_key[index], crypt_key[index], NULL, 0);
#else
		MD5_CTX ctx;
		MD5_Init( &ctx );
		MD5_Update( &ctx, cursalt, 8 );
		MD5_Update( &ctx, saved_key[index], saved_len[index] );
		MD5_Final( (unsigned char *) crypt_key[index], &ctx);
		// append the key after the digest so each loop re-hashes
		// one contiguous hash||key buffer
		strcpy(((char*)&(crypt_key[index]))+PHPASS_BINARY_SIZE, saved_key[index]);
		Lcount = loopCnt;
		do {
			MD5_Init( &ctx );
			MD5_Update( &ctx, crypt_key[index], PHPASS_BINARY_SIZE+saved_len[index]);
			MD5_Final( (unsigned char *)&(crypt_key[index]), &ctx);
		} while (--Lcount);
#endif
	}
	return count;
}

// Extract the 9-byte salt (8 real bytes + 1 loop-count byte) from the
// ciphertext "$P$<count><salt8>..." layout.
static void * salt(char *ciphertext)
{
	static union {
		unsigned char salt[SALT_SIZE+2];
		uint32_t x;
	} x;
	unsigned char *salt = x.salt;
	// store off the 'real' 8 bytes of salt
	memcpy(salt, &ciphertext[4], 8);
	// append the 1 byte of loop count information.
	salt[8] = ciphertext[3];
	salt[9]=0;
	return salt;
}

#ifdef SIMD_COEF_32
// index of digest word 0 for candidate `index' in the interleaved layout
#define SIMD_INDEX (index&(SIMD_COEF_32-1))+(unsigned int)index/SIMD_COEF_32*SIMD_COEF_32*4
static int get_hash_0(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_0; }
static int get_hash_1(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_1; }
static int get_hash_2(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_2; }
static int get_hash_3(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_3; }
static int get_hash_4(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_4; }
static int get_hash_5(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_5; }
static int get_hash_6(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_6; }
#else
static int get_hash_0(int index) { return ((uint32_t*)(crypt_key[index]))[0] & PH_MASK_0; }
static int get_hash_1(int index) { return ((uint32_t*)(crypt_key[index]))[0] & PH_MASK_1; }
static int get_hash_2(int index) { return ((uint32_t*)(crypt_key[index]))[0] & PH_MASK_2; }
static int get_hash_3(int index) { return ((uint32_t*)(crypt_key[index]))[0] & PH_MASK_3; }
static int get_hash_4(int index) { return ((uint32_t*)(crypt_key[index]))[0] & PH_MASK_4; }
static int get_hash_5(int index) { return ((uint32_t*)(crypt_key[index]))[0] & PH_MASK_5; }
static int get_hash_6(int index) { return ((uint32_t*)(crypt_key[index]))[0] & PH_MASK_6; }
#endif

// Bucket salts by their first machine word (10-bit table).
static int salt_hash(void *salt)
{
	return *((ARCH_WORD *)salt) & 0x3FF;
}

// Format descriptor wiring the callbacks above into John's format table.
struct fmt_main fmt_phpassmd5 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PHPASS_CPU_PLAINTEXT_LENGTH,
		PHPASS_BINARY_SIZE,
		PHPASS_BINARY_ALIGN,
		SALT_SIZE+1,
		PHPASS_SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		FMT_OMP |
#endif
		FMT_CASE | FMT_8_BIT,
		{
			"iteration count",
		},
		{ FORMAT_TAG, FORMAT_TAG2, FORMAT_TAG3 },
		phpass_common_tests_39
	}, {
		init,
		done,
		fmt_default_reset,
		phpass_common_prepare,
		phpass_common_valid,
		phpass_common_split,
		phpass_common_binary,
		salt,
		{
			phpass_common_iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
convert_csr5_x_csr.c
#include <alphasparse/opt.h>
#include <memory.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>

#include "alphasparse/format.h"
#include "alphasparse/util.h"

// Convert a CSR matrix A into the CSR5 storage format (Liu & Vinter style):
// nonzeros are grouped into 2D tiles of ALPHA_CSR5_OMEGA lanes x sigma
// entries, with per-tile bit-flag descriptors marking row starts, y/scansum
// offsets packed into the descriptor high bits, and col/val arrays
// transposed within each tile. On success *dst receives a newly allocated
// CSR5 matrix and ALPHA_SPARSE_STATUS_SUCCESS is returned.
// NOTE(review): assumes A uses 3-array CSR with contiguous rows
// (rows_start/rows_end) and sorted column indices — confirm with callers.
alphasparse_status_t ONAME(const ALPHA_SPMAT_CSR *A, ALPHA_SPMAT_CSR5 **dst)
{
    // if (!A->ordered) {
    //     printf("we need sorted csr.\n");
    //     return ALpPHA_SPARSE_STATUS_INVALID_VALUE;
    // }
    ALPHA_SPMAT_CSR5 *B = alpha_malloc(sizeof(ALPHA_SPMAT_CSR5));
    *dst = B;
    //init host point
    B->col_idx = NULL;
    B->row_ptr = NULL;
    B->val = NULL;
    B->tile_ptr = NULL;
    B->tile_desc = NULL;
    B->tile_desc_offset_ptr = NULL;
    B->tile_desc_offset = NULL;
    B->calibrator = NULL;

    B->num_rows = A->rows;
    B->num_cols = A->cols;
    B->nnz = A->rows_end[A->rows - 1];

    B->val = alpha_memalign((uint64_t)(B->nnz) * sizeof(ALPHA_Number), DEFAULT_ALIGNMENT);
    B->row_ptr = alpha_memalign((uint64_t)(A->rows + 1) * sizeof(ALPHA_INT), DEFAULT_ALIGNMENT);
    B->col_idx = alpha_memalign((uint64_t)(B->nnz) * sizeof(ALPHA_INT), DEFAULT_ALIGNMENT);

    // copy the CSR row pointer (rows_start has rows+1 usable entries here)
    for( ALPHA_INT i=0; i < B->num_rows+1; i++) {
        B->row_ptr[i] = A->rows_start[i];
    }

    // compute sigma (tile height) from the average nnz per row, using the
    // heuristic thresholds r/s/t/u from the CSR5 paper
    int r = 4;
    int s = 32;
    int t = 256;
    int u = 6;

    int csr_nnz_per_row = B->nnz / B->num_rows;
    if (csr_nnz_per_row <= r)
        B->csr5_sigma = r;
    else if (csr_nnz_per_row > r && csr_nnz_per_row <= s)
        B->csr5_sigma = csr_nnz_per_row;
    else if (csr_nnz_per_row <= t && csr_nnz_per_row > s)
        B->csr5_sigma = s;
    else // csr_nnz_per_row > t
        B->csr5_sigma = u;

    // conversion
    // compute #bits required for `y_offset' and `scansum_offset'
    int base = 2;
    B->csr5_bit_y_offset = 1;
    while (base < ALPHA_CSR5_OMEGA * B->csr5_sigma) { base *= 2; B->csr5_bit_y_offset++; }
    base = 2;
    B->csr5_bit_scansum_offset = 1;
    while (base < ALPHA_CSR5_OMEGA) { base *= 2; B->csr5_bit_scansum_offset++; }
    // both offsets plus at least one bit-flag bit must fit in a 32-bit packet
    if ( (size_t) B->csr5_bit_y_offset + B->csr5_bit_scansum_offset > sizeof(uint32_t) * 8 - 1) {
        printf("error: csr5-omega not supported.\n");
        return ALPHA_SPARSE_STATUS_NOT_SUPPORTED;
    }
    int bit_all = B->csr5_bit_y_offset + B->csr5_bit_scansum_offset + B->csr5_sigma;
    B->csr5_num_packets = ceil((float)bit_all /(float)(sizeof(uint32_t)*8));

    // calculate the number of tiles
    B->csr5_p = ceil((float)B->nnz / (float)(ALPHA_CSR5_OMEGA * B->csr5_sigma));
    //printf("sigma = %i, p = %i\n", B->csr5_sigma, B->csr5_p);

    // malloc the newly added arrays for CSR5
    B->tile_ptr = alpha_memalign((uint64_t)(B->csr5_p+1) * sizeof(uint32_t), DEFAULT_ALIGNMENT);
    for( ALPHA_INT i=0; i<B->csr5_p+1; i++) {
        B->tile_ptr[i] = 0;
    }
    B->tile_desc = alpha_memalign((uint64_t)(B->csr5_p * ALPHA_CSR5_OMEGA * B->csr5_num_packets) * sizeof(uint32_t), DEFAULT_ALIGNMENT);
    for( ALPHA_INT i=0; i<B->csr5_p * ALPHA_CSR5_OMEGA * B->csr5_num_packets; i++) {
        B->tile_desc[i] = 0;
    }
    B->calibrator = alpha_memalign((uint64_t)(B->csr5_p) * sizeof(ALPHA_Number), DEFAULT_ALIGNMENT);
    for( ALPHA_INT i=0; i<B->csr5_p; i++) {
        alpha_setzero(B->calibrator[i]);
    }
    B->tile_desc_offset_ptr = alpha_memalign((uint64_t)(B->csr5_p+1) * sizeof(ALPHA_INT), DEFAULT_ALIGNMENT);
    for( ALPHA_INT i=0; i<B->csr5_p+1; i++) {
        B->tile_desc_offset_ptr[i] = 0;
    }

    // convert csr data to csr5 data (3 steps)

    // step 1 generate tile pointer
    // step 1.1 binary search row pointer: tile_ptr[t] = first row whose
    // nonzeros reach into tile t
    for (ALPHA_INT global_id = 0; global_id <= B->csr5_p; global_id++) {
        // compute tile boundaries by tile of size sigma * omega
        ALPHA_INT boundary = global_id * B->csr5_sigma * ALPHA_CSR5_OMEGA;
        // clamp tile boundaries to [0, nnz]
        boundary = boundary > B->nnz ? B->nnz : boundary;
        // binary search
        ALPHA_INT start = 0, stop = B->num_rows, median;
        ALPHA_INT key_median;
        while (stop >= start) {
            median = (stop + start) / 2;
            key_median = B->row_ptr[median];
            if (boundary >= key_median)
                start = median + 1;
            else
                stop = median - 1;
        }
        B->tile_ptr[global_id] = start-1;
    }

    // step 1.2 check empty rows: if a tile spans any empty row, mark it by
    // setting the MSB of its tile_ptr entry
    for (ALPHA_INT group_id = 0; group_id < B->csr5_p; group_id++) {
        int dirty = 0;
        uint32_t start = B->tile_ptr[group_id];
        uint32_t stop = B->tile_ptr[group_id+1];
        // strip any previously set MSB marker before comparing rows
        start = (start << 1) >> 1;
        stop = (stop << 1) >> 1;
        if (start == stop)
            continue;
        for (uint32_t row_idx = start; row_idx <= stop; row_idx++) {
            if (B->row_ptr[row_idx] == B->row_ptr[row_idx+1]) {
                dirty = 1;
                break;
            }
        }
        if (dirty) {
            start |= sizeof(uint32_t) == 4 ? 0x80000000 : 0x8000000000000000;
            B->tile_ptr[group_id] = start;
        }
    }
    B->csr5_tail_tile_start = (B->tile_ptr[B->csr5_p-1] << 1) >> 1;

    // step 2. generate tile descriptor
    int bit_all_offset = B->csr5_bit_y_offset + B->csr5_bit_scansum_offset;

    //generate_tile_descriptor_s1_kernel: set one bit-flag per row start that
    // falls inside tile par_id (flags live after the two offset fields)
    for (int par_id = 0; par_id < B->csr5_p-1; par_id++) {
        const ALPHA_INT row_start = B->tile_ptr[par_id] & 0x7FFFFFFF;
        const ALPHA_INT row_stop = B->tile_ptr[par_id + 1] & 0x7FFFFFFF;
        for (int rid = row_start; rid <= row_stop; rid++) {
            int ptr = B->row_ptr[rid];
            int pid = ptr / (ALPHA_CSR5_OMEGA * B->csr5_sigma);
            if (pid == par_id) {
                int lx = (ptr / B->csr5_sigma) % ALPHA_CSR5_OMEGA;
                const int glid = ptr%B->csr5_sigma+bit_all_offset;
                const int ly = glid / 32;
                const int llid = glid % 32;
                const uint32_t val = 0x1 << (31 - llid);
                const int location = pid * ALPHA_CSR5_OMEGA * B->csr5_num_packets + ly * ALPHA_CSR5_OMEGA + lx;
                B->tile_desc[location] |= val;
            }
        }
    }

    //generate_tile_descriptor_s2_kernel: compute per-lane y_offset and
    // scansum_offset and pack them into the first descriptor packet
    int num_thread = 1; //omp_get_max_threads();
    ALPHA_INT *s_segn_scan_all, *s_present_all;
    s_segn_scan_all = alpha_memalign((uint64_t)(2 * ALPHA_CSR5_OMEGA * num_thread) * sizeof(ALPHA_INT), DEFAULT_ALIGNMENT);
    s_present_all = alpha_memalign((uint64_t)(2 * ALPHA_CSR5_OMEGA * num_thread) * sizeof(ALPHA_INT), DEFAULT_ALIGNMENT);
    //int *s_segn_scan_all = (int *)malloc(2 * ALPHA_CSR5_OMEGA
    //        * sizeof(int) * num_thread);
    //int *s_present_all = (int *)malloc(2 * ALPHA_CSR5_OMEGA
    //        * sizeof(int) * num_thread);

    // sentinel at index OMEGA stops the scansum_offset search below without
    // running off the end of the per-thread s_present slice
    for (ALPHA_INT i = 0; i < num_thread; i++)
        s_present_all[i * 2 * ALPHA_CSR5_OMEGA + ALPHA_CSR5_OMEGA] = 1;

    //const int bit_all_offset = bit_y_offset + bit_scansum_offset;

    //#pragma omp parallel for
    for (int par_id = 0; par_id < B->csr5_p-1; par_id++) {
        int tid = 0; //omp_get_thread_num();
        int *s_segn_scan = &s_segn_scan_all[tid * 2 * ALPHA_CSR5_OMEGA];
        int *s_present = &s_present_all[tid * 2 * ALPHA_CSR5_OMEGA];

        memset(s_segn_scan, 0, (ALPHA_CSR5_OMEGA + 1)*sizeof(int));
        memset(s_present, 0, ALPHA_CSR5_OMEGA * sizeof(int));

        bool with_empty_rows = (B->tile_ptr[par_id] >> 31) & 0x1;
        ALPHA_INT row_start = B->tile_ptr[par_id] & 0x7FFFFFFF;
        const ALPHA_INT row_stop = B->tile_ptr[par_id + 1] & 0x7FFFFFFF;

        if (row_start == row_stop)
            continue;

        //#pragma simd
        for (int lane_id = 0; lane_id < ALPHA_CSR5_OMEGA; lane_id++) {
            int start = 0, stop = 0, segn = 0;
            bool present = 0;
            uint32_t bitflag = 0;

            // lane 0 always starts a segment
            present |= !lane_id;

            // extract the first bit-flag packet
            int ly = 0;
            uint32_t first_packet = B->tile_desc[par_id * ALPHA_CSR5_OMEGA * B->csr5_num_packets+lane_id];
            // shift out the offset fields so bit 31 is this lane's flag 0
            bitflag = (first_packet << bit_all_offset) | ((uint32_t)present << 31);
            start = !((bitflag >> 31) & 0x1);
            present |= (bitflag >> 31) & 0x1;

            // walk the remaining sigma-1 flags, fetching the next packet
            // whenever the current one is exhausted
            for (int i = 1; i < B->csr5_sigma; i++) {
                if ((!ly && i == 32 - bit_all_offset) || (ly && (i - (32 - bit_all_offset)) % 32==0)) {
                    ly++;
                    bitflag = B->tile_desc[par_id * ALPHA_CSR5_OMEGA * B->csr5_num_packets + ly * ALPHA_CSR5_OMEGA + lane_id];
                }
                const int norm_i = !ly ? i : i - (32 - bit_all_offset);
                stop += (bitflag >> (31 - norm_i % 32) ) & 0x1;
                present |= (bitflag >> (31 - norm_i % 32)) & 0x1;
            }

            // compute y_offset for all tiles
            segn = stop - start + present;
            segn = segn > 0 ? segn : 0;
            s_segn_scan[lane_id] = segn;

            // compute scansum_offset
            s_present[lane_id] = present;
        }

        //scan_single<int>(s_segn_scan, ALPHA_CSR5_OMEGA + 1);
        // exclusive prefix sum of segment counts -> per-lane y offsets
        int old_val, new_val;
        old_val = s_segn_scan[0];
        s_segn_scan[0] = 0;
        for (int i = 1; i < ALPHA_CSR5_OMEGA + 1; i++) {
            new_val = s_segn_scan[i];
            s_segn_scan[i] = old_val + s_segn_scan[i-1];
            old_val = new_val;
        }

        if (with_empty_rows) {
            B->tile_desc_offset_ptr[par_id] = s_segn_scan[ALPHA_CSR5_OMEGA];
            // flag (any nonzero) that offsets exist; prefix-summed later
            B->tile_desc_offset_ptr[B->csr5_p] = 1;
        }

        //#pragma simd
        for (int lane_id = 0; lane_id < ALPHA_CSR5_OMEGA; lane_id++) {
            int y_offset = s_segn_scan[lane_id];

            int scansum_offset = 0;
            int next1 = lane_id + 1;
            if (s_present[lane_id]) {
                // distance to the next lane with a segment; the sentinel
                // s_present[OMEGA] == 1 bounds this scan
                while ( ! s_present[next1] && next1 < ALPHA_CSR5_OMEGA) {
                    scansum_offset++;
                    next1++;
                }
            }

            uint32_t first_packet = B->tile_desc[par_id * ALPHA_CSR5_OMEGA * B->csr5_num_packets + lane_id];

            y_offset = lane_id ? y_offset - 1 : 0;

            // pack both offsets into the packet's high bits
            first_packet |= y_offset << (32-B->csr5_bit_y_offset);
            first_packet |= scansum_offset << (32-bit_all_offset);

            B->tile_desc[par_id * ALPHA_CSR5_OMEGA * B->csr5_num_packets + lane_id] = first_packet;
        }
    }
    alpha_free(s_segn_scan_all);
    alpha_free(s_present_all);

    if (B->tile_desc_offset_ptr[B->csr5_p]) {
        //scan_single(B->tile_desc_offset_ptr, p+1);
        // exclusive prefix sum over the per-tile offset counts
        int old_val, new_val;
        old_val = B->tile_desc_offset_ptr[0];
        B->tile_desc_offset_ptr[0] = 0;
        for (int i = 1; i < B->csr5_p+1; i++) {
            new_val = B->tile_desc_offset_ptr[i];
            B->tile_desc_offset_ptr[i] = old_val + B->tile_desc_offset_ptr[i-1];
            old_val = new_val;
        }
    }

    B->csr5_num_offsets = B->tile_desc_offset_ptr[B->csr5_p];

    if (B->csr5_num_offsets) {
        B->tile_desc_offset = alpha_memalign((uint64_t)(B->csr5_num_offsets) * sizeof(ALPHA_INT), DEFAULT_ALIGNMENT);

        //err = generate_tile_descriptor_offset: for tiles with empty rows,
        // record for every segment start which local row it belongs to
        const int bit_bitflag = 32 - bit_all_offset;

        //#pragma omp parallel for
        for (int par_id = 0; par_id < B->csr5_p-1; par_id++) {
            bool with_empty_rows = (B->tile_ptr[par_id] >> 31)&0x1;
            if (!with_empty_rows)
                continue;

            ALPHA_INT row_start = B->tile_ptr[par_id] & 0x7FFFFFFF;
            const ALPHA_INT row_stop = B->tile_ptr[par_id + 1] & 0x7FFFFFFF;

            int offset_pointer = B->tile_desc_offset_ptr[par_id];

            //#pragma simd
            for (int lane_id = 0; lane_id < ALPHA_CSR5_OMEGA; lane_id++) {
                bool local_bit;

                // extract the first bit-flag packet
                int ly = 0;
                uint32_t descriptor = B->tile_desc[par_id * ALPHA_CSR5_OMEGA * B->csr5_num_packets + lane_id];
                int y_offset = descriptor >> (32 - B->csr5_bit_y_offset);

                descriptor = descriptor << bit_all_offset;
                // lane 0 implicitly starts a segment
                descriptor = lane_id ? descriptor : descriptor | 0x80000000;

                local_bit = (descriptor >> 31) & 0x1;

                if (local_bit && lane_id) {
                    const ALPHA_INT idx = par_id * ALPHA_CSR5_OMEGA * B->csr5_sigma + lane_id * B->csr5_sigma;
                    // binary search: local row index owning nonzero `idx'
                    ALPHA_INT start = 0;
                    ALPHA_INT stop = row_stop - row_start - 1;
                    ALPHA_INT median, key_median;
                    while (stop >= start) {
                        median = (stop + start) / 2;
                        key_median = B->row_ptr[row_start+1+median];
                        if (idx >= key_median)
                            start = median + 1;
                        else
                            stop = median - 1;
                    }
                    const ALPHA_INT y_index = start-1;

                    B->tile_desc_offset[offset_pointer + y_offset] = y_index;

                    y_offset++;
                }

                for (int i = 1; i < B->csr5_sigma; i++) {
                    // fetch the next descriptor packet when flags run out
                    if ((!ly && i == bit_bitflag) || (ly && !(31 & (i - bit_bitflag)))) {
                        ly++;
                        descriptor = B->tile_desc[par_id * ALPHA_CSR5_OMEGA * B->csr5_num_packets + ly * ALPHA_CSR5_OMEGA + lane_id];
                    }
                    const int norm_i = 31 & (!ly ? i : i - bit_bitflag);

                    local_bit = (descriptor >> (31 - norm_i))&0x1;

                    if (local_bit) {
                        const ALPHA_INT idx = par_id * ALPHA_CSR5_OMEGA * B->csr5_sigma + lane_id * B->csr5_sigma + i;
                        // binary search
                        ALPHA_INT start = 0;
                        ALPHA_INT stop = row_stop-row_start-1;
                        ALPHA_INT median, key_median;
                        while (stop >= start) {
                            median = (stop + start) / 2;
                            key_median=B->row_ptr[row_start+1+median];
                            if (idx >= key_median)
                                start = median + 1;
                            else
                                stop = median - 1;
                        }
                        const ALPHA_INT y_index = start-1;

                        B->tile_desc_offset[offset_pointer + y_offset] = y_index;

                        y_offset++;
                    }
                }
            }
        }
    }

    // step 3. transpose column_index and value arrays
    //#pragma omp parallel for
    for (int par_id = 0; par_id < B->csr5_p; par_id++) {
        // if this is fast track tile, do not transpose it
        if (B->tile_ptr[par_id] == B->tile_ptr[par_id + 1]) {
            for (int idx = 0; idx < ALPHA_CSR5_OMEGA * B->csr5_sigma; idx++) {
                int src_idx = par_id * ALPHA_CSR5_OMEGA * B->csr5_sigma + idx;
                B->col_idx[src_idx] = A->col_indx[src_idx];
                B->val[src_idx] = A->values[src_idx];
            }
            continue;
        }
        //#pragma simd
        if (par_id < B->csr5_p-1) {
            // full tile: store column-major (sigma-first) within the tile
            for (int idx = 0; idx < ALPHA_CSR5_OMEGA * B->csr5_sigma; idx++) {
                int idx_y = idx % B->csr5_sigma;
                int idx_x = idx / B->csr5_sigma;
                int src_idx = par_id * ALPHA_CSR5_OMEGA * B->csr5_sigma + idx;
                int dst_idx = par_id * ALPHA_CSR5_OMEGA * B->csr5_sigma + idx_y * ALPHA_CSR5_OMEGA + idx_x;
                B->col_idx[dst_idx] = A->col_indx[src_idx];
                B->val[dst_idx] = A->values[src_idx];
            }
        } else {
            // the last tile (possibly partial) is kept untransposed
            for (int idx = par_id * ALPHA_CSR5_OMEGA * B->csr5_sigma; idx < B->nnz; idx++) {
                B->col_idx[idx] = A->col_indx[idx];
                B->val[idx] = A->values[idx];
            }
        }
    }

    //init deviece point
    B->d_col_idx = NULL;
    B->d_row_ptr = NULL;
    B->d_val = NULL;
    B->d_tile_ptr = NULL;
    B->d_tile_desc = NULL;
    B->d_tile_desc_offset_ptr = NULL;
    B->d_tile_desc_offset = NULL;
    B->d_calibrator = NULL;

    return ALPHA_SPARSE_STATUS_SUCCESS;
}
GB_unaryop__abs_fp64_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_fp64_uint64
// op(A') function:  GB_tran__abs_fp64_uint64

// C type:   double
// A type:   uint64_t
// cast:     double cij = (double) aij
// unaryop:  cij = fabs (aij)

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = fabs (x) ;

// casting (applied before the operator, so fabs sees a double)
#define GB_CASTING(z, x) \
    double z = (double) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_FP64 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise: Cx [p] = fabs ((double) Ax [p]) for p in [0, anz).
GrB_Info GB_unop__abs_fp64_uint64
(
    double *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Body supplied by the GB_unaryop_transpose.c template, which expands the
// GB_* macros defined above for this type/operator combination.
GrB_Info GB_tran__abs_fp64_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__islt_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__islt_int32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__islt_int32) // A.*B function (eWiseMult): GB (_AemultB_03__islt_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_int32) // A*D function (colscale): GB (_AxD__islt_int32) // D*A function (rowscale): GB (_DxB__islt_int32) // C+=B function (dense accum): GB (_Cdense_accumB__islt_int32) // C+=b function (dense accum): GB (_Cdense_accumb__islt_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_int32) // C=scalar+B GB (_bind1st__islt_int32) // C=scalar+B' GB (_bind1st_tran__islt_int32) // C=A+scalar GB (_bind2nd__islt_int32) // C=A'+scalar GB (_bind2nd_tran__islt_int32) // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = 
Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x < y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLT || GxB_NO_INT32 || GxB_NO_ISLT_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__islt_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__islt_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__islt_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__islt_int32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
// NOTE(review): this chunk begins mid-definition.  The fragment below is the
// tail of a column-scale kernel (C = A*D) whose signature starts before this
// view; it is left byte-identical.  All kernels in this region appear to be
// machine-generated specializations for the ISLT ("is less than") binary
// operator on int32 — TODO confirm against the generator (Generator/GB_binop.c).
// Convention throughout: if GB_DISABLE is set at compile time this
// operator/type pairing was compiled out, and GrB_NO_VALUE tells the caller
// to fall back to a generic method.  The real loops live in the #included
// "meta"/"template" files, which rely on macros and local variable names
// (C, Cx, x, y, Bx, Ax, ...) defined here — do not rename locals.
*A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Scale each row of B by the corresponding diagonal entry of D, applying the
// islt operator; the loop body is supplied by the included meta template.
GrB_Info GB (_DxB__islt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Element-wise "add" (set union) of A and B with optional mask M.  The three
// GB_WERK_DECLARE workspaces are consumed by the add template and released by
// GB_FREE_WORK before returning.
GrB_Info GB (_AaddB__islt_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

// Element-wise multiply (set intersection) of A and B with optional mask M.
GrB_Info GB (_AemultB_01__islt_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// Specialized eWiseMult for mixed sparsities.  GB_BINOP_FLIP selects whether a
// runtime flipxy flag must be honoured (operator not commutative and without a
// pre-flipped variant); the GB_FLIPPED macro configures the included template.
GrB_Info GB (_AemultB_02__islt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__islt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__islt_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// For each entry p present in B (per the Bb bitmap, if any), compute
// Cx [p] = (x < Bx [p]).  Self-contained loop, parallelized over entries.
GrB_Info GB (_bind1st__islt_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t   x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;    // skip entries absent from the bitmap
        int32_t bij = Bx [p] ;
        Cx [p] = (x < bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Mirror of bind1st: Cx [p] = (Ax [p] < y) for each present entry.
GrB_Info GB (_bind2nd__islt_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t   y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int32_t aij = Ax [p] ;
        Cx [p] = (aij < y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the included GB_unop_transpose.c template.
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int32_t aij = Ax [pA] ;             \
    Cx [pC] = (x < aij) ;               \
}

GrB_Info GB (_bind1st_tran__islt_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template expansion
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int32_t aij = Ax [pA] ;             \
    Cx [pC] = (aij < y) ;               \
}

GrB_Info GB (_bind2nd_tran__islt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
InternalTransferCircuit.h
#ifndef _INTERNAL_TRANSFER_CIRCUIT_H_
#define _INTERNAL_TRANSFER_CIRCUIT_H_

#include "Circuit.h"
#include "../Utils/Constants.h"
#include "../Utils/Data.h"
#include "../Utils/Utils.h"
#include "../Gadgets/AccountGadgets.h"
#include "../Gadgets/TradingHistoryGadgets.h"

#include "ethsnarks.hpp"
#include "utils.hpp"

using namespace ethsnarks;

namespace Loopring
{

// Circuit gadget for a single internal transfer: moves `amount` of `tokenID`
// from account `accountID_From` to `accountID_To`, pays `fee` of `feeTokenID`
// from From to the operator, and chains the resulting Merkle-root updates.
// The EdDSA signature of the From account is verified; when verification is
// bypassed (type != 0) the transfer is counted as "conditional".
// NOTE(review): several FMT annotation prefixes are inconsistent (some lack
// the leading "." or the prefix entirely, and ".fTansAmount" looks like a typo
// for ".fTransAmount").  These are annotation strings only and are left
// byte-identical here — confirm whether they matter for debugging output.
class InternalTransferGadget : public GadgetT
{
public:
    const Constants& constants;

    // User From state (balances for fee token and transfer token, account leaf)
    BalanceGadget balanceFBefore_From;
    BalanceGadget balanceTBefore_From;
    AccountGadget accountBefore_From;

    // User To state (balance of the transferred token, account leaf)
    BalanceGadget balanceTBefore_To;
    AccountGadget accountBefore_To;

    // Operator state (balance of the fee token)
    BalanceGadget balanceBefore_O;

    // Inputs (packed + bit-decomposed transfer fields)
    DualVariableGadget accountID_From;
    DualVariableGadget accountID_To;
    DualVariableGadget tokenID;
    DualVariableGadget amount;
    DualVariableGadget feeTokenID;
    DualVariableGadget fee;
    DualVariableGadget type;

    // Signature
    Poseidon_gadget_T<9, 1, 6, 53, 8, 1> hash;
    SignatureVerifier signatureVerifier;

    // Type
    NotGadget signatureInvalid;
    UnsafeAddGadget numConditionalTransfersAfter;
    RequireEqualGadget type_eq_signatureInvalid;

    // User To account check
    RequireNotZeroGadget publicKeyX_notZero;

    // Fee as float
    FloatGadget fFee;
    RequireAccuracyGadget requireAccuracyFee;

    // Amount as float
    FloatGadget fAmount;
    RequireAccuracyGadget requireAccuracyAmount;

    // Fee payment from From to the operator
    subadd_gadget feePayment;
    // Transfer from From to To
    subadd_gadget transferPayment;

    // Increase the nonce of From by 1
    AddGadget nonce_From_after;

    // Update User From (fee-token balance, transfer-token balance, account leaf)
    UpdateBalanceGadget updateBalanceF_From;
    UpdateBalanceGadget updateBalanceT_From;
    UpdateAccountGadget updateAccount_From;

    // Update User To
    UpdateBalanceGadget updateBalanceT_To;
    UpdateAccountGadget updateAccount_To;

    // Update Operator
    UpdateBalanceGadget updateBalanceF_O;

    // Wires the whole per-transfer sub-circuit.  The initializer-list order is
    // significant: later gadgets consume results of earlier ones (e.g. the
    // Merkle updates chain updateBalanceF_From -> updateBalanceT_From ->
    // updateAccount_From -> updateAccount_To).
    InternalTransferGadget(
        ProtoboardT &pb,
        const jubjub::Params& params,
        const Constants& _constants,
        const VariableT& accountsMerkleRoot,
        const VariableT& operatorBalancesRoot,
        const VariableT& blockExchangeID,
        const VariableT& numConditionalTransfersBefore,
        const std::string &prefix
    ) :
        GadgetT(pb, prefix),
        constants(_constants),

        // User From state
        balanceFBefore_From(pb, FMT(prefix, "balanceFBefore_From")),
        balanceTBefore_From(pb, FMT(prefix, "balanceTBefore_From")),
        accountBefore_From(pb, FMT(prefix, "accountBefore_From")),
        // User To state
        balanceTBefore_To(pb, FMT(prefix, "balanceTBefore_To")),
        accountBefore_To(pb, FMT(prefix, "accountBefore_To")),
        // Operator state
        balanceBefore_O(pb, FMT(prefix, "balanceBefore_O")),

        // Inputs
        accountID_From(pb, NUM_BITS_ACCOUNT, FMT(prefix, ".accountID_From")),
        accountID_To(pb, NUM_BITS_ACCOUNT, FMT(prefix, ".accountID_To")),
        tokenID(pb, NUM_BITS_TOKEN, FMT(prefix, ".tokenID")),
        amount(pb, NUM_BITS_AMOUNT, FMT(prefix, ".amount")),
        feeTokenID(pb, NUM_BITS_TOKEN, FMT(prefix, ".feeTokenID")),
        fee(pb, NUM_BITS_AMOUNT, FMT(prefix, ".fee")),
        type(pb, NUM_BITS_TYPE, FMT(prefix, ".type")),

        // Signature
        // First, verify the signature of the 'from' side of the InternalTransfer.
        // The Poseidon hash binds all transfer fields plus the From nonce
        // (replay protection) and the exchange ID (cross-exchange protection).
        hash(pb, var_array({blockExchangeID, accountID_From.packed, accountID_To.packed, tokenID.packed, amount.packed, feeTokenID.packed, fee.packed, accountBefore_From.nonce}), FMT(this->annotation_prefix, ".hash")),
        signatureVerifier(pb, params, constants, accountBefore_From.publicKey, hash.result(), FMT(prefix, ".signatureVerifier"), false),

        // Type
        signatureInvalid(pb, signatureVerifier.result(), ".signatureInvalid"),
        // numConditionalTransfersAfter accumulates the number of transfers that
        // did NOT carry a valid signature (i.e. conditional transfers).
        numConditionalTransfersAfter(pb, numConditionalTransfersBefore, signatureInvalid.result(), ".numConditionalTransfersAfter"),
        // The transfer `type` is validated here: it must equal the negated
        // signature-verification result (type==1 <=> signature not checked).
        type_eq_signatureInvalid(pb, type.packed, signatureInvalid.result(), ".type_eq_signatureInvalid"),

        // User To account check
        // Ensure the To account exists (its public key X coordinate is set).
        publicKeyX_notZero(pb, accountBefore_To.publicKey.x, FMT(prefix, ".publicKeyX_notZero")),

        // Fee as float
        // Decode the float-encoded fee and constrain its accuracy w.r.t. the
        // full-precision `fee` input.
        fFee(pb, constants, Float16Encoding, FMT(prefix, ".fFee")),
        requireAccuracyFee(pb, fFee.value(), fee.packed, Float16Accuracy, NUM_BITS_AMOUNT, FMT(prefix, ".requireAccuracyFee")),
        // Amount as float
        // Decode the float-encoded transfer amount and constrain its accuracy.
        fAmount(pb, constants, Float24Encoding, FMT(prefix, ".fTansAmount")),
        requireAccuracyAmount(pb, fAmount.value(), amount.packed, Float24Accuracy, NUM_BITS_AMOUNT, FMT(prefix, ".requireAccuracyAmount")),

        // Fee payment from From to the operator
        feePayment(pb, NUM_BITS_AMOUNT, balanceFBefore_From.balance, balanceBefore_O.balance, fFee.value(), FMT(prefix, ".feePayment")),
        // Transfer from From to To
        transferPayment(pb, NUM_BITS_AMOUNT, balanceTBefore_From.balance, balanceTBefore_To.balance, fAmount.value(), FMT(prefix, ".transferPayment")),

        // Increase the nonce of From by 1 (unless it's a conditional transfer:
        // the addend is the signature-verification result, 0 or 1)
        nonce_From_after(pb, accountBefore_From.nonce, signatureVerifier.result(), NUM_BITS_NONCE, FMT(prefix, ".nonce_From_after")),

        // Update User From: fee-token balance, then transfer-token balance,
        // then the account leaf (with the incremented nonce).
        updateBalanceF_From(pb, accountBefore_From.balancesRoot, feeTokenID.bits, {balanceFBefore_From.balance, balanceFBefore_From.tradingHistory}, {feePayment.X, balanceFBefore_From.tradingHistory}, FMT(prefix, ".updateBalanceF_From")),
        updateBalanceT_From(pb, updateBalanceF_From.result(), tokenID.bits, {balanceTBefore_From.balance, balanceTBefore_From.tradingHistory}, {transferPayment.X, balanceTBefore_From.tradingHistory}, FMT(prefix, ".updateBalanceT_From")),
        updateAccount_From(pb, accountsMerkleRoot, accountID_From.bits, {accountBefore_From.publicKey.x, accountBefore_From.publicKey.y, accountBefore_From.nonce, accountBefore_From.balancesRoot}, {accountBefore_From.publicKey.x, accountBefore_From.publicKey.y, nonce_From_after.result(), updateBalanceT_From.result()}, FMT(prefix, ".updateAccount_From")),

        // Update User To (nonce and keys unchanged; only the balance leaf moves)
        updateBalanceT_To(pb, accountBefore_To.balancesRoot, tokenID.bits, {balanceTBefore_To.balance, balanceTBefore_To.tradingHistory}, {transferPayment.Y, balanceTBefore_To.tradingHistory}, FMT(prefix, ".updateBalanceT_To")),
        updateAccount_To(pb, updateAccount_From.result(), accountID_To.bits, {accountBefore_To.publicKey.x, accountBefore_To.publicKey.y, accountBefore_To.nonce, accountBefore_To.balancesRoot}, {accountBefore_To.publicKey.x, accountBefore_To.publicKey.y, accountBefore_To.nonce, updateBalanceT_To.result()}, FMT(prefix, ".updateAccount_To")),

        // Update Operator
        // Update the operator's balancesRoot with the received fee.
        updateBalanceF_O(pb, operatorBalancesRoot, feeTokenID.bits, {balanceBefore_O.balance, balanceBefore_O.tradingHistory}, {feePayment.Y, balanceBefore_O.tradingHistory}, FMT(prefix, ".updateBalanceF_O"))
    {
    }

    // Fill in concrete values for one transfer.  The call order mirrors the
    // gadget declaration order so that every gadget sees its inputs assigned.
    void generate_r1cs_witness(const InternalTransfer& transfer)
    {
        // User From state
        balanceFBefore_From.generate_r1cs_witness(transfer.balanceUpdateF_From.before);
        balanceTBefore_From.generate_r1cs_witness(transfer.balanceUpdateT_From.before);
        accountBefore_From.generate_r1cs_witness(transfer.accountUpdate_From.before);
        // User To state
        balanceTBefore_To.generate_r1cs_witness(transfer.balanceUpdateT_To.before);
        accountBefore_To.generate_r1cs_witness(transfer.accountUpdate_To.before);
        // Operator state
        balanceBefore_O.generate_r1cs_witness(transfer.balanceUpdateF_O.before);

        // Inputs
        accountID_From.generate_r1cs_witness(pb, transfer.accountUpdate_From.accountID);
        accountID_To.generate_r1cs_witness(pb, transfer.accountUpdate_To.accountID);
        tokenID.generate_r1cs_witness(pb, transfer.balanceUpdateT_From.tokenID);
        amount.generate_r1cs_witness(pb, transfer.amount);
        feeTokenID.generate_r1cs_witness(pb, transfer.balanceUpdateF_From.tokenID);
        fee.generate_r1cs_witness(pb, transfer.fee);
        type.generate_r1cs_witness(pb, transfer.type);

        // Signature
        hash.generate_r1cs_witness();
        signatureVerifier.generate_r1cs_witness(transfer.signature);

        // Type
        signatureInvalid.generate_r1cs_witness();
        // NOTE(review): the running count is assigned directly from the block
        // data rather than recomputed by the gadget — confirm that is intended.
        pb.val(numConditionalTransfersAfter.sum) = transfer.numConditionalTransfersAfter;
        type_eq_signatureInvalid.generate_r1cs_witness();

        // User To account check
        publicKeyX_notZero.generate_r1cs_witness();

        // Fee as float
        fFee.generate_r1cs_witness(toFloat(transfer.fee, Float16Encoding));
        requireAccuracyFee.generate_r1cs_witness();
        // Amount as float
        fAmount.generate_r1cs_witness(toFloat(transfer.amount, Float24Encoding));
        requireAccuracyAmount.generate_r1cs_witness();

        // Fee payment from From to the operator
        feePayment.generate_r1cs_witness();
        // Transfer from From to To
        transferPayment.generate_r1cs_witness();

        // Increase the nonce of From by 1
        nonce_From_after.generate_r1cs_witness();

        // Update User From
        updateBalanceF_From.generate_r1cs_witness(transfer.balanceUpdateF_From.proof);
        updateBalanceT_From.generate_r1cs_witness(transfer.balanceUpdateT_From.proof);
        updateAccount_From.generate_r1cs_witness(transfer.accountUpdate_From.proof);
        // Update User To
        updateBalanceT_To.generate_r1cs_witness(transfer.balanceUpdateT_To.proof);
        updateAccount_To.generate_r1cs_witness(transfer.accountUpdate_To.proof);
        // Update Operator
        updateBalanceF_O.generate_r1cs_witness(transfer.balanceUpdateF_O.proof);
    }

    // Emit the R1CS constraints for every sub-gadget, in dependency order.
    void generate_r1cs_constraints()
    {
        // Inputs
        accountID_From.generate_r1cs_constraints(true);
        accountID_To.generate_r1cs_constraints(true);
        tokenID.generate_r1cs_constraints(true);
        amount.generate_r1cs_constraints(true);
        feeTokenID.generate_r1cs_constraints(true);
        fee.generate_r1cs_constraints(true);
        type.generate_r1cs_constraints(true);

        // Signature
        hash.generate_r1cs_constraints();
        signatureVerifier.generate_r1cs_constraints();

        // Type
        signatureInvalid.generate_r1cs_constraints();
        numConditionalTransfersAfter.generate_r1cs_constraints();
        type_eq_signatureInvalid.generate_r1cs_constraints();

        // User To account check
        publicKeyX_notZero.generate_r1cs_constraints();

        // Fee as float
        fFee.generate_r1cs_constraints();
        requireAccuracyFee.generate_r1cs_constraints();
        // Amount as float
        fAmount.generate_r1cs_constraints();
        requireAccuracyAmount.generate_r1cs_constraints();

        // Fee payment from From to the operator
        feePayment.generate_r1cs_constraints();
        // Transfer from From to To
        transferPayment.generate_r1cs_constraints();

        // Increase the nonce of From by 1
        nonce_From_after.generate_r1cs_constraints();

        // Update User From
        updateBalanceF_From.generate_r1cs_constraints();
        updateBalanceT_From.generate_r1cs_constraints();
        updateAccount_From.generate_r1cs_constraints();
        // Update User To
        updateBalanceT_To.generate_r1cs_constraints();
        updateAccount_To.generate_r1cs_constraints();
        // Update Operator
        updateBalanceF_O.generate_r1cs_constraints();
    }

    // On-chain data-availability layout for this transfer.  The two 2-bit
    // zero paddings pad tokenID/feeTokenID fields — TODO confirm against the
    // smart-contract decoder.
    const std::vector<VariableArrayT> getPublicData() const
    {
        return {type.bits,
                accountID_From.bits,
                accountID_To.bits,
                VariableArrayT(2, constants.zero), tokenID.bits,
                VariableArrayT(2, constants.zero), feeTokenID.bits,
                fAmount.bits(),
                fFee.bits()};
    }

    // Accounts Merkle root after both account updates.
    const VariableT& getNewAccountsRoot() const
    {
        return updateAccount_To.result();
    }

    // Operator balances root after the fee payment.
    const VariableT& getNewOperatorBalancesRoot() const
    {
        return updateBalanceF_O.result();
    }

    // Running count of conditional (unsigned) transfers including this one.
    const VariableT& getNewNumConditionalTransfers() const
    {
        return numConditionalTransfersAfter.result();
    }
};

// Block-level circuit: chains `blockSize` InternalTransferGadgets, updates the
// operator account once at the end, and exposes the public data / new Merkle
// root that the smart contract checks.
class InternalTransferCircuit : public Circuit
{
public:
    PublicDataGadget publicData;
    Constants constants;
    jubjub::Params params;

    // State
    AccountGadget accountBefore_O;

    // Inputs
    DualVariableGadget exchangeID;
    DualVariableGadget merkleRootBefore;
    DualVariableGadget merkleRootAfter;
    std::unique_ptr<libsnark::dual_variable_gadget<FieldT>> numConditionalTransfers;
    DualVariableGadget operatorAccountID;

    // Operator account check
    RequireNotZeroGadget publicKeyX_notZero;

    // Internal transfers
    bool onchainDataAvailability;   // set in generateConstraints()
    unsigned int numTransfers;      // block size, set in generateConstraints()
    std::vector<InternalTransferGadget> transfers;

    // Update Operator
    std::unique_ptr<UpdateAccountGadget> updateAccount_O;

    InternalTransferCircuit(ProtoboardT &pb, const std::string &prefix) :
        Circuit(pb, prefix),

        publicData(pb, FMT(prefix, ".publicData")),
        constants(pb, FMT(prefix, ".constants")),

        // State
        accountBefore_O(pb, FMT(prefix, ".accountBefore_O")),

        // Inputs
        exchangeID(pb, NUM_BITS_EXCHANGE_ID, FMT(prefix, ".exchangeID")),
        merkleRootBefore(pb, 256, FMT(prefix, ".merkleRootBefore")),
        merkleRootAfter(pb, 256, FMT(prefix, ".merkleRootAfter")),
        operatorAccountID(pb, NUM_BITS_ACCOUNT, FMT(prefix, ".operatorAccountID")),

        // Operator account check
        publicKeyX_notZero(pb, accountBefore_O.publicKey.x, FMT(prefix, ".publicKeyX_notZero"))
    {
    }

    void generateConstraints(bool onchainDataAvailability, unsigned int blockSize) override
    {
        this->onchainDataAvailability = onchainDataAvailability;
        this->numTransfers = blockSize;

        constants.generate_r1cs_constraints();

        // Inputs
        exchangeID.generate_r1cs_constraints(true);
        merkleRootBefore.generate_r1cs_constraints(true);
        merkleRootAfter.generate_r1cs_constraints(true);
        operatorAccountID.generate_r1cs_constraints(true);

        // Operator account check
        publicKeyX_notZero.generate_r1cs_constraints();

        // Internal transfers
        // reserve() is required: emplace_back must not reallocate, because
        // each gadget holds references into protoboard state.
        transfers.reserve(numTransfers);
        for (size_t j = 0; j < numTransfers; j++)
        {
            // Each iteration chains the accounts root and the operator
            // balances root; the operator account root itself is updated once
            // at the end.
            VariableT transAccountsRoot = (j == 0) ? merkleRootBefore.packed : transfers.back().getNewAccountsRoot();
            VariableT transOperatorBalancesRoot = (j == 0) ? accountBefore_O.balancesRoot : transfers.back().getNewOperatorBalancesRoot();
            transfers.emplace_back(
                pb,
                params,
                constants,
                transAccountsRoot,
                transOperatorBalancesRoot,
                exchangeID.packed,
                // Running count of transfers whose signature was not verified
                (j == 0) ? constants.zero : transfers.back().getNewNumConditionalTransfers(),
                std::string("transfer_") + std::to_string(j));
            transfers.back().generate_r1cs_constraints();
        }

        // Update Operator
        // Finally update the operator account root.
        updateAccount_O.reset(new UpdateAccountGadget(pb, transfers.back().getNewAccountsRoot(), operatorAccountID.bits,
                      {accountBefore_O.publicKey.x, accountBefore_O.publicKey.y, accountBefore_O.nonce, accountBefore_O.balancesRoot},
                      {accountBefore_O.publicKey.x, accountBefore_O.publicKey.y, accountBefore_O.nonce, transfers.back().getNewOperatorBalancesRoot()},
                      FMT(annotation_prefix, ".updateAccount_O")));
        updateAccount_O->generate_r1cs_constraints();

        // Num conditional transfers
        // Total count of conditional (unsigned) internal transfers in the block.
        numConditionalTransfers.reset(new libsnark::dual_variable_gadget<FieldT>(
            pb, transfers.back().getNewNumConditionalTransfers(), 32, ".numConditionalTransfers")
        );
        numConditionalTransfers->generate_r1cs_constraints(true);

        // Public data
        publicData.add(exchangeID.bits);
        publicData.add(merkleRootBefore.bits);
        publicData.add(merkleRootAfter.bits);
        publicData.add(numConditionalTransfers->bits);
        if (onchainDataAvailability)
        {
            publicData.add(operatorAccountID.bits);
            for (const InternalTransferGadget& transfer : transfers)
            {
                publicData.add(transfer.getPublicData());
            }
        }
        publicData.generate_r1cs_constraints();

        // Check the new merkle root
        requireEqual(pb, updateAccount_O->result(), merkleRootAfter.packed, "newMerkleRoot");
    }

    bool generateWitness(const Loopring::InternalTransferBlock &block)
    {
        constants.generate_r1cs_witness();

        // State
        accountBefore_O.generate_r1cs_witness(block.accountUpdate_O.before);

        // Inputs
        exchangeID.generate_r1cs_witness(pb, block.exchangeID);
        merkleRootBefore.generate_r1cs_witness(pb, block.merkleRootBefore);
        merkleRootAfter.generate_r1cs_witness(pb, block.merkleRootAfter);
        operatorAccountID.generate_r1cs_witness(pb, block.operatorAccountID);

        // Operator account check
        publicKeyX_notZero.generate_r1cs_witness();

        // Internal transfers
        // Per-transfer witnesses are independent, so they may be assigned in
        // parallel when MULTICORE is enabled.
#ifdef MULTICORE
        #pragma omp parallel for
#endif
        for (unsigned int i = 0; i < block.transfers.size(); i++)
        {
            transfers[i].generate_r1cs_witness(block.transfers[i]);
        }

        // Update operator
        updateAccount_O->generate_r1cs_witness(block.accountUpdate_O.proof);

        // Num conditional transfers
        numConditionalTransfers->generate_r1cs_witness_from_packed();

        // Public data
        publicData.generate_r1cs_witness();

        return true;
    }

    bool generateWitness(const json& input) override
    {
        return generateWitness(input.get<Loopring::InternalTransferBlock>());
    }

    BlockType getBlockType() override
    {
        return BlockType::InternalTransfer;
    }

    unsigned int getBlockSize() override
    {
        return numTransfers;
    }

    void printInfo() override
    {
        std::cout << pb.num_constraints() << " constraints (" << (pb.num_constraints() / numTransfers) << "/transfer)" << std::endl;
    }
};

} // namespace Loopring

#endif
PVRangeSubSampler.h
/*
 * MIT License
 *
 * © ESI Group, 2015
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef __PVRANGESUBSAMPLER_H__
#define __PVRANGESUBSAMPLER_H__

#include <pvcop/db/array.h>
#include <pvcop/types/datetime_us.h>

#include <pvkernel/core/inendi_bench.h> // for BENCH_END, BENCH_START

#include <inendi/PVPlotted.h>
#include <pvkernel/rush/PVNraw.h>

#include <numeric>
#include <math.h>
#include <type_traits>
#include <unordered_set>

namespace Inendi
{

/**
 * Down-samples one or more timeseries over a time range into fixed-size
 * display buffers (_ts_matrix) of `display_type` values, optionally grouped
 * by a "split" column.  The top `reserved_bits` bits of each display value
 * encode special states (no value / underflow / overflow); the remaining
 * bits carry the scaled sample value.
 */
class PVRangeSubSampler
{
  public:
	// Reduction applied to all raw values falling into one display bucket.
	enum SAMPLING_MODE { MEAN, MIN, MAX };

  public:
	using zoom_f = double;

  private:
	// Memo of the last subsampling request, used to detect no-op re-requests.
	struct SamplingParams {
		size_t first = 0;
		size_t last = 0;
		pvcop::db::array minmax = {};
		size_t min = 0;
		size_t max = 0;

		SamplingParams(size_t first = 0,
		               size_t last = 0,
		               const pvcop::db::array& minmax = {},
		               size_t min = 0,
		               size_t max = 0)
		    : first(first), last(last), minmax(minmax.copy()), min(min), max(max)
		{
		}

		bool operator==(const SamplingParams& rhs) const
		{
			return rhs.first == first and rhs.last == last and rhs.minmax == minmax and
			       rhs.min == min and rhs.max == max;
		}
		bool operator!=(const SamplingParams& rhs) const { return not(*this == rhs); }
	};

  private:
	// Number of high bits reserved for the special-state tag.
	static constexpr const size_t reserved_bits = 2;
	using value_type = Inendi::PVPlotted::value_type;

  public:
	using display_type = uint16_t;
	// Bits available for the actual sample value (16 - reserved_bits = 14).
	static constexpr const size_t display_value_bits =
	    std::numeric_limits<display_type>::digits - reserved_bits;
	static constexpr const display_type display_type_min_val = 0;
	static constexpr const display_type display_type_max_val = (1 << display_value_bits) - 1;

  public:
	// Special-state tags stored in the reserved high bits.
	static constexpr const display_type no_value = 0b01 << display_value_bits;
	static constexpr const display_type underflow_value = 0b10 << display_value_bits;
	static constexpr const display_type overflow_value = 0b11 << display_value_bits;

	// True when the tag bits of `d` equal `mask` (value bits ignored).
	static constexpr bool display_match(display_type d, display_type mask)
	{
		return ((d >> display_value_bits) << display_value_bits) == mask;
	}

  public:
	PVRangeSubSampler(const pvcop::db::array& time,
	                  const std::vector<pvcop::core::array<value_type>>& timeseries,
	                  const PVRush::PVNraw& nraw,
	                  const pvcop::db::selection& sel,
	                  const pvcop::db::array* split = nullptr,
	                  size_t sampling_count = 2048);

	// Select the reduction used by subsequent subsample() calls.
	template <SAMPLING_MODE mode>
	void set_sampling_mode()
	{
		_compute_ranges_reduction_f = [this](auto... args) {
			compute_ranges_reduction<mode>(args...);
		};
	}

	void set_sampling_count(size_t sampling_count);
	void set_selected_timeseries(const std::unordered_set<size_t>& selected_timeseries);
	void set_split_column(const pvcop::db::array* split);

	size_t samples_count() const { return _sampling_count; }
	size_t total_count() const { return _time.get().size(); }
	size_t timeseries_count() const { return _timeseries.size(); }
	size_t group_count() const { return _split_count; }
	// Display name of split group i; empty when no split column is set.
	std::string group_name(size_t i) const
	{
		return _split ? _split->at(_split_extents.to_core_array()[i]) : "";
	}
	const std::vector<display_type>& sampled_timeserie(size_t index) const
	{
		return _ts_matrix[index];
	}
	const std::vector<size_t>& histogram() const { return _histogram; }
	const pvcop::db::array& minmax_time() const { return _minmax; }

	pvcop::db::array ratio_to_minmax(zoom_f ratio1, zoom_f ratio2) const;
	std::pair<zoom_f, zoom_f> minmax_to_ratio(const pvcop::db::array& minmax) const;

	const pvcop::db::indexes& sorted_indexes() const { return _sorted_indexes; }

	void subsample(zoom_f first_ratio, zoom_f last_ratio, zoom_f min_ratio = 0, zoom_f max_ratio = 0);
	void subsample(const pvcop::db::array& minmax, uint32_t min = 0, uint32_t max = 0);
	void resubsample();
	void resubsample(const std::unordered_set<size_t>& timeseries);

	bool valid() const;

  private:
	void allocate_internal_structures();
	void subsample(size_t first, size_t last, const pvcop::db::array& minmax, uint32_t min = 0, uint32_t max = 0);

	// Mode-dispatching front end (defined below) and the templated worker.
	template <SAMPLING_MODE mode>
	void compute_ranges_reduction(size_t first, size_t /*last*/, size_t min, size_t max);

	template <typename F>
	void compute_ranges_reduction(size_t first, size_t /*last*/, size_t min, size_t max);

  public:
	// Emitted after a subsampling pass completes.
	sigc::signal<void()> _subsampled;

  private:
	size_t _sampling_count;                 // number of display buckets
	const pvcop::db::array& _original_time;
	std::reference_wrapper<const pvcop::db::array> _time;
	const std::vector<pvcop::core::array<value_type>> _timeseries;
	const PVRush::PVNraw& _nraw;
	std::unordered_set<size_t> _selected_timeseries;
	std::vector<size_t> _timeseries_to_subsample;
	const pvcop::db::selection& _sel;
	const pvcop::db::array* _split;         // optional grouping column
	pvcop::db::groups _split_groups;
	pvcop::db::extents _split_extents;
	size_t _split_count = 1;
	pvcop::db::array _shifted_time;
	pvcop::db::indexes _sorted_indexes;
	pvcop::core::array<uint32_t> _sort;     // sort permutation; may be empty
	pvcop::db::array _minmax;
	std::vector<size_t> _histogram;         // rows per display bucket
	std::vector<std::vector<display_type>> _ts_matrix; // one row per (timeserie x group)
	SamplingParams _last_params;
	bool _reset = false;
	bool _valid = false;
	std::function<void(size_t, size_t, size_t, size_t)> _compute_ranges_reduction_f;
};

// CRTP-less base for the per-mode reduction policies: provides the default
// init (0) and a pass-through reduce.  Policies override map()/reduce()/init()
// as needed.
template <Inendi::PVRangeSubSampler::SAMPLING_MODE M>
struct sampling_mode_t {
	static constexpr const Inendi::PVRangeSubSampler::SAMPLING_MODE mode = M;
	inline static uint64_t init() { return 0; }
	inline static uint64_t reduce(uint64_t accum, uint32_t) { return accum; }
};

// Maps a SAMPLING_MODE value to the policy type at the same position in T...
template <Inendi::PVRangeSubSampler::SAMPLING_MODE mode, typename... T>
struct func_resolver {
	using type = std::tuple_element_t<mode, std::tuple<T...>>;
	static_assert(type::mode == mode,
	              "func_resolver parameters must be ordered according to SAMPLING_MODE enum");
};

// Instantiates the three reduction policies and forwards to the policy-typed
// worker.  Values are accumulated as (uint32_max - value); presumably the
// plotted values are stored inverted — TODO confirm against PVPlotted.
template <Inendi::PVRangeSubSampler::SAMPLING_MODE mode>
void Inendi::PVRangeSubSampler::compute_ranges_reduction(size_t first,
                                                         size_t /*last*/,
                                                         size_t min,
                                                         size_t max)
{
	struct mean_t : sampling_mode_t<SAMPLING_MODE::MEAN> {
		inline static void map(uint64_t& accum, uint32_t value)
		{
			accum += (std::numeric_limits<uint32_t>::max() - value);
		}
		inline static uint64_t reduce(uint64_t accum, uint32_t value_count)
		{
			return accum / value_count;
		}
	};

	struct min_t : sampling_mode_t<SAMPLING_MODE::MIN> {
		inline static uint64_t init() { return std::numeric_limits<uint64_t>::max(); }
		inline static void map(uint64_t& accum, uint32_t value)
		{
			accum = std::min(std::numeric_limits<uint32_t>::max() - value, (uint32_t)accum);
		}
	};

	struct max_t : sampling_mode_t<SAMPLING_MODE::MAX> {
		inline static void map(uint64_t& accum, uint32_t value)
		{
			accum = std::max(std::numeric_limits<uint32_t>::max() - value, (uint32_t)accum);
		}
	};

	compute_ranges_reduction<typename func_resolver<mode, mean_t, min_t, max_t>::type>(
	    first, (size_t)0, min, max);
}

// Worker: for every selected column and every display bucket, fold the valid
// raw values of each split group through policy F and write the tagged,
// rescaled result into _ts_matrix.
// NOTE(review): if max == min the nominal-value branch divides by zero —
// presumably callers guarantee max > min; confirm.
template <typename F>
void Inendi::PVRangeSubSampler::compute_ranges_reduction(size_t first,
                                                         size_t /*last*/,
                                                         size_t min,
                                                         size_t max)
{
	BENCH_START(compute_ranges_reduction);

	// Remove invalid values from selection
	const pvcop::db::selection& valid_sel = _time.get().valid_selection(_sel);

	const auto split_groups =
	    _split ? _split_groups.to_core_array() : pvcop::core::array<pvcop::db::index_t>();

	// Per-group scratch buffers; firstprivate below gives each OpenMP thread
	// its own copy.
	std::vector<uint64_t> accums(_split_count, F::init());
	std::vector<uint64_t> selected_values_counts(_split_count, 0);

	// _timeseries_to_subsample indexes (column x group) pairs; deduplicate to
	// the underlying column indexes.
	std::unordered_set<size_t> columns_to_subsample_set;
	for (size_t t : _timeseries_to_subsample) {
		columns_to_subsample_set.emplace(t / _split_count);
	}
	const std::vector<size_t> columns_to_subsample(columns_to_subsample_set.begin(),
	                                               columns_to_subsample_set.end());

#pragma omp parallel for firstprivate(accums, selected_values_counts)
	for (auto it = columns_to_subsample.begin(); it < columns_to_subsample.end(); ++it) {
		size_t i = *it;
		size_t start = first;
		size_t end = first;
		const pvcop::core::array<value_type>& timeserie = _timeseries[i];
		const pvcop::db::selection& ts_valid_sel = _nraw.column(PVCol(i)).valid_selection(valid_sel);
		// One pass per display bucket; _histogram[j] rows belong to bucket j.
		for (size_t j = 0; j < _histogram.size(); j++) {
			const size_t values_count = _histogram[j];
			end += values_count;

			std::fill(selected_values_counts.begin(), selected_values_counts.end(), 0);
			std::fill(accums.begin(), accums.end(), F::init());

			// Fold every (valid, selected) row of this bucket into its group.
			for (size_t k = start; k < end; k++) {
				auto v = not _sort ? k : _sort[k];
				const size_t group_index = _split ? split_groups[v] : 0;
				uint64_t& accum = accums[group_index];
				if (ts_valid_sel[v]) {
					selected_values_counts[group_index]++;
					F::map(accum, timeserie[v]);
				}
			}

			start = end;

			// Emit the tagged display value for each group.
			for (size_t group_index = 0; group_index < _split_count; group_index++) {
				size_t& selected_values_count = selected_values_counts[group_index];
				const size_t ii = (_split_count * i) + group_index;
				if (selected_values_count == 0) {
					_ts_matrix[ii][j] = no_value; // no value in range
				} else {
					uint64_t& accum = accums[group_index];
					const uint64_t raw_value = F::reduce(accum, selected_values_count);
					if (min != 0 and raw_value < min) { // underflow
						_ts_matrix[ii][j] = underflow_value;
					} else if (raw_value > max) { // overflow
						_ts_matrix[ii][j] = overflow_value;
					} else {
						_ts_matrix[ii][j] =
						    (display_type)((zoom_f(raw_value - min) / (max - min)) *
						                   display_type_max_val); // nominal value
					}
				}
			}
		}
	}

	BENCH_END(compute_ranges_reduction, "compute_ranges_reduction", _time.get().size(),
	          sizeof(uint64_t), _sampling_count, sizeof(uint64_t));
}

} // namespace Inendi

#endif // __PVRANGESUBSAMPLER_H__
threshold.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD % % T H H R R E SS H H O O L D D % % T HHHHH RRRR EEE SSS HHHHH O O L D D % % T H H R R E SS H H O O L D D % % T H H R R EEEEE SSSSS H H OOO LLLLL DDDD % % % % % % MagickCore Image Threshold Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/
#include "magick/studio.h"
#include "magick/property.h"
#include "magick/blob.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/configure.h"
#include "magick/constitute.h"
#include "magick/decorate.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/effect.h"
#include "magick/fx.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/montage.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/resize.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/shear.h"
#include "magick/signature-private.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/transform.h"
#include "magick/xml-tree.h"

/*
  Define declarations.
*/
#define ThresholdsFilename  "thresholds.xml"

/*
  Typedef declarations.

  A ThresholdMap is an ordered-dither matrix loaded from XML (see
  GetThresholdMapFile() below for the parsing/validation rules).
*/
struct _ThresholdMap
{
  char
    *map_id,       /* XML "map" attribute: the map's lookup name */
    *description;  /* human-readable text from the <description> element */

  size_t
    width,         /* columns of the level matrix; must be non-zero */
    height;        /* rows of the level matrix; must be non-zero */

  ssize_t
    divisor,       /* level scale denominator; parser requires divisor >= 2 */
    *levels;       /* width*height levels, each in the range 0..divisor */
};

/*
  Static declarations.
*/

/*
  Built-in XML threshold-map definitions: a 1x1 non-dithering "threshold"
  map and a "checks" checkerboard map.  NOTE(review): presumably used as a
  fallback when the external thresholds.xml cannot be loaded — confirm
  against GetThresholdMap()/ListThresholdMaps.  Also note the "checks" map
  is aliased "2x1" and described as "Checkerboard 2x1" but declares
  width="2" height="2" levels — the 2x2 levels are what the parser reads.
*/
static const char
  *MinimalThresholdMap =
    "<?xml version=\"1.0\"?>"
    "<thresholds>"
    "  <threshold map=\"threshold\" alias=\"1x1\">"
    "    <description>Threshold 1x1 (non-dither)</description>"
    "    <levels width=\"1\" height=\"1\" divisor=\"2\">"
    "        1"
    "    </levels>"
    "  </threshold>"
    "  <threshold map=\"checks\" alias=\"2x1\">"
    "    <description>Checkerboard 2x1 (dither)</description>"
    "    <levels width=\"2\" height=\"2\" divisor=\"3\">"
    "       1 2"
    "       2 1"
    "    </levels>"
    "  </threshold>"
    "</thresholds>";

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A d a p t i v e   T h r e s h o l d   I m a g e                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AdaptiveThresholdImage() selects an individual threshold for each pixel
%  based on the range of intensity values in its local neighborhood.  This
%  allows for thresholding of an image whose global intensity histogram
%  doesn't contain distinctive peaks.
%
%  The format of the AdaptiveThresholdImage method is:
%
%      Image *AdaptiveThresholdImage(const Image *image,
%        const size_t width,const size_t height,
%        const ssize_t offset,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the width of the local neighborhood.
%
%    o height: the height of the local neighborhood.
%
%    o offset: the mean offset.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveThresholdImage(const Image *image,
  const size_t width,const size_t height,const ssize_t offset,
  ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view,
    *threshold_view;

  Image
    *threshold_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  MagickRealType
    number_pixels;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The result is written into a clone; the source image is read-only here.
  */
  threshold_image=CloneImage(image,0,0,MagickTrue,exception);
  if (threshold_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(threshold_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&threshold_image->exception);
      threshold_image=DestroyImage(threshold_image);
      return((Image *) NULL);
    }
  /*
    Local adaptive threshold.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&zero);
  number_pixels=(MagickRealType) (width*height);
  image_view=AcquireVirtualCacheView(image,exception);
  threshold_view=AcquireAuthenticCacheView(threshold_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,threshold_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    MagickPixelPacket
      channel_bias,
      channel_sum;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict r;

    register IndexPacket
      *magick_restrict threshold_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      u,
      v;

    if (status == MagickFalse)
      continue;
    /*
      Read a virtual window of width+columns x height pixels centered on
      this row so the neighborhood is available at the image edges.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
      height/2L,image->columns+width,height,exception);
    q=GetCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    threshold_indexes=GetCacheViewAuthenticIndexQueue(threshold_view);
    /*
      Seed a running window sum; channel_bias holds the window's last
      column so it can be subtracted as the window slides right by one
      pixel per iteration of the x loop below.
    */
    channel_bias=zero;
    channel_sum=zero;
    r=p;
    for (v=0; v < (ssize_t) height; v++)
    {
      for (u=0; u < (ssize_t) width; u++)
      {
        if (u == (ssize_t) (width-1))
          {
            channel_bias.red+=r[u].red;
            channel_bias.green+=r[u].green;
            channel_bias.blue+=r[u].blue;
            channel_bias.opacity+=r[u].opacity;
            if (image->colorspace == CMYKColorspace)
              /* NOTE(review): '=' overwrites rather than accumulates the
                 index channel, unlike the '+=' used for the other
                 channels — looks suspicious, confirm intent. */
              channel_bias.index=(MagickRealType)
                GetPixelIndex(indexes+(r-p)+u);
          }
        channel_sum.red+=r[u].red;
        channel_sum.green+=r[u].green;
        channel_sum.blue+=r[u].blue;
        channel_sum.opacity+=r[u].opacity;
        if (image->colorspace == CMYKColorspace)
          /* NOTE(review): same '=' vs '+=' concern as above. */
          channel_sum.index=(MagickRealType)
            GetPixelIndex(indexes+(r-p)+u);
      }
      r+=image->columns+width;  /* stride of the virtual window */
    }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickPixelPacket
        mean;

      mean=zero;
      r=p;
      /*
        Slide the window right: drop the old bias column, then add the
        incoming right-hand column while recording the new left-hand
        column as the next bias.
      */
      channel_sum.red-=channel_bias.red;
      channel_sum.green-=channel_bias.green;
      channel_sum.blue-=channel_bias.blue;
      channel_sum.opacity-=channel_bias.opacity;
      channel_sum.index-=channel_bias.index;
      channel_bias=zero;
      for (v=0; v < (ssize_t) height; v++)
      {
        channel_bias.red+=r[0].red;
        channel_bias.green+=r[0].green;
        channel_bias.blue+=r[0].blue;
        channel_bias.opacity+=r[0].opacity;
        if (image->colorspace == CMYKColorspace)
          /* NOTE(review): '=' vs '+=' — see comment in the seed loop. */
          channel_bias.index=(MagickRealType) GetPixelIndex(indexes+x+(r-p)+0);
        channel_sum.red+=r[width-1].red;
        channel_sum.green+=r[width-1].green;
        channel_sum.blue+=r[width-1].blue;
        channel_sum.opacity+=r[width-1].opacity;
        if (image->colorspace == CMYKColorspace)
          channel_sum.index=(MagickRealType) GetPixelIndex(indexes+x+(r-p)+
            width-1);
        r+=image->columns+width;
      }
      /*
        Pixel goes black when at or below the local mean plus offset,
        otherwise white.
      */
      mean.red=(MagickRealType) (channel_sum.red/number_pixels+offset);
      mean.green=(MagickRealType) (channel_sum.green/number_pixels+offset);
      mean.blue=(MagickRealType) (channel_sum.blue/number_pixels+offset);
      mean.opacity=(MagickRealType) (channel_sum.opacity/number_pixels+offset);
      if (image->colorspace == CMYKColorspace)
        mean.index=(MagickRealType) (channel_sum.index/number_pixels+offset);
      SetPixelRed(q,((MagickRealType) GetPixelRed(q) <= mean.red) ?
        0 : QuantumRange);
      SetPixelGreen(q,((MagickRealType) GetPixelGreen(q) <= mean.green) ?
        0 : QuantumRange);
      SetPixelBlue(q,((MagickRealType) GetPixelBlue(q) <= mean.blue) ?
        0 : QuantumRange);
      SetPixelOpacity(q,((MagickRealType) GetPixelOpacity(q) <= mean.opacity) ?
        0 : QuantumRange);
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(threshold_indexes+x,(((MagickRealType) GetPixelIndex(
          threshold_indexes+x) <= mean.index) ? 0 : QuantumRange));
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(threshold_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AdaptiveThresholdImage)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  threshold_view=DestroyCacheView(threshold_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    threshold_image=DestroyImage(threshold_image);
  return(threshold_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A u t o T h r e s h o l d I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoThresholdImage() automatically selects a threshold and replaces each
%  pixel in the image with a black pixel if the image intensity is less than
%  the selected threshold otherwise white.
%
%  The format of the AutoThresholdImage method is:
%
%      MagickBooleanType AutoThresholdImage(Image *image,
%        const AutoThresholdMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: The image to auto-threshold.
%
%    o method: choose from Kapur, OTSU, or Triangle.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  KapurThreshold(): maximum-entropy threshold selection.  Returns the
  threshold as a percentage (0..100) of the intensity range, or -1.0 on
  memory-allocation failure.
*/
static double KapurThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
#define MaxIntensity  255

  double
    *black_entropy,
    *cumulative_histogram,
    entropy,
    epsilon,
    maximum_entropy,
    *white_entropy;

  register ssize_t
    i,
    j;

  size_t
    threshold;

  /*
    Compute optimal threshold from the entropy of the histogram.
  */
  cumulative_histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*cumulative_histogram));
  black_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*black_entropy));
  white_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*white_entropy));
  if ((cumulative_histogram == (double *) NULL) ||
      (black_entropy == (double *) NULL) || (white_entropy == (double *) NULL))
    {
      /* Release whichever allocations succeeded before bailing out. */
      if (white_entropy != (double *) NULL)
        white_entropy=(double *) RelinquishMagickMemory(white_entropy);
      if (black_entropy != (double *) NULL)
        black_entropy=(double *) RelinquishMagickMemory(black_entropy);
      if (cumulative_histogram != (double *) NULL)
        cumulative_histogram=(double *)
          RelinquishMagickMemory(cumulative_histogram);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(-1.0);
    }
  /*
    Entropy for black and white parts of the histogram.
  */
  cumulative_histogram[0]=histogram[0];
  for (i=1; i <= MaxIntensity; i++)
    cumulative_histogram[i]=cumulative_histogram[i-1]+histogram[i];
  /* epsilon guards the log()/division against empty histogram partitions. */
  epsilon=MagickMinimumValue;
  for (j=0; j <= MaxIntensity; j++)
  {
    /*
      Black entropy.
    */
    black_entropy[j]=0.0;
    if (cumulative_histogram[j] > epsilon)
      {
        entropy=0.0;
        for (i=0; i <= j; i++)
          if (histogram[i] > epsilon)
            entropy-=histogram[i]/cumulative_histogram[j]*
              log(histogram[i]/cumulative_histogram[j]);
        black_entropy[j]=entropy;
      }
    /*
      White entropy.
    */
    white_entropy[j]=0.0;
    if ((1.0-cumulative_histogram[j]) > epsilon)
      {
        entropy=0.0;
        for (i=j+1; i <= MaxIntensity; i++)
          if (histogram[i] > epsilon)
            entropy-=histogram[i]/(1.0-cumulative_histogram[j])*
              log(histogram[i]/(1.0-cumulative_histogram[j]));
        white_entropy[j]=entropy;
      }
  }
  /*
    Find histogram bin with maximum entropy.
  */
  maximum_entropy=black_entropy[0]+white_entropy[0];
  threshold=0;
  for (j=1; j <= MaxIntensity; j++)
    if ((black_entropy[j]+white_entropy[j]) > maximum_entropy)
      {
        maximum_entropy=black_entropy[j]+white_entropy[j];
        threshold=(size_t) j;
      }
  /*
    Free resources.
  */
  white_entropy=(double *) RelinquishMagickMemory(white_entropy);
  black_entropy=(double *) RelinquishMagickMemory(black_entropy);
  cumulative_histogram=(double *)
    RelinquishMagickMemory(cumulative_histogram);
  return(100.0*threshold/MaxIntensity);
}

/*
  OTSUThreshold(): Otsu's inter-class-variance threshold selection.
  Returns the threshold as a percentage (0..100) of the intensity range,
  or -1.0 on memory-allocation failure.
*/
static double OTSUThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
  double
    max_sigma,
    *myu,
    *omega,
    *probability,
    *sigma,
    threshold;

  register ssize_t
    i;

  /*
    Compute optimal threshold from maximization of inter-class variance.
  */
  myu=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*myu));
  omega=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*omega));
  probability=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*probability));
  sigma=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*sigma));
  if ((myu == (double *) NULL) || (omega == (double *) NULL) ||
      (probability == (double *) NULL) || (sigma == (double *) NULL))
    {
      /* Release whichever allocations succeeded before bailing out. */
      if (sigma != (double *) NULL)
        sigma=(double *) RelinquishMagickMemory(sigma);
      if (probability != (double *) NULL)
        probability=(double *) RelinquishMagickMemory(probability);
      if (omega != (double *) NULL)
        omega=(double *) RelinquishMagickMemory(omega);
      if (myu != (double *) NULL)
        myu=(double *) RelinquishMagickMemory(myu);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(-1.0);
    }
  /*
    Calculate probability density.
  */
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    probability[i]=histogram[i];
  /*
    Generate probability of graylevels and mean value for separation;
    omega[i] is the cumulative probability and myu[i] the cumulative mean
    up to intensity i.
  */
  omega[0]=probability[0];
  myu[0]=0.0;
  for (i=1; i <= (ssize_t) MaxIntensity; i++)
  {
    omega[i]=omega[i-1]+probability[i];
    myu[i]=myu[i-1]+i*probability[i];
  }
  /*
    Sigma maximization: inter-class variance and compute optimal threshold.
  */
  threshold=0;
  max_sigma=0.0;
  for (i=0; i < (ssize_t) MaxIntensity; i++)
  {
    sigma[i]=0.0;
    if ((omega[i] != 0.0) && (omega[i] != 1.0))
      sigma[i]=pow(myu[MaxIntensity]*omega[i]-myu[i],2.0)/(omega[i]*(1.0-
        omega[i]));
    if (sigma[i] > max_sigma)
      {
        max_sigma=sigma[i];
        threshold=(double) i;
      }
  }
  /*
    Free resources.
  */
  myu=(double *) RelinquishMagickMemory(myu);
  omega=(double *) RelinquishMagickMemory(omega);
  probability=(double *) RelinquishMagickMemory(probability);
  sigma=(double *) RelinquishMagickMemory(sigma);
  return(100.0*threshold/MaxIntensity);
}

/*
  TriangleThreshold(): triangle-algorithm threshold selection — the
  threshold is the bin farthest from the line joining the histogram peak
  to its far non-empty end.  Returns the threshold as a percentage
  (0..100) of the intensity range.
*/
static double TriangleThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
  double
    a,
    b,
    c,
    count,
    distance,
    inverse_ratio,
    max_distance,
    segment,
    x1,
    x2,
    y1,
    y2;

  register ssize_t
    i;

  ssize_t
    end,
    max,
    start,
    threshold;

  /*
    Compute optimal threshold with triangle algorithm.
  */
  (void) exception;
  start=0;  /* find start bin, first bin not zero count */
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    if (histogram[i] > 0.0)
      {
        start=i;
        break;
      }
  end=0;  /* find end bin, last bin not zero count */
  for (i=(ssize_t) MaxIntensity; i >= 0; i--)
    if (histogram[i] > 0.0)
      {
        end=i;
        break;
      }
  max=0;  /* find max bin, bin with largest count */
  count=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    if (histogram[i] > count)
      {
        max=i;
        count=histogram[i];
      }
  /*
    Compute threshold at split point: the line runs from the peak
    (x1,y1) toward the longer tail of the histogram.
  */
  x1=(double) max;
  y1=histogram[max];
  x2=(double) end;
  if ((max-start) >= (end-max))
    x2=(double) start;
  y2=0.0;
  /* Line coefficients a*x+b*y+c=0 through (x1,y1) and (x2,y2). */
  a=y1-y2;
  b=x2-x1;
  c=(-1.0)*(a*x1+b*y1);
  /* NOTE(review): the standard point-to-line distance uses
     sqrt(a*a+b*b); the extra c*c term here only rescales all distances
     by a constant factor, so the argmax is unchanged — confirm intent. */
  inverse_ratio=1.0/sqrt(a*a+b*b+c*c);
  threshold=0;
  max_distance=0.0;
  if (x2 == (double) start)
    /* Peak is right of center: search the left tail for the farthest bin. */
    for (i=start; i < max; i++)
    {
      segment=inverse_ratio*(a*i+b*histogram[i]+c);
      distance=sqrt(segment*segment);
      if ((distance > max_distance) && (segment > 0.0))
        {
          threshold=i;
          max_distance=distance;
        }
    }
  else
    /* Peak is left of center: search the right tail. */
    for (i=end; i > max; i--)
    {
      segment=inverse_ratio*(a*i+b*histogram[i]+c);
      distance=sqrt(segment*segment);
      if ((distance > max_distance) && (segment < 0.0))
        {
          threshold=i;
          max_distance=distance;
        }
    }
  return(100.0*threshold/MaxIntensity);
}

MagickExport MagickBooleanType AutoThresholdImage(Image *image,
  const AutoThresholdMethod method,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  char
    property[MagickPathExtent];

  double
    gamma,
    *histogram,
    sum,
    threshold;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Form histogram.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=MagickTrue;
  (void) ResetMagickMemory(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Bin by 8-bit intensity regardless of the build's quantum depth. */
      double intensity = GetPixelIntensity(image,p);
      histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Normalize histogram so the bins form a probability density.
  */
  sum=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    sum+=histogram[i];
  gamma=PerceptibleReciprocal(sum);
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    histogram[i]=gamma*histogram[i];
  /*
    Discover threshold from histogram.
  */
  switch (method)
  {
    case KapurThresholdMethod:
    {
      threshold=KapurThreshold(image,histogram,exception);
      break;
    }
    case OTSUThresholdMethod:
    default:
    {
      threshold=OTSUThreshold(image,histogram,exception);
      break;
    }
    case TriangleThresholdMethod:
    {
      threshold=TriangleThreshold(image,histogram,exception);
      break;
    }
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /* The selectors return a negative value on allocation failure. */
  if (threshold < 0.0)
    status=MagickFalse;
  if (status == MagickFalse)
    return(MagickFalse);
  /*
    Threshold image: record the chosen value as a property, then apply it.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold);
  (void) SetImageProperty(image,"auto-threshold:threshold",property);
  return(BilevelImage(image,QuantumRange*threshold/100.0));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B i l e v e l I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BilevelImage() changes the value of individual pixels based on the
%  intensity of each pixel channel.  The result is a high-contrast image.
%
%  More precisely each channel value of the image is 'thresholded' so that if
%  it is equal to or less than the given value it is set to zero, while any
%  value greater than that given is set to its maximum or QuantumRange.
%
%  This function is what is used to implement the "-threshold" operator for
%  the command line API.
%
%  If the default channel setting is given the image is thresholded using just
%  the gray 'intensity' of the image, rather than the individual channels.
%
%  The format of the BilevelImageChannel method is:
%
%      MagickBooleanType BilevelImage(Image *image,const double threshold)
%      MagickBooleanType BilevelImageChannel(Image *image,
%        const ChannelType channel,const double threshold)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o threshold: define the threshold values.
%
%  Aside: You can get the same results as operator using LevelImageChannels()
%  with the 'threshold' value for both the black_point and the white_point.
%
*/

/* Convenience wrapper: bilevel-threshold all default channels. */
MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold)
{
  MagickBooleanType
    status;

  status=BilevelImageChannel(image,DefaultChannels,threshold);
  return(status);
}

MagickExport MagickBooleanType BilevelImageChannel(Image *image,
  const ChannelType channel,const double threshold)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    Bilevel threshold image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if ((channel & SyncChannels) != 0)
      {
        /*
          Default channel set: threshold on pixel intensity and copy the
          result to red, green, and blue so the pixel stays gray.
        */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,GetPixelIntensity(image,q) <= threshold ? 0 :
            QuantumRange);
          SetPixelGreen(q,GetPixelRed(q));
          SetPixelBlue(q,GetPixelRed(q));
          q++;
        }
      }
    else
      /* Otherwise threshold each requested channel independently. */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        if ((channel & RedChannel) != 0)
          SetPixelRed(q,(MagickRealType) GetPixelRed(q) <= threshold ? 0 :
            QuantumRange);
        if ((channel & GreenChannel) != 0)
          SetPixelGreen(q,(MagickRealType) GetPixelGreen(q) <= threshold ? 0 :
            QuantumRange);
        if ((channel & BlueChannel) != 0)
          SetPixelBlue(q,(MagickRealType) GetPixelBlue(q) <= threshold ? 0 :
            QuantumRange);
        if ((channel & OpacityChannel) != 0)
          {
            if (image->matte == MagickFalse)
              SetPixelOpacity(q,(MagickRealType) GetPixelOpacity(q) <=
                threshold ? 0 : QuantumRange);
            else
              SetPixelOpacity(q,(MagickRealType) GetPixelOpacity(q) <=
                threshold ? OpaqueOpacity : TransparentOpacity);
          }
        if (((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          SetPixelIndex(indexes+x,(MagickRealType) GetPixelIndex(indexes+x) <=
            threshold ? 0 : QuantumRange);
        q++;
      }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BilevelImageChannel)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l a c k T h r e s h o l d I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlackThresholdImage() is like ThresholdImage() but forces all pixels below
%  the threshold into black while leaving all pixels at or above the threshold
%  unchanged.
%
%  The format of the BlackThresholdImage method is:
%
%      MagickBooleanType BlackThresholdImage(Image *image,const char *threshold)
%      MagickBooleanType BlackThresholdImageChannel(Image *image,
%        const ChannelType channel,const char *threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel or channels to be thresholded.
%
%    o threshold: Define the threshold value.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Convenience wrapper: black-threshold all default channels. */
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
  const char *threshold)
{
  MagickBooleanType
    status;

  status=BlackThresholdImageChannel(image,DefaultChannels,threshold,
    &image->exception);
  return(status);
}

MagickExport MagickBooleanType BlackThresholdImageChannel(Image *image,
  const ChannelType channel,const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* A NULL threshold string is a no-op, not an error. */
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Parse the threshold geometry string; channels omitted from the string
    default to the red (first) value.
  */
  GetMagickPixelPacket(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    threshold.green=threshold.red;
  threshold.blue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    threshold.blue=threshold.red;
  threshold.opacity=geometry_info.psi;
  if ((flags & PsiValue) == 0)
    threshold.opacity=threshold.red;
  threshold.index=geometry_info.chi;
  if ((flags & ChiValue) == 0)
    threshold.index=threshold.red;
  if ((flags & PercentValue) != 0)
    {
      /* Percent syntax: scale each value into the quantum range. */
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.opacity*=(MagickRealType) (QuantumRange/100.0);
      threshold.index*=(MagickRealType) (QuantumRange/100.0);
    }
  /* A non-gray threshold on a gray image requires a color colorspace. */
  if ((IsMagickGray(&threshold) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    Black threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Pixels strictly below the threshold become black; others keep
         their value. */
      if (((channel & RedChannel) != 0) &&
          ((MagickRealType) GetPixelRed(q) < threshold.red))
        SetPixelRed(q,0);
      if (((channel & GreenChannel) != 0) &&
          ((MagickRealType) GetPixelGreen(q) < threshold.green))
        SetPixelGreen(q,0);
      if (((channel & BlueChannel) != 0) &&
          ((MagickRealType) GetPixelBlue(q) < threshold.blue))
        SetPixelBlue(q,0);
      if (((channel & OpacityChannel) != 0) &&
          ((MagickRealType) GetPixelOpacity(q) < threshold.opacity))
        SetPixelOpacity(q,0);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          ((MagickRealType) GetPixelIndex(indexes+x) < threshold.index))
        SetPixelIndex(indexes+x,0);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BlackThresholdImageChannel)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C l a m p I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClampImage() set each pixel whose value is below zero to zero and any
%  pixel whose value is above the quantum range to the quantum range (e.g.
%  65535) otherwise the pixel value remains unchanged.
%
%  The format of the ClampImageChannel method is:
%
%      MagickBooleanType ClampImage(Image *image)
%      MagickBooleanType ClampImageChannel(Image *image,
%        const ChannelType channel)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
*/

/* Convenience wrapper: clamp all default channels. */
MagickExport MagickBooleanType ClampImage(Image *image)
{
  MagickBooleanType
    status;

  status=ClampImageChannel(image,DefaultChannels);
  return(status);
}

MagickExport MagickBooleanType ClampImageChannel(Image *image,
  const ChannelType channel)
{
#define ClampImageTag  "Clamp/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /*
        PseudoClass image: clamping the colormap entries suffices — pixel
        data only references the colormap.
      */
      register ssize_t
        i;

      register PixelPacket
        *magick_restrict q;

      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        SetPixelRed(q,ClampPixel((MagickRealType) GetPixelRed(q)));
        SetPixelGreen(q,ClampPixel((MagickRealType) GetPixelGreen(q)));
        SetPixelBlue(q,ClampPixel((MagickRealType) GetPixelBlue(q)));
        SetPixelOpacity(q,ClampPixel((MagickRealType) GetPixelOpacity(q)));
        q++;
      }
      return(SyncImage(image));
    }
  /*
    Clamp image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampPixel((MagickRealType) GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampPixel((MagickRealType) GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampPixel((MagickRealType) GetPixelBlue(q)));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampPixel((MagickRealType) GetPixelOpacity(q)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampPixel((MagickRealType) GetPixelIndex(
          indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ClampImageChannel)
#endif
        proceed=SetImageProgress(image,ClampImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s t r o y T h r e s h o l d M a p                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyThresholdMap() de-allocate
the given ThresholdMap
%
%  The format of the DestroyThresholdMap method is:
%
%      ThresholdMap *DestroyThresholdMap(Threshold *map)
%
%  A description of each parameter follows.
%
%    o map: Pointer to the Threshold map to destroy
%
*/
MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
{
  assert(map != (ThresholdMap *) NULL);
  /* Free each owned member before the map structure itself; unused
     members are NULL and safely skipped (a partially-built map from
     GetThresholdMapFile() may reach here). */
  if (map->map_id != (char *) NULL)
    map->map_id=DestroyString(map->map_id);
  if (map->description != (char *) NULL)
    map->description=DestroyString(map->description);
  if (map->levels != (ssize_t *) NULL)
    map->levels=(ssize_t *) RelinquishMagickMemory(map->levels);
  map=(ThresholdMap *) RelinquishMagickMemory(map);
  return(map);  /* always NULL, for the caller's pointer reset idiom */
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     G e t T h r e s h o l d M a p F i l e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetThresholdMapFile() look for a given threshold map name or alias in the
%  given XML file data, and return the allocated the map when found.
%
%  The format of the GetThresholdMapFile method is:
%
%      ThresholdMap *GetThresholdMap(const char *xml,const char *filename,
%        const char *map_id,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o xml: The threshold map list in XML format.
%
%    o filename: The threshold map XML filename.
%
%    o map_id: ID of the map to look for in XML list.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ThresholdMap *GetThresholdMapFile(const char *xml,
  const char *filename,const char *map_id,ExceptionInfo *exception)
{
  const char
    *attribute,
    *content;

  double
    value;

  ThresholdMap
    *map;

  XMLTreeInfo
    *description,
    *levels,
    *threshold,
    *thresholds;

  map = (ThresholdMap *) NULL;
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if ( thresholds == (XMLTreeInfo *) NULL )
    return(map);
  /*
    Walk the <threshold> elements looking for a matching "map" or "alias"
    attribute; a failed lookup is not an exception, it simply returns NULL.
  */
  for (threshold = GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold = GetNextXMLTreeTag(threshold) )
  {
    attribute=GetXMLTreeAttribute(threshold, "map");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
    attribute=GetXMLTreeAttribute(threshold, "alias");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
  }
  if (threshold == (XMLTreeInfo *) NULL)
    {
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  description=GetXMLTreeChild(threshold,"description");
  if (description == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<description>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  levels=GetXMLTreeChild(threshold,"levels");
  if (levels == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /* The map has been found -- allocate a Threshold Map to return */
  map=(ThresholdMap *) AcquireMagickMemory(sizeof(ThresholdMap));
  if (map == (ThresholdMap *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  map->map_id=(char *) NULL;
  map->description=(char *) NULL;
  map->levels=(ssize_t *) NULL;
  /*
    Assign basic attributes.
  */
  attribute=GetXMLTreeAttribute(threshold,"map");
  if (attribute != (char *) NULL)
    map->map_id=ConstantString(attribute);
  content=GetXMLTreeContent(description);
  if (content != (char *) NULL)
    map->description=ConstantString(content);
  attribute=GetXMLTreeAttribute(levels,"width");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->width=StringToUnsignedLong(attribute);
  if (map->width == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels width>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"height");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels height>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->height=StringToUnsignedLong(attribute);
  if (map->height == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels height>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels, "divisor");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels divisor>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  /* divisor must be at least 2 so the dither has at least one threshold */
  map->divisor=(ssize_t) StringToLong(attribute);
  if (map->divisor < 2)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels divisor>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  /*
    Allocate threshold levels array.
  */
  content=GetXMLTreeContent(levels);
  if (content == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingContent", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height*
    sizeof(*map->levels));
  if (map->levels == (ssize_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  {
    char
      *p;

    register ssize_t
      i;

    /*
      Parse levels into integer array.
    */
    for (i=0; i< (ssize_t) (map->width*map->height); i++)
    {
      map->levels[i]=(ssize_t) strtol(content,&p,10);
      if (p == content)
        {
          /* strtol consumed nothing: element content ran out of numbers */
          (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
            "XmlInvalidContent", "<level> too few values, map \"%s\"", map_id);
          thresholds=DestroyXMLTree(thresholds);
          map=DestroyThresholdMap(map);
          return(map);
        }
      if ((map->levels[i] < 0) || (map->levels[i] > map->divisor))
        {
          (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
            "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"",
            (double) map->levels[i],map_id);
          thresholds=DestroyXMLTree(thresholds);
          map=DestroyThresholdMap(map);
          return(map);
        }
      content=p;
    }
    /* one extra parse: any trailing number means the list was too long */
    value=(double) strtol(content,&p,10);
    (void) value;
    if (p != content)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> too many values, map \"%s\"", map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
  }
  thresholds=DestroyXMLTree(thresholds);
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  G e t T h r e s h o l d M a p                                              %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetThresholdMap() load and search one or more threshold map files for
%  a map matching the given name or alias.
%
%  The format of the GetThresholdMap method is:
%
%      ThresholdMap *GetThresholdMap(const char *map_id,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o map_id:  ID of the map to look for.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ThresholdMap *GetThresholdMap(const char *map_id,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  ThresholdMap
    *map;

  /*
    The built-in minimal map list is consulted first; only when it does not
    contain the requested id are the configured threshold XML files searched.
  */
  map=GetThresholdMapFile(MinimalThresholdMap,"built-in",map_id,exception);
  if (map == (ThresholdMap *) NULL)
    {
      options=GetConfigureOptions(ThresholdsFilename,exception);
      for (option=(const StringInfo *) GetNextValueInLinkedList(options);
           option != (const StringInfo *) NULL;
           option=(const StringInfo *) GetNextValueInLinkedList(options))
      {
        map=GetThresholdMapFile((const char *) GetStringInfoDatum(option),
          GetStringInfoPath(option),map_id,exception);
        if (map != (ThresholdMap *) NULL)
          break;
      }
      options=DestroyConfigureOptions(options);
    }
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+  L i s t T h r e s h o l d M a p F i l e                                    %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ListThresholdMapFile() lists the threshold maps and their descriptions
%  in the given XML file data.
%
%  The format of the ListThresholdMaps method is:
%
%      MagickBooleanType ListThresholdMaps(FILE *file,const char*xml,
%        const char *filename,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file:  An pointer to the output FILE.
%
%    o xml:  The threshold map list in XML format.
%
%    o filename:  The threshold map XML filename.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
  const char *filename,ExceptionInfo *exception)
{
  XMLTreeInfo
    *thresholds,
    *threshold,
    *description;

  const char
    *map,
    *alias,
    *content;

  assert( xml != (char *) NULL );
  assert( file != (FILE *) NULL );
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if ( thresholds == (XMLTreeInfo *) NULL )
    return(MagickFalse);
  (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
  (void) FormatLocaleFile(file,
    "----------------------------------------------------\n");
  /* one table row per <threshold> element; "map" is mandatory */
  for( threshold = GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold = GetNextXMLTreeTag(threshold) )
  {
    map = GetXMLTreeAttribute(threshold, "map");
    if (map == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingAttribute", "<map>");
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    alias = GetXMLTreeAttribute(threshold, "alias");
    /* alias is optional, no if test needed */
    description=GetXMLTreeChild(threshold,"description");
    if ( description == (XMLTreeInfo *) NULL )
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingElement", "<description>, map \"%s\"", map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    content=GetXMLTreeContent(description);
    if ( content == (char *) NULL )
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingContent", "<description>, map \"%s\"", map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "",
      content);
  }
  thresholds=DestroyXMLTree(thresholds);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  L i s t T h r e s h o l d M a p s                                          %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ListThresholdMaps() lists the threshold maps and their descriptions
%  as defined by "threshold.xml" to a file.
%
%  The format of the ListThresholdMaps method is:
%
%      MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file:  A pointer to the output FILE.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  MagickStatusType
    status;

  status=MagickTrue;
  if (file == (FILE *) NULL)
    file=stdout;
  options=GetConfigureOptions(ThresholdsFilename,exception);
  (void) FormatLocaleFile(file,
    "\n Threshold Maps for Ordered Dither Operations\n");
  /* list every configured threshold XML file; AND the per-file results */
  option=(const StringInfo *) GetNextValueInLinkedList(options);
  while (option != (const StringInfo *) NULL)
  {
    (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option));
    status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),exception);
    option=(const StringInfo *) GetNextValueInLinkedList(options);
  }
  options=DestroyConfigureOptions(options);
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  O r d e r e d D i t h e r I m a g e                                        %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OrderedDitherImage() uses the ordered dithering technique of reducing color
%  images to monochrome using positional information to retain as much
%  information as possible.
% % WARNING: This function is deprecated, and is now just a call to % the more more powerful OrderedPosterizeImage(); function. % % The format of the OrderedDitherImage method is: % % MagickBooleanType OrderedDitherImage(Image *image) % MagickBooleanType OrderedDitherImageChannel(Image *image, % const ChannelType channel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel or channels to be thresholded. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType OrderedDitherImage(Image *image) { MagickBooleanType status; status=OrderedDitherImageChannel(image,DefaultChannels,&image->exception); return(status); } MagickExport MagickBooleanType OrderedDitherImageChannel(Image *image, const ChannelType channel,ExceptionInfo *exception) { MagickBooleanType status; /* Call the augumented function OrderedPosterizeImage() */ status=OrderedPosterizeImageChannel(image,channel,"o8x8",exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O r d e r e d P o s t e r i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OrderedPosterizeImage() will perform a ordered dither based on a number % of pre-defined dithering threshold maps, but over multiple intensity % levels, which can be different for different channels, according to the % input argument. % % The format of the OrderedPosterizeImage method is: % % MagickBooleanType OrderedPosterizeImage(Image *image, % const char *threshold_map,ExceptionInfo *exception) % MagickBooleanType OrderedPosterizeImageChannel(Image *image, % const ChannelType channel,const char *threshold_map, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel or channels to be thresholded. 
%
%    o threshold_map: A string containing the name of the threshold dither
%      map to use, followed by zero or more numbers representing the number
%      of color levels to dither between.
%
%      Any level number less than 2 will be equivalent to 2, and means only
%      binary dithering will be applied to each color channel.
%
%      No numbers also means a 2 level (bitmap) dither will be applied to all
%      channels, while a single number is the number of levels applied to each
%      channel in sequence.  More numbers will be applied in turn to each of
%      the color channels.
%
%      For example: "o3x3,6" will generate a 6 level posterization of the
%      image with an ordered 3x3 diffused pixel dither being applied between
%      each level.  While checker,8,8,4 will produce a 332 colormapped image
%      with only a single checkerboard hash pattern (50% grey) between each
%      color level, to basically double the number of color levels with
%      a bare minimum of dithering.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType OrderedPosterizeImage(Image *image, const char *threshold_map,ExceptionInfo *exception) { MagickBooleanType status; status=OrderedPosterizeImageChannel(image,DefaultChannels,threshold_map, exception); return(status); } MagickExport MagickBooleanType OrderedPosterizeImageChannel(Image *image, const ChannelType channel,const char *threshold_map,ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CacheView *image_view; LongPixelPacket levels; MagickBooleanType status; MagickOffsetType progress; ssize_t y; ThresholdMap *map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (threshold_map == (const char *) NULL) return(MagickTrue); { char token[MaxTextExtent]; register const char *p; p=(char *)threshold_map; while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) && (*p != '\0')) p++; threshold_map=p; while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) && (*p != '\0')) { if ((p-threshold_map) >= (MaxTextExtent-1)) break; token[p-threshold_map] = *p; p++; } token[p-threshold_map] = '\0'; map = GetThresholdMap(token, exception); if ( map == (ThresholdMap *) NULL ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","ordered-dither",threshold_map); return(MagickFalse); } } /* Set channel levels from extra comma separated arguments Default to 2, the single value given, or individual channel values */ #if 1 { /* parse directly as a comma separated list of integers */ char *p; p = strchr((char *) threshold_map,','); if ( p != (char *) NULL && isdigit((int) ((unsigned char) *(++p))) ) levels.index = (unsigned int) strtoul(p, &p, 10); else levels.index = 2; levels.red = ((channel & RedChannel ) != 0) ? 
levels.index : 0; levels.green = ((channel & GreenChannel) != 0) ? levels.index : 0; levels.blue = ((channel & BlueChannel) != 0) ? levels.index : 0; levels.opacity = ((channel & OpacityChannel) != 0) ? levels.index : 0; levels.index = ((channel & IndexChannel) != 0 && (image->colorspace == CMYKColorspace)) ? levels.index : 0; /* if more than a single number, each channel has a separate value */ if ( p != (char *) NULL && *p == ',' ) { p=strchr((char *) threshold_map,','); p++; if ((channel & RedChannel) != 0) levels.red = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++); if ((channel & GreenChannel) != 0) levels.green = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++); if ((channel & BlueChannel) != 0) levels.blue = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++); if ((channel & IndexChannel) != 0 && image->colorspace == CMYKColorspace) levels.index=(unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++); if ((channel & OpacityChannel) != 0) levels.opacity = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++); } } #else /* Parse level values as a geometry */ /* This difficult! * How to map GeometryInfo structure elements into * LongPixelPacket structure elements, but according to channel? * Note the channels list may skip elements!!!! * EG -channel BA -ordered-dither map,2,3 * will need to map g.rho -> l.blue, and g.sigma -> l.opacity * A simpler way is needed, probably converting geometry to a temporary * array, then using channel to advance the index into ssize_t pixel packet. */ #endif #if 0 printf("DEBUG levels r=%u g=%u b=%u a=%u i=%u\n", levels.red, levels.green, levels.blue, levels.opacity, levels.index); #endif { /* Do the posterized ordered dithering of the image */ ssize_t d; /* d = number of psuedo-level divisions added between color levels */ d = map->divisor-1; /* reduce levels to levels - 1 */ levels.red = levels.red ? levels.red-1 : 0; levels.green = levels.green ? 
levels.green-1 : 0; levels.blue = levels.blue ? levels.blue-1 : 0; levels.opacity = levels.opacity ? levels.opacity-1 : 0; levels.index = levels.index ? levels.index-1 : 0; if (SetImageStorageClass(image,DirectClass) == MagickFalse) { InheritException(exception,&image->exception); return(MagickFalse); } status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t threshold, t, l; /* Figure out the dither threshold for this pixel This must be a integer from 1 to map->divisor-1 */ threshold = map->levels[(x%map->width) +map->width*(y%map->height)]; /* Dither each channel in the image as appropriate Notes on the integer Math... total number of divisions = (levels-1)*(divisor-1)+1) t1 = this colors psuedo_level = q->red * total_divisions / (QuantumRange+1) l = posterization level 0..levels t = dither threshold level 0..divisor-1 NB: 0 only on last Each color_level is of size QuantumRange / (levels-1) NB: All input levels and divisor are already had 1 subtracted Opacity is inverted so 'off' represents transparent. 
*/ if (levels.red) { t = (ssize_t) (QuantumScale*GetPixelRed(q)*(levels.red*d+1)); l = t/d; t = t-l*d; SetPixelRed(q,ClampToQuantum((MagickRealType) ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.red))); } if (levels.green) { t = (ssize_t) (QuantumScale*GetPixelGreen(q)* (levels.green*d+1)); l = t/d; t = t-l*d; SetPixelGreen(q,ClampToQuantum((MagickRealType) ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.green))); } if (levels.blue) { t = (ssize_t) (QuantumScale*GetPixelBlue(q)* (levels.blue*d+1)); l = t/d; t = t-l*d; SetPixelBlue(q,ClampToQuantum((MagickRealType) ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.blue))); } if (levels.opacity) { t = (ssize_t) ((1.0-QuantumScale*GetPixelOpacity(q))* (levels.opacity*d+1)); l = t/d; t = t-l*d; SetPixelOpacity(q,ClampToQuantum((MagickRealType) ((1.0-l-(t >= threshold))*(MagickRealType) QuantumRange/ levels.opacity))); } if (levels.index) { t = (ssize_t) (QuantumScale*GetPixelIndex(indexes+x)* (levels.index*d+1)); l = t/d; t = t-l*d; SetPixelIndex(indexes+x,ClampToQuantum((MagickRealType) ((l+ (t>=threshold))*(MagickRealType) QuantumRange/levels.index))); } q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_OrderedPosterizeImageChannel) #endif proceed=SetImageProgress(image,DitherImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); } map=DestroyThresholdMap(map); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P e r c e p t i b l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PerceptibleImage() set each pixel whose value is less than |epsilon| to % epsilon or -epsilon (whichever 
is closer) otherwise the pixel value remains
%  unchanged.
%
%  The format of the PerceptibleImageChannel method is:
%
%      MagickBooleanType PerceptibleImage(Image *image,const double epsilon)
%      MagickBooleanType PerceptibleImageChannel(Image *image,
%        const ChannelType channel,const double epsilon)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
*/
static inline Quantum PerceptibleThreshold(const Quantum quantum,
  const double epsilon)
{
  double
    sign;

  /*
    Clamp |quantum| up to epsilon, preserving sign.  NOTE(review): for
    integer Quantum builds quantum is unsigned so sign is always 1.0; the
    negative branch only matters for HDRI (floating-point Quantum) builds.
  */
  sign=(double) quantum < 0.0 ? -1.0 : 1.0;
  if ((sign*quantum) >= epsilon)
    return(quantum);
  return((Quantum) (sign*epsilon));
}

MagickExport MagickBooleanType PerceptibleImage(Image *image,
  const double epsilon)
{
  MagickBooleanType
    status;

  status=PerceptibleImageChannel(image,DefaultChannels,epsilon);
  return(status);
}

MagickExport MagickBooleanType PerceptibleImageChannel(Image *image,
  const ChannelType channel,const double epsilon)
{
#define PerceptibleImageTag  "Perceptible/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      register PixelPacket
        *magick_restrict q;

      /* PseudoClass: thresholding the colormap entries suffices */
      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        SetPixelRed(q,PerceptibleThreshold(GetPixelRed(q),epsilon));
        SetPixelGreen(q,PerceptibleThreshold(GetPixelGreen(q),epsilon));
        SetPixelBlue(q,PerceptibleThreshold(GetPixelBlue(q),epsilon));
        SetPixelOpacity(q,PerceptibleThreshold(GetPixelOpacity(q),epsilon));
        q++;
      }
      return(SyncImage(image));
    }
  /*
    Perceptible image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,PerceptibleThreshold(GetPixelRed(q),epsilon));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,PerceptibleThreshold(GetPixelGreen(q),epsilon));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,PerceptibleThreshold(GetPixelBlue(q),epsilon));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,PerceptibleThreshold(GetPixelOpacity(q),epsilon));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,PerceptibleThreshold(GetPixelIndex(indexes+x),
          epsilon));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_PerceptibleImageChannel)
#endif
        proceed=SetImageProgress(image,PerceptibleImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  R a n d o m T h r e s h o l d I m a g e                                    %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
RandomThresholdImage() changes the value of individual pixels based on the
%  intensity of each pixel compared to a random threshold.  The result is a
%  low-contrast, two color image.
%
%  The format of the RandomThresholdImage method is:
%
%      MagickBooleanType RandomThresholdImageChannel(Image *image,
%        const char *thresholds,ExceptionInfo *exception)
%      MagickBooleanType RandomThresholdImageChannel(Image *image,
%        const ChannelType channel,const char *thresholds,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel or channels to be thresholded.
%
%    o thresholds: a geometry string containing low,high thresholds.  If the
%      string contains 2x2, 3x3, or 4x4, an ordered dither of order 2, 3, or
%      4 is performed instead.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RandomThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=RandomThresholdImageChannel(image,DefaultChannels,thresholds,
    exception);
  return(status);
}

MagickExport MagickBooleanType RandomThresholdImageChannel(Image *image,
  const ChannelType channel,const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    threshold;

  MagickRealType
    min_threshold,
    max_threshold;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  GetMagickPixelPacket(image,&threshold);
  min_threshold=0.0;
  max_threshold=(MagickRealType) QuantumRange;
  flags=ParseGeometry(thresholds,&geometry_info);
  min_threshold=geometry_info.rho;
  max_threshold=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    max_threshold=min_threshold;
  if (strchr(thresholds,'%') != (char *) NULL)
    {
      /* percent form: scale both thresholds into quantum range */
      max_threshold*=(MagickRealType) (0.01*QuantumRange);
      min_threshold*=(MagickRealType) (0.01*QuantumRange);
    }
  else
    if (((max_threshold == min_threshold) || (max_threshold == 1)) &&
        (min_threshold <= 8))
      {
        /*
          Backward Compatibility -- ordered-dither -- IM v 6.2.9-6.
        */
        status=OrderedPosterizeImageChannel(image,channel,thresholds,exception);
        return(status);
      }
  /*
    Random threshold image.
  */
  status=MagickTrue;
  progress=0;
  if (channel == CompositeChannels)
    {
      /* composite channels: threshold on intensity into a 2-color colormap */
      if (AcquireImageColormap(image,2) == MagickFalse)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      random_info=AcquireRandomInfoThreadSet();
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static,4) shared(progress,status) \
        magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        MagickBooleanType
          sync;

        register IndexPacket
          *magick_restrict indexes;

        register ssize_t
          x;

        register PixelPacket
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          IndexPacket
            index;

          MagickRealType
            intensity;

          /*
            NOTE(review): 'threshold' is function-scope and written here by
            every OpenMP thread -- potential data race; confirm against
            upstream whether it should be thread-private.
          */
          intensity=GetPixelIntensity(image,q);
          if (intensity < min_threshold)
            threshold.index=min_threshold;
          else
            if (intensity > max_threshold)
              threshold.index=max_threshold;
            else
              threshold.index=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
          index=(IndexPacket) (intensity <= threshold.index ? 0 : 1);
          SetPixelIndex(indexes+x,index);
          SetPixelRGBO(q,image->colormap+(ssize_t) index);
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_RandomThresholdImageChannel)
#endif
            proceed=SetImageProgress(image,ThresholdImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      random_info=DestroyRandomInfoThreadSet(random_info);
      return(status);
    }
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(MagickFalse);
    }
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* pick a per-channel threshold: clamped, or random inside the band */
      if ((channel & RedChannel) != 0)
        {
          if ((MagickRealType) GetPixelRed(q) < min_threshold)
            threshold.red=min_threshold;
          else
            if ((MagickRealType) GetPixelRed(q) > max_threshold)
              threshold.red=max_threshold;
            else
              threshold.red=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if ((channel & GreenChannel) != 0)
        {
          if ((MagickRealType) GetPixelGreen(q) < min_threshold)
            threshold.green=min_threshold;
          else
            if ((MagickRealType) GetPixelGreen(q) > max_threshold)
              threshold.green=max_threshold;
            else
              threshold.green=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if ((channel & BlueChannel) != 0)
        {
          if ((MagickRealType) GetPixelBlue(q) < min_threshold)
            threshold.blue=min_threshold;
          else
            if ((MagickRealType) GetPixelBlue(q) > max_threshold)
              threshold.blue=max_threshold;
            else
              threshold.blue=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if ((channel & OpacityChannel) != 0)
        {
          if ((MagickRealType) GetPixelOpacity(q) < min_threshold)
            threshold.opacity=min_threshold;
          else
            if ((MagickRealType) GetPixelOpacity(q) > max_threshold)
              threshold.opacity=max_threshold;
            else
              threshold.opacity=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          if ((MagickRealType) GetPixelIndex(indexes+x) < min_threshold)
            threshold.index=min_threshold;
          else
            if ((MagickRealType) GetPixelIndex(indexes+x) > max_threshold)
              threshold.index=max_threshold;
            else
              threshold.index=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      /* binarize each requested channel against its chosen threshold */
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,(MagickRealType) GetPixelRed(q) <= threshold.red ? 0 :
          QuantumRange);
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,(MagickRealType) GetPixelGreen(q) <= threshold.green ?
          0 : QuantumRange);
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,(MagickRealType) GetPixelBlue(q) <= threshold.blue ? 0 :
          QuantumRange);
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,(MagickRealType) GetPixelOpacity(q) <=
          threshold.opacity ? 0 : QuantumRange);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,(MagickRealType) GetPixelIndex(indexes+x) <=
          threshold.index ? 0 : QuantumRange);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RandomThresholdImageChannel)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  W h i t e T h r e s h o l d I m a g e                                      %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WhiteThresholdImage() is like ThresholdImage() but forces all pixels above
%  the threshold into white while leaving all pixels at or below the threshold
%  unchanged.
%
%  The format of the WhiteThresholdImage method is:
%
%      MagickBooleanType WhiteThresholdImage(Image *image,const char *threshold)
%      MagickBooleanType WhiteThresholdImageChannel(Image *image,
%        const ChannelType channel,const char *threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel or channels to be thresholded.
%
%    o threshold: Define the threshold value.
%
%    o exception: return any errors or warnings in this structure.
% */

/*
  WhiteThresholdImage(): convenience wrapper that thresholds all default
  channels, reporting errors via image->exception.
*/
MagickExport MagickBooleanType WhiteThresholdImage(Image *image,
  const char *threshold)
{
  MagickBooleanType
    status;

  status=WhiteThresholdImageChannel(image,DefaultChannels,threshold,
    &image->exception);
  return(status);
}

/*
  WhiteThresholdImageChannel(): force every pixel component (of the selected
  channels) that lies strictly above its threshold to QuantumRange (white);
  pixels at or below the threshold are left unchanged.

  The threshold string is parsed as a geometry "red[xgreen[xblue[xopacity]]]";
  any component not given defaults to the red value.  A trailing '%' scales
  all thresholds from percentages to quantum units.

  Returns MagickTrue on success; MagickTrue is also returned (as a no-op)
  when thresholds is NULL.
*/
MagickExport MagickBooleanType WhiteThresholdImageChannel(Image *image,
  const ChannelType channel,const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);  /* no threshold given: nothing to do */
  /* pixels are modified in place, so the image must be DirectClass */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  flags=ParseGeometry(thresholds,&geometry_info);
  GetMagickPixelPacket(image,&threshold);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    threshold.green=threshold.red;  /* unspecified components track red */
  threshold.blue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    threshold.blue=threshold.red;
  threshold.opacity=geometry_info.psi;
  if ((flags & PsiValue) == 0)
    threshold.opacity=threshold.red;
  threshold.index=geometry_info.chi;
  if ((flags & ChiValue) == 0)
    threshold.index=threshold.red;
  if ((flags & PercentValue) != 0)
    {
      /* a '%' suffix scales percentages to the quantum range */
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.opacity*=(MagickRealType) (QuantumRange/100.0);
      threshold.index*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    A non-gray threshold on a gray image needs per-channel processing:
    promote the image to sRGB first.
  */
  if ((IsMagickGray(&threshold) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    White threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /* another row already failed: skip remaining rows cheaply */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (((channel & RedChannel) != 0) &&
          ((MagickRealType) GetPixelRed(q) > threshold.red))
        SetPixelRed(q,QuantumRange);
      if (((channel & GreenChannel) != 0) &&
          ((MagickRealType) GetPixelGreen(q) > threshold.green))
        SetPixelGreen(q,QuantumRange);
      if (((channel & BlueChannel) != 0) &&
          ((MagickRealType) GetPixelBlue(q) > threshold.blue))
        SetPixelBlue(q,QuantumRange);
      if (((channel & OpacityChannel) != 0) &&
          ((MagickRealType) GetPixelOpacity(q) > threshold.opacity))
        SetPixelOpacity(q,QuantumRange);
      /* index channel only exists for CMYK images */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          ((MagickRealType) GetPixelIndex(indexes+x)) > threshold.index)
        SetPixelIndex(indexes+x,QuantumRange);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress counter is shared across threads; serialize the update */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_WhiteThresholdImageChannel)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/* hybrid-hello.c */
#include <stdio.h> #include <mpi.h> #include <omp.h> int main(int argc, char *argv[]) { int my_id, omp_rank; int provided, required=MPI_THREAD_FUNNELED; MPI_Init_thread(&argc, &argv, required, &provided); MPI_Comm_rank(MPI_COMM_WORLD, &my_id); #pragma omp parallel private(omp_rank) { omp_rank = omp_get_thread_num(); printf("I'm thread %d in process %d\n", omp_rank, my_id); } MPI_Finalize(); }
/* GB_emult_02.c */
//------------------------------------------------------------------------------ // GB_emult_02: C = A.*B where A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // C = A.*B where A is sparse/hyper and B is bitmap/full constructs C with // the same sparsity structure as A. This method can also be called with // the two input matrices swapped, with flipxy true, to handle the case // where A is bitmap/full and B is sparse/hyper. // When no mask is present, or the mask is applied later, this method handles // the following cases: // ------------------------------------------ // C = A .* B // ------------------------------------------ // sparse . sparse bitmap // sparse . sparse full // sparse . bitmap sparse // sparse . full sparse // If M is sparse/hyper and complemented, it is not passed here: // ------------------------------------------ // C <!M>= A .* B // ------------------------------------------ // sparse sparse sparse bitmap (mask later) // sparse sparse sparse full (mask later) // sparse sparse bitmap sparse (mask later) // sparse sparse full sparse (mask later) // If M is present, it is bitmap/full: // ------------------------------------------ // C <M> = A .* B // ------------------------------------------ // sparse bitmap sparse bitmap // sparse bitmap sparse full // sparse bitmap bitmap sparse // sparse bitmap full sparse // ------------------------------------------ // C <M> = A .* B // ------------------------------------------ // sparse full sparse bitmap // sparse full sparse full // sparse full bitmap sparse // sparse full full sparse // ------------------------------------------ // C <!M> = A .* B // ------------------------------------------ // sparse bitmap sparse bitmap // sparse 
 bitmap      sparse          full
//      sparse  bitmap      bitmap          sparse
//      sparse  bitmap      full            sparse

//      ------------------------------------------
//      C       <!M> =      A       .*      B
//      ------------------------------------------
//      sparse  full        sparse          bitmap
//      sparse  full        sparse          full
//      sparse  full        bitmap          sparse
//      sparse  full        full            sparse

#include "GB_ewise.h"
#include "GB_emult.h"
#include "GB_binop.h"
#include "GB_unused.h"
#ifndef GBCOMPACT
#include "GB_binop__include.h"
#endif

// free workspace only (the Wfirst/Wlast/Cp_kfirst block and the A slicing)
#define GB_FREE_WORKSPACE                   \
{                                           \
    GB_WERK_POP (Work, int64_t) ;           \
    GB_WERK_POP (A_ek_slicing, int64_t) ;   \
}

// free everything, including the output C, on an error path
#define GB_FREE_ALL                         \
{                                           \
    GB_FREE_WORKSPACE ;                     \
    GB_phbix_free (C) ;                     \
}

GrB_Info GB_emult_02        // C=A.*B when A is sparse/hyper, B bitmap/full
(
    GrB_Matrix C,           // output matrix, static header
    const GrB_Type ctype,   // type of output matrix C
    const bool C_is_csc,    // format of output matrix C
    const GrB_Matrix M,     // optional mask, unused if NULL
    const bool Mask_struct, // if true, use the only structure of M
    const bool Mask_comp,   // if true, use !M
    const GrB_Matrix A,     // input A matrix (sparse/hyper)
    const GrB_Matrix B,     // input B matrix (bitmap/full)
    GrB_BinaryOp op,        // op to perform C = op (A,B)
    bool flipxy,            // if true use fmult(y,x) else fmult(x,y)
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    ASSERT (C != NULL && (C->static_header || GBNSTATIC)) ;

    ASSERT_MATRIX_OK_OR_NULL (M, "M for emult_02", GB0) ;
    ASSERT_MATRIX_OK (A, "A for emult_02", GB0) ;
    ASSERT_MATRIX_OK (B, "B for emult_02", GB0) ;
    ASSERT_BINARYOP_OK (op, "op for emult_02", GB0) ;
    ASSERT_TYPE_OK (ctype, "ctype for emult_02", GB0) ;

    // A must be sparse or hypersparse; jumbled is tolerated, pending work
    // and zombies are not
    ASSERT (GB_IS_SPARSE (A) || GB_IS_HYPERSPARSE (A)) ;
    ASSERT (!GB_PENDING (A)) ;
    ASSERT (GB_JUMBLED_OK (A)) ;
    ASSERT (!GB_ZOMBIES (A)) ;

    ASSERT (GB_IS_BITMAP (B) || GB_IS_FULL (B)) ;
    ASSERT (M == NULL || GB_IS_BITMAP (B) || GB_IS_FULL (B)) ;

    // C takes the sparsity structure of A
    int C_sparsity = GB_sparsity (A) ;

    if (M == NULL)
    {
        GBURBLE ("emult_02:(%s=%s.*%s)",
            GB_sparsity_char (C_sparsity),
            GB_sparsity_char_matrix (A),
            GB_sparsity_char_matrix (B)) ;
    }
    else
    {
        GBURBLE ("emult_02:(%s<%s%s%s>=%s.*%s) ",
            GB_sparsity_char (C_sparsity),
            Mask_comp ? "!" : "",
            GB_sparsity_char_matrix (M),
            Mask_struct ? ",struct" : "",
            GB_sparsity_char_matrix (A),
            GB_sparsity_char_matrix (B)) ;
    }

    //--------------------------------------------------------------------------
    // revise the operator to handle flipxy
    //--------------------------------------------------------------------------

    // Replace the ANY operator with SECOND.  ANY and SECOND give the same
    // result if flipxy is false.  However, SECOND is changed to FIRST if
    // flipxy is true.  This ensures that the results do not depend on the
    // sparsity structures of A and B.

    if (op->opcode == GB_ANY_binop_code)
    {
        switch (op->xtype->code)
        {
            case GB_BOOL_code   : op = GrB_SECOND_BOOL   ; break ;
            case GB_INT8_code   : op = GrB_SECOND_INT8   ; break ;
            case GB_INT16_code  : op = GrB_SECOND_INT16  ; break ;
            case GB_INT32_code  : op = GrB_SECOND_INT32  ; break ;
            case GB_INT64_code  : op = GrB_SECOND_INT64  ; break ;
            case GB_UINT8_code  : op = GrB_SECOND_UINT8  ; break ;
            case GB_UINT16_code : op = GrB_SECOND_UINT16 ; break ;
            case GB_UINT32_code : op = GrB_SECOND_UINT32 ; break ;
            case GB_UINT64_code : op = GrB_SECOND_UINT64 ; break ;
            case GB_FP32_code   : op = GrB_SECOND_FP32   ; break ;
            case GB_FP64_code   : op = GrB_SECOND_FP64   ; break ;
            case GB_FC32_code   : op = GxB_SECOND_FC32   ; break ;
            case GB_FC64_code   : op = GxB_SECOND_FC64   ; break ;
            default: ;
        }
    }

    if (flipxy)
    {
        // try to replace op with its flipped counterpart; if that succeeds,
        // flipxy can be cleared
        bool handled ;
        op = GB_flip_op (op, &handled) ;
        if (handled) flipxy = false ;
    }
    ASSERT_BINARYOP_OK (op, "final op for emult_02", GB0) ;

    //--------------------------------------------------------------------------
    // declare workspace
    //--------------------------------------------------------------------------

    GB_WERK_DECLARE (Work, int64_t) ;
    // Wfirst/Wlast hold the per-task entry counts for the first/last
    // (possibly shared) vector of each task; Cp_kfirst is derived from them
    int64_t *restrict Wfirst = NULL ;
    int64_t *restrict Wlast = NULL ;
    int64_t *restrict Cp_kfirst = NULL ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;

    //--------------------------------------------------------------------------
    // get M, A, and B
    //--------------------------------------------------------------------------

    const int8_t *restrict Mb = (M == NULL) ? NULL : M->b ;
    const GB_void *restrict Mx = (M == NULL || Mask_struct) ? NULL :
        (const GB_void *) M->x ;
    const size_t msize = (M == NULL) ? 0 : M->type->size ;

    const int64_t *restrict Ap = A->p ;
    const int64_t *restrict Ah = A->h ;
    const int64_t *restrict Ai = A->i ;
    const int64_t vlen = A->vlen ;
    const int64_t vdim = A->vdim ;
    const int64_t nvec = A->nvec ;
    const int64_t anz = GB_nnz (A) ;

    const int8_t *restrict Bb = B->b ;
    const bool B_is_bitmap = GB_IS_BITMAP (B) ;

    //--------------------------------------------------------------------------
    // check if C is iso and compute its iso value if it is
    //--------------------------------------------------------------------------

    const size_t csize = ctype->size ;
    GB_void cscalar [GB_VLA(csize)] ;
    bool C_iso = GB_iso_emult (cscalar, ctype, A, B, op) ;

    //--------------------------------------------------------------------------
    // allocate C->p and C->h
    //--------------------------------------------------------------------------

    GB_OK (GB_new (&C, // sparse or hyper (same as A), existing header
        ctype, vlen, vdim, GB_Ap_calloc, C_is_csc,
        C_sparsity, A->hyper_switch, nvec, Context)) ;
    int64_t *restrict Cp = C->p ;

    //--------------------------------------------------------------------------
    // slice the input matrix A
    //--------------------------------------------------------------------------

    // GB_SLICE_MATRIX defines kfirst_Aslice, klast_Aslice, and
    // pstart_Aslice, and sets A_nthreads and A_ntasks
    int A_nthreads, A_ntasks ;
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    GB_SLICE_MATRIX (A, 8, chunk) ;

    //--------------------------------------------------------------------------
    // count entries in C
    --------------------------------------------------------------------------*/
    //--------------------------------------------------------------------------

    C->nvec_nonempty = A->nvec_nonempty ;
    C->nvec = nvec ;

    // if B is full and there is no mask, C has exactly the pattern of A and
    // the counting phase can be skipped entirely
    const bool C_has_pattern_of_A = !B_is_bitmap && (M == NULL) ;

    if (!C_has_pattern_of_A)
    {

        //----------------------------------------------------------------------
        // allocate workspace
        //----------------------------------------------------------------------

        GB_WERK_PUSH (Work, 3*A_ntasks, int64_t) ;
        if (Work == NULL)
        {
            // out of memory
            GB_FREE_ALL ;
            return (GrB_OUT_OF_MEMORY) ;
        }
        Wfirst    = Work ;
        Wlast     = Work + A_ntasks ;
        Cp_kfirst = Work + A_ntasks * 2 ;

        //----------------------------------------------------------------------
        // count entries in C
        //----------------------------------------------------------------------

        // This phase is very similar to GB_select_phase1 (GB_ENTRY_SELECTOR).

        if (M == NULL)
        {

            //------------------------------------------------------------------
            // Method2(a): C = A.*B where A is sparse/hyper and B is bitmap
            //------------------------------------------------------------------

            ASSERT (B_is_bitmap) ;

            int tid ;
            #pragma omp parallel for num_threads(A_nthreads) schedule(dynamic,1)
            for (tid = 0 ; tid < A_ntasks ; tid++)
            {
                int64_t kfirst = kfirst_Aslice [tid] ;
                int64_t klast  = klast_Aslice  [tid] ;
                Wfirst [tid] = 0 ;
                Wlast  [tid] = 0 ;
                for (int64_t k = kfirst ; k <= klast ; k++)
                {
                    // count the entries in C(:,j)
                    int64_t j = GBH (Ah, k) ;
                    int64_t pB_start = j * vlen ;
                    int64_t pA, pA_end ;
                    GB_get_pA (&pA, &pA_end, tid, k,
                        kfirst, klast, pstart_Aslice, Ap, vlen) ;
                    int64_t cjnz = 0 ;
                    for ( ; pA < pA_end ; pA++)
                    {
                        // an entry of A survives iff B's bitmap is set there
                        cjnz += Bb [pB_start + Ai [pA]] ;
                    }
                    if (k == kfirst)
                    {
                        Wfirst [tid] = cjnz ;
                    }
                    else if (k == klast)
                    {
                        Wlast [tid] = cjnz ;
                    }
                    else
                    {
                        Cp [k] = cjnz ;
                    }
                }
            }

        }
        else
        {

            //------------------------------------------------------------------
            // Method2(c): C<#M> = A.*B; M, B bitmap/full, A is sparse/hyper
            //------------------------------------------------------------------

            ASSERT (M != NULL) ;

            int tid ;
            #pragma omp parallel for num_threads(A_nthreads) schedule(dynamic,1)
            for (tid = 0 ; tid < A_ntasks ; tid++)
            {
                int64_t kfirst = kfirst_Aslice [tid] ;
                int64_t klast  = klast_Aslice  [tid] ;
                Wfirst [tid] = 0 ;
                Wlast  [tid] = 0 ;
                for (int64_t k = kfirst ; k <= klast ; k++)
                {
                    // count the entries in C(:,j)
                    int64_t j = GBH (Ah, k) ;
                    int64_t pB_start = j * vlen ;
                    int64_t pA, pA_end ;
                    GB_get_pA (&pA, &pA_end, tid, k,
                        kfirst, klast, pstart_Aslice, Ap, vlen) ;
                    int64_t cjnz = 0 ;
                    for ( ; pA < pA_end ; pA++)
                    {
                        int64_t i = Ai [pA] ;
                        int64_t pB = pB_start + i ;
                        // mij = M(i,j), cast to bool; XOR applies Mask_comp
                        bool mij = GBB (Mb, pB) && GB_mcast (Mx, pB, msize) ;
                        mij = mij ^ Mask_comp ;
                        cjnz += (mij && GBB (Bb, pB)) ;
                    }
                    if (k == kfirst)
                    {
                        Wfirst [tid] = cjnz ;
                    }
                    else if (k == klast)
                    {
                        Wlast [tid] = cjnz ;
                    }
                    else
                    {
                        Cp [k] = cjnz ;
                    }
                }
            }
        }

        //----------------------------------------------------------------------
        // finalize Cp, cumulative sum of Cp and compute Cp_kfirst
        //----------------------------------------------------------------------

        GB_ek_slice_merge1 (Cp, Wfirst, Wlast, A_ek_slicing, A_ntasks) ;
        GB_ek_slice_merge2 (&(C->nvec_nonempty), Cp_kfirst, Cp, nvec,
            Wfirst, Wlast, A_ek_slicing, A_ntasks, A_nthreads, Context) ;

    }

    //--------------------------------------------------------------------------
    // allocate C->i and C->x
    //--------------------------------------------------------------------------

    int64_t cnz = (C_has_pattern_of_A) ? anz : Cp [nvec] ;
    // set C->iso = C_iso   OK
    GB_OK (GB_bix_alloc (C, cnz, GxB_SPARSE, false, true, C_iso, Context)) ;

    //--------------------------------------------------------------------------
    // copy pattern into C
    //--------------------------------------------------------------------------

    // TODO: could make these components of C shallow instead of memcpy

    if (GB_IS_HYPERSPARSE (A))
    {
        // copy A->h into C->h
        GB_memcpy (C->h, Ah, nvec * sizeof (int64_t), A_nthreads) ;
    }

    if (C_has_pattern_of_A)
    {
        // Method2(b): B is full and no mask present, so the pattern of C is
        // the same as the pattern of A
        GB_memcpy (Cp, Ap, (nvec+1) * sizeof (int64_t), A_nthreads) ;
        GB_memcpy (C->i, Ai, cnz * sizeof (int64_t), A_nthreads) ;
    }

    C->jumbled = A->jumbled ;
    C->magic = GB_MAGIC ;

    //--------------------------------------------------------------------------
    // get the opcode
    //--------------------------------------------------------------------------

    // if flipxy was true on input and the op is positional, FIRST, SECOND, or
    // PAIR, the op has already been flipped, so these tests do not have to
    // consider that case.

    GB_Opcode opcode = op->opcode ;
    bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ;
    bool op_is_first  = (opcode == GB_FIRST_binop_code) ;
    bool op_is_second = (opcode == GB_SECOND_binop_code) ;
    bool op_is_pair   = (opcode == GB_PAIR_binop_code) ;
    GB_Type_code ccode = ctype->code ;

    //--------------------------------------------------------------------------
    // check if the values of A and/or B are ignored
    //--------------------------------------------------------------------------

    // With C = ewisemult (A,B), only the intersection of A and B is used.
    // If op is SECOND or PAIR, the values of A are never accessed.
    // If op is FIRST  or PAIR, the values of B are never accessed.
    // If op is PAIR, the values of A and B are never accessed.
    // Contrast with ewiseadd.

    // A is passed as x, and B as y, in z = op(x,y)
    bool A_is_pattern = op_is_second || op_is_pair || op_is_positional ;
    bool B_is_pattern = op_is_first  || op_is_pair || op_is_positional ;

    //--------------------------------------------------------------------------
    // using a built-in binary operator (except for positional operators)
    --------------------------------------------------------------------------*/
    //--------------------------------------------------------------------------

    #define GB_PHASE_2_OF_2

    bool done = false ;

    if (C_iso)
    {

        //----------------------------------------------------------------------
        // C is iso
        //----------------------------------------------------------------------

        // Cx [0] = cscalar = op (A,B)
        GB_BURBLE_MATRIX (C, "(iso emult) ") ;
        memcpy (C->x, cscalar, csize) ;

        // pattern of C = set intersection of pattern of A and B
        // flipxy is ignored since the operator is not applied
        #define GB_ISO_EMULT
        #include "GB_emult_02_template.c"
        done = true ;

    }
    else
    {

        #ifndef GBCOMPACT

            //------------------------------------------------------------------
            // define the worker for the switch factory
            //------------------------------------------------------------------

            #define GB_AemultB_02(mult,xname) GB (_AemultB_02_ ## mult ## xname)

            #define GB_BINOP_WORKER(mult,xname)                             \
            {                                                               \
                info = GB_AemultB_02(mult,xname) (C,                        \
                    M, Mask_struct, Mask_comp, A, B, flipxy,                \
                    Cp_kfirst, A_ek_slicing, A_ntasks, A_nthreads) ;        \
                done = (info != GrB_NO_VALUE) ;                             \
            }                                                               \
            break ;

            //------------------------------------------------------------------
            // launch the switch factory
            //------------------------------------------------------------------

            GB_Type_code xcode, ycode, zcode ;
            if (!op_is_positional &&
                GB_binop_builtin (A->type, A_is_pattern, B->type, B_is_pattern,
                op, false, &opcode, &xcode, &ycode, &zcode) && ccode == zcode)
            {
                // the factory falls back to the generic worker below when no
                // specialized kernel matches (done stays false)
                #define GB_NO_PAIR
                #include "GB_binop_factory.c"
            }

        #endif
    }

    //--------------------------------------------------------------------------
    // generic worker
    //--------------------------------------------------------------------------

    if (!done)
    {
        GB_BURBLE_MATRIX (C, "(generic emult_02: %s) ", op->name) ;
        int ewise_method = flipxy ? GB_EMULT_METHOD3 : GB_EMULT_METHOD2 ;
        GB_ewise_generic (C, op, NULL, 0, 0,
            NULL, NULL, NULL, C_sparsity, ewise_method, Cp_kfirst,
            NULL, 0, 0, A_ek_slicing, A_ntasks, A_nthreads, NULL, 0, 0,
            M, Mask_struct, Mask_comp, A, B, Context) ;
    }

    //--------------------------------------------------------------------------
    // remove empty vectors from C, if hypersparse
    //--------------------------------------------------------------------------

    GB_OK (GB_hypermatrix_prune (C, Context)) ;

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORKSPACE ;
    ASSERT_MATRIX_OK (C, "C output for emult_02", GB0) ;
    return (GrB_SUCCESS) ;
}