source
stringlengths
3
92
c
stringlengths
26
2.25M
kiss_fft.c
/*
 * Copyright (c) 2003-2010, Mark Borgerding. All rights reserved.
 * This file is part of KISS FFT - https://github.com/mborgerding/kissfft
 *
 * SPDX-License-Identifier: BSD-3-Clause
 * See COPYING file for more information.
 */

#include "_kiss_fft_guts.h"
/* The guts header contains all the multiplication and addition macros that
   are defined for fixed or floating point complex numbers. It also declares
   the kf_ internal functions. */

/* Radix-2 butterfly: recombines two interleaved length-m sub-DFTs in place.
 * fstride selects the twiddle spacing for this stage; m must be > 0. */
static void kf_bfly2(kiss_fft_cpx* Fout, const size_t fstride, const kiss_fft_cfg st, int m) {
    kiss_fft_cpx* Fout2;
    kiss_fft_cpx* tw1 = st->twiddles;
    kiss_fft_cpx t;
    Fout2 = Fout + m; /* second half of the pair */
    do {
        C_FIXDIV(*Fout, 2); /* fixed-point scaling; no-op in floating point */
        C_FIXDIV(*Fout2, 2);
        C_MUL(t, *Fout2, *tw1);
        tw1 += fstride;
        C_SUB(*Fout2, *Fout, t);
        C_ADDTO(*Fout, t);
        ++Fout2;
        ++Fout;
    } while (--m);
}

/* Radix-4 butterfly: recombines four length-m sub-DFTs in place.
 * The forward/inverse distinction only flips the sign of the +-j rotation
 * in the odd outputs (the if/else below). */
static void kf_bfly4(kiss_fft_cpx* Fout, const size_t fstride, const kiss_fft_cfg st, const size_t m) {
    kiss_fft_cpx *tw1, *tw2, *tw3;
    kiss_fft_cpx scratch[6];
    size_t k = m;
    const size_t m2 = 2 * m;
    const size_t m3 = 3 * m;
    tw3 = tw2 = tw1 = st->twiddles;
    do {
        C_FIXDIV(*Fout, 4);
        C_FIXDIV(Fout[m], 4);
        C_FIXDIV(Fout[m2], 4);
        C_FIXDIV(Fout[m3], 4);
        C_MUL(scratch[0], Fout[m], *tw1);
        C_MUL(scratch[1], Fout[m2], *tw2);
        C_MUL(scratch[2], Fout[m3], *tw3);
        C_SUB(scratch[5], *Fout, scratch[1]);
        C_ADDTO(*Fout, scratch[1]);
        C_ADD(scratch[3], scratch[0], scratch[2]);
        C_SUB(scratch[4], scratch[0], scratch[2]);
        C_SUB(Fout[m2], *Fout, scratch[3]);
        /* twiddle pointers advance at 1x, 2x, 3x the stage stride */
        tw1 += fstride;
        tw2 += fstride * 2;
        tw3 += fstride * 3;
        C_ADDTO(*Fout, scratch[3]);
        if (st->inverse) {
            Fout[m].r = scratch[5].r - scratch[4].i;
            Fout[m].i = scratch[5].i + scratch[4].r;
            Fout[m3].r = scratch[5].r + scratch[4].i;
            Fout[m3].i = scratch[5].i - scratch[4].r;
        } else {
            Fout[m].r = scratch[5].r + scratch[4].i;
            Fout[m].i = scratch[5].i - scratch[4].r;
            Fout[m3].r = scratch[5].r - scratch[4].i;
            Fout[m3].i = scratch[5].i + scratch[4].r;
        }
        ++Fout;
    } while (--k);
}

/* Radix-3 butterfly: recombines three length-m sub-DFTs in place.
 * epi3 is the twiddle at index fstride*m; only its imaginary part
 * (sin component) is used, via C_MULBYSCALAR below. */
static void kf_bfly3(kiss_fft_cpx* Fout, const size_t fstride, const kiss_fft_cfg st, size_t m) {
    size_t k = m;
    const size_t m2 = 2 * m;
    kiss_fft_cpx *tw1, *tw2;
    kiss_fft_cpx scratch[5];
    kiss_fft_cpx epi3;
    epi3 = st->twiddles[fstride * m];
    tw1 = tw2 = st->twiddles;
    do {
        C_FIXDIV(*Fout, 3);
        C_FIXDIV(Fout[m], 3);
        C_FIXDIV(Fout[m2], 3);
        C_MUL(scratch[1], Fout[m], *tw1);
        C_MUL(scratch[2], Fout[m2], *tw2);
        C_ADD(scratch[3], scratch[1], scratch[2]);
        C_SUB(scratch[0], scratch[1], scratch[2]);
        tw1 += fstride;
        tw2 += fstride * 2;
        Fout[m].r = Fout->r - HALF_OF(scratch[3].r);
        Fout[m].i = Fout->i - HALF_OF(scratch[3].i);
        C_MULBYSCALAR(scratch[0], epi3.i);
        C_ADDTO(*Fout, scratch[3]);
        Fout[m2].r = Fout[m].r + scratch[0].i;
        Fout[m2].i = Fout[m].i - scratch[0].r;
        Fout[m].r -= scratch[0].i;
        Fout[m].i += scratch[0].r;
        ++Fout;
    } while (--k);
}

/* Radix-5 butterfly: recombines five length-m sub-DFTs in place.
 * ya/yb are the stage twiddles at fstride*m and fstride*2*m; the
 * symmetric/antisymmetric sums in scratch[7..10] reduce the multiply count. */
static void kf_bfly5(kiss_fft_cpx* Fout, const size_t fstride, const kiss_fft_cfg st, int m) {
    kiss_fft_cpx *Fout0, *Fout1, *Fout2, *Fout3, *Fout4;
    int u;
    kiss_fft_cpx scratch[13];
    kiss_fft_cpx* twiddles = st->twiddles;
    kiss_fft_cpx* tw;
    kiss_fft_cpx ya, yb;
    ya = twiddles[fstride * m];
    yb = twiddles[fstride * 2 * m];
    Fout0 = Fout;
    Fout1 = Fout0 + m;
    Fout2 = Fout0 + 2 * m;
    Fout3 = Fout0 + 3 * m;
    Fout4 = Fout0 + 4 * m;
    tw = st->twiddles;
    for (u = 0; u < m; ++u) {
        C_FIXDIV(*Fout0, 5);
        C_FIXDIV(*Fout1, 5);
        C_FIXDIV(*Fout2, 5);
        C_FIXDIV(*Fout3, 5);
        C_FIXDIV(*Fout4, 5);
        scratch[0] = *Fout0;
        C_MUL(scratch[1], *Fout1, tw[u * fstride]);
        C_MUL(scratch[2], *Fout2, tw[2 * u * fstride]);
        C_MUL(scratch[3], *Fout3, tw[3 * u * fstride]);
        C_MUL(scratch[4], *Fout4, tw[4 * u * fstride]);
        C_ADD(scratch[7], scratch[1], scratch[4]);
        C_SUB(scratch[10], scratch[1], scratch[4]);
        C_ADD(scratch[8], scratch[2], scratch[3]);
        C_SUB(scratch[9], scratch[2], scratch[3]);
        Fout0->r += scratch[7].r + scratch[8].r;
        Fout0->i += scratch[7].i + scratch[8].i;
        scratch[5].r = scratch[0].r + S_MUL(scratch[7].r, ya.r) + S_MUL(scratch[8].r, yb.r);
        scratch[5].i = scratch[0].i + S_MUL(scratch[7].i, ya.r) + S_MUL(scratch[8].i, yb.r);
        scratch[6].r = S_MUL(scratch[10].i, ya.i) + S_MUL(scratch[9].i, yb.i);
        scratch[6].i = -S_MUL(scratch[10].r, ya.i) - S_MUL(scratch[9].r, yb.i);
        C_SUB(*Fout1, scratch[5], scratch[6]);
        C_ADD(*Fout4, scratch[5], scratch[6]);
        scratch[11].r = scratch[0].r + S_MUL(scratch[7].r, yb.r) + S_MUL(scratch[8].r, ya.r);
        scratch[11].i = scratch[0].i + S_MUL(scratch[7].i, yb.r) + S_MUL(scratch[8].i, ya.r);
        scratch[12].r = -S_MUL(scratch[10].i, yb.i) + S_MUL(scratch[9].i, ya.i);
        scratch[12].i = S_MUL(scratch[10].r, yb.i) - S_MUL(scratch[9].r, ya.i);
        C_ADD(*Fout2, scratch[11], scratch[12]);
        C_SUB(*Fout3, scratch[11], scratch[12]);
        ++Fout0;
        ++Fout1;
        ++Fout2;
        ++Fout3;
        ++Fout4;
    }
}

/* perform the butterfly for one stage of a mixed radix FFT
 * (any prime radix p not handled by the specialized kernels above).
 * Allocates a p-element scratch buffer per call. */
static void kf_bfly_generic(kiss_fft_cpx* Fout, const size_t fstride, const kiss_fft_cfg st, int m, int p) {
    int u, k, q1, q;
    kiss_fft_cpx* twiddles = st->twiddles;
    kiss_fft_cpx t;
    int Norig = st->nfft;
    kiss_fft_cpx* scratch = (kiss_fft_cpx*)KISS_FFT_TMP_ALLOC(sizeof(kiss_fft_cpx) * p);
    for (u = 0; u < m; ++u) {
        k = u;
        /* gather the p inputs for this output group */
        for (q1 = 0; q1 < p; ++q1) {
            scratch[q1] = Fout[k];
            C_FIXDIV(scratch[q1], p);
            k += m;
        }
        k = u;
        for (q1 = 0; q1 < p; ++q1) {
            int twidx = 0;
            Fout[k] = scratch[0];
            for (q = 1; q < p; ++q) {
                /* twiddle index advances modulo Norig */
                twidx += fstride * k;
                if (twidx >= Norig) twidx -= Norig;
                C_MUL(t, scratch[q], twiddles[twidx]);
                C_ADDTO(Fout[k], t);
            }
            k += m;
        }
    }
    KISS_FFT_TMP_FREE(scratch);
}

/* Recursive work-horse: computes a DFT of length p*m by performing p
 * decimated sub-DFTs of length m and recombining them with the radix-p
 * butterfly. factors points at the (p, m) pairs produced by kf_factor. */
static void kf_work(kiss_fft_cpx* Fout, const kiss_fft_cpx* f, const size_t fstride, int in_stride, int* factors, const kiss_fft_cfg st) {
    kiss_fft_cpx* Fout_beg = Fout;
    const int p = *factors++; /* the radix */
    const int m = *factors++; /* stage's fft length/p */
    const kiss_fft_cpx* Fout_end = Fout + p * m;
#ifdef _OPENMP
    // use openmp extensions at the
    // top-level (not recursive)
    if (fstride == 1 && p <= 5 && m != 1) {
        int k;
        // execute the p different work units in different threads
#pragma omp parallel for
        for (k = 0; k < p; ++k)
            kf_work(Fout + k * m, f + fstride * in_stride * k, fstride * p, in_stride, factors, st);
        // all threads have joined by this point
        switch (p) {
        case 2: kf_bfly2(Fout, fstride, st, m); break;
        case 3: kf_bfly3(Fout, fstride, st, m); break;
        case 4: kf_bfly4(Fout, fstride, st, m); break;
        case 5: kf_bfly5(Fout, fstride, st, m); break;
        default: kf_bfly_generic(Fout, fstride, st, m, p); break;
        }
        return;
    }
#endif
    if (m == 1) {
        /* base case: length-1 DFT is a strided copy of the input */
        do {
            *Fout = *f;
            f += fstride * in_stride;
        } while (++Fout != Fout_end);
    } else {
        do {
            // recursive call:
            // DFT of size m*p performed by doing
            // p instances of smaller DFTs of size m,
            // each one takes a decimated version of the input
            kf_work(Fout, f, fstride * p, in_stride, factors, st);
            f += fstride * in_stride;
        } while ((Fout += m) != Fout_end);
    }
    Fout = Fout_beg;
    // recombine the p smaller DFTs
    switch (p) {
    case 2: kf_bfly2(Fout, fstride, st, m); break;
    case 3: kf_bfly3(Fout, fstride, st, m); break;
    case 4: kf_bfly4(Fout, fstride, st, m); break;
    case 5: kf_bfly5(Fout, fstride, st, m); break;
    default: kf_bfly_generic(Fout, fstride, st, m, p); break;
    }
}

/*  facbuf is populated by p1,m1,p2,m2, ...
    where
    p[i] * m[i] = m[i-1]
    m0 = n                  */
static void kf_factor(int n, int* facbuf) {
    int p = 4;
    double floor_sqrt;
    floor_sqrt = floor(sqrt((double)n));
    /*factor out powers of 4, powers of 2, then any remaining primes */
    do {
        while (n % p) {
            switch (p) {
            case 4: p = 2; break;
            case 2: p = 3; break;
            default: p += 2; break; /* try odd candidates 5, 7, 9, ... */
            }
            if (p > floor_sqrt) p = n; /* no more factors, skip to end */
        }
        n /= p;
        *facbuf++ = p;
        *facbuf++ = n;
    } while (n > 1);
}

/*
 *
 * User-callable function to allocate all necessary storage space for the fft.
 *
 * The return value is a contiguous block of memory, allocated with malloc. As such,
 * It can be freed with free(), rather than a kiss_fft-specific function.
 *
 * If lenmem is non-NULL, no allocation happens: the caller-supplied buffer mem
 * is used if large enough, and *lenmem is updated with the required size.
 */
kiss_fft_cfg kiss_fft_alloc(int nfft, int inverse_fft, void* mem, size_t* lenmem) {
    KISS_FFT_ALIGN_CHECK(mem)
    kiss_fft_cfg st = NULL;
    /* nfft twiddles are stored inline after the struct; the struct already
       contains room for one, hence (nfft - 1) extra elements */
    size_t memneeded = KISS_FFT_ALIGN_SIZE_UP(
        sizeof(struct kiss_fft_state) + sizeof(kiss_fft_cpx) * (nfft - 1)); /* twiddle factors*/
    if (lenmem == NULL) {
        st = (kiss_fft_cfg)KISS_FFT_MALLOC(memneeded);
    } else {
        if (mem != NULL && *lenmem >= memneeded) st = (kiss_fft_cfg)mem;
        *lenmem = memneeded;
    }
    if (st) {
        int i;
        st->nfft = nfft;
        st->inverse = inverse_fft;
        for (i = 0; i < nfft; ++i) {
            const double pi = 3.141592653589793238462643383279502884197169399375105820974944;
            double phase = -2 * pi * i / nfft;
            if (st->inverse) phase *= -1; /* inverse transform uses conjugate twiddles */
            kf_cexp(st->twiddles + i, phase);
        }
        kf_factor(nfft, st->factors);
    }
    return st;
}

/* FFT with an input stride. fin == fout is handled by transforming into a
 * temporary buffer and copying back (not a true in-place algorithm). */
void kiss_fft_stride(kiss_fft_cfg st, const kiss_fft_cpx* fin, kiss_fft_cpx* fout, int in_stride) {
    if (fin == fout) {
        // NOTE: this is not really an in-place FFT algorithm.
        // It just performs an out-of-place FFT into a temp buffer
        kiss_fft_cpx* tmpbuf = (kiss_fft_cpx*)KISS_FFT_TMP_ALLOC(sizeof(kiss_fft_cpx) * st->nfft);
        kf_work(tmpbuf, fin, 1, in_stride, st->factors, st);
        memcpy(fout, tmpbuf, sizeof(kiss_fft_cpx) * st->nfft);
        KISS_FFT_TMP_FREE(tmpbuf);
    } else {
        kf_work(fout, fin, 1, in_stride, st->factors, st);
    }
}

/* Convenience wrapper: unit-stride transform. */
void kiss_fft(kiss_fft_cfg cfg, const kiss_fft_cpx* fin, kiss_fft_cpx* fout) {
    kiss_fft_stride(cfg, fin, fout, 1);
}

void kiss_fft_cleanup(void) {
    // nothing needed any more
}

/* Returns the smallest n' >= n whose prime factors are all 2, 3 or 5,
 * i.e. a size the specialized butterflies handle efficiently. */
int kiss_fft_next_fast_size(int n) {
    while (1) {
        int m = n;
        while ((m % 2) == 0) m /= 2;
        while ((m % 3) == 0) m /= 3;
        while ((m % 5) == 0) m /= 5;
        if (m <= 1) break; /* n is completely factorable by twos, threes, and fives */
        n++;
    }
    return n;
}
compile.h
/* NOTE(review): this header contains function DEFINITIONS, not just
 * declarations - including it from more than one translation unit would
 * cause duplicate-symbol link errors. Verify it is included exactly once. */
#ifndef _COMPILE_H_
#define _COMPILE_H_

#include "fsm.h"

int print_diff(FILE *file, DiffList *list);
int print_right_alg(FILE *file, AlgList *list, TokenNode *orderedlist);
int print_eq(FILE *file, TokenNode *t, char *left_token_name);
int print_if(FILE *file, IfList *list);
void initial_values(xmlNode *root);
int set_initial_value(char *name, double initvalue, char *units);
// Fix duplicated algebraic variables
void fix_duplicate_equations(xmlNode *root, AlgList *alg);
void fix(xmlNode *root, AlgList *list);
void change_duplicated_equations(int eqnumber, int bottom, int top, char *cname);
void fix_node(TokenNode *node, char *oldname, char *newname);

/* Emits one #define IFNUMBER_<k>(name) macro per piecewise expression in
 * list: an if/else-if/else chain that assigns the selected piece to (name).
 * Consecutive list entries sharing the same if_counter become else-if arms.
 * Exits the process if file is NULL. Always returns 0. */
int print_if(FILE *file, IfList *list) {
    if (file == NULL) {
        printf("ERROR - Can't write in file, print_alg");
        exit(1);
    }
    IfList *curl = rewind_if_list(list);
    TokenNode *cur = NULL;
    int i = 0; /* counts emitted macros; value is not otherwise used here */
    while (curl != NULL) {
        fprintf(file, "\n#define IFNUMBER_%d(name)", curl->ifheader->if_counter);
        fprintf(file, "if(");
        // printing the condition
        cur = curl->ifheader->cond;
        print_eq(file, cur, NULL); // printing if condition
        fprintf(file, ") { (name) = ");
        cur = curl->ifheader->piece;
        print_eq(file, cur, NULL); // printing the return of if
        fprintf(file, "; } ");
        /* same if_counter on the next node => another piece of the same
           piecewise definition => emit as "else if" */
        while (curl->next != NULL && curl->next->ifheader->if_counter == curl->ifheader->if_counter) {
            curl = curl->next;
            fprintf(file, " else if(");
            cur = curl->ifheader->cond;
            print_eq(file, cur, NULL); // printing if condition
            fprintf(file, "){ (name) = ");
            cur = curl->ifheader->piece;
            print_eq(file, cur, NULL); // printing the return of if
            fprintf(file, ";");
            fprintf(file, " }");
        }
        fprintf(file, " else{");
        fprintf(file, " (name) = ");
        cur = curl->ifheader->other;
        print_eq(file, cur, NULL); // printing the return of else
        fprintf(file, ";");
        fprintf(file, " }");
        i++;
        curl = curl->next;
    }
    return 0;
}

/* Writes one "rDY[i] = <expr>;" line per differential equation in list.
 * Exits the process if file is NULL. Always returns 0. */
int print_diff(FILE *file, DiffList *list) {
    if (file == NULL) {
        printf("ERROR - Can't write in file, print_diff");
        exit(1);
    }
    DiffList *curl = rewind_diff_list(list);
    TokenNode *cur = NULL;
    int count = 0;
    char tmp[2048]; /* left-hand-side name handed to print_eq for IFNUMBER expansion */
    while (curl != NULL) {
        fprintf(file, "\n rDY[%d] = ", count);
        sprintf(tmp, "rDY[%d]", count);
        cur = curl->diffheader->eq->next;
        print_eq(file, cur, tmp);
        fprintf(file, ";");
        curl = curl->next;
        count++;
    }
    return 0;
}

/* Same as print_diff but targets CVODE's NV_Ith_S(rDY, i) accessors.
 * Exits the process if file is NULL. Always returns 0. */
int print_diff_cvode(FILE *file, DiffList *list) {
    if (file == NULL) {
        printf("ERROR - Can't write in file, print_diff");
        exit(1);
    }
    DiffList *curl = rewind_diff_list(list);
    TokenNode *cur = NULL;
    int count = 0;
    char tmp[2048];
    while (curl != NULL) {
        fprintf(file, "\n NV_Ith_S(rDY, %d) = ", count);
        sprintf(tmp, "NV_Ith_S(rDY, %d)", count);
        cur = curl->diffheader->eq->next;
        print_eq(file, cur, tmp);
        fprintf(file, ";");
        curl = curl->next;
        count++;
    }
    return 0;
}

/* Prints the token stream of one equation to file, translating tokens as it
 * goes: algebraic variables become calc_<name>() calls, piecewise (PI_W)
 * tokens become IFNUMBER_<n>(left_token_name) macro invocations, the free
 * variable gets a _new suffix and differential variables a _old_ suffix.
 * Recurses into referenced differential equations. Relies on the file-scope
 * lists algvarlist/difvarlist/difflist (defined elsewhere). Returns 0. */
int print_eq(FILE *file, TokenNode *t, char *left_token_name) {
    if (file == NULL) {
        printf("ERROR - Can't write in file, print_alg");
        exit(1);
    }
    TokenNode *cur = t;
    while (cur != NULL) {
        if (list_has_var(cur->token, algvarlist)) {
            fprintf(file, "calc_");
            fprintf(file, "%s", cur->token.content); // 08/03/2008
            fprintf(file, "()");
        } else {
            if (cur->token.type == PI_W) {
                /* piecewise token: emit a dummy assignment then the macro call */
                fprintf(file, "0.0f;\n");
                fprintf(file, " IFNUMBER_%s(%s)", cur->token.content, left_token_name);
            } else {
                if (!strcmp(cur->token.content, difflist->diffheader->freevar.content))
                    fprintf(file, "%s_new", cur->token.content);
                else {
                    if (!cur->token.isdiff || cur->token.type == CPAR || cur->token.type == OPAR)
                        fprintf(file, "%s", cur->token.content);
                    if (list_has_var(cur->token, difvarlist)) {
                        if (!cur->token.isdiff)
                            fprintf(file, "_old_");
                        else {
                            /* a derivative referenced inside an equation: inline
                               the right-hand side of its defining equation */
                            DiffList *curdiff = rewind_diff_list(difflist);
                            int found = 0;
                            while (curdiff != NULL && !found) {
                                if (!strcmp((const char *)curdiff->diffheader->diffvar.content, cur->token.content))
                                    found = 1;
                                else
                                    curdiff = curdiff->next;
                            }
                            if (found) {
                                if (curdiff != NULL) {
                                    print_eq(file, curdiff->diffheader->eq->next, NULL);
                                }
                            } else {
                                printf("ERROR - printfDiff Differential equation referenced, "
                                       "but not defined\n");
                                exit(1);
                            }
                        }
                    }
                }
            }
        }
        cur = cur->next;
    }
    return 0;
}

/* Walks the CellML component tree and feeds each variable's name,
 * initial_value and units attributes to set_initial_value. */
void initial_values(xmlNode *root) {
    parvarlist = create_param_list(varlist, difvarlist, algvarlist);
    xmlNode *curmodel = root;
    xmlNode *curvar;
    while (curmodel != NULL) {
        // if the file is just a MathML not a CellML
        /* NOTE(review): this compares the xmlNode struct itself cast to
         * char* against "math"; it likely should compare curmodel->name.
         * Verify against the xmlNode layout before relying on it. */
        if (!strcmp((char *)curmodel, "math")) break;
        curmodel = get_component(curmodel);
        if (curmodel != NULL) {
            curvar = get_variable(curmodel);
            while (curvar != NULL) {
                char *name = "";
                double initvalue = 0.0;
                char *units = "";
                // looking for initial_values in the variable
                xmlAttr *attr = curvar->properties;
                while (attr != NULL) {
                    if (!strcmp("initial_value", (char *)attr->name)) break;
                    attr = attr->next;
                }
                // if a initial value was found
                if (attr != NULL) {
                    initvalue = strtod((char *)xmlNodeGetContent((xmlNode *)attr), NULL);
                }
                // take the variable name
                attr = curvar->properties;
                while (attr != NULL) {
                    if (!strcmp("name", (char *)attr->name)) break;
                    attr = attr->next;
                }
                if (attr != NULL) {
                    name = (char *)xmlNodeGetContent((xmlNode *)attr);
                }
                /* and the units attribute */
                attr = curvar->properties;
                while (attr != NULL) {
                    if (!strcmp("units", (char *)attr->name)) break;
                    attr = attr->next;
                }
                if (attr != NULL) {
                    units = (char *)xmlNodeGetContent((xmlNode *)attr);
                }
                set_initial_value(name, initvalue, units);
                curvar = get_variable(curvar);
            }
        }
    }
}

/* Stores initvalue/units on the variable called name, searching the
 * differential, algebraic and parameter lists in that order.
 * Returns 1 if the variable was found, 0 otherwise.
 * Note: a zero initvalue is deliberately not stored. */
int set_initial_value(char *name, double initvalue, char *units) {
    // creating the param list
    TokenNode *curdif = rewind_token_list(difvarlist);
    int found = 0;
    // searching in difflist
    while ((curdif != NULL) && (!found)) {
        if (!strcmp(strNoSpace(name), strNoSpace(curdif->token.content))) {
            curdif->units = units;
            if (initvalue != 0.0) {
                curdif->initialvalue = initvalue;
            }
            found = 1;
        }
        curdif = curdif->next;
    }
    TokenNode *curalg = rewind_token_list(algvarlist);
    // searching in alglist
    while ((curalg != NULL) && (!found)) {
        if (!strcmp(name, strNoSpace(curalg->token.content))) {
            curalg->units = units;
            if (initvalue != 0.0) curalg->initialvalue = initvalue;
            found = 1;
        }
        curalg = curalg->next;
    }
    TokenNode *curpar = rewind_token_list(parvarlist);
    // searching in parlist
    while ((curpar != NULL) && (!found)) {
        if (!strcmp(name, strNoSpace(curpar->token.content))) {
            curpar->units = units;
            if (initvalue != 0.0) curpar->initialvalue = initvalue;
            found = 1;
        }
        curpar = curpar->next;
    }
    if (!found)
        return 0;
    else
        return 1;
}

/* Scans alg for equations whose left-hand token repeats, records their
 * ordinal numbers in a duplicate list and hands them to fix(). */
void fix_duplicate_equations(xmlNode *root, AlgList *alg) {
    xmlNode *curroot = root;
    TokenNode *list = NULL;
    AlgList *dpllist = NULL;
    AlgList *curalg = rewind_alg_list(alg);
    // creating a list of duplicated equations
    while (curalg != NULL) {
        Token t = curalg->eq->token;
        if (!list_has_var(t, list))
            list = attach_token(t, list);
        else {
            dpllist = attach_alg_eq(NULL, dpllist); // NOTE: this may break here (original author's warning)
            dpllist->number = curalg->number;
        }
        curalg = curalg->next;
    }
    dpllist = rewind_alg_list(dpllist);
    fix(curroot, dpllist);
}

/* Locates, per CellML component, which duplicated equations (by running
 * equation count) fall inside that component and renames them via
 * change_duplicated_equations, using the component's name attribute. */
void fix(xmlNode *root, AlgList *list) {
    xmlNode *curroot = root;
    AlgList *dpllist = list;
    int counter = -1; // to count the equations
    // searching for the duplicated equation
    while ((curroot = get_component(curroot)) != NULL) {
        xmlNode *math_node = get_math(curroot);
        xmlNode *apply = NULL;
        if (math_node != NULL) apply = math_node->children;
        int component = counter; /* equation count before this component */
        while (apply != NULL) {
            if (!strcmp((char *)apply->name, "apply")) counter++;
            apply = apply->next;
        }
        // getting the component name
        xmlAttr *name = curroot->properties;
        char *cname = NULL;
        while (name != NULL) {
            if (!strcmp((char *)name->name, "name")) break;
            name = name->next;
        }
        if (name != NULL) {
            cname = (char *)xmlNodeGetContent((xmlNode *)name);
        }
        if (dpllist != NULL)
            /* duplicate falls between this component's first and last equation */
            while (counter >= dpllist->number && component < dpllist->number) {
                printf("A duplicated algebraic equation was found at component "
                       "%s\nTrying to fix the problem\nPlease verify if the equations "
                       "generated are correct\n",
                       cname);
                change_duplicated_equations(dpllist->number, component + 1, counter, cname);
                dpllist = dpllist->next;
                if (dpllist == NULL) break;
            }
    }
}

void change_duplicated_equations(int eqnumber, int bottom,
                                 int top, char *cname) {
    /* Renames the left-hand variable of equation eqnumber to
     * <oldname>_duplicated_<cname> and rewrites every reference to it in the
     * algebraic and differential equations numbered [bottom, top]. */
    AlgList *alg = rewind_alg_list(alglist);
    DiffList *diff = rewind_diff_list(difflist);
    char *oldname = NULL;
    while (alg != NULL) {
        if (alg->number == eqnumber) break;
        alg = alg->next;
    }
    if (alg != NULL) oldname = alg->eq->token.content;
    /* +13 covers "_duplicated_" plus the terminating NUL */
    char *newname = (char *)calloc(strlen(oldname) + strlen(cname) + 13, sizeof(char));
    sprintf(newname, "%s_duplicated_%s", oldname, cname);
    Token t;
    t.content = newname;
    algvarlist = attach_token(t, algvarlist);
    alg = rewind_alg_list(alglist);
    while (alg != NULL) {
        if (alg->number >= bottom && alg->number <= top) {
            TokenNode *cur = alg->eq;
            fix_node(cur, oldname, newname);
        }
        if (alg->number > top) break;
        alg = alg->next;
    }
    while (diff != NULL) {
        if (diff->number >= bottom && diff->number <= top) {
            TokenNode *cur = diff->diffheader->eq;
            fix_node(cur, oldname, newname);
        }
        if (diff->number > top) break;
        diff = diff->next;
    }
}

/* Replaces every token equal to oldname by newname in the token list,
 * recursing into the condition/piece/otherwise branches of any piecewise
 * (PI_W) token, whose content is the index into the global iflist. */
void fix_node(TokenNode *node, char *oldname, char *newname) {
    TokenNode *cur = node;
    while (cur != NULL) {
        if (cur->token.type == PI_W) {
            long num = strtol(cur->token.content, NULL, 10);
            IfList *ilist = rewind_if_list(iflist);
            int i;
            for (i = 0; i < num; i++) {
                ilist = ilist->next;
            }
            fix_node(ilist->ifheader->cond, oldname, newname);
            fix_node(ilist->ifheader->piece, oldname, newname);
            fix_node(ilist->ifheader->other, oldname, newname);
        }
        if (!strcmp(cur->token.content, oldname)) {
            cur->token.content = newname;
        }
        cur = cur->next;
    }
}

/* Generates the CPU solver sources for the parsed model:
 *   <model_name>.c            - solver entry points (Euler + adaptive Euler)
 *   <model_name>_common.inc.c - IFNUMBER macros, parameters, algebraic and
 *                               differential equations (shared include)
 *   <model_name>.h            - declarations / NEQ / INITIAL_V
 * The first loop topologically resolves algebraic-equation dependencies
 * before emitting them via print_right_alg. The large fprintf string
 * literals below ARE the generated program text and must not be edited
 * casually - whitespace and escapes are reproduced verbatim in the output. */
void generate_cpu_model(sds model_name) {
    if (eq_counter <= 0) {
        printf("ERROR - Equation not found\n");
        exit(1);
    }
    preced_alg_list = create_preced_alg_list(rewind_alg_list(alglist));
    AlgList *cural = NULL;
    TokenNode *resolved_dep_list = NULL;
    /* dependency resolution: repeatedly move equations with no remaining
       unresolved dependencies into resolved_dep_list */
    while (preced_alg_list != NULL) {
        cural = rewind_alg_list(preced_alg_list);
        while (cural != NULL) {
            cural = cural->next;
        }
        cural = rewind_alg_list(preced_alg_list);
        while (cural != NULL) {
            TokenNode *cureq = cural->eq->next;
            if (cureq == NULL) {
                resolved_dep_list = add_list(cural->eq->token, resolved_dep_list);
                preced_alg_list = delete_from_list(preced_alg_list, cural->eq->token);
                cural = cural->next;
            } else {
                /* drop dependencies that are already resolved */
                while (cureq != NULL) {
                    if (list_has_var(cureq->token, resolved_dep_list)) {
                        if (cureq->prev != NULL) {
                            cureq->prev->next = cureq->next;
                        }
                        if (cureq->next != NULL) {
                            cureq->next->prev = cureq->prev;
                        }
                    }
                    cureq = cureq->next;
                }
                cural = cural->next;
            }
        }
    }
    FILE *file;
    sds filename = sdsdup(model_name);
    filename = sdscat(filename, ".c");
    sds headername = sdsdup(model_name);
    headername = sdscat(headername, ".h");
    file = fopen(filename, "w");
    fprintf(file, "#include \"%s\"\n", headername);
    fprintf(file, "#include <stdlib.h>\n");
    fprintf(file, "real max_step;\n"
                  "real min_step;\n"
                  "real abstol;\n"
                  "real reltol;\n"
                  "bool adpt;\n"
                  "real *ode_dt, *ode_previous_dt, *ode_time_new;\n\n");
    fprintf(file, "GET_CELL_MODEL_DATA(init_cell_model_data) {\n"
                  "\n"
                  " if(get_initial_v)\n"
                  " cell_model->initial_v = INITIAL_V;\n"
                  " if(get_neq)\n"
                  " cell_model->number_of_ode_equations = NEQ;\n"
                  "}\n"
                  "\n");
    // SET INITIAL CONDITIONS CPU
    fprintf(
        file,
        "SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {\n"
        "\n"
        " log_to_stdout_and_file(\"Using %s CPU model\\n\");\n"
        "\n"
        " uint32_t num_cells = solver->original_num_cells;\n"
        " solver->sv = (real*)malloc(NEQ*num_cells*sizeof(real));\n"
        "\n"
        " max_step = solver->max_dt;\n"
        " min_step = solver->min_dt;\n"
        " abstol = solver->abs_tol;\n"
        " reltol = solver->rel_tol;\n"
        " adpt = solver->adaptive;\n"
        "\n"
        " if(adpt) {\n"
        " ode_dt = (real*)malloc(num_cells*sizeof(real));\n"
        "\n"
        " OMP(parallel for)\n"
        " for(int i = 0; i < num_cells; i++) {\n"
        " ode_dt[i] = solver->min_dt;\n"
        " }\n"
        "\n"
        " ode_previous_dt = (real*)calloc(num_cells, sizeof(real));\n"
        " ode_time_new = (real*)calloc(num_cells, sizeof(real));\n"
        " log_to_stdout_and_file(\"Using Adaptive Euler model to solve the ODEs\\n\");\n"
        " } else {\n"
        " log_to_stdout_and_file(\"Using Euler model to solve the ODEs\\n\");\n"
        " }\n"
        "\n"
        "\n"
        " OMP(parallel for)\n"
        " for(uint32_t i = 0; i < num_cells; i++) {\n\n"
        " real *sv = &solver->sv[i * NEQ];",
        model_name);
    fprintf(file, "\n\n");
    /* one "sv[i] = <initial>;" line per differential variable */
    TokenNode *cur = rewind_token_list(difvarlist);
    int counter = 0;
    while (cur != NULL) {
        fprintf(file, " sv[%d] = %e; //%s %s \n", counter, cur->initialvalue, cur->token.content, cur->units);
        cur = cur->next;
        counter++;
    }
    fprintf(file, " }\n}\n\n");
    // SOLVE_MODEL_ODES
    fprintf(file, "SOLVE_MODEL_ODES(solve_model_odes_cpu) {\n"
                  "\n"
                  " uint32_t sv_id;\n"
                  "\n"
                  " size_t num_cells_to_solve = ode_solver->num_cells_to_solve;\n"
                  " uint32_t * cells_to_solve = ode_solver->cells_to_solve;\n"
                  " real *sv = ode_solver->sv;\n"
                  " real dt = ode_solver->min_dt;\n"
                  " uint32_t num_steps = ode_solver->num_steps;\n"
                  "\n"
                  " #pragma omp parallel for private(sv_id)\n"
                  " for (u_int32_t i = 0; i < num_cells_to_solve; i++) {\n"
                  "\n"
                  " if(cells_to_solve)\n"
                  " sv_id = cells_to_solve[i];\n"
                  " else\n"
                  " sv_id = i;\n"
                  "\n"
                  " if(adpt) {\n"
                  "\n"
                  " solve_forward_euler_cpu_adpt(sv + (sv_id * NEQ), stim_currents[i], current_t + dt, sv_id);\n"
                  " }\n"
                  " else {\n"
                  " for (int j = 0; j < num_steps; ++j) {\n"
                  " solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);\n"
                  " }\n"
                  "\n"
                  " }\n"
                  "\n"
                  " }\n"
                  "}\n\n");
    // SOLVE MODEL ODE CPU
    fprintf(file, "void solve_model_ode_cpu(real dt, real *sv, real stim_current) {\n"
                  "\n"
                  " real rY[NEQ], rDY[NEQ];\n"
                  "\n"
                  " for(int i = 0; i < NEQ; i++)\n"
                  " rY[i] = sv[i];\n"
                  "\n"
                  " RHS_cpu(rY, rDY, stim_current, dt);\n"
                  "\n"
                  " for(int i = 0; i < NEQ; i++)\n"
                  " sv[i] = dt*rDY[i] + rY[i];\n"
                  "}\n\n");
    fprintf(file, "\n\nfloat __agos_factorial(int f){\n\tif(f>=0 && "
                  "f<2)\n\t\treturn 1.0;\n\telse if(f < 0)\n\t\treturn "
                  "0.0/0.0;\n\tfor(int i=f-1; i>=2; i--)\n\t\tf *= i;\n\treturn "
                  "(float)f;\n}\n\n");
    /* adaptive forward-Euler driver (step-doubling error estimate) */
    fprintf(file, "void solve_forward_euler_cpu_adpt(real *sv, real stim_curr, real final_time, int sv_id) {\n"
                  "\n"
                  " real rDY[NEQ];\n"
                  "\n"
                  " real _tolerances_[NEQ];\n"
                  " real _aux_tol = 0.0;\n"
                  " //initializes the variables\n"
                  " real dt = ode_dt[sv_id];\n"
                  " real time_new = ode_time_new[sv_id];\n"
                  " real previous_dt = ode_previous_dt[sv_id];\n"
                  "\n"
                  " real edos_old_aux_[NEQ];\n"
                  " real edos_new_euler_[NEQ];\n"
                  " real *_k1__ = (real*) malloc(sizeof(real)*NEQ);\n"
                  " real *_k2__ = (real*) malloc(sizeof(real)*NEQ);\n"
                  " real *_k_aux__;\n"
                  "\n"
                  " const real _beta_safety_ = 0.8;\n"
                  "\n"
                  " const real __tiny_ = pow(abstol, 2.0f);\n"
                  "\n"
                  " if(time_new + dt > final_time) {\n"
                  " dt = final_time - time_new;\n"
                  " }\n"
                  "\n"
                  " RHS_cpu(sv, rDY, stim_curr, dt);\n"
                  " time_new += dt;\n"
                  "\n"
                  " for(int i = 0; i < NEQ; i++){\n"
                  " _k1__[i] = rDY[i];\n"
                  " }\n"
                  "\n"
                  " int count = 0;\n"
                  "\n"
                  " int count_limit = (final_time - time_new)/min_step;\n"
                  "\n"
                  " int aux_count_limit = count_limit+2000000;\n"
                  "\n"
                  " if(aux_count_limit > 0) {\n"
                  " count_limit = aux_count_limit;\n"
                  " }\n"
                  "\n"
                  " while(1) {\n"
                  "\n"
                  " for(int i = 0; i < NEQ; i++) {\n"
                  " //stores the old variables in a vector\n"
                  " edos_old_aux_[i] = sv[i];\n"
                  " //computes euler method\n"
                  " edos_new_euler_[i] = _k1__[i] * dt + edos_old_aux_[i];\n"
                  " //steps ahead to compute the rk2 method\n"
                  " sv[i] = edos_new_euler_[i];\n"
                  " }\n"
                  "\n"
                  " time_new += dt;\n"
                  " RHS_cpu(sv, rDY, stim_curr, dt);\n"
                  " time_new -= dt;//step back\n"
                  "\n"
                  " double greatestError = 0.0, auxError = 0.0;\n"
                  " for(int i = 0; i < NEQ; i++) {\n"
                  " // stores the new evaluation\n"
                  " _k2__[i] = rDY[i];\n"
                  " _aux_tol = fabs(edos_new_euler_[i]) * reltol;\n"
                  " _tolerances_[i] = (abstol > _aux_tol) ? abstol : _aux_tol;\n"
                  "\n"
                  " // finds the greatest error between the steps\n"
                  " auxError = fabs(((dt / 2.0) * (_k1__[i] - _k2__[i])) / _tolerances_[i]);\n"
                  "\n"
                  " greatestError = (auxError > greatestError) ? auxError : greatestError;\n"
                  " }\n"
                  " ///adapt the time step\n"
                  " greatestError += __tiny_;\n"
                  " previous_dt = dt;\n"
                  " ///adapt the time step\n"
                  " dt = _beta_safety_ * dt * sqrt(1.0f/greatestError);\n"
                  "\n"
                  " if (time_new + dt > final_time) {\n"
                  " dt = final_time - time_new;\n"
                  " }\n"
                  "\n"
                  " //it doesn't accept the solution\n"
                  " if ( count < count_limit && (greatestError >= 1.0f)) {\n"
                  " //restore the old values to do it again\n"
                  " for(int i = 0; i < NEQ; i++) {\n"
                  " sv[i] = edos_old_aux_[i];\n"
                  " }\n"
                  " count++;\n"
                  " //throw the results away and compute again\n"
                  " } else{//it accepts the solutions\n"
                  " count = 0;\n"
                  "\n"
                  " if (dt < min_step) {\n"
                  " dt = min_step;\n"
                  " }\n"
                  "\n"
                  " else if (dt > max_step && max_step != 0) {\n"
                  " dt = max_step;\n"
                  " }\n"
                  "\n"
                  " if (time_new + dt > final_time) {\n"
                  " dt = final_time - time_new;\n"
                  " }\n"
                  "\n"
                  " _k_aux__ = _k2__;\n"
                  " _k2__\t= _k1__;\n"
                  " _k1__\t= _k_aux__;\n"
                  "\n"
                  " //it steps the method ahead, with euler solution\n"
                  " for(int i = 0; i < NEQ; i++){\n"
                  " sv[i] = edos_new_euler_[i];\n"
                  " }\n"
                  "\n"
                  " if(time_new + previous_dt >= final_time) {\n"
                  " if((fabs(final_time - time_new) < 1.0e-5)) {\n"
                  " break;\n"
                  " } else if(time_new < final_time) {\n"
                  " dt = previous_dt = final_time - time_new;\n"
                  " time_new += previous_dt;\n"
                  " break;\n"
                  " } else {\n"
                  " dt = previous_dt = min_step;\n"
                  " time_new += (final_time - time_new);\n"
                  " printf(\"Error: %%lf\\n\", final_time - time_new);\n"
                  " break;\n"
                  " }\n"
                  " } else {\n"
                  " time_new += previous_dt;\n"
                  " }\n"
                  "\n"
                  " }\n"
                  " }\n"
                  "\n"
                  "\tode_dt[sv_id] = dt;\n"
                  "\tode_time_new[sv_id] = time_new;\n"
                  "\tode_previous_dt[sv_id] = previous_dt;\n"
                  "\t\n"
                  " free(_k1__);\n"
                  " free(_k2__);\n"
                  "}\n\b"); /* NOTE(review): trailing \b (backspace) looks like a typo for \n - verify */
    // RHS CPU
    fprintf(file, "void RHS_cpu(const real *sv, real *rDY, real stim_current, real dt) {\n\n");
    fprintf(file, " //State variables\n");
    cur = rewind_token_list(difvarlist);
    counter = 0;
    while (cur != NULL) {
        fprintf(file, " const real %s_old_ = sv[%d];\n", cur->token.content, counter);
        cur = cur->next;
        counter++;
    }
    fprintf(file, "\n");
    fprintf(file, " #include \"%s_common.inc.c\"", model_name);
    fprintf(file, "\n\n}\n\n");
    fclose(file);
    /* shared include: IFNUMBER macros, parameters, algebraic + differential eqs */
    sds common_name = sdsdup(model_name);
    common_name = sdscat(common_name, "_common.inc.c");
    file = fopen(common_name, "w");
    print_if(file, iflist);
    fprintf(file, " //Parameters\n");
    cur = rewind_token_list(parvarlist);
    while (cur != NULL) {
        fprintf(file, " const real %s = %.15e;\n", cur->token.content, cur->initialvalue);
        cur = cur->next;
    }
    print_right_alg(file, alglist, resolved_dep_list);
    print_diff(file, difflist);
    fclose(file);
    // HEADER FILE
    file = fopen(headername, "w");
    sds model_upper = sdsdup(model_name);
    sdstoupper(model_upper);
    sdstoupper(model_name); /* NOTE(review): also upper-cases the caller-visible name in place */
    fprintf(file, "#ifndef MONOALG3D_MODEL_%s_H\n", model_upper);
    fprintf(file, "#define MONOALG3D_MODEL_%s_H\n\n", model_upper);
    fprintf(file, "#include \"model_common.h\"\n\n");
    cur = rewind_token_list(difvarlist);
    fprintf(file, "#define NEQ %d\n", counter);
    fprintf(file, "#define INITIAL_V (%lff)\n\n", cur->initialvalue);
    fprintf(file, "#ifdef __CUDACC__\n"
                  "\n"
                  "#include \"../gpu_utils/gpu_utils.h\"\n"
                  "\n"
                  "__global__ void kernel_set_model_initial_conditions(real *sv, int num_volumes);\n"
                  "\n"
                  "__global__ void solve_gpu(real cur_time, real dt, real *sv, real* stim_currents,\n"
                  " uint32_t *cells_to_solve, uint32_t num_cells_to_solve,\n"
                  " int num_steps);\n"
                  "\n"
                  "inline __device__ void RHS_gpu(real *sv, real *rDY, real stim_current, int thread_id, real dt);\n"
                  "inline __device__ void solve_forward_euler_gpu_adpt(real *sv, real stim_curr, real final_time, int thread_id);\n"
                  "\n"
                  "#endif\n"
                  "\n"
                  "void RHS_cpu(const real *sv, real *rDY, real stim_current, real dt);\n"
                  "inline void solve_forward_euler_cpu_adpt(real *sv, real stim_curr, real final_time, int thread_id);\n"
                  "\n"
                  "void solve_model_ode_cpu(real dt, real *sv, real stim_current);\n\n");
    fprintf(file, "#endif //MONOALG3D_MODEL_%s_H\n\n", model_upper);
    fclose(file);
    sdsfree(model_upper);
    sdsfree(headername);
sdsfree(filename); } void generate_gpu_model(sds model_name) { if (eq_counter <= 0) { printf("ERROR - Equation not found\n"); exit(1); } preced_alg_list = create_preced_alg_list(rewind_alg_list(alglist)); AlgList *cural; TokenNode *resolved_dep_list = NULL; while (preced_alg_list != NULL) { cural = rewind_alg_list(preced_alg_list); while (cural != NULL) { cural = cural->next; } cural = rewind_alg_list(preced_alg_list); while (cural != NULL) { TokenNode *cureq = cural->eq->next; if (cureq == NULL) { resolved_dep_list = add_list(cural->eq->token, resolved_dep_list); preced_alg_list = delete_from_list(preced_alg_list, cural->eq->token); cural = cural->next; } else { while (cureq != NULL) { if (list_has_var(cureq->token, resolved_dep_list)) { if (cureq->prev != NULL) { cureq->prev->next = cureq->next; } if (cureq->next != NULL) { cureq->next->prev = cureq->prev; } } cureq = cureq->next; } cural = cural->next; } } } FILE *file; sds filename = sdsdup(model_name); filename = sdscat(filename, ".cu"); sds headername = sdsdup(model_name); headername = sdscat(headername, ".h"); file = fopen(filename, "w"); fprintf(file, "#include \"%s\"\n", headername); fprintf(file, "#include <stddef.h>\n" "#include <stdint.h>\n" "\n" "__constant__ size_t pitch;\n" "__constant__ real abstol;\n" "__constant__ real reltol;\n" "__constant__ real max_dt;\n" "__constant__ real min_dt;\n" "__constant__ uint8_t use_adpt;\n" "size_t pitch_h;" "\n\n"); // SET_ODE_INITIAL_CONDITIONS_GPU fprintf(file, "extern \"C\" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {\n" "\n" " uint8_t use_adpt_h = (uint8_t)solver->adaptive;\n" "\n" " check_cuda_error(cudaMemcpyToSymbol(use_adpt, &use_adpt_h, sizeof(uint8_t)));\n" " log_to_stdout_and_file(\"Using %s GPU model\\n\");\n" "\n" " uint32_t num_volumes = solver->original_num_cells;\n" "\n" " if(use_adpt_h) {\n" " real reltol_h = solver->rel_tol;\n" " real abstol_h = solver->abs_tol;\n" " real max_dt_h = solver->max_dt;\n" " real min_dt_h = 
solver->min_dt;\n" "\n" " check_cuda_error(cudaMemcpyToSymbol(reltol, &reltol_h, sizeof(real)));\n" " check_cuda_error(cudaMemcpyToSymbol(abstol, &abstol_h, sizeof(real)));\n" " check_cuda_error(cudaMemcpyToSymbol(max_dt, &max_dt_h, sizeof(real)));\n" " check_cuda_error(cudaMemcpyToSymbol(min_dt, &min_dt_h, sizeof(real)));\n" " log_to_stdout_and_file(\"Using Adaptive Euler model to solve the ODEs\\n\");\n" " } else {\n" " log_to_stdout_and_file(\"Using Euler model to solve the ODEs\\n\");\n" " }\n" "\n" " // execution configuration\n" " const int GRID = (num_volumes + BLOCK_SIZE - 1) / BLOCK_SIZE;\n" "\n" " size_t size = num_volumes * sizeof(real);\n" "\n" " if(use_adpt_h)\n" " check_cuda_error(cudaMallocPitch((void **)&(solver->sv), &pitch_h, size, (size_t)NEQ + 3));\n" " else\n" " check_cuda_error(cudaMallocPitch((void **)&(solver->sv), &pitch_h, size, (size_t)NEQ));\n" "\n" " check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));\n" "\n" " kernel_set_model_initial_conditions<<<GRID, BLOCK_SIZE>>>(solver->sv, num_volumes);\n" "\n" " check_cuda_error(cudaPeekAtLastError());\n" " cudaDeviceSynchronize();\n" " return pitch_h;\n" "}\n\n", model_name); fprintf(file, "extern \"C\" SOLVE_MODEL_ODES(solve_model_odes_gpu) {\n" "\n" " size_t num_cells_to_solve = ode_solver->num_cells_to_solve;\n" " uint32_t * cells_to_solve = ode_solver->cells_to_solve;\n" " real *sv = ode_solver->sv;\n" " real dt = ode_solver->min_dt;\n" " uint32_t num_steps = ode_solver->num_steps;\n" "\n" " // execution configuration\n" " const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1) / BLOCK_SIZE;\n" "\n" " size_t stim_currents_size = sizeof(real) * num_cells_to_solve;\n" " size_t cells_to_solve_size = sizeof(uint32_t) * num_cells_to_solve;\n" "\n" " real *stims_currents_device;\n" " check_cuda_error(cudaMalloc((void **)&stims_currents_device, stim_currents_size));\n" " check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, 
cudaMemcpyHostToDevice));\n" "\n" " // the array cells to solve is passed when we are using and adapative mesh\n" " uint32_t *cells_to_solve_device = NULL;\n" " if(cells_to_solve != NULL) {\n" " check_cuda_error(cudaMalloc((void **)&cells_to_solve_device, cells_to_solve_size));\n" " check_cuda_error(\n" " cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));\n" " }\n" "\n" " solve_gpu<<<GRID, BLOCK_SIZE>>>(current_t, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve,\n" " num_steps);\n" "\n" " check_cuda_error(cudaPeekAtLastError());\n" "\n" " check_cuda_error(cudaFree(stims_currents_device));\n" " if(cells_to_solve_device)\n" " check_cuda_error(cudaFree(cells_to_solve_device));\n" "}\n\n"); fprintf(file, "__global__ void kernel_set_model_initial_conditions(real *sv, int num_volumes) {\n" " int threadID = blockDim.x * blockIdx.x + threadIdx.x;\n" "\n" " if (threadID < num_volumes) {\n\n"); TokenNode *cur = rewind_token_list(difvarlist); int counter = 0; while (cur != NULL) { fprintf(file, " *((real * )((char *) sv + pitch * %d) + threadID) = " "%e; //%s %s \n", counter, cur->initialvalue, cur->token.content, cur->units); cur = cur->next; counter++; } fprintf(file, " if(use_adpt) {\n" " *((real *)((char *)sv + pitch * %d) + threadID) = min_dt; // dt\n" " *((real *)((char *)sv + pitch * %d) + threadID) = 0.0; // time_new\n" " *((real *)((char *)sv + pitch * %d) + threadID) = 0.0; // previous dt\n" " }\n", counter, counter+1, counter+2); fprintf(file, " }\n" "}\n\n"); // SOLVE_MODEL_ODES_GPU fprintf(file, "// Solving the model for each cell in the tissue matrix ni x nj\n" "__global__ void solve_gpu(real cur_time, real dt, real *sv, real *stim_currents, uint32_t *cells_to_solve,\n" " uint32_t num_cells_to_solve, int num_steps) {\n" " int threadID = blockDim.x * blockIdx.x + threadIdx.x;\n" " int sv_id;\n" "\n" " // Each thread solves one cell model\n" " if(threadID < num_cells_to_solve) {\n" " 
if(cells_to_solve)\n" " sv_id = cells_to_solve[threadID];\n" " else\n" " sv_id = threadID;\n" "\n" " if(!use_adpt) {\n" " real rDY[NEQ];\n" "\n" " for(int n = 0; n < num_steps; ++n) {\n" "\n" " RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);\n" "\n" " for(int i = 0; i < NEQ; i++) {\n" " *((real *)((char *)sv + pitch * i) + sv_id) =\n" " dt * rDY[i] + *((real *)((char *)sv + pitch * i) + sv_id);\n" " }\n" " }\n" " } else {\n" " solve_forward_euler_gpu_adpt(sv, stim_currents[threadID], cur_time + max_dt, sv_id);\n" " }\n" " }\n" "}\n\n"); fprintf(file, "inline __device__ void solve_forward_euler_gpu_adpt(real *sv, real stim_curr, real final_time, int thread_id) {\n" "\n" " #define DT *((real *)((char *)sv + pitch * %d) + thread_id)\n" " #define TIME_NEW *((real *)((char *)sv + pitch * %d) + thread_id)\n" " #define PREVIOUS_DT *((real *)((char *)sv + pitch * %d) + thread_id)\n" "\n" " real rDY[NEQ];\n" "\n" " real _tolerances_[NEQ];\n" " real _aux_tol = 0.0;\n" " real dt = DT;\n" " real time_new = TIME_NEW;\n" " real previous_dt = PREVIOUS_DT;\n" "\n" " real edos_old_aux_[NEQ];\n" " real edos_new_euler_[NEQ];\n" " real _k1__[NEQ];\n" " real _k2__[NEQ];\n" " real _k_aux__[NEQ];\n" " real sv_local[NEQ];\n" "\n" " const real _beta_safety_ = 0.8;\n" "\n" " const real __tiny_ = pow(abstol, 2.0f);\n" "\n" " // dt = ((time_new + dt) > final_time) ? 
(final_time - time_new) : dt;\n" " if(time_new + dt > final_time) {\n" " dt = final_time - time_new;\n" " }\n" "\n" " //#pragma unroll\n" " for(int i = 0; i < NEQ; i++) {\n" " sv_local[i] = *((real *)((char *)sv + pitch * i) + thread_id);\n" " }\n" "\n" " RHS_gpu(sv_local, rDY, stim_curr, thread_id, dt);\n" " time_new += dt;\n" "\n" " //#pragma unroll\n" " for(int i = 0; i < NEQ; i++) {\n" " _k1__[i] = rDY[i];\n" " }\n" "\n" " int count = 0;\n" "\n" " int count_limit = (final_time - time_new) / min_dt;\n" "\n" " int aux_count_limit = count_limit + 2000000;\n" "\n" " if(aux_count_limit > 0) {\n" " count_limit = aux_count_limit;\n" " }\n" "\n" " while(1) {\n" "\n" " for(int i = 0; i < NEQ; i++) {\n" " // stores the old variables in a vector\n" " edos_old_aux_[i] = sv_local[i];\n" " // //computes euler method\n" " edos_new_euler_[i] = _k1__[i] * dt + edos_old_aux_[i];\n" " // steps ahead to compute the rk2 method\n" " sv_local[i] = edos_new_euler_[i];\n" " }\n" "\n" " time_new += dt;\n" "\n" " RHS_gpu(sv_local, rDY, stim_curr, thread_id, dt);\n" " time_new -= dt; // step back\n" "\n" " real greatestError = 0.0, auxError = 0.0;\n" " for(int i = 0; i < NEQ; i++) {\n" "\n" " // stores the new evaluation\n" " _k2__[i] = rDY[i];\n" " _aux_tol = fabs(edos_new_euler_[i]) * reltol;\n" " _tolerances_[i] = (abstol > _aux_tol) ? abstol : _aux_tol;\n" "\n" " // finds the greatest error between the steps\n" " auxError = fabs(((dt / 2.0) * (_k1__[i] - _k2__[i])) / _tolerances_[i]);\n" "\n" " greatestError = (auxError > greatestError) ? 
auxError : greatestError;\n" " }\n" "\n" " /// adapt the time step\n" " greatestError += __tiny_;\n" " previous_dt = dt;\n" " /// adapt the time step\n" " dt = _beta_safety_ * dt * sqrt(1.0f / greatestError);\n" "\n" " if(time_new + dt > final_time) {\n" " dt = final_time - time_new;\n" " }\n" "\n" " // it doesn't accept the solution\n" " if(count < count_limit && (greatestError >= 1.0f)) {\n" " // restore the old values to do it again\n" " for(int i = 0; i < NEQ; i++) {\n" " sv_local[i] = edos_old_aux_[i];\n" " }\n" " count++;\n" " // throw the results away and compute again\n" " } else {\n" " count = 0;\n" "\n" " if(dt < min_dt) {\n" " dt = min_dt;\n" " }\n" "\n" " else if(dt > max_dt && max_dt != 0) {\n" " dt = max_dt;\n" " }\n" "\n" " if(time_new + dt > final_time) {\n" " dt = final_time - time_new;\n" " }\n" "\n" " // change vectors k1 e k2 , para que k2 seja aproveitado como k1 na proxima iteração\n" " for(int i = 0; i < NEQ; i++) {\n" " _k_aux__[i] = _k2__[i];\n" " _k2__[i] = _k1__[i];\n" " _k1__[i] = _k_aux__[i];\n" " }\n" "\n" " // it steps the method ahead, with euler solution\n" " for(int i = 0; i < NEQ; i++) {\n" " sv_local[i] = edos_new_euler_[i];\n" " }\n" "\n" " if(time_new + previous_dt >= final_time) {\n" " if((fabs(final_time - time_new) < 1.0e-5)) {\n" " break;\n" " } else if(time_new < final_time) {\n" " dt = previous_dt = final_time - time_new;\n" " time_new += previous_dt;\n" " break;\n" " } else {\n" " dt = previous_dt = min_dt;\n" " time_new += (final_time - time_new);\n" " printf(\"Error: %%d: %%lf\\n\", thread_id, final_time - time_new);\n" " break;\n" " }\n" " } else {\n" " time_new += previous_dt;\n" " }\n" " }\n" " }\n" "\n" " //#pragma unroll\n" " for(int i = 0; i < NEQ; i++) {\n" " *((real *)((char *)sv + pitch * i) + thread_id) = sv_local[i];\n" " }\n" "\n" " DT = dt;\n" " TIME_NEW = time_new;\n" " PREVIOUS_DT = previous_dt;\n" "}\n\n", counter, counter+1, counter+2); // RHS CPU fprintf(file, "\n\ninline __device__ void RHS_gpu(real 
*sv, real *rDY, real stim_current, int thread_id, real dt) {\n\n"); fprintf(file, " //State variables\n"); cur = rewind_token_list(difvarlist); counter = 0; while (cur != NULL) { fprintf(file, " real %s_old_; //%s\n", cur->token.content, cur->units); cur = cur->next; counter++; } fprintf(file, "\n\n if(use_adpt) {\n"); cur = rewind_token_list(difvarlist); counter = 0; while (cur != NULL) { fprintf(file, " %s_old_ = sv[%d];\n", cur->token.content, counter); cur = cur->next; counter++; } fprintf(file, " } else {\n"); cur = rewind_token_list(difvarlist); counter = 0; while (cur != NULL) { fprintf(file, " %s_old_ = *((real*)((char*)sv + pitch * %d) + " "thread_id);\n", cur->token.content, counter); cur = cur->next; counter++; } fprintf(file, " }\n\n"); fprintf(file, " #include \"%s_common.inc.c\"", model_name); fprintf(file, "\n\n}\n\n"); fclose(file); sdsfree(headername); sdsfree(filename); } static void generate_c_solver(sds model_name) { preced_alg_list = create_preced_alg_list(rewind_alg_list(alglist)); AlgList *cural; TokenNode *resolved_dep_list = NULL; while (preced_alg_list != NULL) { cural = rewind_alg_list(preced_alg_list); while (cural != NULL) { cural = cural->next; } cural = rewind_alg_list(preced_alg_list); while (cural != NULL) { TokenNode *cureq = cural->eq->next; if (cureq == NULL) { resolved_dep_list = add_list(cural->eq->token, resolved_dep_list); preced_alg_list = delete_from_list(preced_alg_list, cural->eq->token); cural = cural->next; } else { while (cureq != NULL) { if (list_has_var(cureq->token, resolved_dep_list)) { if (cureq->prev != NULL) { cureq->prev->next = cureq->next; } if (cureq->next != NULL) { cureq->next->prev = cureq->prev; } } cureq = cureq->next; } cural = cural->next; } } } FILE *file; sds filename = sdsdup(model_name); filename = sdscat(filename, "_single_cell_solver.c"); file = fopen(filename, "w"); fprintf(file, "#include <cvode/cvode.h>\n" "#include <math.h>\n" "#include <nvector/nvector_serial.h>\n" "#include <stdbool.h>\n" 
"#include <stdio.h>\n" "#include <stdlib.h>\n" "#include <sundials/sundials_dense.h>\n" "#include <sundials/sundials_types.h>\n" "#include <sunlinsol/sunlinsol_dense.h> \n" "#include <sunmatrix/sunmatrix_dense.h>" " \n\n"); // SET INITIAL CONDITIONS CPU fprintf(file, "void set_initial_conditions(N_Vector x0) { \n\n"); TokenNode *cur = rewind_token_list(difvarlist); int counter = 0; while (cur != NULL) { fprintf(file, " \tNV_Ith_S(x0, %d) = %ef; //%s %s \n", counter, cur->initialvalue, cur->token.content, cur->units); cur = cur->next; counter++; } fprintf(file, "}\n\n"); fprintf(file, "#define NEQ %d\n" "typedef realtype real;\n" "\n" "\n", counter); print_if(file, iflist); // RHS CPU fprintf(file, "\n\n static int %s(realtype time_new, N_Vector sv, N_Vector rDY, void *f_data) {\n\n", model_name); fprintf(file, " //State variables\n"); cur = rewind_token_list(difvarlist); counter = 0; while (cur != NULL) { fprintf(file, " const real %s_old_ = NV_Ith_S(sv, %d);\n", cur->token.content, counter); cur = cur->next; counter++; } fprintf(file, "\n"); fprintf(file, " //Parameters\n"); cur = rewind_token_list(parvarlist); while (cur != NULL) { fprintf(file, " const real %s = %.15ef;\n", cur->token.content, cur->initialvalue); cur = cur->next; } print_right_alg(file, alglist, resolved_dep_list); print_diff_cvode(file, difflist); fprintf(file, "\n\treturn 0; \n\n}\n\n"); fprintf(file, "static int check_flag(void *flagvalue, const char *funcname, int opt) {\n" "\n" " int *errflag;\n" "\n" " /* Check if SUNDIALS function returned NULL pointer - no memory allocated */\n" " if(opt == 0 && flagvalue == NULL) {\n" "\n" " fprintf(stderr, \"\\nSUNDIALS_ERROR: %%s() failed - returned NULL pointer\\n\\n\", funcname);\n" " return (1);\n" " }\n" "\n" " /* Check if flag < 0 */\n" " else if(opt == 1) {\n" " errflag = (int *)flagvalue;\n" " if(*errflag < 0) {\n" " fprintf(stderr, \"\\nSUNDIALS_ERROR: %%s() failed with flag = %%d\\n\\n\", funcname, *errflag);\n" " return (1);\n" " }\n" " }\n" 
"\n" " /* Check if function returned NULL pointer - no memory allocated */\n" " else if(opt == 2 && flagvalue == NULL) {\n" " fprintf(stderr, \"\\nMEMORY_ERROR: %%s() failed - returned NULL pointer\\n\\n\", funcname);\n" " return (1);\n" " }\n" "\n" " return (0);\n" "}\n"); fprintf(file, "void solve_ode(N_Vector y, float final_t) {\n" "\n" " void *cvode_mem = NULL;\n" " int flag;\n" "\n" " // Set up solver\n" " cvode_mem = CVodeCreate(CV_BDF);\n" "\n" " if(cvode_mem == 0) {\n" " fprintf(stderr, \"Error in CVodeMalloc: could not allocate\\n\");\n" " return;\n" " }\n" "\n" " flag = CVodeInit(cvode_mem, %s, 0, y);\n" " if(check_flag(&flag, \"CVodeInit\", 1))\n" " return;\n" "\n" " flag = CVodeSStolerances(cvode_mem, 1.49012e-6, 1.49012e-6);\n" " if(check_flag(&flag, \"CVodeSStolerances\", 1))\n" " return;\n" "\n" " /* Provide RHS flag as user data which can be access in user provided routines */\n" " // flag = CVodeSetUserData(cvode_mem, (void *)&params);\n" " //if(check_flag(&flag, \"CVodeSetUserData\", 1))\n" " // return;\n" "\n" " // Create dense SUNMatrix for use in linear solver\n" " SUNMatrix A = SUNDenseMatrix(NEQ, NEQ);\n" " if(check_flag((void *)A, \"SUNDenseMatrix\", 0))\n" " return;\n" "\n" " // Create dense linear solver for use by CVode\n" " SUNLinearSolver LS = SUNLinSol_Dense(y, A);\n" " if(check_flag((void *)LS, \"SUNLinSol_Dense\", 0))\n" " return;\n" "\n" " // Attach the linear solver and matrix to CVode by calling CVodeSetLinearSolver\n" " flag = CVodeSetLinearSolver(cvode_mem, LS, A);\n" " if(check_flag((void *)&flag, \"CVodeSetLinearSolver\", 1))\n" " return;\n" "\n" "\trealtype dt=0.01;\n" " realtype tout = dt;\n" " int retval;\n" " realtype t;\n" "\n" "\tFILE *f = fopen(\"out.txt\", \"w\");\n" "\n" "\twhile(tout < final_t) {\n" "\n" "\t\tretval = CVode(cvode_mem, tout, y, &t, CV_NORMAL);\n" "\n" "\t\tif(retval == CV_SUCCESS) {\t\t\t\n" "\t fprintf(f, \"%%lf \", t);\n" "\t\t\tfor(int i = 0; i < NEQ; i++) {\n" "\t \tfprintf(f, \"%%lf \", 
NV_Ith_S(y,i));\n" "\t\t\t} \n" "\t \n" "\t\t\tfprintf(f, \"\\n\");\n" "\n" "\t\t\ttout+=dt;\n" "\t\t}\n" "\n" "\t}\n" "\n" " // Free the linear solver memory\n" " SUNLinSolFree(LS);\n" " SUNMatDestroy(A);\n" " CVodeFree(&cvode_mem);\n" "}\n", model_name); fprintf(file, "\nint main(int argc, char **argv) {\n" "\n" "\tN_Vector x0 = N_VNew_Serial(NEQ);\n" "\n" "\tset_initial_conditions(x0);\n" "\n" "\tsolve_ode(x0, strtod(argv[1], NULL));\n" "\n" "\n" "\treturn (0);\n" "}"); fclose(file); printf("\n[INFO] To compile the single cell solver:\n"); printf("[INFO] gcc %s -o model -lm -lsundials_cvode\n\n", filename); printf("[INFO] To run the single cell solver\n"); printf("[INFO] ./model final_time.\n"); printf("[INFO] Ex: ./model 1000 will run a simulations with 1000 ms\n\n"); printf("[INFO] The model state variables will be saved in a file named out.txt\n"); printf("[INFO] You can use gnuplot to plot the results: \n"); printf("[INFO] gnuplot> plot 'out.txt' u 1:2 w lines \n\n"); sdsfree(filename); } int print_right_alg(FILE *file, AlgList *list, TokenNode *orderedlist) { if (file == NULL) { printf("ERROR - Can't write in file, print_alg"); exit(1); } TokenNode *curl = rewind_token_list(orderedlist); TokenNode *cur = NULL; AlgList *curalg = NULL; while (curl != NULL) { curalg = rewind_alg_list(list); while (strcmp(curalg->eq->token.content, curl->token.content) != 0) { curalg = curalg->next; } fprintf(file, "\n real calc_%s = ", curalg->eq->token.content); cur = curalg->eq; cur = cur->next->next; // if (strcmp(curalg->eq->token.content, (const char *)"i_Stim") == 0) { // fprintf(file, "stim_current"); // } else { //HACK: we should do this better :) sds tmp = sdsnew(""); tmp = sdscatfmt(tmp, "calc_%s", curalg->eq->token.content); print_eq(file, cur, tmp); // } fprintf(file, ";\t//%d", curalg->number); curl = curl->next; } fprintf(file, "\n"); return 0; } #endif //_COMPILE_H_
GB_unaryop__identity_uint32_int64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__identity_uint32_int64
// op(A') function: GB_tran__identity_uint32_int64

// C type: uint32_t
// A type: int64_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: z = x is the IDENTITY op; the typecast below does the
// actual int64_t -> uint32_t conversion work.
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, aij) \
    uint32_t z = (uint32_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__identity_uint32_int64
(
    uint32_t *Cx,       // Cx and Ax may be aliased
    int64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // element-wise apply over all anz entries, statically scheduled
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__identity_uint32_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose loop body is supplied by the shared template, driven by
    // the GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
kmeans.h
#ifndef KMEANS_H #define KMEANS_H vector<vector<int>> parallel_average_face(vector<vector<int>>& image_pixel,vector<string>& img_info, vector<string>& avg_face_info,int THREADS) { int num_subjects = int(img_info.size()/IMAGES_PER_SUBJECT); vector<int> start_index(num_subjects); vector<int> end_index(num_subjects); vector<vector<int>> avg_face; avg_face.resize(num_subjects,vector<int>(image_pixel[0].size())); avg_face_info.resize(num_subjects); string str = "avg_face_parallel/"; // #pragma omp parallel // { // #pragma omp nowait #pragma omp parallel for num_threads(THREADS) for(int i=0;i<num_subjects;i++){ avg_face_info[i] = img_info[IMAGES_PER_SUBJECT*i]; start_index[i] = IMAGES_PER_SUBJECT * i; end_index[i] = IMAGES_PER_SUBJECT * (i+1) - 1 ; #pragma omp collaspe(2) nowait for(int j= start_index[i];j<=end_index[i]; j++){ for (int k=0;k<image_pixel[0].size();k++) avg_face[i][k] += image_pixel[j][k] ; } } #pragma omp parallel for num_threads(THREADS) for(int i=0;i<avg_face.size();i++){ for (int k=0;k<avg_face[0].size();k++) avg_face[i][k] /= IMAGES_PER_SUBJECT; // print_image(avg_face[i],str+to_string(i),width,height); } return avg_face; } void initialize_centroids(vector<vector<int>>& image_pixel,vector<string> &img_info, vector<vector<int>>&centroids,int THREADS) { srand(10); //to initialize with avg face vector<string> avg_face_info; vector<vector<int>> avg_face = parallel_average_face(image_pixel,img_info,avg_face_info,THREADS); #pragma omp parallel for num_threads(THREADS) for (int i = 0; i < centroids.size(); i++){ //to initialize with avg face centroids[i] = avg_face[i]; //random initialization // for(int j = 0; j < centroids[0].size(); j++){ // centroids[i][j] = abs(rand())%256; // } } } void cluster_assignment(vector<vector<int>>& image_pixel, vector<string> &img_info, vector<vector<int>> &centroids, vector<int> &centroids_datapoint_map, int dist_criteria, int THREADS) { #pragma omp parallel for num_threads(THREADS) for(int i = 0; i < image_pixel.size(); 
i++){ double min_distance = INT_MAX*1.0,temp = 0.0; int closest_centroid = -1; for(int j = 0; j < centroids.size(); j++){ if (dist_criteria == euc) temp = euclidean_distance(centroids[j], image_pixel[i], THREADS); else if (dist_criteria == man) temp = manhattan_distance(centroids[j], image_pixel[i], THREADS); else if (dist_criteria == che) temp = chebyshev_distance(centroids[j], image_pixel[i], THREADS); else if (dist_criteria == hel) temp = hellinger_distance(centroids[j], image_pixel[i], THREADS); if(temp<min_distance){ min_distance = temp; closest_centroid = j; } } centroids_datapoint_map[i] = closest_centroid; } } double move_centroid(vector<vector<int>>& image_pixel, vector<string> &img_info, vector<vector<int>> &centroids, vector<int> &centroids_datapoint_map, int dist_criteria, int THREADS) { double movement = 0.0; #pragma omp parallel for num_threads(THREADS) reduction(+:movement) for(int i = 0;i < centroids.size(); i++){ vector<int> temp(centroids[0].size(),0); int count = 0; for(int j = 0; j < centroids_datapoint_map.size(); j++){ if(centroids_datapoint_map[j] == i){ count++; for(int k = 0; k < temp.size(); k++){ temp[k] += image_pixel[j][k]; } } } if(count>0){ for(int k = 0; k < temp.size(); k++){ temp[k] /= count; } if (dist_criteria == euc) movement += euclidean_distance(centroids[i], temp, THREADS); else if (dist_criteria == man) movement += manhattan_distance(centroids[i], temp, THREADS); else if (dist_criteria == che) movement += chebyshev_distance(centroids[i], temp, THREADS); else if (dist_criteria == hel) movement += hellinger_distance(centroids[i], temp, THREADS); centroids[i] = temp; } temp.clear(); } return movement; } void assign_centroids_label(vector<string> &img_info,vector<int> &centroids_datapoint_map,vector<string> &centroids_label, int THREADS){ #pragma omp parallel for num_threads(THREADS) for(int i = 0; i < centroids_label.size(); i++){ vector<int> vote(img_info.size()/IMAGES_PER_SUBJECT,0); for(int j = 0; j < 
centroids_datapoint_map.size(); j++){ if(centroids_datapoint_map[j]==i){ string temp = img_info[j]; temp.erase(temp.begin()); int index = atoi(temp.c_str()) - 1; vote[index] += 1; } } int max_vote = INT_MIN; string prediction = ""; for (int j = 0; j < vote.size(); j++){ if (vote[j] > max_vote){ max_vote = vote[j]; ostringstream str2; str2 << (j + 1); string sNew = str2.str(); prediction = "S" + sNew; } } centroids_label[i] = prediction; } } void prediction(vector<vector<int>> &test_images,vector<string> &test_image_info,vector<vector<int>>& centroids, vector<string>& centroids_label,vector<string>& predicted_image_info,int dist_criteria,int THREADS) { #pragma omp parallel for num_threads(THREADS) for(int i = 0; i < test_images.size(); i++){ double min_distane = INT_MAX*1.0,temp = 0.0; int closest_centroid = -1; for(int j = 0; j < centroids.size(); j++){ if (dist_criteria == euc) temp = euclidean_distance(centroids[j], test_images[i], THREADS); else if (dist_criteria == man) temp = manhattan_distance(centroids[j], test_images[i], THREADS); else if (dist_criteria == che) temp = chebyshev_distance(centroids[j], test_images[i], THREADS); else if (dist_criteria == hel) temp = hellinger_distance(centroids[j], test_images[i], THREADS); if(temp<min_distane){ closest_centroid = j; min_distane = temp; } } predicted_image_info[i] = centroids_label[closest_centroid]; } } void kmeans(vector<vector<int>> &image_pixel, vector<string> &img_info, int k, int width, int height, int dist_m, int THREADS,vector<string>& predicted_image_info,vector<vector<int>>& test_images, vector<string>& test_image_info) { double movement_threshold = 0.001, movement = 1.0*INT_MAX; int max_iterations = 10,iterations = 0; vector<vector<int>> centroids(k,vector<int>(image_pixel[0].size(),0)); vector<string> centroids_label(k," "); vector<int> centroids_datapoint_map(image_pixel.size(),0); //initialize centroids initialize_centroids(image_pixel,img_info,centroids,THREADS); while(movement > 
movement_threshold && iterations < max_iterations){ //cluster assignment cluster_assignment(image_pixel,img_info,centroids,centroids_datapoint_map,dist_m,THREADS); //move centroid movement = move_centroid(image_pixel,img_info,centroids,centroids_datapoint_map,dist_m,THREADS); cout<<"Movement: "<<movement<<"\n"; } assign_centroids_label(img_info,centroids_datapoint_map,centroids_label,THREADS); prediction(test_images,test_image_info,centroids,centroids_label,predicted_image_info,dist_m,THREADS); } #endif
rt_dswptr.c
#include "runtime.h"

/*
 * Dispatch the dswptr_ontile kernel (presumably a combined row-swap +
 * triangular update on a tile — TODO confirm against CORE_dswptr_ontile)
 * to whichever runtime is active in the current PLASMA context.
 *
 * descA        tile descriptor for the matrix being updated
 * Aij          tile to update (inout)
 * i1, i2       pivot index range applied by the swap
 * ipiv, inc    pivot vector and its stride
 * Akk, ldak    reference tile and its leading dimension (read-only)
 *
 * Under QUARK the call is queued through the QUARK task wrapper; under
 * OmpSs it is submitted as an OmpSs task.  NOTE: the two pragmas below
 * must immediately precede the CORE_dswptr_ontile() call — do not
 * reorder or insert statements between them and the call.
 */
void RT_CORE_dswptr_ontile(Quark *quark, Quark_Task_Flags *task_flags,
                           PLASMA_desc descA, double *Aij,
                           int i1, int i2, const int *ipiv, int inc,
                           const double *Akk, int ldak)
{
    plasma_context_t *plasma;
    plasma = plasma_context_self();
    if (plasma->runtime == PLASMA_QUARK) {
        /* QUARK runtime: enqueue via the QUARK task wrapper. */
        QUARK_CORE_dswptr_ontile(
            quark, task_flags,
            descA, Aij, i1, i2, ipiv, inc, Akk, ldak);
    }
    else if (plasma->runtime == PLASMA_OMPSS) {
        /* OmpSs runtime: task depends on Aij (inout) and reads
         * (i2-i1+1)*abs(inc) pivot entries plus the Akk tile. */
        #pragma omp target device (smp) copy_deps
        #pragma omp task inout([1]Aij) in([(i2-i1+1)*abs(inc)]ipiv, [1]Akk) label(dswptr_ontile)
        CORE_dswptr_ontile(descA, i1, i2, ipiv, inc, Akk, ldak);
    }
}
GB_binop__isge_fp32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd): GB_AaddB__isge_fp32
// A.*B function (eWiseMult): GB_AemultB__isge_fp32
// A*D function (colscale): GB_AxD__isge_fp32
// D*A function (rowscale): GB_DxB__isge_fp32
// C+=B function (dense accum): GB_Cdense_accumB__isge_fp32
// C+=b function (dense accum): GB_Cdense_accumb__isge_fp32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isge_fp32
// C=scalar+B GB_bind1st__isge_fp32
// C=scalar+B' GB_bind1st_tran__isge_fp32
// C=A+scalar GB_bind2nd__isge_fp32
// C=A'+scalar GB_bind2nd_tran__isge_fp32

// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = (aij >= bij)

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    float bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: ISGE yields 0.0f / 1.0f in a float result
#define GB_BINOP(z, x, y) \
    z = (x >= y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISGE || GxB_NO_FP32 || GxB_NO_ISGE_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__isge_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__isge_fp32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__isge_fp32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns first.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__isge_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    bool A_is_pattern,
    const GrB_Matrix D,
    bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__isge_fp32
(
    GrB_Matrix C,
    const GrB_Matrix D,
    bool D_is_pattern,
    const GrB_Matrix B,
    bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__isge_fp32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__isge_fp32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__isge_fp32
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        float bij = Bx [p] ;
        Cx [p] = (x >= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__isge_fp32
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        float aij = Ax [p] ;
        Cx [p] = (aij >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)          \
{                                  \
    float aij = Ax [pA] ;          \
    Cx [pC] = (x >= aij) ;         \
}

GrB_Info GB_bind1st_tran__isge_fp32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        float

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    #undef GB_ATYPE
    #define GB_ATYPE \
        float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)          \
{                                  \
    float aij = Ax [pA] ;          \
    Cx [pC] = (aij >= y) ;         \
}

GrB_Info GB_bind2nd_tran__isge_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
OpenMPClause.h
//===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines OpenMP AST classes for clauses.
/// There are clauses for executable directives, clauses for declarative
/// directives and clauses which can be used in both kinds of directives.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H
#define LLVM_CLANG_AST_OPENMPCLAUSE_H

#include "clang/AST/Decl.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPContext.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TrailingObjects.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <utility>

namespace clang {

class ASTContext;

//===----------------------------------------------------------------------===//
// AST classes for clauses.
//===----------------------------------------------------------------------===//

/// This is a basic class for representing single OpenMP clause.
class OMPClause {
  /// Starting location of the clause (the clause keyword).
  SourceLocation StartLoc;

  /// Ending location of the clause.
  SourceLocation EndLoc;

  /// Kind of the clause.
  OpenMPClauseKind Kind;

protected:
  OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc)
      : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {}

public:
  /// Returns the starting location of the clause.
  SourceLocation getBeginLoc() const { return StartLoc; }

  /// Returns the ending location of the clause.
  SourceLocation getEndLoc() const { return EndLoc; }

  /// Sets the starting location of the clause.
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }

  /// Sets the ending location of the clause.
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }

  /// Returns kind of OpenMP clause (private, shared, reduction, etc.).
  OpenMPClauseKind getClauseKind() const { return Kind; }

  /// An implicit clause has no written spelling; it is recognized here by an
  /// invalid starting location.
  bool isImplicit() const { return StartLoc.isInvalid(); }

  using child_iterator = StmtIterator;
  using const_child_iterator = ConstStmtIterator;
  using child_range = llvm::iterator_range<child_iterator>;
  using const_child_range = llvm::iterator_range<const_child_iterator>;

  child_range children();
  const_child_range children() const {
    auto Children = const_cast<OMPClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  /// Get the iterator range for the expressions used in the clauses. Used
  /// expressions include only the children that must be evaluated at the
  /// runtime before entering the construct.
  child_range used_children();
  // NOTE(review): this const overload delegates to children(), not to the
  // non-const used_children() — presumably a deliberate over-approximation;
  // confirm against upstream before relying on it.
  const_child_range used_children() const {
    auto Children = const_cast<OMPClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *) { return true; }
};

/// Class that handles pre-initialization statement for some clauses, like
/// 'schedule', 'firstprivate' etc.
class OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Pre-initialization statement for the clause.
  Stmt *PreInit = nullptr;

  /// Region that captures the associated stmt.
  OpenMPDirectiveKind CaptureRegion = llvm::omp::OMPD_unknown;

protected:
  /// Constructible only by derived clauses; the assert guarantees that get()
  /// can recover this mixin from the concrete clause kind.
  OMPClauseWithPreInit(const OMPClause *This) {
    assert(get(This) && "get is not tuned for pre-init.");
  }

  /// Set pre-initialization statement for the clause.
  void setPreInitStmt(Stmt *S,
                      OpenMPDirectiveKind ThisRegion = llvm::omp::OMPD_unknown) {
    PreInit = S;
    CaptureRegion = ThisRegion;
  }

public:
  /// Get pre-initialization statement for the clause.
  const Stmt *getPreInitStmt() const { return PreInit; }

  /// Get pre-initialization statement for the clause.
  Stmt *getPreInitStmt() { return PreInit; }

  /// Get capture region for the stmt in the clause.
  OpenMPDirectiveKind getCaptureRegion() const { return CaptureRegion; }

  static OMPClauseWithPreInit *get(OMPClause *C);
  static const OMPClauseWithPreInit *get(const OMPClause *C);
};

/// Class that handles post-update expression for some clauses, like
/// 'lastprivate', 'reduction' etc.
class OMPClauseWithPostUpdate : public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Post-update expression for the clause.
  Expr *PostUpdate = nullptr;

protected:
  OMPClauseWithPostUpdate(const OMPClause *This) : OMPClauseWithPreInit(This) {
    assert(get(This) && "get is not tuned for post-update.");
  }

  /// Set post-update expression for the clause.
  void setPostUpdateExpr(Expr *S) { PostUpdate = S; }

public:
  /// Get post-update expression for the clause.
  const Expr *getPostUpdateExpr() const { return PostUpdate; }

  /// Get post-update expression for the clause.
  Expr *getPostUpdateExpr() { return PostUpdate; }

  static OMPClauseWithPostUpdate *get(OMPClause *C);
  static const OMPClauseWithPostUpdate *get(const OMPClause *C);
};

/// This structure contains most locations needed by an OMPVarListClause.
struct OMPVarListLocTy {
  /// Starting location of the clause (the clause keyword).
  SourceLocation StartLoc;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Ending location of the clause.
  SourceLocation EndLoc;

  OMPVarListLocTy() = default;
  OMPVarListLocTy(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : StartLoc(StartLoc), LParenLoc(LParenLoc), EndLoc(EndLoc) {}
};

/// This represents clauses with the list of variables like 'private',
/// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the
/// '#pragma omp ...' directives.
template <class T> class OMPVarListClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number of variables in the list.
  unsigned NumVars;

protected:
  /// Build a clause with \a N variables
  ///
  /// \param K Kind of the clause.
  /// \param StartLoc Starting location of the clause (the clause keyword).
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N)
      : OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {}

  /// Fetches list of variables associated with this clause.
  /// (The variables are stored as trailing objects of the derived class T —
  /// hence the CRTP cast.)
  MutableArrayRef<Expr *> getVarRefs() {
    return MutableArrayRef<Expr *>(
        static_cast<T *>(this)->template getTrailingObjects<Expr *>(),
        NumVars);
  }

  /// Sets the list of variables for this clause.
  void setVarRefs(ArrayRef<Expr *> VL) {
    assert(VL.size() == NumVars &&
           "Number of variables is not the same as the preallocated buffer");
    std::copy(VL.begin(), VL.end(),
              static_cast<T *>(this)->template getTrailingObjects<Expr *>());
  }

public:
  using varlist_iterator = MutableArrayRef<Expr *>::iterator;
  using varlist_const_iterator = ArrayRef<const Expr *>::iterator;
  using varlist_range = llvm::iterator_range<varlist_iterator>;
  using varlist_const_range = llvm::iterator_range<varlist_const_iterator>;

  unsigned varlist_size() const { return NumVars; }
  bool varlist_empty() const { return NumVars == 0; }

  varlist_range varlists() {
    return varlist_range(varlist_begin(), varlist_end());
  }
  varlist_const_range varlists() const {
    return varlist_const_range(varlist_begin(), varlist_end());
  }

  varlist_iterator varlist_begin() { return getVarRefs().begin(); }
  varlist_iterator varlist_end() { return getVarRefs().end(); }
  varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); }
  varlist_const_iterator varlist_end() const { return getVarRefs().end(); }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Fetches list of all variables in the clause.
  ArrayRef<const Expr *> getVarRefs() const {
    return llvm::makeArrayRef(
        static_cast<const T *>(this)->template getTrailingObjects<Expr *>(),
        NumVars);
  }
};

/// This represents 'allocator' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp allocate(a) allocator(omp_default_mem_alloc)
/// \endcode
/// In this example directive '#pragma omp allocate' has simple 'allocator'
/// clause with the allocator 'omp_default_mem_alloc'.
class OMPAllocatorClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Expression with the allocator.
  Stmt *Allocator = nullptr;

  /// Set allocator.
void setAllocator(Expr *A) { Allocator = A; } public: /// Build 'allocator' clause with the given allocator. /// /// \param A Allocator. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPAllocatorClause(Expr *A, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_allocator, StartLoc, EndLoc), LParenLoc(LParenLoc), Allocator(A) {} /// Build an empty clause. OMPAllocatorClause() : OMPClause(llvm::omp::OMPC_allocator, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns allocator. Expr *getAllocator() const { return cast_or_null<Expr>(Allocator); } child_range children() { return child_range(&Allocator, &Allocator + 1); } const_child_range children() const { return const_child_range(&Allocator, &Allocator + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_allocator; } }; /// This represents clause 'allocate' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel private(a) allocate(omp_default_mem_alloc :a) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'private' /// and clause 'allocate' for the variable 'a'. class OMPAllocateClause final : public OMPVarListClause<OMPAllocateClause>, private llvm::TrailingObjects<OMPAllocateClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Allocator specified in the clause, or 'nullptr' if the default one is /// used. 
Expr *Allocator = nullptr; /// Position of the ':' delimiter in the clause; SourceLocation ColonLoc; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Allocator Allocator expression. /// \param ColonLoc Location of ':' delimiter. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPAllocateClause(SourceLocation StartLoc, SourceLocation LParenLoc, Expr *Allocator, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPAllocateClause>(llvm::omp::OMPC_allocate, StartLoc, LParenLoc, EndLoc, N), Allocator(Allocator), ColonLoc(ColonLoc) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPAllocateClause(unsigned N) : OMPVarListClause<OMPAllocateClause>(llvm::omp::OMPC_allocate, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } void setAllocator(Expr *A) { Allocator = A; } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Allocator Allocator expression. /// \param ColonLoc Location of ':' delimiter. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. static OMPAllocateClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, Expr *Allocator, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Returns the allocator expression or nullptr, if no allocator is specified. Expr *getAllocator() const { return Allocator; } /// Returns the location of the ':' delimiter. SourceLocation getColonLoc() const { return ColonLoc; } /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. 
/// \param N The number of variables. static OMPAllocateClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPAllocateClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_allocate; } }; /// This represents 'if' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp parallel if(parallel:a > 5) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'if' clause with /// condition 'a > 5' and directive name modifier 'parallel'. class OMPIfClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'if' clause. Stmt *Condition = nullptr; /// Location of ':' (if any). SourceLocation ColonLoc; /// Directive name modifier for the clause. OpenMPDirectiveKind NameModifier = llvm::omp::OMPD_unknown; /// Name modifier location. SourceLocation NameModifierLoc; /// Set condition. void setCondition(Expr *Cond) { Condition = Cond; } /// Set directive name modifier for the clause. void setNameModifier(OpenMPDirectiveKind NM) { NameModifier = NM; } /// Set location of directive name modifier for the clause. void setNameModifierLoc(SourceLocation Loc) { NameModifierLoc = Loc; } /// Set location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// Build 'if' clause with condition \a Cond. /// /// \param NameModifier [OpenMP 4.1] Directive name modifier of clause. /// \param Cond Condition of the clause. 
/// \param HelperCond Helper condition for the clause. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param NameModifierLoc Location of directive name modifier. /// \param ColonLoc [OpenMP 4.1] Location of ':'. /// \param EndLoc Ending location of the clause. OMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Cond, Stmt *HelperCond, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_if, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond), ColonLoc(ColonLoc), NameModifier(NameModifier), NameModifierLoc(NameModifierLoc) { setPreInitStmt(HelperCond, CaptureRegion); } /// Build an empty clause. OMPIfClause() : OMPClause(llvm::omp::OMPC_if, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } /// Return directive name modifier associated with the clause. OpenMPDirectiveKind getNameModifier() const { return NameModifier; } /// Return the location of directive name modifier. 
SourceLocation getNameModifierLoc() const { return NameModifierLoc; } child_range children() { return child_range(&Condition, &Condition + 1); } const_child_range children() const { return const_child_range(&Condition, &Condition + 1); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPIfClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_if; } }; /// This represents 'final' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp task final(a > 5) /// \endcode /// In this example directive '#pragma omp task' has simple 'final' /// clause with condition 'a > 5'. class OMPFinalClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'if' clause. Stmt *Condition = nullptr; /// Set condition. void setCondition(Expr *Cond) { Condition = Cond; } public: /// Build 'final' clause with condition \a Cond. /// /// \param Cond Condition of the clause. /// \param HelperCond Helper condition for the construct. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPFinalClause(Expr *Cond, Stmt *HelperCond, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_final, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond) { setPreInitStmt(HelperCond, CaptureRegion); } /// Build an empty clause. OMPFinalClause() : OMPClause(llvm::omp::OMPC_final, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. 
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } child_range children() { return child_range(&Condition, &Condition + 1); } const_child_range children() const { return const_child_range(&Condition, &Condition + 1); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPFinalClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_final; } }; /// This represents 'num_threads' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp parallel num_threads(6) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'num_threads' /// clause with number of threads '6'. class OMPNumThreadsClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'num_threads' clause. Stmt *NumThreads = nullptr; /// Set condition. void setNumThreads(Expr *NThreads) { NumThreads = NThreads; } public: /// Build 'num_threads' clause with condition \a NumThreads. /// /// \param NumThreads Number of threads for the construct. /// \param HelperNumThreads Helper Number of threads for the construct. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. 
OMPNumThreadsClause(Expr *NumThreads, Stmt *HelperNumThreads, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_num_threads, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumThreads(NumThreads) { setPreInitStmt(HelperNumThreads, CaptureRegion); } /// Build an empty clause. OMPNumThreadsClause() : OMPClause(llvm::omp::OMPC_num_threads, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns number of threads. Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); } child_range children() { return child_range(&NumThreads, &NumThreads + 1); } const_child_range children() const { return const_child_range(&NumThreads, &NumThreads + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_num_threads; } }; /// This represents 'safelen' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd safelen(4) /// \endcode /// In this example directive '#pragma omp simd' has clause 'safelen' /// with single expression '4'. /// If the safelen clause is used then no two iterations executed /// concurrently with SIMD instructions can have a greater distance /// in the logical iteration space than its value. The parameter of /// the safelen clause must be a constant positive integer expression. class OMPSafelenClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Safe iteration space distance. Stmt *Safelen = nullptr; /// Set safelen. 
void setSafelen(Expr *Len) { Safelen = Len; } public: /// Build 'safelen' clause. /// /// \param Len Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_safelen, StartLoc, EndLoc), LParenLoc(LParenLoc), Safelen(Len) {} /// Build an empty clause. explicit OMPSafelenClause() : OMPClause(llvm::omp::OMPC_safelen, SourceLocation(), SourceLocation()) { } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return safe iteration space distance. Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); } child_range children() { return child_range(&Safelen, &Safelen + 1); } const_child_range children() const { return const_child_range(&Safelen, &Safelen + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_safelen; } }; /// This represents 'simdlen' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd simdlen(4) /// \endcode /// In this example directive '#pragma omp simd' has clause 'simdlen' /// with single expression '4'. /// If the 'simdlen' clause is used then it specifies the preferred number of /// iterations to be executed concurrently. The parameter of the 'simdlen' /// clause must be a constant positive integer expression. class OMPSimdlenClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Safe iteration space distance. Stmt *Simdlen = nullptr; /// Set simdlen. 
void setSimdlen(Expr *Len) { Simdlen = Len; } public: /// Build 'simdlen' clause. /// /// \param Len Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPSimdlenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_simdlen, StartLoc, EndLoc), LParenLoc(LParenLoc), Simdlen(Len) {} /// Build an empty clause. explicit OMPSimdlenClause() : OMPClause(llvm::omp::OMPC_simdlen, SourceLocation(), SourceLocation()) { } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return safe iteration space distance. Expr *getSimdlen() const { return cast_or_null<Expr>(Simdlen); } child_range children() { return child_range(&Simdlen, &Simdlen + 1); } const_child_range children() const { return const_child_range(&Simdlen, &Simdlen + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_simdlen; } }; /// This represents 'collapse' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd collapse(3) /// \endcode /// In this example directive '#pragma omp simd' has clause 'collapse' /// with single expression '3'. /// The parameter must be a constant positive integer expression, it specifies /// the number of nested loops that should be collapsed into a single iteration /// space. class OMPCollapseClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Number of for-loops. Stmt *NumForLoops = nullptr; /// Set the number of associated for-loops. 
void setNumForLoops(Expr *Num) { NumForLoops = Num; } public: /// Build 'collapse' clause. /// /// \param Num Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPCollapseClause(Expr *Num, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc), NumForLoops(Num) {} /// Build an empty clause. explicit OMPCollapseClause() : OMPClause(llvm::omp::OMPC_collapse, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the number of associated for-loops. Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); } child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); } const_child_range children() const { return const_child_range(&NumForLoops, &NumForLoops + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_collapse; } }; /// This represents 'default' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp parallel default(shared) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'default' /// clause with kind 'shared'. class OMPDefaultClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'default' clause. llvm::omp::DefaultKind Kind = llvm::omp::OMP_DEFAULT_unknown; /// Start location of the kind in source code. SourceLocation KindKwLoc; /// Set kind of the clauses. 
/// /// \param K Argument of clause. void setDefaultKind(llvm::omp::DefaultKind K) { Kind = K; } /// Set argument location. /// /// \param KLoc Argument location. void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// Build 'default' clause with argument \a A ('none' or 'shared'). /// /// \param A Argument of the clause ('none' or 'shared'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPDefaultClause(llvm::omp::DefaultKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. OMPDefaultClause() : OMPClause(llvm::omp::OMPC_default, SourceLocation(), SourceLocation()) { } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. llvm::omp::DefaultKind getDefaultKind() const { return Kind; } /// Returns location of clause kind. SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_default; } }; /// This represents 'proc_bind' clause in the '#pragma omp ...' /// directive. 
/// /// \code /// #pragma omp parallel proc_bind(master) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'proc_bind' /// clause with kind 'master'. class OMPProcBindClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'proc_bind' clause. llvm::omp::ProcBindKind Kind = llvm::omp::OMP_PROC_BIND_unknown; /// Start location of the kind in source code. SourceLocation KindKwLoc; /// Set kind of the clause. /// /// \param K Kind of clause. void setProcBindKind(llvm::omp::ProcBindKind K) { Kind = K; } /// Set clause kind location. /// /// \param KLoc Kind location. void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// Build 'proc_bind' clause with argument \a A ('master', 'close' or /// 'spread'). /// /// \param A Argument of the clause ('master', 'close' or 'spread'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPProcBindClause(llvm::omp::ProcBindKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. OMPProcBindClause() : OMPClause(llvm::omp::OMPC_proc_bind, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. llvm::omp::ProcBindKind getProcBindKind() const { return Kind; } /// Returns location of clause kind. 
SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_proc_bind; } }; /// This represents 'unified_address' clause in the '#pragma omp requires' /// directive. /// /// \code /// #pragma omp requires unified_address /// \endcode /// In this example directive '#pragma omp requires' has 'unified_address' /// clause. class OMPUnifiedAddressClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'unified_address' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_unified_address, StartLoc, EndLoc) {} /// Build an empty clause. OMPUnifiedAddressClause() : OMPClause(llvm::omp::OMPC_unified_address, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_unified_address; } }; /// This represents 'unified_shared_memory' clause in the '#pragma omp requires' /// directive. 
///
/// \code
/// #pragma omp requires unified_shared_memory
/// \endcode
/// In this example directive '#pragma omp requires' has 'unified_shared_memory'
/// clause.
class OMPUnifiedSharedMemoryClause final : public OMPClause {
public:
  friend class OMPClauseReader;
  /// Build 'unified_shared_memory' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_unified_shared_memory, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPUnifiedSharedMemoryClause()
      : OMPClause(llvm::omp::OMPC_unified_shared_memory, SourceLocation(),
                  SourceLocation()) {}

  // No child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_unified_shared_memory;
  }
};

/// This represents 'reverse_offload' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires reverse_offload
/// \endcode
/// In this example directive '#pragma omp requires' has 'reverse_offload'
/// clause.
class OMPReverseOffloadClause final : public OMPClause {
public:
  friend class OMPClauseReader;
  /// Build 'reverse_offload' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_reverse_offload, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPReverseOffloadClause()
      : OMPClause(llvm::omp::OMPC_reverse_offload, SourceLocation(),
                  SourceLocation()) {}

  // No child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_reverse_offload;
  }
};

/// This represents 'dynamic_allocators' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires dynamic_allocators
/// \endcode
/// In this example directive '#pragma omp requires' has 'dynamic_allocators'
/// clause.
class OMPDynamicAllocatorsClause final : public OMPClause {
public:
  friend class OMPClauseReader;
  /// Build 'dynamic_allocators' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_dynamic_allocators, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPDynamicAllocatorsClause()
      : OMPClause(llvm::omp::OMPC_dynamic_allocators, SourceLocation(),
                  SourceLocation()) {}

  // No child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_dynamic_allocators;
  }
};

/// This represents 'atomic_default_mem_order' clause in the '#pragma omp
/// requires' directive.
///
/// \code
/// #pragma omp requires atomic_default_mem_order(seq_cst)
/// \endcode
/// In this example directive '#pragma omp requires' has simple
/// 'atomic_default_mem_order' clause with kind 'seq_cst'.
class OMPAtomicDefaultMemOrderClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('
  SourceLocation LParenLoc;

  /// A kind of the 'atomic_default_mem_order' clause.
  OpenMPAtomicDefaultMemOrderClauseKind Kind =
      OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Kind of clause.
  void setAtomicDefaultMemOrderKind(OpenMPAtomicDefaultMemOrderClauseKind K) {
    Kind = K;
  }

  /// Set clause kind location.
  ///
  /// \param KLoc Kind location.
  void setAtomicDefaultMemOrderKindKwLoc(SourceLocation KLoc) {
    KindKwLoc = KLoc;
  }

public:
  /// Build 'atomic_default_mem_order' clause with argument \a A ('seq_cst',
  /// 'acq_rel' or 'relaxed').
  ///
  /// \param A Argument of the clause ('seq_cst', 'acq_rel' or 'relaxed').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPAtomicDefaultMemOrderClause(OpenMPAtomicDefaultMemOrderClauseKind A,
                                 SourceLocation ALoc, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_atomic_default_mem_order, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPAtomicDefaultMemOrderClause()
      : OMPClause(llvm::omp::OMPC_atomic_default_mem_order, SourceLocation(),
                  SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  OpenMPAtomicDefaultMemOrderClauseKind getAtomicDefaultMemOrderKind() const {
    return Kind;
  }

  /// Returns location of clause kind.
  SourceLocation getAtomicDefaultMemOrderKindKwLoc() const { return KindKwLoc; }

  // No child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_atomic_default_mem_order;
  }
};

/// This represents 'schedule' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp for' has 'schedule' clause with
/// arguments 'static' and '3'.
class OMPScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'schedule' clause.
  OpenMPScheduleClauseKind Kind = OMPC_SCHEDULE_unknown;

  /// Modifiers for 'schedule' clause.
  enum {FIRST, SECOND, NUM_MODIFIERS};
  OpenMPScheduleClauseModifier Modifiers[NUM_MODIFIERS];

  /// Locations of modifiers.
  SourceLocation ModifiersLoc[NUM_MODIFIERS];

  /// Start location of the schedule kind in source code.
  SourceLocation KindLoc;

  /// Location of ',' (if any).
  SourceLocation CommaLoc;

  /// Chunk size.
  Expr *ChunkSize = nullptr;

  /// Set schedule kind.
  ///
  /// \param K Schedule kind.
  void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; }

  /// Set the first schedule modifier.
  ///
  /// \param M Schedule modifier.
  void setFirstScheduleModifier(OpenMPScheduleClauseModifier M) {
    Modifiers[FIRST] = M;
  }

  /// Set the second schedule modifier.
  ///
  /// \param M Schedule modifier.
  void setSecondScheduleModifier(OpenMPScheduleClauseModifier M) {
    Modifiers[SECOND] = M;
  }

  /// Set location of the first schedule modifier.
  void setFirstScheduleModifierLoc(SourceLocation Loc) {
    ModifiersLoc[FIRST] = Loc;
  }

  /// Set location of the second schedule modifier.
  void setSecondScheduleModifierLoc(SourceLocation Loc) {
    ModifiersLoc[SECOND] = Loc;
  }

  /// Set a schedule modifier: fills the first unset slot, then the second.
  /// At most two modifiers may be set; asserts otherwise.
  ///
  /// \param M Schedule modifier.
  // NOTE(review): the 'Modifer' spelling is part of the existing interface;
  // renaming it would break callers, so it is preserved here.
  void setScheduleModifer(OpenMPScheduleClauseModifier M) {
    if (Modifiers[FIRST] == OMPC_SCHEDULE_MODIFIER_unknown)
      Modifiers[FIRST] = M;
    else {
      assert(Modifiers[SECOND] == OMPC_SCHEDULE_MODIFIER_unknown);
      Modifiers[SECOND] = M;
    }
  }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

  /// Set location of ','.
  ///
  /// \param Loc Location of ','.
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }

  /// Set chunk size.
  ///
  /// \param E Chunk size.
  void setChunkSize(Expr *E) { ChunkSize = E; }

public:
  /// Build 'schedule' clause with schedule kind \a Kind and chunk size
  /// expression \a ChunkSize.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Schedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives.
  /// \param M1 The first modifier applied to 'schedule' clause.
  /// \param M1Loc Location of the first modifier
  /// \param M2 The second modifier applied to 'schedule' clause.
  /// \param M2Loc Location of the second modifier
  OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation KLoc, SourceLocation CommaLoc,
                    SourceLocation EndLoc, OpenMPScheduleClauseKind Kind,
                    Expr *ChunkSize, Stmt *HelperChunkSize,
                    OpenMPScheduleClauseModifier M1, SourceLocation M1Loc,
                    OpenMPScheduleClauseModifier M2, SourceLocation M2Loc)
      : OMPClause(llvm::omp::OMPC_schedule, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind),
        KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {
    setPreInitStmt(HelperChunkSize);
    Modifiers[FIRST] = M1;
    Modifiers[SECOND] = M2;
    ModifiersLoc[FIRST] = M1Loc;
    ModifiersLoc[SECOND] = M2Loc;
  }

  /// Build an empty clause.
  explicit OMPScheduleClause()
      : OMPClause(llvm::omp::OMPC_schedule, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {
    Modifiers[FIRST] = OMPC_SCHEDULE_MODIFIER_unknown;
    Modifiers[SECOND] = OMPC_SCHEDULE_MODIFIER_unknown;
  }

  /// Get kind of the clause.
  OpenMPScheduleClauseKind getScheduleKind() const { return Kind; }

  /// Get the first modifier of the clause.
  OpenMPScheduleClauseModifier getFirstScheduleModifier() const {
    return Modifiers[FIRST];
  }

  /// Get the second modifier of the clause.
  OpenMPScheduleClauseModifier getSecondScheduleModifier() const {
    return Modifiers[SECOND];
  }

  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }

  /// Get kind location.
  SourceLocation getScheduleKindLoc() { return KindLoc; }

  /// Get the first modifier location.
  SourceLocation getFirstScheduleModifierLoc() const {
    return ModifiersLoc[FIRST];
  }

  /// Get the second modifier location.
  SourceLocation getSecondScheduleModifierLoc() const {
    return ModifiersLoc[SECOND];
  }

  /// Get location of ','.
  SourceLocation getCommaLoc() { return CommaLoc; }

  /// Get chunk size.
  Expr *getChunkSize() { return ChunkSize; }

  /// Get chunk size.
  const Expr *getChunkSize() const { return ChunkSize; }

  // The only child statement is the chunk-size expression.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
                       reinterpret_cast<Stmt **>(&ChunkSize) + 1);
  }

  const_child_range children() const {
    auto Children = const_cast<OMPScheduleClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_schedule;
  }
};

/// This represents 'ordered' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for ordered (2)
/// \endcode
/// In this example directive '#pragma omp for' has 'ordered' clause with
/// parameter 2.
class OMPOrderedClause final
    : public OMPClause,
      private llvm::TrailingObjects<OMPOrderedClause, Expr *> {
  friend class OMPClauseReader;
  friend TrailingObjects;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number of for-loops.
  Stmt *NumForLoops = nullptr;

  /// Real number of loops.
  unsigned NumberOfLoops = 0;

  /// Build 'ordered' clause.
  ///
  /// \param Num Expression, possibly associated with this clause.
  /// \param NumLoops Number of loops, associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPOrderedClause(Expr *Num, unsigned NumLoops, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_ordered, StartLoc, EndLoc),
        LParenLoc(LParenLoc), NumForLoops(Num), NumberOfLoops(NumLoops) {}

  /// Build an empty clause.
  explicit OMPOrderedClause(unsigned NumLoops)
      : OMPClause(llvm::omp::OMPC_ordered, SourceLocation(), SourceLocation()),
        NumberOfLoops(NumLoops) {}

  /// Set the number of associated for-loops.
  void setNumForLoops(Expr *Num) { NumForLoops = Num; }

public:
  /// Build 'ordered' clause.
  ///
  /// \param Num Expression, possibly associated with this clause.
  /// \param NumLoops Number of loops, associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  static OMPOrderedClause *Create(const ASTContext &C, Expr *Num,
                                  unsigned NumLoops, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);

  /// Build an empty clause.
  static OMPOrderedClause *CreateEmpty(const ASTContext &C, unsigned NumLoops);

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the expression for the number of associated for-loops, if any.
  Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }

  /// Set number of iterations for the specified loop.
  void setLoopNumIterations(unsigned NumLoop, Expr *NumIterations);
  /// Get number of iterations for all the loops.
  ArrayRef<Expr *> getLoopNumIterations() const;

  /// Set loop counter for the specified loop.
  void setLoopCounter(unsigned NumLoop, Expr *Counter);
  /// Get loop counter for the specified loop.
  Expr *getLoopCounter(unsigned NumLoop);
  const Expr *getLoopCounter(unsigned NumLoop) const;

  // The only child statement is the optional loop-count expression.
  child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }

  const_child_range children() const {
    return const_child_range(&NumForLoops, &NumForLoops + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_ordered;
  }
};

/// This represents 'nowait' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for nowait
/// \endcode
/// In this example directive '#pragma omp for' has 'nowait' clause.
class OMPNowaitClause : public OMPClause {
public:
  /// Build 'nowait' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_nowait, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPNowaitClause()
      : OMPClause(llvm::omp::OMPC_nowait, SourceLocation(), SourceLocation()) {}

  // No child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_nowait;
  }
};

/// This represents 'untied' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task untied
/// \endcode
/// In this example directive '#pragma omp task' has 'untied' clause.
class OMPUntiedClause : public OMPClause {
public:
  /// Build 'untied' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_untied, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPUntiedClause()
      : OMPClause(llvm::omp::OMPC_untied, SourceLocation(), SourceLocation()) {}

  // No child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_untied;
  }
};

/// This represents 'mergeable' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task mergeable
/// \endcode
/// In this example directive '#pragma omp task' has 'mergeable' clause.
class OMPMergeableClause : public OMPClause {
public:
  /// Build 'mergeable' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_mergeable, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPMergeableClause()
      : OMPClause(llvm::omp::OMPC_mergeable, SourceLocation(),
                  SourceLocation()) {}

  // No child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_mergeable;
  }
};

/// This represents 'read' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic read
/// \endcode
/// In this example directive '#pragma omp atomic' has 'read' clause.
class OMPReadClause : public OMPClause {
public:
  /// Build 'read' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_read, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPReadClause()
      : OMPClause(llvm::omp::OMPC_read, SourceLocation(), SourceLocation()) {}

  // No child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_read;
  }
};

/// This represents 'write' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic write
/// \endcode
/// In this example directive '#pragma omp atomic' has 'write' clause.
class OMPWriteClause : public OMPClause {
public:
  /// Build 'write' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_write, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPWriteClause()
      : OMPClause(llvm::omp::OMPC_write, SourceLocation(), SourceLocation()) {}

  // No child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_write;
  }
};

/// This represents 'update' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic update
/// \endcode
/// In this example directive '#pragma omp atomic' has 'update' clause.
/// Also, this class represents 'update' clause in  '#pragma omp depobj'
/// directive.
///
/// \code
/// #pragma omp depobj(a) update(in)
/// \endcode
/// In this example directive '#pragma omp depobj' has 'update' clause with 'in'
/// dependence kind.
class OMPUpdateClause final
    : public OMPClause,
      private llvm::TrailingObjects<OMPUpdateClause, SourceLocation,
                                    OpenMPDependClauseKind> {
  friend class OMPClauseReader;
  friend TrailingObjects;

  /// true if extended version of the clause for 'depobj' directive.
  bool IsExtended = false;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<SourceLocation>) const {
    // 2 locations: for '(' and argument location.
    return IsExtended ? 2 : 0;
  }

  /// Sets the location of '(' in clause for 'depobj' directive.
  void setLParenLoc(SourceLocation Loc) {
    assert(IsExtended && "Expected extended clause.");
    *getTrailingObjects<SourceLocation>() = Loc;
  }

  /// Sets the location of the argument in clause for 'depobj' directive.
  void setArgumentLoc(SourceLocation Loc) {
    assert(IsExtended && "Expected extended clause.");
    *std::next(getTrailingObjects<SourceLocation>(), 1) = Loc;
  }

  /// Sets the dependence kind for the clause for 'depobj' directive.
  void setDependencyKind(OpenMPDependClauseKind DK) {
    assert(IsExtended && "Expected extended clause.");
    *getTrailingObjects<OpenMPDependClauseKind>() = DK;
  }

  /// Build 'update' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc,
                  bool IsExtended)
      : OMPClause(llvm::omp::OMPC_update, StartLoc, EndLoc),
        IsExtended(IsExtended) {}

  /// Build an empty clause.
  OMPUpdateClause(bool IsExtended)
      : OMPClause(llvm::omp::OMPC_update, SourceLocation(), SourceLocation()),
        IsExtended(IsExtended) {}

public:
  /// Creates clause for 'atomic' directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  static OMPUpdateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation EndLoc);

  /// Creates clause for 'depobj' directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ArgumentLoc Location of the argument.
  /// \param DK Dependence kind.
  /// \param EndLoc Ending location of the clause.
  static OMPUpdateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation ArgumentLoc,
                                 OpenMPDependClauseKind DK,
                                 SourceLocation EndLoc);

  /// Creates an empty clause.
  ///
  /// \param C AST context.
  /// \param IsExtended true if extended clause for 'depobj' directive must be
  /// created.
  static OMPUpdateClause *CreateEmpty(const ASTContext &C, bool IsExtended);

  /// Checks if the clause is the extended clause for 'depobj' directive.
  bool isExtended() const { return IsExtended; }

  // No child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  /// Gets the location of '(' in clause for 'depobj' directive.
  SourceLocation getLParenLoc() const {
    assert(IsExtended && "Expected extended clause.");
    return *getTrailingObjects<SourceLocation>();
  }

  /// Gets the location of argument in clause for 'depobj' directive.
  SourceLocation getArgumentLoc() const {
    assert(IsExtended && "Expected extended clause.");
    return *std::next(getTrailingObjects<SourceLocation>(), 1);
  }

  /// Gets the dependence kind in clause for 'depobj' directive.
  OpenMPDependClauseKind getDependencyKind() const {
    assert(IsExtended && "Expected extended clause.");
    return *getTrailingObjects<OpenMPDependClauseKind>();
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_update;
  }
};

/// This represents 'capture' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has 'capture' clause.
class OMPCaptureClause : public OMPClause {
public:
  /// Build 'capture' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_capture, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPCaptureClause()
      : OMPClause(llvm::omp::OMPC_capture, SourceLocation(),
                  SourceLocation()) {}

  // No child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_capture;
  }
};

/// This represents 'seq_cst' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic seq_cst
/// \endcode
/// In this example directive '#pragma omp atomic' has 'seq_cst' clause.
class OMPSeqCstClause : public OMPClause {
public:
  /// Build 'seq_cst' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_seq_cst, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPSeqCstClause()
      : OMPClause(llvm::omp::OMPC_seq_cst, SourceLocation(),
                  SourceLocation()) {}

  // No child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_seq_cst;
  }
};

/// This represents 'acq_rel' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush acq_rel
/// \endcode
/// In this example directive '#pragma omp flush' has 'acq_rel' clause.
class OMPAcqRelClause final : public OMPClause {
public:
  /// Build 'acq_rel' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPAcqRelClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_acq_rel, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPAcqRelClause()
      : OMPClause(llvm::omp::OMPC_acq_rel, SourceLocation(),
                  SourceLocation()) {}

  // No child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_acq_rel;
  }
};

/// This represents 'acquire' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush acquire
/// \endcode
/// In this example directive '#pragma omp flush' has 'acquire' clause.
class OMPAcquireClause final : public OMPClause {
public:
  /// Build 'acquire' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPAcquireClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_acquire, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPAcquireClause()
      : OMPClause(llvm::omp::OMPC_acquire, SourceLocation(),
                  SourceLocation()) {}

  // No child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_acquire;
  }
};

/// This represents 'release' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush release
/// \endcode
/// In this example directive '#pragma omp flush' has 'release' clause.
class OMPReleaseClause final : public OMPClause {
public:
  /// Build 'release' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPReleaseClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_release, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPReleaseClause()
      : OMPClause(llvm::omp::OMPC_release, SourceLocation(),
                  SourceLocation()) {}

  // No child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_release;
  }
};

/// This represents 'relaxed' clause in the '#pragma omp atomic'
/// directives.
///
/// \code
/// #pragma omp atomic relaxed
/// \endcode
/// In this example directive '#pragma omp atomic' has 'relaxed' clause.
class OMPRelaxedClause final : public OMPClause {
public:
  /// Build 'relaxed' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPRelaxedClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_relaxed, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPRelaxedClause()
      : OMPClause(llvm::omp::OMPC_relaxed, SourceLocation(), SourceLocation()) {
  }

  // This clause has no associated expressions, so it has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_relaxed;
  }
};

/// This represents clause 'private' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// with the variables 'a' and 'b'.
class OMPPrivateClause final
    : public OMPVarListClause<OMPPrivateClause>,
      private llvm::TrailingObjects<OMPPrivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPPrivateClause>(llvm::omp::OMPC_private, StartLoc,
                                           LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPPrivateClause(unsigned N)
      : OMPVarListClause<OMPPrivateClause>(llvm::omp::OMPC_private,
                                           SourceLocation(), SourceLocation(),
                                           SourceLocation(), N) {}

  /// Sets the list of references to private copies with initializers for
  /// new private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for
  /// new private variables.
  // NOTE: the private copies are tail-allocated directly after the variable
  // list, hence varlist_end() is the base of this array.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param PrivateVL List of references to private copies with initializers.
  static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc, ArrayRef<Expr *> VL,
                                  ArrayRef<Expr *> PrivateVL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }

  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPPrivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_private;
  }
};

/// This represents clause 'firstprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel firstprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'firstprivate'
/// with the variables 'a' and 'b'.
class OMPFirstprivateClause final
    : public OMPVarListClause<OMPFirstprivateClause>,
      public OMPClauseWithPreInit,
      private llvm::TrailingObjects<OMPFirstprivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause. OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPFirstprivateClause>(llvm::omp::OMPC_firstprivate, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPreInit(this) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPFirstprivateClause(unsigned N) : OMPVarListClause<OMPFirstprivateClause>( llvm::omp::OMPC_firstprivate, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPreInit(this) {} /// Sets the list of references to private copies with initializers for /// new private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// Gets the list of references to private copies with initializers for /// new private variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Sets the list of references to initializer variables for new /// private variables. /// \param VL List of references. void setInits(ArrayRef<Expr *> VL); /// Gets the list of references to initializer variables for new /// private variables. MutableArrayRef<Expr *> getInits() { return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size()); } ArrayRef<const Expr *> getInits() const { return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the original variables. /// \param PrivateVL List of references to private copies with initializers. 
  /// \param InitVL List of references to auto generated variables used for
  /// initialization of a single array element. Used if firstprivate variable is
  /// of array type.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  static OMPFirstprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL,
         ArrayRef<Expr *> InitVL, Stmt *PreInit);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }
  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }
  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFirstprivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range used_children() const {
    auto Children = const_cast<OMPFirstprivateClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_firstprivate;
  }
};

/// This represents clause 'lastprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd lastprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'lastprivate'
/// with the variables 'a' and 'b'.
class OMPLastprivateClause final
    : public OMPVarListClause<OMPLastprivateClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPLastprivateClause, Expr *> {
  // There are 4 additional tail-allocated arrays at the end of the class:
  // 1. Contains list of pseudo variables with the default initialization for
  // each non-firstprivate variables. Used in codegen for initialization of
  // lastprivate copies.
  // 2. List of helper expressions for proper generation of assignment operation
  // required for lastprivate clause. This list represents private variables
  // (for arrays, single array element).
  // 3. List of helper expressions for proper generation of assignment operation
  // required for lastprivate clause. This list represents original variables
  // (for arrays, single array element).
  // 4. List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of final assignment performed by the
  // lastprivate clause.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Optional lastprivate kind, e.g. 'conditional', if specified by user.
  OpenMPLastprivateModifier LPKind;
  /// Optional location of the lastprivate kind, if specified by user.
  SourceLocation LPKindLoc;
  /// Optional colon location, if specified by user.
  SourceLocation ColonLoc;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param LPKind Lastprivate kind, e.g. 'conditional'.
  /// \param LPKindLoc Location of the lastprivate kind.
  /// \param ColonLoc Location of the ':' symbol if lastprivate kind is used.
  /// \param N Number of the variables in the clause.
  OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, OpenMPLastprivateModifier LPKind,
                       SourceLocation LPKindLoc, SourceLocation ColonLoc,
                       unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(llvm::omp::OMPC_lastprivate,
                                               StartLoc, LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), LPKind(LPKind), LPKindLoc(LPKindLoc),
        ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPLastprivateClause(unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(
            llvm::omp::OMPC_lastprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Get the list of helper expressions for initialization of private
  /// copies for lastprivate variables.
  // Tail-allocated array #1 (see class comment), directly after the variable
  // list.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private variables (for arrays, single
  /// array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions.
  // Tail-allocated array #2, directly after the private copies.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent original variables (for arrays,
  /// single array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions.
  // Tail-allocated array #3, directly after the source expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign private copy of the variable to original variable.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions.
  // Tail-allocated array #4, directly after the destination expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

  /// Sets lastprivate kind.
  void setKind(OpenMPLastprivateModifier Kind) { LPKind = Kind; }
  /// Sets location of the lastprivate kind.
  void setKindLoc(SourceLocation Loc) { LPKindLoc = Loc; }
  /// Sets colon symbol location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause. This list represents
  /// private variables (for arrays, single array element).
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause.
  /// This list represents original variables (for arrays, single array
  /// element).
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// lastprivate clause.
  /// \param LPKind Lastprivate kind, e.g. 'conditional'.
  /// \param LPKindLoc Location of the lastprivate kind.
  /// \param ColonLoc Location of the ':' symbol if lastprivate kind is used.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPLastprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps,
         OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc,
         SourceLocation ColonLoc, Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Lastprivate kind.
  OpenMPLastprivateModifier getKind() const { return LPKind; }
  /// Returns the location of the lastprivate kind.
  SourceLocation getKindLoc() const { return LPKindLoc; }
  /// Returns the location of the ':' symbol, if any.
  SourceLocation getColonLoc() const { return ColonLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  /// Set list of helper expressions, required for generation of private
  /// copies of original lastprivate variables.
  void setPrivateCopies(ArrayRef<Expr *> PrivateCopies);

  helper_expr_const_range private_copies() const {
    return helper_expr_const_range(getPrivateCopies().begin(),
                                   getPrivateCopies().end());
  }

  helper_expr_range private_copies() {
    return helper_expr_range(getPrivateCopies().begin(),
                             getPrivateCopies().end());
  }

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }

  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }

  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }

  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }

  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }

  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPLastprivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_lastprivate;
  }
};

/// This represents clause 'shared' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel shared(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'shared'
/// with the variables 'a' and 'b'.
class OMPSharedClause final
    : public OMPVarListClause<OMPSharedClause>,
      private llvm::TrailingObjects<OMPSharedClause, Expr *> {
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPSharedClause>(llvm::omp::OMPC_shared, StartLoc,
                                          LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPSharedClause(unsigned N)
      : OMPVarListClause<OMPSharedClause>(llvm::omp::OMPC_shared,
                                          SourceLocation(), SourceLocation(),
                                          SourceLocation(), N) {}

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPSharedClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_shared;
  }
};

/// This represents clause 'reduction' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'reduction'
/// with operator '+' and the variables 'a' and 'b'.
class OMPReductionClause final
    : public OMPVarListClause<OMPReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPReductionClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Reduction modifier.
  OpenMPReductionClauseModifier Modifier = OMPC_REDUCTION_unknown;

  /// Reduction modifier location.
  SourceLocation ModifierLoc;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ModifierLoc Modifier location.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation ModifierLoc, SourceLocation ColonLoc,
                     SourceLocation EndLoc,
                     OpenMPReductionClauseModifier Modifier, unsigned N,
                     NestedNameSpecifierLoc QualifierLoc,
                     const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPReductionClause>(llvm::omp::OMPC_reduction,
                                             StartLoc, LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), Modifier(Modifier),
        ModifierLoc(ModifierLoc), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPReductionClause(unsigned N)
      : OMPVarListClause<OMPReductionClause>(llvm::omp::OMPC_reduction,
                                             SourceLocation(),
                                             SourceLocation(),
                                             SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets reduction modifier.
  void setModifier(OpenMPReductionClauseModifier M) { Modifier = M; }

  /// Sets location of the modifier.
  void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private copy of the reduction
  /// variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates.
  // The helper arrays are tail-allocated one after another, starting at
  // varlist_end(); each subsequent getter chains off the previous array's
  // end().
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final
  /// reduction expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final
  /// reduction expression performed by the reduction clause.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

  /// Set list of helper copy operations for inscan reductions.
  /// The form is: Temps[i] = LHS[i];
  void setInscanCopyOps(ArrayRef<Expr *> Ops);

  /// Get the list of helper inscan copy operations.
  MutableArrayRef<Expr *> getInscanCopyOps() {
    return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInscanCopyOps() const {
    return llvm::makeArrayRef(getReductionOps().end(), varlist_size());
  }

  /// Set list of helper temp vars for inscan copy array operations.
  void setInscanCopyArrayTemps(ArrayRef<Expr *> CopyArrayTemps);

  /// Get the list of helper inscan copy temps.
  MutableArrayRef<Expr *> getInscanCopyArrayTemps() {
    return MutableArrayRef<Expr *>(getInscanCopyOps().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInscanCopyArrayTemps() const {
    return llvm::makeArrayRef(getInscanCopyOps().end(), varlist_size());
  }

  /// Set list of helper temp elements vars for inscan copy array operations.
  void setInscanCopyArrayElems(ArrayRef<Expr *> CopyArrayElems);

  /// Get the list of helper inscan copy array elements.
  MutableArrayRef<Expr *> getInscanCopyArrayElems() {
    return MutableArrayRef<Expr *>(getInscanCopyArrayTemps().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getInscanCopyArrayElems() const {
    return llvm::makeArrayRef(getInscanCopyArrayTemps().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ModifierLoc Modifier location.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CutomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param CopyOps List of copy operations for inscan reductions:
  /// \code
  /// TempExprs = LHSExprs;
  /// \endcode
  /// \param CopyArrayTemps Temp arrays for prefix sums.
  /// \param CopyArrayElems Temp arrays for prefix sums.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ModifierLoc, SourceLocation ColonLoc,
         SourceLocation EndLoc, OpenMPReductionClauseModifier Modifier,
         ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> CopyOps,
         ArrayRef<Expr *> CopyArrayTemps, ArrayRef<Expr *> CopyArrayElems,
         Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  /// \param Modifier Reduction modifier.
  static OMPReductionClause *
  CreateEmpty(const ASTContext &C, unsigned N,
              OpenMPReductionClauseModifier Modifier);

  /// Returns modifier.
OpenMPReductionClauseModifier getModifier() const { return Modifier; } /// Returns modifier location. SourceLocation getModifierLoc() const { return ModifierLoc; } /// Gets location of ':' symbol in clause. SourceLocation getColonLoc() const { return ColonLoc; } /// Gets the name info for specified reduction identifier. const DeclarationNameInfo &getNameInfo() const { return NameInfo; } /// Gets the nested name specifier. NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; } using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; helper_expr_const_range privates() const { return helper_expr_const_range(getPrivates().begin(), getPrivates().end()); } helper_expr_range privates() { return helper_expr_range(getPrivates().begin(), getPrivates().end()); } helper_expr_const_range lhs_exprs() const { return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_range lhs_exprs() { return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_const_range rhs_exprs() const { return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_range rhs_exprs() { return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_const_range reduction_ops() const { return helper_expr_const_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_range reduction_ops() { return helper_expr_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_const_range copy_ops() const { return helper_expr_const_range(getInscanCopyOps().begin(), getInscanCopyOps().end()); } helper_expr_range copy_ops() { return helper_expr_range(getInscanCopyOps().begin(), getInscanCopyOps().end()); } helper_expr_const_range copy_array_temps() 
const { return helper_expr_const_range(getInscanCopyArrayTemps().begin(), getInscanCopyArrayTemps().end()); }
  helper_expr_range copy_array_temps() {
    return helper_expr_range(getInscanCopyArrayTemps().begin(),
                             getInscanCopyArrayTemps().end());
  }
  helper_expr_const_range copy_array_elems() const {
    return helper_expr_const_range(getInscanCopyArrayElems().begin(),
                                   getInscanCopyArrayElems().end());
  }
  helper_expr_range copy_array_elems() {
    return helper_expr_range(getInscanCopyArrayElems().begin(),
                             getInscanCopyArrayElems().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPReductionClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range used_children() const {
    auto Children = const_cast<OMPReductionClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_reduction;
  }
};

/// This represents clause 'task_reduction' in the '#pragma omp taskgroup'
/// directives.
///
/// \code
/// #pragma omp taskgroup task_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp taskgroup' has clause
/// 'task_reduction' with operator '+' and the variables 'a' and 'b'.
class OMPTaskReductionClause final
    : public OMPVarListClause<OMPTaskReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPTaskReductionClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPTaskReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                         SourceLocation ColonLoc, SourceLocation EndLoc,
                         unsigned N, NestedNameSpecifierLoc QualifierLoc,
                         const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPTaskReductionClause>(
            llvm::omp::OMPC_task_reduction, StartLoc, LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPTaskReductionClause(unsigned N)
      : OMPVarListClause<OMPTaskReductionClause>(
            llvm::omp::OMPC_task_reduction, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private copy of the reduction
  /// variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates.
  /// Trailing storage holds, in order, one varlist_size()-long list each of:
  /// Privates, LHSExprs, RHSExprs, ReductionOps (see the getters below).
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final reduction
  /// expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final reduction
  /// expression performed by the reduction clause. Also, variables in these
  /// expressions are used for proper initialization of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for the reduction clause. This list
  /// represents LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for the reduction clause. This list
  /// represents RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPTaskReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPTaskReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }
  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPTaskReductionClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_task_reduction;
  }
};

/// This represents clause 'in_reduction' in the '#pragma omp task' directives.
///
/// \code
/// #pragma omp task in_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'in_reduction' with
/// operator '+' and the variables 'a' and 'b'.
class OMPInReductionClause final
    : public OMPVarListClause<OMPInReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPInReductionClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPInReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation ColonLoc, SourceLocation EndLoc,
                       unsigned N, NestedNameSpecifierLoc QualifierLoc,
                       const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPInReductionClause>(llvm::omp::OMPC_in_reduction,
                                               StartLoc, LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPInReductionClause(unsigned N)
      : OMPVarListClause<OMPInReductionClause>(
            llvm::omp::OMPC_in_reduction, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private copy of the reduction
  /// variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates.
  /// Trailing storage holds, in order, one varlist_size()-long list each of:
  /// Privates, LHSExprs, RHSExprs, ReductionOps, TaskgroupDescriptors
  /// (see the getters below).
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final reduction
  /// expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final reduction
  /// expression performed by the reduction clause. Also, variables in these
  /// expressions are used for proper initialization of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction taskgroup descriptors.
  void setTaskgroupDescriptors(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction taskgroup descriptors.
  MutableArrayRef<Expr *> getTaskgroupDescriptors() {
    return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size());
  }
  ArrayRef<const Expr *> getTaskgroupDescriptors() const {
    return llvm::makeArrayRef(getReductionOps().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for the reduction clause. This list
  /// represents LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for the reduction clause. This list
  /// represents RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param TaskgroupDescriptors List of helper taskgroup descriptors for
  /// corresponding items in parent taskgroup task_reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPInReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> TaskgroupDescriptors,
         Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPInReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }
  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }
  helper_expr_const_range taskgroup_descriptors() const {
    return helper_expr_const_range(getTaskgroupDescriptors().begin(),
                                   getTaskgroupDescriptors().end());
  }
  helper_expr_range taskgroup_descriptors() {
    return helper_expr_range(getTaskgroupDescriptors().begin(),
                             getTaskgroupDescriptors().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPInReductionClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_in_reduction;
  }
};

/// This represents clause 'linear' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd linear(a,b : 2)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'linear'
/// with variables 'a', 'b' and linear step '2'.
class OMPLinearClause final
    : public OMPVarListClause<OMPLinearClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPLinearClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Modifier of 'linear' clause.
  OpenMPLinearClauseKind Modifier = OMPC_LINEAR_val;

  /// Location of linear modifier if any.
  SourceLocation ModifierLoc;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Sets the linear step for clause.
  /// The step lives in the first helper slot right after Finals[].
  void setStep(Expr *Step) { *(getFinals().end()) = Step; }

  /// Sets the expression to calculate linear step for clause.
  /// CalcStep lives in the second helper slot after Finals[].
  void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; }

  /// Build 'linear' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Modifier Modifier of 'linear' clause.
  /// \param ModifierLoc Modifier location.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
  OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
                  SourceLocation ColonLoc, SourceLocation EndLoc,
                  unsigned NumVars)
      : OMPVarListClause<OMPLinearClause>(llvm::omp::OMPC_linear, StartLoc,
                                          LParenLoc, EndLoc, NumVars),
        OMPClauseWithPostUpdate(this), Modifier(Modifier),
        ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param NumVars Number of variables.
explicit OMPLinearClause(unsigned NumVars)
      : OMPVarListClause<OMPLinearClause>(llvm::omp::OMPC_linear,
                                          SourceLocation(), SourceLocation(),
                                          SourceLocation(), NumVars),
        OMPClauseWithPostUpdate(this) {}

  /// Gets the list of initial values for linear variables.
  ///
  /// There are NumVars expressions with initial values allocated after the
  /// varlist, they are followed by NumVars update expressions (used to update
  /// the linear variable's value on current iteration) and they are followed
  /// by NumVars final expressions (used to calculate the linear variable's
  /// value after the loop body). After these lists, there are 2 helper
  /// expressions - linear step and a helper to calculate it before the
  /// loop body (used when the linear step is not constant):
  ///
  /// { Vars[] /* in OMPVarListClause */; Privates[]; Inits[]; Updates[];
  /// Finals[]; Step; CalcStep; }
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Gets the list of update expressions for linear variables.
  MutableArrayRef<Expr *> getUpdates() {
    return MutableArrayRef<Expr *>(getInits().end(), varlist_size());
  }
  ArrayRef<const Expr *> getUpdates() const {
    return llvm::makeArrayRef(getInits().end(), varlist_size());
  }

  /// Gets the list of final update expressions for linear variables.
  MutableArrayRef<Expr *> getFinals() {
    return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getFinals() const {
    return llvm::makeArrayRef(getUpdates().end(), varlist_size());
  }

  /// Gets the list of used expressions for linear variables.
  /// The "+ 2" skips the Step and CalcStep slots stored immediately after
  /// Finals[] (see setStep()/setCalcStep()).
  MutableArrayRef<Expr *> getUsedExprs() {
    return MutableArrayRef<Expr *>(getFinals().end() + 2, varlist_size() + 1);
  }
  ArrayRef<const Expr *> getUsedExprs() const {
    return llvm::makeArrayRef(getFinals().end() + 2, varlist_size() + 1);
  }

  /// Sets the list of the copies of original linear variables.
  /// \param PL List of expressions.
  void setPrivates(ArrayRef<Expr *> PL);

  /// Sets the list of the initial values for linear variables.
  /// \param IL List of expressions.
  void setInits(ArrayRef<Expr *> IL);

public:
  /// Creates clause with a list of variables \a VL and a linear step
  /// \a Step.
  ///
  /// \param C AST Context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Modifier Modifier of 'linear' clause.
  /// \param ModifierLoc Modifier location.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param PL List of private copies of original variables.
  /// \param IL List of initial values for the variables.
  /// \param Step Linear step.
  /// \param CalcStep Calculation of the linear step.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPLinearClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep,
         Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of variables.
  static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

  /// Set modifier.
  void setModifier(OpenMPLinearClauseKind Kind) { Modifier = Kind; }

  /// Return modifier.
  OpenMPLinearClauseKind getModifier() const { return Modifier; }

  /// Set modifier location.
  void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

  /// Return modifier location.
  SourceLocation getModifierLoc() const { return ModifierLoc; }

  /// Sets the location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Returns the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Returns linear step.
  Expr *getStep() { return *(getFinals().end()); }

  /// Returns linear step.
  const Expr *getStep() const { return *(getFinals().end()); }

  /// Returns expression to calculate linear step.
  Expr *getCalcStep() { return *(getFinals().end() + 1); }

  /// Returns expression to calculate linear step.
  const Expr *getCalcStep() const { return *(getFinals().end() + 1); }

  /// Sets the list of update expressions for linear variables.
  /// \param UL List of expressions.
  void setUpdates(ArrayRef<Expr *> UL);

  /// Sets the list of final update expressions for linear variables.
  /// \param FL List of expressions.
  void setFinals(ArrayRef<Expr *> FL);

  /// Sets the list of used expressions for the linear clause.
  void setUsedExprs(ArrayRef<Expr *> UE);

  using privates_iterator = MutableArrayRef<Expr *>::iterator;
  using privates_const_iterator = ArrayRef<const Expr *>::iterator;
  using privates_range = llvm::iterator_range<privates_iterator>;
  using privates_const_range = llvm::iterator_range<privates_const_iterator>;

  privates_range privates() {
    return privates_range(getPrivates().begin(), getPrivates().end());
  }
  privates_const_range privates() const {
    return privates_const_range(getPrivates().begin(), getPrivates().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }
  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  using updates_iterator = MutableArrayRef<Expr *>::iterator;
  using updates_const_iterator = ArrayRef<const Expr *>::iterator;
  using updates_range = llvm::iterator_range<updates_iterator>;
  using updates_const_range = llvm::iterator_range<updates_const_iterator>;

  updates_range updates() {
    return updates_range(getUpdates().begin(), getUpdates().end());
  }
  updates_const_range updates() const {
    return updates_const_range(getUpdates().begin(), getUpdates().end());
  }

  using finals_iterator = MutableArrayRef<Expr *>::iterator;
  using finals_const_iterator = ArrayRef<const Expr *>::iterator;
  using finals_range = llvm::iterator_range<finals_iterator>;
  using finals_const_range = llvm::iterator_range<finals_const_iterator>;

  finals_range finals() {
    return finals_range(getFinals().begin(), getFinals().end());
  }
  finals_const_range finals() const {
    return finals_const_range(getFinals().begin(), getFinals().end());
  }

  using used_expressions_iterator = MutableArrayRef<Expr *>::iterator;
  using used_expressions_const_iterator = ArrayRef<const Expr *>::iterator;
  using used_expressions_range =
      llvm::iterator_range<used_expressions_iterator>;
  using used_expressions_const_range =
      llvm::iterator_range<used_expressions_const_iterator>;

  // NOTE(review): these construct the result via the finals_range aliases,
  // which name the same llvm::iterator_range instantiations as the
  // used_expressions_range aliases, so the returned type is identical.
  used_expressions_range used_expressions() {
    return finals_range(getUsedExprs().begin(), getUsedExprs().end());
  }
  used_expressions_const_range used_expressions() const {
    return finals_const_range(getUsedExprs().begin(), getUsedExprs().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPLinearClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPLinearClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_linear;
  }
};

/// This represents clause 'aligned' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd aligned(a,b : 8)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'aligned'
/// with variables 'a', 'b' and alignment '8'.
class OMPAlignedClause final
    : public OMPVarListClause<OMPAlignedClause>,
      private llvm::TrailingObjects<OMPAlignedClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Sets the alignment for clause.
  /// The alignment expression is stored in the single slot after the varlist.
  void setAlignment(Expr *A) { *varlist_end() = A; }

  /// Build 'aligned' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation ColonLoc, SourceLocation EndLoc,
                   unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(llvm::omp::OMPC_aligned, StartLoc,
                                           LParenLoc, EndLoc, NumVars),
        ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param NumVars Number of variables.
  explicit OMPAlignedClause(unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(llvm::omp::OMPC_aligned,
                                           SourceLocation(), SourceLocation(),
                                           SourceLocation(), NumVars) {}

public:
  /// Creates clause with a list of variables \a VL and alignment \a A.
  ///
  /// \param C AST Context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param A Alignment.
  static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation ColonLoc,
                                  SourceLocation EndLoc, ArrayRef<Expr *> VL,
                                  Expr *A);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of variables.
  static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

  /// Sets the location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Returns the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Returns alignment.
  Expr *getAlignment() { return *varlist_end(); }

  /// Returns alignment.
  const Expr *getAlignment() const { return *varlist_end(); }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPAlignedClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_aligned;
  }
};

/// This represents clause 'copyin' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel copyin(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'copyin'
/// with the variables 'a' and 'b'.
class OMPCopyinClause final
    : public OMPVarListClause<OMPCopyinClause>,
      private llvm::TrailingObjects<OMPCopyinClause, Expr *> {
  // Class has 3 additional tail allocated arrays:
  // 1. List of helper expressions for proper generation of assignment operation
  // required for copyin clause. This list represents sources.
  // 2. List of helper expressions for proper generation of assignment operation
  // required for copyin clause. This list represents destinations.
  // 3. List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of propagation of master's thread values of
  // threadprivate variables to local instances of that variables in other
  // implicit threads.

  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                SourceLocation EndLoc, unsigned N)
    : OMPVarListClause<OMPCopyinClause>(llvm::omp::OMPC_copyin, StartLoc,
                                        LParenLoc, EndLoc, N) {}

/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPCopyinClause(unsigned N)
    : OMPVarListClause<OMPCopyinClause>(llvm::omp::OMPC_copyin,
                                        SourceLocation(), SourceLocation(),
                                        SourceLocation(), N) {}

/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent source expression in the final
/// assignment statement performed by the copyin clause.
void setSourceExprs(ArrayRef<Expr *> SrcExprs);

/// Get the list of helper source expressions.
/// Layout note: the three helper arrays (sources, destinations, assignment
/// ops) are laid out back-to-back in the trailing storage, each of
/// varlist_size() elements, starting right after the variable list.
MutableArrayRef<Expr *> getSourceExprs() {
  return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
  return llvm::makeArrayRef(varlist_end(), varlist_size());
}

/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent destination expression in the final
/// assignment statement performed by the copyin clause.
void setDestinationExprs(ArrayRef<Expr *> DstExprs);

/// Get the list of helper destination expressions (follows the source
/// expressions in trailing storage).
MutableArrayRef<Expr *> getDestinationExprs() {
  return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
  return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
}

/// Set list of helper assignment expressions, required for proper
/// codegen of the clause. These expressions are assignment expressions that
/// assign source helper expressions to destination helper expressions
/// correspondingly.
void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

/// Get the list of helper assignment expressions (follows the destination
/// expressions in trailing storage).
MutableArrayRef<Expr *> getAssignmentOps() {
  return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
  return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
}

public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param SrcExprs List of helper expressions for proper generation of
/// assignment operation required for copyin clause. This list represents
/// sources.
/// \param DstExprs List of helper expressions for proper generation of
/// assignment operation required for copyin clause. This list represents
/// destinations.
/// \param AssignmentOps List of helper expressions that represents assignment
/// operation:
/// \code
/// DstExprs = SrcExprs;
/// \endcode
/// Required for proper codegen of propagation of master's thread values of
/// threadprivate variables to local instances of that variables in other
/// implicit threads.
static OMPCopyinClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
       SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
       ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);

/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N);

using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
    llvm::iterator_range<helper_expr_const_iterator>;

helper_expr_const_range source_exprs() const {
  return helper_expr_const_range(getSourceExprs().begin(),
                                 getSourceExprs().end());
}
helper_expr_range source_exprs() {
  return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
}
helper_expr_const_range destination_exprs() const {
  return helper_expr_const_range(getDestinationExprs().begin(),
                                 getDestinationExprs().end());
}
helper_expr_range destination_exprs() {
  return helper_expr_range(getDestinationExprs().begin(),
                           getDestinationExprs().end());
}
helper_expr_const_range assignment_ops() const {
  return helper_expr_const_range(getAssignmentOps().begin(),
                                 getAssignmentOps().end());
}
helper_expr_range assignment_ops() {
  return helper_expr_range(getAssignmentOps().begin(),
                           getAssignmentOps().end());
}

child_range children() {
  return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
}

const_child_range children() const {
  auto Children = const_cast<OMPCopyinClause *>(this)->children();
  return const_child_range(Children.begin(), Children.end());
}

// No 'used' children: this clause exposes an empty range.
child_range used_children() {
  return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
  return const_child_range(const_child_iterator(), const_child_iterator());
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == llvm::omp::OMPC_copyin;
}
};

/// This represents clause 'copyprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp single copyprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp single' has clause 'copyprivate'
/// with the variables 'a' and 'b'.
class OMPCopyprivateClause final
    : public OMPVarListClause<OMPCopyprivateClause>,
      private llvm::TrailingObjects<OMPCopyprivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(llvm::omp::OMPC_copyprivate,
                                               StartLoc, LParenLoc, EndLoc,
                                               N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPCopyprivateClause(unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(
            llvm::omp::OMPC_copyprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N) {}

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions.
  /// Layout note: as in OMPCopyinClause, three helper arrays of
  /// varlist_size() elements each follow the variable list in trailing
  /// storage: sources, destinations, assignment ops.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// destinations.
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// copyprivate clause.
  static OMPCopyprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc,
         SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs,
         ArrayRef<Expr *> AssignmentOps);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }
  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }
  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPCopyprivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // No 'used' children: this clause exposes an empty range.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_copyprivate;
  }
};

/// This represents implicit clause 'flush' for the '#pragma omp flush'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// flush' directive.
/// This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has implicit clause 'flush'
/// with the variables 'a' and 'b'.
class OMPFlushClause final
    : public OMPVarListClause<OMPFlushClause>,
      private llvm::TrailingObjects<OMPFlushClause, Expr *> {
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                 SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPFlushClause>(llvm::omp::OMPC_flush, StartLoc,
                                         LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPFlushClause(unsigned N)
      : OMPVarListClause<OMPFlushClause>(llvm::omp::OMPC_flush,
                                         SourceLocation(), SourceLocation(),
                                         SourceLocation(), N) {}

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                SourceLocation LParenLoc,
                                SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFlushClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // No 'used' children: this clause exposes an empty range.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_flush;
  }
};

/// This represents implicit clause 'depobj' for the '#pragma omp depobj'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// depobj' directive. This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp depobj(a) destroy
/// \endcode
/// In this example directive '#pragma omp depobj' has implicit clause 'depobj'
/// with the depobj 'a'.
class OMPDepobjClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Depobj expression associated with the clause.
  /// (Previous comment said "Chunk size." — a copy-paste error; this member
  /// holds the depobj expression, see setDepobj/getDepobj.)
  Expr *Depobj = nullptr;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDepobjClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_depobj, StartLoc, EndLoc),
        LParenLoc(LParenLoc) {}

  /// Build an empty clause.
  explicit OMPDepobjClause()
      : OMPClause(llvm::omp::OMPC_depobj, SourceLocation(),
                  SourceLocation()) {}

  /// Sets the depobj expression.
  void setDepobj(Expr *E) { Depobj = E; }

  /// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

public:
/// Creates clause.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param Depobj depobj expression associated with the 'depobj' directive.
static OMPDepobjClause *Create(const ASTContext &C, SourceLocation StartLoc,
                               SourceLocation LParenLoc,
                               SourceLocation EndLoc, Expr *Depobj);

/// Creates an empty clause.
///
/// \param C AST context.
static OMPDepobjClause *CreateEmpty(const ASTContext &C);

/// Returns depobj expression associated with the clause.
Expr *getDepobj() { return Depobj; }
const Expr *getDepobj() const { return Depobj; }

/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }

// The single Depobj expression is the clause's only child.
child_range children() {
  return child_range(reinterpret_cast<Stmt **>(&Depobj),
                     reinterpret_cast<Stmt **>(&Depobj) + 1);
}

const_child_range children() const {
  auto Children = const_cast<OMPDepobjClause *>(this)->children();
  return const_child_range(Children.begin(), Children.end());
}

// No 'used' children: this clause exposes an empty range.
child_range used_children() {
  return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
  return const_child_range(const_child_iterator(), const_child_iterator());
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == llvm::omp::OMPC_depobj;
}
};

/// This represents implicit clause 'depend' for the '#pragma omp task'
/// directive.
///
/// \code
/// #pragma omp task depend(in:a,b)
/// \endcode
/// In this example directive '#pragma omp task' with clause 'depend' with the
/// variables 'a' and 'b' with dependency 'in'.
class OMPDependClause final
    : public OMPVarListClause<OMPDependClause>,
      private llvm::TrailingObjects<OMPDependClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Dependency type (one of in, out, inout).
OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;

/// Dependency type location.
SourceLocation DepLoc;

/// Colon location.
SourceLocation ColonLoc;

/// Number of loops, associated with the depend clause.
unsigned NumLoops = 0;

/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                SourceLocation EndLoc, unsigned N, unsigned NumLoops)
    : OMPVarListClause<OMPDependClause>(llvm::omp::OMPC_depend, StartLoc,
                                        LParenLoc, EndLoc, N),
      NumLoops(NumLoops) {}

/// Build an empty clause.
///
/// \param N Number of variables.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
explicit OMPDependClause(unsigned N, unsigned NumLoops)
    : OMPVarListClause<OMPDependClause>(llvm::omp::OMPC_depend,
                                        SourceLocation(), SourceLocation(),
                                        SourceLocation(), N),
      NumLoops(NumLoops) {}

/// Set dependency kind.
void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; }

/// Set the location of the dependency type.
void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; }

/// Set colon location.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

/// Sets optional dependency modifier.
void setModifier(Expr *DepModifier);

public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param DepKind Dependency type.
/// \param DepLoc Location of the dependency type.
/// \param ColonLoc Colon location.
/// \param VL List of references to the variables.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
static OMPDependClause *Create(const ASTContext &C, SourceLocation StartLoc,
                               SourceLocation LParenLoc,
                               SourceLocation EndLoc, Expr *DepModifier,
                               OpenMPDependClauseKind DepKind,
                               SourceLocation DepLoc, SourceLocation ColonLoc,
                               ArrayRef<Expr *> VL, unsigned NumLoops);

/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N,
                                    unsigned NumLoops);

/// Get dependency type.
OpenMPDependClauseKind getDependencyKind() const { return DepKind; }

/// Return optional depend modifier.
Expr *getModifier();
const Expr *getModifier() const {
  return const_cast<OMPDependClause *>(this)->getModifier();
}

/// Get dependency type location.
SourceLocation getDependencyLoc() const { return DepLoc; }

/// Get colon location.
SourceLocation getColonLoc() const { return ColonLoc; }

/// Get number of loops associated with the clause.
unsigned getNumLoops() const { return NumLoops; }

/// Set the loop data for the depend clauses with 'sink|source' kind of
/// dependency.
void setLoopData(unsigned NumLoop, Expr *Cnt);

/// Get the loop data.
Expr *getLoopData(unsigned NumLoop);
const Expr *getLoopData(unsigned NumLoop) const;

child_range children() {
  return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
}

const_child_range children() const {
  auto Children = const_cast<OMPDependClause *>(this)->children();
  return const_child_range(Children.begin(), Children.end());
}

// No 'used' children: this clause exposes an empty range.
child_range used_children() {
  return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
  return const_child_range(const_child_iterator(), const_child_iterator());
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == llvm::omp::OMPC_depend;
}
};

/// This represents 'device' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp target device(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'device'
/// with single expression 'a'.
class OMPDeviceClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Device clause modifier.
  OpenMPDeviceClauseModifier Modifier = OMPC_DEVICE_unknown;

  /// Location of the modifier.
  SourceLocation ModifierLoc;

  /// Device number.
  Stmt *Device = nullptr;

  /// Set the device number.
  ///
  /// \param E Device number.
  void setDevice(Expr *E) { Device = E; }

  /// Sets modifier.
  void setModifier(OpenMPDeviceClauseModifier M) { Modifier = M; }

  /// Sets modifier location.
  void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

public:
  /// Build 'device' clause.
  ///
  /// \param Modifier Clause modifier.
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper statement forwarded to setPreInitStmt() for the
  /// capture region.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param ModifierLoc Modifier location.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *E, Stmt *HelperE,
                  OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                  SourceLocation LParenLoc, SourceLocation ModifierLoc,
                  SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_device, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Modifier(Modifier),
        ModifierLoc(ModifierLoc), Device(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause.
  OMPDeviceClause()
      : OMPClause(llvm::omp::OMPC_device, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return device number.
  Expr *getDevice() { return cast<Expr>(Device); }

  /// Return device number.
  /// NOTE(review): this const overload also returns a non-const Expr *.
  Expr *getDevice() const { return cast<Expr>(Device); }

  /// Gets modifier.
  OpenMPDeviceClauseModifier getModifier() const { return Modifier; }

  /// Gets modifier location.
  SourceLocation getModifierLoc() const { return ModifierLoc; }

  child_range children() { return child_range(&Device, &Device + 1); }

  const_child_range children() const {
    return const_child_range(&Device, &Device + 1);
  }

  // No 'used' children: this clause exposes an empty range.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_device;
  }
};

/// This represents 'threads' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered threads
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'threads'
/// clause.
class OMPThreadsClause : public OMPClause {
public:
  /// Build 'threads' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_threads, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPThreadsClause()
      : OMPClause(llvm::omp::OMPC_threads, SourceLocation(),
                  SourceLocation()) {}

  // A simple (argument-less) clause: no children at all.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_threads;
  }
};

/// This represents 'simd' clause in the '#pragma omp ...' directive.
/// /// \code /// #pragma omp ordered simd /// \endcode /// In this example directive '#pragma omp ordered' has simple 'simd' clause. class OMPSIMDClause : public OMPClause { public: /// Build 'simd' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_simd, StartLoc, EndLoc) {} /// Build an empty clause. OMPSIMDClause() : OMPClause(llvm::omp::OMPC_simd, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_simd; } }; /// Struct that defines common infrastructure to handle mappable /// expressions used in OpenMP clauses. class OMPClauseMappableExprCommon { public: /// Class that represents a component of a mappable expression. E.g. /// for an expression S.a, the first component is a declaration reference /// expression associated with 'S' and the second is a member expression /// associated with the field declaration 'a'. If the expression is an array /// subscript it may not have any associated declaration. In that case the /// associated declaration is set to nullptr. class MappableComponent { /// Expression associated with the component. Expr *AssociatedExpression = nullptr; /// Declaration associated with the declaration. If the component does /// not have a declaration (e.g. array subscripts or section), this is set /// to nullptr. 
ValueDecl *AssociatedDeclaration = nullptr;

public:
explicit MappableComponent() = default;
// Canonicalizes the declaration on construction so that component lists
// built from different redeclarations compare against the same base decl.
explicit MappableComponent(Expr *AssociatedExpression,
                           ValueDecl *AssociatedDeclaration)
    : AssociatedExpression(AssociatedExpression),
      AssociatedDeclaration(
          AssociatedDeclaration
              ? cast<ValueDecl>(AssociatedDeclaration->getCanonicalDecl())
              : nullptr) {}

Expr *getAssociatedExpression() const { return AssociatedExpression; }

ValueDecl *getAssociatedDeclaration() const { return AssociatedDeclaration; }
};

// List of components of an expression. This first one is the whole
// expression and the last one is the base expression.
using MappableExprComponentList = SmallVector<MappableComponent, 8>;
using MappableExprComponentListRef = ArrayRef<MappableComponent>;

// List of all component lists associated to the same base declaration.
// E.g. if both 'S.a' and 'S.b' are a mappable expressions, each will have
// their component list but the same base declaration 'S'.
using MappableExprComponentLists = SmallVector<MappableExprComponentList, 8>;
using MappableExprComponentListsRef = ArrayRef<MappableExprComponentList>;

protected:
// Return the total number of elements in a list of component lists.
static unsigned
getComponentsTotalNumber(MappableExprComponentListsRef ComponentLists);

// Return the total number of elements in a list of declarations. All
// declarations are expected to be canonical.
static unsigned
getUniqueDeclarationsTotalNumber(ArrayRef<const ValueDecl *> Declarations);
};

/// This structure contains all sizes needed by an
/// OMPMappableExprListClause.
struct OMPMappableExprListSizeTy {
  /// Number of expressions listed.
  unsigned NumVars;
  /// Number of unique base declarations.
  unsigned NumUniqueDeclarations;
  /// Number of component lists.
  unsigned NumComponentLists;
  /// Total number of expression components.
  unsigned NumComponents;
  OMPMappableExprListSizeTy() = default;
  OMPMappableExprListSizeTy(unsigned NumVars, unsigned NumUniqueDeclarations,
                            unsigned NumComponentLists, unsigned NumComponents)
      : NumVars(NumVars), NumUniqueDeclarations(NumUniqueDeclarations),
        NumComponentLists(NumComponentLists), NumComponents(NumComponents) {}
};

/// This represents clauses with a list of expressions that are mappable.
/// Examples of these clauses are 'map' in
/// '#pragma omp target [enter|exit] [data]...' directives, and 'to' and 'from
/// in '#pragma omp target update...' directives.
template <class T>
class OMPMappableExprListClause : public OMPVarListClause<T>,
                                  public OMPClauseMappableExprCommon {
  friend class OMPClauseReader;

  /// Number of unique declarations in this clause.
  unsigned NumUniqueDeclarations;

  /// Number of component lists in this clause.
  unsigned NumComponentLists;

  /// Total number of components in this clause.
  unsigned NumComponents;

  /// C++ nested name specifier for the associated user-defined mapper.
  NestedNameSpecifierLoc MapperQualifierLoc;

  /// The associated user-defined mapper identifier information.
  DeclarationNameInfo MapperIdInfo;

protected:
  /// Build a clause for \a NumUniqueDeclarations declarations, \a
  /// NumComponentLists total component lists, and \a NumComponents total
  /// components.
  ///
  /// \param K Kind of the clause.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  /// \param MapperQualifierLocPtr C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfoPtr The identifier of associated user-defined mapper.
  OMPMappableExprListClause(
      OpenMPClauseKind K, const OMPVarListLocTy &Locs,
      const OMPMappableExprListSizeTy &Sizes,
      NestedNameSpecifierLoc *MapperQualifierLocPtr = nullptr,
      DeclarationNameInfo *MapperIdInfoPtr = nullptr)
      : OMPVarListClause<T>(K, Locs.StartLoc, Locs.LParenLoc, Locs.EndLoc,
                            Sizes.NumVars),
        NumUniqueDeclarations(Sizes.NumUniqueDeclarations),
        NumComponentLists(Sizes.NumComponentLists),
        NumComponents(Sizes.NumComponents) {
    if (MapperQualifierLocPtr)
      MapperQualifierLoc = *MapperQualifierLocPtr;
    if (MapperIdInfoPtr)
      MapperIdInfo = *MapperIdInfoPtr;
  }

  // NOTE: the accessors below index into the trailing-object storage of the
  // derived clause T (CRTP): the casts to T are required because only the
  // most-derived class knows the trailing-object layout.

  /// Get the unique declarations that are in the trailing objects of the
  /// class.
  MutableArrayRef<ValueDecl *> getUniqueDeclsRef() {
    return MutableArrayRef<ValueDecl *>(
        static_cast<T *>(this)->template getTrailingObjects<ValueDecl *>(),
        NumUniqueDeclarations);
  }

  /// Get the unique declarations that are in the trailing objects of the
  /// class.
  ArrayRef<ValueDecl *> getUniqueDeclsRef() const {
    return ArrayRef<ValueDecl *>(
        static_cast<const T *>(this)
            ->template getTrailingObjects<ValueDecl *>(),
        NumUniqueDeclarations);
  }

  /// Set the unique declarations that are in the trailing objects of the
  /// class.
  void setUniqueDecls(ArrayRef<ValueDecl *> UDs) {
    assert(UDs.size() == NumUniqueDeclarations &&
           "Unexpected amount of unique declarations.");
    std::copy(UDs.begin(), UDs.end(), getUniqueDeclsRef().begin());
  }

  /// Get the number of lists per declaration that are in the trailing
  /// objects of the class.
  MutableArrayRef<unsigned> getDeclNumListsRef() {
    return MutableArrayRef<unsigned>(
        static_cast<T *>(this)->template getTrailingObjects<unsigned>(),
        NumUniqueDeclarations);
  }

  /// Get the number of lists per declaration that are in the trailing
  /// objects of the class.
  ArrayRef<unsigned> getDeclNumListsRef() const {
    return ArrayRef<unsigned>(
        static_cast<const T *>(this)->template getTrailingObjects<unsigned>(),
        NumUniqueDeclarations);
  }

  /// Set the number of lists per declaration that are in the trailing
  /// objects of the class.
  void setDeclNumLists(ArrayRef<unsigned> DNLs) {
    assert(DNLs.size() == NumUniqueDeclarations &&
           "Unexpected amount of list numbers.");
    std::copy(DNLs.begin(), DNLs.end(), getDeclNumListsRef().begin());
  }

  /// Get the cumulative component lists sizes that are in the trailing
  /// objects of the class. They are appended after the number of lists.
  MutableArrayRef<unsigned> getComponentListSizesRef() {
    return MutableArrayRef<unsigned>(
        static_cast<T *>(this)->template getTrailingObjects<unsigned>() +
            NumUniqueDeclarations,
        NumComponentLists);
  }

  /// Get the cumulative component lists sizes that are in the trailing
  /// objects of the class. They are appended after the number of lists.
  ArrayRef<unsigned> getComponentListSizesRef() const {
    return ArrayRef<unsigned>(
        static_cast<const T *>(this)->template getTrailingObjects<unsigned>() +
            NumUniqueDeclarations,
        NumComponentLists);
  }

  /// Set the cumulative component lists sizes that are in the trailing
  /// objects of the class.
  void setComponentListSizes(ArrayRef<unsigned> CLSs) {
    assert(CLSs.size() == NumComponentLists &&
           "Unexpected amount of component lists.");
    std::copy(CLSs.begin(), CLSs.end(), getComponentListSizesRef().begin());
  }

  /// Get the components that are in the trailing objects of the class.
  MutableArrayRef<MappableComponent> getComponentsRef() {
    return MutableArrayRef<MappableComponent>(
        static_cast<T *>(this)
            ->template getTrailingObjects<MappableComponent>(),
        NumComponents);
  }

  /// Get the components that are in the trailing objects of the class.
  ArrayRef<MappableComponent> getComponentsRef() const {
    return ArrayRef<MappableComponent>(
        static_cast<const T *>(this)
            ->template getTrailingObjects<MappableComponent>(),
        NumComponents);
  }

  /// Set the components that are in the trailing objects of the class.
  /// This requires the list sizes so that it can also fill the original
  /// expressions, which are the first component of each list.
  void setComponents(ArrayRef<MappableComponent> Components,
                     ArrayRef<unsigned> CLSs) {
    assert(Components.size() == NumComponents &&
           "Unexpected amount of component lists.");
    assert(CLSs.size() == NumComponentLists &&
           "Unexpected amount of list sizes.");
    std::copy(Components.begin(), Components.end(), getComponentsRef().begin());
  }

  /// Fill the clause information from the list of declarations and
  /// associated component lists.
  void setClauseInfo(ArrayRef<ValueDecl *> Declarations,
                     MappableExprComponentListsRef ComponentLists) {
    // Perform some checks to make sure the data sizes are consistent with the
    // information available when the clause was created.
    assert(getUniqueDeclarationsTotalNumber(Declarations) ==
               NumUniqueDeclarations &&
           "Unexpected number of mappable expression info entries!");
    assert(getComponentsTotalNumber(ComponentLists) == NumComponents &&
           "Unexpected total number of components!");
    assert(Declarations.size() == ComponentLists.size() &&
           "Declaration and component lists size is not consistent!");
    assert(Declarations.size() == NumComponentLists &&
           "Unexpected declaration and component lists size!");

    // Organize the components by declaration and retrieve the original
    // expression. Original expressions are always the first component of the
    // mappable component list.
    llvm::MapVector<ValueDecl *, SmallVector<MappableExprComponentListRef, 8>>
        ComponentListMap;
    {
      auto CI = ComponentLists.begin();
      for (auto DI = Declarations.begin(), DE = Declarations.end(); DI != DE;
           ++DI, ++CI) {
        assert(!CI->empty() && "Invalid component list!");
        ComponentListMap[*DI].push_back(*CI);
      }
    }

    // Iterators of the target storage.
    auto UniqueDeclarations = getUniqueDeclsRef();
    auto UDI = UniqueDeclarations.begin();

    auto DeclNumLists = getDeclNumListsRef();
    auto DNLI = DeclNumLists.begin();

    auto ComponentListSizes = getComponentListSizesRef();
    auto CLSI = ComponentListSizes.begin();

    auto Components = getComponentsRef();
    auto CI = Components.begin();

    // Variable to compute the accumulation of the number of components.
    unsigned PrevSize = 0u;

    // Scan all the declarations and associated component lists.
    for (auto &M : ComponentListMap) {
      // The declaration.
      auto *D = M.first;
      // The component lists.
      auto CL = M.second;

      // Initialize the entry.
      *UDI = D;
      ++UDI;

      *DNLI = CL.size();
      ++DNLI;

      // Obtain the cumulative sizes and concatenate all the components in the
      // reserved storage.
      for (auto C : CL) {
        // Accumulate with the previous size.
        PrevSize += C.size();

        // Save the size.
        *CLSI = PrevSize;
        ++CLSI;

        // Append components after the current components iterator.
        CI = std::copy(C.begin(), C.end(), CI);
      }
    }
  }

  /// Set the nested name specifier of associated user-defined mapper.
  void setMapperQualifierLoc(NestedNameSpecifierLoc NNSL) {
    MapperQualifierLoc = NNSL;
  }

  /// Set the name of associated user-defined mapper.
  void setMapperIdInfo(DeclarationNameInfo MapperId) {
    MapperIdInfo = MapperId;
  }

  /// Get the user-defined mapper references that are in the trailing objects of
  /// the class.
MutableArrayRef<Expr *> getUDMapperRefs() { return llvm::makeMutableArrayRef<Expr *>( static_cast<T *>(this)->template getTrailingObjects<Expr *>() + OMPVarListClause<T>::varlist_size(), OMPVarListClause<T>::varlist_size()); } /// Get the user-defined mappers references that are in the trailing objects /// of the class. ArrayRef<Expr *> getUDMapperRefs() const { return llvm::makeArrayRef<Expr *>( static_cast<T *>(this)->template getTrailingObjects<Expr *>() + OMPVarListClause<T>::varlist_size(), OMPVarListClause<T>::varlist_size()); } /// Set the user-defined mappers that are in the trailing objects of the /// class. void setUDMapperRefs(ArrayRef<Expr *> DMDs) { assert(DMDs.size() == OMPVarListClause<T>::varlist_size() && "Unexpected number of user-defined mappers."); std::copy(DMDs.begin(), DMDs.end(), getUDMapperRefs().begin()); } public: /// Return the number of unique base declarations in this clause. unsigned getUniqueDeclarationsNum() const { return NumUniqueDeclarations; } /// Return the number of lists derived from the clause expressions. unsigned getTotalComponentListNum() const { return NumComponentLists; } /// Return the total number of components in all lists derived from the /// clause. unsigned getTotalComponentsNum() const { return NumComponents; } /// Gets the nested name specifier for associated user-defined mapper. NestedNameSpecifierLoc getMapperQualifierLoc() const { return MapperQualifierLoc; } /// Gets the name info for associated user-defined mapper. const DeclarationNameInfo &getMapperIdInfo() const { return MapperIdInfo; } /// Iterator that browse the components by lists. It also allows /// browsing components of a single declaration. class const_component_lists_iterator : public llvm::iterator_adaptor_base< const_component_lists_iterator, MappableExprComponentListRef::const_iterator, std::forward_iterator_tag, MappableComponent, ptrdiff_t, MappableComponent, MappableComponent> { // The declaration the iterator currently refers to. 
    ArrayRef<ValueDecl *>::iterator DeclCur;

    // The list number associated with the current declaration.
    ArrayRef<unsigned>::iterator NumListsCur;

    // Remaining lists for the current declaration.
    unsigned RemainingLists = 0;

    // The cumulative size of the previous list, or zero if there is no previous
    // list.
    unsigned PrevListSize = 0;

    // The cumulative sizes of the current list - it will delimit the remaining
    // range of interest.
    ArrayRef<unsigned>::const_iterator ListSizeCur;
    ArrayRef<unsigned>::const_iterator ListSizeEnd;

    // Iterator to the end of the components storage.
    MappableExprComponentListRef::const_iterator End;

  public:
    /// Construct an iterator that scans all lists.
    explicit const_component_lists_iterator(
        ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum,
        ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components)
        : const_component_lists_iterator::iterator_adaptor_base(
              Components.begin()),
          DeclCur(UniqueDecls.begin()), NumListsCur(DeclsListNum.begin()),
          ListSizeCur(CumulativeListSizes.begin()),
          ListSizeEnd(CumulativeListSizes.end()), End(Components.end()) {
      assert(UniqueDecls.size() == DeclsListNum.size() &&
             "Inconsistent number of declarations and list sizes!");
      if (!DeclsListNum.empty())
        RemainingLists = *NumListsCur;
    }

    /// Construct an iterator that scan lists for a given declaration \a
    /// Declaration.
    explicit const_component_lists_iterator(
        const ValueDecl *Declaration, ArrayRef<ValueDecl *> UniqueDecls,
        ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components)
        : const_component_lists_iterator(UniqueDecls, DeclsListNum,
                                         CumulativeListSizes, Components) {
      // Look for the desired declaration. While we are looking for it, we
      // update the state so that we know the component where a given list
      // starts.
      for (; DeclCur != UniqueDecls.end(); ++DeclCur, ++NumListsCur) {
        if (*DeclCur == Declaration)
          break;

        assert(*NumListsCur > 0 && "No lists associated with declaration??");

        // Skip the lists associated with the current declaration, but save the
        // last list size that was skipped.
        std::advance(ListSizeCur, *NumListsCur - 1);
        PrevListSize = *ListSizeCur;
        ++ListSizeCur;
      }

      // If we didn't find any declaration, advance the iterator to after the
      // last component and set remaining lists to zero.
      if (ListSizeCur == CumulativeListSizes.end()) {
        this->I = End;
        RemainingLists = 0u;
        return;
      }

      // Set the remaining lists with the total number of lists of the current
      // declaration.
      RemainingLists = *NumListsCur;

      // Adjust the list size end iterator to the end of the relevant range.
      ListSizeEnd = ListSizeCur;
      std::advance(ListSizeEnd, RemainingLists);

      // Given that the list sizes are cumulative, the index of the component
      // that start the list is the size of the previous list.
      std::advance(this->I, PrevListSize);
    }

    // Return the array with the current list. The sizes are cumulative, so the
    // array size is the difference between the current size and previous one.
    std::pair<const ValueDecl *, MappableExprComponentListRef>
    operator*() const {
      assert(ListSizeCur != ListSizeEnd && "Invalid iterator!");
      return std::make_pair(
          *DeclCur,
          MappableExprComponentListRef(&*this->I, *ListSizeCur - PrevListSize));
    }
    std::pair<const ValueDecl *, MappableExprComponentListRef>
    operator->() const {
      return **this;
    }

    // Skip the components of the current list.
    const_component_lists_iterator &operator++() {
      assert(ListSizeCur != ListSizeEnd && RemainingLists &&
             "Invalid iterator!");

      // If we don't have more lists just skip all the components. Otherwise,
      // advance the iterator by the number of components in the current list.
      if (std::next(ListSizeCur) == ListSizeEnd) {
        this->I = End;
        RemainingLists = 0;
      } else {
        std::advance(this->I, *ListSizeCur - PrevListSize);
        PrevListSize = *ListSizeCur;

        // We are done with a declaration, move to the next one.
        if (!(--RemainingLists)) {
          ++DeclCur;
          ++NumListsCur;
          RemainingLists = *NumListsCur;
          assert(RemainingLists && "No lists in the following declaration??");
        }
      }
      ++ListSizeCur;
      return *this;
    }
  };

  using const_component_lists_range =
      llvm::iterator_range<const_component_lists_iterator>;

  /// Iterators for all component lists.
  const_component_lists_iterator component_lists_begin() const {
    return const_component_lists_iterator(
        getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(),
        getComponentsRef());
  }
  const_component_lists_iterator component_lists_end() const {
    return const_component_lists_iterator(
        ArrayRef<ValueDecl *>(), ArrayRef<unsigned>(), ArrayRef<unsigned>(),
        MappableExprComponentListRef(getComponentsRef().end(),
                                     getComponentsRef().end()));
  }
  const_component_lists_range component_lists() const {
    return {component_lists_begin(), component_lists_end()};
  }

  /// Iterators for component lists associated with the provided
  /// declaration.
  const_component_lists_iterator
  decl_component_lists_begin(const ValueDecl *VD) const {
    return const_component_lists_iterator(
        VD, getUniqueDeclsRef(), getDeclNumListsRef(),
        getComponentListSizesRef(), getComponentsRef());
  }
  const_component_lists_iterator decl_component_lists_end() const {
    return component_lists_end();
  }
  const_component_lists_range decl_component_lists(const ValueDecl *VD) const {
    return {decl_component_lists_begin(VD), decl_component_lists_end()};
  }

  /// Iterators to access all the declarations, number of lists, list sizes, and
  /// components.
  using const_all_decls_iterator = ArrayRef<ValueDecl *>::iterator;
  using const_all_decls_range = llvm::iterator_range<const_all_decls_iterator>;

  const_all_decls_range all_decls() const {
    auto A = getUniqueDeclsRef();
    return const_all_decls_range(A.begin(), A.end());
  }

  using const_all_num_lists_iterator = ArrayRef<unsigned>::iterator;
  using const_all_num_lists_range =
      llvm::iterator_range<const_all_num_lists_iterator>;

  const_all_num_lists_range all_num_lists() const {
    auto A = getDeclNumListsRef();
    return const_all_num_lists_range(A.begin(), A.end());
  }

  using const_all_lists_sizes_iterator = ArrayRef<unsigned>::iterator;
  using const_all_lists_sizes_range =
      llvm::iterator_range<const_all_lists_sizes_iterator>;

  const_all_lists_sizes_range all_lists_sizes() const {
    auto A = getComponentListSizesRef();
    return const_all_lists_sizes_range(A.begin(), A.end());
  }

  using const_all_components_iterator = ArrayRef<MappableComponent>::iterator;
  using const_all_components_range =
      llvm::iterator_range<const_all_components_iterator>;

  const_all_components_range all_components() const {
    auto A = getComponentsRef();
    return const_all_components_range(A.begin(), A.end());
  }

  using mapperlist_iterator = MutableArrayRef<Expr *>::iterator;
  using mapperlist_const_iterator = ArrayRef<const Expr *>::iterator;
  using mapperlist_range = llvm::iterator_range<mapperlist_iterator>;
  using mapperlist_const_range =
      llvm::iterator_range<mapperlist_const_iterator>;

  mapperlist_iterator mapperlist_begin() { return getUDMapperRefs().begin(); }
  mapperlist_iterator mapperlist_end() { return getUDMapperRefs().end(); }
  mapperlist_const_iterator mapperlist_begin() const {
    return getUDMapperRefs().begin();
  }
  mapperlist_const_iterator mapperlist_end() const {
    return getUDMapperRefs().end();
  }
  mapperlist_range mapperlists() {
    return mapperlist_range(mapperlist_begin(), mapperlist_end());
  }
  mapperlist_const_range mapperlists() const {
    return mapperlist_const_range(mapperlist_begin(), mapperlist_end());
  }
};

/// This
/// represents clause 'map' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target map(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'map'
/// with the variables 'a' and 'b'.
class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>,
                           private llvm::TrailingObjects<
                               OMPMapClause, Expr *, ValueDecl *, unsigned,
                               OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

private:
  /// Map-type-modifiers for the 'map' clause.
  OpenMPMapModifierKind MapTypeModifiers[NumberOfOMPMapClauseModifiers] = {
      OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown,
      OMPC_MAP_MODIFIER_unknown};

  /// Location of map-type-modifiers for the 'map' clause.
  SourceLocation MapTypeModifiersLoc[NumberOfOMPMapClauseModifiers];

  /// Map type for the 'map' clause.
  OpenMPMapClauseKind MapType = OMPC_MAP_unknown;

  /// Is this an implicit map type or not.
  bool MapTypeIsImplicit = false;

  /// Location of the map type.
  SourceLocation MapLoc;

  /// Colon location.
  SourceLocation ColonLoc;

  /// Build a clause for \a NumVars listed expressions, \a
  /// NumUniqueDeclarations declarations, \a NumComponentLists total component
  /// lists, and \a NumComponents total expression components.
  ///
  /// \param MapModifiers Map-type-modifiers.
  /// \param MapModifiersLoc Locations of map-type-modifiers.
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param MapType Map type.
  /// \param MapTypeIsImplicit Map type is inferred implicitly.
  /// \param MapLoc Location of the map type.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPMapClause(ArrayRef<OpenMPMapModifierKind> MapModifiers,
                        ArrayRef<SourceLocation> MapModifiersLoc,
                        NestedNameSpecifierLoc MapperQualifierLoc,
                        DeclarationNameInfo MapperIdInfo,
                        OpenMPMapClauseKind MapType, bool MapTypeIsImplicit,
                        SourceLocation MapLoc, const OMPVarListLocTy &Locs,
                        const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_map, Locs, Sizes,
                                  &MapperQualifierLoc, &MapperIdInfo),
        MapType(MapType), MapTypeIsImplicit(MapTypeIsImplicit), MapLoc(MapLoc) {
    assert(llvm::array_lengthof(MapTypeModifiers) == MapModifiers.size() &&
           "Unexpected number of map type modifiers.");
    llvm::copy(MapModifiers, std::begin(MapTypeModifiers));

    assert(llvm::array_lengthof(MapTypeModifiersLoc) ==
               MapModifiersLoc.size() &&
           "Unexpected number of map type modifier locations.");
    llvm::copy(MapModifiersLoc, std::begin(MapTypeModifiersLoc));
  }

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause.
  /// It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPMapClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_map, OMPVarListLocTy(),
                                  Sizes) {}

  /// Set map-type-modifier for the clause.
  ///
  /// \param I index for map-type-modifier.
  /// \param T map-type-modifier for the clause.
  void setMapTypeModifier(unsigned I, OpenMPMapModifierKind T) {
    assert(I < NumberOfOMPMapClauseModifiers &&
           "Unexpected index to store map type modifier, exceeds array size.");
    MapTypeModifiers[I] = T;
  }

  /// Set location for the map-type-modifier.
  ///
  /// \param I index for map-type-modifier location.
  /// \param TLoc map-type-modifier location.
  void setMapTypeModifierLoc(unsigned I, SourceLocation TLoc) {
    assert(I < NumberOfOMPMapClauseModifiers &&
           "Index to store map type modifier location exceeds array size.");
    MapTypeModifiersLoc[I] = TLoc;
  }

  /// Set type for the clause.
  ///
  /// \param T Type for the clause.
  void setMapType(OpenMPMapClauseKind T) { MapType = T; }

  /// Set type location.
  ///
  /// \param TLoc Type location.
  void setMapLoc(SourceLocation TLoc) { MapLoc = TLoc; }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param MapModifiers Map-type-modifiers.
  /// \param MapModifiersLoc Location of map-type-modifiers.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  /// \param Type Map type.
  /// \param TypeIsImplicit Map type is inferred implicitly.
  /// \param TypeLoc Location of the map type.
  static OMPMapClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists,
         ArrayRef<Expr *> UDMapperRefs,
         ArrayRef<OpenMPMapModifierKind> MapModifiers,
         ArrayRef<SourceLocation> MapModifiersLoc,
         NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId,
         OpenMPMapClauseKind Type, bool TypeIsImplicit, SourceLocation TypeLoc);

  /// Creates an empty clause with the place for \a NumVars original
  /// expressions, \a NumUniqueDeclarations declarations, \a NumComponentLists
  /// lists, and \a NumComponents expression components.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPMapClause *CreateEmpty(const ASTContext &C,
                                   const OMPMappableExprListSizeTy &Sizes);

  /// Fetches mapping kind for the clause.
  OpenMPMapClauseKind getMapType() const LLVM_READONLY { return MapType; }

  /// Is this an implicit map type?
  /// We have to capture 'IsMapTypeImplicit' from the parser for more
  /// informative error messages. It helps distinguish map(r) from
  /// map(tofrom: r), which is important to print more helpful error
  /// messages for some target directives.
  bool isImplicitMapType() const LLVM_READONLY { return MapTypeIsImplicit; }

  /// Fetches the map-type-modifier at 'Cnt' index of array of modifiers.
  ///
  /// \param Cnt index for map-type-modifier.
  OpenMPMapModifierKind getMapTypeModifier(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMapClauseModifiers &&
           "Requested modifier exceeds the total number of modifiers.");
    return MapTypeModifiers[Cnt];
  }

  /// Fetches the map-type-modifier location at 'Cnt' index of array of
  /// modifiers' locations.
  ///
  /// \param Cnt index for map-type-modifier location.
  SourceLocation getMapTypeModifierLoc(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMapClauseModifiers &&
           "Requested modifier location exceeds total number of modifiers.");
    return MapTypeModifiersLoc[Cnt];
  }

  /// Fetches ArrayRef of map-type-modifiers.
  ArrayRef<OpenMPMapModifierKind> getMapTypeModifiers() const LLVM_READONLY {
    return llvm::makeArrayRef(MapTypeModifiers);
  }

  /// Fetches ArrayRef of location of map-type-modifiers.
  ArrayRef<SourceLocation> getMapTypeModifiersLoc() const LLVM_READONLY {
    return llvm::makeArrayRef(MapTypeModifiersLoc);
  }

  /// Fetches location of clause mapping kind.
  SourceLocation getMapLoc() const LLVM_READONLY { return MapLoc; }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPMapClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    // Only 'to' and 'tofrom' maps contribute used children; other map types
    // do not capture any expressions here.
    if (MapType == OMPC_MAP_to || MapType == OMPC_MAP_tofrom)
      return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                         reinterpret_cast<Stmt **>(varlist_end()));
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    auto Children = const_cast<OMPMapClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_map;
  }
};

/// This represents 'num_teams' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams num_teams(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'num_teams'
/// with single expression 'n'.
class OMPNumTeamsClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// NumTeams number.
  Stmt *NumTeams = nullptr;

  /// Set the NumTeams number.
  ///
  /// \param E NumTeams number.
  void setNumTeams(Expr *E) { NumTeams = E; }

public:
  /// Build 'num_teams' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper Expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumTeamsClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_num_teams, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumTeams(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause.
  OMPNumTeamsClause()
      : OMPClause(llvm::omp::OMPC_num_teams, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return NumTeams number.
  Expr *getNumTeams() { return cast<Expr>(NumTeams); }

  /// Return NumTeams number.
  Expr *getNumTeams() const { return cast<Expr>(NumTeams); }

  child_range children() { return child_range(&NumTeams, &NumTeams + 1); }

  const_child_range children() const {
    return const_child_range(&NumTeams, &NumTeams + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_num_teams;
  }
};

/// This represents 'thread_limit' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams thread_limit(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'thread_limit'
/// with single expression 'n'.
class OMPThreadLimitClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// ThreadLimit number.
  Stmt *ThreadLimit = nullptr;

  /// Set the ThreadLimit number.
  ///
  /// \param E ThreadLimit number.
  void setThreadLimit(Expr *E) { ThreadLimit = E; }

public:
  /// Build 'thread_limit' clause.
  ///
  /// \param E Expression associated with this clause.
/// \param HelperE Helper Expression associated with this clause. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPThreadLimitClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_thread_limit, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadLimit(E) { setPreInitStmt(HelperE, CaptureRegion); } /// Build an empty clause. OMPThreadLimitClause() : OMPClause(llvm::omp::OMPC_thread_limit, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return ThreadLimit number. Expr *getThreadLimit() { return cast<Expr>(ThreadLimit); } /// Return ThreadLimit number. Expr *getThreadLimit() const { return cast<Expr>(ThreadLimit); } child_range children() { return child_range(&ThreadLimit, &ThreadLimit + 1); } const_child_range children() const { return const_child_range(&ThreadLimit, &ThreadLimit + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_thread_limit; } }; /// This represents 'priority' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp task priority(n) /// \endcode /// In this example directive '#pragma omp teams' has clause 'priority' with /// single expression 'n'. 
class OMPPriorityClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Priority number.
  Stmt *Priority = nullptr;

  /// Set the Priority number.
  ///
  /// \param E Priority number.
  void setPriority(Expr *E) { Priority = E; }

public:
  /// Build 'priority' clause.
  ///
  /// \param Priority Expression associated with this clause.
  /// \param HelperPriority Helper priority for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPPriorityClause(Expr *Priority, Stmt *HelperPriority,
                    OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_priority, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Priority(Priority) {
    setPreInitStmt(HelperPriority, CaptureRegion);
  }

  /// Build an empty clause.
  OMPPriorityClause()
      : OMPClause(llvm::omp::OMPC_priority, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return Priority number.
  Expr *getPriority() { return cast<Expr>(Priority); }

  /// Return Priority number (const overload).
  Expr *getPriority() const { return cast<Expr>(Priority); }

  child_range children() { return child_range(&Priority, &Priority + 1); }

  const_child_range children() const {
    return const_child_range(&Priority, &Priority + 1);
  }

  // Defined out of line (no inline body in this header).
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPPriorityClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_priority;
  }
};

/// This represents 'grainsize' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop grainsize(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'grainsize'
/// with single expression '4'.
class OMPGrainsizeClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Grainsize expression of the clause.
  Stmt *Grainsize = nullptr;

  /// Set the grainsize expression.
  void setGrainsize(Expr *Size) { Grainsize = Size; }

public:
  /// Build 'grainsize' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param HelperSize Helper grainsize for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPGrainsizeClause(Expr *Size, Stmt *HelperSize,
                     OpenMPDirectiveKind CaptureRegion,
                     SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_grainsize, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Grainsize(Size) {
    setPreInitStmt(HelperSize, CaptureRegion);
  }

  /// Build an empty clause.
  explicit OMPGrainsizeClause()
      : OMPClause(llvm::omp::OMPC_grainsize, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the grainsize expression (may be null in an empty clause).
  Expr *getGrainsize() const { return cast_or_null<Expr>(Grainsize); }

  child_range children() { return child_range(&Grainsize, &Grainsize + 1); }

  const_child_range children() const {
    return const_child_range(&Grainsize, &Grainsize + 1);
  }

  // Defined out of line (no inline body in this header).
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPGrainsizeClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_grainsize;
  }
};

/// This represents 'nogroup' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp taskloop nogroup
/// \endcode
/// In this example directive '#pragma omp taskloop' has 'nogroup' clause.
class OMPNogroupClause : public OMPClause {
public:
  /// Build 'nogroup' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_nogroup, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPNogroupClause()
      : OMPClause(llvm::omp::OMPC_nogroup, SourceLocation(), SourceLocation()) {
  }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_nogroup;
  }
};

/// This represents 'num_tasks' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop num_tasks(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'num_tasks'
/// with single expression '4'.
class OMPNumTasksClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number of tasks expression of the clause.
  Stmt *NumTasks = nullptr;

  /// Set the number of tasks expression.
  void setNumTasks(Expr *Size) { NumTasks = Size; }

public:
  /// Build 'num_tasks' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param HelperSize Helper num_tasks expression for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumTasksClause(Expr *Size, Stmt *HelperSize,
                    OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_num_tasks, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumTasks(Size) {
    setPreInitStmt(HelperSize, CaptureRegion);
  }

  /// Build an empty clause.
  explicit OMPNumTasksClause()
      : OMPClause(llvm::omp::OMPC_num_tasks, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the number of tasks expression (may be null in an empty clause).
  Expr *getNumTasks() const { return cast_or_null<Expr>(NumTasks); }

  child_range children() { return child_range(&NumTasks, &NumTasks + 1); }

  const_child_range children() const {
    return const_child_range(&NumTasks, &NumTasks + 1);
  }

  // Defined out of line (no inline body in this header).
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPNumTasksClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_num_tasks;
  }
};

/// This represents 'hint' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp critical (name) hint(6)
/// \endcode
/// In this example directive '#pragma omp critical' has name 'name' and clause
/// 'hint' with argument '6'.
class OMPHintClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Hint expression of the 'hint' clause.
  Stmt *Hint = nullptr;

  /// Set hint expression.
  void setHint(Expr *H) { Hint = H; }

public:
  /// Build 'hint' clause with expression \a Hint.
  ///
  /// \param Hint Hint expression.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc,
                SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_hint, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Hint(Hint) {}

  /// Build an empty clause.
  OMPHintClause()
      : OMPClause(llvm::omp::OMPC_hint, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns the hint expression (may be null in an empty clause).
  Expr *getHint() const { return cast_or_null<Expr>(Hint); }

  child_range children() { return child_range(&Hint, &Hint + 1); }

  const_child_range children() const {
    return const_child_range(&Hint, &Hint + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_hint;
  }
};

/// This represents 'dist_schedule' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp distribute dist_schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp distribute' has 'dist_schedule'
/// clause with arguments 'static' and '3'.
class OMPDistScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'dist_schedule' clause.
  OpenMPDistScheduleClauseKind Kind = OMPC_DIST_SCHEDULE_unknown;

  /// Start location of the schedule kind in source code.
  SourceLocation KindLoc;

  /// Location of ',' (if any).
  SourceLocation CommaLoc;

  /// Chunk size.
  Expr *ChunkSize = nullptr;

  /// Set schedule kind.
  ///
  /// \param K Schedule kind.
  void setDistScheduleKind(OpenMPDistScheduleClauseKind K) { Kind = K; }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  void setDistScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

  /// Set location of ','.
  ///
  /// \param Loc Location of ','.
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }

  /// Set chunk size.
  ///
  /// \param E Chunk size.
  void setChunkSize(Expr *E) { ChunkSize = E; }

public:
  /// Build 'dist_schedule' clause with schedule kind \a Kind and chunk
  /// size expression \a ChunkSize.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind DistSchedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives.
  OMPDistScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation KLoc, SourceLocation CommaLoc,
                        SourceLocation EndLoc,
                        OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
                        Stmt *HelperChunkSize)
      : OMPClause(llvm::omp::OMPC_dist_schedule, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind),
        KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {
    setPreInitStmt(HelperChunkSize);
  }

  /// Build an empty clause.
  explicit OMPDistScheduleClause()
      : OMPClause(llvm::omp::OMPC_dist_schedule, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Get kind of the clause.
  OpenMPDistScheduleClauseKind getDistScheduleKind() const { return Kind; }

  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }

  /// Get kind location.
  SourceLocation getDistScheduleKindLoc() { return KindLoc; }

  /// Get location of ','.
  SourceLocation getCommaLoc() { return CommaLoc; }

  /// Get chunk size.
  Expr *getChunkSize() { return ChunkSize; }

  /// Get chunk size (const overload).
  const Expr *getChunkSize() const { return ChunkSize; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
                       reinterpret_cast<Stmt **>(&ChunkSize) + 1);
  }

  const_child_range children() const {
    auto Children = const_cast<OMPDistScheduleClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_dist_schedule;
  }
};

/// This represents 'defaultmap' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp target defaultmap(tofrom: scalar)
/// \endcode
/// In this example directive '#pragma omp target' has 'defaultmap' clause of
/// kind 'scalar' with modifier 'tofrom'.
class OMPDefaultmapClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Modifiers for 'defaultmap' clause.
  OpenMPDefaultmapClauseModifier Modifier = OMPC_DEFAULTMAP_MODIFIER_unknown;

  /// Location of the modifier.
  SourceLocation ModifierLoc;

  /// A kind of the 'defaultmap' clause.
  OpenMPDefaultmapClauseKind Kind = OMPC_DEFAULTMAP_unknown;

  /// Start location of the defaultmap kind in source code.
  SourceLocation KindLoc;

  /// Set defaultmap kind.
  ///
  /// \param K Defaultmap kind.
  void setDefaultmapKind(OpenMPDefaultmapClauseKind K) { Kind = K; }

  /// Set the defaultmap modifier.
  ///
  /// \param M Defaultmap modifier.
  void setDefaultmapModifier(OpenMPDefaultmapClauseModifier M) {
    Modifier = M;
  }

  /// Set location of the defaultmap modifier.
  void setDefaultmapModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set defaultmap kind start location.
  ///
  /// \param KLoc Defaultmap kind location.
  void setDefaultmapKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

public:
  /// Build 'defaultmap' clause with defaultmap kind \a Kind
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Defaultmap kind.
  /// \param M The modifier applied to 'defaultmap' clause.
  /// \param MLoc Location of the modifier
  OMPDefaultmapClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation MLoc, SourceLocation KLoc,
                      SourceLocation EndLoc, OpenMPDefaultmapClauseKind Kind,
                      OpenMPDefaultmapClauseModifier M)
      : OMPClause(llvm::omp::OMPC_defaultmap, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Modifier(M), ModifierLoc(MLoc), Kind(Kind),
        KindLoc(KLoc) {}

  /// Build an empty clause.
  explicit OMPDefaultmapClause()
      : OMPClause(llvm::omp::OMPC_defaultmap, SourceLocation(),
                  SourceLocation()) {}

  /// Get kind of the clause.
  OpenMPDefaultmapClauseKind getDefaultmapKind() const { return Kind; }

  /// Get the modifier of the clause.
  OpenMPDefaultmapClauseModifier getDefaultmapModifier() const {
    return Modifier;
  }

  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }

  /// Get kind location.
  SourceLocation getDefaultmapKindLoc() { return KindLoc; }

  /// Get the modifier location.
  SourceLocation getDefaultmapModifierLoc() const { return ModifierLoc; }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_defaultmap;
  }
};

/// This represents clause 'to' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update to(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to'
/// with the variables 'a' and 'b'.
class OMPToClause final : public OMPMappableExprListClause<OMPToClause>,
                          private llvm::TrailingObjects<
                              OMPToClause, Expr *, ValueDecl *, unsigned,
                              OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with the number of variables specified in \a Sizes.
  ///
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPToClause(NestedNameSpecifierLoc MapperQualifierLoc,
                       DeclarationNameInfo MapperIdInfo,
                       const OMPVarListLocTy &Locs,
                       const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_to, Locs, Sizes,
                                  &MapperQualifierLoc, &MapperIdInfo) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPToClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_to, OMPVarListLocTy(),
                                  Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expressions used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  static OMPToClause *Create(const ASTContext &C, const OMPVarListLocTy &Locs,
                             ArrayRef<Expr *> Vars,
                             ArrayRef<ValueDecl *> Declarations,
                             MappableExprComponentListsRef ComponentLists,
                             ArrayRef<Expr *> UDMapperRefs,
                             NestedNameSpecifierLoc UDMQualifierLoc,
                             DeclarationNameInfo MapperId);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPToClause *CreateEmpty(const ASTContext &C,
                                  const OMPMappableExprListSizeTy &Sizes);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPToClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_to;
  }
};

/// This represents clause 'from' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update from(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'from'
/// with the variables 'a' and 'b'.
class OMPFromClause final
    : public OMPMappableExprListClause<OMPFromClause>,
      private llvm::TrailingObjects<
          OMPFromClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with the number of variables specified in \a Sizes.
  ///
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPFromClause(NestedNameSpecifierLoc MapperQualifierLoc,
                         DeclarationNameInfo MapperIdInfo,
                         const OMPVarListLocTy &Locs,
                         const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_from, Locs, Sizes,
                                  &MapperQualifierLoc, &MapperIdInfo) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPFromClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_from, OMPVarListLocTy(),
                                  Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expressions used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  static OMPFromClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists,
         ArrayRef<Expr *> UDMapperRefs, NestedNameSpecifierLoc UDMQualifierLoc,
         DeclarationNameInfo MapperId);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPFromClause *CreateEmpty(const ASTContext &C,
                                    const OMPMappableExprListSizeTy &Sizes);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFromClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_from;
  }
};

/// This represents clause 'use_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_ptr' with the variables 'a' and 'b'.
class OMPUseDevicePtrClause final
    : public OMPMappableExprListClause<OMPUseDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPUseDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with the number of variables specified in \a Sizes.
  ///
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDevicePtrClause(const OMPVarListLocTy &Locs,
                                 const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_use_device_ptr, Locs,
                                  Sizes) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_use_device_ptr,
                                  OMPVarListLocTy(), Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // varlist_size() of original expressions, plus one private copy and one
    // initializer per variable (see getPrivateCopies()/getInits() below).
    return 3 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

  /// Sets the list of references to private copies with initializers for new
  /// private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for new
  /// private variables.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Sets the list of references to initializer variables for new private
  /// variables.
  /// \param VL List of references.
  void setInits(ArrayRef<Expr *> VL);

  /// Gets the list of references to initializer variables for new private
  /// variables.
  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expressions used in the clause.
  /// \param PrivateVars Expressions referring to private copies.
  /// \param Inits Expressions referring to private copy initializers.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPUseDevicePtrClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<Expr *> PrivateVars,
         ArrayRef<Expr *> Inits, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPUseDevicePtrClause *
  CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }

  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }

  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPUseDevicePtrClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_use_device_ptr;
  }
};

/// This represents clause 'use_device_addr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_addr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_addr' with the variables 'a' and 'b'.
class OMPUseDeviceAddrClause final
    : public OMPMappableExprListClause<OMPUseDeviceAddrClause>,
      private llvm::TrailingObjects<
          OMPUseDeviceAddrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with the number of variables specified in \a Sizes.
  ///
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDeviceAddrClause(const OMPVarListLocTy &Locs,
                                  const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_use_device_addr, Locs,
                                  Sizes) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDeviceAddrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_use_device_addr,
                                  OMPVarListLocTy(), Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expressions used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPUseDeviceAddrClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPUseDeviceAddrClause *
  CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPUseDeviceAddrClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_use_device_addr;
  }
};

/// This represents clause 'is_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target is_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause
/// 'is_device_ptr' with the variables 'a' and 'b'.
class OMPIsDevicePtrClause final
    : public OMPMappableExprListClause<OMPIsDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPIsDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with the number of variables specified in \a Sizes.
  ///
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
explicit OMPIsDevicePtrClause(const OMPVarListLocTy &Locs, const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_is_device_ptr, Locs, Sizes) {} /// Build an empty clause. /// /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPIsDevicePtrClause(const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_is_device_ptr, OMPVarListLocTy(), Sizes) {} /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. size_t numTrailingObjects(OverloadToken<Expr *>) const { return varlist_size(); } size_t numTrailingObjects(OverloadToken<ValueDecl *>) const { return getUniqueDeclarationsNum(); } size_t numTrailingObjects(OverloadToken<unsigned>) const { return getUniqueDeclarationsNum() + getTotalComponentListNum(); } public: /// Creates clause with a list of variables \a Vars. /// /// \param C AST context. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Vars The original expression used in the clause. /// \param Declarations Declarations used in the clause. /// \param ComponentLists Component lists used in the clause. static OMPIsDevicePtrClause * Create(const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. 
/// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. static OMPIsDevicePtrClause * CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPIsDevicePtrClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_is_device_ptr; } }; /// This represents clause 'nontemporal' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp simd nontemporal(a) /// \endcode /// In this example directive '#pragma omp simd' has clause 'nontemporal' for /// the variable 'a'. class OMPNontemporalClause final : public OMPVarListClause<OMPNontemporalClause>, private llvm::TrailingObjects<OMPNontemporalClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. 
  OMPNontemporalClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPNontemporalClause>(llvm::omp::OMPC_nontemporal,
                                               StartLoc, LParenLoc, EndLoc, N) {
  }

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPNontemporalClause(unsigned N)
      : OMPVarListClause<OMPNontemporalClause>(
            llvm::omp::OMPC_nontemporal, SourceLocation(), SourceLocation(),
            SourceLocation(), N) {}

  /// Get the list of privatized copies if the member expression was captured by
  /// one of the privatization clauses.
  MutableArrayRef<Expr *> getPrivateRefs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateRefs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPNontemporalClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPNontemporalClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Sets the list of references to private copies created in private clauses.
  /// \param VL List of references.
  void setPrivateRefs(ArrayRef<Expr *> VL);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPNontemporalClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range private_refs() {
    return child_range(reinterpret_cast<Stmt **>(getPrivateRefs().begin()),
                       reinterpret_cast<Stmt **>(getPrivateRefs().end()));
  }

  const_child_range private_refs() const {
    auto Children = const_cast<OMPNontemporalClause *>(this)->private_refs();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_nontemporal;
  }
};

/// This represents 'order' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp simd order(concurrent)
/// \endcode
/// In this example directive '#pragma omp simd' has simple 'order'
/// clause with kind 'concurrent'.
class OMPOrderClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// The kind of the 'order' clause.
  OpenMPOrderClauseKind Kind = OMPC_ORDER_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Argument of clause.
  void setKind(OpenMPOrderClauseKind K) { Kind = K; }

  /// Set argument location.
  ///
  /// \param KLoc Argument location.
  void setKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// Build 'order' clause with argument \p A ('concurrent').
  ///
  /// \param A Argument of the clause ('concurrent').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPOrderClause(OpenMPOrderClauseKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_order, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. OMPOrderClause() : OMPClause(llvm::omp::OMPC_order, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. OpenMPOrderClauseKind getKind() const { return Kind; } /// Returns location of clause kind. SourceLocation getKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_order; } }; /// This represents 'destroy' clause in the '#pragma omp depobj' /// directive. /// /// \code /// #pragma omp depobj(a) destroy /// \endcode /// In this example directive '#pragma omp depobj' has 'destroy' clause. class OMPDestroyClause final : public OMPClause { public: /// Build 'destroy' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPDestroyClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_destroy, StartLoc, EndLoc) {} /// Build an empty clause. 
OMPDestroyClause() : OMPClause(llvm::omp::OMPC_destroy, SourceLocation(), SourceLocation()) { } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_destroy; } }; /// This represents 'detach' clause in the '#pragma omp task' directive. /// /// \code /// #pragma omp task detach(evt) /// \endcode /// In this example directive '#pragma omp detach' has simple 'detach' clause /// with the variable 'evt'. class OMPDetachClause final : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Expression of the 'detach' clause. Stmt *Evt = nullptr; /// Set condition. void setEventHandler(Expr *E) { Evt = E; } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } public: /// Build 'detach' clause with event-handler \a Evt. /// /// \param Evt Event handler expression. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPDetachClause(Expr *Evt, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_detach, StartLoc, EndLoc), LParenLoc(LParenLoc), Evt(Evt) {} /// Build an empty clause. OMPDetachClause() : OMPClause(llvm::omp::OMPC_detach, SourceLocation(), SourceLocation()) {} /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns event-handler expression. 
Expr *getEventHandler() const { return cast_or_null<Expr>(Evt); } child_range children() { return child_range(&Evt, &Evt + 1); } const_child_range children() const { return const_child_range(&Evt, &Evt + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_detach; } }; /// This represents clause 'inclusive' in the '#pragma omp scan' directive. /// /// \code /// #pragma omp scan inclusive(a,b) /// \endcode /// In this example directive '#pragma omp scan' has clause 'inclusive' /// with the variables 'a' and 'b'. class OMPInclusiveClause final : public OMPVarListClause<OMPInclusiveClause>, private llvm::TrailingObjects<OMPInclusiveClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPInclusiveClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPInclusiveClause>(llvm::omp::OMPC_inclusive, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPInclusiveClause(unsigned N) : OMPVarListClause<OMPInclusiveClause>(llvm::omp::OMPC_inclusive, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the original variables. 
static OMPInclusiveClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPInclusiveClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPInclusiveClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_inclusive; } }; /// This represents clause 'exclusive' in the '#pragma omp scan' directive. /// /// \code /// #pragma omp scan exclusive(a,b) /// \endcode /// In this example directive '#pragma omp scan' has clause 'exclusive' /// with the variables 'a' and 'b'. class OMPExclusiveClause final : public OMPVarListClause<OMPExclusiveClause>, private llvm::TrailingObjects<OMPExclusiveClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPExclusiveClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPExclusiveClause>(llvm::omp::OMPC_exclusive, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. 
explicit OMPExclusiveClause(unsigned N) : OMPVarListClause<OMPExclusiveClause>(llvm::omp::OMPC_exclusive, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the original variables. static OMPExclusiveClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPExclusiveClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPExclusiveClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_exclusive; } }; /// This represents clause 'uses_allocators' in the '#pragma omp target'-based /// directives. /// /// \code /// #pragma omp target uses_allocators(default_allocator, my_allocator(traits)) /// \endcode /// In this example directive '#pragma omp target' has clause 'uses_allocators' /// with the allocators 'default_allocator' and user-defined 'my_allocator'. class OMPUsesAllocatorsClause final : public OMPClause, private llvm::TrailingObjects<OMPUsesAllocatorsClause, Expr *, SourceLocation> { public: /// Data for list of allocators. struct Data { /// Allocator. 
Expr *Allocator = nullptr; /// Allocator traits. Expr *AllocatorTraits = nullptr; /// Locations of '(' and ')' symbols. SourceLocation LParenLoc, RParenLoc; }; private: friend class OMPClauseReader; friend TrailingObjects; enum class ExprOffsets { Allocator, AllocatorTraits, Total, }; enum class ParenLocsOffsets { LParen, RParen, Total, }; /// Location of '('. SourceLocation LParenLoc; /// Total number of allocators in the clause. unsigned NumOfAllocators = 0; /// Build clause. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of allocators asssociated with the clause. OMPUsesAllocatorsClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPClause(llvm::omp::OMPC_uses_allocators, StartLoc, EndLoc), LParenLoc(LParenLoc), NumOfAllocators(N) {} /// Build an empty clause. /// \param N Number of allocators asssociated with the clause. /// explicit OMPUsesAllocatorsClause(unsigned N) : OMPClause(llvm::omp::OMPC_uses_allocators, SourceLocation(), SourceLocation()), NumOfAllocators(N) {} unsigned numTrailingObjects(OverloadToken<Expr *>) const { return NumOfAllocators * static_cast<int>(ExprOffsets::Total); } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Sets the allocators data for the clause. void setAllocatorsData(ArrayRef<OMPUsesAllocatorsClause::Data> Data); public: /// Creates clause with a list of allocators \p Data. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param Data List of allocators. static OMPUsesAllocatorsClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<OMPUsesAllocatorsClause::Data> Data); /// Creates an empty clause with the place for \p N allocators. 
/// /// \param C AST context. /// \param N The number of allocators. static OMPUsesAllocatorsClause *CreateEmpty(const ASTContext &C, unsigned N); /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns number of allocators associated with the clause. unsigned getNumberOfAllocators() const { return NumOfAllocators; } /// Returns data for the specified allocator. OMPUsesAllocatorsClause::Data getAllocatorData(unsigned I) const; // Iterators child_range children() { Stmt **Begin = reinterpret_cast<Stmt **>(getTrailingObjects<Expr *>()); return child_range(Begin, Begin + NumOfAllocators * static_cast<int>(ExprOffsets::Total)); } const_child_range children() const { Stmt *const *Begin = reinterpret_cast<Stmt *const *>(getTrailingObjects<Expr *>()); return const_child_range( Begin, Begin + NumOfAllocators * static_cast<int>(ExprOffsets::Total)); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_uses_allocators; } }; /// This represents clause 'affinity' in the '#pragma omp task'-based /// directives. /// /// \code /// #pragma omp task affinity(iterator(i = 0:n) : ([3][n])a, b[:n], c[i]) /// \endcode /// In this example directive '#pragma omp task' has clause 'affinity' with the /// affinity modifer 'iterator(i = 0:n)' and locator items '([3][n])a', 'b[:n]' /// and 'c[i]'. class OMPAffinityClause final : public OMPVarListClause<OMPAffinityClause>, private llvm::TrailingObjects<OMPAffinityClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of ':' symbol. SourceLocation ColonLoc; /// Build clause. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. 
/// \param EndLoc Ending location of the clause. /// \param N Number of locators asssociated with the clause. OMPAffinityClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPAffinityClause>(llvm::omp::OMPC_affinity, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// \param N Number of locators asssociated with the clause. /// explicit OMPAffinityClause(unsigned N) : OMPVarListClause<OMPAffinityClause>(llvm::omp::OMPC_affinity, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Sets the affinity modifier for the clause, if any. void setModifier(Expr *E) { getTrailingObjects<Expr *>()[varlist_size()] = E; } /// Sets the location of ':' symbol. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// Creates clause with a modifier a list of locator items. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param Locators List of locator items. static OMPAffinityClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, Expr *Modifier, ArrayRef<Expr *> Locators); /// Creates an empty clause with the place for \p N locator items. /// /// \param C AST context. /// \param N The number of locator items. static OMPAffinityClause *CreateEmpty(const ASTContext &C, unsigned N); /// Gets affinity modifier. Expr *getModifier() { return getTrailingObjects<Expr *>()[varlist_size()]; } Expr *getModifier() const { return getTrailingObjects<Expr *>()[varlist_size()]; } /// Gets the location of ':' symbol. SourceLocation getColonLoc() const { return ColonLoc; } // Iterators child_range children() { int Offset = getModifier() ? 
1 : 0; return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end() + Offset)); } const_child_range children() const { auto Children = const_cast<OMPAffinityClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_affinity; } }; /// This class implements a simple visitor for OMPClause /// subclasses. template<class ImplClass, template <typename> class Ptr, typename RetTy> class OMPClauseVisitorBase { public: #define PTR(CLASS) Ptr<CLASS> #define DISPATCH(CLASS) \ return static_cast<ImplClass*>(this)->Visit##CLASS(static_cast<PTR(CLASS)>(S)) #define OMP_CLAUSE_CLASS(Enum, Str, Class) \ RetTy Visit ## Class (PTR(Class) S) { DISPATCH(Class); } #include "llvm/Frontend/OpenMP/OMPKinds.def" RetTy Visit(PTR(OMPClause) S) { // Top switch clause: visit each OMPClause. switch (S->getClauseKind()) { #define OMP_CLAUSE_CLASS(Enum, Str, Class) \ case llvm::omp::Clause::Enum: \ return Visit##Class(static_cast<PTR(Class)>(S)); #define OMP_CLAUSE_NO_CLASS(Enum, Str) \ case llvm::omp::Clause::Enum: \ break; #include "llvm/Frontend/OpenMP/OMPKinds.def" default: break; } } // Base case, ignore it. 
:) RetTy VisitOMPClause(PTR(OMPClause) Node) { return RetTy(); } #undef PTR #undef DISPATCH }; template <typename T> using const_ptr = std::add_pointer_t<std::add_const_t<T>>; template <class ImplClass, typename RetTy = void> class OMPClauseVisitor : public OMPClauseVisitorBase<ImplClass, std::add_pointer_t, RetTy> {}; template<class ImplClass, typename RetTy = void> class ConstOMPClauseVisitor : public OMPClauseVisitorBase <ImplClass, const_ptr, RetTy> {}; class OMPClausePrinter final : public OMPClauseVisitor<OMPClausePrinter> { raw_ostream &OS; const PrintingPolicy &Policy; /// Process clauses with list of variables. template <typename T> void VisitOMPClauseList(T *Node, char StartSym); public: OMPClausePrinter(raw_ostream &OS, const PrintingPolicy &Policy) : OS(OS), Policy(Policy) {} #define OMP_CLAUSE_CLASS(Enum, Str, Class) \ void Visit##Class(Class *S); #include "llvm/Frontend/OpenMP/OMPKinds.def" }; struct OMPTraitProperty { llvm::omp::TraitProperty Kind = llvm::omp::TraitProperty::invalid; }; struct OMPTraitSelector { Expr *ScoreOrCondition = nullptr; llvm::omp::TraitSelector Kind = llvm::omp::TraitSelector::invalid; llvm::SmallVector<OMPTraitProperty, 1> Properties; }; struct OMPTraitSet { llvm::omp::TraitSet Kind = llvm::omp::TraitSet::invalid; llvm::SmallVector<OMPTraitSelector, 2> Selectors; }; /// Helper data structure representing the traits in a match clause of an /// `declare variant` or `metadirective`. The outer level is an ordered /// collection of selector sets, each with an associated kind and an ordered /// collection of selectors. A selector has a kind, an optional score/condition, /// and an ordered collection of properties. class OMPTraitInfo { /// Private constructor accesible only by ASTContext. OMPTraitInfo() {} friend class ASTContext; public: /// Reconstruct a (partial) OMPTraitInfo object from a mangled name. OMPTraitInfo(StringRef MangledName); /// The outermost level of selector sets. 
llvm::SmallVector<OMPTraitSet, 2> Sets; bool anyScoreOrCondition( llvm::function_ref<bool(Expr *&, bool /* IsScore */)> Cond) { return llvm::any_of(Sets, [&](OMPTraitSet &Set) { return llvm::any_of( Set.Selectors, [&](OMPTraitSelector &Selector) { return Cond(Selector.ScoreOrCondition, /* IsScore */ Selector.Kind != llvm::omp::TraitSelector::user_condition); }); }); } /// Create a variant match info object from this trait info object. While the /// former is a flat representation the actual main difference is that the /// latter uses clang::Expr to store the score/condition while the former is /// independent of clang. Thus, expressions and conditions are evaluated in /// this method. void getAsVariantMatchInfo(ASTContext &ASTCtx, llvm::omp::VariantMatchInfo &VMI) const; /// Return a string representation identifying this context selector. std::string getMangledName() const; /// Print a human readable representation into \p OS. void print(llvm::raw_ostream &OS, const PrintingPolicy &Policy) const; }; llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo &TI); llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo *TI); } // namespace clang #endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
// ordering_op-inl.h -- file-boundary marker from source concatenation; kept as
// a comment because a bare filename token is not valid C++ at file scope.
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 *  Copyright (c) 2016 by Contributors
 * \file ordering_op-inl.h
 * \brief Function definition of ordering operators
 */
#ifndef MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
#define MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_

#include <mxnet/operator_util.h>
#include <dmlc/optional.h>
#include <mshadow/tensor.h>
#include <algorithm>
#include <vector>
#include <type_traits>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "./sort_op.h"
#include "./indexing_op.h"

namespace mshadow {
// Reinterpret a contiguous tensor as one of a different rank without copying.
// The caller guarantees target_shape has the same total element count as src.
template<typename xpu, int src_dim, typename DType, int dst_dim>
inline Tensor<xpu, dst_dim, DType> inplace_reshape(Tensor<xpu, src_dim, DType> src,
                                                   Shape<dst_dim> target_shape) {
  CHECK_EQ(src.CheckContiguous(), true);
  return Tensor<xpu, dst_dim, DType>(src.dptr_, target_shape, src.stream_);
}
};

namespace mxnet {
namespace op {
// These enums are only visible within this header
namespace topk_enum {
enum TopKReturnType {kReturnValue, kReturnIndices, kReturnMask, kReturnBoth};
}  // topk_enum

// Parameters of the `topk` operator: which axis to reduce over, how many
// elements to keep, what to return (values/indices/mask/both), the sort
// direction, and the dtype used for returned indices.
struct TopKParam : public dmlc::Parameter<TopKParam> {
  dmlc::optional<int> axis;
  int k;
  int ret_typ;
  bool is_ascend;
  int dtype;
  DMLC_DECLARE_PARAMETER(TopKParam) {
    DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
      .describe("Axis along which to choose the top k indices."
                " If not given, the flattened array is used. Default is -1.");
    DMLC_DECLARE_FIELD(k).set_default(1)
      .describe("Number of top elements to select,"
                " should be always smaller than or equal to the element number in the given axis."
                " A global sort is performed if set k < 1.");
    DMLC_DECLARE_FIELD(ret_typ).set_default(topk_enum::kReturnIndices)
      .add_enum("value", topk_enum::kReturnValue)
      .add_enum("indices", topk_enum::kReturnIndices)
      .add_enum("mask", topk_enum::kReturnMask)
      .add_enum("both", topk_enum::kReturnBoth)
      .describe("The return type.\n"
                " \"value\" means to return the top k values,"
                " \"indices\" means to return the indices of the top k values,"
                " \"mask\" means to return a mask array containing 0 and 1. 1 means the top k values."
                " \"both\" means to return a list of both values and indices of top k elements.");
    DMLC_DECLARE_FIELD(is_ascend).set_default(false)
      .describe("Whether to choose k largest or k smallest elements."
                " Top K largest elements will be chosen if set to false.");
    DMLC_DECLARE_FIELD(dtype)
      .add_enum("uint8", mshadow::kUint8)
      .add_enum("int32", mshadow::kInt32)
      .add_enum("float16", mshadow::kFloat16)
      .add_enum("float32", mshadow::kFloat32)
      .add_enum("float64", mshadow::kFloat64)
      .set_default(mshadow::kFloat32)
      .describe("DType of the output indices when ret_typ is \"indices\" or \"both\". "
                "An error will be raised if the selected data type cannot precisely represent the "
                "indices.");
  }
};

// Parameters of the `sort` operator; reuses the TopK machinery with k == 0
// (global sort along the chosen axis).
struct SortParam : public dmlc::Parameter<SortParam> {
  dmlc::optional<int> axis;
  bool is_ascend;
  DMLC_DECLARE_PARAMETER(SortParam) {
    DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
      .describe("Axis along which to choose sort the input tensor."
                " If not given, the flattened array is used. Default is -1.");
    DMLC_DECLARE_FIELD(is_ascend).set_default(true)
      .describe("Whether to sort in ascending or descending order.");
  }
};

// Parameters of the `argsort` operator; like SortParam but also selects the
// dtype of the returned index tensor.
struct ArgSortParam : public dmlc::Parameter<ArgSortParam> {
  dmlc::optional<int> axis;
  bool is_ascend;
  int dtype;
  DMLC_DECLARE_PARAMETER(ArgSortParam) {
    DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
      .describe("Axis along which to sort the input tensor."
                " If not given, the flattened array is used. Default is -1.");
    DMLC_DECLARE_FIELD(is_ascend).set_default(true)
      .describe("Whether to sort in ascending or descending order.");
    DMLC_DECLARE_FIELD(dtype)
      .add_enum("uint8", mshadow::kUint8)
      .add_enum("int32", mshadow::kInt32)
      .add_enum("float16", mshadow::kFloat16)
      .add_enum("float32", mshadow::kFloat32)
      .add_enum("float64", mshadow::kFloat64)
      .set_default(mshadow::kFloat32)
      .describe("DType of the output indices. It is only valid when ret_typ is \"indices\" or"
                " \"both\". An error will be raised if the selected data type cannot precisely "
                "represent the indices.");
  }
};

// Normalize a TopKParam against a concrete input shape. Resolves a negative or
// missing axis, computes the per-batch element count and batch count, clamps
// k <= 0 to "keep everything", decides whether the reduced axis must be moved
// to the innermost position (do_transpose), and derives the output shape.
inline void ParseTopKParam(const mxnet::TShape& src_shape,
                           const TopKParam& param,
                           mxnet::TShape *target_shape,
                           int *batch_size,
                           int *element_num,
                           int *axis,
                           int *k,
                           bool *do_transpose,
                           bool *is_ascend) {
  *do_transpose = false;
  *k = param.k;
  *is_ascend = param.is_ascend;
  // get batch_size, axis and element_num
  if (!static_cast<bool>(param.axis)) {  // No axis given: operate on the flattened array.
    *axis = 0;
    *batch_size = 1;
    *element_num = src_shape.Size();
  } else {
    *axis = param.axis.value();
    if (*axis < 0) {
      *axis += src_shape.ndim();
    }
    CHECK(*axis >= 0 && *axis < static_cast<int>(src_shape.ndim()))
      << "Invalid axis! axis should be between 0 and " << src_shape.ndim()
      << ", found axis=" << *axis;
    *batch_size = src_shape.Size() / src_shape[*axis];
    *element_num = src_shape[*axis];
    if (*axis != src_shape.ndim() - 1) {
      *do_transpose = true;
    }
  }
  // get k (k <= 0 means a full sort along the axis)
  if (param.k <= 0) {
    *k = *element_num;
  }
  // get target_shape
  if (!static_cast<bool>(param.axis)) {
    if (param.ret_typ != topk_enum::kReturnMask) {
      *target_shape = mshadow::Shape1(*k);
    } else {
      *target_shape = src_shape;  // mask output keeps the input shape
    }
  } else {
    *target_shape = src_shape;
    if (param.ret_typ != topk_enum::kReturnMask) {
      (*target_shape)[*axis] = *k;
    }
  }
  CHECK(*k >= 1 && *k <= *element_num) << "k must be smaller than "
                                       << *element_num << ", get k = " << *k;
}

using namespace mshadow;

// Kernel functor: scatter 1 into `out` at each position named by `indices`
// (used to build the 0/1 mask output).
struct fill_ind_to_one {
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, const int* indices, DType* out) {
    out[indices[i]] = static_cast<DType>(1);
  }
};

// Kernel functor: scatter `val[i]` into `out` at position `indices[i]`,
// honoring the write request type (used by the backward pass).
struct fill_ind {
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, const int* indices, const DType* val,
                                  int req, DType* out) {
    KERNEL_ASSIGN(out[indices[i]], req, val[i]);
  }
};

// CPU batch top-K sort: for each of the M batches, sort (or partial-sort) the
// index array `ind` by the source values held in `work`, then gather the first
// K sorted values into `dat`. Batches are processed in parallel with OpenMP.
template<typename DType>
MSHADOW_FORCE_INLINE void TopKSort(const Tensor<cpu, 1, DType>& dat,
                                   const Tensor<cpu, 1, int>& ind,
                                   const Tensor<cpu, 1, char>& work,
                                   int K, int N, bool is_ascend,
                                   Stream<cpu> *s) {
  // Use full sort when K is relatively large.
  const bool full_sort(K*8 > N);
  // Batch size.
  const int M(work.size(0)/(sizeof(DType)*N));
  const int omp_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount());
  #pragma omp parallel for num_threads(omp_threads)
  for (int i = 0; i < M; ++i) {
    // Tensor `work` stores the flattened source data, while `dat` stores the sorted result.
    // NOTE(review): `vals` is the base of the whole flattened buffer; the per-batch
    // entries of `indices` are absolute offsets into it (see the range_fwd init below).
    DType *vals = reinterpret_cast<DType*>(work.dptr_);
    DType *sorted_vals = dat.dptr_+i*N;
    int *indices = ind.dptr_+i*N;
    if (is_ascend) {
      if (full_sort) {
        std::sort(indices, indices+N,
                  [&](const int& i1, const int& i2){ return vals[i1] < vals[i2]; });
      } else {
        std::partial_sort(indices, indices+K, indices+N,
                          [&](const int& i1, const int& i2){ return vals[i1] < vals[i2]; });
      }
    } else {
      if (full_sort) {
        std::sort(indices, indices+N,
                  [&](const int& i1, const int& i2){ return vals[i1] > vals[i2]; });
      } else {
        std::partial_sort(indices, indices+K, indices+N,
                          [&](const int& i1, const int& i2){ return vals[i1] > vals[i2]; });
      }
    }
    // Gather the K winning values into the output buffer.
    for (int j = 0; j < K; ++j) {
      sorted_vals[j] = vals[indices[j]];
    }
  }
}

#ifdef __CUDACC__

// Ordering predicate for (value, index) pairs; negative indices mark
// uninitialized slots that always lose the comparison.
template<typename DType>
MSHADOW_XINLINE bool TopKCompare(DType val1, int ind1, DType val2, int ind2, bool is_ascend) {
  // Negative indices denote undefined values which are considered arbitrary small resp. large.
  return (ind2 < 0) || (ind1 >= 0 && ((is_ascend && val1 < val2) || (!is_ascend && val1 > val2)));
}

// Merge two sorted K-element lists; the result overwrites val1/ind1.
template<typename DType>
MSHADOW_XINLINE void MergeTopK(int K, DType *val1, int *ind1, DType *val2, int *ind2,
                               bool is_ascend) {
  // In-place merge of two sorted top-K lists into val1/ind1. First determine the intervals
  // [0,..,i1], [0,..i2] of the two lists that will be part of the merged list.
  int i1(K-1), i2(K-1);
  for (int i = 0; i < K; ++i) {
    if (TopKCompare(val1[i1], ind1[i1], val2[i2], ind2[i2], is_ascend)) {
      --i2;
    } else {
      --i1;
    }
  }
  // Now merge the lists from back to front.
  for (int i = K; i--;) {
    if (i2 < 0 || i1 >= 0 && TopKCompare(val2[i2], ind2[i2], val1[i1], ind1[i1], is_ascend)) {
      val1[i] = val1[i1];
      ind1[i] = ind1[i1];
      --i1;
    } else {
      val1[i] = val2[i2];
      ind1[i] = ind2[i2];
      --i2;
    }
  }
}

// CUDA kernel: per-block partial top-K selection for small K, done entirely in
// shared memory. One thread block handles one batch item of N elements.
template<typename DType>
__global__ void PartialSortSmallK(int K, int N, DType *val, int *ind, bool is_ascend) {
  // Buffer for pairwise reduction.
  extern __shared__ int buff[];
  // Start of buffer sections associated with this thread.
  const int offset(threadIdx.x*K);
  int *ind_buff = &buff[offset];
  DType *val_buff = reinterpret_cast<DType*>(&buff[blockDim.x*K])+offset;
  // Initialize top-K values for this thread.
  for (int i = 0; i < K; ++i) {
    ind_buff[i] = -1;
  }
  // Range of values this thread cares about. Each thread block processes
  // a different batch item (i.e. a different set of ind/val where we
  // have to select the top-K elements). All threads within the same
  // block work on the same batch item.
  const int first(blockIdx.x*N+threadIdx.x), last((blockIdx.x+1)*N);
  // Select top-K from this range and store it sorted in the buffer.
  // We assume a small K, so linear insertion is o.k.
  for (int i = first; i < last; i += blockDim.x) {
    DType cur_val(val[i]);
    int cur_ind(ind[i]);
    for (int j = K; j-- && TopKCompare(cur_val, cur_ind, val_buff[j], ind_buff[j], is_ascend); ) {
      if (j+1 < K) {
        val_buff[j+1] = val_buff[j];
        ind_buff[j+1] = ind_buff[j];
      }
      val_buff[j] = cur_val;
      ind_buff[j] = cur_ind;
    }
  }
  // Recursive merge of sorted lists for this thread block. Note that blockDim.x is not
  // necessary a power of two, therefore the additional checks for last_s.
  for (unsigned int s = (blockDim.x+1)/2, last_s = blockDim.x;
       last_s > 1; last_s = s, s = (s+1)/2) {
    __syncthreads();
    if (threadIdx.x < s && threadIdx.x+s < last_s) {
      MergeTopK(K, val_buff, ind_buff, val_buff+s*K, ind_buff+s*K, is_ascend);
    }
  }
  // Final updates on master thread.
  if (threadIdx.x == 0) {
    for (int i = 0; i < K; ++i) {
      ind[blockIdx.x*N+i] = ind_buff[i];
      val[blockIdx.x*N+i] = val_buff[i];
    }
  }
}

// GPU batch top-K sort: uses a full SortByKey pipeline for larger K, or the
// shared-memory PartialSortSmallK kernel when K <= 5.
template<typename DType>
MSHADOW_FORCE_INLINE void TopKSort(const Tensor<gpu, 1, DType>& dat,
                                   const Tensor<gpu, 1, int>& ind,
                                   const Tensor<gpu, 1, char>& work,
                                   int K, int N, bool is_ascend,
                                   Stream<gpu> *s) {
  // Use full sort for all but very small K for which we
  // can do a partial sort entirely within shared memory.
  const bool full_sort(K > 5);
  // Batch size.
  const int M(dat.size(0)/N);
  if (full_sort) {
    // Divide workspace into two parts. The first one is needed to store batch ids.
    size_t alignment = std::max(sizeof(DType), sizeof(int));
    size_t id_size = PadBytes(sizeof(int) * ind.size(0), alignment);
    Tensor<gpu, 1, int> batch_id(reinterpret_cast<int*>(work.dptr_), Shape1(ind.size(0)), s);
    Tensor<gpu, 1, char> sort_work(work.dptr_+id_size, Shape1(work.size(0)-id_size), s);
    mxnet::op::SortByKey(dat, ind, is_ascend, &sort_work);
    if (M > 1) {
      // Back to back sorting. Note that mxnet::op::SortByKey is a stable sort.
      batch_id = ind / N;
      mxnet::op::SortByKey(batch_id, dat, true, &sort_work);
      batch_id = ind / N;
      mxnet::op::SortByKey(batch_id, ind, true, &sort_work);
    }
  } else {
    const int nthreads(mshadow::cuda::kBaseThreadNum);
    PartialSortSmallK<<<M, nthreads, nthreads*K*(sizeof(int)+sizeof(DType)),
                        mshadow::Stream<gpu>::GetStream(s)>>>
                       (K, N, dat.dptr_, ind.dptr_, is_ascend);
  }
}
#endif

/*!
   * \brief Implementation of the TopK operation
   *
   *
   * \param ctx the running context
   * \param resource temporary resource handler
   * \param req the operation request types (only req[0]/req[1] are consulted)
   * \param src the Source blob
   * \param ret the destination blobs
   * \param param the topk parameters
   * \tparam xpu the device type.
   * \tparam DType type of the output value/mask.
   * \tparam IDType type of the output indices.
   */
template<typename xpu, typename DType, typename IDType>
void TopKImpl(const RunContext &ctx,
              const Resource &resource,
              const std::vector<OpReqType>& req,
              const TBlob& src,
              const std::vector<TBlob>& ret,
              const TopKParam& param) {
  using namespace mshadow;
  using namespace mshadow::expr;
  // 1. Parse and initialize information
  Stream<xpu> *s = ctx.get_stream<xpu>();
  Tensor<xpu, 1, char> workspace;
  Tensor<xpu, 1, char> temp_workspace;
  Tensor<xpu, 1, DType> sorted_dat;
  Tensor<xpu, 1, int> indices, sel_indices;
  int batch_size, element_num;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  int k = 0;
  size_t alignment = std::max(sizeof(DType), sizeof(int));
  mxnet::TShape target_shape;
  ParseTopKParam(src.shape_, param,
                 &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
  CHECK_LE(element_num, mxnet::common::MaxIntegerValue<IDType>())
    << "'IDType' does not have a sufficient precision to represent the indices of the input array. "
    << "The total element_num is " << element_num << ", but the selected IDType can only represent "
    << mxnet::common::MaxIntegerValue<IDType>() << " elements";
  Tensor<xpu, 3, DType> dat = src.FlatTo3D<xpu, DType>(axis, axis, s);
  size_t temp_size = 0;
  // Temp space needed by the gpu-based full sorts.
  temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize<int, int, xpu>(src.Size()));
  temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize<int, DType, xpu>(src.Size()));
  temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize<DType, int, xpu>(src.Size()));
  // Additional temp space for gpu full sorts for batch ids.
  temp_size += PadBytes(sizeof(int) * src.Size(), alignment);
  // Temp space for cpu sorts.
  temp_size = std::max(temp_size, static_cast<size_t>(sizeof(DType) * src.Size()));
  size_t workspace_size = temp_size + PadBytes(sizeof(DType) * src.Size(), alignment)
                                    + PadBytes(sizeof(int) * src.Size(), alignment);
  if (param.ret_typ == topk_enum::kReturnMask) {
    workspace_size += PadBytes(sizeof(int) * batch_size * k, alignment);
  }
  workspace = resource.get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
  // Carve the single workspace allocation into the buffers used below.
  char* workspace_curr_ptr = workspace.dptr_;
  sorted_dat = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
                                     Shape1(src.Size()), s);  // contain sorted dat
  workspace_curr_ptr += PadBytes(sizeof(DType) * src.Size(), alignment);
  indices = Tensor<xpu, 1, int>(reinterpret_cast<int*>(workspace_curr_ptr),
                                Shape1(src.Size()), s);  // indices in the original matrix
  workspace_curr_ptr += PadBytes(sizeof(int) * src.Size(), alignment);

  if (param.ret_typ == topk_enum::kReturnMask) {
    sel_indices = Tensor<xpu, 1, int>(reinterpret_cast<int*>(workspace_curr_ptr),
                                      Shape1(batch_size * k), s);
    workspace_curr_ptr += PadBytes(sizeof(int) * batch_size * k, alignment);
    CHECK_EQ(sel_indices.CheckContiguous(), true);
  }

  if (std::is_same<xpu, cpu>::value) {
    // CPU path: flatten (transposing if the reduced axis is not innermost)
    // into workspace; the CPU TopKSort reads values out of `temp_workspace`.
    Tensor<xpu, 1, DType> flattened_data;
    if (do_transpose) {
      flattened_data = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
                                             Shape1(src.Size()), s);
      workspace_curr_ptr += sizeof(DType) * src.Size();
      flattened_data = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
      CHECK_EQ(flattened_data.CheckContiguous(), true);
    } else {
      flattened_data = src.FlatTo1D<xpu, DType>(s);
    }
    // `temp_workspace` stores the flattened data
    temp_workspace = Tensor<xpu, 1, char>(reinterpret_cast<char*>(flattened_data.dptr_),
                                          Shape1(sizeof(DType)*src.Size()), s);
    CHECK_EQ(temp_workspace.CheckContiguous(), true);
  } else {
    // GPU path: copy the (possibly transposed) data into `sorted_dat`, which
    // the GPU TopKSort sorts in place; `temp_workspace` is scratch for SortByKey.
    if (do_transpose) {
      sorted_dat = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
    } else {
      sorted_dat = reshape(dat, Shape1(src.Size()));
    }
    CHECK_EQ(sorted_dat.CheckContiguous(), true);
    temp_workspace = Tensor<xpu, 1, char>(workspace_curr_ptr, Shape1(temp_size), s);  // temp space
    workspace_curr_ptr += temp_size;
  }

  // Initialize `indices` to 0, 1, 2, ... over the whole flattened array.
  mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size * element_num, 1, 0, 1, kWriteTo,
                                           indices.dptr_);
  CHECK_EQ(indices.CheckContiguous(), true);

  // 2. Perform inplace batch sort.
  // After sorting, each batch in `sorted_dat` will be sorted in the corresponding order
  // up to the k-th element and the `indices` will contain the corresponding index in `sorted_dat`
  // `temp_workspace` is used to store the flattend source data for CPU device, and it's used as
  // a temporal buffer for GPU device.
  TopKSort(sorted_dat, indices, temp_workspace, k, element_num, is_ascend, s);

  // 3. Assign results to the ret blob
  // When returning indices, only update(modulo) required elements instead of full elements
  // to avoid redundant calculation.
  // Cast `ret_indices` from int to real_t could introduce conversion error when the element_num
  // is large enough.
  if (param.ret_typ == topk_enum::kReturnMask) {
    Tensor<xpu, 1, DType> ret_mask = ret[0].FlatTo1D<xpu, DType>(s);
    ret_mask = scalar<DType>(0);
    sel_indices = reshape(slice<1>(
                            inplace_reshape(indices,
                                            Shape2(batch_size,
                                                   element_num)),
                            0, k),
                          Shape1(batch_size * k));
    if (do_transpose) {
      mxnet::TShape src_shape = src.shape_.FlatTo3D(axis);
      CHECK_EQ(sel_indices.CheckContiguous(), true);
      sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]),
                                      Shape3(0, 2, 1));
    }
    if (req[0] == kNullOp) {
      return;
    } else if (req[0] == kWriteTo) {
      mxnet_op::Kernel<fill_ind_to_one, xpu>::Launch(s, batch_size * k,
                                                     sel_indices.dptr_, ret_mask.dptr_);
    } else {
      LOG(FATAL) << "req=" << req[0] << " is not supported yet.";
    }
  } else if (param.ret_typ == topk_enum::kReturnIndices) {
    if (do_transpose) {
      Tensor<xpu, 3, IDType> ret_indices = ret[0].FlatTo3D<xpu, IDType>(axis, axis, s);
      ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(transpose(
                        slice<2>(inplace_reshape(indices,
                                                 Shape3(ret_indices.shape_[0],
                                                        ret_indices.shape_[2],
                                                        element_num)),
                                 0, k),
                        Shape3(0, 2, 1)), element_num)));
    } else {
      Tensor<xpu, 2, IDType> ret_indices =
        ret[0].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
      ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(slice<1>(
                        inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k),
                        element_num)));
    }
  } else {
    if (do_transpose) {
      Tensor<xpu, 3, DType> ret_value = ret[0].FlatTo3D<xpu, DType>(axis, axis, s);
      Tensor<xpu, 3, IDType> ret_indices = ret[1].FlatTo3D<xpu, IDType>(axis, axis, s);
      ASSIGN_DISPATCH(ret_value, req[0], transpose(
                        slice<2>(inplace_reshape(sorted_dat,
                                                 Shape3(ret_value.shape_[0],
                                                        ret_value.shape_[2],
                                                        element_num)),
                                 0, k),
                        Shape3(0, 2, 1)));
      ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(transpose(
                        slice<2>(inplace_reshape(indices,
                                                 Shape3(ret_indices.shape_[0],
                                                        ret_indices.shape_[2],
                                                        element_num)),
                                 0, k),
                        Shape3(0, 2, 1)), element_num)));
    } else {
      Tensor<xpu, 2, DType> ret_value =
        ret[0].get_with_shape<xpu, 2, DType>(Shape2(batch_size, k), s);
      Tensor<xpu, 2, IDType> ret_indices =
        ret[1].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
      ASSIGN_DISPATCH(ret_value, req[0],
                      slice<1>(inplace_reshape(sorted_dat, Shape2(batch_size, element_num)), 0, k));
      ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(slice<1>(
                        inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k),
                        element_num)));
    }
  }
}

// Forward entry point for the `topk` operator: dispatches on the input dtype
// and (when indices are returned) on the requested index dtype.
template<typename xpu>
void TopK(const nnvm::NodeAttrs& attrs,
          const OpContext& ctx,
          const std::vector<TBlob>& inputs,
          const std::vector<OpReqType>& req,
          const std::vector<TBlob>& outputs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  if (param.ret_typ == topk_enum::kReturnIndices || param.ret_typ == topk_enum::kReturnBoth) {
    MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      MSHADOW_TYPE_SWITCH(param.dtype, IDType, {
        TopKImpl<xpu, DType, IDType>(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, param);
      })
    });
  } else {
    MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      TopKImpl<xpu, DType, int>(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, param);
    });
  }
}

// Forward entry point for `sort`: a full TopK with k = 0 (keep all) that
// returns values.
template<typename xpu>
void Sort(const nnvm::NodeAttrs& attrs,
          const OpContext& ctx,
          const std::vector<TBlob>& inputs,
          const std::vector<OpReqType>& req,
          const std::vector<TBlob>& outputs) {
  const SortParam& param = nnvm::get<SortParam>(attrs.parsed);
  TopKParam topk_param;
  topk_param.axis = param.axis;
  topk_param.is_ascend = param.is_ascend;
  topk_param.k = 0;
  topk_param.ret_typ = topk_enum::kReturnValue;
  MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    TopKImpl<xpu, DType, int>(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, topk_param);
  });
}

// Forward entry point for `argsort`: a full TopK with k = 0 that returns
// indices in the requested dtype.
template<typename xpu>
void ArgSort(const nnvm::NodeAttrs& attrs,
             const OpContext& ctx,
             const std::vector<TBlob>& inputs,
             const std::vector<OpReqType>& req,
             const std::vector<TBlob>& outputs) {
  const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
  TopKParam topk_param;
  topk_param.axis = param.axis;
  topk_param.is_ascend = param.is_ascend;
  topk_param.k = 0;
  topk_param.dtype = param.dtype;
  topk_param.ret_typ = topk_enum::kReturnIndices;
  MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    MSHADOW_TYPE_SWITCH(param.dtype, IDType, {
      TopKImpl<xpu, DType, IDType>(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs,
                                   topk_param);
    });
  });
}

// Backward pass: scatter the incoming gradient back to the positions selected
// by the forward pass (inputs[2] holds the forward indices).
template<typename xpu, typename DType, typename IDType>
void TopKBackwardImpl(const OpContext &ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs,
                      const TopKParam& param) {
  CHECK_NE(req[0], kWriteInplace);
  using namespace mshadow;
  using namespace mshadow::expr;
  Stream<xpu> *s = ctx.run_ctx.get_stream<xpu>();
  CHECK(param.ret_typ == topk_enum::kReturnValue || param.ret_typ == topk_enum::kReturnBoth);
  int batch_size, element_num;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  int k = 0;
  mxnet::TShape target_shape;
  ParseTopKParam(outputs[0].shape_, param,
                 &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
  CHECK_LE(element_num, mxnet::common::MaxIntegerValue<IDType>())
    << "'IDType' does not have a sufficient precision to represent the indices of the input array. "
    << "The total element_num is " << element_num << ", but the selected IDType can only represent "
    << mxnet::common::MaxIntegerValue<IDType>() << " elements";
  Tensor<xpu, 1, int> workspace =
    ctx.requested[0].get_space_typed<xpu, 1, int>(Shape1(batch_size * k + batch_size), s);
  Tensor<xpu, 1, int> sel_indices =
    Tensor<xpu, 1, int>(workspace.dptr_, Shape1(batch_size * k), s);
  Tensor<xpu, 1, int> batch_shift =
    Tensor<xpu, 1, int>(workspace.dptr_ + batch_size * k, Shape1(batch_size), s);
  Tensor<xpu, 2, DType> out_grad =
    inputs[0].get_with_shape<xpu, 2, DType>(Shape2(inputs[0].shape_.Size(), 1), s);
  Tensor<xpu, 2, DType> in_grad =
    outputs[0].get_with_shape<xpu, 2, DType>(Shape2(outputs[0].shape_.Size(), 1), s);
  // batch_shift[i] = i * element_num: offset of batch i in the flattened input.
  mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size, 1, 0, element_num, kWriteTo,
                                           batch_shift.dptr_);
  if (do_transpose) {
    Tensor<xpu, 1, IDType> indices = inputs[2].FlatTo1D<xpu, IDType>(s);
    mxnet::TShape src_shape = outputs[0].shape_.FlatTo3D(axis);
    sel_indices = reshape(transpose(
                            broadcast_to(inplace_reshape(batch_shift,
                                                         Shape3(src_shape[0], src_shape[2], 1)),
                                         mxnet::TShape(Shape3(src_shape[0], src_shape[2], k))),
                            Shape3(0, 2, 1)),
                          Shape1(batch_size * k));
    sel_indices += tcast<int>(indices);
    sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]),
                                    Shape3(0, 2, 1));
  } else {
    Tensor<xpu, 2, IDType> indices =
      inputs[2].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
    sel_indices = reshape(tcast<int>(indices) +
                          broadcast_to(inplace_reshape(batch_shift, Shape2(batch_size, 1)),
                                       mxnet::TShape(Shape2(batch_size, k))),
                          Shape1(batch_size * k));
  }
  CHECK_EQ(sel_indices.CheckContiguous(), true);
  if (kWriteTo == req[0] || kAddTo == req[0]) {
    if (kWriteTo == req[0]) {
      in_grad = scalar<DType>(0);
    }
    mxnet_op::Kernel<fill_ind, xpu>::Launch(s, batch_size * k,
                                            sel_indices.dptr_, out_grad.dptr_,
                                            req[0], in_grad.dptr_);
  } else {
    LOG(FATAL) << "Not Implemented!";
  }
}

// Backward entry point: dispatches on dtypes like TopK.
template<typename xpu>
void TopKBackward_(const nnvm::NodeAttrs& attrs,
                   const OpContext& ctx,
                   const std::vector<TBlob>& inputs,
                   const std::vector<OpReqType>& req,
                   const std::vector<TBlob>& outputs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  if (param.ret_typ == topk_enum::kReturnBoth) {
    MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      MSHADOW_TYPE_SWITCH(param.dtype, IDType, {
        TopKBackwardImpl<xpu, DType, IDType>(ctx, inputs, req, outputs, param);
      });
    });
  } else if (param.ret_typ == topk_enum::kReturnValue) {
    MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      TopKBackwardImpl<xpu, DType, int>(ctx, inputs, req, outputs, param);
    });
  } else {
    LOG(FATAL) << "Not Implemented";
  }
}

// Number of output arrays produced by `topk` (2 for "value"/"both").
inline uint32_t TopKNumOutputs(const NodeAttrs& attrs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  if (param.ret_typ == topk_enum::kReturnIndices ||
      param.ret_typ == topk_enum::kReturnMask) {
    return static_cast<uint32_t>(1);
  } else {
    return static_cast<uint32_t>(2);
  }
}

// Number of outputs the user sees (only "both" exposes two).
inline uint32_t TopKNumVisibleOutputs(const NodeAttrs& attrs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  if (param.ret_typ == topk_enum::kReturnBoth) {
    return static_cast<uint32_t>(2);
  } else {
    return static_cast<uint32_t>(1);
  }
}

// Type inference for `topk`: the index output takes param.dtype (or int32 for
// "value" mode); value/mask outputs share the input dtype.
inline bool TopKType(const nnvm::NodeAttrs& attrs,
                     std::vector<int> *in_attrs,
                     std::vector<int> *out_attrs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  int data_type = -1;
  size_t in_size = in_attrs->size();
  size_t out_size = out_attrs->size();
  CHECK_EQ(in_size, 1);
  CHECK(out_size == 1 || out_size == 2);
  if (out_size > 1) {
    if (param.ret_typ == topk_enum::kReturnValue) {
      CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt32))
        << "Failed to set the type of ret_indices.";
    } else {
      CHECK(type_assign(&(*out_attrs)[1], param.dtype))
        << "Failed to set the type of ret_indices.";
    }
  }
  if (param.ret_typ == topk_enum::kReturnIndices) {
    CHECK(type_assign(&(*out_attrs)[0], param.dtype))
      << "Failed to set the type of ret_indices.";
  } else {
    CHECK(type_assign(&data_type, (*in_attrs)[0])) << "Incompatible dtype of input, in_attrs[0]="
                                                   << (*in_attrs)[0];
    CHECK(type_assign(&data_type, (*out_attrs)[0])) << "Incompatible dtype of output, out_attrs[0]="
                                                    << (*out_attrs)[0];
    CHECK(type_assign(&(*in_attrs)[0], data_type)) << "Incompatible dtype of input, in_attrs[0]="
                                                   << (*in_attrs)[0];
    CHECK(type_assign(&(*out_attrs)[0], data_type)) << "Incompatible dtype of output, out_attrs[0]="
                                                    << (*out_attrs)[0];
    if (data_type == -1) return false;
  }
  return true;
}

// Shared shape inference: both (or the single) outputs get the shape computed
// by ParseTopKParam.
inline bool TopKShapeImpl(const TopKParam& param,
                          mxnet::ShapeVector *in_attrs,
                          mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  if (param.ret_typ == topk_enum::kReturnIndices ||
      param.ret_typ == topk_enum::kReturnMask) {
    CHECK_EQ(out_attrs->size(), 1U);
  } else {
    CHECK_EQ(out_attrs->size(), 2U);
  }
  mxnet::TShape& in_shape = (*in_attrs)[0];
  int batch_size, element_num;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  int k = 0;
  mxnet::TShape target_shape;
  ParseTopKParam(in_shape, param,
                 &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
  if (param.ret_typ == topk_enum::kReturnIndices ||
      param.ret_typ == topk_enum::kReturnMask) {
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape);
  } else {
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape);
    SHAPE_ASSIGN_CHECK(*out_attrs, 1, target_shape);
  }
  return true;
}

// Shape inference for `topk`.
inline bool TopKShape(const nnvm::NodeAttrs& attrs,
                      mxnet::ShapeVector *in_attrs,
                      mxnet::ShapeVector *out_attrs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  return TopKShapeImpl(param, in_attrs, out_attrs);
}

// Type inference for `sort`: output 0 matches the input dtype, output 1
// (indices) is always int32.
inline bool SortType(const nnvm::NodeAttrs& attrs,
                     std::vector<int> *in_attrs,
                     std::vector<int> *out_attrs) {
  int data_type = -1;
  size_t in_size = in_attrs->size();
  size_t out_size = out_attrs->size();
  CHECK_EQ(in_size, 1);
  CHECK_EQ(out_size, 2);
  CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt32))
    << "Failed to set the type of ret_indices to int32.";
  CHECK(type_assign(&data_type, (*in_attrs)[0])) << "Incompatible dtype of input, in_attrs[0]="
                                                 << (*in_attrs)[0];
  CHECK(type_assign(&data_type, (*out_attrs)[0])) << "Incompatible dtype of output, out_attrs[0]="
                                                  << (*out_attrs)[0];
  CHECK(type_assign(&(*in_attrs)[0], data_type)) << "Incompatible dtype of input, in_attrs[0]="
                                                 << (*in_attrs)[0];
  CHECK(type_assign(&(*out_attrs)[0], data_type)) << "Incompatible dtype of output, out_attrs[0]="
                                                  << (*out_attrs)[0];
  if (data_type == -1) return false;
  return true;
}

// Shape inference for `sort`: delegates to TopKShapeImpl with k = 0.
inline bool SortShape(const nnvm::NodeAttrs& attrs,
                      mxnet::ShapeVector *in_attrs,
                      mxnet::ShapeVector *out_attrs) {
  const SortParam& param = nnvm::get<SortParam>(attrs.parsed);
  TopKParam topk_param;
  topk_param.axis = param.axis;
  topk_param.is_ascend = param.is_ascend;
  topk_param.k = 0;
  topk_param.ret_typ = topk_enum::kReturnValue;
  return TopKShapeImpl(topk_param, in_attrs, out_attrs);
}

// Type inference for `argsort`: the single output takes param.dtype.
// NOTE(review): the failure message says "to int32" but the assigned type is
// param.dtype — message looks stale; confirm before relying on it.
inline bool ArgSortType(const nnvm::NodeAttrs& attrs,
                        std::vector<int> *in_attrs,
                        std::vector<int> *out_attrs) {
  const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
  CHECK(type_assign(&(*out_attrs)[0], param.dtype))
    << "Failed to set the type of ret_indices to int32.";
  return true;
}

// Shape inference for `argsort`: delegates to TopKShapeImpl with k = 0.
inline bool ArgSortShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector *in_attrs,
                         mxnet::ShapeVector *out_attrs) {
  const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
  TopKParam topk_param;
  topk_param.axis = param.axis;
  topk_param.is_ascend = param.is_ascend;
  topk_param.k = 0;
  topk_param.ret_typ = topk_enum::kReturnIndices;
  return TopKShapeImpl(topk_param, in_attrs, out_attrs);
}
}  // namespace op
}  // namespace mxnet
#endif  // MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
GB_unaryop__lnot_uint32_int64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_uint32_int64
// op(A') function:  GB_tran__lnot_uint32_int64

// C type:   uint32_t
// A type:   int64_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = !(aij != 0)

// type of the A matrix entries
#define GB_ATYPE \
    int64_t

// type of the C matrix entries
#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (logical NOT)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, x) \
    uint32_t z = (uint32_t) x ;

// cij = op (cast (aij)) — the full load/cast/apply pipeline for one entry
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT32 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_uint32_int64
(
    uint32_t *restrict Cx,
    const int64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // element-wise apply; entries are independent so the loop parallelizes
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_uint32_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose loop body is supplied by the shared template, specialized
    // via the GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
rawSHA256_ng_fmt_plug.c
/*
 * Copyright 2013, epixoip.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistribution of source
 * retains the above copyright.
 */

/*
 * Raw-SHA256 "ng" format for John the Ripper.
 *
 * Computes four single-block SHA-256 hashes in parallel per call using
 * 128-bit integer SIMD (one candidate password per 32-bit lane), with
 * instruction-set variants for SSE2 / SSSE3 / SSE4.1 / XOP.  Plaintexts
 * are limited to MAXLEN (55) bytes so the whole padded message fits in
 * one 64-byte SHA-256 block.
 */

#include "arch.h"

#if defined (__SSE2__) || defined (_MSC_VER)

#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawSHA256_ng;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawSHA256_ng);
#else

#ifdef _OPENMP
#include <omp.h>
#if defined __XOP__
#define OMP_SCALE                 512 /* AMD */
#else
#define OMP_SCALE                 512 /* Intel */
#endif
#endif

// These compilers claim to be __GNUC__ but warn on gcc pragmas.
#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && !defined(__clang__) && !defined(__llvm__) && !defined (_MSC_VER)
#pragma GCC optimize 3
#endif

//#define DEBUG

#include <string.h>
#include "stdint.h"
#include <emmintrin.h>

#if defined __XOP__
#include <x86intrin.h>
#elif defined __SSE4_1__
#include <smmintrin.h>
#elif defined __SSSE3__
#include <tmmintrin.h>
#endif

#include "common.h"
#include "formats.h"
#include "aligned.h"
#include "rawSHA256_common.h"
#include "memdbg.h"

/* Human-readable name of the best SIMD level this build was compiled for. */
#if defined __XOP__
#define SIMD_TYPE                 "XOP"
#elif defined __SSE4_1__
#define SIMD_TYPE                 "SSE4.1"
#elif defined __SSSE3__
#define SIMD_TYPE                 "SSSE3"
#else
#define SIMD_TYPE                 "SSE2"
#endif

#define FORMAT_LABEL              "Raw-SHA256-ng"
#define FORMAT_NAME               ""
#define ALGORITHM_NAME            "SHA256 128/128 " SIMD_TYPE " 4x"

/* Four 32-bit lanes per 128-bit vector register. */
#define VWIDTH                    4
#define NUMKEYS                   VWIDTH

#define BENCHMARK_COMMENT         ""
#define BENCHMARK_LENGTH          -1

/* 55 bytes is the most that fits in one SHA-256 block together with the
   mandatory 0x80 pad byte and the length word written at buf32[15]. */
#define MAXLEN                    55
#define CIPHERTEXT_LENGTH         64
#define DIGEST_SIZE               32
#define BINARY_SIZE               32
#define BINARY_ALIGN              4
#define SALT_SIZE                 0
#define SALT_ALIGN                1
#define MIN_KEYS_PER_CRYPT        NUMKEYS
#define MAX_KEYS_PER_CRYPT        NUMKEYS

#ifndef __XOP__
/*
 * Emulate XOP's rotate intrinsic on plain SSE2.  Call sites always pass a
 * negative n (rotate right by -n); `~n + 1` is -n written so both shift
 * counts stay non-negative.
 */
#define _mm_roti_epi32(x, n)                                              \
(                                                                         \
    _mm_xor_si128 (                                                       \
        _mm_srli_epi32(x, ~n + 1),                                        \
        _mm_slli_epi32(x, 32 + n)                                         \
    )                                                                     \
)

/* Bitwise select (XOP conditional move): for each bit, x ? y : z. */
#define _mm_cmov_si128(y, z, x)                                           \
(                                                                         \
    _mm_xor_si128 (z,                                                     \
        _mm_and_si128 (x,                                                 \
            _mm_xor_si128 (y, z)                                          \
        )                                                                 \
    )                                                                     \
)
#endif

#ifdef __SSSE3__
/* Byte-swap every 32-bit lane in one pshufb. */
#define SWAP_ENDIAN(n)                                                    \
{                                                                         \
    n = _mm_shuffle_epi8 (n,                                              \
            _mm_set_epi32 (0x0c0d0e0f, 0x08090a0b,                        \
                           0x04050607, 0x00010203                         \
            )                                                             \
        );                                                                \
}
#else
/* Swap the two 16-bit halves of every 32-bit lane. */
#define ROT16(n)                                                          \
(                                                                         \
    _mm_shufflelo_epi16 (                                                 \
        _mm_shufflehi_epi16 (n, 0xb1), 0xb1                               \
    )                                                                     \
)

/* Byte-swap every 32-bit lane using only SSE2 shifts + shuffles. */
#define SWAP_ENDIAN(n)                                                    \
(                                                                         \
    n = _mm_xor_si128 (                                                   \
            _mm_srli_epi16 (ROT16(n), 8),                                 \
            _mm_slli_epi16 (ROT16(n), 8)                                  \
        )                                                                 \
)
#endif

#ifdef __SSE4_1__
/*
 * Load word z of four consecutive keys (rows index..index+3 of y) into
 * the four lanes of vector x.  NOTE: the macro expands the identifier
 * `index` from the enclosing scope (crypt_all's loop variable).
 */
#define GATHER(x, y, z)                                                   \
{                                                                         \
    x = _mm_cvtsi32_si128 ( y[index][z] );                                \
    x = _mm_insert_epi32 (x, y[index + 1][z], 1);                         \
    x = _mm_insert_epi32 (x, y[index + 2][z], 2);                         \
    x = _mm_insert_epi32 (x, y[index + 3][z], 3);                         \
}
#endif

/* SHA-256 big-sigma and small-sigma functions (FIPS 180-4), vectorized. */
#define S0(x)                                                             \
(                                                                         \
    _mm_xor_si128 (                                                       \
        _mm_roti_epi32 (x, -22),                                          \
        _mm_xor_si128 (                                                   \
            _mm_roti_epi32 (x,  -2),                                      \
            _mm_roti_epi32 (x, -13)                                       \
        )                                                                 \
    )                                                                     \
)

#define S1(x)                                                             \
(                                                                         \
    _mm_xor_si128 (                                                       \
        _mm_roti_epi32 (x, -25),                                          \
        _mm_xor_si128 (                                                   \
            _mm_roti_epi32 (x,  -6),                                      \
            _mm_roti_epi32 (x, -11)                                       \
        )                                                                 \
    )                                                                     \
)

#define s0(x)                                                             \
(                                                                         \
    _mm_xor_si128 (                                                       \
        _mm_srli_epi32 (x, 3),                                            \
        _mm_xor_si128 (                                                   \
            _mm_roti_epi32 (x,  -7),                                      \
            _mm_roti_epi32 (x, -18)                                       \
        )                                                                 \
    )                                                                     \
)

#define s1(x)                                                             \
(                                                                         \
    _mm_xor_si128 (                                                       \
        _mm_srli_epi32 (x, 10),                                           \
        _mm_xor_si128 (                                                   \
            _mm_roti_epi32 (x, -17),                                      \
            _mm_roti_epi32 (x, -19)                                       \
        )                                                                 \
    )                                                                     \
)

/* Maj and Ch expressed through the single bitwise-select primitive. */
#define Maj(x,y,z) _mm_cmov_si128 (x, y, _mm_xor_si128 (z, y))

#define Ch(x,y,z)  _mm_cmov_si128 (y, z, x)

/* Message-schedule expansion for round t (t >= 16). */
#define R(t)                                                              \
{                                                                         \
    w[t] = _mm_add_epi32 (s1(w[t -  2]), w[t - 7]);                       \
    w[t] = _mm_add_epi32 (s0(w[t - 15]), w[t]);                           \
    w[t] = _mm_add_epi32 (   w[t - 16],  w[t]);                           \
}

/*
 * One SHA-256 round.  Callers rotate the register names a..h instead of
 * shuffling values; x is the round number (a compile-time constant, so
 * the `x > 15` branch folds away), K the round constant.
 */
#define SHA256_STEP(a,b,c,d,e,f,g,h,x,K)                                  \
{                                                                         \
    if (x > 15) R(x);                                                     \
    tmp1 = _mm_add_epi32 (h,    S1(e));                                   \
    tmp1 = _mm_add_epi32 (tmp1, Ch(e,f,g));                               \
    tmp1 = _mm_add_epi32 (tmp1, _mm_set1_epi32(K));                       \
    tmp1 = _mm_add_epi32 (tmp1, w[x]);                                    \
    tmp2 = _mm_add_epi32 (S0(a),Maj(a,b,c));                              \
    d    = _mm_add_epi32 (tmp1, d);                                       \
    h    = _mm_add_epi32 (tmp1, tmp2);                                    \
}

/* Self-test vectors: raw hex digests, HEX_TAG-prefixed digests, and
   $cisco4$ (base64) variants handled by the shared prepare/valid/split. */
static struct fmt_tests tests[] = {
    {"71c3f65d17745f05235570f1799d75e69795d469d9fcb83e326f82f1afa80dea", "epixoip"},
    {HEX_TAG "71c3f65d17745f05235570f1799d75e69795d469d9fcb83e326f82f1afa80dea", "epixoip"},
    {"25b64f637b373d33a8aa2b7579784e99a20e6b7dfea99a71af124394b8958f27", "doesthiswork"},
    {"5e884898da28047151d0e56f8dc6292773603d0d6aabbdd62a11ef721d1542d8", "password"},
    {"27c6794c8aa2f70f5f6dc93d3bfb25ca6de9b0752c8318614cbd4ad203bea24c", "ALLCAPS"},
    {"04cdd6c523673bf448efe055711a9b184817d7843b0a76c2046f5398b5854152", "TestTESTt3st"},
    {HEX_TAG "ef797c8118f02dfb649607dd5d3f8c7623048c9c063d532cc95c5ed7a898a64f", "12345678"},
    {HEX_TAG "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", ""},
    {HEX_TAG "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855", ""},
    {"LcV6aBcc/53FoCJjXQMd7rBUDEpeevrK8V5jQVoJEhU", "password"},
    {"$cisco4$LcV6aBcc/53FoCJjXQMd7rBUDEpeevrK8V5jQVoJEhU", "password"},
    {"a49c2c9d0c006c8cb55a9a7a38822b83e0cd442614cb416af952fa50156761dc", "openwall"},
    {"9e7d3e56996c5a06a6a378567e62f5aa7138ebb0f55c0bdaf73666bf77f73380", "mot\xf6rhead"},
    {"1b4f0e9851971998e732078544c96b36c3d01cedf7caa332359d6f1d83567014", "test1"},
    {"fd61a03af4f77d870fc21e05e7e80678095c92d808cfb3b5c279ee04c74aca13", "test3"},
    {"d150eb0383c8ef7478248d7e6cf18db333e8753d05e15a8a83714b7cf63922b3", "thatsworking"},
#ifdef DEBUG
    {"c775e7b757ede630cd0aa1113bd102661ab38829ca52a6422ab782862f268646", "1234567890"},
    {"6ed645ef0e1abea1bf1e4e935ff04f9e18d39812387f63cda3415b46240f0405", "12345678901234567890"},
    {"f54e5c8f810648e7638d25eb7ed6d24b7e5999d588e88826f2aa837d2ee52ecd", "123456789012345678901234567890"},
    {"a4ebdd541454b84cc670c9f1f5508baf67ffd3fe59b883267808781f992a0b1d", "1234567890123456789012345678901234567890"},
    {"f58fffba129aa67ec63bf12571a42977c0b785d3b2a93cc0538557c91da2115d", "12345678901234567890123456789012345678901234567890"},
    {"3874d5c9cc5ab726e6bbebadee22c680ce530004d4f0bb32f765d42a0a6c6dc1", "123456789012345678901234567890123456789012345678901"},
    {"03c3a70e99ed5eeccd80f73771fcf1ece643d939d9ecc76f25544b0233f708e9", "1234567890123456789012345678901234567890123456789012345"},
    {"0f46e4b0802fee6fed599682a16287d0397699cfd742025482c086a70979e56a", "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}, // 31
    {"c62e4615bd39e222572f3a1bf7c2132ea1e65b17ec805047bd6b2842c593493f", "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}, // 32
    {"d5e285683cd4efc02d021a5c62014694958901005d6f71e89e0989fac77e4072", "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}, // 55
    {"$cisco4$OsOmQzwozC4ROs/CzpczJoShdCeW9lp7k/tGrPS5Kog", "1"},
    {"$cisco4$d7kgbEk.P6mpKdduC66fUy1BF0MImo3eyJ9uI/JbMRk", "openwall"},
    {"$cisco4$p5BSCWNS3ivUDpZlWthR.k4Q/xWqlFyEqXdaPikHenI", "2"},
    {"$cisco4$HwUf7ev9Fx84X2vvspULAeDbmwlg9jgm/Wk63kc3vfU", "11"},
    {"$cisco4$bsPEUMVATKKO9yeUlJfE3OCzHlgf0s6goJpg3P1k0UU", "test"},
    {"$cisco4$hUsuWZSE8dZERUBYNwRK8Aa8VxEGIHsuZFUCjNj2.Ac", "verylongbutweakpassword"},
    {"$cisco4$fLUL1VG98zYDf9Q.M40nZ5blVT3M6UBex74Blw.UDCc", "thismaximumpasswordlength"},
    {"$cisco4$Xq81UiuCj7bz9B..EX2BZumsU/d8pF5gs2NlRMW6sTk", "applesucks"},
    {"$cisco4$O/D/cn1nawcByQoJfBxrNnUx6jjfWV.FNFx5TzmzihU", "AppleSucks"},
#endif
    {NULL}
};

/* saved_key[i]: one 64-byte (16-word) pre-padded message block per key.
   crypt_key[w][i]: output word w (of 8) of key i — stored word-major so
   cmp_all can scan crypt_key[0] contiguously. */
static uint32_t (*saved_key)[64];
static uint32_t *crypt_key[ 8];

/* Allocate key/digest buffers; with OpenMP, scale the crypt batch size
   by thread count * OMP_SCALE.  Buffers come from mem_calloc_tiny and
   are never freed (standard JtR allocator pattern — freed at exit). */
static void init(struct fmt_main *self)
{
    int i;
#ifdef _OPENMP
    int omp_t;

    omp_t = omp_get_max_threads();
    self->params.min_keys_per_crypt *= omp_t;
    omp_t *= OMP_SCALE;
    self->params.max_keys_per_crypt *= omp_t;
#endif
    saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_SIMD);
    for (i = 0; i < 8; i++)
        crypt_key[i] = mem_calloc_tiny(sizeof(uint32_t) * self->params.max_keys_per_crypt, MEM_ALIGN_SIMD);
}

/* Decode a canonical (HEX_TAG-prefixed) ciphertext into a 32-byte binary
   digest; alter_endianity converts it to the host-word order produced by
   crypt_all.  Returns a pointer to a static buffer. */
static void *get_binary (char *ciphertext)
{
    static unsigned char *out;
    int i;

    if (!out)
        out = mem_alloc_tiny (DIGEST_SIZE, MEM_ALIGN_WORD);

    ciphertext += HEX_TAG_LEN;   /* skip the canonical tag added by split() */

    for(i=0; i < BINARY_SIZE; i++)
        out[i] = atoi16[ARCH_INDEX(ciphertext[i*2])] * 16 +
                 atoi16[ARCH_INDEX(ciphertext[i*2 + 1])];

    alter_endianity (out, DIGEST_SIZE);

    return (void *) out;
}

/* Hash-table bucket functions over the first output word, at the seven
   standard JtR table sizes (4..27 bits). */
static int get_hash_0 (int index) { return crypt_key[0][index] & 0xf; }
static int get_hash_1 (int index) { return crypt_key[0][index] & 0xff; }
static int get_hash_2 (int index) { return crypt_key[0][index] & 0xfff; }
static int get_hash_3 (int index) { return crypt_key[0][index] & 0xffff; }
static int get_hash_4 (int index) { return crypt_key[0][index] & 0xfffff; }
static int get_hash_5 (int index) { return crypt_key[0][index] & 0xffffff; }
static int get_hash_6 (int index) { return crypt_key[0][index] & 0x7ffffff; }

/* Store one candidate into saved_key[index] already SHA-256-padded:
   plaintext bytes, bit length into word 15, then the 0x80 terminator.
   The final loop zeroes leftover bytes from a previous, longer key,
   stopping at the first byte that is already zero. */
static void set_key (char *key, int index)
{
    uint32_t *buf32 = (uint32_t *) &saved_key[index];
    uint8_t  *buf8  = (uint8_t *) buf32;
    int len = 0;

    while (*key)
        buf8[len++] = *key++;

    buf32[15] = len << 3;      /* message length in bits, word 15 */
    buf8[len++] = 0x80;        /* mandatory SHA-256 pad byte */

    while (buf8[len] && len <= MAXLEN)
        buf8[len++] = 0;
}

/* Recover the plaintext for saved_key[index]; length is read back from
   the bit-length word written by set_key.  Returns a static buffer. */
static char *get_key (int index)
{
    uint32_t *buf = (uint32_t *) &saved_key[index];
    static char out[MAXLEN + 1];

    int len = buf[15] >> 3;

    memset (out, 0, MAXLEN + 1);
    memcpy (out, buf, len);

    return (char *) out;
}

/*
 * Hash `count` candidates, VWIDTH at a time (one per SIMD lane).
 * Without OpenMP the braces below form a plain block executed once with
 * index == 0 — sufficient because MAX_KEYS_PER_CRYPT is then exactly
 * NUMKEYS == VWIDTH (init's scaling is compiled out).
 */
#if FMT_MAIN_VERSION > 10
static int crypt_all (int *pcount, struct db_salt *salt)
#else
static void crypt_all (int count)
#endif
{
#if FMT_MAIN_VERSION > 10
    int count = *pcount;
#endif
    int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
    for (index = 0; index < count; index += VWIDTH)
#endif
    {
        __m128i a, b, c, d, e, f, g, h;
        __m128i w[64], tmp1, tmp2;
        int i;

#ifdef __SSE4_1__
        /* Transpose 4 keys x 16 words directly with insert instructions. */
        for (i=0; i < 16; i++) GATHER (w[i], saved_key, i);
        /* Swap only w[0..14]: w[15] holds the bit length as an integer
           value, so it must not be byte-swapped. */
        for (i=0; i < 15; i++) SWAP_ENDIAN (w[i]);
#else
        /* Transpose through an aligned scratch array, then vector-load. */
        JTR_ALIGN(16) uint32_t __w[16][VWIDTH];
        int j;

        for (i=0; i < VWIDTH; i++)
            for (j=0; j < 16; j++)
                __w[j][i] = saved_key[index + i][j];

        for (i=0; i < 15; i++) {
            w[i] = _mm_load_si128 ((__m128i *) __w[i]);
            SWAP_ENDIAN (w[i]);
        }
        w[15] = _mm_load_si128 ((__m128i *) __w[15]);  /* length word: no swap */
#endif

        /* SHA-256 initial state H0..H7 (FIPS 180-4), broadcast to all lanes. */
        a = _mm_set1_epi32 (0x6a09e667);
        b = _mm_set1_epi32 (0xbb67ae85);
        c = _mm_set1_epi32 (0x3c6ef372);
        d = _mm_set1_epi32 (0xa54ff53a);
        e = _mm_set1_epi32 (0x510e527f);
        f = _mm_set1_epi32 (0x9b05688c);
        g = _mm_set1_epi32 (0x1f83d9ab);
        h = _mm_set1_epi32 (0x5be0cd19);

        /* 64 fully unrolled rounds; the register names rotate each step. */
        SHA256_STEP(a, b, c, d, e, f, g, h,  0, 0x428a2f98);
        SHA256_STEP(h, a, b, c, d, e, f, g,  1, 0x71374491);
        SHA256_STEP(g, h, a, b, c, d, e, f,  2, 0xb5c0fbcf);
        SHA256_STEP(f, g, h, a, b, c, d, e,  3, 0xe9b5dba5);
        SHA256_STEP(e, f, g, h, a, b, c, d,  4, 0x3956c25b);
        SHA256_STEP(d, e, f, g, h, a, b, c,  5, 0x59f111f1);
        SHA256_STEP(c, d, e, f, g, h, a, b,  6, 0x923f82a4);
        SHA256_STEP(b, c, d, e, f, g, h, a,  7, 0xab1c5ed5);
        SHA256_STEP(a, b, c, d, e, f, g, h,  8, 0xd807aa98);
        SHA256_STEP(h, a, b, c, d, e, f, g,  9, 0x12835b01);
        SHA256_STEP(g, h, a, b, c, d, e, f, 10, 0x243185be);
        SHA256_STEP(f, g, h, a, b, c, d, e, 11, 0x550c7dc3);
        SHA256_STEP(e, f, g, h, a, b, c, d, 12, 0x72be5d74);
        SHA256_STEP(d, e, f, g, h, a, b, c, 13, 0x80deb1fe);
        SHA256_STEP(c, d, e, f, g, h, a, b, 14, 0x9bdc06a7);
        SHA256_STEP(b, c, d, e, f, g, h, a, 15, 0xc19bf174);
        SHA256_STEP(a, b, c, d, e, f, g, h, 16, 0xe49b69c1);
        SHA256_STEP(h, a, b, c, d, e, f, g, 17, 0xefbe4786);
        SHA256_STEP(g, h, a, b, c, d, e, f, 18, 0x0fc19dc6);
        SHA256_STEP(f, g, h, a, b, c, d, e, 19, 0x240ca1cc);
        SHA256_STEP(e, f, g, h, a, b, c, d, 20, 0x2de92c6f);
        SHA256_STEP(d, e, f, g, h, a, b, c, 21, 0x4a7484aa);
        SHA256_STEP(c, d, e, f, g, h, a, b, 22, 0x5cb0a9dc);
        SHA256_STEP(b, c, d, e, f, g, h, a, 23, 0x76f988da);
        SHA256_STEP(a, b, c, d, e, f, g, h, 24, 0x983e5152);
        SHA256_STEP(h, a, b, c, d, e, f, g, 25, 0xa831c66d);
        SHA256_STEP(g, h, a, b, c, d, e, f, 26, 0xb00327c8);
        SHA256_STEP(f, g, h, a, b, c, d, e, 27, 0xbf597fc7);
        SHA256_STEP(e, f, g, h, a, b, c, d, 28, 0xc6e00bf3);
        SHA256_STEP(d, e, f, g, h, a, b, c, 29, 0xd5a79147);
        SHA256_STEP(c, d, e, f, g, h, a, b, 30, 0x06ca6351);
        SHA256_STEP(b, c, d, e, f, g, h, a, 31, 0x14292967);
        SHA256_STEP(a, b, c, d, e, f, g, h, 32, 0x27b70a85);
        SHA256_STEP(h, a, b, c, d, e, f, g, 33, 0x2e1b2138);
        SHA256_STEP(g, h, a, b, c, d, e, f, 34, 0x4d2c6dfc);
        SHA256_STEP(f, g, h, a, b, c, d, e, 35, 0x53380d13);
        SHA256_STEP(e, f, g, h, a, b, c, d, 36, 0x650a7354);
        SHA256_STEP(d, e, f, g, h, a, b, c, 37, 0x766a0abb);
        SHA256_STEP(c, d, e, f, g, h, a, b, 38, 0x81c2c92e);
        SHA256_STEP(b, c, d, e, f, g, h, a, 39, 0x92722c85);
        SHA256_STEP(a, b, c, d, e, f, g, h, 40, 0xa2bfe8a1);
        SHA256_STEP(h, a, b, c, d, e, f, g, 41, 0xa81a664b);
        SHA256_STEP(g, h, a, b, c, d, e, f, 42, 0xc24b8b70);
        SHA256_STEP(f, g, h, a, b, c, d, e, 43, 0xc76c51a3);
        SHA256_STEP(e, f, g, h, a, b, c, d, 44, 0xd192e819);
        SHA256_STEP(d, e, f, g, h, a, b, c, 45, 0xd6990624);
        SHA256_STEP(c, d, e, f, g, h, a, b, 46, 0xf40e3585);
        SHA256_STEP(b, c, d, e, f, g, h, a, 47, 0x106aa070);
        SHA256_STEP(a, b, c, d, e, f, g, h, 48, 0x19a4c116);
        SHA256_STEP(h, a, b, c, d, e, f, g, 49, 0x1e376c08);
        SHA256_STEP(g, h, a, b, c, d, e, f, 50, 0x2748774c);
        SHA256_STEP(f, g, h, a, b, c, d, e, 51, 0x34b0bcb5);
        SHA256_STEP(e, f, g, h, a, b, c, d, 52, 0x391c0cb3);
        SHA256_STEP(d, e, f, g, h, a, b, c, 53, 0x4ed8aa4a);
        SHA256_STEP(c, d, e, f, g, h, a, b, 54, 0x5b9cca4f);
        SHA256_STEP(b, c, d, e, f, g, h, a, 55, 0x682e6ff3);
        SHA256_STEP(a, b, c, d, e, f, g, h, 56, 0x748f82ee);
        SHA256_STEP(h, a, b, c, d, e, f, g, 57, 0x78a5636f);
        SHA256_STEP(g, h, a, b, c, d, e, f, 58, 0x84c87814);
        SHA256_STEP(f, g, h, a, b, c, d, e, 59, 0x8cc70208);
        SHA256_STEP(e, f, g, h, a, b, c, d, 60, 0x90befffa);
        SHA256_STEP(d, e, f, g, h, a, b, c, 61, 0xa4506ceb);
        SHA256_STEP(c, d, e, f, g, h, a, b, 62, 0xbef9a3f7);
        SHA256_STEP(b, c, d, e, f, g, h, a, 63, 0xc67178f2);

        /* Final Davies–Meyer feed-forward: add the initial state back in. */
        a = _mm_add_epi32 (a, _mm_set1_epi32 (0x6a09e667));
        b = _mm_add_epi32 (b, _mm_set1_epi32 (0xbb67ae85));
        c = _mm_add_epi32 (c, _mm_set1_epi32 (0x3c6ef372));
        d = _mm_add_epi32 (d, _mm_set1_epi32 (0xa54ff53a));
        e = _mm_add_epi32 (e, _mm_set1_epi32 (0x510e527f));
        f = _mm_add_epi32 (f, _mm_set1_epi32 (0x9b05688c));
        g = _mm_add_epi32 (g, _mm_set1_epi32 (0x1f83d9ab));
        h = _mm_add_epi32 (h, _mm_set1_epi32 (0x5be0cd19));

        /* Store word-major: output word k of keys index..index+3. */
        _mm_store_si128 ((__m128i *) &crypt_key[0][index], a);
        _mm_store_si128 ((__m128i *) &crypt_key[1][index], b);
        _mm_store_si128 ((__m128i *) &crypt_key[2][index], c);
        _mm_store_si128 ((__m128i *) &crypt_key[3][index], d);
        _mm_store_si128 ((__m128i *) &crypt_key[4][index], e);
        _mm_store_si128 ((__m128i *) &crypt_key[5][index], f);
        _mm_store_si128 ((__m128i *) &crypt_key[6][index], g);
        _mm_store_si128 ((__m128i *) &crypt_key[7][index], h);
    }

#if FMT_MAIN_VERSION > 10
    return count;
#endif
}

/* Does any computed hash match the first word of `binary`?  OpenMP builds
   scan the full batch scalar-wise; non-OpenMP builds compare the single
   4-lane vector at once (count <= VWIDTH there). */
static int cmp_all (void *binary, int count)
{
#ifdef _OPENMP
    int i;

    for (i = 0; i < count; i++)
        if (((uint32_t *) binary)[0] == crypt_key[0][i])
            return 1;

    return 0;
#else
    static const __m128i zero = {0};

    __m128i tmp;
    __m128i bin;
    __m128i digest;

    digest = _mm_load_si128 ((__m128i *) crypt_key[0]);
    bin    = _mm_set1_epi32 (((uint32_t *) binary)[0]);
    tmp    = _mm_cmpeq_epi32 (bin, digest);

    /* Any lane equal => the equality mask has a non-zero lane => the
       mask-vs-zero comparison is not all-ones. */
    return _mm_movemask_epi8 (_mm_cmpeq_epi32 (tmp, zero)) != 0xffff;
#endif
}

/* Full 8-word comparison for one candidate. */
static int cmp_one (void *binary, int index)
{
    int i;

    for (i = 0; i < 8; i++)
        if (((uint32_t *) binary)[i] != crypt_key[i][index])
            return 0;

    return 1;
}

/* cmp_one already compared the whole digest, so nothing further to do. */
static int cmp_exact (char *source, int index)
{
    return 1;
}

/* Format registration.  prepare/valid/split come from rawSHA256_common.h
   and canonicalize raw-hex and $cisco4$ inputs to the HEX_TAG form that
   get_binary expects. */
struct fmt_main fmt_rawSHA256_ng = {
    {
        FORMAT_LABEL,
        FORMAT_NAME,
        ALGORITHM_NAME,
        BENCHMARK_COMMENT,
        BENCHMARK_LENGTH,
        MAXLEN,
        BINARY_SIZE,
#if FMT_MAIN_VERSION > 9
        BINARY_ALIGN,
#endif
        SALT_SIZE,
#if FMT_MAIN_VERSION > 9
        SALT_ALIGN,
#endif
        MIN_KEYS_PER_CRYPT,
        MAX_KEYS_PER_CRYPT,
        FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP,
#if FMT_MAIN_VERSION > 11
        { NULL },
#endif
        tests
    }, {
        init,
#if FMT_MAIN_VERSION > 10
        fmt_default_done,
        fmt_default_reset,
#endif
        prepare,
        valid,
        split,
        get_binary,
        fmt_default_salt,
#if FMT_MAIN_VERSION > 9
#if FMT_MAIN_VERSION > 11
        { NULL },
#endif
        fmt_default_source,
#endif
        {
            fmt_default_binary_hash_0,
            fmt_default_binary_hash_1,
            fmt_default_binary_hash_2,
            fmt_default_binary_hash_3,
            fmt_default_binary_hash_4,
            fmt_default_binary_hash_5,
            fmt_default_binary_hash_6
        },
        fmt_default_salt_hash,
        fmt_default_set_salt,
        set_key,
        get_key,
        fmt_default_clear_keys,
        crypt_all,
        {
            get_hash_0,
            get_hash_1,
            get_hash_2,
            get_hash_3,
            get_hash_4,
            get_hash_5,
            get_hash_6
        },
        cmp_all,
        cmp_one,
        cmp_exact
    }
};

#endif /* plugin stanza */
#endif /* __SSE2__ */
/* ==== Next file in this concatenated dump: truncate.c (Cython-generated draco.util.truncate module) ==== */
/* Generated by Cython 0.29.21 */ /* BEGIN: Cython Metadata { "distutils": { "depends": [ "/cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/core/include/numpy/arrayobject.h", "/cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/core/include/numpy/ufuncobject.h", "draco/util/truncate.hpp" ], "extra_compile_args": [ "-fopenmp" ], "extra_link_args": [ "-fopenmp" ], "include_dirs": [ "draco/util", "/cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/core/include" ], "name": "draco.util.truncate", "sources": [ "draco/util/truncate.pyx" ] }, "module_name": "draco.util.truncate" } END: Cython Metadata */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. 
#else #define CYTHON_ABI "0_29_21" #define CYTHON_HEX_VERSION 0x001D15F0 #define CYTHON_FUTURE_DIVISION 0 #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef 
CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif 
!defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #ifndef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) #endif #ifndef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #ifdef SIZEOF_VOID_P enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; #endif #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef 
CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include <stdint.h> #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined 
(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #ifndef METH_STACKLESS #define METH_STACKLESS 0 #endif #if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ 
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 #define PyMem_RawMalloc(n) PyMem_Malloc(n) #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) #define PyMem_RawFree(p) PyMem_Free(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) #include "pythread.h" #define Py_tss_NEEDS_INIT 0 typedef int Py_tss_t; static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { *key = PyThread_create_key(); return 0; } static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); *key = Py_tss_NEEDS_INIT; return key; } static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { PyObject_Free(key); } static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { return *key != Py_tss_NEEDS_INIT; } static CYTHON_INLINE void 
PyThread_tss_delete(Py_tss_t *key) { PyThread_delete_key(*key); *key = Py_tss_NEEDS_INIT; } static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { return PyThread_set_key_value(*key, value); } static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { return PyThread_get_key_value(*key); } #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS #define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) #else #define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? 
PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) #endif #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #ifndef PyObject_Unicode #define PyObject_Unicode PyObject_Str #endif #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #if PY_VERSION_HEX >= 0x030900A4 #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) #else #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) #endif #if CYTHON_ASSUME_SAFE_MACROS #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) #else #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) #endif #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t 
PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_MARK_ERR_POS(f_index, lineno) \ { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } #define __PYX_ERR(f_index, lineno, 
Ln_error) \ { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__draco__util__truncate #define __PYX_HAVE_API__draco__util__truncate /* Early includes */ #include <string.h> #include <stdio.h> #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" /* NumPy API declarations from "numpy/__init__.pxd" */ #include "truncate.hpp" #include "pythread.h" #include <stdlib.h> #include "pystate.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { return (size_t) i < (size_t) limit; } #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif 
SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? -value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define 
__Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, 
__PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_cython_runtime = NULL; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; /* Header.proto */ #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include <complex> #else #include <complex.h> #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef 
_Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "draco/util/truncate.pyx", "__init__.pxd", "stringsource", "type.pxd", }; /* NoFastGil.proto */ #define __Pyx_PyGILState_Ensure PyGILState_Ensure #define __Pyx_PyGILState_Release PyGILState_Release #define __Pyx_FastGIL_Remember() #define __Pyx_FastGIL_Forget() #define __Pyx_FastGilFuncInit() /* ForceInitThreads.proto */ #ifndef __PYX_FORCE_INIT_THREADS #define __PYX_FORCE_INIT_THREADS 0 #endif /* MemviewSliceStruct.proto */ struct __pyx_memoryview_obj; typedef struct { struct __pyx_memoryview_obj *memview; char *data; Py_ssize_t shape[8]; Py_ssize_t strides[8]; Py_ssize_t suboffsets[8]; } __Pyx_memviewslice; #define __Pyx_MemoryView_Len(m) (m.shape[0]) /* Atomics.proto */ #include <pythread.h> #ifndef CYTHON_ATOMICS #define CYTHON_ATOMICS 1 #endif #define __pyx_atomic_int_type int #if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\ (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\ !defined(__i386__) #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1) #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1) #ifdef __PYX_DEBUG_ATOMICS #warning "Using GNU atomics" #endif #elif CYTHON_ATOMICS && defined(_MSC_VER) && 0 #include <Windows.h> #undef __pyx_atomic_int_type #define __pyx_atomic_int_type LONG #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #pragma message ("Using MSVC atomics") #endif #elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0 #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #warning "Using Intel atomics" #endif #else #undef CYTHON_ATOMICS #define CYTHON_ATOMICS 0 #ifdef __PYX_DEBUG_ATOMICS #warning "Not using atomics" #endif #endif 
typedef volatile __pyx_atomic_int_type __pyx_atomic_int; #if CYTHON_ATOMICS #define __pyx_add_acquisition_count(memview)\ __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #else #define __pyx_add_acquisition_count(memview)\ __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #endif /* BufferFormatStructs.proto */ #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":689 * # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":690 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":691 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":692 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":696 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":697 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* 
"../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":698 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":699 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":703 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":704 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":713 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* 
"../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":714 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":715 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":717 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":718 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":719 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":721 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * 
*/ typedef npy_intp __pyx_t_5numpy_intp_t; /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":722 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":724 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":725 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":726 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* Declarations.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); /* Declarations.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif static CYTHON_INLINE 
__pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); /*--- Type declarations ---*/ struct __pyx_array_obj; struct __pyx_MemviewEnum_obj; struct __pyx_memoryview_obj; struct __pyx_memoryviewslice_obj; /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":728 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":729 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":730 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":732 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; /* "View.MemoryView":105 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_array_obj { PyObject_HEAD struct __pyx_vtabstruct_array *__pyx_vtab; char *data; Py_ssize_t len; char *format; int ndim; Py_ssize_t *_shape; Py_ssize_t *_strides; Py_ssize_t itemsize; PyObject *mode; PyObject *_format; void (*callback_free_data)(void *); int 
free_data; int dtype_is_object; }; /* "View.MemoryView":279 * * @cname('__pyx_MemviewEnum') * cdef class Enum(object): # <<<<<<<<<<<<<< * cdef object name * def __init__(self, name): */ struct __pyx_MemviewEnum_obj { PyObject_HEAD PyObject *name; }; /* "View.MemoryView":330 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_memoryview_obj { PyObject_HEAD struct __pyx_vtabstruct_memoryview *__pyx_vtab; PyObject *obj; PyObject *_size; PyObject *_array_interface; PyThread_type_lock lock; __pyx_atomic_int acquisition_count[2]; __pyx_atomic_int *acquisition_count_aligned_p; Py_buffer view; int flags; int dtype_is_object; __Pyx_TypeInfo *typeinfo; }; /* "View.MemoryView":965 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_memoryviewslice_obj { struct __pyx_memoryview_obj __pyx_base; __Pyx_memviewslice from_slice; PyObject *from_object; PyObject *(*to_object_func)(char *); int (*to_dtype_func)(char *, PyObject *); }; /* "View.MemoryView":105 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_vtabstruct_array { PyObject *(*get_memview)(struct __pyx_array_obj *); }; static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; /* "View.MemoryView":330 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_vtabstruct_memoryview { char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject 
*(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); }; static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; /* "View.MemoryView":965 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_vtabstruct__memoryviewslice { struct __pyx_vtabstruct_memoryview __pyx_base; }; static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define 
__Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject 
**argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* PyIntCompare.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_NeObjC(PyObject *op1, PyObject *op2, long intval, long inplace); /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) 
PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* PyFunctionFastCall.proto */ #if CYTHON_FAST_PYCALL #define __Pyx_PyFunction_FastCall(func, args, nargs)\ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); #else #define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) #endif #define __Pyx_BUILD_ASSERT_EXPR(cond)\ (sizeof(char [1 - 2*!(cond)]) - 1) #ifndef Py_MEMBER_SIZE #define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) #endif static size_t __pyx_pyframe_localsplus_offset = 0; #include "frameobject.h" #define __Pxy_PyFrame_Initialize_Offsets()\ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) #define __Pyx_PyFrame_GetLocalsplus(frame)\ (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) #endif /* PyCFunctionFastCall.proto */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); #else #define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) #endif /* PyObjectCallMethO.proto */ 
#if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* PyDictVersioning.proto */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS #define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) #define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ (version_var) = __PYX_GET_DICT_VERSION(dict);\ (cache_var) = (value); #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ (VAR) = __pyx_dict_cached_value;\ } else {\ (VAR) = __pyx_dict_cached_value = (LOOKUP);\ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ }\ } static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); #else #define __PYX_GET_DICT_VERSION(dict) (0) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); #endif /* GetModuleGlobalName.proto */ #if CYTHON_USE_DICT_VERSIONS #define __Pyx_GetModuleGlobalName(var, name) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ (likely(__pyx_dict_cached_value) ? 
__Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } #define __Pyx_GetModuleGlobalNameUncached(var, name) {\ PY_UINT64_T __pyx_dict_version;\ PyObject *__pyx_dict_cached_value;\ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); #else #define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) #define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); #endif /* PyObjectCall2Args.proto */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); /* MemviewSliceInit.proto */ #define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d #define __Pyx_MEMVIEW_DIRECT 1 #define __Pyx_MEMVIEW_PTR 2 #define __Pyx_MEMVIEW_FULL 4 #define __Pyx_MEMVIEW_CONTIG 8 #define __Pyx_MEMVIEW_STRIDED 16 #define __Pyx_MEMVIEW_FOLLOW 32 #define __Pyx_IS_C_CONTIG 1 #define __Pyx_IS_F_CONTIG 2 static int __Pyx_init_memviewslice( struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference); static CYTHON_INLINE int __pyx_add_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); #define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) #define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) #define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) #define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) static CYTHON_INLINE void 
__Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); /* GetTopmostException.proto */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); #endif /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* ArgTypeTest.proto */ #define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 
1 :\ __Pyx__ArgTypeTest(obj, type, name, exact)) static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); /* IncludeStringH.proto */ #include <string.h> /* BytesEquals.proto */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /* UnicodeEquals.proto */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /* StrEquals.proto */ #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals #else #define __Pyx_PyString_Equals __Pyx_PyBytes_Equals #endif /* None.proto */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); /* UnaryNegOverflows.proto */ #define UNARY_NEG_WOULD_OVERFLOW(x)\ (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ /* GetAttr.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /* GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); /* ObjectGetItem.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); #else #define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) #endif /* decode_c_string_utf16.proto */ static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 0; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = -1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); 
} /* decode_c_string.proto */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); /* GetAttr3.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* SwapException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* FastTypeChecks.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || 
PyErr_GivenExceptionMatches(err, type2)) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ /* ListCompAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); __Pyx_SET_SIZE(list, len + 1); return 0; } return PyList_Append(list, x); } #else #define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) #endif /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); #else #define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\ (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) #endif /* ListExtend.proto */ static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { #if CYTHON_COMPILING_IN_CPYTHON PyObject* none = _PyList_Extend((PyListObject*)L, v); if (unlikely(!none)) return -1; Py_DECREF(none); return 0; #else return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); #endif } /* ListAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); __Pyx_SET_SIZE(list, len + 1); return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif /* None.proto */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); /* None.proto */ static CYTHON_INLINE long 
__Pyx_div_long(long, long); /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* HasAttr.proto */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); /* PyObject_GenericGetAttrNoDict.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr #endif /* PyObject_GenericGetAttr.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr #endif /* SetVTable.proto */ static int __Pyx_SetVtable(PyObject *dict, void *vtable); /* PyObjectGetAttrStrNoError.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); /* SetupReduce.proto */ static int __Pyx_setup_reduce(PyObject* type_obj); /* TypeImport.proto */ #ifndef __PYX_HAVE_RT_ImportType_proto #define __PYX_HAVE_RT_ImportType_proto enum __Pyx_ImportType_CheckSize { __Pyx_ImportType_CheckSize_Error = 0, __Pyx_ImportType_CheckSize_Warn = 1, __Pyx_ImportType_CheckSize_Ignore = 2 }; static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size); #endif /* CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; /* MemviewSliceIsContig.proto */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); /* OverlappingSlices.proto */ static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize); /* Capsule.proto */ static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); /* IsLittleEndian.proto */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); /* BufferFormatCheck.proto */ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); /* TypeInfoCompare.proto */ static int 
__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); /* MemviewSliceValidateAndInit.proto */ static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_float(PyObject *, int writable_flag); /* MemviewDtypeToObject.proto */ static CYTHON_INLINE PyObject *__pyx_memview_get_float(const char *itemp); static CYTHON_INLINE int __pyx_memview_set_float(const char *itemp, PyObject *obj); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* RealImag.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(__cplusplus) && CYTHON_CCOMPLEX\ && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif /* Arithmetic.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eq_float(a, b) ((a)==(b)) #define __Pyx_c_sum_float(a, b) ((a)+(b)) #define __Pyx_c_diff_float(a, b) ((a)-(b)) #define __Pyx_c_prod_float(a, b) ((a)*(b)) #define __Pyx_c_quot_float(a, b) ((a)/(b)) #define __Pyx_c_neg_float(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero_float(z) ((z)==(float)0) #define __Pyx_c_conj_float(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs_float(z) (::std::abs(z)) #define __Pyx_c_pow_float(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero_float(z) 
((z)==0) #define __Pyx_c_conj_float(z) (conjf(z)) #if 1 #define __Pyx_c_abs_float(z) (cabsf(z)) #define __Pyx_c_pow_float(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif /* Arithmetic.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eq_double(a, b) ((a)==(b)) #define __Pyx_c_sum_double(a, b) ((a)+(b)) #define __Pyx_c_diff_double(a, b) ((a)-(b)) #define __Pyx_c_prod_double(a, b) ((a)*(b)) #define __Pyx_c_quot_double(a, b) ((a)/(b)) #define __Pyx_c_neg_double(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero_double(z) ((z)==(double)0) #define __Pyx_c_conj_double(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs_double(z) (::std::abs(z)) #define __Pyx_c_pow_double(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero_double(z) ((z)==0) #define __Pyx_c_conj_double(z) (conj(z)) #if 1 #define __Pyx_c_abs_double(z) (cabs(z)) #define __Pyx_c_pow_double(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex 
__Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif /* MemviewSliceCopyTemplate.proto */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE size_t __Pyx_PyInt_As_size_t(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntFromPy.proto */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ static 
PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ /* Module declarations from 'cython.view' */ /* Module declarations from 'cython' */ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'cpython' */ /* Module declarations from 'cpython.object' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'cpython.mem' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; /* Module 
declarations from 'draco.util.truncate' */ static PyTypeObject *__pyx_array_type = 0; static PyTypeObject *__pyx_MemviewEnum_type = 0; static PyTypeObject *__pyx_memoryview_type = 0; static PyTypeObject *__pyx_memoryviewslice_type = 0; static PyObject *generic = 0; static PyObject *strided = 0; static PyObject *indirect = 0; static PyObject *contiguous = 0; static PyObject *indirect_contiguous = 0; static int __pyx_memoryview_thread_locks_used; static PyThread_type_lock __pyx_memoryview_thread_locks[8]; static CYTHON_INLINE PyObject *__Pyx_carray_to_py_Py_ssize_t(Py_ssize_t *, Py_ssize_t); /*proto*/ static CYTHON_INLINE PyObject *__Pyx_carray_to_tuple_Py_ssize_t(Py_ssize_t *, Py_ssize_t); /*proto*/ static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ static void *__pyx_align_pointer(void *, size_t); /*proto*/ static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ static PyObject *_unellipsify(PyObject *, int); /*proto*/ static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ 
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ static __Pyx_TypeInfo 
__Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 }; #define __Pyx_MODULE_NAME "draco.util.truncate" extern int __pyx_module_is_main_draco__util__truncate; int __pyx_module_is_main_draco__util__truncate = 0; /* Implementation of 'draco.util.truncate' */ static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ImportError; static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_enumerate; static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_Ellipsis; static PyObject *__pyx_builtin_id; static PyObject *__pyx_builtin_IndexError; static const char __pyx_k_O[] = "O"; static const char __pyx_k_c[] = "c"; static const char __pyx_k_i[] = "i"; static const char __pyx_k_n[] = "n"; static const char __pyx_k_id[] = "id"; static const char __pyx_k_np[] = "np"; static const char __pyx_k_err[] = "err"; static const char __pyx_k_new[] = "__new__"; static const char __pyx_k_obj[] = "obj"; static const char __pyx_k_val[] = "val"; static const char __pyx_k_wgt[] = "wgt"; static const char __pyx_k_base[] = "base"; static const char __pyx_k_dict[] = "__dict__"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_mode[] = "mode"; static const char __pyx_k_name[] = "name"; static const char __pyx_k_ndim[] = "ndim"; static const char __pyx_k_pack[] = "pack"; static const char __pyx_k_prec[] = "prec"; static const char __pyx_k_size[] = "size"; static const char __pyx_k_step[] = "step"; static const char __pyx_k_stop[] = "stop"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_ASCII[] = "ASCII"; static const char __pyx_k_class[] = "__class__"; static const char __pyx_k_error[] = "error"; static const char __pyx_k_flags[] = "flags"; static const char __pyx_k_numpy[] = "numpy"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_shape[] = "shape"; static const char __pyx_k_start[] = "start"; static const char 
__pyx_k_encode[] = "encode"; static const char __pyx_k_format[] = "format"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_name_2[] = "__name__"; static const char __pyx_k_pickle[] = "pickle"; static const char __pyx_k_reduce[] = "__reduce__"; static const char __pyx_k_struct[] = "struct"; static const char __pyx_k_unpack[] = "unpack"; static const char __pyx_k_update[] = "update"; static const char __pyx_k_asarray[] = "asarray"; static const char __pyx_k_fortran[] = "fortran"; static const char __pyx_k_memview[] = "memview"; static const char __pyx_k_Ellipsis[] = "Ellipsis"; static const char __pyx_k_fallback[] = "fallback"; static const char __pyx_k_getstate[] = "__getstate__"; static const char __pyx_k_itemsize[] = "itemsize"; static const char __pyx_k_pyx_type[] = "__pyx_type"; static const char __pyx_k_setstate[] = "__setstate__"; static const char __pyx_k_TypeError[] = "TypeError"; static const char __pyx_k_enumerate[] = "enumerate"; static const char __pyx_k_pyx_state[] = "__pyx_state"; static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; static const char __pyx_k_IndexError[] = "IndexError"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_pyx_result[] = "__pyx_result"; static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static const char __pyx_k_ImportError[] = "ImportError"; static const char __pyx_k_MemoryError[] = "MemoryError"; static const char __pyx_k_PickleError[] = "PickleError"; static const char __pyx_k_bit_truncate[] = "bit_truncate"; static const char __pyx_k_pyx_checksum[] = "__pyx_checksum"; static const char __pyx_k_stringsource[] = "stringsource"; static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; static const char __pyx_k_View_MemoryView[] = "View.MemoryView"; static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; 
static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; static const char __pyx_k_bit_truncate_fixed[] = "bit_truncate_fixed"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; static const char __pyx_k_draco_util_truncate[] = "draco.util.truncate"; static const char __pyx_k_bit_truncate_weights[] = "bit_truncate_weights"; static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; static const char __pyx_k_draco_truncation_utils[] = "draco truncation utils"; static const char __pyx_k_Input_array_must_be_1_d[] = "Input array must be 1-d."; static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; static const char __pyx_k_draco_util_truncate_pyx[] = "draco/util/truncate.pyx"; static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import"; static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in 
memory."; static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview"; static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview"; static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))"; static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; static const char __pyx_k_Weight_and_value_arrays_must_hav[] = "Weight and value arrays must have same shape ({:d} != {:d})"; static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import"; static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; static PyObject *__pyx_n_s_ASCII; static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor; static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi; static PyObject *__pyx_kp_s_Cannot_index_with_type_s; static PyObject *__pyx_n_s_Ellipsis; static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; static PyObject *__pyx_n_s_ImportError; static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0; static PyObject 
*__pyx_n_s_IndexError; static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; static PyObject *__pyx_kp_s_Input_array_must_be_1_d; static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; static PyObject *__pyx_n_s_MemoryError; static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; static PyObject *__pyx_kp_s_MemoryView_of_r_object; static PyObject *__pyx_n_b_O; static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; static PyObject *__pyx_n_s_PickleError; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_View_MemoryView; static PyObject *__pyx_kp_s_Weight_and_value_arrays_must_hav; static PyObject *__pyx_n_s_allocate_buffer; static PyObject *__pyx_n_s_asarray; static PyObject *__pyx_n_s_base; static PyObject *__pyx_n_s_bit_truncate; static PyObject *__pyx_n_s_bit_truncate_fixed; static PyObject *__pyx_n_s_bit_truncate_weights; static PyObject *__pyx_n_s_c; static PyObject *__pyx_n_u_c; static PyObject *__pyx_n_s_class; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_kp_s_contiguous_and_direct; static PyObject *__pyx_kp_s_contiguous_and_indirect; static PyObject *__pyx_n_s_dict; static PyObject *__pyx_n_s_draco_util_truncate; static PyObject *__pyx_kp_s_draco_util_truncate_pyx; static PyObject *__pyx_n_s_dtype_is_object; static PyObject *__pyx_n_s_encode; static PyObject *__pyx_n_s_enumerate; static PyObject *__pyx_n_s_err; static PyObject *__pyx_n_s_error; static PyObject *__pyx_n_s_fallback; static PyObject *__pyx_n_s_flags; static PyObject *__pyx_n_s_format; static PyObject *__pyx_n_s_fortran; static PyObject *__pyx_n_u_fortran; static PyObject *__pyx_n_s_getstate; static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; static PyObject *__pyx_n_s_i; static PyObject *__pyx_n_s_id; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_itemsize; 
static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_memview; static PyObject *__pyx_n_s_mode; static PyObject *__pyx_n_s_n; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_name_2; static PyObject *__pyx_n_s_ndim; static PyObject *__pyx_n_s_new; static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; static PyObject *__pyx_n_s_np; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_kp_s_numpy_core_multiarray_failed_to; static PyObject *__pyx_kp_s_numpy_core_umath_failed_to_impor; static PyObject *__pyx_n_s_obj; static PyObject *__pyx_n_s_pack; static PyObject *__pyx_n_s_pickle; static PyObject *__pyx_n_s_prec; static PyObject *__pyx_n_s_pyx_PickleError; static PyObject *__pyx_n_s_pyx_checksum; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_result; static PyObject *__pyx_n_s_pyx_state; static PyObject *__pyx_n_s_pyx_type; static PyObject *__pyx_n_s_pyx_unpickle_Enum; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_reduce; static PyObject *__pyx_n_s_reduce_cython; static PyObject *__pyx_n_s_reduce_ex; static PyObject *__pyx_n_s_setstate; static PyObject *__pyx_n_s_setstate_cython; static PyObject *__pyx_n_s_shape; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_start; static PyObject *__pyx_n_s_step; static PyObject *__pyx_n_s_stop; static PyObject *__pyx_kp_s_strided_and_direct; static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; static PyObject *__pyx_kp_s_strided_and_indirect; static PyObject *__pyx_kp_s_stringsource; static PyObject *__pyx_n_s_struct; static PyObject *__pyx_n_s_test; static PyObject *__pyx_kp_s_unable_to_allocate_array_data; static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; static PyObject *__pyx_n_s_unpack; static PyObject *__pyx_n_s_update; static PyObject *__pyx_n_s_val; static PyObject *__pyx_n_s_wgt; static PyObject 
*__pyx_pf_5draco_4util_8truncate_bit_truncate(CYTHON_UNUSED PyObject *__pyx_self, float __pyx_v_val, float __pyx_v_err); /* proto */ static PyObject *__pyx_pf_5draco_4util_8truncate_2bit_truncate_weights(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_val, __Pyx_memviewslice __pyx_v_wgt, float __pyx_v_fallback); /* proto */ static PyObject *__pyx_pf_5draco_4util_8truncate_4bit_truncate_fixed(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_val, float __pyx_v_prec); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED 
struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject 
*__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject 
*__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_184977713; static PyObject *__pyx_int_neg_1; static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_slice__18; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__12; static PyObject *__pyx_tuple__13; static PyObject 
/* NOTE(review): This file is machine-generated C produced by Cython for the
 * module "draco.util.truncate" (the `/* "draco/util/truncate.pyx":NN */`
 * blocks below are Cython's own back-references into the .pyx source).
 * Do not hand-edit this file: change draco/util/truncate.pyx and re-run
 * Cython instead.  Comments here only annotate the generated structure. */

/* Tail of the module-level slots for interned tuples and code objects;
 * these are filled in during module initialisation. */
*__pyx_tuple__14; static PyObject *__pyx_tuple__15; static PyObject *__pyx_tuple__16; static PyObject *__pyx_tuple__17; static PyObject *__pyx_tuple__19; static PyObject *__pyx_tuple__20; static PyObject *__pyx_tuple__21; static PyObject *__pyx_tuple__22; static PyObject *__pyx_tuple__24; static PyObject *__pyx_tuple__26; static PyObject *__pyx_tuple__28; static PyObject *__pyx_tuple__29; static PyObject *__pyx_tuple__30; static PyObject *__pyx_tuple__31; static PyObject *__pyx_tuple__32; static PyObject *__pyx_tuple__33; static PyObject *__pyx_codeobj__23; static PyObject *__pyx_codeobj__25; static PyObject *__pyx_codeobj__27; static PyObject *__pyx_codeobj__34; /* Late includes */
/* "draco/util/truncate.pyx":12 * inline float bit_truncate_float(float val, float err) nogil * * def bit_truncate(float val, float err): # <<<<<<<<<<<<<< * return bit_truncate_float(val, err) * */
/* Python wrapper */ static PyObject *__pyx_pw_5draco_4util_8truncate_1bit_truncate(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5draco_4util_8truncate_1bit_truncate = {"bit_truncate", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5draco_4util_8truncate_1bit_truncate, METH_VARARGS|METH_KEYWORDS, 0};
/* CPython entry point for bit_truncate(val, err): unpacks exactly two
 * arguments (positional or keyword "val"/"err") into C floats via
 * __pyx_PyFloat_AsFloat, then delegates to
 * __pyx_pf_5draco_4util_8truncate_bit_truncate below.  On a bad call it
 * raises via __Pyx_RaiseArgtupleInvalid / __PYX_ERR and returns NULL. */
static PyObject *__pyx_pw_5draco_4util_8truncate_1bit_truncate(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { float __pyx_v_val; float __pyx_v_err; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("bit_truncate (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_val,&__pyx_n_s_err,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_val)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_err)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bit_truncate", 1, 2, 2, 1); __PYX_ERR(0, 12, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "bit_truncate") < 0)) __PYX_ERR(0, 12, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_val = __pyx_PyFloat_AsFloat(values[0]); if (unlikely((__pyx_v_val == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 12, __pyx_L3_error) __pyx_v_err = __pyx_PyFloat_AsFloat(values[1]); if (unlikely((__pyx_v_err == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 12, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("bit_truncate", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 12, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("draco.util.truncate.bit_truncate", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5draco_4util_8truncate_bit_truncate(__pyx_self, __pyx_v_val, __pyx_v_err); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Implementation of bit_truncate: calls the C helper bit_truncate_float()
 * (declared in the .pyx as a nogil inline function; its body is not visible
 * in this chunk) and boxes the float result with PyFloat_FromDouble.
 * Returns NULL with an exception set on failure. */
static PyObject *__pyx_pf_5draco_4util_8truncate_bit_truncate(CYTHON_UNUSED PyObject *__pyx_self, float __pyx_v_val, float __pyx_v_err) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("bit_truncate", 0); /* "draco/util/truncate.pyx":13 * * def bit_truncate(float val, float err): * return bit_truncate_float(val, err) # <<<<<<<<<<<<<< * * @cython.boundscheck(False) */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble(bit_truncate_float(__pyx_v_val, __pyx_v_err)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "draco/util/truncate.pyx":12 * inline float bit_truncate_float(float val, float err) nogil * * def bit_truncate(float val, float err): # <<<<<<<<<<<<<< * return bit_truncate_float(val, err) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("draco.util.truncate.bit_truncate", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* "draco/util/truncate.pyx":17 * @cython.boundscheck(False) * @cython.wraparound(False) * def bit_truncate_weights(float[:] val, float[:] wgt, float fallback): # <<<<<<<<<<<<<< * cdef int n = val.shape[0] * if val.ndim != 1: */
/* Python wrapper */ static PyObject *__pyx_pw_5draco_4util_8truncate_3bit_truncate_weights(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_5draco_4util_8truncate_3bit_truncate_weights = {"bit_truncate_weights", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5draco_4util_8truncate_3bit_truncate_weights, METH_VARARGS|METH_KEYWORDS, 0};
/* CPython entry point for bit_truncate_weights(val, wgt, fallback).
 * Declares two 1-d float memoryview slices (zero-initialised) plus the
 * fallback scalar; the argument unpacking continues on the following
 * lines of the file. */
static PyObject *__pyx_pw_5draco_4util_8truncate_3bit_truncate_weights(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_val = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_wgt = { 0, 0, { 0 }, { 0 }, { 0 } }; float __pyx_v_fallback; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("bit_truncate_weights (wrapper)", 0); { 
/* NOTE(review): generated Cython code, continued — this is the body of the
 * bit_truncate_weights Python wrapper opened on the previous line.
 * It unpacks exactly three arguments ("val", "wgt", "fallback"); val and
 * wgt are converted with __Pyx_PyObject_to_MemoryviewSlice_ds_float using
 * PyBUF_WRITABLE, i.e. both buffers must be writable (the implementation
 * stores truncated values back into val).  Do not hand-edit; fix
 * draco/util/truncate.pyx and regenerate. */
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_val,&__pyx_n_s_wgt,&__pyx_n_s_fallback,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_val)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_wgt)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bit_truncate_weights", 1, 3, 3, 1); __PYX_ERR(0, 17, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_fallback)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bit_truncate_weights", 1, 3, 3, 2); __PYX_ERR(0, 17, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "bit_truncate_weights") < 0)) __PYX_ERR(0, 17, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_val = __Pyx_PyObject_to_MemoryviewSlice_ds_float(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_val.memview)) __PYX_ERR(0, 17, __pyx_L3_error) __pyx_v_wgt = __Pyx_PyObject_to_MemoryviewSlice_ds_float(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_wgt.memview)) __PYX_ERR(0, 17, __pyx_L3_error) __pyx_v_fallback = __pyx_PyFloat_AsFloat(values[2]); if (unlikely((__pyx_v_fallback == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 17, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("bit_truncate_weights", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 17, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("draco.util.truncate.bit_truncate_weights", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5draco_4util_8truncate_2bit_truncate_weights(__pyx_self, __pyx_v_val, __pyx_v_wgt, __pyx_v_fallback); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Implementation of bit_truncate_weights (opening only; the body continues
 * beyond this span).  The visible part reads n = val.shape[0] and begins the
 * `val.ndim != 1` check that raises ValueError("Input array must be 1-d.");
 * per the .pyx back-references it then checks wgt against n and truncates
 * val element-wise — confirm against the remainder of the generated body. */
static PyObject *__pyx_pf_5draco_4util_8truncate_2bit_truncate_weights(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_val, __Pyx_memviewslice __pyx_v_wgt, float __pyx_v_fallback) { int __pyx_v_n; int __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; int __pyx_t_10; Py_ssize_t __pyx_t_11; Py_ssize_t __pyx_t_12; double __pyx_t_13; Py_ssize_t __pyx_t_14; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("bit_truncate_weights", 0); /* "draco/util/truncate.pyx":18 * @cython.wraparound(False) * def bit_truncate_weights(float[:] val, float[:] wgt, float fallback): * cdef int n = val.shape[0] # <<<<<<<<<<<<<< * if val.ndim != 1: * raise ValueError("Input array must be 1-d.") */ __pyx_v_n = (__pyx_v_val.shape[0]); /* "draco/util/truncate.pyx":19 * def bit_truncate_weights(float[:] val, float[:] wgt, float fallback): * cdef int n = val.shape[0] * if val.ndim != 1: # <<<<<<<<<<<<<< * raise ValueError("Input array must be 1-d.") * if wgt.shape[0] != n: */ __pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_val, 1, (PyObject *(*)(char *)) 
__pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyInt_NeObjC(__pyx_t_2, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 19, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(__pyx_t_3)) { /* "draco/util/truncate.pyx":20 * cdef int n = val.shape[0] * if val.ndim != 1: * raise ValueError("Input array must be 1-d.") # <<<<<<<<<<<<<< * if wgt.shape[0] != n: * raise ValueError( */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 20, __pyx_L1_error) /* "draco/util/truncate.pyx":19 * def bit_truncate_weights(float[:] val, float[:] wgt, float fallback): * cdef int n = val.shape[0] * if val.ndim != 1: # <<<<<<<<<<<<<< * raise ValueError("Input array must be 1-d.") * if wgt.shape[0] != n: */ } /* "draco/util/truncate.pyx":21 * if val.ndim != 1: * raise ValueError("Input array must be 1-d.") * if wgt.shape[0] != n: # <<<<<<<<<<<<<< * raise ValueError( * "Weight and value arrays must have same " */ __pyx_t_3 = (((__pyx_v_wgt.shape[0]) != __pyx_v_n) != 0); if (unlikely(__pyx_t_3)) { /* "draco/util/truncate.pyx":24 * raise ValueError( * "Weight and value arrays must have same " * "shape ({:d} != {:d})".format(wgt.shape, n) # <<<<<<<<<<<<<< * ) * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_kp_s_Weight_and_value_arrays_must_hav, __pyx_n_s_format); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_carray_to_py_Py_ssize_t(__pyx_v_wgt.shape, 8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_n); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = NULL; __pyx_t_7 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_t_4, __pyx_t_5}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_t_4, __pyx_t_5}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } else #endif { __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_6); __pyx_t_6 = NULL; } __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_t_5); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_1 = 
__Pyx_PyObject_Call(__pyx_t_2, __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "draco/util/truncate.pyx":22 * raise ValueError("Input array must be 1-d.") * if wgt.shape[0] != n: * raise ValueError( # <<<<<<<<<<<<<< * "Weight and value arrays must have same " * "shape ({:d} != {:d})".format(wgt.shape, n) */ __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(0, 22, __pyx_L1_error) /* "draco/util/truncate.pyx":21 * if val.ndim != 1: * raise ValueError("Input array must be 1-d.") * if wgt.shape[0] != n: # <<<<<<<<<<<<<< * raise ValueError( * "Weight and value arrays must have same " */ } /* "draco/util/truncate.pyx":27 * ) * * cdef int i = 0 # <<<<<<<<<<<<<< * * for i in prange(n, nogil=True): */ __pyx_v_i = 0; /* "draco/util/truncate.pyx":29 * cdef int i = 0 * * for i in prange(n, nogil=True): # <<<<<<<<<<<<<< * if wgt[i] != 0: * val[i] = bit_truncate_float(val[i], 1. 
/ wgt[i]**0.5) */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { __pyx_t_7 = __pyx_v_n; if ((1 == 0)) abort(); { int __pyx_parallel_temp0 = ((int)0xbad0bad0); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_10 = (__pyx_t_7 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_10 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_3) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) #endif /* _OPENMP */ for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_10; __pyx_t_9++){ if (__pyx_parallel_why < 2) { __pyx_v_i = (int)(0 + 1 * __pyx_t_9); /* "draco/util/truncate.pyx":30 * * for i in prange(n, nogil=True): * if wgt[i] != 0: # <<<<<<<<<<<<<< * val[i] = bit_truncate_float(val[i], 1. / wgt[i]**0.5) * else: */ __pyx_t_11 = __pyx_v_i; __pyx_t_3 = (((*((float *) ( /* dim=0 */ (__pyx_v_wgt.data + __pyx_t_11 * __pyx_v_wgt.strides[0]) ))) != 0.0) != 0); if (__pyx_t_3) { /* "draco/util/truncate.pyx":31 * for i in prange(n, nogil=True): * if wgt[i] != 0: * val[i] = bit_truncate_float(val[i], 1. 
/ wgt[i]**0.5) # <<<<<<<<<<<<<< * else: * val[i] = bit_truncate_float(val[i], fallback * val[i]) */ __pyx_t_11 = __pyx_v_i; __pyx_t_12 = __pyx_v_i; __pyx_t_13 = pow(((double)(*((float *) ( /* dim=0 */ (__pyx_v_wgt.data + __pyx_t_12 * __pyx_v_wgt.strides[0]) )))), 0.5); if (unlikely(__pyx_t_13 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif __PYX_ERR(0, 31, __pyx_L10_error) } __pyx_t_12 = __pyx_v_i; *((float *) ( /* dim=0 */ (__pyx_v_val.data + __pyx_t_12 * __pyx_v_val.strides[0]) )) = bit_truncate_float((*((float *) ( /* dim=0 */ (__pyx_v_val.data + __pyx_t_11 * __pyx_v_val.strides[0]) ))), (1. / __pyx_t_13)); /* "draco/util/truncate.pyx":30 * * for i in prange(n, nogil=True): * if wgt[i] != 0: # <<<<<<<<<<<<<< * val[i] = bit_truncate_float(val[i], 1. / wgt[i]**0.5) * else: */ goto __pyx_L12; } /* "draco/util/truncate.pyx":33 * val[i] = bit_truncate_float(val[i], 1. 
/ wgt[i]**0.5) * else: * val[i] = bit_truncate_float(val[i], fallback * val[i]) # <<<<<<<<<<<<<< * * return np.asarray(val) */ /*else*/ { __pyx_t_11 = __pyx_v_i; __pyx_t_12 = __pyx_v_i; __pyx_t_14 = __pyx_v_i; *((float *) ( /* dim=0 */ (__pyx_v_val.data + __pyx_t_14 * __pyx_v_val.strides[0]) )) = bit_truncate_float((*((float *) ( /* dim=0 */ (__pyx_v_val.data + __pyx_t_11 * __pyx_v_val.strides[0]) ))), (__pyx_v_fallback * (*((float *) ( /* dim=0 */ (__pyx_v_val.data + __pyx_t_12 * __pyx_v_val.strides[0]) ))))); } __pyx_L12:; goto __pyx_L14; __pyx_L10_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L13; __pyx_L13:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates0) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_i; } __pyx_L14:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_i = __pyx_parallel_temp0; switch (__pyx_parallel_why) { case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L6_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "draco/util/truncate.pyx":29 * cdef int i = 0 * * for i in prange(n, nogil=True): # <<<<<<<<<<<<<< * if wgt[i] != 0: * val[i] = bit_truncate_float(val[i], 1. 
/ wgt[i]**0.5) */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L7; } __pyx_L6_error: { #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L7:; } } /* "draco/util/truncate.pyx":35 * val[i] = bit_truncate_float(val[i], fallback * val[i]) * * return np.asarray(val) # <<<<<<<<<<<<<< * * @cython.boundscheck(False) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 35, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_asarray); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 35, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_val, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 35, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_8))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_8); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_8, function); } } __pyx_t_2 = (__pyx_t_5) ? 
/* NOTE(review): this file is machine-generated by Cython from
 * draco/util/truncate.pyx -- do not edit by hand; fix the .pyx and
 * regenerate.  The long generated source-echo comments have been
 * condensed to one-line references below.  This span resumes
 * mid-expression: it finishes the `return np.asarray(val)`
 * (truncate.pyx:35) tail of bit_truncate_weights. */
__Pyx_PyObject_Call2Args(__pyx_t_8, __pyx_t_5, __pyx_t_1) : __Pyx_PyObject_CallOneArg(__pyx_t_8, __pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 35, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* truncate.pyx:17: def bit_truncate_weights(float[:] val, float[:] wgt, float fallback) */

  /* function exit code: release temporaries and the two memoryview
   * slices on both the success and error paths. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("draco.util.truncate.bit_truncate_weights", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __PYX_XDEC_MEMVIEW(&__pyx_v_val, 1);
  __PYX_XDEC_MEMVIEW(&__pyx_v_wgt, 1);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* truncate.pyx:39: def bit_truncate_fixed(float[:] val, float prec)
 *
 * Python-visible wrapper: parses the (val, prec) positional/keyword
 * arguments, converts `val` to a writable 1-d float memoryview slice
 * and `prec` to a C float, then delegates to the implementation
 * function __pyx_pf_5draco_4util_8truncate_4bit_truncate_fixed. */
/* Python wrapper */
static PyObject *__pyx_pw_5draco_4util_8truncate_5bit_truncate_fixed(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_5draco_4util_8truncate_5bit_truncate_fixed = {"bit_truncate_fixed", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5draco_4util_8truncate_5bit_truncate_fixed, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_5draco_4util_8truncate_5bit_truncate_fixed(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  __Pyx_memviewslice __pyx_v_val = { 0, 0, { 0 }, { 0 }, { 0 } };
  float __pyx_v_prec;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("bit_truncate_fixed (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_val,&__pyx_n_s_prec,0};
    PyObject* values[2] = {0,0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      /* Collect however many positional args were given... */
      switch (pos_args) {
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      /* ...then fill the remainder from keywords; both args are required. */
      switch (pos_args) {
        case  0:
        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_val)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        CYTHON_FALLTHROUGH;
        case  1:
        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_prec)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("bit_truncate_fixed", 1, 2, 2, 1); __PYX_ERR(0, 39, __pyx_L3_error)
        }
      }
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "bit_truncate_fixed") < 0)) __PYX_ERR(0, 39, __pyx_L3_error)
      }
    } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
    }
    /* PyBUF_WRITABLE: val is modified in place by the implementation. */
    __pyx_v_val = __Pyx_PyObject_to_MemoryviewSlice_ds_float(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_val.memview)) __PYX_ERR(0, 39, __pyx_L3_error)
    __pyx_v_prec = __pyx_PyFloat_AsFloat(values[1]); if (unlikely((__pyx_v_prec == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 39, __pyx_L3_error)
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("bit_truncate_fixed", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 39, __pyx_L3_error)
  __pyx_L3_error:;
  __Pyx_AddTraceback("draco.util.truncate.bit_truncate_fixed", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_5draco_4util_8truncate_4bit_truncate_fixed(__pyx_self, __pyx_v_val, __pyx_v_prec);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of bit_truncate_fixed (truncate.pyx:39-46):
 *   for i in range(n): val[i] = bit_truncate_float(val[i], prec * val[i])
 *   return np.asarray(val)
 * Truncates each element of `val` in place via bit_truncate_float
 * (defined elsewhere in this module; presumably drops low mantissa
 * bits to the given absolute precision -- confirm against the .pyx),
 * then returns the same buffer wrapped as an ndarray. */
static PyObject *__pyx_pf_5draco_4util_8truncate_4bit_truncate_fixed(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_val, float __pyx_v_prec) {
  int __pyx_v_n;
  int __pyx_v_i;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  Py_ssize_t __pyx_t_5;
  Py_ssize_t __pyx_t_6;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("bit_truncate_fixed", 0);

  /* truncate.pyx:40: cdef int n = val.shape[0] */
  __pyx_v_n = (__pyx_v_val.shape[0]);

  /* truncate.pyx:41: cdef int i = 0 */
  __pyx_v_i = 0;

  /* truncate.pyx:43: for i in range(n): */
  __pyx_t_1 = __pyx_v_n;
  __pyx_t_2 = __pyx_t_1;
  for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
    __pyx_v_i = __pyx_t_3;

    /* truncate.pyx:44: val[i] = bit_truncate_float(val[i], prec * val[i]) */
    __pyx_t_4 = __pyx_v_i;
    __pyx_t_5 = __pyx_v_i;
    __pyx_t_6 = __pyx_v_i;
    *((float *) ( /* dim=0 */ (__pyx_v_val.data + __pyx_t_6 * __pyx_v_val.strides[0]) )) = bit_truncate_float((*((float *) ( /* dim=0 */ (__pyx_v_val.data + __pyx_t_4 * __pyx_v_val.strides[0]) ))), (__pyx_v_prec * (*((float *) ( /* dim=0 */ (__pyx_v_val.data + __pyx_t_5 * __pyx_v_val.strides[0]) )))));
  }

  /* truncate.pyx:46: return np.asarray(val) */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_np); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 46, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_asarray); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 46, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_9);
  __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
  __pyx_t_8 = __pyx_memoryview_fromslice(__pyx_v_val, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 46, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __pyx_t_10 = NULL;
  /* Unpack a bound method so the slice can be passed positionally. */
  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_9))) {
    __pyx_t_10 = PyMethod_GET_SELF(__pyx_t_9);
    if (likely(__pyx_t_10)) {
      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9);
      __Pyx_INCREF(__pyx_t_10);
      __Pyx_INCREF(function);
      __Pyx_DECREF_SET(__pyx_t_9, function);
    }
  }
  __pyx_t_7 = (__pyx_t_10) ? __Pyx_PyObject_Call2Args(__pyx_t_9, __pyx_t_10, __pyx_t_8) : __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_8);
  __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
  __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
  if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 46, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
  __pyx_r = __pyx_t_7;
  __pyx_t_7 = 0;
  goto __pyx_L0;

  /* truncate.pyx:39: def bit_truncate_fixed(float[:] val, float prec) */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  __Pyx_AddTraceback("draco.util.truncate.bit_truncate_fixed", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __PYX_XDEC_MEMVIEW(&__pyx_v_val, 1);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* numpy/__init__.pxd:734: cdef inline object PyArray_MultiIterNew1(a)
 * Generated shim over the NumPy C-API multi-iterator constructor. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);

  /* pxd:735: return PyArray_MultiIterNew(1, <void*>a) */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if
(unlikely(!__pyx_t_1)) __PYX_ERR(1, 735, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* numpy/__init__.pxd:734: cdef inline object PyArray_MultiIterNew1(a) */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* numpy/__init__.pxd:737: cdef inline object PyArray_MultiIterNew2(a, b)
 * Generated shim over the NumPy C-API multi-iterator constructor
 * (broadcasting iterator over two arrays).  NOTE(review): generated
 * code -- do not edit by hand. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);

  /* pxd:738: return PyArray_MultiIterNew(2, <void*>a, <void*>b) */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 738, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* numpy/__init__.pxd:737: cdef inline object PyArray_MultiIterNew2(a, b) */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* numpy/__init__.pxd:740: cdef inline object PyArray_MultiIterNew3(a, b, c) */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);

  /* pxd:741: return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*>c) */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 741, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* numpy/__init__.pxd:740: cdef inline object PyArray_MultiIterNew3(a, b, c) */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* numpy/__init__.pxd:743: cdef inline object PyArray_MultiIterNew4(a, b, c, d) */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);

  /* pxd:744: return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*>d) */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 744, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* numpy/__init__.pxd:743: cdef inline object PyArray_MultiIterNew4(a, b, c, d) */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* numpy/__init__.pxd:746: cdef inline object PyArray_MultiIterNew5(a, b, c, d, e) */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);

  /* pxd:747: return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*>d, <void*>e) */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 747, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* numpy/__init__.pxd:746: cdef inline object PyArray_MultiIterNew5(a, b, c, d, e) */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* numpy/__init__.pxd:749: cdef inline tuple PyDataType_SHAPE(dtype d)
 * Returns d.subarray.shape for subarray dtypes, else the empty tuple. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  __Pyx_RefNannySetupContext("PyDataType_SHAPE", 0);

  /* pxd:750: if PyDataType_HASSUBARRAY(d): */
  __pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0);
  if (__pyx_t_1) {

    /* pxd:751: return <tuple>d.subarray.shape -- generated comment continues on the next source line with: else:
return () */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape));
    __pyx_r = ((PyObject*)__pyx_v_d->subarray->shape);
    goto __pyx_L0;

    /* pxd:750: if PyDataType_HASSUBARRAY(d): */
  }

  /* pxd:753: else: return () */
  /*else*/ {
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(__pyx_empty_tuple);
    __pyx_r = __pyx_empty_tuple;
    goto __pyx_L0;
  }

  /* pxd:749: cdef inline tuple PyDataType_SHAPE(dtype d) */

  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* numpy/__init__.pxd:868: cdef inline void set_array_base(ndarray arr, object base)
 * INCREFs base first because PyArray_SetBaseObject steals that
 * reference.  NOTE(review): generated code -- do not edit by hand. */
static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("set_array_base", 0);

  /* pxd:869: Py_INCREF(base)  # important to do this before stealing the reference below! */
  Py_INCREF(__pyx_v_base);

  /* pxd:870: PyArray_SetBaseObject(arr, base)  (return value deliberately ignored) */
  (void)(PyArray_SetBaseObject(__pyx_v_arr, __pyx_v_base));

  /* pxd:868: cdef inline void set_array_base(ndarray arr, object base) */

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* numpy/__init__.pxd:872: cdef inline object get_array_base(ndarray arr)
 * Returns arr's base object, or None when PyArray_BASE is NULL
 * (i.e. the array owns its own data). */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) {
  PyObject *__pyx_v_base;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  __Pyx_RefNannySetupContext("get_array_base", 0);

  /* pxd:873: base = PyArray_BASE(arr) */
  __pyx_v_base = PyArray_BASE(__pyx_v_arr);

  /* pxd:874: if base is NULL: */
  __pyx_t_1 = ((__pyx_v_base == NULL) != 0);
  if (__pyx_t_1) {

    /* pxd:875: return None */
    __Pyx_XDECREF(__pyx_r);
    __pyx_r = Py_None; __Pyx_INCREF(Py_None);
    goto __pyx_L0;

    /* pxd:874: if base is NULL: */
  }

  /* pxd:876: return <object>base */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(((PyObject *)__pyx_v_base));
  __pyx_r = ((PyObject *)__pyx_v_base);
  goto __pyx_L0;

  /* pxd:872: cdef inline object get_array_base(ndarray arr) */

  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* numpy/__init__.pxd:880: cdef inline int import_array() except -1
 * Cython-friendly wrapper around _import_array(); converts any failure
 * into ImportError("numpy.core.multiarray failed to import") and
 * returns -1 on error, 0 on success. */
static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("import_array", 0);

  /* pxd:881: try: */
  {
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
    __Pyx_XGOTREF(__pyx_t_1);
    __Pyx_XGOTREF(__pyx_t_2);
    __Pyx_XGOTREF(__pyx_t_3);
    /*try:*/ {

      /* pxd:882: __pyx_import_array() */
      __pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 882, __pyx_L3_error)

      /* pxd:881: try: */
    }
    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
    goto __pyx_L8_try_end;
    __pyx_L3_error:;

    /* pxd:883: except Exception: */
    __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
    if (__pyx_t_4) {
      __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
      if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 883, __pyx_L5_except_error)
      __Pyx_GOTREF(__pyx_t_5);
      __Pyx_GOTREF(__pyx_t_6);
      __Pyx_GOTREF(__pyx_t_7);

      /* pxd:884: raise ImportError("numpy.core.multiarray failed to import") */
      __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 884, __pyx_L5_except_error)
      __Pyx_GOTREF(__pyx_t_8);
      __Pyx_Raise(__pyx_t_8, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
      __PYX_ERR(1, 884, __pyx_L5_except_error)
    }
    goto __pyx_L5_except_error;
    __pyx_L5_except_error:;

    /* pxd:881: try (exception path: restore saved exception state) */
    __Pyx_XGIVEREF(__pyx_t_1);
    __Pyx_XGIVEREF(__pyx_t_2);
    __Pyx_XGIVEREF(__pyx_t_3);
    __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
    goto __pyx_L1_error;
    __pyx_L8_try_end:;
  }

  /* pxd:880: cdef inline int import_array() except -1 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* numpy/__init__.pxd:886: cdef inline int import_umath() except -1
 * Same pattern as import_array above for the umath module.
 * (Definition continues past the end of this chunk.) */
static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("import_umath", 0);

  /* pxd:887: try: */
  {
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
    __Pyx_XGOTREF(__pyx_t_1);
    __Pyx_XGOTREF(__pyx_t_2);
    __Pyx_XGOTREF(__pyx_t_3);
    /*try:*/ {

      /* pxd:888: _import_umath() */
      __pyx_t_4 = _import_umath(); if
(unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 888, __pyx_L3_error) /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":887 * * cdef inline int import_umath() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":889 * try: * _import_umath() * except Exception: # <<<<<<<<<<<<<< * raise ImportError("numpy.core.umath failed to import") * */ __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if (__pyx_t_4) { __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 889, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":890 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_ufunc() except -1: */ __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 890, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(1, 890, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":887 * * cdef inline int 
import_umath() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":886 * raise ImportError("numpy.core.multiarray failed to import") * * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":892 * raise ImportError("numpy.core.umath failed to import") * * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("import_ufunc", 0); /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":893 * * cdef inline int import_ufunc() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign 
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":894 * cdef inline int import_ufunc() except -1: * try: * _import_umath() # <<<<<<<<<<<<<< * except Exception: * raise ImportError("numpy.core.umath failed to import") */ __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 894, __pyx_L3_error) /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":893 * * cdef inline int import_ufunc() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":895 * try: * _import_umath() * except Exception: # <<<<<<<<<<<<<< * raise ImportError("numpy.core.umath failed to import") * */ __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if (__pyx_t_4) { __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 895, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":896 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< * * cdef extern from *: */ __pyx_t_8 = 
__Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 896, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(1, 896, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":893 * * cdef inline int import_ufunc() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":892 * raise ImportError("numpy.core.umath failed to import") * * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "carray.to_py":112 * * @cname("__Pyx_carray_to_py_Py_ssize_t") * cdef inline list __Pyx_carray_to_py_Py_ssize_t(base_type *v, Py_ssize_t length): # <<<<<<<<<<<<<< * cdef size_t i * cdef object value */ static CYTHON_INLINE PyObject *__Pyx_carray_to_py_Py_ssize_t(Py_ssize_t *__pyx_v_v, Py_ssize_t __pyx_v_length) { size_t __pyx_v_i; PyObject *__pyx_v_value = 0; PyObject *__pyx_v_l = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; size_t __pyx_t_2; size_t __pyx_t_3; size_t __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int 
__pyx_clineno = 0; __Pyx_RefNannySetupContext("__Pyx_carray_to_py_Py_ssize_t", 0); /* "carray.to_py":115 * cdef size_t i * cdef object value * l = PyList_New(length) # <<<<<<<<<<<<<< * for i in range(<size_t>length): * value = v[i] */ __pyx_t_1 = PyList_New(__pyx_v_length); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 115, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_l = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "carray.to_py":116 * cdef object value * l = PyList_New(length) * for i in range(<size_t>length): # <<<<<<<<<<<<<< * value = v[i] * Py_INCREF(value) */ __pyx_t_2 = ((size_t)__pyx_v_length); __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "carray.to_py":117 * l = PyList_New(length) * for i in range(<size_t>length): * value = v[i] # <<<<<<<<<<<<<< * Py_INCREF(value) * PyList_SET_ITEM(l, i, value) */ __pyx_t_1 = PyInt_FromSsize_t((__pyx_v_v[__pyx_v_i])); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 117, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XDECREF_SET(__pyx_v_value, __pyx_t_1); __pyx_t_1 = 0; /* "carray.to_py":118 * for i in range(<size_t>length): * value = v[i] * Py_INCREF(value) # <<<<<<<<<<<<<< * PyList_SET_ITEM(l, i, value) * return l */ Py_INCREF(__pyx_v_value); /* "carray.to_py":119 * value = v[i] * Py_INCREF(value) * PyList_SET_ITEM(l, i, value) # <<<<<<<<<<<<<< * return l * */ PyList_SET_ITEM(__pyx_v_l, __pyx_v_i, __pyx_v_value); } /* "carray.to_py":120 * Py_INCREF(value) * PyList_SET_ITEM(l, i, value) * return l # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_l); __pyx_r = __pyx_v_l; goto __pyx_L0; /* "carray.to_py":112 * * @cname("__Pyx_carray_to_py_Py_ssize_t") * cdef inline list __Pyx_carray_to_py_Py_ssize_t(base_type *v, Py_ssize_t length): # <<<<<<<<<<<<<< * cdef size_t i * cdef object value */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("carray.to_py.__Pyx_carray_to_py_Py_ssize_t", __pyx_clineno, __pyx_lineno, 
__pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_value); __Pyx_XDECREF(__pyx_v_l); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "carray.to_py":124 * * @cname("__Pyx_carray_to_tuple_Py_ssize_t") * cdef inline tuple __Pyx_carray_to_tuple_Py_ssize_t(base_type *v, Py_ssize_t length): # <<<<<<<<<<<<<< * cdef size_t i * cdef object value */ static CYTHON_INLINE PyObject *__Pyx_carray_to_tuple_Py_ssize_t(Py_ssize_t *__pyx_v_v, Py_ssize_t __pyx_v_length) { size_t __pyx_v_i; PyObject *__pyx_v_value = 0; PyObject *__pyx_v_t = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; size_t __pyx_t_2; size_t __pyx_t_3; size_t __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__Pyx_carray_to_tuple_Py_ssize_t", 0); /* "carray.to_py":127 * cdef size_t i * cdef object value * t = PyTuple_New(length) # <<<<<<<<<<<<<< * for i in range(<size_t>length): * value = v[i] */ __pyx_t_1 = PyTuple_New(__pyx_v_length); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 127, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_t = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "carray.to_py":128 * cdef object value * t = PyTuple_New(length) * for i in range(<size_t>length): # <<<<<<<<<<<<<< * value = v[i] * Py_INCREF(value) */ __pyx_t_2 = ((size_t)__pyx_v_length); __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "carray.to_py":129 * t = PyTuple_New(length) * for i in range(<size_t>length): * value = v[i] # <<<<<<<<<<<<<< * Py_INCREF(value) * PyTuple_SET_ITEM(t, i, value) */ __pyx_t_1 = PyInt_FromSsize_t((__pyx_v_v[__pyx_v_i])); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 129, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XDECREF_SET(__pyx_v_value, __pyx_t_1); __pyx_t_1 = 0; /* "carray.to_py":130 * for i in range(<size_t>length): * value = v[i] * Py_INCREF(value) # <<<<<<<<<<<<<< * PyTuple_SET_ITEM(t, i, value) * 
return t */ Py_INCREF(__pyx_v_value); /* "carray.to_py":131 * value = v[i] * Py_INCREF(value) * PyTuple_SET_ITEM(t, i, value) # <<<<<<<<<<<<<< * return t */ PyTuple_SET_ITEM(__pyx_v_t, __pyx_v_i, __pyx_v_value); } /* "carray.to_py":132 * Py_INCREF(value) * PyTuple_SET_ITEM(t, i, value) * return t # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_t); __pyx_r = __pyx_v_t; goto __pyx_L0; /* "carray.to_py":124 * * @cname("__Pyx_carray_to_tuple_Py_ssize_t") * cdef inline tuple __Pyx_carray_to_tuple_Py_ssize_t(base_type *v, Py_ssize_t length): # <<<<<<<<<<<<<< * cdef size_t i * cdef object value */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("carray.to_py.__Pyx_carray_to_tuple_Py_ssize_t", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_value); __Pyx_XDECREF(__pyx_v_t); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* Python wrapper */ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_shape = 0; Py_ssize_t __pyx_v_itemsize; PyObject *__pyx_v_format = 0; PyObject *__pyx_v_mode = 0; int __pyx_v_allocate_buffer; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)__pyx_n_s_c); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = 
PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(2, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(2, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(2, 122, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto 
__pyx_L5_argtuple_error; } } __pyx_v_shape = ((PyObject*)values[0]); __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 122, __pyx_L3_error) __pyx_v_format = values[2]; __pyx_v_mode = values[3]; if (values[4]) { __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 123, __pyx_L3_error) } else { /* "View.MemoryView":123 * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< * * cdef int idx */ __pyx_v_allocate_buffer = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(2, 122, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(2, 122, __pyx_L1_error) if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(2, 122, __pyx_L1_error) } __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj 
*__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { int __pyx_v_idx; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_dim; PyObject **__pyx_v_p; char __pyx_v_order; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; char *__pyx_t_7; int __pyx_t_8; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; Py_ssize_t __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); __Pyx_INCREF(__pyx_v_format); /* "View.MemoryView":129 * cdef PyObject **p * * self.ndim = <int> len(shape) # <<<<<<<<<<<<<< * self.itemsize = itemsize * */ if (unlikely(__pyx_v_shape == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(2, 129, __pyx_L1_error) } __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(2, 129, __pyx_L1_error) __pyx_v_self->ndim = ((int)__pyx_t_1); /* "View.MemoryView":130 * * self.ndim = <int> len(shape) * self.itemsize = itemsize # <<<<<<<<<<<<<< * * if not self.ndim: */ __pyx_v_self->itemsize = __pyx_v_itemsize; /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(2, 133, __pyx_L1_error) /* "View.MemoryView":132 * self.itemsize = 
itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ } /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(2, 136, __pyx_L1_error) /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ } /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ __pyx_t_2 = PyBytes_Check(__pyx_v_format); __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":139 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_3 = (__pyx_t_6) ? 
__Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ } /* "View.MemoryView":140 * if not isinstance(format, bytes): * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< * self.format = self._format * */ if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(2, 140, __pyx_L1_error) __pyx_t_3 = __pyx_v_format; __Pyx_INCREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __Pyx_GOTREF(__pyx_v_self->_format); __Pyx_DECREF(__pyx_v_self->_format); __pyx_v_self->_format = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":141 * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string * self.format = self._format # <<<<<<<<<<<<<< * * */ if (unlikely(__pyx_v_self->_format == Py_None)) { PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); __PYX_ERR(2, 141, __pyx_L1_error) } __pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(2, 141, __pyx_L1_error) __pyx_v_self->format = __pyx_t_7; /* "View.MemoryView":144 * * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< * self._strides = self._shape + self.ndim * */ __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * 
__pyx_v_self->ndim) * 2))); /* "View.MemoryView":145 * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< * * if not self._shape: */ __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(2, 148, __pyx_L1_error) /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ } /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ __pyx_t_8 = 0; __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0; for (;;) { if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(2, 151, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 151, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_9; __pyx_v_idx = __pyx_t_8; __pyx_t_8 = (__pyx_t_8 + 1); /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":153 * for idx, dim in enumerate(shape): * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) # <<<<<<<<<<<<<< * self._shape[idx] = dim * */ __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6); __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(2, 153, __pyx_L1_error) /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ } /* "View.MemoryView":154 * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim # <<<<<<<<<<<<<< * * cdef char order */ (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(2, 157, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":158 * cdef char order * if mode == 'fortran': * order = b'F' # <<<<<<<<<<<<<< * self.mode = u'fortran' * elif mode == 'c': */ __pyx_v_order = 'F'; /* "View.MemoryView":159 * if mode == 'fortran': * order = b'F' * self.mode = u'fortran' # <<<<<<<<<<<<<< * elif mode == 'c': * order = b'C' */ __Pyx_INCREF(__pyx_n_u_fortran); __Pyx_GIVEREF(__pyx_n_u_fortran); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_fortran; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ goto __pyx_L10; } /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(2, 160, __pyx_L1_error) if (likely(__pyx_t_4)) { /* "View.MemoryView":161 * self.mode = u'fortran' * elif mode == 'c': * order = b'C' # <<<<<<<<<<<<<< * self.mode = u'c' * else: */ __pyx_v_order = 'C'; /* "View.MemoryView":162 * elif mode == 'c': * order = b'C' * self.mode = u'c' # <<<<<<<<<<<<<< * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) */ __Pyx_INCREF(__pyx_n_u_c); __Pyx_GIVEREF(__pyx_n_u_c); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_c; /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ goto __pyx_L10; } /* "View.MemoryView":164 * self.mode = u'c' * else: * raise 
ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< * * self.len = fill_contig_strides_array(self._shape, self._strides, */ /*else*/ { __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(2, 164, __pyx_L1_error) } __pyx_L10:; /* "View.MemoryView":166 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) * * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< * itemsize, self.ndim, order) * */ __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); /* "View.MemoryView":169 * itemsize, self.ndim, order) * * self.free_data = allocate_buffer # <<<<<<<<<<<<<< * self.dtype_is_object = format == b'O' * if allocate_buffer: */ __pyx_v_self->free_data = __pyx_v_allocate_buffer; /* "View.MemoryView":170 * * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< * if allocate_buffer: * */ __pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 170, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 170, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_v_self->dtype_is_object = __pyx_t_4; /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ __pyx_t_4 = (__pyx_v_allocate_buffer != 0); 
if (__pyx_t_4) { /* "View.MemoryView":174 * * * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<< * if not self.data: * raise MemoryError("unable to allocate array data.") */ __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(2, 176, __pyx_L1_error) /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_4) { /* "View.MemoryView":179 * * if self.dtype_is_object: * p = <PyObject **> self.data # <<<<<<<<<<<<<< * for i in range(self.len / itemsize): * p[i] = Py_None */ __pyx_v_p = ((PyObject **)__pyx_v_self->data); /* "View.MemoryView":180 * if self.dtype_is_object: * p = <PyObject **> self.data * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< * p[i] = Py_None * Py_INCREF(Py_None) */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(2, 180, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && 
unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(2, 180, __pyx_L1_error) } __pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize); __pyx_t_9 = __pyx_t_1; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) { __pyx_v_i = __pyx_t_11; /* "View.MemoryView":181 * p = <PyObject **> self.data * for i in range(self.len / itemsize): * p[i] = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ (__pyx_v_p[__pyx_v_i]) = Py_None; /* "View.MemoryView":182 * for i in range(self.len / itemsize): * p[i] = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * @cname('getbuffer') */ Py_INCREF(Py_None); } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ } /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_format); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ 
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_bufmode; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; char *__pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; Py_ssize_t *__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":186 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 # <<<<<<<<<<<<<< * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = -1; /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 187, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":188 * cdef int bufmode = -1 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * elif self.mode == 
u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ goto __pyx_L3; } /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(2, 189, __pyx_L1_error) __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":190 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") */ __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ } __pyx_L3:; /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * 
info.len = self.len */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(2, 192, __pyx_L1_error) /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ } /* "View.MemoryView":193 * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data # <<<<<<<<<<<<<< * info.len = self.len * info.ndim = self.ndim */ __pyx_t_4 = __pyx_v_self->data; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":194 * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data * info.len = self.len # <<<<<<<<<<<<<< * info.ndim = self.ndim * info.shape = self._shape */ __pyx_t_5 = __pyx_v_self->len; __pyx_v_info->len = __pyx_t_5; /* "View.MemoryView":195 * info.buf = self.data * info.len = self.len * info.ndim = self.ndim # <<<<<<<<<<<<<< * info.shape = self._shape * info.strides = self._strides */ __pyx_t_6 = __pyx_v_self->ndim; __pyx_v_info->ndim = __pyx_t_6; /* "View.MemoryView":196 * info.len = self.len * info.ndim = self.ndim * info.shape = self._shape # <<<<<<<<<<<<<< * info.strides = self._strides * info.suboffsets = NULL */ __pyx_t_7 = __pyx_v_self->_shape; __pyx_v_info->shape = __pyx_t_7; /* "View.MemoryView":197 * info.ndim = self.ndim * info.shape = self._shape * info.strides = self._strides # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = self.itemsize */ __pyx_t_7 = __pyx_v_self->_strides; __pyx_v_info->strides = __pyx_t_7; /* "View.MemoryView":198 * info.shape = self._shape * info.strides = self._strides * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = 
self.itemsize * info.readonly = 0 */ __pyx_v_info->suboffsets = NULL; /* "View.MemoryView":199 * info.strides = self._strides * info.suboffsets = NULL * info.itemsize = self.itemsize # <<<<<<<<<<<<<< * info.readonly = 0 * */ __pyx_t_5 = __pyx_v_self->itemsize; __pyx_v_info->itemsize = __pyx_t_5; /* "View.MemoryView":200 * info.suboffsets = NULL * info.itemsize = self.itemsize * info.readonly = 0 # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->readonly = 0; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":203 * * if flags & PyBUF_FORMAT: * info.format = self.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_4 = __pyx_v_self->format; __pyx_v_info->format = __pyx_t_4; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ goto __pyx_L5; } /* "View.MemoryView":205 * info.format = self.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.obj = self */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L5:; /* "View.MemoryView":207 * info.format = NULL * * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { 
__Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* Python wrapper */ static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":213 * def __dealloc__(array self): * if self.callback_free_data != NULL: * self.callback_free_data(self.data) # <<<<<<<<<<<<<< * elif self.free_data: * if self.dtype_is_object: */ __pyx_v_self->callback_free_data(__pyx_v_self->data); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ goto __pyx_L3; } /* "View.MemoryView":214 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * 
if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ __pyx_t_1 = (__pyx_v_self->free_data != 0); if (__pyx_t_1) { /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":216 * elif self.free_data: * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< * self._strides, self.ndim, False) * free(self.data) */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ } /* "View.MemoryView":218 * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) * free(self.data) # <<<<<<<<<<<<<< * PyObject_Free(self._shape) * */ free(__pyx_v_self->data); /* "View.MemoryView":214 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ } __pyx_L3:; /* "View.MemoryView":219 * self._strides, self.ndim, False) * free(self.data) * PyObject_Free(self._shape) # <<<<<<<<<<<<<< * * @property */ PyObject_Free(__pyx_v_self->_shape); /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return 
self.get_memview() * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":223 * @property * def memview(self): * return self.get_memview() # <<<<<<<<<<<<<< * * @cname('get_memview') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations 
PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_memview", 0); /* "View.MemoryView":227 * @cname('get_memview') * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< * return memoryview(self, flags, self.dtype_is_object) * */ __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); /* "View.MemoryView":228 * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); 
__Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* Python wrapper */ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":231 * * def __len__(self): * return self._shape[0] # <<<<<<<<<<<<<< * * def __getattr__(self, attr): */ __pyx_r = (__pyx_v_self->_shape[0]); goto __pyx_L0; /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* Python wrapper */ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); __pyx_r = 
__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getattr__", 0); /* "View.MemoryView":234 * * def __getattr__(self, attr): * return getattr(self.memview, attr) # <<<<<<<<<<<<<< * * def __getitem__(self, item): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* Python wrapper */ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = 0; 
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":237 * * def __getitem__(self, item): * return self.memview[item] # <<<<<<<<<<<<<< * * def __setitem__(self, item, value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* Python wrapper */ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ static int 
__pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); /* "View.MemoryView":240 * * def __setitem__(self, item, value): * self.memview[item] = value # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(2, 240, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static 
PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(2, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject 
*__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(2, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":244 * * 
@cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) { struct __pyx_array_obj *__pyx_v_result = 0; struct __pyx_array_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("array_cwrapper", 0); /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":249 * * if buf == NULL: * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 3, 
__pyx_t_4); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ goto __pyx_L3; } /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ /*else*/ { __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_3 = 0; /* "View.MemoryView":252 * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) # <<<<<<<<<<<<<< * result.data = buf * */ __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if 
(PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(2, 252, __pyx_L1_error) /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":253 * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) * result.data = buf # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->data = __pyx_v_buf; } __pyx_L3:; /* "View.MemoryView":255 * result.data = buf * * return result # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* Python wrapper */ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int 
__pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_name = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(2, 281, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_name = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(2, 281, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); /* 
"View.MemoryView":282 * cdef object name * def __init__(self, name): * self.name = name # <<<<<<<<<<<<<< * def __repr__(self): * return self.name */ __Pyx_INCREF(__pyx_v_name); __Pyx_GIVEREF(__pyx_v_name); __Pyx_GOTREF(__pyx_v_self->name); __Pyx_DECREF(__pyx_v_self->name); __pyx_v_self->name = __pyx_v_name; /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* function exit code */ __pyx_r = 0; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* Python wrapper */ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":284 * self.name = name * def __repr__(self): * return self.name # <<<<<<<<<<<<<< * * cdef generic = Enum("<strided and direct or indirect>") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->name); __pyx_r = __pyx_v_self->name; goto __pyx_L0; /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef 
object _dict */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_v_state = 0; PyObject *__pyx_v__dict = 0; int __pyx_v_use_setstate; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":5 * cdef object _dict * cdef bint use_setstate * state = (self.name,) # <<<<<<<<<<<<<< * _dict = getattr(self, '__dict__', None) * if _dict is not None: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_self->name); __Pyx_GIVEREF(__pyx_v_self->name); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); __pyx_v_state = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":6 * cdef bint use_setstate * state = (self.name,) * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< * if _dict is not None: * state += (_dict,) */ __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v__dict = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', 
None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ __pyx_t_2 = (__pyx_v__dict != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "(tree fragment)":8 * _dict = getattr(self, '__dict__', None) * if _dict is not None: * state += (_dict,) # <<<<<<<<<<<<<< * use_setstate = True * else: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v__dict); __Pyx_GIVEREF(__pyx_v__dict); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "(tree fragment)":9 * if _dict is not None: * state += (_dict,) * use_setstate = True # <<<<<<<<<<<<<< * else: * use_setstate = self.name is not None */ __pyx_v_use_setstate = 1; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ goto __pyx_L3; } /* "(tree fragment)":11 * use_setstate = True * else: * use_setstate = self.name is not None # <<<<<<<<<<<<<< * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state */ /*else*/ { __pyx_t_3 = (__pyx_v_self->name != Py_None); __pyx_v_use_setstate = __pyx_t_3; } __pyx_L3:; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ __pyx_t_3 = (__pyx_v_use_setstate != 0); if (__pyx_t_3) { /* "(tree fragment)":13 * use_setstate = self.name is not None * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) */ __Pyx_XDECREF(__pyx_r); 
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); __pyx_t_4 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ } /* "(tree fragment)":15 * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject 
*)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); __pyx_t_5 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_state); __Pyx_XDECREF(__pyx_v__dict); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":17 * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(2, 17, __pyx_L1_error) __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 17, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { Py_intptr_t __pyx_v_aligned_p; size_t __pyx_v_offset; void *__pyx_r; int __pyx_t_1; /* "View.MemoryView":300 * cdef void 
*align_pointer(void *memory, size_t alignment) nogil: * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<< * cdef size_t offset * */ __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); /* "View.MemoryView":304 * * with cython.cdivision(True): * offset = aligned_p % alignment # <<<<<<<<<<<<<< * * if offset > 0: */ __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ __pyx_t_1 = ((__pyx_v_offset > 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":307 * * if offset > 0: * aligned_p += alignment - offset # <<<<<<<<<<<<<< * * return <void *> aligned_p */ __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ } /* "View.MemoryView":309 * aligned_p += alignment - offset * * return <void *> aligned_p # <<<<<<<<<<<<<< * * */ __pyx_r = ((void *)__pyx_v_aligned_p); goto __pyx_L0; /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* Python wrapper */ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_obj = 0; int __pyx_v_flags; int __pyx_v_dtype_is_object; int __pyx_lineno = 0; const char 
*__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(2, 345, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(2, 345, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_obj = values[0]; __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 345, __pyx_L3_error) if (values[2]) { __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == 
(int)-1) && PyErr_Occurred())) __PYX_ERR(2, 345, __pyx_L3_error) } else { __pyx_v_dtype_is_object = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(2, 345, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); /* "View.MemoryView":346 * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj # <<<<<<<<<<<<<< * self.flags = flags * if type(self) is memoryview or obj is not None: */ __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); __Pyx_GOTREF(__pyx_v_self->obj); __Pyx_DECREF(__pyx_v_self->obj); __pyx_v_self->obj = __pyx_v_obj; /* "View.MemoryView":347 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj * self.flags = flags # <<<<<<<<<<<<<< * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) */ __pyx_v_self->flags = __pyx_v_flags; /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, 
&self.view, flags) * if <PyObject *> self.view.obj == NULL: */ __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = (__pyx_v_obj != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":349 * self.flags = flags * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None */ __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 349, __pyx_L1_error) /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":351 * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; /* "View.MemoryView":352 * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * global __pyx_memoryview_thread_locks_used */ Py_INCREF(Py_None); /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ } /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * 
__Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ } /* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); if (__pyx_t_1) { /* "View.MemoryView":356 * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: */ __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); /* "View.MemoryView":357 * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< * if self.lock is NULL: * self.lock = PyThread_allocate_lock() */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); /* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":359 * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< * if self.lock is NULL: * 
raise MemoryError */ __pyx_v_self->lock = PyThread_allocate_lock(); /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":361 * self.lock = PyThread_allocate_lock() * if self.lock is NULL: * raise MemoryError # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ PyErr_NoMemory(); __PYX_ERR(2, 361, __pyx_L1_error) /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ } /* "View.MemoryView":363 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":364 * * if flags & PyBUF_FORMAT: * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< * else: * self.dtype_is_object = dtype_is_object */ __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L11_bool_binop_done; } __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_self->dtype_is_object = __pyx_t_1; /* "View.MemoryView":363 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ goto __pyx_L10; } /* "View.MemoryView":366 * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] 
== b'\0') * else: * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( */ /*else*/ { __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; } __pyx_L10:; /* "View.MemoryView":368 * self.dtype_is_object = dtype_is_object * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL */ __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); /* "View.MemoryView":370 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL # <<<<<<<<<<<<<< * * def __dealloc__(memoryview self): */ __pyx_v_self->typeinfo = NULL; /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* Python wrapper */ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { int __pyx_v_i; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyThread_type_lock __pyx_t_6; PyThread_type_lock __pyx_t_7; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: */ __pyx_t_1 = (__pyx_v_self->obj != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":374 * def __dealloc__(memoryview self): * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< * elif (<__pyx_buffer *> &self.view).obj == Py_None: * */ __Pyx_ReleaseBuffer((&__pyx_v_self->view)); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: */ goto __pyx_L3; } /* "View.MemoryView":375 * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< * * (<__pyx_buffer *> &self.view).obj = NULL */ __pyx_t_2 = ((((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None) != 0); if (__pyx_t_2) { /* "View.MemoryView":377 * elif (<__pyx_buffer *> &self.view).obj == Py_None: * * (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<< * Py_DECREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = NULL; /* "View.MemoryView":378 * * (<__pyx_buffer *> &self.view).obj = NULL * Py_DECREF(Py_None) # <<<<<<<<<<<<<< * * cdef int i */ Py_DECREF(Py_None); /* "View.MemoryView":375 * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< * * (<__pyx_buffer *> &self.view).obj = NULL */ } __pyx_L3:; /* "View.MemoryView":382 * cdef int i * 
global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":383 * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 */ __pyx_t_3 = __pyx_memoryview_thread_locks_used; __pyx_t_4 = __pyx_t_3; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":384 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); if (__pyx_t_2) { /* "View.MemoryView":385 * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); /* "View.MemoryView":386 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); if (__pyx_t_2) { /* "View.MemoryView":388 * if i != 
__pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< * break * else: */ __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); __pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]); /* "View.MemoryView":387 * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break */ (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6; (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7; /* "View.MemoryView":386 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ } /* "View.MemoryView":389 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break # <<<<<<<<<<<<<< * else: * PyThread_free_lock(self.lock) */ goto __pyx_L6_break; /* "View.MemoryView":384 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ } } /*else*/ { /* "View.MemoryView":391 * break * else: * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< * * 
cdef char *get_item_pointer(memoryview self, object index) except NULL: */ PyThread_free_lock(__pyx_v_self->lock); } __pyx_L6_break:; /* "View.MemoryView":382 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":393 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { Py_ssize_t __pyx_v_dim; char *__pyx_v_itemp; PyObject *__pyx_v_idx = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_item_pointer", 0); /* "View.MemoryView":395 * cdef char *get_item_pointer(memoryview self, object index) except NULL: * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<< * * for dim, idx in enumerate(index): */ __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); /* "View.MemoryView":397 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ __pyx_t_1 = 0; if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else 
{ __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 397, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_4)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(2, 397, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(2, 397, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(2, 397, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_1; __pyx_t_1 = (__pyx_t_1 + 1); /* "View.MemoryView":398 * * for dim, idx in enumerate(index): * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< * * return itemp */ __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 398, __pyx_L1_error) __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if 
(unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(2, 398, __pyx_L1_error) __pyx_v_itemp = __pyx_t_7; /* "View.MemoryView":397 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":400 * itemp = pybuffer_index(&self.view, itemp, idx, dim) * * return itemp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_itemp; goto __pyx_L0; /* "View.MemoryView":393 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_idx); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":403 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* Python wrapper */ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_indices = NULL; char *__pyx_v_itemp; PyObject *__pyx_r = NULL; 
__Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":404 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":405 * def __getitem__(memoryview self, object index): * if index is Ellipsis: * return self # <<<<<<<<<<<<<< * * have_slices, indices = _unellipsify(index, self.view.ndim) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; /* "View.MemoryView":404 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ } /* "View.MemoryView":407 * return self * * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * cdef char *itemp */ __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (likely(__pyx_t_3 != Py_None)) { PyObject* sequence = __pyx_t_3; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(2, 407, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 407, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_5); #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(2, 407, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_indices = __pyx_t_5; __pyx_t_5 = 0; /* "View.MemoryView":410 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(2, 410, __pyx_L1_error) if (__pyx_t_2) { /* "View.MemoryView":411 * cdef char *itemp * if have_slices: * return memview_slice(self, indices) # <<<<<<<<<<<<<< * else: * itemp = self.get_item_pointer(indices) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":410 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ } /* "View.MemoryView":413 * return memview_slice(self, indices) * else: * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< * return self.convert_item_to_object(itemp) * */ /*else*/ { __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(2, 413, __pyx_L1_error) __pyx_v_itemp = __pyx_t_6; /* "View.MemoryView":414 * else: * itemp = self.get_item_pointer(indices) * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< * * def __setitem__(memoryview self, object index, object value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":403 * * * 
def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":416 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* Python wrapper */ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_obj = NULL; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); __Pyx_INCREF(__pyx_v_index); /* "View.MemoryView":417 * * def __setitem__(memoryview self, object index, object value): * 
if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ __pyx_t_1 = (__pyx_v_self->view.readonly != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":418 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(2, 418, __pyx_L1_error) /* "View.MemoryView":417 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ } /* "View.MemoryView":420 * raise TypeError("Cannot assign to read-only memoryview") * * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * if have_slices: */ __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (likely(__pyx_t_2 != Py_None)) { PyObject* sequence = __pyx_t_2; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(2, 420, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif 
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(2, 420, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_3; __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":422 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 422, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":423 * * if have_slices: * obj = self.is_slice(value) # <<<<<<<<<<<<<< * if obj: * self.setitem_slice_assignment(self[index], obj) */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 423, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_obj = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":424 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 424, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":425 * obj = self.is_slice(value) * if obj: * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< * else: * self.setitem_slice_assign_scalar(self[index], value) */ __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":424 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * 
self.setitem_slice_assignment(self[index], obj) * else: */ goto __pyx_L5; } /* "View.MemoryView":427 * self.setitem_slice_assignment(self[index], obj) * else: * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< * else: * self.setitem_indexed(index, value) */ /*else*/ { __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(2, 427, __pyx_L1_error) __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L5:; /* "View.MemoryView":422 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ goto __pyx_L4; } /* "View.MemoryView":429 * self.setitem_slice_assign_scalar(self[index], value) * else: * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< * * cdef is_slice(self, obj): */ /*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 429, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L4:; /* "View.MemoryView":416 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); 
__Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_obj); __Pyx_XDECREF(__pyx_v_index); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":431 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_slice", 0); __Pyx_INCREF(__pyx_v_obj); /* "View.MemoryView":432 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "View.MemoryView":434 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | 
PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":435 * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) # <<<<<<<<<<<<<< * except TypeError: * return None */ __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 435, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); /* "View.MemoryView":434 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; /* "View.MemoryView":436 * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) * except TypeError: # <<<<<<<<<<<<<< * return None * */ 
__pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_9) { __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(2, 436, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":437 * self.dtype_is_object) * except TypeError: * return None # <<<<<<<<<<<<<< * * return obj */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L7_except_return; } goto __pyx_L6_except_error; __pyx_L6_except_error:; /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L7_except_return:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L0; __pyx_L9_try_end:; } /* "View.MemoryView":432 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ } /* "View.MemoryView":439 * return None * * return obj # <<<<<<<<<<<<<< * * cdef setitem_slice_assignment(self, dst, src): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_obj); __pyx_r = __pyx_v_obj; goto __pyx_L0; /* "View.MemoryView":431 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); 
__Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_obj); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":441 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { __Pyx_memviewslice __pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_src_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; __Pyx_memviewslice *__pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); /* "View.MemoryView":445 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(2, 445, __pyx_L1_error) __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(2, 445, __pyx_L1_error) /* "View.MemoryView":446 * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< * src.ndim, dst.ndim, self.dtype_is_object) * */ if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(2, 446, __pyx_L1_error) __pyx_t_2 = 
__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(2, 446, __pyx_L1_error) /* "View.MemoryView":447 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 447, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 447, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":445 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ __pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(2, 445, __pyx_L1_error) /* "View.MemoryView":441 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, 
__pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":449 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { int __pyx_v_array[0x80]; void *__pyx_v_tmp; void *__pyx_v_item; __Pyx_memviewslice *__pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_tmp_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; char const *__pyx_t_6; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; PyObject *__pyx_t_12 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); /* "View.MemoryView":451 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): * cdef int array[128] * cdef void *tmp = NULL # <<<<<<<<<<<<<< * cdef void *item * */ __pyx_v_tmp = NULL; /* "View.MemoryView":456 * cdef __Pyx_memviewslice *dst_slice * cdef __Pyx_memviewslice tmp_slice * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< * * if <size_t>self.view.itemsize > sizeof(array): */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(2, 456, __pyx_L1_error) __pyx_v_dst_slice = __pyx_t_1; /* "View.MemoryView":458 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ __pyx_t_2 = 
((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); if (__pyx_t_2) { /* "View.MemoryView":459 * * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< * if tmp == NULL: * raise MemoryError */ __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); /* "View.MemoryView":460 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ __pyx_t_2 = ((__pyx_v_tmp == NULL) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":461 * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: * raise MemoryError # <<<<<<<<<<<<<< * item = tmp * else: */ PyErr_NoMemory(); __PYX_ERR(2, 461, __pyx_L1_error) /* "View.MemoryView":460 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ } /* "View.MemoryView":462 * if tmp == NULL: * raise MemoryError * item = tmp # <<<<<<<<<<<<<< * else: * item = <void *> array */ __pyx_v_item = __pyx_v_tmp; /* "View.MemoryView":458 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ goto __pyx_L3; } /* "View.MemoryView":464 * item = tmp * else: * item = <void *> array # <<<<<<<<<<<<<< * * try: */ /*else*/ { __pyx_v_item = ((void *)__pyx_v_array); } __pyx_L3:; /* "View.MemoryView":466 * item = <void *> array * * try: # <<<<<<<<<<<<<< * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value */ /*try:*/ { /* "View.MemoryView":467 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ __pyx_t_2 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_2) { /* "View.MemoryView":468 * try: * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< * 
else: * self.assign_item_from_object(<char *> item, value) */ (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); /* "View.MemoryView":467 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ goto __pyx_L8; } /* "View.MemoryView":470 * (<PyObject **> item)[0] = <PyObject *> value * else: * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< * * */ /*else*/ { __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 470, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L8:; /* "View.MemoryView":474 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ __pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":475 * * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, * item, self.dtype_is_object) */ __pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 475, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":474 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ } /* "View.MemoryView":476 * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< * item, self.dtype_is_object) * finally: */ 
__pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); } /* "View.MemoryView":479 * item, self.dtype_is_object) * finally: * PyMem_Free(tmp) # <<<<<<<<<<<<<< * * cdef setitem_indexed(self, index, value): */ /*finally:*/ { /*normal exit:*/{ PyMem_Free(__pyx_v_tmp); goto __pyx_L7; } __pyx_L6_error:; /*exception exit:*/{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __Pyx_XGOTREF(__pyx_t_12); __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename; { PyMem_Free(__pyx_v_tmp); } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_XGIVEREF(__pyx_t_12); __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_XGIVEREF(__pyx_t_9); __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9); __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6; goto __pyx_L1_error; } __pyx_L7:; } /* "View.MemoryView":449 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); 
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":481 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations char *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_indexed", 0); /* "View.MemoryView":482 * * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< * self.assign_item_from_object(itemp, value) * */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(2, 482, __pyx_L1_error) __pyx_v_itemp = __pyx_t_1; /* "View.MemoryView":483 * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":481 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ /* function exit code */ __pyx_r = Py_None; 
__Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":485 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_v_struct = NULL; PyObject *__pyx_v_bytesitem = 0; PyObject *__pyx_v_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; size_t __pyx_t_10; int __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":488 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef bytes bytesitem * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 488, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":491 * cdef bytes bytesitem * * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< * try: * result = struct.unpack(self.view.format, bytesitem) */ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 491, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); 
__pyx_t_1 = 0; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "View.MemoryView":493 * bytesitem = itemp[:self.view.itemsize] * try: * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< * except struct.error: * raise ValueError("Unable to convert item to object") */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 493, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 493, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); 
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); __Pyx_INCREF(__pyx_v_bytesitem); __Pyx_GIVEREF(__pyx_v_bytesitem); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ } /* "View.MemoryView":497 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ /*else:*/ { __pyx_t_10 = strlen(__pyx_v_self->view.format); __pyx_t_11 = ((__pyx_t_10 == 1) != 0); if (__pyx_t_11) { /* "View.MemoryView":498 * else: * if len(self.view.format) == 1: * return result[0] # <<<<<<<<<<<<<< * return result * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 498, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L6_except_return; /* "View.MemoryView":497 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ } /* "View.MemoryView":499 * if len(self.view.format) == 1: * return result[0] * return result # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ 
__Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "View.MemoryView":494 * try: * result = struct.unpack(self.view.format, bytesitem) * except struct.error: # <<<<<<<<<<<<<< * raise ValueError("Unable to convert item to object") * else: */ __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 494, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9); __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0; if (__pyx_t_8) { __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(2, 494, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_1); /* "View.MemoryView":495 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 495, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(2, 495, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ 
__Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "View.MemoryView":485 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesitem); __Pyx_XDECREF(__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":501 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_v_struct = NULL; char __pyx_v_c; PyObject *__pyx_v_bytesvalue = 0; Py_ssize_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; char *__pyx_t_11; char *__pyx_t_12; char *__pyx_t_13; char *__pyx_t_14; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; 
__Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":504 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef char c * cdef bytes bytesvalue */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":509 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ __pyx_t_2 = PyTuple_Check(__pyx_v_value); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "View.MemoryView":510 * * if isinstance(value, tuple): * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< * else: * bytesvalue = struct.pack(self.view.format, value) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if 
(!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(2, 510, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":509 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ goto __pyx_L3; } /* "View.MemoryView":512 * bytesvalue = struct.pack(self.view.format, *value) * else: * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< * * for i, c in enumerate(bytesvalue): */ /*else*/ { __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; __pyx_t_7 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 512, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 512, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; 
__Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); __Pyx_INCREF(__pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(2, 512, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "View.MemoryView":514 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = 0; if (unlikely(__pyx_v_bytesvalue == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); __PYX_ERR(2, 514, __pyx_L1_error) } __Pyx_INCREF(__pyx_v_bytesvalue); __pyx_t_10 = __pyx_v_bytesvalue; __pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10); __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10)); for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) { __pyx_t_11 = __pyx_t_14; __pyx_v_c = (__pyx_t_11[0]); /* "View.MemoryView":515 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ __pyx_v_i = __pyx_t_9; /* "View.MemoryView":514 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = (__pyx_t_9 + 1); /* "View.MemoryView":515 * * 
for i, c in enumerate(bytesvalue): itemp[i] = c  -- Cython source echo (continued) */
 /* store the current packed byte into the destination item buffer */
 (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c;
 }
 __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
 /* "View.MemoryView":501 -- tail of assign_item_from_object: function exit code */
 __pyx_r = Py_None; __Pyx_INCREF(Py_None);
 goto __pyx_L0;
 __pyx_L1_error:;
 __Pyx_XDECREF(__pyx_t_1);
 __Pyx_XDECREF(__pyx_t_4);
 __Pyx_XDECREF(__pyx_t_5);
 __Pyx_XDECREF(__pyx_t_6);
 __Pyx_XDECREF(__pyx_t_8);
 __Pyx_XDECREF(__pyx_t_10);
 __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
 __pyx_r = 0;
 __pyx_L0:;
 __Pyx_XDECREF(__pyx_v_struct);
 __Pyx_XDECREF(__pyx_v_bytesvalue);
 __Pyx_XGIVEREF(__pyx_r);
 __Pyx_RefNannyFinishContext();
 return __pyx_r;
}

/* "View.MemoryView":518 -- def __getbuffer__(self, Py_buffer *info, int flags):
 * Buffer-protocol export for the memoryview wrapper object: fills *info from
 * the wrapped Py_buffer (self->view), honoring the request flags. */
/* Python wrapper */
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
  /* delegate to the implementation function below */
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  Py_ssize_t *__pyx_t_4;
  char *__pyx_t_5;
  void *__pyx_t_6;
  int __pyx_t_7;
  Py_ssize_t __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  /* a NULL Py_buffer* target is invalid per the buffer protocol */
  if (__pyx_v_info == NULL) {
    PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
    return -1;
  }
  __Pyx_RefNannySetupContext("__getbuffer__", 0);
  /* initialize info->obj to None so the error path can clean it up uniformly */
  __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
  __Pyx_GIVEREF(__pyx_v_info->obj);
  /* "View.MemoryView":519 -- if flags & PyBUF_WRITABLE and self.view.readonly: */
  __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0);
  if (__pyx_t_2) {
  } else {
    __pyx_t_1 = __pyx_t_2;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_2 = (__pyx_v_self->view.readonly != 0);
  __pyx_t_1 = __pyx_t_2;
  __pyx_L4_bool_binop_done:;
  if (unlikely(__pyx_t_1)) {
    /* "View.MemoryView":520 -- raise ValueError("Cannot create writable memory view from read-only memoryview") */
    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 520, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __PYX_ERR(2, 520, __pyx_L1_error)
  }
  /* "View.MemoryView":522 -- if flags & PyBUF_ND: expose shape, else NULL */
  __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0);
  if (__pyx_t_1) {
    __pyx_t_4 = __pyx_v_self->view.shape;
    __pyx_v_info->shape = __pyx_t_4;
    goto __pyx_L6;
  }
  /*else*/ {
    __pyx_v_info->shape = NULL;
  }
  __pyx_L6:;
  /* "View.MemoryView":527 -- if flags & PyBUF_STRIDES: expose strides, else NULL */
  __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
  if (__pyx_t_1) {
    __pyx_t_4 = __pyx_v_self->view.strides;
    __pyx_v_info->strides = __pyx_t_4;
    goto __pyx_L7;
  }
  /*else*/ {
    __pyx_v_info->strides = NULL;
  }
  __pyx_L7:;
  /* "View.MemoryView":532 -- if flags & PyBUF_INDIRECT: expose suboffsets, else NULL */
  __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0);
  if (__pyx_t_1) {
    __pyx_t_4 = __pyx_v_self->view.suboffsets;
    __pyx_v_info->suboffsets = __pyx_t_4;
    goto __pyx_L8;
  }
  /*else*/ {
    __pyx_v_info->suboffsets = NULL;
  }
  __pyx_L8:;
  /* "View.MemoryView":537 -- if flags & PyBUF_FORMAT: expose format string, else NULL */
  __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
  if (__pyx_t_1) {
    __pyx_t_5 = __pyx_v_self->view.format;
    __pyx_v_info->format = __pyx_t_5;
    goto __pyx_L9;
  }
  /*else*/ {
    __pyx_v_info->format = NULL;
  }
  __pyx_L9:;
  /* "View.MemoryView":542-547 -- copy buf/ndim/itemsize/len/readonly unconditionally,
   * then make info->obj own a reference to self */
  __pyx_t_6 = __pyx_v_self->view.buf;
  __pyx_v_info->buf = __pyx_t_6;
  __pyx_t_7 = __pyx_v_self->view.ndim;
  __pyx_v_info->ndim = __pyx_t_7;
  __pyx_t_8 = __pyx_v_self->view.itemsize;
  __pyx_v_info->itemsize = __pyx_t_8;
  __pyx_t_8 = __pyx_v_self->view.len;
  __pyx_v_info->len = __pyx_t_8;
  __pyx_t_1 = __pyx_v_self->view.readonly;
  __pyx_v_info->readonly = __pyx_t_1;
  __Pyx_INCREF(((PyObject *)__pyx_v_self));
  __Pyx_GIVEREF(((PyObject *)__pyx_v_self));
  __Pyx_GOTREF(__pyx_v_info->obj);
  __Pyx_DECREF(__pyx_v_info->obj);
  __pyx_v_info->obj = ((PyObject *)__pyx_v_self);
  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  /* on error, drop the reference held in info->obj */
  if (__pyx_v_info->obj != NULL) {
    __Pyx_GOTREF(__pyx_v_info->obj);
    __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
  }
  goto __pyx_L2;
  __pyx_L0:;
  /* on success, clear the placeholder None reference if it was never replaced */
  if (__pyx_v_info->obj == Py_None) {
    __Pyx_GOTREF(__pyx_v_info->obj);
    __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
  }
  __pyx_L2:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":553 -- @property def T(self): transposed copy of the memoryview */
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
 __Pyx_RefNannyDeclarations
 __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
 __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
 /* function exit code */
 __Pyx_RefNannyFinishContext();
 return __pyx_r;
}

/* T.__get__: copy the memoryview, transpose the copy in place, return it */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  /* "View.MemoryView":554 -- cdef _memoryviewslice result = memoryview_copy(self) */
  __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 554, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  /* type-check: the copy must be a _memoryviewslice (or None) */
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(2, 554, __pyx_L1_error)
  __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1);
  __pyx_t_1 = 0;
  /* "View.MemoryView":555 -- transpose_memslice(&result.from_slice); 0 return signals failure */
  __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(2, 555, __pyx_L1_error)
  /* "View.MemoryView":556 -- return result */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(((PyObject *)__pyx_v_result));
  __pyx_r = ((PyObject *)__pyx_v_result);
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":559 -- @property def base(self): return self.obj (the exporting object) */
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  /* "View.MemoryView":560 -- return self.obj */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_self->obj);
  __pyx_r = __pyx_v_self->obj;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":563 -- @property def shape(self): tuple of per-dimension lengths */
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
 __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
 __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
 /* function exit code */
 __Pyx_RefNannyFinishContext();
 return __pyx_r;
}

/* shape.__get__: build tuple([length for length in self.view.shape[:self.view.ndim]]) */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  Py_ssize_t __pyx_v_length;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  Py_ssize_t *__pyx_t_2;
  Py_ssize_t *__pyx_t_3;
  Py_ssize_t *__pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  /* "View.MemoryView":564 -- list-comprehension over the first ndim shape entries */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 564, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
  for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
    __pyx_t_2 = __pyx_t_4;
    __pyx_v_length = (__pyx_t_2[0]);
    __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 564, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(2, 564, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  }
  /* convert the accumulated list to the returned tuple */
  __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 564, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_5;
  __pyx_t_5 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":567 -- @property def strides(self): raises if strides are not exposed */
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  Py_ssize_t __pyx_v_stride;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  Py_ssize_t *__pyx_t_3;
  Py_ssize_t *__pyx_t_4;
  Py_ssize_t *__pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  /* "View.MemoryView":568 -- if self.view.strides == NULL: */
  __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0);
  if (unlikely(__pyx_t_1)) {
    /* "View.MemoryView":570 -- raise ValueError("Buffer view does not expose strides") */
    __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 570, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
 __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
 __PYX_ERR(2, 570, __pyx_L1_error)
  }
  /* "View.MemoryView":572 -- return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 572, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim);
  for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
    __pyx_t_3 = __pyx_t_5;
    __pyx_v_stride = (__pyx_t_3[0]);
    __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 572, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(2, 572, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  }
  __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 572, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_6;
  __pyx_t_6 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":575 -- @property def suboffsets(self): (-1,)*ndim when not exposed */
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  Py_ssize_t __pyx_v_suboffset;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  Py_ssize_t *__pyx_t_4;
  Py_ssize_t *__pyx_t_5;
  Py_ssize_t *__pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  /* "View.MemoryView":576 -- if self.view.suboffsets == NULL: */
  __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0);
  if (__pyx_t_1) {
    /* "View.MemoryView":577 -- return (-1,) * self.view.ndim */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 577, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__15, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 577, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;
  }
  /* "View.MemoryView":579 -- return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 579, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim);
  for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) {
    __pyx_t_4 = __pyx_t_6;
    __pyx_v_suboffset = (__pyx_t_4[0]);
    __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 579, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(2, 579, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  }
  __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 579, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":582 -- @property def ndim(self): return self.view.ndim */
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
 __Pyx_RefNannyFinishContext();
 return __pyx_r;
}

/* ndim.__get__: wrap self.view.ndim as a Python int */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  /* "View.MemoryView":583 -- return self.view.ndim */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 583, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":586 -- @property def itemsize(self): return self.view.itemsize */
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* itemsize.__get__: wrap self.view.itemsize (Py_ssize_t) as a Python int */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  /* "View.MemoryView":587 -- return self.view.itemsize */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 587, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":590 -- @property def nbytes(self): return self.size * self.view.itemsize */
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  /* "View.MemoryView":591 -- return self.size * self.view.itemsize
   * (self.size goes through attribute lookup so the cached property below is used) */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 =
 __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 591, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 591, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  /* multiply element count by element size */
  __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 591, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":594 -- @property def size(self): product of shape entries, cached in self._size */
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_v_result = NULL;
  PyObject *__pyx_v_length = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  Py_ssize_t *__pyx_t_3;
  Py_ssize_t *__pyx_t_4;
  Py_ssize_t *__pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  /* "View.MemoryView":595 -- if self._size is None: (compute once, then cache) */
  __pyx_t_1 = (__pyx_v_self->_size == Py_None);
  __pyx_t_2 = (__pyx_t_1 != 0);
  if (__pyx_t_2) {
    /* "View.MemoryView":596 -- result = 1 */
    __Pyx_INCREF(__pyx_int_1);
    __pyx_v_result = __pyx_int_1;
    /* "View.MemoryView":598 -- for length in self.view.shape[:self.view.ndim]: */
    __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
    for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
      __pyx_t_3 = __pyx_t_5;
      __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 598, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
      __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6);
      __pyx_t_6 = 0;
      /* "View.MemoryView":599 -- result *= length */
      __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 599, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
      __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6);
      __pyx_t_6 = 0;
    }
    /* "View.MemoryView":601 -- self._size = result (cache for later calls) */
    __Pyx_INCREF(__pyx_v_result);
    __Pyx_GIVEREF(__pyx_v_result);
    __Pyx_GOTREF(__pyx_v_self->_size);
    __Pyx_DECREF(__pyx_v_self->_size);
    __pyx_v_self->_size = __pyx_v_result;
  }
  /* "View.MemoryView":603 -- return self._size */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_self->_size);
  __pyx_r = __pyx_v_self->_size;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_result);
  __Pyx_XDECREF(__pyx_v_length);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":605 -- def __len__(self): first dimension length, 0 for 0-dim views */
/* Python wrapper */
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) {
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) {
  Py_ssize_t __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  __Pyx_RefNannySetupContext("__len__", 0);
  /* "View.MemoryView":606 -- if self.view.ndim >= 1: return self.view.shape[0] */
  __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0);
  if (__pyx_t_1) {
    __pyx_r = (__pyx_v_self->view.shape[0]);
    goto __pyx_L0;
  }
  /* "View.MemoryView":609 -- return 0 */
  __pyx_r = 0;
  goto __pyx_L0;
  /*
"View.MemoryView":605 -- __len__ source echo (continued) */
 /* function exit code */
 __pyx_L0:;
 __Pyx_RefNannyFinishContext();
 return __pyx_r;
}

/* "View.MemoryView":611 -- def __repr__(self):
 * "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, id(self)) */
/* Python wrapper */
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);
  /* "View.MemoryView":612 -- fetch self.base.__class__.__name__ */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 612, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 612, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 612, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  /* "View.MemoryView":613 -- id(self) */
  __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 613, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  /* "View.MemoryView":612 -- pack (name, id) into a tuple and apply the format string */
  __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 612, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_GIVEREF(__pyx_t_1);
  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_2);
  PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2);
  __pyx_t_1 = 0;
  __pyx_t_2 = 0;
  __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 612, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":615 -- def __str__(self):
 * "<MemoryView of %r object>" % (self.base.__class__.__name__,) */
/* Python wrapper */
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__str__ (wrapper)", 0);
  __pyx_r =
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__str__", 0); /* "View.MemoryView":616 * * def __str__(self): * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":615 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); 
__Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":619 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_c_contig", 0); /* "View.MemoryView":622 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'C', self.view.ndim) * */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(2, 622, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":623 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< * * def is_f_contig(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = 
__Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 623, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":619 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":625 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_f_contig", 0); /* "View.MemoryView":628 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * 
return slice_is_contig(mslice[0], 'F', self.view.ndim) * */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(2, 628, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":629 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< * * def copy(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 629, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":625 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":631 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_mslice; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy", 0); /* "View.MemoryView":633 * def copy(self): * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &mslice) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); /* "View.MemoryView":635 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS * * slice_copy(self, &mslice) # <<<<<<<<<<<<<< * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); /* "View.MemoryView":636 * * slice_copy(self, &mslice) * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_C_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(2, 636, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":641 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< * * def copy_fortran(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 641, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":631 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* 
function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":643 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy_fortran", 0); /* "View.MemoryView":645 * def copy_fortran(self): * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &src) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); /* "View.MemoryView":647 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS * * slice_copy(self, &src) # <<<<<<<<<<<<<< * dst = slice_copy_contig(&src, "fortran", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, 
(&__pyx_v_src)); /* "View.MemoryView":648 * * slice_copy(self, &src) * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_F_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(2, 648, __pyx_L1_error) __pyx_v_dst = __pyx_t_1; /* "View.MemoryView":653 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 653, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":643 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct 
__pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } /* __reduce_cython__ impl (Cython-generated): memoryview objects are not picklable because their __cinit__ is non-trivial; this always raises TypeError (message held in __pyx_tuple__16) and returns NULL. */ static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(2, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 
0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(2, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":657 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo 
*/ static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { struct __pyx_memoryview_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); /* "View.MemoryView":658 * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< * result.typeinfo = typeinfo * return result */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_o); __Pyx_GIVEREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":659 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo # <<<<<<<<<<<<<< * return result * */ __pyx_v_result->typeinfo = __pyx_v_typeinfo; /* 
"View.MemoryView":660 * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_check') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":657 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* __pyx_memoryview_check (Cython-generated): C-level equivalent of isinstance(o, memoryview) — returns the result of __Pyx_TypeCheck against __pyx_memoryview_type; no refcount changes, never fails. */ /* "View.MemoryView":663 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("memoryview_check", 0); /* "View.MemoryView":664 * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): * return isinstance(o, memoryview) # <<<<<<<<<<<<<< * * cdef tuple _unellipsify(object index, int ndim): */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); __pyx_r = __pyx_t_1; goto __pyx_L0; /* "View.MemoryView":663 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":666 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * 
Replace all ellipses with full slices and fill incomplete indices with */ static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { PyObject *__pyx_v_tup = NULL; PyObject *__pyx_v_result = NULL; int __pyx_v_have_slices; int __pyx_v_seen_ellipsis; CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; PyObject *__pyx_v_item = NULL; Py_ssize_t __pyx_v_nslices; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_unellipsify", 0); /* "View.MemoryView":671 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ __pyx_t_1 = PyTuple_Check(__pyx_v_index); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":672 * """ * if not isinstance(index, tuple): * tup = (index,) # <<<<<<<<<<<<<< * else: * tup = index */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 672, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_index); __Pyx_GIVEREF(__pyx_v_index); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); __pyx_v_tup = __pyx_t_3; __pyx_t_3 = 0; /* "View.MemoryView":671 * full slices. 
* """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ goto __pyx_L3; } /* "View.MemoryView":674 * tup = (index,) * else: * tup = index # <<<<<<<<<<<<<< * * result = [] */ /*else*/ { __Pyx_INCREF(__pyx_v_index); __pyx_v_tup = __pyx_v_index; } __pyx_L3:; /* "View.MemoryView":676 * tup = index * * result = [] # <<<<<<<<<<<<<< * have_slices = False * seen_ellipsis = False */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 676, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_result = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":677 * * result = [] * have_slices = False # <<<<<<<<<<<<<< * seen_ellipsis = False * for idx, item in enumerate(tup): */ __pyx_v_have_slices = 0; /* "View.MemoryView":678 * result = [] * have_slices = False * seen_ellipsis = False # <<<<<<<<<<<<<< * for idx, item in enumerate(tup): * if item is Ellipsis: */ __pyx_v_seen_ellipsis = 0; /* "View.MemoryView":679 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ __Pyx_INCREF(__pyx_int_0); __pyx_t_3 = __pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 679, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(2, 679, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if 
(unlikely(!__pyx_t_7)) __PYX_ERR(2, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } else { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(2, 679, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } } else { __pyx_t_7 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(2, 679, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); __pyx_t_7 = 0; __Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; /* "View.MemoryView":680 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":681 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":682 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == 
((Py_ssize_t)-1))) __PYX_ERR(2, 682, __pyx_L1_error) __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { __Pyx_INCREF(__pyx_slice__18); __Pyx_GIVEREF(__pyx_slice__18); PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__18); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 682, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":683 * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True # <<<<<<<<<<<<<< * else: * result.append(slice(None)) */ __pyx_v_seen_ellipsis = 1; /* "View.MemoryView":681 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ goto __pyx_L7; } /* "View.MemoryView":685 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ /*else*/ { __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__18); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 685, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":686 * else: * result.append(slice(None)) * have_slices = True # <<<<<<<<<<<<<< * else: * if not isinstance(item, slice) and not PyIndex_Check(item): */ __pyx_v_have_slices = 1; /* "View.MemoryView":680 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ goto __pyx_L6; } /* "View.MemoryView":688 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ 
/*else*/ { __pyx_t_2 = PySlice_Check(__pyx_v_item); __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); __pyx_t_1 = __pyx_t_10; __pyx_L9_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":689 * else: * if not isinstance(item, slice) and not PyIndex_Check(item): * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< * * have_slices = have_slices or isinstance(item, slice) */ __pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(2, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_Raise(__pyx_t_11, 0, 0, 0); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __PYX_ERR(2, 689, __pyx_L1_error) /* "View.MemoryView":688 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ } /* "View.MemoryView":691 * raise TypeError("Cannot index with type '%s'" % type(item)) * * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< * result.append(item) * */ __pyx_t_10 = (__pyx_v_have_slices != 0); if (!__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = PySlice_Check(__pyx_v_item); __pyx_t_2 = (__pyx_t_10 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_have_slices = __pyx_t_1; /* "View.MemoryView":692 * * have_slices = have_slices or isinstance(item, slice) * result.append(item) # <<<<<<<<<<<<<< * * nslices = ndim - len(result) */ __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) 
__PYX_ERR(2, 692, __pyx_L1_error) } __pyx_L6:; /* "View.MemoryView":679 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":694 * result.append(item) * * nslices = ndim - len(result) # <<<<<<<<<<<<<< * if nslices: * result.extend([slice(None)] * nslices) */ __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(2, 694, __pyx_L1_error) __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); /* "View.MemoryView":695 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ __pyx_t_1 = (__pyx_v_nslices != 0); if (__pyx_t_1) { /* "View.MemoryView":696 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 696, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { __Pyx_INCREF(__pyx_slice__18); __Pyx_GIVEREF(__pyx_slice__18); PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__18); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 696, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":695 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ } /* "View.MemoryView":698 * result.extend([slice(None)] * nslices) * * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): */ __Pyx_XDECREF(__pyx_r); if (!__pyx_v_have_slices) { } else { __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L14_bool_binop_done; } __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; __pyx_L14_bool_binop_done:; __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(2, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = ((PyObject*)__pyx_t_11); __pyx_t_11 = 0; goto __pyx_L0; /* "View.MemoryView":666 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ 
/* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_tup); __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_idx); __Pyx_XDECREF(__pyx_v_item); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":700 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ /* NOTE(review): Cython-generated code (View.MemoryView utility); do not hand-edit — regenerate from the .pyx source instead. Scans the first ndim entries of the buffer's suboffsets array and raises ValueError ("Indirect dimensions not supported", via the pre-built __pyx_tuple__19 args) if any suboffset is >= 0, i.e. if the memoryview has an indirect dimension. Returns Py_None on success, NULL with an exception set on failure. */ static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); /* "View.MemoryView":701 * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") */ /* pointer-walk over suboffsets[0 .. ndim) */ __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_suboffset = (__pyx_t_1[0]); /* "View.MemoryView":702 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":703 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = 
__Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 703, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(2, 703, __pyx_L1_error) /* "View.MemoryView":702 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ } } /* "View.MemoryView":700 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ /* function exit code */ /* success: every checked dimension is direct — return None */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* error path: ValueError already set; NULL return propagates it */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":710 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { int __pyx_v_new_ndim; int __pyx_v_suboffset_dim; int __pyx_v_dim; __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; __Pyx_memviewslice *__pyx_v_p_src; struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; __Pyx_memviewslice *__pyx_v_p_dst; int *__pyx_v_p_suboffset_dim; Py_ssize_t __pyx_v_start; Py_ssize_t __pyx_v_stop; Py_ssize_t __pyx_v_step; int __pyx_v_have_start; int __pyx_v_have_stop; int __pyx_v_have_step; PyObject *__pyx_v_index = NULL; struct __pyx_memoryview_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = 
NULL; struct __pyx_memoryview_obj *__pyx_t_4; char *__pyx_t_5; int __pyx_t_6; Py_ssize_t __pyx_t_7; PyObject *(*__pyx_t_8)(PyObject *); PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memview_slice", 0); /* "View.MemoryView":711 * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< * cdef bint negative_step * cdef __Pyx_memviewslice src, dst */ __pyx_v_new_ndim = 0; __pyx_v_suboffset_dim = -1; /* "View.MemoryView":718 * * * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< * * cdef _memoryviewslice memviewsliceobj */ (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)))); /* "View.MemoryView":722 * cdef _memoryviewslice memviewsliceobj * * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(2, 722, __pyx_L1_error) } } #endif /* "View.MemoryView":724 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":725 * * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview # <<<<<<<<<<<<<< * p_src = &memviewsliceobj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(2, 725, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_memviewsliceobj = ((struct 
__pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":726 * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, &src) */ __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); /* "View.MemoryView":724 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ goto __pyx_L3; } /* "View.MemoryView":728 * p_src = &memviewsliceobj.from_slice * else: * slice_copy(memview, &src) # <<<<<<<<<<<<<< * p_src = &src * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); /* "View.MemoryView":729 * else: * slice_copy(memview, &src) * p_src = &src # <<<<<<<<<<<<<< * * */ __pyx_v_p_src = (&__pyx_v_src); } __pyx_L3:; /* "View.MemoryView":735 * * * dst.memview = p_src.memview # <<<<<<<<<<<<<< * dst.data = p_src.data * */ __pyx_t_4 = __pyx_v_p_src->memview; __pyx_v_dst.memview = __pyx_t_4; /* "View.MemoryView":736 * * dst.memview = p_src.memview * dst.data = p_src.data # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_v_p_src->data; __pyx_v_dst.data = __pyx_t_5; /* "View.MemoryView":741 * * * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< * cdef int *p_suboffset_dim = &suboffset_dim * cdef Py_ssize_t start, stop, step */ __pyx_v_p_dst = (&__pyx_v_dst); /* "View.MemoryView":742 * * cdef __Pyx_memviewslice *p_dst = &dst * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< * cdef Py_ssize_t start, stop, step * cdef bint have_start, have_stop, have_step */ __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); /* "View.MemoryView":746 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { __pyx_t_3 = __pyx_v_indices; 
__Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 746, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(2, 746, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } else { if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(2, 746, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } } else { __pyx_t_9 = __pyx_t_8(__pyx_t_3); if (unlikely(!__pyx_t_9)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(2, 746, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_9); } __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_dim = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":747 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); if (__pyx_t_2) { /* "View.MemoryView":751 * p_dst, p_src.shape[dim], 
p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< * 0, 0, 0, # have_{start,stop,step} * False) */ __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 751, __pyx_L1_error) /* "View.MemoryView":748 * for dim, index in enumerate(indices): * if PyIndex_Check(index): * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(2, 748, __pyx_L1_error) /* "View.MemoryView":747 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ goto __pyx_L6; } /* "View.MemoryView":754 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ __pyx_t_2 = (__pyx_v_index == Py_None); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":755 * False) * elif index is None: * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 */ (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; /* "View.MemoryView":756 * elif index is None: * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 */ (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; /* "View.MemoryView":757 * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< * new_ndim += 1 * else: */ 
(__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; /* "View.MemoryView":758 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 # <<<<<<<<<<<<<< * else: * start = index.start or 0 */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); /* "View.MemoryView":754 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ goto __pyx_L6; } /* "View.MemoryView":760 * new_ndim += 1 * else: * start = index.start or 0 # <<<<<<<<<<<<<< * stop = index.stop or 0 * step = index.step or 0 */ /*else*/ { __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 760, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 760, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L7_bool_binop_done; } __pyx_t_10 = 0; __pyx_L7_bool_binop_done:; __pyx_v_start = __pyx_t_10; /* "View.MemoryView":761 * else: * start = index.start or 0 * stop = index.stop or 0 # <<<<<<<<<<<<<< * step = index.step or 0 * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 761, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 761, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 761, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = 0; 
__pyx_L9_bool_binop_done:; __pyx_v_stop = __pyx_t_10; /* "View.MemoryView":762 * start = index.start or 0 * stop = index.stop or 0 * step = index.step or 0 # <<<<<<<<<<<<<< * * have_start = index.start is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 762, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 762, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 762, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = 0; __pyx_L11_bool_binop_done:; __pyx_v_step = __pyx_t_10; /* "View.MemoryView":764 * step = index.step or 0 * * have_start = index.start is not None # <<<<<<<<<<<<<< * have_stop = index.stop is not None * have_step = index.step is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 764, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_start = __pyx_t_1; /* "View.MemoryView":765 * * have_start = index.start is not None * have_stop = index.stop is not None # <<<<<<<<<<<<<< * have_step = index.step is not None * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 765, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_stop = __pyx_t_1; /* "View.MemoryView":766 * have_start = index.start is not None * have_stop = index.stop is not None * have_step = index.step is not None # <<<<<<<<<<<<<< * * slice_memviewslice( */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if 
(unlikely(!__pyx_t_9)) __PYX_ERR(2, 766, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_step = __pyx_t_1; /* "View.MemoryView":768 * have_step = index.step is not None * * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(2, 768, __pyx_L1_error) /* "View.MemoryView":774 * have_start, have_stop, have_step, * True) * new_ndim += 1 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); } __pyx_L6:; /* "View.MemoryView":746 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":776 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":777 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":778 * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< * 
memviewsliceobj.to_dtype_func, * memview.dtype_is_object) */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(2, 778, __pyx_L1_error) } /* "View.MemoryView":779 * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< * memview.dtype_is_object) * else: */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(2, 779, __pyx_L1_error) } /* "View.MemoryView":777 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 777, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(2, 777, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":776 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ } /* "View.MemoryView":782 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ /*else*/ { __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":783 * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "View.MemoryView":782 * memview.dtype_is_object) * 
else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(2, 782, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":710 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); __Pyx_XDECREF(__pyx_v_index); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":807 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { Py_ssize_t __pyx_v_new_shape; int __pyx_v_negative_step; int __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":827 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":829 * if not is_slice: * * if start < 0: # 
<<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ __pyx_t_1 = ((__pyx_v_start < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":830 * * if start < 0: * start += shape # <<<<<<<<<<<<<< * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":829 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ } /* "View.MemoryView":831 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ __pyx_t_1 = (0 <= __pyx_v_start); if (__pyx_t_1) { __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); } __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":832 * start += shape * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< * else: * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(2, 832, __pyx_L1_error) /* "View.MemoryView":831 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ } /* "View.MemoryView":827 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ goto __pyx_L3; } /* "View.MemoryView":835 * else: * * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< * * if have_step and step == 0: */ /*else*/ { __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L6_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step < 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L6_bool_binop_done:; __pyx_v_negative_step = __pyx_t_2; /* "View.MemoryView":837 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, 
"Step may not be zero (axis %d)", dim) * */ __pyx_t_1 = (__pyx_v_have_step != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L9_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step == 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L9_bool_binop_done:; if (__pyx_t_2) { /* "View.MemoryView":838 * * if have_step and step == 0: * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(2, 838, __pyx_L1_error) /* "View.MemoryView":837 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ } /* "View.MemoryView":841 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ __pyx_t_2 = (__pyx_v_have_start != 0); if (__pyx_t_2) { /* "View.MemoryView":842 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":843 * if have_start: * if start < 0: * start += shape # <<<<<<<<<<<<<< * if start < 0: * start = 0 */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":844 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":845 * start += shape * if start < 0: * start = 0 # <<<<<<<<<<<<<< * elif start >= shape: * if negative_step: */ __pyx_v_start = 0; /* "View.MemoryView":844 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ } /* "View.MemoryView":842 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ goto __pyx_L12; } /* "View.MemoryView":846 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if 
negative_step: * start = shape - 1 */ __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":847 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":848 * elif start >= shape: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = shape */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":847 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L14; } /* "View.MemoryView":850 * start = shape - 1 * else: * start = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ /*else*/ { __pyx_v_start = __pyx_v_shape; } __pyx_L14:; /* "View.MemoryView":846 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ } __pyx_L12:; /* "View.MemoryView":841 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ goto __pyx_L11; } /* "View.MemoryView":852 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":853 * else: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = 0 */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":852 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L15; } /* "View.MemoryView":855 * start = shape - 1 * else: * start = 0 # <<<<<<<<<<<<<< * * if have_stop: */ /*else*/ { __pyx_v_start = 0; } __pyx_L15:; } __pyx_L11:; /* "View.MemoryView":857 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ __pyx_t_2 = (__pyx_v_have_stop != 0); if (__pyx_t_2) { /* "View.MemoryView":858 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if 
(__pyx_t_2) { /* "View.MemoryView":859 * if have_stop: * if stop < 0: * stop += shape # <<<<<<<<<<<<<< * if stop < 0: * stop = 0 */ __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); /* "View.MemoryView":860 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":861 * stop += shape * if stop < 0: * stop = 0 # <<<<<<<<<<<<<< * elif stop > shape: * stop = shape */ __pyx_v_stop = 0; /* "View.MemoryView":860 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ } /* "View.MemoryView":858 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ goto __pyx_L17; } /* "View.MemoryView":862 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":863 * stop = 0 * elif stop > shape: * stop = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_stop = __pyx_v_shape; /* "View.MemoryView":862 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ } __pyx_L17:; /* "View.MemoryView":857 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ goto __pyx_L16; } /* "View.MemoryView":865 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":866 * else: * if negative_step: * stop = -1 # <<<<<<<<<<<<<< * else: * stop = shape */ __pyx_v_stop = -1L; /* "View.MemoryView":865 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ goto __pyx_L19; } /* "View.MemoryView":868 * stop = -1 * else: * stop = shape # <<<<<<<<<<<<<< * * if not have_step: */ /*else*/ { __pyx_v_stop = __pyx_v_shape; } __pyx_L19:; } __pyx_L16:; /* "View.MemoryView":870 * stop = shape * * if not 
have_step: # <<<<<<<<<<<<<< * step = 1 * */ __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":871 * * if not have_step: * step = 1 # <<<<<<<<<<<<<< * * */ __pyx_v_step = 1; /* "View.MemoryView":870 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ } /* "View.MemoryView":875 * * with cython.cdivision(True): * new_shape = (stop - start) // step # <<<<<<<<<<<<<< * * if (stop - start) - step * new_shape: */ __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); /* "View.MemoryView":877 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); if (__pyx_t_2) { /* "View.MemoryView":878 * * if (stop - start) - step * new_shape: * new_shape += 1 # <<<<<<<<<<<<<< * * if new_shape < 0: */ __pyx_v_new_shape = (__pyx_v_new_shape + 1); /* "View.MemoryView":877 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ } /* "View.MemoryView":880 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":881 * * if new_shape < 0: * new_shape = 0 # <<<<<<<<<<<<<< * * */ __pyx_v_new_shape = 0; /* "View.MemoryView":880 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ } /* "View.MemoryView":884 * * * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset */ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); /* "View.MemoryView":885 * * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< * dst.suboffsets[new_ndim] = suboffset * */ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; /* "View.MemoryView":886 * dst.strides[new_ndim] = stride * step 
* dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< * * */ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; } __pyx_L3:; /* "View.MemoryView":889 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":890 * * if suboffset_dim[0] < 0: * dst.data += start * stride # <<<<<<<<<<<<<< * else: * dst.suboffsets[suboffset_dim[0]] += start * stride */ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); /* "View.MemoryView":889 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ goto __pyx_L23; } /* "View.MemoryView":892 * dst.data += start * stride * else: * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< * * if suboffset >= 0: */ /*else*/ { __pyx_t_3 = (__pyx_v_suboffset_dim[0]); (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); } __pyx_L23:; /* "View.MemoryView":894 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":895 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":896 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":897 * if not is_slice: * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " */ __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + 
__pyx_v_suboffset); /* "View.MemoryView":896 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ goto __pyx_L26; } /* "View.MemoryView":899 * dst.data = (<char **> dst.data)[0] + suboffset * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< * "must be indexed and not sliced", dim) * else: */ /*else*/ { /* "View.MemoryView":900 * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< * else: * suboffset_dim[0] = new_ndim */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(2, 899, __pyx_L1_error) } __pyx_L26:; /* "View.MemoryView":895 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ goto __pyx_L25; } /* "View.MemoryView":902 * "must be indexed and not sliced", dim) * else: * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< * * return 0 */ /*else*/ { (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; } __pyx_L25:; /* "View.MemoryView":894 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ } /* "View.MemoryView":904 * suboffset_dim[0] = new_ndim * * return 0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":807 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef 
WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":910 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_suboffset; Py_ssize_t __pyx_v_itemsize; char *__pyx_v_resultp; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("pybuffer_index", 0); /* "View.MemoryView":912 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< * cdef Py_ssize_t itemsize = view.itemsize * cdef char *resultp */ __pyx_v_suboffset = -1L; /* "View.MemoryView":913 * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< * cdef char *resultp * */ __pyx_t_1 = __pyx_v_view->itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":916 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":917 * * if view.ndim == 0: * shape = view.len / itemsize # <<<<<<<<<<<<<< * stride = itemsize * else: */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(2, 917, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && 
unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(2, 917, __pyx_L1_error) } __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize); /* "View.MemoryView":918 * if view.ndim == 0: * shape = view.len / itemsize * stride = itemsize # <<<<<<<<<<<<<< * else: * shape = view.shape[dim] */ __pyx_v_stride = __pyx_v_itemsize; /* "View.MemoryView":916 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ goto __pyx_L3; } /* "View.MemoryView":920 * stride = itemsize * else: * shape = view.shape[dim] # <<<<<<<<<<<<<< * stride = view.strides[dim] * if view.suboffsets != NULL: */ /*else*/ { __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); /* "View.MemoryView":921 * else: * shape = view.shape[dim] * stride = view.strides[dim] # <<<<<<<<<<<<<< * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] */ __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); /* "View.MemoryView":922 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":923 * stride = view.strides[dim] * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< * * if index < 0: */ __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); /* "View.MemoryView":922 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ } } __pyx_L3:; /* "View.MemoryView":925 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":926 * * if index < 0: * index += view.shape[dim] # 
<<<<<<<<<<<<<< * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */ __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); /* "View.MemoryView":927 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":928 * index += view.shape[dim] * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * if index >= shape: */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(2, 928, __pyx_L1_error) /* "View.MemoryView":927 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":925 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ } /* "View.MemoryView":930 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":931 * * if index >= shape: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * 
resultp = bufp + index * stride */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(2, 931, __pyx_L1_error) /* "View.MemoryView":930 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":933 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * resultp = bufp + index * stride # <<<<<<<<<<<<<< * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset */ __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); /* "View.MemoryView":934 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":935 * resultp = bufp + index * stride * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< * * return resultp */ __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); /* "View.MemoryView":934 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ } /* "View.MemoryView":937 * resultp = (<char **> resultp)[0] + suboffset * * return resultp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_resultp; goto __pyx_L0; /* "View.MemoryView":910 * * 
@cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":943 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { int __pyx_v_ndim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; int __pyx_v_i; int __pyx_v_j; int __pyx_r; int __pyx_t_1; Py_ssize_t *__pyx_t_2; long __pyx_t_3; long __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":944 * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< * * cdef Py_ssize_t *shape = memslice.shape */ __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; __pyx_v_ndim = __pyx_t_1; /* "View.MemoryView":946 * cdef int ndim = memslice.memview.view.ndim * * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< * cdef Py_ssize_t *strides = memslice.strides * */ __pyx_t_2 = __pyx_v_memslice->shape; __pyx_v_shape = __pyx_t_2; /* "View.MemoryView":947 * * cdef Py_ssize_t *shape = memslice.shape * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_v_memslice->strides; __pyx_v_strides = __pyx_t_2; /* "View.MemoryView":951 * * cdef int i, j * for i in range(ndim / 2): # <<<<<<<<<<<<<< * j = ndim - 1 - i * 
strides[i], strides[j] = strides[j], strides[i] */ __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2); __pyx_t_4 = __pyx_t_3; for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":952 * cdef int i, j * for i in range(ndim / 2): * j = ndim - 1 - i # <<<<<<<<<<<<<< * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] */ __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); /* "View.MemoryView":953 * for i in range(ndim / 2): * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< * shape[i], shape[j] = shape[j], shape[i] * */ __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]); __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]); (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5; (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6; /* "View.MemoryView":954 * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: */ __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]); __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]); (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6; (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5; /* "View.MemoryView":956 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); if (!__pyx_t_8) { } else { __pyx_t_7 = __pyx_t_8; goto __pyx_L6_bool_binop_done; } __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); __pyx_t_7 = __pyx_t_8; __pyx_L6_bool_binop_done:; if (__pyx_t_7) { /* "View.MemoryView":957 * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< * * return 1 */ __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview 
with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 957, __pyx_L1_error) /* "View.MemoryView":956 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ } } /* "View.MemoryView":959 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * * return 1 # <<<<<<<<<<<<<< * * */ __pyx_r = 1; goto __pyx_L0; /* "View.MemoryView":943 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":976 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* Python wrapper */ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":977 * * def __dealloc__(self): * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # 
<<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); /* "View.MemoryView":976 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":979 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":980 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":981 * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: * return self.to_object_func(itemp) # <<<<<<<<<<<<<< * else: * return memoryview.convert_item_to_object(self, itemp) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 981, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":980 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ } /* "View.MemoryView":983 * return self.to_object_func(itemp) * else: * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object 
value): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 983, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /* "View.MemoryView":979 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":985 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":986 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":987 * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< * else: * memoryview.assign_item_from_object(self, itemp, value) */ __pyx_t_2 = 
__pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(2, 987, __pyx_L1_error) /* "View.MemoryView":986 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ goto __pyx_L3; } /* "View.MemoryView":989 * self.to_dtype_func(itemp, value) * else: * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< * * @property */ /*else*/ { __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 989, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "View.MemoryView":985 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":992 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); 
return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":993 * @property * def base(self): * return self.from_object # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->from_object); __pyx_r = __pyx_v_self->from_object; goto __pyx_L0; /* "View.MemoryView":992 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to 
non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(2, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; 
__Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(2, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":999 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_TypeInfo *__pyx_t_4; Py_buffer __pyx_t_5; Py_ssize_t *__pyx_t_6; Py_ssize_t *__pyx_t_7; 
Py_ssize_t *__pyx_t_8; Py_ssize_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_fromslice", 0); /* "View.MemoryView":1007 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); if (__pyx_t_1) { /* "View.MemoryView":1008 * * if <PyObject *> memviewslice.memview == Py_None: * return None # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "View.MemoryView":1007 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ } /* "View.MemoryView":1013 * * * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< * * result.from_slice = memviewslice */ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1015 * result = _memoryviewslice(None, 0, dtype_is_object) * * result.from_slice = memviewslice # <<<<<<<<<<<<<< * __PYX_INC_MEMVIEW(&memviewslice, 1) * */ __pyx_v_result->from_slice = __pyx_v_memviewslice; /* "View.MemoryView":1016 * * 
result.from_slice = memviewslice * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< * * result.from_object = (<memoryview> memviewslice.memview).base */ __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); /* "View.MemoryView":1018 * __PYX_INC_MEMVIEW(&memviewslice, 1) * * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<< * result.typeinfo = memviewslice.memview.typeinfo * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1018, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_GOTREF(__pyx_v_result->from_object); __Pyx_DECREF(__pyx_v_result->from_object); __pyx_v_result->from_object = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":1019 * * result.from_object = (<memoryview> memviewslice.memview).base * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< * * result.view = memviewslice.memview.view */ __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; /* "View.MemoryView":1021 * result.typeinfo = memviewslice.memview.typeinfo * * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim */ __pyx_t_5 = __pyx_v_memviewslice.memview->view; __pyx_v_result->__pyx_base.view = __pyx_t_5; /* "View.MemoryView":1022 * * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None */ __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); /* "View.MemoryView":1023 * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim # <<<<<<<<<<<<<< * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; /* "View.MemoryView":1024 * result.view.buf = <void *> 
memviewslice.data * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; /* "View.MemoryView":1025 * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: */ Py_INCREF(Py_None); /* "View.MemoryView":1027 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0); if (__pyx_t_1) { /* "View.MemoryView":1028 * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< * else: * result.flags = PyBUF_RECORDS_RO */ __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; /* "View.MemoryView":1027 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ goto __pyx_L4; } /* "View.MemoryView":1030 * result.flags = PyBUF_RECORDS * else: * result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<< * * result.view.shape = <Py_ssize_t *> result.from_slice.shape */ /*else*/ { __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO; } __pyx_L4:; /* "View.MemoryView":1032 * result.flags = PyBUF_RECORDS_RO * * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< * result.view.strides = <Py_ssize_t *> result.from_slice.strides * */ __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); /* "View.MemoryView":1033 * * result.view.shape = <Py_ssize_t *> result.from_slice.shape * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< * * */ __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); /* 
"View.MemoryView":1036 * * * result.view.suboffsets = NULL # <<<<<<<<<<<<<< * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: */ __pyx_v_result->__pyx_base.view.suboffsets = NULL; /* "View.MemoryView":1037 * * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets */ __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_v_suboffset = (__pyx_t_6[0]); /* "View.MemoryView":1038 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1039 * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< * break * */ __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); /* "View.MemoryView":1040 * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break # <<<<<<<<<<<<<< * * result.view.len = result.view.itemsize */ goto __pyx_L6_break; /* "View.MemoryView":1038 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ } } __pyx_L6_break:; /* "View.MemoryView":1042 * break * * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< * for length in result.view.shape[:ndim]: * result.view.len *= length */ __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; /* 
"View.MemoryView":1043 * * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< * result.view.len *= length * */ __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1043, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1044 * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: * result.view.len *= length # <<<<<<<<<<<<<< * * result.to_object_func = to_object_func */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 1044, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; } /* "View.MemoryView":1046 * result.view.len *= length * * result.to_object_func = to_object_func # <<<<<<<<<<<<<< * result.to_dtype_func = to_dtype_func * */ __pyx_v_result->to_object_func = __pyx_v_to_object_func; /* "View.MemoryView":1047 * * result.to_object_func = to_object_func * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; /* "View.MemoryView":1049 * result.to_dtype_func = to_dtype_func * * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_get_slice_from_memoryview') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject 
*)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":999 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1052 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj */ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; __Pyx_memviewslice *__pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_slice_from_memview", 0); /* "View.MemoryView":1055 * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1056 * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): * obj = memview # <<<<<<<<<<<<<< * return &obj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(2, 1056, __pyx_L1_error) __pyx_t_3 = 
((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":1057 * if isinstance(memview, _memoryviewslice): * obj = memview * return &obj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, mslice) */ __pyx_r = (&__pyx_v_obj->from_slice); goto __pyx_L0; /* "View.MemoryView":1055 * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ } /* "View.MemoryView":1059 * return &obj.from_slice * else: * slice_copy(memview, mslice) # <<<<<<<<<<<<<< * return mslice * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); /* "View.MemoryView":1060 * else: * slice_copy(memview, mslice) * return mslice # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_slice_copy') */ __pyx_r = __pyx_v_mslice; goto __pyx_L0; } /* "View.MemoryView":1052 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_obj); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { int __pyx_v_dim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; Py_ssize_t *__pyx_v_suboffsets; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; int __pyx_t_2; int 
__pyx_t_3; int __pyx_t_4; Py_ssize_t __pyx_t_5; __Pyx_RefNannySetupContext("slice_copy", 0); /* "View.MemoryView":1067 * cdef (Py_ssize_t*) shape, strides, suboffsets * * shape = memview.view.shape # <<<<<<<<<<<<<< * strides = memview.view.strides * suboffsets = memview.view.suboffsets */ __pyx_t_1 = __pyx_v_memview->view.shape; __pyx_v_shape = __pyx_t_1; /* "View.MemoryView":1068 * * shape = memview.view.shape * strides = memview.view.strides # <<<<<<<<<<<<<< * suboffsets = memview.view.suboffsets * */ __pyx_t_1 = __pyx_v_memview->view.strides; __pyx_v_strides = __pyx_t_1; /* "View.MemoryView":1069 * shape = memview.view.shape * strides = memview.view.strides * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< * * dst.memview = <__pyx_memoryview *> memview */ __pyx_t_1 = __pyx_v_memview->view.suboffsets; __pyx_v_suboffsets = __pyx_t_1; /* "View.MemoryView":1071 * suboffsets = memview.view.suboffsets * * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< * dst.data = <char *> memview.view.buf * */ __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); /* "View.MemoryView":1072 * * dst.memview = <__pyx_memoryview *> memview * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< * * for dim in range(memview.view.ndim): */ __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); /* "View.MemoryView":1074 * dst.data = <char *> memview.view.buf * * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] */ __pyx_t_2 = __pyx_v_memview->view.ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_dim = __pyx_t_4; /* "View.MemoryView":1075 * * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 */ (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); /* "View.MemoryView":1076 * for dim in 
range(memview.view.ndim): * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 * */ (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); /* "View.MemoryView":1077 * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object') */ if ((__pyx_v_suboffsets != 0)) { __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]); } else { __pyx_t_5 = -1L; } (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5; } /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1080 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { __Pyx_memviewslice __pyx_v_memviewslice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy", 0); /* "View.MemoryView":1083 * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< * return memoryview_copy_from_slice(memview, &memviewslice) * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); /* "View.MemoryView":1084 * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object_from_slice') */ 
__Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 1084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":1080 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1087 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { PyObject *(*__pyx_v_to_object_func)(char *); int (*__pyx_v_to_dtype_func)(char *, PyObject *); PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *(*__pyx_t_3)(char *); int (*__pyx_t_4)(char *, PyObject *); PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); /* "View.MemoryView":1094 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1095 * * if 
isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: */ __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; __pyx_v_to_object_func = __pyx_t_3; /* "View.MemoryView":1096 * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< * else: * to_object_func = NULL */ __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; __pyx_v_to_dtype_func = __pyx_t_4; /* "View.MemoryView":1094 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ goto __pyx_L3; } /* "View.MemoryView":1098 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: * to_object_func = NULL # <<<<<<<<<<<<<< * to_dtype_func = NULL * */ /*else*/ { __pyx_v_to_object_func = NULL; /* "View.MemoryView":1099 * else: * to_object_func = NULL * to_dtype_func = NULL # <<<<<<<<<<<<<< * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, */ __pyx_v_to_dtype_func = NULL; } __pyx_L3:; /* "View.MemoryView":1101 * to_dtype_func = NULL * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< * to_object_func, to_dtype_func, * memview.dtype_is_object) */ __Pyx_XDECREF(__pyx_r); /* "View.MemoryView":1103 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, * to_object_func, to_dtype_func, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 1101, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":1087 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1109 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { Py_ssize_t __pyx_r; int __pyx_t_1; /* "View.MemoryView":1110 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ __pyx_t_1 = ((__pyx_v_arg < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1111 * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: * return -arg # <<<<<<<<<<<<<< * else: * return arg */ __pyx_r = (-__pyx_v_arg); goto __pyx_L0; /* "View.MemoryView":1110 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ } /* "View.MemoryView":1113 * return -arg * else: * return arg # <<<<<<<<<<<<<< * * @cname('__pyx_get_best_slice_order') */ /*else*/ { __pyx_r = __pyx_v_arg; goto __pyx_L0; } /* "View.MemoryView":1109 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1116 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_c_stride; Py_ssize_t __pyx_v_f_stride; char __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1121 * """ * cdef int i * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t f_stride = 0 * */ __pyx_v_c_stride = 0; /* "View.MemoryView":1122 * cdef int i * cdef Py_ssize_t c_stride = 0 * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_f_stride = 0; /* "View.MemoryView":1124 * cdef Py_ssize_t f_stride = 0 * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1125 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1126 * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1127 * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * for i in range(ndim): */ goto __pyx_L4_break; /* "View.MemoryView":1125 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ } } __pyx_L4_break:; /* "View.MemoryView":1129 * break * * for i in range(ndim): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] */ __pyx_t_1 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_1; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1130 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * 
break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1131 * for i in range(ndim): * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1132 * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): */ goto __pyx_L7_break; /* "View.MemoryView":1130 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ } } __pyx_L7_break:; /* "View.MemoryView":1134 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1135 * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): * return 'C' # <<<<<<<<<<<<<< * else: * return 'F' */ __pyx_r = 'C'; goto __pyx_L0; /* "View.MemoryView":1134 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ } /* "View.MemoryView":1137 * return 'C' * else: * return 'F' # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ /*else*/ { __pyx_r = 'F'; goto __pyx_L0; } /* "View.MemoryView":1116 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
*/ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1140 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; Py_ssize_t __pyx_v_dst_extent; Py_ssize_t __pyx_v_src_stride; Py_ssize_t __pyx_v_dst_stride; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; /* "View.MemoryView":1147 * * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] */ __pyx_v_src_extent = (__pyx_v_src_shape[0]); /* "View.MemoryView":1148 * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] */ __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); /* "View.MemoryView":1149 * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_stride = dst_strides[0] * */ __pyx_v_src_stride = (__pyx_v_src_strides[0]); /* "View.MemoryView":1150 * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); /* "View.MemoryView":1152 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # 
<<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } /* "View.MemoryView":1154 * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize * dst_extent) * else: */ __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); if (__pyx_t_2) { __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); } __pyx_t_3 = (__pyx_t_2 != 0); __pyx_t_1 = __pyx_t_3; __pyx_L5_bool_binop_done:; /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ if (__pyx_t_1) { /* "View.MemoryView":1155 * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent))); /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ goto __pyx_L4; } /* "View.MemoryView":1157 * memcpy(dst_data, src_data, itemsize * dst_extent) * else: * for 
i in range(dst_extent): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize) * src_data += src_stride */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1158 * else: * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< * src_data += src_stride * dst_data += dst_stride */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize)); /* "View.MemoryView":1159 * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * else: */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1160 * memcpy(dst_data, src_data, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L4:; /* "View.MemoryView":1152 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ goto __pyx_L3; } /* "View.MemoryView":1162 * dst_data += dst_stride * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * _copy_strided_to_strided(src_data, src_strides + 1, * dst_data, dst_strides + 1, */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1163 * else: * for i in range(dst_extent): * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< * dst_data, dst_strides + 1, * src_shape + 1, dst_shape + 1, */ _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); /* "View.MemoryView":1167 * src_shape + 1, 
dst_shape + 1, * ndim - 1, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * */
/* NOTE(review): everything below is Cython-generated "View.MemoryView" utility code.
 * Do not hand-edit; change the originating .pyx/.pxd and regenerate with Cython.
 * The numbered comments map each C statement back to its View.MemoryView source line. */
        __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
        /* "View.MemoryView":1168: dst_data += dst_stride */
        __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
      }
  }
  __pyx_L3:;
  /* "View.MemoryView":1140: cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, ... */
  /* function exit code */
}

/* "View.MemoryView":1170: cdef void copy_strided_to_strided(__Pyx_memviewslice *src, ... nogil */
/* Copies the contents of slice 'src' into slice 'dst' by delegating to the
 * recursive strided-copy helper with the slices' data/strides/shape arrays. */
static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) {

  /* "View.MemoryView":1173: _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, ...) */
  _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize);

  /* function exit code */
}

/* "View.MemoryView":1177: cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil */
/* Returns the number of bytes occupied by the slice:
 * itemsize * product(shape[0..ndim-1]). */
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) {
  Py_ssize_t __pyx_v_shape;
  Py_ssize_t __pyx_v_size;
  Py_ssize_t __pyx_r;
  Py_ssize_t __pyx_t_1;
  Py_ssize_t *__pyx_t_2;
  Py_ssize_t *__pyx_t_3;
  Py_ssize_t *__pyx_t_4;

  /* "View.MemoryView":1179: cdef Py_ssize_t shape, size = src.memview.view.itemsize */
  __pyx_t_1 = __pyx_v_src->memview->view.itemsize;
  __pyx_v_size = __pyx_t_1;

  /* "View.MemoryView":1181: for shape in src.shape[:ndim]: */
  __pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim);
  for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
    __pyx_t_2 = __pyx_t_4;
    __pyx_v_shape = (__pyx_t_2[0]);

    /* "View.MemoryView":1182: size *= shape */
    __pyx_v_size = (__pyx_v_size * __pyx_v_shape);
  }

  /* "View.MemoryView":1184: return size */
  __pyx_r = __pyx_v_size;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "View.MemoryView":1187: cdef Py_ssize_t fill_contig_strides_array(...) nogil */
/* Fills 'strides' for a contiguous array of the given shape: ascending for
 * Fortran order ('F'), descending for C order; returns the accumulated total
 * stride (the byte extent of the whole array). */
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) {
  int __pyx_v_idx;
  Py_ssize_t __pyx_r;
  int __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;

  /* "View.MemoryView":1196: if order == 'F': */
  __pyx_t_1 = ((__pyx_v_order == 'F') != 0);
  if (__pyx_t_1) {

    /* "View.MemoryView":1197: for idx in range(ndim): */
    __pyx_t_2 = __pyx_v_ndim;
    __pyx_t_3 = __pyx_t_2;
    for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
      __pyx_v_idx = __pyx_t_4;

      /* "View.MemoryView":1198: strides[idx] = stride */
      (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;

      /* "View.MemoryView":1199: stride *= shape[idx] */
      __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
    }
    goto __pyx_L3;
  }

  /* "View.MemoryView":1201: for idx in range(ndim - 1, -1, -1): */
  /*else*/ {
    for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) {
      __pyx_v_idx = __pyx_t_2;

      /* "View.MemoryView":1202: strides[idx] = stride */
      (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;

      /* "View.MemoryView":1203: stride *= shape[idx] */
      __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
    }
  }
  __pyx_L3:;

  /* "View.MemoryView":1205: return stride */
  __pyx_r = __pyx_v_stride;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "View.MemoryView":1208: cdef void *copy_data_to_temp(...) */
/* Allocates a contiguous buffer with malloc(), fills 'tmpslice' to describe it
 * (same shape as 'src', suboffsets -1, contiguous strides in 'order', stride 0
 * for length-1 dims), and copies 'src' into it (memcpy fast path when already
 * contiguous). The caller owns the returned pointer (released with free()).
 * On allocation failure raises MemoryError via _err (acquiring the GIL) and
 * returns NULL. */
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) {
  int __pyx_v_i;
  void *__pyx_v_result;
  size_t __pyx_v_itemsize;
  size_t __pyx_v_size;
  void *__pyx_r;
  Py_ssize_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  struct __pyx_memoryview_obj *__pyx_t_4;
  int __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "View.MemoryView":1219: cdef size_t itemsize = src.memview.view.itemsize */
  __pyx_t_1 = __pyx_v_src->memview->view.itemsize;
  __pyx_v_itemsize = __pyx_t_1;

  /* "View.MemoryView":1220: cdef size_t size = slice_get_size(src, ndim) */
  __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim);

  /* "View.MemoryView":1222: result = malloc(size) */
  __pyx_v_result = malloc(__pyx_v_size);

  /* "View.MemoryView":1223: if not result: */
  __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0);
  if (__pyx_t_2) {

    /* "View.MemoryView":1224: _err(MemoryError, NULL) */
    __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(2, 1224, __pyx_L1_error)
  }

  /* "View.MemoryView":1227: tmpslice.data = <char *> result */
  __pyx_v_tmpslice->data = ((char *)__pyx_v_result);

  /* "View.MemoryView":1228: tmpslice.memview = src.memview */
  __pyx_t_4 = __pyx_v_src->memview;
  __pyx_v_tmpslice->memview = __pyx_t_4;

  /* "View.MemoryView":1229: for i in range(ndim): */
  __pyx_t_3 = __pyx_v_ndim;
  __pyx_t_5 = __pyx_t_3;
  for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
    __pyx_v_i = __pyx_t_6;

    /* "View.MemoryView":1230: tmpslice.shape[i] = src.shape[i] */
    (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]);

    /* "View.MemoryView":1231: tmpslice.suboffsets[i] = -1 */
    (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L;
  }

  /* "View.MemoryView":1233: fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, ndim, order) */
  (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order));

  /* "View.MemoryView":1237: for i in range(ndim): */
  __pyx_t_3 = __pyx_v_ndim;
  __pyx_t_5 = __pyx_t_3;
  for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
    __pyx_v_i = __pyx_t_6;

    /* "View.MemoryView":1238: if tmpslice.shape[i] == 1: */
    __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0);
    if (__pyx_t_2) {

      /* "View.MemoryView":1239: tmpslice.strides[i] = 0 */
      (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0;
    }
  }

  /* "View.MemoryView":1241: if slice_is_contig(src[0], order, ndim): */
  __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0);
  if (__pyx_t_2) {

    /* "View.MemoryView":1242: memcpy(result, src.data, size) */
    (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size));
    goto __pyx_L9;
  }

  /* "View.MemoryView":1244: copy_strided_to_strided(src, tmpslice, ndim, itemsize) */
  /*else*/ {
    copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize);
  }
  __pyx_L9:;

  /* "View.MemoryView":1246: return result */
  __pyx_r = __pyx_v_result;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  {
#ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
    __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
    __Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
  }
  __pyx_r = NULL;
  __pyx_L0:;
  return __pyx_r;
}

/* "View.MemoryView":1251: cdef int _err_extents(int i, Py_ssize_t extent1, Py_ssize_t extent2) except -1 with gil */
/* Raises ValueError reporting mismatched extents in dimension i; acquires the
 * GIL for the duration and always returns -1. */
static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
#ifdef WITH_THREAD
  PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
  __Pyx_RefNannySetupContext("_err_extents", 0);

  /* "View.MemoryView":1254: (i, extent1, extent2) */
  __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 1254, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1254, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1254, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 1254, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_GIVEREF(__pyx_t_1);
  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_2);
  PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_3);
  PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3);
  __pyx_t_1 = 0;
  __pyx_t_2 = 0;
  __pyx_t_3 = 0;

  /* "View.MemoryView":1253: raise ValueError("got differing extents in dimension %d (got %d and %d)" % ...) */
  __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1253, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 1253, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_Raise(__pyx_t_4, 0, 0, 0);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __PYX_ERR(2, 1253, __pyx_L1_error)

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
  return __pyx_r;
}

/* "View.MemoryView":1257: cdef int _err_dim(object error, char *msg, int dim) except -1 with gil */
/* Raises 'error' with msg (ASCII-decoded, then %-formatted with dim); acquires
 * the GIL for the duration and always returns -1. */
static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
#ifdef WITH_THREAD
  PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
  __Pyx_RefNannySetupContext("_err_dim", 0);
  __Pyx_INCREF(__pyx_v_error);

  /* "View.MemoryView":1258: raise error(msg.decode('ascii') % dim) */
  __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1258, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1258, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 1258, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_INCREF(__pyx_v_error);
  __pyx_t_3 = __pyx_v_error;
  __pyx_t_2 = NULL;
  /* unwrap a bound method so self can be passed as the first argument */
  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
    __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
    if (likely(__pyx_t_2)) {
      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_2);
      __Pyx_INCREF(function);
      __Pyx_DECREF_SET(__pyx_t_3, function);
    }
  }
  __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4);
  __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 1258, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_Raise(__pyx_t_1, 0, 0, 0);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __PYX_ERR(2, 1258, __pyx_L1_error)

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_XDECREF(__pyx_v_error);
  __Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
  return __pyx_r;
}

/* "View.MemoryView":1261: cdef int _err(object error, char *msg) except -1 with gil */
/* Raises 'error' — called with the ASCII-decoded msg when msg is non-NULL,
 * re-raised bare otherwise; acquires the GIL and always returns -1. */
static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
#ifdef WITH_THREAD
  PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
  __Pyx_RefNannySetupContext("_err", 0);
  __Pyx_INCREF(__pyx_v_error);

  /* "View.MemoryView":1262: if msg != NULL: */
  __pyx_t_1 = ((__pyx_v_msg != NULL) != 0);
  if (unlikely(__pyx_t_1)) {

    /* "View.MemoryView":1263: raise error(msg.decode('ascii')) */
    __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1263, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_INCREF(__pyx_v_error);
    __pyx_t_4 = __pyx_v_error;
    __pyx_t_5 = NULL;
    /* unwrap a bound method so self can be passed as the first argument */
    if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
      __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
      if (likely(__pyx_t_5)) {
        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
        __Pyx_INCREF(__pyx_t_5);
        __Pyx_INCREF(function);
        __Pyx_DECREF_SET(__pyx_t_4, function);
      }
    }
    __pyx_t_2 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1263, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __PYX_ERR(2, 1263, __pyx_L1_error)
  }

  /* "View.MemoryView":1265: raise error */
  /*else*/ {
    __Pyx_Raise(__pyx_v_error, 0, 0, 0);
    __PYX_ERR(2, 1265, __pyx_L1_error)
  }

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_XDECREF(__pyx_v_error);
  __Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
  return __pyx_r;
}

/* "View.MemoryView":1268: cdef int memoryview_copy_contents(...) */
/* Copies the contents of slice 'src' into slice 'dst': broadcasts leading
 * dimensions when the ranks differ (extent-1 dims get stride 0), rejects
 * mismatched extents and indirect dimensions, copies overlapping sources
 * through a malloc'd temporary (copy_data_to_temp/free), uses a straight
 * memcpy when both slices are contiguous in the same order, and otherwise
 * falls back to a strided copy (transposing first when both are best in
 * Fortran order). Returns 0 on success, -1 after raising on error. */
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) {
  void *__pyx_v_tmpdata;
  size_t __pyx_v_itemsize;
  int __pyx_v_i;
  char __pyx_v_order;
  int __pyx_v_broadcasting;
  int __pyx_v_direct_copy;
  __Pyx_memviewslice __pyx_v_tmp;
  int __pyx_v_ndim;
  int __pyx_r;
  Py_ssize_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  int __pyx_t_5;
  int __pyx_t_6;
  void *__pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "View.MemoryView":1276: cdef void *tmpdata = NULL */
  __pyx_v_tmpdata = NULL;

  /* "View.MemoryView":1277: cdef size_t itemsize = src.memview.view.itemsize */
  __pyx_t_1 = __pyx_v_src.memview->view.itemsize;
  __pyx_v_itemsize = __pyx_t_1;

  /* "View.MemoryView":1279: cdef char order = get_best_order(&src, src_ndim) */
  __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim);

  /* "View.MemoryView":1280: cdef bint broadcasting = False */
  __pyx_v_broadcasting = 0;

  /* "View.MemoryView":1281: cdef bint direct_copy = False */
  __pyx_v_direct_copy = 0;

  /* "View.MemoryView":1284: if src_ndim < dst_ndim: */
  __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0);
  if (__pyx_t_2) {

    /* "View.MemoryView":1285: broadcast_leading(&src, src_ndim, dst_ndim) */
    __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim);
    goto __pyx_L3;
  }

  /* "View.MemoryView":1286: elif dst_ndim < src_ndim: */
  __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0);
  if (__pyx_t_2) {

    /* "View.MemoryView":1287: broadcast_leading(&dst, dst_ndim, src_ndim) */
    __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim);
  }
  __pyx_L3:;

  /* "View.MemoryView":1289: cdef int ndim = max(src_ndim, dst_ndim) */
  __pyx_t_3 = __pyx_v_dst_ndim;
  __pyx_t_4 = __pyx_v_src_ndim;
  if (((__pyx_t_3 > __pyx_t_4) != 0)) {
    __pyx_t_5 = __pyx_t_3;
  } else {
    __pyx_t_5 = __pyx_t_4;
  }
  __pyx_v_ndim = __pyx_t_5;

  /* "View.MemoryView":1291: for i in range(ndim): */
  __pyx_t_5 = __pyx_v_ndim;
  __pyx_t_3 = __pyx_t_5;
  for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
    __pyx_v_i = __pyx_t_4;

    /* "View.MemoryView":1292: if src.shape[i] != dst.shape[i]: */
    __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0);
    if (__pyx_t_2) {

      /* "View.MemoryView":1293: if src.shape[i] == 1: */
      __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0);
      if (__pyx_t_2) {

        /* "View.MemoryView":1294: broadcasting = True */
        __pyx_v_broadcasting = 1;

        /* "View.MemoryView":1295: src.strides[i] = 0 */
        (__pyx_v_src.strides[__pyx_v_i]) = 0;
        goto __pyx_L7;
      }

      /* "View.MemoryView":1297: _err_extents(i, dst.shape[i], src.shape[i]) */
      /*else*/ {
        __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(2, 1297, __pyx_L1_error)
      }
      __pyx_L7:;
    }

    /* "View.MemoryView":1299: if src.suboffsets[i] >= 0: */
    __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0);
    if (__pyx_t_2) {

      /* "View.MemoryView":1300: _err_dim(ValueError, "Dimension %d is not direct", i) */
      __pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(2, 1300, __pyx_L1_error)
    }
  }

  /* "View.MemoryView":1302: if slices_overlap(&src, &dst, ndim, itemsize): */
  __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0);
  if (__pyx_t_2) {

    /* "View.MemoryView":1304: if not slice_is_contig(src, order, ndim): */
    __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0);
    if (__pyx_t_2) {

      /* "View.MemoryView":1305: order = get_best_order(&dst, ndim) */
      __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim);
    }

    /* "View.MemoryView":1307: tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) */
    __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(2, 1307, __pyx_L1_error)
    __pyx_v_tmpdata = __pyx_t_7;

    /* "View.MemoryView":1308: src = tmp */
    __pyx_v_src = __pyx_v_tmp;
  }

  /* "View.MemoryView":1310: if not broadcasting: */
  __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0);
  if (__pyx_t_2) {

    /* "View.MemoryView":1313: if slice_is_contig(src, 'C', ndim): */
    __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0);
    if (__pyx_t_2) {

      /* "View.MemoryView":1314: direct_copy = slice_is_contig(dst, 'C', ndim) */
      __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim);
      goto __pyx_L12;
    }

    /* "View.MemoryView":1315: elif slice_is_contig(src, 'F', ndim): */
    __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0);
    if (__pyx_t_2) {

      /* "View.MemoryView":1316: direct_copy = slice_is_contig(dst, 'F', ndim) */
      __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim);
    }
    __pyx_L12:;

    /* "View.MemoryView":1318: if direct_copy: */
    __pyx_t_2 = (__pyx_v_direct_copy != 0);
    if (__pyx_t_2) {

      /* "View.MemoryView":1320: refcount_copying(&dst, dtype_is_object, ndim, False) */
      __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);

      /* "View.MemoryView":1321: memcpy(dst.data, src.data, slice_get_size(&src, ndim)) */
      (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim)));

      /* "View.MemoryView":1322: refcount_copying(&dst, dtype_is_object, ndim, True) */
      __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);

      /* "View.MemoryView":1323: free(tmpdata) */
      free(__pyx_v_tmpdata);

      /* "View.MemoryView":1324: return 0 */
      __pyx_r = 0;
      goto __pyx_L0;
    }
  }

  /* "View.MemoryView":1326: if order == 'F' == get_best_order(&dst, ndim): */
  __pyx_t_2 = (__pyx_v_order == 'F');
  if (__pyx_t_2) {
    __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim));
  }
  __pyx_t_8 = (__pyx_t_2 != 0);
  if (__pyx_t_8) {

    /* "View.MemoryView":1329: transpose_memslice(&src) */
    __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(2, 1329, __pyx_L1_error)

    /* "View.MemoryView":1330: transpose_memslice(&dst) */
    __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(2, 1330, __pyx_L1_error)
  }

  /* "View.MemoryView":1332: refcount_copying(&dst, dtype_is_object, ndim, False) */
  __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);

  /* "View.MemoryView":1333: copy_strided_to_strided(&src, &dst, ndim, itemsize) */
  copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize);

  /* "View.MemoryView":1334: refcount_copying(&dst, dtype_is_object, ndim, True) */
  __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);

  /* "View.MemoryView":1336: free(tmpdata) */
  free(__pyx_v_tmpdata);

  /* "View.MemoryView":1337: return 0 */
  __pyx_r = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  {
#ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
    __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
    __Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
  }
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "View.MemoryView":1340: cdef void broadcast_leading(__Pyx_memviewslice *mslice, int ndim, int ndim_other) nogil */
/* (definition continues beyond this chunk) Shifts the trailing ndim
 * shape/stride/suboffset entries up by offset = ndim_other - ndim. */
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) {
  int __pyx_v_i;
  int __pyx_v_offset;
  int __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;

  /* "View.MemoryView":1344: cdef int offset = ndim_other - ndim */
  __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim);

  /* "View.MemoryView":1346: for i in range(ndim - 1, -1, -1): */
  for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
    __pyx_v_i = __pyx_t_1;

    /* "View.MemoryView":1347: mslice.shape[i + offset] = mslice.shape[i] */
    (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]);

    /* "View.MemoryView":1348: mslice.strides[i + offset] = mslice.strides[i] */
    (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]);

    /* "View.MemoryView":1349: mslice.suboffsets[i + offset] = mslice.suboffsets[i] */
    (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]);
  }
/* "View.MemoryView":1351 * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * * for i in range(offset): # <<<<<<<<<<<<<< * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] */ __pyx_t_1 = __pyx_v_offset; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1352 * * for i in range(offset): * mslice.shape[i] = 1 # <<<<<<<<<<<<<< * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 */ (__pyx_v_mslice->shape[__pyx_v_i]) = 1; /* "View.MemoryView":1353 * for i in range(offset): * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< * mslice.suboffsets[i] = -1 * */ (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); /* "View.MemoryView":1354 * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * */ (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1340 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ /* function exit code */ } /* "View.MemoryView":1362 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { int __pyx_t_1; /* "View.MemoryView":1366 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ __pyx_t_1 = (__pyx_v_dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":1367 * * if dtype_is_object: * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<< * dst.strides, ndim, inc) * */ __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, 
__pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1366 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ } /* "View.MemoryView":1362 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ /* function exit code */ } /* "View.MemoryView":1371 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { __Pyx_RefNannyDeclarations #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); /* "View.MemoryView":1374 * Py_ssize_t *strides, int ndim, * bint inc) with gil: * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_refcount_objects_in_slice') */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1371 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ /* function exit code */ __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } /* "View.MemoryView":1377 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ 
static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); /* "View.MemoryView":1381 * cdef Py_ssize_t i * * for i in range(shape[0]): # <<<<<<<<<<<<<< * if ndim == 1: * if inc: */ __pyx_t_1 = (__pyx_v_shape[0]); __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1382 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ __pyx_t_4 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_4) { /* "View.MemoryView":1383 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ __pyx_t_4 = (__pyx_v_inc != 0); if (__pyx_t_4) { /* "View.MemoryView":1384 * if ndim == 1: * if inc: * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * Py_DECREF((<PyObject **> data)[0]) */ Py_INCREF((((PyObject **)__pyx_v_data)[0])); /* "View.MemoryView":1383 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ goto __pyx_L6; } /* "View.MemoryView":1386 * Py_INCREF((<PyObject **> data)[0]) * else: * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, */ /*else*/ { Py_DECREF((((PyObject **)__pyx_v_data)[0])); } __pyx_L6:; /* "View.MemoryView":1382 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ goto __pyx_L5; } /* "View.MemoryView":1388 * Py_DECREF((<PyObject **> data)[0]) * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, inc) * */ /*else*/ { /* 
"View.MemoryView":1389 * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, * ndim - 1, inc) # <<<<<<<<<<<<<< * * data += strides[0] */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); } __pyx_L5:; /* "View.MemoryView":1391 * ndim - 1, inc) * * data += strides[0] # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); } /* "View.MemoryView":1377 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1397 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { /* "View.MemoryView":1400 * size_t itemsize, void *item, * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1401 * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) */ __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1403 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) * 
refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1397 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ /* function exit code */ } /* "View.MemoryView":1407 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_extent; int __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; Py_ssize_t __pyx_t_4; /* "View.MemoryView":1411 * size_t itemsize, void *item) nogil: * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t extent = shape[0] * */ __pyx_v_stride = (__pyx_v_strides[0]); /* "View.MemoryView":1412 * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_extent = (__pyx_v_shape[0]); /* "View.MemoryView":1414 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1415 * * if ndim == 1: * for i in range(extent): # <<<<<<<<<<<<<< * memcpy(data, item, itemsize) * data += stride */ __pyx_t_2 = __pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1416 * if ndim == 1: * for i in range(extent): * memcpy(data, 
item, itemsize) # <<<<<<<<<<<<<< * data += stride * else: */ (void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize)); /* "View.MemoryView":1417 * for i in range(extent): * memcpy(data, item, itemsize) * data += stride # <<<<<<<<<<<<<< * else: * for i in range(extent): */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } /* "View.MemoryView":1414 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ goto __pyx_L3; } /* "View.MemoryView":1419 * data += stride * else: * for i in range(extent): # <<<<<<<<<<<<<< * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) */ /*else*/ { __pyx_t_2 = __pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1420 * else: * for i in range(extent): * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, itemsize, item) * data += stride */ __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1422 * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) * data += stride # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } } __pyx_L3:; /* "View.MemoryView":1407 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /* function exit code */ } /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef 
__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v___pyx_type = 0; long __pyx_v___pyx_checksum; PyObject *__pyx_v___pyx_state = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(2, 1, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(2, 1, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) 
__PYX_ERR(2, 1, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v___pyx_type = values[0]; __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(2, 1, __pyx_L3_error) __pyx_v___pyx_state = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(2, 1, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_v___pyx_PickleError = 0; PyObject *__pyx_v___pyx_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ __pyx_t_1 = 
((__pyx_v___pyx_checksum != 0xb068931) != 0); if (__pyx_t_1) { /* "(tree fragment)":5 * cdef object __pyx_result * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_PickleError); __Pyx_GIVEREF(__pyx_n_s_PickleError); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_2); __pyx_v___pyx_PickleError = __pyx_t_2; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":6 * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<< * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: */ __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_INCREF(__pyx_v___pyx_PickleError); __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = 
PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(2, 6, __pyx_L1_error) /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ } /* "(tree fragment)":7 * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v___pyx_result = __pyx_t_3; __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ __pyx_t_1 = (__pyx_v___pyx_state != Py_None); __pyx_t_6 = (__pyx_t_1 != 0); if (__pyx_t_6) { /* "(tree fragment)":9 * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<< * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(2, 9, __pyx_L1_error) __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ } /* "(tree fragment)":10 * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result # <<<<<<<<<<<<<< * cdef 
__pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v___pyx_result); __pyx_r = __pyx_v___pyx_result; goto __pyx_L0; /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v___pyx_PickleError); __Pyx_XDECREF(__pyx_v___pyx_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); /* "(tree fragment)":12 * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { 
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(2, 12, __pyx_L1_error) } __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v___pyx_result->name); __Pyx_DECREF(__pyx_v___pyx_result->name); __pyx_v___pyx_result->name = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(2, 13, __pyx_L1_error) } __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(2, 13, __pyx_L1_error) __pyx_t_4 = ((__pyx_t_3 > 1) != 0); if (__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L4_bool_binop_done; } __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 13, __pyx_L1_error) __pyx_t_5 = (__pyx_t_4 != 0); __pyx_t_2 = __pyx_t_5; __pyx_L4_bool_binop_done:; if (__pyx_t_2) { /* "(tree fragment)":14 * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(__pyx_v___pyx_state == Py_None)) { 
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(2, 14, __pyx_L1_error) } __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } __pyx_t_1 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static struct 
__pyx_vtabstruct_array __pyx_vtable_array; static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_array_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_array_obj *)o); p->__pyx_vtab = __pyx_vtabptr_array; p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_array(PyObject *o) { struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); __pyx_array___dealloc__(o); __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->mode); Py_CLEAR(p->_format); (*Py_TYPE(o)->tp_free)(o); } static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_array___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n); if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); v = __pyx_array___getattr__(o, n); } return v; } static PyObject 
*__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); } static PyMethodDef __pyx_methods_array[] = { {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_array[] = { {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_array = { __pyx_array___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_array, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_array = { __pyx_array___len__, /*mp_length*/ __pyx_array___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_array = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_array_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_array = { PyVarObject_HEAD_INIT(0, 0) "draco.util.truncate.array", /*tp_name*/ sizeof(struct __pyx_array_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_array, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ 
&__pyx_tp_as_mapping_array, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ __pyx_tp_getattro_array, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_array, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_array, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_array, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_MemviewEnum_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_MemviewEnum_obj *)o); p->name = Py_None; Py_INCREF(Py_None); return o; } static void __pyx_tp_dealloc_Enum(PyObject *o) { struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->name); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { int e; struct 
__pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; if (p->name) { e = (*v)(p->name, a); if (e) return e; } return 0; } static int __pyx_tp_clear_Enum(PyObject *o) { PyObject* tmp; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; tmp = ((PyObject*)p->name); p->name = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyMethodDef __pyx_methods_Enum[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_MemviewEnum = { PyVarObject_HEAD_INIT(0, 0) "draco.util.truncate.Enum", /*tp_name*/ sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_Enum, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_MemviewEnum___repr__, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_Enum, /*tp_traverse*/ __pyx_tp_clear_Enum, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_Enum, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_MemviewEnum___init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_Enum, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, 
/*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryview_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_memoryview_obj *)o); p->__pyx_vtab = __pyx_vtabptr_memoryview; p->obj = Py_None; Py_INCREF(Py_None); p->_size = Py_None; Py_INCREF(Py_None); p->_array_interface = Py_None; Py_INCREF(Py_None); p->view.obj = NULL; if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_memoryview(PyObject *o) { struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); __pyx_memoryview___dealloc__(o); __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->obj); Py_CLEAR(p->_size); Py_CLEAR(p->_array_interface); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; if (p->obj) { e = (*v)(p->obj, a); if (e) return e; } if (p->_size) { e = (*v)(p->_size, a); if (e) return e; } if (p->_array_interface) { e = (*v)(p->_array_interface, a); if (e) return e; } if 
(p->view.obj) { e = (*v)(p->view.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_memoryview(PyObject *o) { PyObject* tmp; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; tmp = ((PyObject*)p->obj); p->obj = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_size); p->_size = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_array_interface); p->_array_interface = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); Py_CLEAR(p->view.obj); return 0; } static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_memoryview___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); } 
/* Property getters: thin C wrappers that forward attribute access on the memoryview type (itemsize, nbytes, size) to the Cython-generated __get__ implementations; wired into the getset table below. */ static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); } /* Method table for the memoryview type (contiguity checks, copies, and the pickling stubs). */ static PyMethodDef __pyx_methods_memoryview[] = { {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; /* Getset (property) table for the memoryview type; each slot points at one of the __pyx_getprop_* wrappers above or earlier in the file. All properties are read-only (no setter). */ static struct PyGetSetDef __pyx_getsets_memoryview[] = { {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; /* Sequence-protocol slots (len and indexed item access); initializer continues on the next line. */ static PySequenceMethods __pyx_tp_as_sequence_memoryview = { __pyx_memoryview___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_memoryview, /*sq_item*/ 0, /*sq_slice*/ 0,
/*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_memoryview = { __pyx_memoryview___len__, /*mp_length*/ __pyx_memoryview___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_memoryview = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_memoryview_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_memoryview = { PyVarObject_HEAD_INIT(0, 0) "draco.util.truncate.memoryview", /*tp_name*/ sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_memoryview___repr__, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ __pyx_memoryview___str__, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_memoryview, /*tp_traverse*/ __pyx_tp_clear_memoryview, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_memoryview, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_memoryview, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, 
/*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_memoryview, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryviewslice_obj *p; PyObject *o = __pyx_tp_new_memoryview(t, a, k); if (unlikely(!o)) return 0; p = ((struct __pyx_memoryviewslice_obj *)o); p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; p->from_object = Py_None; Py_INCREF(Py_None); p->from_slice.memview = NULL; return o; } static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); __pyx_memoryviewslice___dealloc__(o); __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->from_object); PyObject_GC_Track(o); __pyx_tp_dealloc_memoryview(o); } static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; if (p->from_object) { e = (*v)(p->from_object, a); if (e) return e; } return 0; } static int __pyx_tp_clear__memoryviewslice(PyObject *o) { 
PyObject* tmp; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; __pyx_tp_clear_memoryview(o); tmp = ((PyObject*)p->from_object); p->from_object = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); __PYX_XDEC_MEMVIEW(&p->from_slice, 1); return 0; } static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); } static PyMethodDef __pyx_methods__memoryviewslice[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = { {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_memoryviewslice = { PyVarObject_HEAD_INIT(0, 0) "draco.util.truncate._memoryviewslice", /*tp_name*/ sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___repr__, /*tp_repr*/ #else 0, /*tp_repr*/ #endif 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___str__, /*tp_str*/ #else 0, /*tp_str*/ #endif 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ "Internal class for passing memoryview slices to Python", /*tp_doc*/ __pyx_tp_traverse__memoryviewslice, 
/*tp_traverse*/ __pyx_tp_clear__memoryviewslice, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods__memoryviewslice, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets__memoryviewslice, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new__memoryviewslice, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_truncate(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_truncate}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "truncate", __pyx_k_draco_truncation_utils, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif #ifndef CYTHON_SMALL_CODE #if defined(__clang__) #define CYTHON_SMALL_CODE #elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) #define CYTHON_SMALL_CODE __attribute__((cold)) #else #define CYTHON_SMALL_CODE #endif #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, 
{&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, {&__pyx_kp_s_Input_array_must_be_1_d, __pyx_k_Input_array_must_be_1_d, sizeof(__pyx_k_Input_array_must_be_1_d), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_kp_s_MemoryView_of_r_at_0x_x, 
__pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, {&__pyx_kp_s_Weight_and_value_arrays_must_hav, __pyx_k_Weight_and_value_arrays_must_hav, sizeof(__pyx_k_Weight_and_value_arrays_must_hav), 0, 0, 1, 0}, {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, {&__pyx_n_s_asarray, __pyx_k_asarray, sizeof(__pyx_k_asarray), 0, 0, 1, 1}, {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, {&__pyx_n_s_bit_truncate, __pyx_k_bit_truncate, sizeof(__pyx_k_bit_truncate), 0, 0, 1, 1}, {&__pyx_n_s_bit_truncate_fixed, __pyx_k_bit_truncate_fixed, sizeof(__pyx_k_bit_truncate_fixed), 0, 0, 1, 1}, {&__pyx_n_s_bit_truncate_weights, __pyx_k_bit_truncate_weights, sizeof(__pyx_k_bit_truncate_weights), 0, 0, 1, 1}, {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, 
sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, {&__pyx_n_s_draco_util_truncate, __pyx_k_draco_util_truncate, sizeof(__pyx_k_draco_util_truncate), 0, 0, 1, 1}, {&__pyx_kp_s_draco_util_truncate_pyx, __pyx_k_draco_util_truncate_pyx, sizeof(__pyx_k_draco_util_truncate_pyx), 0, 0, 1, 0}, {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, {&__pyx_n_s_err, __pyx_k_err, sizeof(__pyx_k_err), 0, 0, 1, 1}, {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, {&__pyx_n_s_fallback, __pyx_k_fallback, sizeof(__pyx_k_fallback), 0, 0, 1, 1}, {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_memview, __pyx_k_memview, 
sizeof(__pyx_k_memview), 0, 0, 1, 1}, {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, {&__pyx_n_s_n, __pyx_k_n, sizeof(__pyx_k_n), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_kp_s_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 0, 1, 0}, {&__pyx_kp_s_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 0, 1, 0}, {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, {&__pyx_n_s_prec, __pyx_k_prec, sizeof(__pyx_k_prec), 0, 0, 1, 1}, {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 
0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, {&__pyx_n_s_val, __pyx_k_val, sizeof(__pyx_k_val), 0, 0, 1, 1}, {&__pyx_n_s_wgt, __pyx_k_wgt, 
/* Tail of the __pyx_string_tab interned-string table: entry for "wgt", then the terminating sentinel row. */ sizeof(__pyx_k_wgt), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; /* Look up and cache the Python builtins this module references (ValueError, range, ImportError, MemoryError, enumerate, TypeError, Ellipsis, id, IndexError). Returns 0 on success, -1 with a Python exception set on failure; the __PYX_ERR file/line arguments cite the originating .pyx/.pxd source locations. */ static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(0, 20, __pyx_L1_error) __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 43, __pyx_L1_error) __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 884, __pyx_L1_error) __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(2, 148, __pyx_L1_error) __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(2, 151, __pyx_L1_error) __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(2, 2, __pyx_L1_error) __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(2, 404, __pyx_L1_error) __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(2, 613, __pyx_L1_error) __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(2, 832, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } /* Pre-build and cache constant argument tuples (mostly exception-message tuples); the embedded block comments quote the originating Cython source lines. Definition continues past the end of this chunk. */ static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "draco/util/truncate.pyx":20 * cdef int n = val.shape[0] * if val.ndim != 1: * raise ValueError("Input array must be 1-d.") # <<<<<<<<<<<<<< * if wgt.shape[0] != n: * raise ValueError( */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_Input_array_must_be_1_d); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* 
"../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":884 * __pyx_import_array() * except Exception: * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_umath() except -1: */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 884, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":890 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_ufunc() except -1: */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 890, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(2, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(2, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__6)) 
__PYX_ERR(2, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(2, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(2, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(2, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(2, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "View.MemoryView":418 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to 
read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(2, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); /* "View.MemoryView":495 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(2, 495, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__12); __Pyx_GIVEREF(__pyx_tuple__12); /* "View.MemoryView":520 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(2, 520, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); /* "View.MemoryView":570 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(2, 570, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__14); __Pyx_GIVEREF(__pyx_tuple__14); /* "View.MemoryView":577 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __pyx_tuple__15 = PyTuple_New(1); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(2, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__15); 
__Pyx_INCREF(__pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_tuple__15, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_tuple__15); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(2, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__16); __Pyx_GIVEREF(__pyx_tuple__16); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(2, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); /* "View.MemoryView":682 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_slice__18 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__18)) __PYX_ERR(2, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__18); __Pyx_GIVEREF(__pyx_slice__18); /* "View.MemoryView":703 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(2, 703, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__19); __Pyx_GIVEREF(__pyx_tuple__19); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise 
TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(2, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__20); __Pyx_GIVEREF(__pyx_tuple__20); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(2, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); /* "draco/util/truncate.pyx":12 * inline float bit_truncate_float(float val, float err) nogil * * def bit_truncate(float val, float err): # <<<<<<<<<<<<<< * return bit_truncate_float(val, err) * */ __pyx_tuple__22 = PyTuple_Pack(2, __pyx_n_s_val, __pyx_n_s_err); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(0, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__22); __Pyx_GIVEREF(__pyx_tuple__22); __pyx_codeobj__23 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__22, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_draco_util_truncate_pyx, __pyx_n_s_bit_truncate, 12, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__23)) __PYX_ERR(0, 12, __pyx_L1_error) /* "draco/util/truncate.pyx":17 * @cython.boundscheck(False) * @cython.wraparound(False) * def bit_truncate_weights(float[:] val, float[:] wgt, float fallback): # <<<<<<<<<<<<<< * cdef int n = val.shape[0] * if val.ndim != 1: */ __pyx_tuple__24 = PyTuple_Pack(5, __pyx_n_s_val, __pyx_n_s_wgt, __pyx_n_s_fallback, __pyx_n_s_n, __pyx_n_s_i); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(0, 17, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__24); __Pyx_GIVEREF(__pyx_tuple__24); __pyx_codeobj__25 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, 
CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__24, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_draco_util_truncate_pyx, __pyx_n_s_bit_truncate_weights, 17, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__25)) __PYX_ERR(0, 17, __pyx_L1_error) /* "draco/util/truncate.pyx":39 * @cython.boundscheck(False) * @cython.wraparound(False) * def bit_truncate_fixed(float[:] val, float prec): # <<<<<<<<<<<<<< * cdef int n = val.shape[0] * cdef int i = 0 */ __pyx_tuple__26 = PyTuple_Pack(4, __pyx_n_s_val, __pyx_n_s_prec, __pyx_n_s_n, __pyx_n_s_i); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(0, 39, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__26); __Pyx_GIVEREF(__pyx_tuple__26); __pyx_codeobj__27 = (PyObject*)__Pyx_PyCode_New(2, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__26, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_draco_util_truncate_pyx, __pyx_n_s_bit_truncate_fixed, 39, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__27)) __PYX_ERR(0, 39, __pyx_L1_error) /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_tuple__28 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__28)) __PYX_ERR(2, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__28); __Pyx_GIVEREF(__pyx_tuple__28); /* "View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_tuple__29 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__29)) __PYX_ERR(2, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__29); __Pyx_GIVEREF(__pyx_tuple__29); /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or 
indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__30 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__30)) __PYX_ERR(2, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__30); __Pyx_GIVEREF(__pyx_tuple__30); /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_tuple__31 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__31)) __PYX_ERR(2, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__31); __Pyx_GIVEREF(__pyx_tuple__31); /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__32 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__32)) __PYX_ERR(2, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__32); __Pyx_GIVEREF(__pyx_tuple__32); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_tuple__33 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__33)) __PYX_ERR(2, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__33); __Pyx_GIVEREF(__pyx_tuple__33); __pyx_codeobj__34 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__33, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__34)) __PYX_ERR(2, 1, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static CYTHON_SMALL_CODE int 
__Pyx_InitGlobals(void) {
  /* Cython-generated module-global setup: interns the module's string table
     and pre-builds the small integer constants used at runtime.
     Returns 0 on success, -1 with a Python exception set on failure. */
  /* InitThreads.init */
  #ifdef WITH_THREAD
  PyEval_InitThreads();
  #endif
  if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)
  if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
  __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error)
  /* NOTE(review): 184977713 is a Cython-emitted constant — presumably the
     pickle checksum used by the generated __pyx_unpickle_Enum; confirm
     against the generated unpickle code if it matters. */
  __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error)
  return 0;
  __pyx_L1_error:;
  return -1;
}
/* Forward declarations for the per-phase module initialisation helpers
   called from the module exec function. */
static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/
/* Initialise the module-level Cython globals (the memoryview Enum
   singletons) to None placeholders; they are rebuilt during module exec. */
static int __Pyx_modinit_global_init_code(void) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
  /*--- Global init code ---*/
  generic = Py_None; Py_INCREF(Py_None);
  strided = Py_None; Py_INCREF(Py_None);
  indirect = Py_None; Py_INCREF(Py_None);
  contiguous = Py_None; Py_INCREF(Py_None);
  indirect_contiguous = Py_None; Py_INCREF(Py_None);
  __Pyx_RefNannyFinishContext();
  return 0;
}
/* Empty: this module exports no C variables to other Cython modules. */
static int __Pyx_modinit_variable_export_code(void) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
  /*--- Variable export code ---*/
  __Pyx_RefNannyFinishContext();
  return 0;
}
/* Empty: this module exports no C functions to other Cython modules. */
static int __Pyx_modinit_function_export_code(void) {
  __Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
  /*--- Function export code ---*/
  __Pyx_RefNannyFinishContext();
  return 0;
}
/* Initialise the extension types this module defines (the Cython
   memoryview support types: array, Enum, memoryview, _memoryviewslice):
   wire up their vtables, call PyType_Ready, and publish the ready type
   objects through the __pyx_*_type pointers.
   Returns 0 on success, -1 with a Python exception set on failure. */
static int __Pyx_modinit_type_init_code(void) {
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
  /*--- Type init code ---*/
  __pyx_vtabptr_array = &__pyx_vtable_array;
  __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview;
  if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(2, 105, __pyx_L1_error)
  /* tp_print only exists before CPython 3.8.0b1 (see the version guard). */
  #if PY_VERSION_HEX < 0x030800B1
  __pyx_type___pyx_array.tp_print = 0;
  #endif
  if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(2, 105, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(2, 105, __pyx_L1_error)
  __pyx_array_type = &__pyx_type___pyx_array;
  if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(2, 279, __pyx_L1_error)
  #if PY_VERSION_HEX < 0x030800B1
  __pyx_type___pyx_MemviewEnum.tp_print = 0;
  #endif
  /* Use the optimised generic getattr when the type has no instance dict. */
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr;
  }
  if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(2, 279, __pyx_L1_error)
  __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum;
  __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview;
  __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer;
  __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice;
  __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment;
  __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar;
  __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed;
  __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object;
  __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object;
  if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(2, 330, __pyx_L1_error)
  #if PY_VERSION_HEX < 0x030800B1
  __pyx_type___pyx_memoryview.tp_print = 0;
  #endif
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr;
  }
  if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(2, 330, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(2, 330, __pyx_L1_error)
  __pyx_memoryview_type = &__pyx_type___pyx_memoryview;
  /* _memoryviewslice copies memoryview's vtable, then overrides the two
     item-conversion slots and inherits via tp_base. */
  __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice;
  __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview;
  __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object;
  __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object;
  __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type;
  if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(2, 965, __pyx_L1_error)
  #if PY_VERSION_HEX < 0x030800B1
  __pyx_type___pyx_memoryviewslice.tp_print = 0;
  #endif
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr;
  }
  if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(2, 965, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(2, 965, __pyx_L1_error)
  __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice;
  __Pyx_RefNannyFinishContext();
  return 0;
  __pyx_L1_error:;
  __Pyx_RefNannyFinishContext();
  return -1;
}
/* Import types defined elsewhere (builtin `type`, NumPy's dtype/flatiter/
   broadcast/ndarray/ufunc) and check their struct sizes against our
   compile-time declarations.  Returns 0 on success, -1 on failure. */
static int __Pyx_modinit_type_import_code(void) {
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
  /*--- Type import code ---*/
  __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 9, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type",
  /* PyPy before 5.11 lays `type` out as PyTypeObject, CPython as a heap type. */
  #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000
  sizeof(PyTypeObject),
  #else
  sizeof(PyHeapTypeObject),
  #endif
  __Pyx_ImportType_CheckSize_Warn);
  if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(3, 9, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = PyImport_ImportModule("numpy"); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 199, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_ptype_5numpy_dtype = __Pyx_ImportType(__pyx_t_1, "numpy", "dtype", sizeof(PyArray_Descr), __Pyx_ImportType_CheckSize_Ignore);
  if (!__pyx_ptype_5numpy_dtype) __PYX_ERR(1, 199, __pyx_L1_error)
  __pyx_ptype_5numpy_flatiter = __Pyx_ImportType(__pyx_t_1, "numpy", "flatiter",
sizeof(PyArrayIterObject), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_flatiter) __PYX_ERR(1, 222, __pyx_L1_error)
__pyx_ptype_5numpy_broadcast = __Pyx_ImportType(__pyx_t_1, "numpy", "broadcast", sizeof(PyArrayMultiIterObject), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_broadcast) __PYX_ERR(1, 226, __pyx_L1_error)
__pyx_ptype_5numpy_ndarray = __Pyx_ImportType(__pyx_t_1, "numpy", "ndarray", sizeof(PyArrayObject), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_ndarray) __PYX_ERR(1, 238, __pyx_L1_error)
__pyx_ptype_5numpy_ufunc = __Pyx_ImportType(__pyx_t_1, "numpy", "ufunc", sizeof(PyUFuncObject), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_ufunc) __PYX_ERR(1, 764, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_RefNannyFinishContext();
return -1;
}
/* Empty: this module imports no C variables from other Cython modules. */
static int __Pyx_modinit_variable_import_code(void) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
  /*--- Variable import code ---*/
  __Pyx_RefNannyFinishContext();
  return 0;
}
/* Empty: this module imports no C functions from other Cython modules. */
static int __Pyx_modinit_function_import_code(void) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
  /*--- Function import code ---*/
  __Pyx_RefNannyFinishContext();
  return 0;
}
/* Select linkage/return type for the module init entry point:
   Python 2 init functions return void, Python 3 PyInit_* returns PyObject *;
   C++ builds additionally need extern "C" linkage. */
#ifndef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#elif PY_MAJOR_VERSION < 3
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" void
#else
#define __Pyx_PyMODINIT_FUNC void
#endif
#else
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
#else
#define __Pyx_PyMODINIT_FUNC PyObject *
#endif
#endif
/* Module entry point: inittruncate (Py2) / PyInit_truncate (Py3). */
#if PY_MAJOR_VERSION < 3
__Pyx_PyMODINIT_FUNC inittruncate(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC inittruncate(void)
#else
__Pyx_PyMODINIT_FUNC PyInit_truncate(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC PyInit_truncate(void)
#if
CYTHON_PEP489_MULTI_PHASE_INIT
/* PEP 489 multi-phase init: PyInit_truncate only hands back the module
   definition; real initialisation happens later in the exec slot
   (__pyx_pymod_exec_truncate). */
{
  return PyModuleDef_Init(&__pyx_moduledef);
}
/* Record the first interpreter this module is loaded into and fail with
   ImportError if it is later loaded from a different (sub)interpreter.
   Returns 0 on success, -1 on mismatch or error. */
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
  #if PY_VERSION_HEX >= 0x030700A1
  static PY_INT64_T main_interpreter_id = -1;
  PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
  if (main_interpreter_id == -1) {
    main_interpreter_id = current_id;
    return (unlikely(current_id == -1)) ? -1 : 0;
  } else if (unlikely(main_interpreter_id != current_id))
  #else
  /* Pre-3.7: no interpreter IDs, compare interpreter state pointers. */
  static PyInterpreterState *main_interpreter = NULL;
  PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
  if (!main_interpreter) {
    main_interpreter = current_interpreter;
  } else if (unlikely(main_interpreter != current_interpreter))
  #endif
  {
    PyErr_SetString(
        PyExc_ImportError,
        "Interpreter change detected - this module can only be loaded into one interpreter per process.");
    return -1;
  }
  return 0;
}
/* Copy one attribute of the importlib ModuleSpec into the new module's
   dict under a dunder name (e.g. spec.origin -> __file__).  A missing
   attribute is silently ignored; a Py_None value is skipped unless
   allow_none is true.  Returns 0 on success, -1 on error. */
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
  PyObject *value = PyObject_GetAttrString(spec, from_name);
  int result = 0;
  if (likely(value)) {
    if (allow_none || value != Py_None) {
      result = PyDict_SetItemString(moddict, to_name, value);
    }
    Py_DECREF(value);
  } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
    PyErr_Clear(); /* attribute absent on the spec: not an error */
  } else {
    result = -1;
  }
  return result;
}
/* PEP 489 Py_mod_create slot: create the module object from its spec and
   seed its dunder attributes from the spec.  Enforces single-interpreter
   use and returns the existing module object if already created. */
static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
  PyObject *module = NULL, *moddict, *modname;
  if (__Pyx_check_single_interpreter()) return NULL;
  if (__pyx_m) return __Pyx_NewRef(__pyx_m);
  modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad;
  module = PyModule_NewObject(modname);
  Py_DECREF(modname);
  if (unlikely(!module)) goto bad;
  moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad;
  if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
  if
(unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static CYTHON_SMALL_CODE int __pyx_pymod_exec_truncate(PyObject *__pyx_pyinit_module) #endif #endif { PyObject *__pyx_t_1 = NULL; static PyThread_type_lock __pyx_t_2[8]; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m) { if (__pyx_m == __pyx_pyinit_module) return 0; PyErr_SetString(PyExc_RuntimeError, "Module 'truncate' has already been imported. Re-initialisation is not supported."); return -1; } #elif PY_MAJOR_VERSION >= 3 if (__pyx_m) return __Pyx_NewRef(__pyx_m); #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_truncate(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pxy_PyFrame_Initialize_Offsets __Pxy_PyFrame_Initialize_Offsets(); #endif __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif 
#ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("truncate", __pyx_methods, __pyx_k_draco_truncation_utils, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_b); __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_cython_runtime); if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_draco__util__truncate) { if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "draco.util.truncate")) { if (unlikely(PyDict_SetItemString(modules, "draco.util.truncate", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global type/function init code ---*/ (void)__Pyx_modinit_global_init_code(); (void)__Pyx_modinit_variable_export_code(); (void)__Pyx_modinit_function_export_code(); if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) if (unlikely(__Pyx_modinit_type_import_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) (void)__Pyx_modinit_variable_import_code(); (void)__Pyx_modinit_function_import_code(); /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "draco/util/truncate.pyx":6 * from cython.parallel import prange * * import numpy as np # <<<<<<<<<<<<<< * cimport numpy as cnp * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 6, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "draco/util/truncate.pyx":12 * inline float bit_truncate_float(float val, 
float err) nogil * * def bit_truncate(float val, float err): # <<<<<<<<<<<<<< * return bit_truncate_float(val, err) * */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5draco_4util_8truncate_1bit_truncate, NULL, __pyx_n_s_draco_util_truncate); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_bit_truncate, __pyx_t_1) < 0) __PYX_ERR(0, 12, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "draco/util/truncate.pyx":17 * @cython.boundscheck(False) * @cython.wraparound(False) * def bit_truncate_weights(float[:] val, float[:] wgt, float fallback): # <<<<<<<<<<<<<< * cdef int n = val.shape[0] * if val.ndim != 1: */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5draco_4util_8truncate_3bit_truncate_weights, NULL, __pyx_n_s_draco_util_truncate); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_bit_truncate_weights, __pyx_t_1) < 0) __PYX_ERR(0, 17, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "draco/util/truncate.pyx":39 * @cython.boundscheck(False) * @cython.wraparound(False) * def bit_truncate_fixed(float[:] val, float prec): # <<<<<<<<<<<<<< * cdef int n = val.shape[0] * cdef int i = 0 */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5draco_4util_8truncate_5bit_truncate_fixed, NULL, __pyx_n_s_draco_util_truncate); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 39, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_bit_truncate_fixed, __pyx_t_1) < 0) __PYX_ERR(0, 39, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "draco/util/truncate.pyx":1 * """draco truncation utils""" # <<<<<<<<<<<<<< * * cimport cython */ __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":209 * 
info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 209, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(2, 209, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__28, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__29, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__30, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect); 
__Pyx_DECREF_SET(indirect, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__31, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__32, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":316 * * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ * PyThread_allocate_lock(), */ __pyx_memoryview_thread_locks_used = 0; /* "View.MemoryView":317 * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< * PyThread_allocate_lock(), * PyThread_allocate_lock(), */ __pyx_t_2[0] = PyThread_allocate_lock(); __pyx_t_2[1] = PyThread_allocate_lock(); __pyx_t_2[2] = PyThread_allocate_lock(); __pyx_t_2[3] = PyThread_allocate_lock(); __pyx_t_2[4] = PyThread_allocate_lock(); __pyx_t_2[5] = PyThread_allocate_lock(); __pyx_t_2[6] = PyThread_allocate_lock(); __pyx_t_2[7] = PyThread_allocate_lock(); memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); 
/* "View.MemoryView":549 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 549, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(2, 549, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryview_type); /* "View.MemoryView":995 * return self.from_object * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 995, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(2, 995, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(2, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and 
hasattr(__pyx_result, '__dict__'): */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init draco.util.truncate", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_CLEAR(__pyx_m); } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init draco.util.truncate"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule(modname); if (!m) goto end; p = PyObject_GetAttrString(m, "RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* PyObjectGetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; 
more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* PyIntCompare */ static CYTHON_INLINE PyObject* __Pyx_PyInt_NeObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED long inplace) { if (op1 == op2) { Py_RETURN_FALSE; } #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long a = PyInt_AS_LONG(op1); if (a != b) Py_RETURN_TRUE; else Py_RETURN_FALSE; } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { int unequal; unsigned long uintval; Py_ssize_t size = Py_SIZE(op1); const digit* digits = ((PyLongObject*)op1)->ob_digit; if (intval == 0) { if (size != 0) Py_RETURN_TRUE; else Py_RETURN_FALSE; } else if (intval < 0) { if (size >= 0) Py_RETURN_TRUE; intval = -intval; size = -size; } else { if (size <= 0) Py_RETURN_TRUE; } uintval = (unsigned long) intval; #if PyLong_SHIFT 
* 4 < SIZEOF_LONG*8 if (uintval >> (PyLong_SHIFT * 4)) { unequal = (size != 5) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[3] != ((uintval >> (3 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[4] != ((uintval >> (4 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); } else #endif #if PyLong_SHIFT * 3 < SIZEOF_LONG*8 if (uintval >> (PyLong_SHIFT * 3)) { unequal = (size != 4) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[3] != ((uintval >> (3 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); } else #endif #if PyLong_SHIFT * 2 < SIZEOF_LONG*8 if (uintval >> (PyLong_SHIFT * 2)) { unequal = (size != 3) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); } else #endif #if PyLong_SHIFT * 1 < SIZEOF_LONG*8 if (uintval >> (PyLong_SHIFT * 1)) { unequal = (size != 2) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); } else #endif unequal = (size != 1) || (((unsigned long) digits[0]) != (uintval & (unsigned long) PyLong_MASK)); if (unequal != 0) Py_RETURN_TRUE; else Py_RETURN_FALSE; } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); if ((double)a != (double)b) Py_RETURN_TRUE; else Py_RETURN_FALSE; } return ( PyObject_RichCompare(op1, op2, Py_NE)); } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject 
*result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); 
Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if 
(PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. 
*/ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = __Pyx_PyFrame_GetLocalsplus(f); for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = 
PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } } #endif /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( 
PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if (likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* PyDictVersioning */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { PyObject *dict = Py_TYPE(obj)->tp_dict; return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; } static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { PyObject **dictptr = NULL; Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; if (offset) { #if CYTHON_COMPILING_IN_CPYTHON dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); #else dictptr = _PyObject_GetDictPtr(obj); #endif } return (dictptr && *dictptr) ? 
__PYX_GET_DICT_VERSION(*dictptr) : 0; } static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { PyObject *dict = Py_TYPE(obj)->tp_dict; if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) return 0; return obj_dict_version == __Pyx_get_object_dict_version(obj); } #endif /* GetModuleGlobalName */ #if CYTHON_USE_DICT_VERSIONS static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) #else static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) #endif { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } else if (unlikely(PyErr_Occurred())) { return NULL; } #else result = PyDict_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } #endif #else result = PyObject_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } PyErr_Clear(); #endif return __Pyx_GetBuiltinName(name); } /* PyObjectCall2Args */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { PyObject *args, *result = NULL; #if CYTHON_FAST_PYCALL if (PyFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyFunction_FastCall(function, args, 2); } #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyCFunction_FastCall(function, args, 2); } #endif args = PyTuple_New(2); if (unlikely(!args)) goto done; Py_INCREF(arg1); 
PyTuple_SET_ITEM(args, 0, arg1); Py_INCREF(arg2); PyTuple_SET_ITEM(args, 1, arg2); Py_INCREF(function); result = __Pyx_PyObject_Call(function, args, NULL); Py_DECREF(args); Py_DECREF(function); done: return result; } /* MemviewSliceInit */ static int __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference) { __Pyx_RefNannyDeclarations int i, retval=-1; Py_buffer *buf = &memview->view; __Pyx_RefNannySetupContext("init_memviewslice", 0); if (unlikely(memviewslice->memview || memviewslice->data)) { PyErr_SetString(PyExc_ValueError, "memviewslice is already initialized!"); goto fail; } if (buf->strides) { for (i = 0; i < ndim; i++) { memviewslice->strides[i] = buf->strides[i]; } } else { Py_ssize_t stride = buf->itemsize; for (i = ndim - 1; i >= 0; i--) { memviewslice->strides[i] = stride; stride *= buf->shape[i]; } } for (i = 0; i < ndim; i++) { memviewslice->shape[i] = buf->shape[i]; if (buf->suboffsets) { memviewslice->suboffsets[i] = buf->suboffsets[i]; } else { memviewslice->suboffsets[i] = -1; } } memviewslice->memview = memview; memviewslice->data = (char *)buf->buf; if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { Py_INCREF(memview); } retval = 0; goto no_fail; fail: memviewslice->memview = 0; memviewslice->data = 0; retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } #ifndef Py_NO_RETURN #define Py_NO_RETURN #endif static void __pyx_fatalerror(const char *fmt, ...) 
Py_NO_RETURN { va_list vargs; char msg[200]; #ifdef HAVE_STDARG_PROTOTYPES va_start(vargs, fmt); #else va_start(vargs); #endif vsnprintf(msg, 200, fmt, vargs); va_end(vargs); Py_FatalError(msg); } static CYTHON_INLINE int __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)++; PyThread_release_lock(lock); return result; } static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)--; PyThread_release_lock(lock); return result; } static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int first_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (unlikely(!memview || (PyObject *) memview == Py_None)) return; if (unlikely(__pyx_get_slice_count(memview) < 0)) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); first_time = __pyx_add_acquisition_count(memview) == 0; if (unlikely(first_time)) { if (have_gil) { Py_INCREF((PyObject *) memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_INCREF((PyObject *) memview); PyGILState_Release(_gilstate); } } } static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int last_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (unlikely(!memview || (PyObject *) memview == Py_None)) { memslice->memview = NULL; return; } if (unlikely(__pyx_get_slice_count(memview) <= 0)) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); last_time = __pyx_sub_acquisition_count(memview) == 1; memslice->data = NULL; if (unlikely(last_time)) { if (have_gil) { Py_CLEAR(memslice->memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_CLEAR(memslice->memview); 
PyGILState_Release(_gilstate); } } else { memslice->memview = NULL; } } /* GetTopmostException */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate) { _PyErr_StackItem *exc_info = tstate->exc_info; while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && exc_info->previous_item != NULL) { exc_info = exc_info->previous_item; } return exc_info; } #endif /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); *type = exc_info->exc_type; *value = exc_info->exc_value; *tb = exc_info->exc_traceback; #else *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; #endif Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = type; exc_info->exc_value = value; exc_info->exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { if (__Pyx_PyErr_GivenExceptionMatches(exc_type, 
PyTuple_GET_ITEM(tuple, i))) return 1; } return 0; } static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) #endif { PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if CYTHON_USE_EXC_INFO_STACK { _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = local_type; exc_info->exc_value = local_value; exc_info->exc_traceback = local_tb; } #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif 
Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* ArgTypeTest */ static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } else if (exact) { #if PY_MAJOR_VERSION == 2 if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(__Pyx_TypeCheck(obj, type))) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result; #if CYTHON_USE_UNICODE_INTERNALS Py_hash_t hash1, hash2; hash1 = ((PyBytesObject*)s1)->ob_shash; hash2 = ((PyBytesObject*)s2)->ob_shash; if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { return (equals == Py_NE); } #endif result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? 
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } #if CYTHON_USE_UNICODE_INTERNALS { Py_hash_t hash1, hash2; #if CYTHON_PEP393_ENABLED hash1 = ((PyASCIIObject*)s1)->hash; hash2 = ((PyASCIIObject*)s2)->hash; #else hash1 = ((PyUnicodeObject*)s1)->hash; hash2 = ((PyUnicodeObject*)s2)->hash; #endif if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { goto return_ne; } } #endif kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto 
return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = __Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } /* None */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { Py_ssize_t q = a / b; Py_ssize_t r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } /* GetAttr */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_USE_TYPE_SLOTS #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } /* GetItemInt */ static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyList_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, 
PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyTuple_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* ObjectGetItem */ #if CYTHON_USE_TYPE_SLOTS static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { PyObject *runerr; Py_ssize_t key_value; PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; if (unlikely(!(m && m->sq_item))) { PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); return NULL; } key_value = __Pyx_PyIndex_AsSsize_t(index); if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); } if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { PyErr_Clear(); PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); } return NULL; } static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; if (likely(m && m->mp_subscript)) { return m->mp_subscript(obj, key); } return __Pyx_PyObject_GetIndex(obj, key); } #endif /* decode_c_string */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { size_t slen = strlen(cstring); if (unlikely(slen > (size_t) 
PY_SSIZE_T_MAX)) { PyErr_SetString(PyExc_OverflowError, "c-string too long to convert to Python"); return NULL; } length = (Py_ssize_t) slen; if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } if (unlikely(stop <= start)) return __Pyx_NewRef(__pyx_empty_unicode); length = stop - start; cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } } /* GetAttr3 */ static PyObject *__Pyx_GetAttr3Default(PyObject *d) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; __Pyx_PyErr_Clear(); Py_INCREF(d); return d; } static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { PyObject *r = __Pyx_GetAttr(o, n); return (likely(r)) ? r : __Pyx_GetAttr3Default(d); } /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
"" : "s"); } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(__Pyx_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* SwapException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = *type; exc_info->exc_value = *value; exc_info->exc_traceback = *tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list 
= empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? 
PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; assert(PyExceptionClass_Check(exc_type)); n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { PyObject *t = PyTuple_GET_ITEM(tuple, i); #if PY_MAJOR_VERSION < 3 if (likely(exc_type == t)) return 1; #endif if (likely(PyExceptionClass_Check(t))) { if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; } else { } } return 0; } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { if (likely(PyExceptionClass_Check(exc_type))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } else if (likely(PyTuple_Check(exc_type))) { return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); } else { } } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { assert(PyExceptionClass_Check(exc_type1)); assert(PyExceptionClass_Check(exc_type2)); if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return 
__Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { (void)inplace; (void)zerodivision_check; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a + b); if (likely((x^a) >= 0 || (x^b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_add(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? 
digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << 
PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; default: return PyLong_Type.tp_as_number->nb_add(op1, op2); } } x = a + b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla + llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("add", return NULL) result = ((double)a) + (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? 
PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); } #endif /* None */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); } /* None */ static CYTHON_INLINE long __Pyx_div_long(long a, long b) { long q = a / b; long r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } /* ImportFrom */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Format(PyExc_ImportError, #if PY_MAJOR_VERSION < 3 "cannot import name %.230s", PyString_AS_STRING(name)); #else "cannot import name %S", name); #endif } return value; } /* HasAttr */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { PyObject *r; if (unlikely(!__Pyx_PyBaseString_Check(n))) { PyErr_SetString(PyExc_TypeError, "hasattr(): attribute name must be string"); return -1; } r = __Pyx_GetAttr(o, n); if (unlikely(!r)) { PyErr_Clear(); return 0; } else { Py_DECREF(r); return 1; } } /* PyObject_GenericGetAttrNoDict */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { PyErr_Format(PyExc_AttributeError, #if PY_MAJOR_VERSION >= 3 "'%.50s' object has no attribute '%U'", tp->tp_name, attr_name); #else "'%.50s' object has no attribute '%.400s'", tp->tp_name, PyString_AS_STRING(attr_name)); #endif return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { PyObject *descr; PyTypeObject *tp = Py_TYPE(obj); if (unlikely(!PyString_Check(attr_name))) { return PyObject_GenericGetAttr(obj, attr_name); } assert(!tp->tp_dictoffset); descr = _PyType_Lookup(tp, attr_name); if (unlikely(!descr)) { return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); } 
Py_INCREF(descr); #if PY_MAJOR_VERSION < 3 if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) #endif { descrgetfunc f = Py_TYPE(descr)->tp_descr_get; if (unlikely(f)) { PyObject *res = f(descr, obj, (PyObject *)tp); Py_DECREF(descr); return res; } } return descr; } #endif /* PyObject_GenericGetAttr */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { return PyObject_GenericGetAttr(obj, attr_name); } return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); } #endif /* SetVTable */ static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 PyObject *ob = PyCapsule_New(vtable, 0, 0); #else PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); #endif if (!ob) goto bad; if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) goto bad; Py_DECREF(ob); return 0; bad: Py_XDECREF(ob); return -1; } /* PyObjectGetAttrStrNoError */ static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) __Pyx_PyErr_Clear(); } static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { PyObject *result; #if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1 PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); } #endif result = __Pyx_PyObject_GetAttrStr(obj, attr_name); if (unlikely(!result)) { __Pyx_PyObject_GetAttrStr_ClearAttributeError(); } return result; } /* SetupReduce */ static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { int ret; PyObject *name_attr; name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2); if (likely(name_attr)) { ret = 
PyObject_RichCompareBool(name_attr, name, Py_EQ); } else { ret = -1; } if (unlikely(ret < 0)) { PyErr_Clear(); ret = 0; } Py_XDECREF(name_attr); return ret; } static int __Pyx_setup_reduce(PyObject* type_obj) { int ret = 0; PyObject *object_reduce = NULL; PyObject *object_reduce_ex = NULL; PyObject *reduce = NULL; PyObject *reduce_ex = NULL; PyObject *reduce_cython = NULL; PyObject *setstate = NULL; PyObject *setstate_cython = NULL; #if CYTHON_USE_PYTYPE_LOOKUP if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #else if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #endif #if CYTHON_USE_PYTYPE_LOOKUP object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #else object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #endif reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD; if (reduce_ex == object_reduce_ex) { #if CYTHON_USE_PYTYPE_LOOKUP object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #else object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #endif reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD; if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython); if (likely(reduce_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if (reduce == object_reduce || PyErr_Occurred()) { goto __PYX_BAD; } setstate = 
__Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); if (!setstate) PyErr_Clear(); if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython); if (likely(setstate_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if (!setstate || PyErr_Occurred()) { goto __PYX_BAD; } } PyType_Modified((PyTypeObject*)type_obj); } } goto __PYX_GOOD; __PYX_BAD: if (!PyErr_Occurred()) PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); ret = -1; __PYX_GOOD: #if !CYTHON_USE_PYTYPE_LOOKUP Py_XDECREF(object_reduce); Py_XDECREF(object_reduce_ex); #endif Py_XDECREF(reduce); Py_XDECREF(reduce_ex); Py_XDECREF(reduce_cython); Py_XDECREF(setstate); Py_XDECREF(setstate_cython); return ret; } /* TypeImport */ #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size) { PyObject *result = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif result = PyObject_GetAttrString(module, class_name); if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if ((size_t)basicsize 
< size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s size changed, may indicate binary incompatibility. " "Expected %zd from C header, got %zd from PyObject", module_name, class_name, size, basicsize); goto bad; } if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s size changed, may indicate binary incompatibility. " "Expected %zd from C header, got %zd from PyObject", module_name, class_name, size, basicsize); goto bad; } else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility. " "Expected %zd from C header, got %zd from PyObject", module_name, class_name, size, basicsize); if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(result); return NULL; } #endif /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif if (unlikely(!__pyx_cython_runtime)) { return c_line; } __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { __PYX_PY_DICT_LOOKUP_IF_MODIFIED( use_cline, *cython_runtime_dict, __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? 
/* Tail of __Pyx_CLineForTraceback (function opens on an earlier line):
 * decides whether the C source line number is reported in Python tracebacks,
 * controlled by the `cline_in_traceback` attribute on the cython_runtime
 * module.  Must be exception-neutral: it runs during traceback creation. */
                  Py_False : Py_True;
            Py_DECREF(use_cline_obj);
        } else {
            /* Attribute lookup failed: swallow the error and fall back. */
            PyErr_Clear();
            use_cline = NULL;
        }
    }
    if (!use_cline) {
        c_line = 0;
        /* Cache an explicit False so later lookups take the fast path.
           NOTE(review): SetAttr result deliberately ignored - best effort. */
        PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
    } else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
        c_line = 0;
    }
    /* Restore the exception state fetched at function entry. */
    __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
    return c_line;
}
#endif

/* CodeObjectCache */
/* Binary search over the sorted cache: returns the index of `code_line`,
   or the insertion point that keeps `entries` sorted when absent. */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
    int start = 0, mid = 0, end = count - 1;
    if (end >= 0 && code_line > entries[end].code_line) {
        return count;
    }
    while (start < end) {
        mid = start + (end - start) / 2;  /* overflow-safe midpoint */
        if (code_line < entries[mid].code_line) {
            end = mid;
        } else if (code_line > entries[mid].code_line) {
            start = mid + 1;
        } else {
            return mid;
        }
    }
    if (code_line <= entries[mid].code_line) {
        return mid;
    } else {
        return mid + 1;
    }
}
/* Look up the cached PyCodeObject for `code_line`; returns a NEW reference,
   or NULL when not cached.  code_line 0 means "no line" and is never cached. */
static PyCodeObject *__pyx_find_code_object(int code_line) {
    PyCodeObject* code_object;
    int pos;
    if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
        return NULL;
    }
    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    if (unlikely(pos >= __pyx_code_cache.count) ||
            unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
        return NULL;
    }
    code_object = __pyx_code_cache.entries[pos].code_object;
    Py_INCREF(code_object);
    return code_object;
}
/* Insert (code_line -> code_object) into the sorted cache, taking a new
   reference on the code object.  Continues on a later line. */
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
    int pos, i;
    __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
    if (unlikely(!code_line)) {
        return;
    }
    if (unlikely(!entries)) {
        /* First insertion: allocate the initial 64-entry table.
           Allocation failure is tolerated - caching is best effort. */
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
        if (likely(entries)) {
            __pyx_code_cache.entries = entries;
            __pyx_code_cache.max_count = 64;
            __pyx_code_cache.count = 1;
            entries[0].code_line = code_line;
            entries[0].code_object = code_object;
            Py_INCREF(code_object);
        }
        return;
    }
    pos =
__pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ 
py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if ((0)) {} view->obj = NULL; Py_DECREF(obj); } #endif /* MemviewSliceIsContig */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) { int i, index, step, start; Py_ssize_t itemsize = mvs.memview->view.itemsize; if (order == 'F') { 
/* Tail of __pyx_memviewslice_is_contig (opens on an earlier line): checks
 * whether the slice is contiguous in 'F' (Fortran) or 'C' order by walking
 * the dimensions in the appropriate direction. */
        step = 1;
        start = 0;
    } else {
        /* 'C' order: walk dimensions from last to first. */
        step = -1;
        start = ndim - 1;
    }
    for (i = 0; i < ndim; i++) {
        index = start + step * i;
        /* An indirect dimension (suboffset >= 0) or a stride that differs
           from the accumulated element size breaks contiguity. */
        if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize)
            return 0;
        itemsize *= mvs.shape[index];
    }
    return 1;
}

/* OverlappingSlices */
/* Compute the [start, end) byte extent covered by `slice`, accounting for
   negative strides.  A zero-extent dimension yields an empty range. */
static void __pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
                                           void **out_start, void **out_end,
                                           int ndim, size_t itemsize) {
    char *start, *end;
    int i;
    start = end = slice->data;
    for (i = 0; i < ndim; i++) {
        Py_ssize_t stride = slice->strides[i];
        Py_ssize_t extent = slice->shape[i];
        if (extent == 0) {
            *out_start = *out_end = start;
            return;
        } else {
            if (stride > 0)
                end += stride * (extent - 1);
            else
                start += stride * (extent - 1);  /* negative stride grows downward */
        }
    }
    *out_start = start;
    *out_end = end + itemsize;  /* one past the last byte of the last element */
}
/* True when the memory ranges of the two slices intersect
   (half-open interval overlap test). */
static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
                                __Pyx_memviewslice *slice2,
                                int ndim, size_t itemsize) {
    void *start1, *end1, *start2, *end2;
    __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
    __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);
    return (start1 < end2) && (start2 < end1);
}
/* Capsule */
/* Wrap a raw pointer in a PyCapsule (PyCObject on pre-2.7 Pythons). */
static CYTHON_INLINE PyObject *
__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig)
{
    PyObject *cobj;
#if PY_VERSION_HEX >= 0x02070000
    cobj = PyCapsule_New(p, sig, NULL);
#else
    cobj = PyCObject_FromVoidPtr(p, NULL);
#endif
    return cobj;
}
/* IsLittleEndian */
/* Runtime endianness probe: store a known 32-bit pattern and inspect the
   lowest-addressed byte. */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void)
{
    union {
        uint32_t u32;
        uint8_t u8[4];
    } S;
    S.u32 = 0x01020304;
    return S.u8[0] == 4;
}
/* BufferFormatCheck */
/* Reset the PEP 3118 format-string parser state.  `stack` tracks nested
   struct fields with `type` as the root expected dtype.
   Continues on a later line. */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
                              __Pyx_BufFmt_StackElem* stack,
                              __Pyx_TypeInfo* type) {
    stack[0].field = &ctx->root;
    stack[0].parent_offset = 0;
    ctx->root.type = type;
    ctx->root.name = "buffer dtype";
    ctx->root.offset = 0;
    ctx->head = stack;
    ctx->head->field = &ctx->root;
    ctx->fmt_offset = 0;
    ctx->head->parent_offset = 0;
    ctx->new_packmode = '@';
    ctx->enc_packmode = '@';
    ctx->new_count = 1;
    ctx->enc_count = 0;
    ctx->enc_type =
0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t <= '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case '?': return "'bool'"; case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? 
"'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. 
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case '?': case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 
'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static PyObject * 
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
    /* Parse a "(d1,d2,...)" array-dimension group from a PEP 3118 format
       string at *tsp and validate each dimension against the expected
       dtype's arraysize[].  On success advances *tsp past the ')' and
       returns Py_None as a non-NULL sentinel (callers only NULL-test it);
       returns NULL with a Python exception set on any mismatch. */
    const char *ts = *tsp;
    int i = 0, number, ndim;
    ++ts;  /* step over the opening '(' */
    if (ctx->new_count != 1) {
        PyErr_SetString(PyExc_ValueError,
                        "Cannot handle repeated arrays in format string");
        return NULL;
    }
    if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
    ndim = ctx->head->field->type->ndim;
    while (*ts && *ts != ')') {
        switch (*ts) {
            /* BUG FIX: the original executed a bare `continue` here without
               advancing `ts`, so any whitespace inside the parentheses made
               this loop spin forever re-testing the same character.
               Advance past the whitespace before continuing. */
            case ' ': case '\f': case '\r': case '\n': case '\t': case '\v':
                ++ts;
                continue;
            default:
                break;
        }
        number = __Pyx_BufFmt_ExpectNumber(&ts);
        if (number == -1) return NULL;  /* exception already set */
        if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
            return PyErr_Format(PyExc_ValueError,
                                "Expected a dimension of size %zu, got %d",
                                ctx->head->field->type->arraysize[i], number);
        if (*ts != ',' && *ts != ')')
            return PyErr_Format(PyExc_ValueError,
                                "Expected a comma in format string, got '%c'", *ts);
        if (*ts == ',') ts++;
        i++;
    }
    if (i != ndim)
        return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
                            ctx->head->field->type->ndim, i);
    if (!*ts) {
        PyErr_SetString(PyExc_ValueError,
                        "Unexpected end of format string, expected ')'");
        return NULL;
    }
    ctx->is_valid_array = 1;
    ctx->new_count = 1;
    *tsp = ++ts;
    return Py_None;  /* sentinel only; no reference semantics relied upon */
}
/* Head of __Pyx_BufFmt_CheckString (continues on later lines): walks a
   PEP 3118 format string and validates it against the expected dtype. */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
    int got_Z = 0;
    while (1) {
        switch(*ts) {
        case 0:
            if (ctx->enc_type != 0 && ctx->head == NULL) {
                __Pyx_BufFmt_RaiseExpected(ctx);
                return NULL;
            }
            if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
            if (ctx->head != NULL) {
                __Pyx_BufFmt_RaiseExpected(ctx);
                return NULL;
            }
            return ts;
        case ' ':
        case '\r':
        case '\n':
            ++ts;
            break;
        case '<':
            if (!__Pyx_Is_Little_Endian()) {
                PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
                return NULL;
            }
            ctx->new_packmode = '=';
            ++ts;
            break;
        case '>':
        case '!':
            if (__Pyx_Is_Little_Endian()) {
                PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
                return NULL;
            }
ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } CYTHON_FALLTHROUGH; case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) && (ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } CYTHON_FALLTHROUGH; case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': 
++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } /* TypeInfoCompare */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else { return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return !a->fields[i].type && !b->fields[i].type; } } return 1; } /* MemviewSliceValidateAndInit */ static int __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (unlikely(buf->strides[dim] != sizeof(void *))) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (unlikely(buf->strides[dim] != buf->itemsize)) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (unlikely(stride < buf->itemsize)) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same 
dimension."); goto fail; } } } else { if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not contiguous in " "dimension %d", dim); goto fail; } else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not indirect in " "dimension %d", dim); goto fail; } else if (unlikely(buf->suboffsets)) { PyErr_SetString(PyExc_ValueError, "Buffer exposes suboffsets but no strides"); goto fail; } } return 1; fail: return 0; } static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) { if (spec & __Pyx_MEMVIEW_DIRECT) { if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) { PyErr_Format(PyExc_ValueError, "Buffer not compatible with direct access " "in dimension %d.", dim); goto fail; } } if (spec & __Pyx_MEMVIEW_PTR) { if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly accessible " "in dimension %d.", dim); goto fail; } } return 1; fail: return 0; } static int __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) { int i; if (c_or_f_flag & __Pyx_IS_F_CONTIG) { Py_ssize_t stride = 1; for (i = 0; i < ndim; i++) { if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { PyErr_SetString(PyExc_ValueError, "Buffer not fortran contiguous."); goto fail; } stride = stride * buf->shape[i]; } } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { Py_ssize_t stride = 1; for (i = ndim - 1; i >- 1; i--) { if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { PyErr_SetString(PyExc_ValueError, "Buffer not C contiguous."); goto fail; } stride = stride * buf->shape[i]; } } return 1; fail: return 0; } static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj) { struct 
__pyx_memoryview_obj *memview, *new_memview; __Pyx_RefNannyDeclarations Py_buffer *buf; int i, spec = 0, retval = -1; __Pyx_BufFmt_Context ctx; int from_memoryview = __pyx_memoryview_check(original_obj); __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) { memview = (struct __pyx_memoryview_obj *) original_obj; new_memview = NULL; } else { memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( original_obj, buf_flags, 0, dtype); new_memview = memview; if (unlikely(!memview)) goto fail; } buf = &memview->view; if (unlikely(buf->ndim != ndim)) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", ndim, buf->ndim); goto fail; } if (new_memview) { __Pyx_BufFmt_Init(&ctx, stack, dtype); if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail; } if (unlikely((unsigned) buf->itemsize != dtype->size)) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, dtype->size, (dtype->size > 1) ? 
"s" : ""); goto fail; } if (buf->len > 0) { for (i = 0; i < ndim; i++) { spec = axes_specs[i]; if (unlikely(!__pyx_check_strides(buf, i, ndim, spec))) goto fail; if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec))) goto fail; } if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))) goto fail; } if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, new_memview != NULL) == -1)) { goto fail; } retval = 0; goto no_fail; fail: Py_XDECREF(new_memview); retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_float(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS_RO | writable_flag, 1, &__Pyx_TypeInfo_float, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* MemviewDtypeToObject */ static CYTHON_INLINE PyObject *__pyx_memview_get_float(const char *itemp) { return (PyObject *) PyFloat_FromDouble(*(float *) itemp); } static CYTHON_INLINE int __pyx_memview_set_float(const char *itemp, PyObject *obj) { float value = __pyx_PyFloat_AsFloat(obj); if ((value == (float)-1) && PyErr_Occurred()) return 0; *(float *) itemp = value; return 1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return 
PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* Declarations */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif /* Arithmetic */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, 
                                          __pyx_t_float_complex b) {
    return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
    __pyx_t_float_complex z;
    z.real = a.real + b.real;
    z.imag = a.imag + b.imag;
    return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
    __pyx_t_float_complex z;
    z.real = a.real - b.real;
    z.imag = a.imag - b.imag;
    return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
    __pyx_t_float_complex z;
    z.real = a.real * b.real - a.imag * b.imag;
    z.imag = a.real * b.imag + a.imag * b.real;
    return z;
}
#if 1
/* Complex division with Smith-style scaling: divide by whichever of
   b.real/b.imag is larger in magnitude to avoid intermediate overflow.
   When b is exactly zero the 0/0 divisions intentionally produce IEEE
   inf/nan results instead of trapping. */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
    if (b.imag == 0) {
        return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real);
    } else if (fabsf(b.real) >= fabsf(b.imag)) {
        if (b.real == 0 && b.imag == 0) {
            return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag);
        } else {
            float r = b.imag / b.real;
            float s = (float)(1.0) / (b.real + b.imag * r);
            return __pyx_t_float_complex_from_parts(
                (a.real + a.imag * r) * s, (a.imag - a.real * r) * s);
        }
    } else {
        float r = b.real / b.imag;
        float s = (float)(1.0) / (b.imag + b.real * r);
        return __pyx_t_float_complex_from_parts(
            (a.real * r + a.imag) * s, (a.imag * r - a.real) * s);
    }
}
#else
/* Textbook division; kept for reference, not compiled. */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
    if (b.imag == 0) {
        return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real);
    } else {
        float denom = b.real * b.real + b.imag * b.imag;
        return __pyx_t_float_complex_from_parts(
            (a.real * b.real + a.imag * b.imag) / denom,
            (a.imag * b.real - a.real * b.imag) / denom);
    }
}
#endif
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) {
    __pyx_t_float_complex z;
    z.real = -a.real;
    z.imag = -a.imag;
    return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) {
    return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) {
    __pyx_t_float_complex z;
    z.real = a.real;
    z.imag = -a.imag;
    return z;
}
#if 1
static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) {
    /* Prefer hypotf (no intermediate overflow) when available. */
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
    return sqrtf(z.real*z.real + z.imag*z.imag);
#else
    return hypotf(z.real, z.imag);
#endif
}
/* Complex power a**b.  Small exact integer exponents (|n| <= 4) use
   repeated multiplication (negative n via 1/a first); everything else goes
   through the polar form r*e^(i*theta). */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
    __pyx_t_float_complex z;
    float r, lnr, theta, z_r, z_theta;
    if (b.imag == 0 && b.real == (int)b.real) {
        if (b.real < 0) {
            float denom = a.real * a.real + a.imag * a.imag;
            a.real = a.real / denom;
            a.imag = -a.imag / denom;
            b.real = -b.real;
        }
        switch ((int)b.real) {
            case 0:
                z.real = 1;
                z.imag = 0;
                return z;
            case 1:
                return a;
            case 2:
                return __Pyx_c_prod_float(a, a);
            case 3:
                z = __Pyx_c_prod_float(a, a);
                return __Pyx_c_prod_float(z, a);
            case 4:
                z = __Pyx_c_prod_float(a, a);
                return __Pyx_c_prod_float(z, z);
        }
    }
    if (a.imag == 0) {
        if (a.real == 0) {
            return a;
        } else if (b.imag == 0) {
            /* Purely real base and exponent: defer to powf. */
            z.real = powf(a.real, b.real);
            z.imag = 0;
            return z;
        } else if (a.real > 0) {
            r = a.real;
            theta = 0;
        } else {
            /* Negative real base: magnitude -a.real, angle pi. */
            r = -a.real;
            theta = atan2f(0.0, -1.0);
        }
    } else {
        r = __Pyx_c_abs_float(a);
        theta = atan2f(a.imag, a.real);
    }
    lnr = logf(r);
    z_r = expf(lnr * b.real - theta * b.imag);
    z_theta = theta * b.real + lnr * b.imag;
    z.real = z_r * cosf(z_theta);
    z.imag = z_r * sinf(z_theta);
    return z;
}
#endif
#endif

/* Declarations */
/* Double-precision counterpart of the float complex constructor above. */
#if CYTHON_CCOMPLEX
  #ifdef __cplusplus
    static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
      return ::std::complex< double >(x, y);
    }
  #else
    static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
      return x + y*(__pyx_t_double_complex)_Complex_I;
    }
  #endif
#else
    static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
      __pyx_t_double_complex z;
      z.real = x;
      z.imag = y;
      return z;
    }
#endif

/* Arithmetic */
/* Struct-based fallback arithmetic for double complex; mirrors the float
   versions above and is only compiled when CYTHON_CCOMPLEX is false. */
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
    return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
    __pyx_t_double_complex z;
    z.real = a.real + b.real;
    z.imag = a.imag + b.imag;
    return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
    __pyx_t_double_complex z;
    z.real = a.real - b.real;
    z.imag = a.imag - b.imag;
    return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
    __pyx_t_double_complex z;
    z.real = a.real * b.real - a.imag * b.imag;
    z.imag = a.real * b.imag + a.imag * b.real;
    return z;
}
#if 1
/* Smith-style scaled division; see the float version for rationale. */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
    if (b.imag == 0) {
        return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real);
    } else if (fabs(b.real) >= fabs(b.imag)) {
        if (b.real == 0 && b.imag == 0) {
            return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag);
        } else {
            double r = b.imag / b.real;
            double s = (double)(1.0) / (b.real + b.imag * r);
            return __pyx_t_double_complex_from_parts(
                (a.real + a.imag * r) * s, (a.imag - a.real * r) * s);
        }
    } else {
        double r = b.real / b.imag;
        double s = (double)(1.0) / (b.imag + b.real * r);
        return __pyx_t_double_complex_from_parts(
            (a.real * r + a.imag) * s, (a.imag * r - a.real) * s);
    }
}
#else
/* Textbook division; kept for reference, not compiled. */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
    if (b.imag == 0) {
        return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real);
    } else {
        double denom = b.real * b.real + b.imag * b.imag;
        return __pyx_t_double_complex_from_parts(
            (a.real * b.real + a.imag * b.imag) / denom,
            (a.imag * b.real - a.real * b.imag) / denom);
    }
}
#endif
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) {
    __pyx_t_double_complex z;
    z.real = -a.real;
    z.imag = -a.imag;
    return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) {
    return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) {
    __pyx_t_double_complex z;
    z.real = a.real;
    z.imag = -a.imag;
    return z;
}
#if 1
static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) {
    /* Prefer hypot (no intermediate overflow) when available. */
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
    return sqrt(z.real*z.real + z.imag*z.imag);
#else
    return hypot(z.real, z.imag);
#endif
}
/* Complex power a**b; same strategy as __Pyx_c_pow_float. */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
    __pyx_t_double_complex z;
    double r, lnr, theta, z_r, z_theta;
    if (b.imag == 0 && b.real == (int)b.real) {
        if (b.real < 0) {
            double denom = a.real * a.real + a.imag * a.imag;
            a.real = a.real / denom;
            a.imag = -a.imag / denom;
            b.real = -b.real;
        }
        switch ((int)b.real) {
            case 0:
                z.real = 1;
                z.imag = 0;
                return z;
            case 1:
                return a;
            case 2:
                return __Pyx_c_prod_double(a, a);
            case 3:
                z = __Pyx_c_prod_double(a, a);
                return __Pyx_c_prod_double(z, a);
            case 4:
                z = __Pyx_c_prod_double(a, a);
                return __Pyx_c_prod_double(z, z);
        }
    }
    if (a.imag == 0) {
        if (a.real == 0) {
            return a;
        } else if (b.imag == 0) {
            /* Purely real base and exponent: defer to pow. */
            z.real = pow(a.real, b.real);
            z.imag = 0;
            return z;
        } else if (a.real > 0) {
            r = a.real;
            theta = 0;
        } else {
            /* Negative real base: magnitude -a.real, angle pi. */
            r = -a.real;
            theta = atan2(0.0, -1.0);
        }
    } else {
        r = __Pyx_c_abs_double(a);
        theta = atan2(a.imag, a.real);
    }
    lnr = log(r);
    z_r = exp(lnr * b.real - theta * b.imag);
    z_theta = theta * b.real + lnr * b.imag;
    z.real = z_r * cos(z_theta);
    z.imag = z_r * sin(z_theta);
    return z;
}
#endif
#endif

/* MemviewSliceCopyTemplate */
static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object) { __Pyx_RefNannyDeclarations int i; __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; struct __pyx_memoryview_obj *from_memview = from_mvs->memview; Py_buffer *buf = &from_memview->view; PyObject *shape_tuple = NULL; PyObject *temp_int = NULL; struct __pyx_array_obj *array_obj = NULL; struct __pyx_memoryview_obj *memview_obj = NULL; __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); for (i = 0; i < ndim; i++) { if (unlikely(from_mvs->suboffsets[i] >= 0)) { PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " "indirect dimensions (axis %d)", i); goto fail; } } shape_tuple = PyTuple_New(ndim); if (unlikely(!shape_tuple)) { goto fail; } __Pyx_GOTREF(shape_tuple); for(i = 0; i < ndim; i++) { temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); if(unlikely(!temp_int)) { goto fail; } else { PyTuple_SET_ITEM(shape_tuple, i, temp_int); temp_int = NULL; } } array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); if (unlikely(!array_obj)) { goto fail; } __Pyx_GOTREF(array_obj); memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( (PyObject *) array_obj, contig_flag, dtype_is_object, from_mvs->memview->typeinfo); if (unlikely(!memview_obj)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) goto fail; if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 
                                                                    0;
    /* Compile-time probe of the signedness of `int`. */
    const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_Check(x))) {
        if (sizeof(int) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                goto raise_neg_overflow;
            }
            return (int) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
            /* Fast path: read up to four PyLong digits directly. */
#if CYTHON_USE_PYLONG_INTERNALS
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case 0: return (int) 0;
                case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
                case 2:
                    if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
                            return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
                            return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
                            return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
            }
#endif
#if CYTHON_COMPILING_IN_CPYTHON
            if (unlikely(Py_SIZE(x) < 0)) {
                goto raise_neg_overflow;
            }
#else
            {
                /* Portable sign check: x < False (i.e. x < 0). */
                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
                if (unlikely(result < 0))
                    return (int) -1;
                if (unlikely(result == 1))
                    goto raise_neg_overflow;
            }
#endif
            if (sizeof(int) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
            }
        } else {
            /* Signed target: same digit fast path, with negative sizes. */
#if CYTHON_USE_PYLONG_INTERNALS
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (int) 0;
                case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
                case  1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
                case -2:
                    if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
                            return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
                            return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
                            return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
                            return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
                            return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
                            return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
            }
#endif
            if (sizeof(int) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
            }
        }
        {
            /* Generic fallback: decode the PyLong's raw bytes. */
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            int val;
            PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
#endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (int) -1;
        }
    } else {
        /* Not an int/long: coerce via __index__/__int__ and recurse. */
        int val;
        PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
        if (!tmp) return (int) -1;
        val = __Pyx_PyInt_As_int(tmp);
        Py_DECREF(tmp);
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
                    "value too large to convert to int");
    return (int) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
                    "can't convert negative value to int");
    return (int) -1;
}

/* CIntFromPy */
/* Convert an arbitrary Python object to a C size_t with overflow checking;
   same structure as __Pyx_PyInt_As_int above. */
static CYTHON_INLINE size_t __Pyx_PyInt_As_size_t(PyObject *x) {
    const size_t neg_one = (size_t) ((size_t) 0 - (size_t) 1), const_zero = (size_t) 0;
    const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_Check(x))) {
        if (sizeof(size_t) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(size_t, long, PyInt_AS_LONG(x))
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                goto raise_neg_overflow;
            }
            return (size_t) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case 0: return (size_t) 0;
                case 1: __PYX_VERIFY_RETURN_INT(size_t, digit, digits[0])
                case 2:
                    if (8 * sizeof(size_t) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(size_t) >= 2 * PyLong_SHIFT) {
                            return (size_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(size_t) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((unsigned
                                long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(size_t) >= 3 * PyLong_SHIFT) {
                            return (size_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(size_t) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(size_t) >= 4 * PyLong_SHIFT) {
                            return (size_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
                        }
                    }
                    break;
            }
#endif
#if CYTHON_COMPILING_IN_CPYTHON
            if (unlikely(Py_SIZE(x) < 0)) {
                goto raise_neg_overflow;
            }
#else
            {
                /* Portable sign check: x < False (i.e. x < 0). */
                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
                if (unlikely(result < 0))
                    return (size_t) -1;
                if (unlikely(result == 1))
                    goto raise_neg_overflow;
            }
#endif
            if (sizeof(size_t) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT_EXC(size_t, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(size_t) <= sizeof(unsigned PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(size_t, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
            }
        } else {
            /* Signed target: same digit fast path, with negative sizes. */
#if CYTHON_USE_PYLONG_INTERNALS
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (size_t) 0;
                case -1: __PYX_VERIFY_RETURN_INT(size_t, sdigit, (sdigit) (-(sdigit)digits[0]))
                case  1: __PYX_VERIFY_RETURN_INT(size_t, digit, +digits[0])
                case -2:
                    if (8 * sizeof(size_t) - 1 > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT) {
                            return (size_t) (((size_t)-1)*(((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if (8 * sizeof(size_t) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT) {
                            return (size_t) ((((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if (8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT) {
                            return (size_t) (((size_t)-1)*(((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(size_t) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT) {
                            return (size_t) ((((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if (8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(size_t) - 1 > 4 * PyLong_SHIFT) {
                            return (size_t) (((size_t)-1)*(((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(size_t) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(size_t) - 1 > 4 * PyLong_SHIFT) {
                            return (size_t) ((((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));
                        }
                    }
                    break;
            }
#endif
            if (sizeof(size_t) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT_EXC(size_t, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(size_t) <= sizeof(PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(size_t, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
            }
        }
        {
            /* Generic fallback: decode the PyLong's raw bytes. */
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            size_t val;
            PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
#endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (size_t) -1;
        }
    } else {
        /* Not an int/long: coerce via __index__/__int__ and recurse. */
        size_t val;
        PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
        if (!tmp) return (size_t) -1;
        val = __Pyx_PyInt_As_size_t(tmp);
        Py_DECREF(tmp);
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
                    "value too large to convert to size_t");
    return (size_t) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
                    "can't convert negative value to size_t");
    return (size_t) -1;
}

/* CIntFromPy */
/* Convert an arbitrary Python object to a C long with overflow checking;
   same structure as __Pyx_PyInt_As_int above. */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
    const long neg_one = (long) ((long) 0 - (long) 1),
               const_zero = (long) 0;
    /* Compile-time probe of the signedness of `long` (always signed;
       generated uniformly with the unsigned converters). */
    const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_Check(x))) {
        if (sizeof(long) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                goto raise_neg_overflow;
            }
            return (long) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
            /* Fast path: read up to four PyLong digits directly. */
#if CYTHON_USE_PYLONG_INTERNALS
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case 0: return (long) 0;
                case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
                case 2:
                    if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
                            return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
                            return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
                            return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
            }
#endif
#if CYTHON_COMPILING_IN_CPYTHON
            if (unlikely(Py_SIZE(x) < 0)) {
                goto raise_neg_overflow;
            }
#else
            {
                /* Portable sign check: x < False (i.e. x < 0). */
                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
                if (unlikely(result < 0))
                    return (long) -1;
                if (unlikely(result == 1))
                    goto raise_neg_overflow;
            }
#endif
            if (sizeof(long) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
            }
        } else {
            /* Signed target: same digit fast path, with negative sizes. */
#if CYTHON_USE_PYLONG_INTERNALS
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (long) 0;
                case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
                case  1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
                case -2:
                    if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                            return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                            return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                            return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                            return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                            return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                            return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
            }
#endif
            if (sizeof(long) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
            }
        }
        {
            /* Generic fallback: decode the PyLong's raw bytes. */
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            long val;
            PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
#endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (long) -1;
        }
    } else {
        /* Not an int/long: coerce via __index__/__int__ and recurse. */
        long val;
        PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
        if (!tmp) return (long) -1;
        val = __Pyx_PyInt_As_long(tmp);
        Py_DECREF(tmp);
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
                    "value too large to convert to long");
    return (long) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
                    "can't convert negative value to long");
    return (long) -1;
}

/* CIntToPy */
/* Convert a C long to a new Python integer object; mirrors
   __Pyx_PyInt_From_int above. */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
    const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(long) < sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(long) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(long) <= sizeof(long)) {
            return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
        }
    }
    {
        /* Last resort: encode the raw bytes in native endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&value;
        return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned);
    }
}

/* CIntFromPy */
/* Convert an arbitrary Python object to a C char with overflow checking;
   same structure as __Pyx_PyInt_As_int above.  (Definition continues
   beyond this chunk of the file.) */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) {
    const char neg_one = (char) ((char) 0 - (char) 1), const_zero = (char) 0;
    /* char signedness is implementation-defined; probe it at compile time. */
    const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_Check(x))) {
        if (sizeof(char) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x))
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                goto raise_neg_overflow;
            }
            return (char) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case 0: return (char) 0;
                case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0])
                case 2:
                    if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) {
                            return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) {
                            return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) {
                            return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
                        }
                    }
                    break;
            }
#endif
#if CYTHON_COMPILING_IN_CPYTHON
            if (unlikely(Py_SIZE(x) < 0)) {
                goto raise_neg_overflow;
            }
#else
            {
                /* Portable sign check: x < False (i.e. x < 0). */
                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
                if (unlikely(result < 0))
                    return (char) -1;
                if (unlikely(result == 1))
                    goto raise_neg_overflow;
            }
#endif
            if (sizeof(char) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
            }
        } else {
            /* Signed target: same digit fast path, with negative sizes. */
#if CYTHON_USE_PYLONG_INTERNALS
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (char) 0;
                case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0]))
                case  1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0])
                case -2:
                    if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
                            return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
                            return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
                            return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
                            return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
                            return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
                            return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
                        }
                    }
                    break;
            }
#endif
            if (sizeof(char) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
            }
        }
        {
            /* Generic fallback: decode the PyLong's raw bytes. */
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            char val;
            PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
            if
(likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); return (char) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) 
return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { 
char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { int retval; if (unlikely(!x)) return -1; retval = __Pyx_PyObject_IsTrue(x); Py_DECREF(x); return retval; } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). " "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return 
__Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); 
return ival; } static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
omp_section_lastprivate.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include "omp_testsuite.h" int test_omp_section_lastprivate() { int i0 = -1; int sum = 0; int i; int sum0 = 0; int known_sum; i0 = -1; sum = 0; #pragma omp parallel { #pragma omp sections lastprivate(i0) private(i,sum0) { #pragma omp section { sum0 = 0; for (i = 1; i < 400; i++) { sum0 = sum0 + i; i0 = i; } #pragma omp critical { sum = sum + sum0; } /*end of critical*/ } /* end of section */ #pragma omp section { sum0 = 0; for(i = 400; i < 700; i++) { sum0 = sum0 + i; i0 = i; } #pragma omp critical { sum = sum + sum0; } /*end of critical*/ } #pragma omp section { sum0 = 0; for(i = 700; i < 1000; i++) { sum0 = sum0 + i; i0 = i; } #pragma omp critical { sum = sum + sum0; } /*end of critical*/ } /* end of section */ } /* end of sections*/ } /* end of parallel*/ known_sum = (999 * 1000) / 2; return ((known_sum == sum) && (i0 == 999) ); } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_section_lastprivate()) { num_failed++; } } return num_failed; }
cpu_transducer.h
#pragma once #include <tuple> #include <limits> #include <numeric> #include <iostream> #include <algorithm> #if !defined(TRANSDUCER_DISABLE_OMP)&&!defined(APPLE) #include <omp.h> #endif #include "transducer_helper.h" template <typename ProbT> class CpuTransducer { public: CpuTransducer(int alphabet_size, int minibatch, void* workspace, int num_threads, int null_label) : alphabet_size_(alphabet_size), minibatch_(minibatch), num_threads_(num_threads), workspace_(workspace), null_label_(null_label) { #if defined TRANSDUCER_DISABLE_OMP || defined(APPLE) #else if(num_threads>0) { omp_set_threads(num_threads); }else { num_threads_=omp_get_max_threads(); } #endif }; transducerStatus_t cost_and_grad(const ProbT* const predict_acts, const ProbT* const trans_acts, ProbT *predict_grads,ProbT *trans_grads, ProbT* costs, const int* const flat_labels, const int* const input_lengths, const int* const label_lengths); transducerStatus_t score_forward(const ProbT* const trans_acts, const ProbT* const predict_acts, ProbT* costs, const int* const flat_labels, const int* const input_lengths, const int* const label_lengths); private: class CpuTransducer_metadata { private: void setup_probs(const ProbT* const trans_exp,const ProbT* const predict_exp,ProbT* probs_tuk,int T,int U,int alphabet_size,int minibatch,int mb); public: CpuTransducer_metadata(const ProbT* const trans_exp,const ProbT * const predict_exp,int T, int U, int alphabet_size,int minibatch,int mb, void* workspace, size_t bytes_used); ProbT* alphas; ProbT* betas; ProbT* probs_tuk; ProbT* grads_tuk; }; int alphabet_size_; // Number of characters plus null label int minibatch_; int num_threads_; int null_label_; void* workspace_; void exp_matrix(const ProbT* const trans_act,const ProbT* const predict_probs,ProbT* trans_exp,ProbT* predict_exp,const int* const input_lengths,const int* const label_lengths); ProbT compute_alphas(const ProbT* const probs_tuk, ProbT *const alphas,int T,int U,const int * label); ProbT 
compute_betas_and_grad(ProbT * trans_grads,ProbT * predict_grads, const ProbT* const probs_tuk,ProbT* const grads_tuk,const int * const label,int U, int T,ProbT* alphas,ProbT* betas); std::pair<ProbT,bool> cost_and_grad_kernel(ProbT *grads_trans,ProbT * grads_predict, const ProbT* const trans_exp,const ProbT* predict_exp,const int* const labels,int T, int U,int alphabet_size,int minibatch,int mb, size_t bytes_used); }; template<typename ProbT> CpuTransducer<ProbT>::CpuTransducer_metadata::CpuTransducer_metadata(const ProbT* const trans_exp,const ProbT* const predict_exp,int T, int U, int alphabet_size,int minibatch,int mb,void* workspace, size_t bytes_used) { alphas = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used); bytes_used += sizeof(ProbT) * U * T; std::fill(alphas, alphas + U* T,0); betas=reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used); bytes_used += sizeof(ProbT) * U ; std::fill(betas,betas+U,0); probs_tuk = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used); bytes_used += sizeof(ProbT) *T*U*alphabet_size; grads_tuk=reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used); bytes_used+=sizeof(ProbT) *T*U*alphabet_size; setup_probs(trans_exp,predict_exp,probs_tuk,T,U,alphabet_size,minibatch,mb); } template<typename ProbT> void CpuTransducer<ProbT>::CpuTransducer_metadata::setup_probs(const ProbT* const trans_exp,const ProbT* const predict_exp,ProbT* probs_tuk,int T,int U,int alphabet_size,int minibatch,int mb) { ProbT sum; int tuk_index,trans_col_offset,predict_col_offset; for(int t=0;t<T;++t) { trans_col_offset=(mb+minibatch*t)*alphabet_size; for(int u=0;u<U;++u) { tuk_index=(u+t*U)*alphabet_size; predict_col_offset=(mb+minibatch*u)*alphabet_size; sum=0; for(int r=0;r<alphabet_size;r++) { ProbT tmp=trans_exp[trans_col_offset+r]*predict_exp[predict_col_offset+r]; probs_tuk[tuk_index+r]=tmp; sum+=tmp; } for(int r=0;r<alphabet_size;r++) { probs_tuk[tuk_index+r]/=sum; } } } } 
/* Exponentiates the valid region of both activation matrices in place into
 * the caller-provided exp buffers. Layout (established by the indexing
 * below): a column for frame c of minibatch element mb starts at
 * (mb + minibatch_ * c) * alphabet_size_. Only the first input_lengths[mb]
 * time frames and label_lengths[mb]+1 prediction frames are written;
 * anything beyond is left untouched. */
template<typename ProbT>
void CpuTransducer<ProbT>::exp_matrix(const ProbT* const trans_acts,const ProbT* const predict_acts,ProbT* trans_exp,ProbT* predict_exp,const int* const input_lengths,const int* const label_lengths) {
    for (int mb = 0; mb < minibatch_; ++mb) {
        for(int c=0;c<input_lengths[mb];++c) {
            for(int r = 0; r < alphabet_size_; ++r) {
                int col_offset = (mb + minibatch_ * c) * alphabet_size_;
                trans_exp[r + col_offset] = std::exp(trans_acts[r+col_offset]);
            }
        }
        for(int c=0;c<label_lengths[mb]+1;c++) {
            for(int r = 0; r < alphabet_size_; ++r) {
                int col_offset = (mb + minibatch_ * c) * alphabet_size_;
                predict_exp[r + col_offset] = std::exp(predict_acts[r+ col_offset]);
            }
        }
    }
}

/* Forward (alpha) recursion over the T x U lattice, in the probability
 * domain (not log-space). alpha(t,u) accumulates the null transition from
 * (t-1,u) and the label[u-1] emission from (t,u-1). Relies on alphas having
 * been zero-filled by the CpuTransducer_metadata constructor; only
 * alphas[0] is seeded here. Returns log P(label | input), i.e. the log of
 * alpha(T-1,U-1) times the final null emission. */
template<typename ProbT>
ProbT CpuTransducer<ProbT>::compute_alphas(const ProbT* const probs_tuk,ProbT * const alphas,int T,int U,const int* label) {
    alphas[0]=1;  // base case: empty prefix before any frame
    int tuk_index=0,tuk_null_index,tuk_forward_index;
    int alphabet_index=0;
    for(int t=0;t<T;t++) {
        alphabet_index=t*U;
        for(int u=0;u<U;u++) {
            if(t>0) {
                // null (blank) transition from (t-1, u)
                int tuk_index_tmp=(t-1)*U*alphabet_size_;
                tuk_null_index=tuk_index_tmp+u*alphabet_size_+null_label_;
                alphas[alphabet_index+u] += alphas[(t-1)*(U)+u]*probs_tuk[tuk_null_index];
            }
            if(u>0) {
                // emission of label[u-1] from (t, u-1)
                int tuk_index_tmp=t*U*alphabet_size_;
                tuk_forward_index=tuk_index_tmp+(u-1)*alphabet_size_+label[u-1];
                alphas[alphabet_index+u] += alphas[alphabet_index+u-1]*probs_tuk[tuk_forward_index];
            }
        }
    }
    // Terminal: final state must emit null to end the alignment.
    tuk_null_index=(T-1)*U*alphabet_size_+(U-1)*alphabet_size_+null_label_;
    ProbT loglike=alphas[(T-1)*U+(U-1)]*probs_tuk[tuk_null_index];
    return std::log(loglike);
}

/* Backward (beta) recursion plus gradient accumulation. betas is a single
 * row of length U, updated in place while sweeping t from T-1 down to 0:
 * betas[u] holds beta(t+1, u) until overwritten with beta(t, u).
 * grads_tuk receives dL/d(probs) per lattice cell; trans_grads/predict_grads
 * are then filled by chain-ruling through the per-cell softmax
 * normalization (O(T*U*K^2) loops below). Returns the backward
 * log-likelihood, which should match compute_alphas' result.
 *
 * NOTE(review): in the trans_grads accumulation the ternary
 * (k==k_tmp?1:0-probs_tuk[tuk_index+k]) parses as
 * k==k_tmp ? 1 : (0 - probs...), so the diagonal term is 1 rather than
 * (1 - p) -- presumably ((k==k_tmp)?1:0) - p was intended. The
 * predict_grads loop uses (1-probs...) with no Kronecker delta at all.
 * Verify both against the intended softmax-Jacobian formula before relying
 * on these gradients. */
template<typename ProbT>
ProbT CpuTransducer<ProbT>::compute_betas_and_grad(ProbT* trans_grads,ProbT* predict_grads, const ProbT* const probs_tuk,ProbT* const grads_tuk,const int * const label,int T, int U,ProbT* alphas,ProbT* betas) {
    std::fill(trans_grads, trans_grads + T*alphabet_size_, 0);
    std::fill(predict_grads, predict_grads + U*alphabet_size_, 0);
    int tuk_null_index=(T-1)*U*alphabet_size_+(U-1)*alphabet_size_+null_label_;
    // Seed the beta row with the terminal null-emission probability.
    std::fill(betas,betas+U,probs_tuk[tuk_null_index]);
    ProbT pr_yx=alphas[(T-1)*U+(U-1)]*probs_tuk[tuk_null_index];  // P(y|x)
    ProbT beta_tu=probs_tuk[tuk_null_index],beta_tu_=probs_tuk[tuk_null_index],beta_t_u=probs_tuk[tuk_null_index];
    for(int t=T-1;t>=0;t--) {
        int alphas_index=t*U;
        for(int u=U-1;u>=0;u--) {
            beta_tu_=beta_tu;     // beta(t, u+1) from the previous iteration
            beta_t_u=betas[u];    // beta(t+1, u) still stored in the row
            int tu_index=t*U*alphabet_size_+u*alphabet_size_;
            beta_tu=0;
            if(t<T-1) {
                beta_tu+=beta_t_u*probs_tuk[tu_index+null_label_];
            }
            if(u<U-1) {
                beta_tu+=beta_tu_*probs_tuk[tu_index+label[u]];
            }
            if(t==T-1&&u==U-1) {
                // terminal cell: beta is exactly the final null emission
                beta_tu=probs_tuk[tuk_null_index];
            }
            betas[u]=beta_tu;
            int grads_tuk_index=(u+t*U)*alphabet_size_;
            // dL/dp(k | t,u): only the null symbol and the next label have
            // nonzero gradient at this cell.
            for(int k=0;k<alphabet_size_;k++) {
                if(k==null_label_) {
                    grads_tuk[grads_tuk_index+k]=-(alphas[alphas_index+u]/pr_yx)*beta_t_u;
                } else if(k==label[u]) {
                    grads_tuk[grads_tuk_index+k]=-(alphas[alphas_index+u]/pr_yx)*beta_tu_;
                } else {
                    grads_tuk[grads_tuk_index+k]=0;
                }
            }
        }
    }
    // Back-propagate through the per-cell normalization into the
    // transcription network activations (sum over u and the softmax
    // Jacobian over k_tmp). See NOTE(review) above about the ternary.
    for(int t=0;t<T;t++) {
        int trans_grads_index=t*alphabet_size_;
        for(int k=0;k<alphabet_size_;k++) {
            for(int u=0;u<U;u++) {
                int tuk_index=(u+t*U)*alphabet_size_;
                for(int k_tmp=0;k_tmp<alphabet_size_;k_tmp++) {
                    trans_grads[trans_grads_index+k]+=grads_tuk[tuk_index+k_tmp]*probs_tuk[tuk_index+k_tmp]*(k==k_tmp?1:0-probs_tuk[tuk_index+k]);
                }
            }
        }
    }
    // Same chain rule for the prediction network, summing over t.
    for(int u=0;u<U;u++) {
        int predict_grads_index=u*alphabet_size_;
        for(int k=0;k<alphabet_size_;k++) {
            for(int t=0;t<T;t++) {
                int tuk_index=(u+t*U)*alphabet_size_;
                for(int k_tmp=0;k_tmp<alphabet_size_;k_tmp++) {
                    predict_grads[predict_grads_index+k]+=grads_tuk[tuk_index+k_tmp]*probs_tuk[tuk_index+k_tmp]*(1-probs_tuk[tuk_index+k]);
                }
            }
        }
    }
    // After the loops beta_tu holds beta(0,0) = P(y|x) from the backward pass.
    return std::log(beta_tu);
}

/* Runs the full forward-backward for one minibatch element: builds the
 * per-utterance scratch/metadata at the given workspace offset, computes
 * the forward and backward log-likelihoods, and returns (-llBackward,
 * over_threshold) where the flag marks a forward/backward disagreement
 * larger than transducer_helper::threshold. Note the metadata (and its
 * setup_probs pass) is constructed before the U-1 > T sanity check, so the
 * degenerate case still pays the setup cost. */
template<typename ProbT>
std::pair<ProbT,bool> CpuTransducer<ProbT>::cost_and_grad_kernel(ProbT *grads_trans,ProbT * grads_predict, const ProbT* const trans_exp,const ProbT* predict_exp,const int* const labels,int T, int U,int alphabet_size,int minibatch,int mb, size_t bytes_used) {
    {
        CpuTransducer_metadata transducerm(trans_exp,predict_exp,T,U, alphabet_size,minibatch,mb,workspace_, bytes_used);
        bool over_threshold = false;
        if (U-1> T) {
            // More labels than time frames: no valid alignment exists.
            return std::make_pair(ProbT(0), over_threshold); // TODO, not right to return 0
        }
        ProbT llForward = compute_alphas(transducerm.probs_tuk,transducerm.alphas,T,U,labels);
        ProbT llBackward = compute_betas_and_grad(grads_trans,grads_predict, transducerm.probs_tuk,transducerm.grads_tuk,labels,T,U,transducerm.alphas,transducerm.betas);
        ProbT diff = std::abs(llForward - llBackward);
        if (diff > transducer_helper::threshold) {
            over_threshold = true;
        }
        return std::make_pair(-llBackward,over_threshold);
    }
}

/* Public entry point: validates pointers, lays out the workspace
 * (exponentiated activations first, then per-minibatch scratch), and runs
 * the per-utterance kernel across the minibatch.
 *
 * NOTE(review): the omp parallel for hands the SAME grads_trans /
 * grads_predict buffers to every minibatch iteration, and
 * compute_betas_and_grad begins by std::fill-ing them -- concurrent
 * iterations race and later ones overwrite earlier results. Also,
 * per_minibatch_bytes reserves maxU*(maxT+1) floats for alphas+betas while
 * the metadata constructor consumes U*T + U; presumably compatible for the
 * max sizes, but verify. predict_used is computed and never read. */
template<typename ProbT>
transducerStatus_t CpuTransducer<ProbT>::cost_and_grad(const ProbT* const trans_acts, const ProbT* const predict_acts, ProbT* grads_trans,ProbT* grads_predict,ProbT* costs,const int* const flat_labels,const int* const input_lengths, const int* const label_lengths) {
    if (trans_acts == nullptr || predict_acts==nullptr|| grads_trans == nullptr || grads_predict==nullptr|| costs == nullptr || flat_labels == nullptr || label_lengths == nullptr || input_lengths == nullptr ) return TRANSDUCER_STATUS_INVALID_VALUE;
    ProbT* trans_exp=static_cast<ProbT *>(workspace_);
    int maxT =*std::max_element(input_lengths,input_lengths+minibatch_);
    int maxU=*std::max_element(label_lengths,label_lengths+minibatch_)+1;
    int trans_used= minibatch_ * alphabet_size_ * maxT;
    ProbT* predict_exp=trans_exp+trans_used;
    int predict_used=minibatch_ * alphabet_size_ * maxU;  // (unused)
    // Bytes consumed by the two exp buffers; per-minibatch scratch follows.
    size_t bytes_used = sizeof(ProbT) * minibatch_ * alphabet_size_ * (maxT+maxU);
    size_t per_minibatch_bytes=0;
    per_minibatch_bytes += sizeof(ProbT) * maxU * (maxT+1);            // alphas + betas
    per_minibatch_bytes += sizeof(ProbT) * maxU * maxT*alphabet_size_; // probs_tuk
    per_minibatch_bytes += sizeof(ProbT) * maxU * maxT*alphabet_size_; // grads_tuk
    exp_matrix(trans_acts,predict_acts,trans_exp,predict_exp,input_lengths,label_lengths);
    #pragma omp parallel for
    for (int mb = 0; mb < minibatch_; ++mb) {
        const int T = input_lengths[mb]; // Length of utterance (time)
        const int U = label_lengths[mb]+1; // Number of labels in transcription
        bool mb_status;
        // flat_labels offset: labels are concatenated; skip the preceding
        // minibatch elements' label counts.
        std::tie(costs[mb], mb_status) =cost_and_grad_kernel(grads_trans,grads_predict,trans_exp,predict_exp, flat_labels + std::accumulate(label_lengths, label_lengths + mb, 0), T, U, alphabet_size_,minibatch_, mb, bytes_used + mb * per_minibatch_bytes);
    }
    return TRANSDUCER_STATUS_SUCCESS;
}

/* Forward-only scoring: same workspace layout as cost_and_grad but runs
 * serially and only the alpha recursion; costs[mb] = -log P(y|x).
 * NOTE(review): every iteration passes the same bytes_used offset (no
 * mb * per_minibatch_bytes term), so all elements reuse one scratch area --
 * fine serially, but would race if this loop were ever parallelized. The
 * outer "int T=0,U=0;" pair is immediately shadowed inside the loop. */
template<typename ProbT>
transducerStatus_t CpuTransducer<ProbT>::score_forward(const ProbT* const trans_acts, const ProbT* const predict_acts, ProbT* costs, const int* const flat_labels, const int* const input_lengths, const int* const label_lengths) {
    if (predict_acts == nullptr || trans_acts==nullptr|| costs == nullptr || flat_labels == nullptr || label_lengths == nullptr || input_lengths == nullptr ) return TRANSDUCER_STATUS_INVALID_VALUE;
    // note: trans_exp is maxT * minibatch_ * alphabet_size_
    ProbT* trans_exp=static_cast<ProbT *>(workspace_);
    int maxT =*std::max_element(input_lengths,input_lengths+minibatch_);
    int maxU=*std::max_element(label_lengths,label_lengths+minibatch_)+1;
    int trans_used= minibatch_ * alphabet_size_ * maxT;
    ProbT* predict_exp=trans_exp+trans_used;
    int predict_used=minibatch_ * alphabet_size_ * maxU;  // (unused)
    size_t bytes_used = sizeof(ProbT) * minibatch_ * alphabet_size_ * (maxT+maxU);
    size_t per_minibatch_bytes=0;  // computed for symmetry; unused below
    per_minibatch_bytes += sizeof(ProbT) * maxU * (maxT+1);
    per_minibatch_bytes += sizeof(ProbT) * maxU * maxT*alphabet_size_;
    per_minibatch_bytes += sizeof(ProbT) * maxU * maxT*alphabet_size_;
    exp_matrix(trans_acts,predict_acts,trans_exp,predict_exp,input_lengths,label_lengths);
    //compute_pr(trans_exp,predict_exp,probs_utk,maxU,maxT,input_lengths,label_lengths);
    int T=0,U=0;
    for (int mb = 0; mb < minibatch_; ++mb) {
        const int T = input_lengths[mb]; // Length of utterance (time)
        const int U = label_lengths[mb]+1;
        CpuTransducer_metadata transducerm(trans_exp,predict_exp,T,U, alphabet_size_,minibatch_,mb,workspace_, bytes_used);
        costs[mb] = -compute_alphas(transducerm.probs_tuk,transducerm.alphas,T,U,flat_labels + std::accumulate(label_lengths, label_lengths + mb, 0));
    }
    return TRANSDUCER_STATUS_SUCCESS;
}
parallel_execution_omp.h
/* * Copyright 2018 Universidad Carlos III de Madrid * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef GRPPI_OMP_PARALLEL_EXECUTION_OMP_H #define GRPPI_OMP_PARALLEL_EXECUTION_OMP_H #ifdef GRPPI_OMP #include "../common/mpmc_queue.h" #include "../common/iterator.h" #include "../common/execution_traits.h" #include "../common/configuration.h" #include "grppi/seq/sequential_execution.h" #include <type_traits> #include <tuple> #if __cplusplus < 201703L #include <experimental/optional> #else #include <optional> #endif #include <omp.h> namespace grppi { /** \brief OpenMP parallel execution policy. This policy uses OpenMP as implementation back-end. */ class parallel_execution_omp { public: /** \brief Default construct an OpenMP parallel execution policy. Creates an OpenMP parallel execution object. The concurrency degree is determined by the platform according to OpenMP rules. */ parallel_execution_omp() noexcept{}; parallel_execution_omp(int concurrency_degree) noexcept : concurrency_degree_{concurrency_degree} { omp_set_num_threads(concurrency_degree_); } /** @brief Set num_threads to _threads in order to run in parallel * * @param _threads number of threads used in the parallel mode */ /** \brief Constructs an OpenMP parallel execution policy. Creates an OpenMP parallel execution object selecting the concurrency degree and ordering. \param concurrency_degree Number of threads used for parallel algorithms. \param order Whether ordered executions is enabled or disabled. 
*/ parallel_execution_omp(int concurrency_degree, bool order) noexcept : concurrency_degree_{concurrency_degree}, ordering_{order} { omp_set_num_threads(concurrency_degree_); } /** \brief Set number of grppi threads. */ void set_concurrency_degree(int degree) noexcept { concurrency_degree_ = degree; omp_set_num_threads(concurrency_degree_); } /** \brief Get number of grppi threads. */ int concurrency_degree() const noexcept { return concurrency_degree_; } /** \brief Enable ordering. */ void enable_ordering() noexcept { ordering_=true; } /** \brief Disable ordering. */ void disable_ordering() noexcept { ordering_=false; } /** \brief Is execution ordered. */ bool is_ordered() const noexcept { return ordering_; } /** \brief Sets the attributes for the queues built through make_queue<T>(() */ void set_queue_attributes(int size, queue_mode mode) noexcept { queue_size_ = size; queue_mode_ = mode; } /** \brief Makes a communication queue for elements of type T. Constructs a queue using the attributes that can be set via set_queue_attributes(). The value is returned via move semantics. */ template <typename T> mpmc_queue<T> make_queue() const { return {queue_size_, queue_mode_}; } /** \brief Returns the reference of a communication queue for elements of type T if the queue has been created in an outer pattern. Returns the reference of the queue received as argument. \tparam T Element type for the queue. \tparam Transformers List of the next transformers. \param queue Reference of a queue of type T */ template <typename T, typename ... Transformers> mpmc_queue<T>& get_output_queue(mpmc_queue<T> & queue, Transformers &&...) const { return queue; } /** \brief Makes a communication queue for elements of type T if the queue has not been created in an outer pattern. Call to the make_queue function and the value is returned via move semantics. \tparam T Element type for the queue. \tparam Transformers List of the next transformers. */ template <typename T, typename ... 
Transformers>
  mpmc_queue<T> get_output_queue(Transformers &&... ) const{
    // Tail of get_output_queue (its template header starts before this chunk):
    // no downstream stage supplied a queue, so create a fresh one for this link.
    return std::move(make_queue<T>());
  }

  /**
  \brief Get index of current thread in the thread table.
  \note NOTE(review): every thread of the parallel region writes `result`
  without synchronization, so the returned value is whichever write landed
  last — the member is already deprecated.
  */
  [[deprecated("Thread ids are deprecated.\n"
      "If you have a specific use case file a bug")]]
  int get_thread_id() const noexcept {
    int result;
    #pragma omp parallel
    {
      result = omp_get_thread_num();
    }
    return result;
  }

  /**
  \brief Applies a transformation to multiple sequences leaving the result in
  another sequence using available OpenMP parallelism.
  \tparam InputIterators Iterator types for input sequences.
  \tparam OutputIterator Iterator type for the output sequence.
  \tparam Transformer Callable object type for the transformation.
  \param firsts Tuple of iterators to input sequences.
  \param first_out Iterator to the output sequence.
  \param sequence_size Size of the input sequences.
  \param transform_op Transformation callable object.
  \pre For every I iterators in the range
       `[get<I>(firsts), next(get<I>(firsts),sequence_size))` are valid.
  \pre Iterators in the range `[first_out, next(first_out,sequence_size)]`
       are valid.
  */
  template <typename ... InputIterators, typename OutputIterator,
            typename Transformer>
  void map(std::tuple<InputIterators...> firsts,
      OutputIterator first_out,
      std::size_t sequence_size, Transformer transform_op) const;

  /**
  \brief Applies a reduction to a sequence of data items.
  \tparam InputIterator Iterator type for the input sequence.
  \tparam Identity Type for the identity value.
  \tparam Combiner Callable object type for the combination.
  \param first Iterator to the first element of the sequence.
  \param sequence_size Size of the input sequence.
  \param identity Identity value for the reduction.
  \param combine_op Combination callable object.
  \pre Iterators in the range `[first,last)` are valid.
  \return The reduction result.
  */
  template <typename InputIterator, typename Identity, typename Combiner>
  auto reduce(InputIterator first, std::size_t sequence_size,
              Identity && identity,
              Combiner && combine_op) const;

  /**
  \brief Applies a map/reduce operation to a sequence of data items.
  \tparam InputIterators Iterator types for the input sequences.
  \tparam Identity Type for the identity value.
  \tparam Transformer Callable object type for the transformation.
  \tparam Combiner Callable object type for the combination.
  \param firsts Tuple of iterators to the input sequences.
  \param sequence_size Size of the input sequence.
  \param identity Identity value for the reduction.
  \param transform_op Transformation callable object.
  \param combine_op Combination callable object.
  \pre Iterators in the range `[first,last)` are valid.
  \return The map/reduce result.
  */
  template <typename ... InputIterators, typename Identity,
            typename Transformer, typename Combiner>
  auto map_reduce(std::tuple<InputIterators...> firsts,
                  std::size_t sequence_size,
                  Identity && identity,
                  Transformer && transform_op,
                  Combiner && combine_op) const;

  /**
  \brief Applies a stencil to multiple sequences leaving the result in
  another sequence.
  \tparam InputIterators Iterator types for input sequences.
  \tparam OutputIterator Iterator type for the output sequence.
  \tparam StencilTransformer Callable object type for the stencil transformation.
  \tparam Neighbourhood Callable object for generating neighbourhoods.
  \param firsts Tuple of iterators to input sequences.
  \param first_out Iterator to the output sequence.
  \param sequence_size Size of the input sequences.
  \param transform_op Stencil transformation callable object.
  \param neighbour_op Neighbourhood callable object.
  \pre For every I iterators in the range
       `[get<I>(firsts), next(get<I>(firsts),sequence_size))` are valid.
  \pre Iterators in the range `[first_out, next(first_out,sequence_size)]`
       are valid.
  */
  template <typename ... InputIterators, typename OutputIterator,
            typename StencilTransformer, typename Neighbourhood>
  void stencil(std::tuple<InputIterators...> firsts, OutputIterator first_out,
               std::size_t sequence_size,
               StencilTransformer && transform_op,
               Neighbourhood && neighbour_op) const;

  /**
  \brief Invoke \ref md_divide-conquer.
  \tparam Input Type used for the input problem.
  \tparam Divider Callable type for the divider operation.
  \tparam Solver Callable type for the solver operation.
  \tparam Combiner Callable type for the combiner operation.
  \param input Input problem to be solved.
  \param divider_op Divider operation.
  \param solver_op Solver operation.
  \param combine_op Combiner operation.
  */
  template <typename Input, typename Divider, typename Solver,
            typename Combiner>
  [[deprecated("Use new interface with predicate argument")]]
  auto divide_conquer(Input && input,
                      Divider && divide_op,
                      Solver && solve_op,
                      Combiner && combine_op) const;

  /**
  \brief Invoke \ref md_divide-conquer.
  \tparam Input Type used for the input problem.
  \tparam Divider Callable type for the divider operation.
  \tparam Predicate Callable type for the stop condition predicate.
  \tparam Solver Callable type for the solver operation.
  \tparam Combiner Callable type for the combiner operation.
  \param input Input problem to be solved.
  \param divider_op Divider operation.
  \param predicate_op Predicate operation.
  \param solver_op Solver operation.
  \param combine_op Combiner operation.
  */
  template <typename Input, typename Divider, typename Predicate,
            typename Solver, typename Combiner>
  auto divide_conquer(Input && input,
                      Divider && divide_op,
                      Predicate && predicate_op,
                      Solver && solve_op,
                      Combiner && combine_op) const;

  /**
  \brief Invoke \ref md_pipeline.
  \tparam Generator Callable type for the generator operation.
  \tparam Transformers Callable types for the transformers in the pipeline.
  \param generate_op Generator operation.
  \param transform_ops Transformer operations.
  */
  template <typename Generator, typename ... Transformers>
  void pipeline(Generator && generate_op,
                Transformers && ... transform_op) const;

  /**
  \brief Invoke \ref md_pipeline coming from another context that uses
  mpmc_queues as communication channels.
  \tparam InputType Type of the input stream.
  \tparam Transformer Callable type for the transformer in the pipeline.
  \tparam OutputType Type of the output stream.
  \param input_queue Input stream communicator.
  \param transform_op Transformer operation.
  \param output_queue Output stream communicator.
  */
  template <typename InputType, typename Transformer, typename OutputType>
  void pipeline(mpmc_queue<InputType> & input_queue,
                Transformer && transform_op,
                mpmc_queue<OutputType> &output_queue) const
  {
    do_pipeline(input_queue, std::forward<Transformer>(transform_op),
        output_queue);
  }

private:

  // Internal divide/conquer: num_threads is the remaining task budget,
  // decremented as tasks are spawned; when exhausted the subtree falls back
  // to sequential execution.
  template <typename Input, typename Divider, typename Solver, typename Combiner>
  auto divide_conquer(Input && input,
                      Divider && divide_op,
                      Solver && solve_op,
                      Combiner && combine_op,
                      std::atomic<int> & num_threads) const;

  template <typename Input, typename Divider, typename Predicate,
            typename Solver, typename Combiner>
  auto divide_conquer(Input && input,
                      Divider && divide_op,
                      Predicate && predicate_op,
                      Solver && solve_op,
                      Combiner && combine_op,
                      std::atomic<int> & num_threads) const;

  // Terminal pipeline stage: plain consumer callable.
  template <typename Queue, typename Consumer,
            requires_no_pattern<Consumer> = 0>
  void do_pipeline(Queue & input_queue, Consumer && consume_op) const;

  // Worker stage: transform items from input_queue into output_queue.
  template <typename Inqueue, typename Transformer, typename output_type,
            requires_no_pattern<Transformer> = 0>
  void do_pipeline(Inqueue & input_queue, Transformer && transform_op,
      mpmc_queue<output_type> & output_queue) const;

  // Overload selected when two consecutive links share the same queue type
  // (defined later in this file as a deliberate no-op).
  template <typename T, typename ... Others>
  void do_pipeline(mpmc_queue<T> & in_q, mpmc_queue<T> & same_queue,
      Others &&...
      ops) const;

  // End of overload recursion: nothing left to consume.
  template <typename T>
  void do_pipeline(mpmc_queue<T> &) const {}

  template <typename Queue, typename Transformer,
            typename ... OtherTransformers,
            requires_no_pattern<Transformer> = 0>
  void do_pipeline(Queue & input_queue, Transformer && transform_op,
      OtherTransformers && ... other_ops) const;

  template <typename Queue, typename Execution, typename Transformer,
            template <typename, typename> class Context,
            typename ... OtherTransformers,
            requires_context<Context<Execution,Transformer>> = 0>
  void do_pipeline(Queue & input_queue,
      Context<Execution,Transformer> && context_op,
      OtherTransformers &&... other_ops) const;

  // Lvalue overloads delegate to their rvalue counterparts via std::move.
  template <typename Queue, typename Execution, typename Transformer,
            template <typename, typename> class Context,
            typename ... OtherTransformers,
            requires_context<Context<Execution,Transformer>> = 0>
  void do_pipeline(Queue & input_queue,
      Context<Execution,Transformer> & context_op,
      OtherTransformers &&... other_ops) const
  {
    do_pipeline(input_queue, std::move(context_op),
        std::forward<OtherTransformers>(other_ops)...);
  }

  template <typename Queue, typename FarmTransformer,
            template <typename> class Farm,
            requires_farm<Farm<FarmTransformer>> = 0>
  void do_pipeline(Queue & input_queue,
      Farm<FarmTransformer> & farm_obj) const
  {
    do_pipeline(input_queue, std::move(farm_obj));
  }

  template <typename Queue, typename FarmTransformer,
            template <typename> class Farm,
            requires_farm<Farm<FarmTransformer>> = 0>
  void do_pipeline(Queue & input_queue,
      Farm<FarmTransformer> && farm_obj) const;

  template <typename Queue, typename FarmTransformer,
            template <typename> class Farm,
            typename ... OtherTransformers,
            requires_farm<Farm<FarmTransformer>> =0>
  void do_pipeline(Queue & input_queue,
      Farm<FarmTransformer> & farm_obj,
      OtherTransformers && ... other_transform_ops) const
  {
    do_pipeline(input_queue, std::move(farm_obj),
        std::forward<OtherTransformers>(other_transform_ops)...);
  }

  template <typename Queue, typename FarmTransformer,
            template <typename> class Farm,
            typename ... OtherTransformers,
            requires_farm<Farm<FarmTransformer>> =0>
  void do_pipeline(Queue & input_queue,
      Farm<FarmTransformer> && farm_obj,
      OtherTransformers && ... other_transform_ops) const;

  template <typename Queue, typename Predicate,
            template <typename> class Filter,
            requires_filter<Filter<Predicate>> = 0>
  void do_pipeline(Queue & input_queue,
      Filter<Predicate> & filter_obj) const
  {
    do_pipeline(input_queue, std::move(filter_obj));
  }

  template <typename Queue, typename Predicate,
            template <typename> class Filter,
            requires_filter<Filter<Predicate>> = 0>
  void do_pipeline(Queue & input_queue,
      Filter<Predicate> && filter_obj) const;

  template <typename Queue, typename Predicate,
            template <typename> class Filter,
            typename ... OtherTransformers,
            requires_filter<Filter<Predicate>> =0>
  void do_pipeline(Queue & input_queue,
      Filter<Predicate> & filter_obj,
      OtherTransformers && ... other_transform_ops) const
  {
    do_pipeline(input_queue, std::move(filter_obj),
        std::forward<OtherTransformers>(other_transform_ops)...);
  }

  template <typename Queue, typename Predicate,
            template <typename> class Filter,
            typename ... OtherTransformers,
            requires_filter<Filter<Predicate>> =0>
  void do_pipeline(Queue & input_queue,
      Filter<Predicate> && filter_obj,
      OtherTransformers && ... other_transform_ops) const;

  template <typename Queue, typename Combiner, typename Identity,
            template <typename C, typename I> class Reduce,
            typename ... OtherTransformers,
            requires_reduce<Reduce<Combiner,Identity>> = 0>
  void do_pipeline(Queue && input_queue,
      Reduce<Combiner,Identity> & reduce_obj,
      OtherTransformers && ... other_transform_ops) const
  {
    do_pipeline(input_queue, std::move(reduce_obj),
        std::forward<OtherTransformers>(other_transform_ops)...);
  }

  template <typename Queue, typename Combiner, typename Identity,
            template <typename C, typename I> class Reduce,
            typename ... OtherTransformers,
            requires_reduce<Reduce<Combiner,Identity>> = 0>
  void do_pipeline(Queue && input_queue,
      Reduce<Combiner,Identity> && reduce_obj,
      OtherTransformers && ... other_transform_ops) const;

  template <typename Queue, typename Transformer, typename Predicate,
            template <typename T, typename P> class Iteration,
            typename ... OtherTransformers,
            requires_iteration<Iteration<Transformer,Predicate>> =0,
            requires_no_pattern<Transformer> =0>
  void do_pipeline(Queue & input_queue,
      Iteration<Transformer,Predicate> & iteration_obj,
      OtherTransformers && ... other_transform_ops) const
  {
    do_pipeline(input_queue, std::move(iteration_obj),
        std::forward<OtherTransformers>(other_transform_ops)...);
  }

  template <typename Queue, typename Transformer, typename Predicate,
            template <typename T, typename P> class Iteration,
            typename ... OtherTransformers,
            requires_iteration<Iteration<Transformer,Predicate>> =0,
            requires_no_pattern<Transformer> =0>
  void do_pipeline(Queue & input_queue,
      Iteration<Transformer,Predicate> && iteration_obj,
      OtherTransformers && ... other_transform_ops) const;

  template <typename Queue, typename Transformer, typename Predicate,
            template <typename T, typename P> class Iteration,
            typename ... OtherTransformers,
            requires_iteration<Iteration<Transformer,Predicate>> =0,
            requires_pipeline<Transformer> =0>
  void do_pipeline(Queue & input_queue,
      Iteration<Transformer,Predicate> && iteration_obj,
      OtherTransformers && ... other_transform_ops) const;

  template <typename Queue, typename ... Transformers,
            template <typename...> class Pipeline,
            typename ... OtherTransformers,
            requires_pipeline<Pipeline<Transformers...>> = 0>
  void do_pipeline(Queue & input_queue,
      Pipeline<Transformers...> & pipeline_obj,
      OtherTransformers && ... other_transform_ops) const
  {
    do_pipeline(input_queue, std::move(pipeline_obj),
        std::forward<OtherTransformers>(other_transform_ops)...);
  }

  template <typename Queue, typename ... Transformers,
            template <typename...> class Pipeline,
            typename ... OtherTransformers,
            requires_pipeline<Pipeline<Transformers...>> = 0>
  void do_pipeline(Queue & input_queue,
      Pipeline<Transformers...> && pipeline_obj,
      OtherTransformers && ... other_transform_ops) const;

  // Recurses over a nested pipeline's stages flattened into a tuple.
  template <typename Queue, typename ... Transformers, std::size_t ... I>
  void do_pipeline_nested(
      Queue & input_queue,
      std::tuple<Transformers...> && transform_ops,
      std::index_sequence<I...>) const;

private:

  /**
  \brief Obtain OpenMP platform number of threads.
  Queries the current OpenMP number of threads so that it can be used in
  initialization of data members.
  \return The current OpenMP number of threads.
  \note The determination is performed inside a parallel region.
  */
  static int impl_concurrency_degree() {
    int result;
    #pragma omp parallel
    {
      result = omp_get_num_threads();
    }
    return result;
  }

private:

  configuration<> config_{};

  int concurrency_degree_= config_.concurrency_degree();

  bool ordering_ = config_.ordering();

  int queue_size_ = config_.queue_size();

  queue_mode queue_mode_ = config_.mode();
};

/**
\brief Metafunction that determines if type E is parallel_execution_omp
\tparam Execution policy type.
*/
template <typename E>
constexpr bool is_parallel_execution_omp() {
  return std::is_same<E, parallel_execution_omp>::value;
}

/**
\brief Determines if an execution policy is supported in the current compilation.
\note Specialization for parallel_execution_omp when GRPPI_OMP is enabled.
*/
template <>
constexpr bool is_supported<parallel_execution_omp>() { return true; }

/**
\brief Determines if an execution policy supports the map pattern.
\note Specialization for parallel_execution_omp when GRPPI_OMP is enabled.
*/
template <>
constexpr bool supports_map<parallel_execution_omp>() { return true; }

/**
\brief Determines if an execution policy supports the reduce pattern.
\note Specialization for parallel_execution_omp when GRPPI_OMP is enabled.
*/
template <>
constexpr bool supports_reduce<parallel_execution_omp>() { return true; }

/**
\brief Determines if an execution policy supports the map-reduce pattern.
\note Specialization for parallel_execution_omp when GRPPI_OMP is enabled.
*/
template <>
constexpr bool supports_map_reduce<parallel_execution_omp>() { return true; }

/**
\brief Determines if an execution policy supports the stencil pattern.
\note Specialization for parallel_execution_omp when GRPPI_OMP is enabled.
*/
template <>
constexpr bool supports_stencil<parallel_execution_omp>() { return true; }

/**
\brief Determines if an execution policy supports the divide/conquer pattern.
\note Specialization for parallel_execution_omp when GRPPI_OMP is enabled.
*/
template <>
constexpr bool supports_divide_conquer<parallel_execution_omp>() { return true; }

/**
\brief Determines if an execution policy supports the pipeline pattern.
\note Specialization for parallel_execution_omp when GRPPI_OMP is enabled.
*/
template <>
constexpr bool supports_pipeline<parallel_execution_omp>() { return true; }

template <typename ...
InputIterators, typename OutputIterator,
          typename Transformer>
void parallel_execution_omp::map(
    std::tuple<InputIterators...> firsts,
    OutputIterator first_out,
    std::size_t sequence_size, Transformer transform_op) const
{
  // Every index is independent, so a single parallel-for suffices.
  #pragma omp parallel for
  for (std::size_t i=0; i<sequence_size; ++i) {
    first_out[i] = apply_iterators_indexed(transform_op, firsts, i);
  }
}

template <typename InputIterator, typename Identity, typename Combiner>
auto parallel_execution_omp::reduce(
    InputIterator first, std::size_t sequence_size,
    Identity && identity,
    Combiner && combine_op) const
{
  constexpr sequential_execution seq;
  using result_type = std::decay_t<Identity>;
  // One slot per worker; each chunk is reduced sequentially into its slot.
  std::vector<result_type> partial_results(concurrency_degree_);
  auto process_chunk = [&](InputIterator f, std::size_t sz, std::size_t id) {
    partial_results[id] = seq.reduce(f, sz,
        std::forward<Identity>(identity),
        std::forward<Combiner>(combine_op));
  };

  const auto chunk_size = sequence_size/concurrency_degree_;

  #pragma omp parallel
  {
    #pragma omp single nowait
    {
      for (int i=0; i<concurrency_degree_-1; ++i) {
        const auto delta = chunk_size * i;
        const auto chunk_first = std::next(first,delta);

        #pragma omp task firstprivate (chunk_first, chunk_size, i)
        {
          process_chunk(chunk_first, chunk_size, i);
        }
      }

      // Main thread takes the last chunk, which absorbs the remainder when
      // sequence_size is not a multiple of concurrency_degree_.
      const auto delta = chunk_size * (concurrency_degree_ - 1);
      const auto chunk_first = std::next(first,delta);
      const auto chunk_sz = sequence_size - delta;
      process_chunk(chunk_first, chunk_sz, concurrency_degree_-1);
      #pragma omp taskwait
    }
  }

  // Fold the partial results, seeding the fold with the first partial.
  return seq.reduce(std::next(partial_results.begin()),
      partial_results.size()-1, partial_results[0],
      std::forward<Combiner>(combine_op));
}

template <typename ... InputIterators, typename Identity,
          typename Transformer, typename Combiner>
auto parallel_execution_omp::map_reduce(
    std::tuple<InputIterators...> firsts,
    std::size_t sequence_size,
    Identity && identity,
    Transformer && transform_op, Combiner && combine_op) const
{
  constexpr sequential_execution seq;
  using result_type = std::decay_t<Identity>;
  std::vector<result_type> partial_results(concurrency_degree_);

  auto process_chunk = [&](auto f, std::size_t sz, std::size_t i) {
    partial_results[i] = seq.map_reduce(f, sz,
        std::forward<Identity>(identity),
        std::forward<Transformer>(transform_op),
        std::forward<Combiner>(combine_op));
  };

  const auto chunk_size = sequence_size / concurrency_degree_;

  #pragma omp parallel
  {
    #pragma omp single nowait
    {
      for (int i=0;i<concurrency_degree_-1;++i) {
        #pragma omp task firstprivate(i)
        {
          const auto delta = chunk_size * i;
          const auto chunk_firsts = iterators_next(firsts,delta);
          process_chunk(chunk_firsts, chunk_size, i);
        }
      }

      // Main thread handles the tail chunk (may be larger than chunk_size).
      const auto delta = chunk_size * (concurrency_degree_ - 1);
      auto chunk_firsts = iterators_next(firsts,delta);
      auto chunk_last = std::next(std::get<0>(firsts), sequence_size);
      process_chunk(chunk_firsts,
          std::distance(std::get<0>(chunk_firsts), chunk_last),
          concurrency_degree_ - 1);
      #pragma omp taskwait
    }
  }

  // Final fold over all partials, this time seeded with the identity.
  return seq.reduce(partial_results.begin(), partial_results.size(),
      std::forward<Identity>(identity),
      std::forward<Combiner>(combine_op));
}

template <typename ...
InputIterators, typename OutputIterator,
          typename StencilTransformer, typename Neighbourhood>
void parallel_execution_omp::stencil(
    std::tuple<InputIterators...> firsts, OutputIterator first_out,
    std::size_t sequence_size,
    StencilTransformer && transform_op,
    Neighbourhood && neighbour_op) const
{
  constexpr sequential_execution seq;
  const auto chunk_size = sequence_size / concurrency_degree_;
  // Each chunk is processed by the sequential stencil at its output offset.
  auto process_chunk = [&](auto f, std::size_t sz, std::size_t delta) {
    seq.stencil(f, std::next(first_out,delta), sz,
        std::forward<StencilTransformer>(transform_op),
        std::forward<Neighbourhood>(neighbour_op));
  };

  #pragma omp parallel
  {
    #pragma omp single nowait
    {
      for (int i=0; i<concurrency_degree_-1; ++i) {
        #pragma omp task firstprivate(i)
        {
          const auto delta = chunk_size * i;
          const auto chunk_firsts = iterators_next(firsts,delta);
          process_chunk(chunk_firsts, chunk_size, delta);
        }
      }

      // Main thread takes the tail chunk (absorbs any division remainder).
      const auto delta = chunk_size * (concurrency_degree_ - 1);
      const auto chunk_firsts = iterators_next(firsts,delta);
      const auto chunk_last = std::next(std::get<0>(firsts), sequence_size);
      process_chunk(chunk_firsts,
          std::distance(std::get<0>(chunk_firsts), chunk_last), delta);
      #pragma omp taskwait
    }
  }
}

template <typename Input, typename Divider,typename Predicate,
          typename Solver, typename Combiner>
auto parallel_execution_omp::divide_conquer(
    Input && input,
    Divider && divide_op,
    Predicate && predicate_op,
    Solver && solve_op,
    Combiner && combine_op) const
{
  // Budget of extra worker tasks; the main thread is not counted.
  std::atomic<int> num_threads{concurrency_degree_-1};

  return divide_conquer(std::forward<Input>(input),
      std::forward<Divider>(divide_op),
      std::forward<Predicate>(predicate_op),
      std::forward<Solver>(solve_op),
      std::forward<Combiner>(combine_op),
      num_threads);
}

template <typename Input, typename Divider, typename Solver, typename Combiner>
auto parallel_execution_omp::divide_conquer(
    Input && input,
    Divider && divide_op,
    Solver && solve_op,
    Combiner && combine_op) const
{
  std::atomic<int> num_threads{concurrency_degree_-1};
  return
      divide_conquer(std::forward<Input>(input),
      std::forward<Divider>(divide_op),
      std::forward<Solver>(solve_op),
      std::forward<Combiner>(combine_op),
      num_threads);
}

template <typename Generator, typename ... Transformers>
void parallel_execution_omp::pipeline(
    Generator && generate_op,
    Transformers && ... transform_ops) const
{
  using namespace std;
  using result_type = decay_t<typename result_of<Generator()>::type>;
  auto output_queue = make_queue<pair<result_type,long>>();

  #pragma omp parallel
  {
    #pragma omp single nowait
    {
      // Generator task: items carry a sequence number so ordered pipelines
      // can restore generation order downstream. A falsy item ends the stream.
      #pragma omp task shared(generate_op,output_queue)
      {
        long order = 0;
        for (;;) {
          auto item = generate_op();
          output_queue.push(make_pair(item,order++));
          if (!item) break;
        }
      }
      do_pipeline(output_queue,
          forward<Transformers>(transform_ops)...);
      #pragma omp taskwait
    }
  }
}

// PRIVATE MEMBERS

template <typename Input, typename Divider,typename Predicate,
          typename Solver, typename Combiner>
auto parallel_execution_omp::divide_conquer(
    Input && input,
    Divider && divide_op,
    Predicate && predicate_op,
    Solver && solve_op,
    Combiner && combine_op,
    std::atomic<int> & num_threads) const
{
  constexpr sequential_execution seq;
  // Out of task budget: solve this whole subtree sequentially.
  if (num_threads.load()<=0) {
    return seq.divide_conquer(std::forward<Input>(input),
        std::forward<Divider>(divide_op),std::forward<Predicate>(predicate_op),
        std::forward<Solver>(solve_op),
        std::forward<Combiner>(combine_op));
  }

  if (predicate_op(input)) { return solve_op(std::forward<Input>(input)); }
  auto subproblems = divide_op(std::forward<Input>(input));

  using subresult_type =
      std::decay_t<typename std::result_of<Solver(Input)>::type>;
  // NOTE(review): assumes divide_op yields at least one subproblem;
  // subproblems.size()-1 would underflow on an empty container.
  std::vector<subresult_type> partials(subproblems.size()-1);

  auto process_subproblems = [&,this](auto it, std::size_t div) {
    partials[div] = this->divide_conquer(std::forward<Input>(*it),
        std::forward<Divider>(divide_op),
        std::forward<Predicate>(predicate_op),
        std::forward<Solver>(solve_op),
        std::forward<Combiner>(combine_op), num_threads);
  };

  int division = 0;
  subresult_type subresult;

  #pragma omp parallel
  {
#pragma omp single nowait
    {
      // Spawn one task per subproblem (after the first) while budget remains.
      auto i = subproblems.begin() + 1;
      while (i!=subproblems.end() && num_threads.load()>0) {
        #pragma omp task firstprivate(i,division) \
            shared(partials,divide_op,solve_op,combine_op,num_threads)
        {
          process_subproblems(i, division);
        }
        num_threads --;
        i++;
        division++;
      }

      // Budget exhausted: remaining subproblems are solved sequentially.
      while (i!=subproblems.end()) {
        partials[division] = seq.divide_conquer(std::forward<Input>(*i++),
            std::forward<Divider>(divide_op),
            std::forward<Predicate>(predicate_op),
            std::forward<Solver>(solve_op),
            std::forward<Combiner>(combine_op));
      }

      //Main thread works on the first subproblem.
      if (num_threads.load()>0) {
        subresult = divide_conquer(std::forward<Input>(*subproblems.begin()),
            std::forward<Divider>(divide_op),
            std::forward<Predicate>(predicate_op),
            std::forward<Solver>(solve_op),
            std::forward<Combiner>(combine_op), num_threads);
      }
      else {
        subresult = seq.divide_conquer(std::forward<Input>(*subproblems.begin()),
            std::forward<Divider>(divide_op),
            std::forward<Predicate>(predicate_op),
            std::forward<Solver>(solve_op),
            std::forward<Combiner>(combine_op));
      }
      #pragma omp taskwait
    }
  }
  // Combine the first-subproblem result with all the partial results.
  return seq.reduce(partials.begin(), partials.size(),
      std::forward<subresult_type>(subresult), combine_op);
}

template <typename Input, typename Divider, typename Solver, typename Combiner>
auto parallel_execution_omp::divide_conquer(
    Input && input,
    Divider && divide_op,
    Solver && solve_op,
    Combiner && combine_op,
    std::atomic<int> & num_threads) const
{
  constexpr sequential_execution seq;
  // Out of task budget: solve sequentially.
  if (num_threads.load()<=0) {
    return seq.divide_conquer(std::forward<Input>(input),
        std::forward<Divider>(divide_op),
        std::forward<Solver>(solve_op),
        std::forward<Combiner>(combine_op));
  }

  auto subproblems = divide_op(std::forward<Input>(input));
  // Base case: the divider produced a single (or no) subproblem.
  if (subproblems.size()<=1) { return solve_op(std::forward<Input>(input)); }

  using subresult_type =
      std::decay_t<typename std::result_of<Solver(Input)>::type>;
  std::vector<subresult_type> partials(subproblems.size()-1);

  auto process_subproblems = [&,this](auto it, std::size_t div) {
    partials[div] = this->divide_conquer(std::forward<Input>(*it),
        std::forward<Divider>(divide_op),
        std::forward<Solver>(solve_op),
        std::forward<Combiner>(combine_op), num_threads);
  };

  int division = 0;
  subresult_type subresult;

  #pragma omp parallel
  {
    #pragma omp single nowait
    {
      auto i = subproblems.begin() + 1;
      while (i!=subproblems.end() && num_threads.load()>0) {
        #pragma omp task firstprivate(i,division) \
            shared(partials,divide_op,solve_op,combine_op,num_threads)
        {
          process_subproblems(i, division);
        }
        num_threads --;
        i++;
        division++;
      }

      while (i!=subproblems.end()) {
        partials[division] = seq.divide_conquer(std::forward<Input>(*i++),
            std::forward<Divider>(divide_op),
            std::forward<Solver>(solve_op),
            std::forward<Combiner>(combine_op));
      }

      //Main thread works on the first subproblem.
      if (num_threads.load()>0) {
        subresult = divide_conquer(std::forward<Input>(*subproblems.begin()),
            std::forward<Divider>(divide_op),
            std::forward<Solver>(solve_op),
            std::forward<Combiner>(combine_op), num_threads);
      }
      else {
        subresult = seq.divide_conquer(std::forward<Input>(*subproblems.begin()),
            std::forward<Divider>(divide_op),
            std::forward<Solver>(solve_op),
            std::forward<Combiner>(combine_op));
      }
      #pragma omp taskwait
    }
  }
  return seq.reduce(partials.begin(), partials.size(),
      std::forward<subresult_type>(subresult), combine_op);
}

template <typename Queue, typename Consumer, requires_no_pattern<Consumer>>
void parallel_execution_omp::do_pipeline(Queue & input_queue,
    Consumer && consume_op) const
{
  using namespace std;
  using input_type = typename Queue::value_type;

  // Unordered mode: consume items as they arrive; an empty optional in
  // item.first signals end of stream.
  if (!is_ordered()) {
    for (;;) {
      auto item = input_queue.pop();
      if (!item.first) break;
      consume_op(*item.first);
    }
    return;
  }

  // Ordered mode: buffer out-of-order items until their sequence number
  // (item.second) matches `current`.
  vector<input_type> elements;
  long current = 0;
  auto item = input_queue.pop();
  while (item.first) {
    if (current == item.second) {
      consume_op(*item.first);
      current ++;
    }
    else {
      elements.push_back(item);
    }
    auto it = find_if(elements.begin(), elements.end(),
        [&](auto x) { return x.second== current; });
if(it != elements.end()){
      consume_op(*it->first);
      elements.erase(it);
      current++;
    }
    item = input_queue.pop();
  }

  // End marker seen: drain the still-buffered items in sequence order.
  while(elements.size()>0){
    auto it = find_if(elements.begin(), elements.end(),
        [&](auto x) { return x.second== current; });
    if(it != elements.end()){
      consume_op(*it->first);
      elements.erase(it);
      current++;
    }
  }
}

template <typename Inqueue, typename Transformer, typename output_type,
          requires_no_pattern<Transformer>>
void parallel_execution_omp::do_pipeline(Inqueue & input_queue,
    Transformer && transform_op,
    mpmc_queue<output_type> & output_queue) const
{
  using namespace std;
#if __cplusplus < 201703L
  using namespace experimental;
#endif
  using output_item_value_type = typename output_type::first_type::value_type;

  // Pump items through the transformer, preserving each item's sequence
  // number, until the end-of-stream marker (empty optional) arrives.
  for (;;) {
    auto item{input_queue.pop()};
    if(!item.first) break;
    auto out = output_item_value_type{transform_op(*item.first)};
    output_queue.push(make_pair(out,item.second));
  }
}

template <typename Queue, typename Execution, typename Transformer,
          template <typename, typename> class Context,
          typename ... OtherTransformers,
          requires_context<Context<Execution,Transformer>>>
void parallel_execution_omp::do_pipeline(Queue & input_queue,
    Context<Execution,Transformer> && context_op,
    OtherTransformers &&...
    other_ops) const
{
  using namespace std;
#if __cplusplus < 201703L
  using namespace experimental;
#endif

  using input_item_type = typename Queue::value_type;
  using input_item_value_type =
      typename input_item_type::first_type::value_type;

  using output_type =
      typename stage_return_type<input_item_value_type, Transformer>::type;
  using output_optional_type = optional<output_type>;
  using output_item_type = pair<output_optional_type, long>;

  decltype(auto) output_queue =
      get_output_queue<output_item_type>(other_ops...);

  // Run the nested context's pipeline as a task; on completion it pushes
  // the end-of-stream marker (-1) into the shared output queue.
  #pragma omp task shared(input_queue,context_op,output_queue)
  {
    context_op.execution_policy().pipeline(input_queue,
        context_op.transformer(), output_queue);
    output_queue.push(make_pair(output_optional_type{},-1));
  }

  do_pipeline(output_queue,
      forward<OtherTransformers>(other_ops)... );
  #pragma omp taskwait
}

template <typename Queue, typename Transformer,
          typename ... OtherTransformers,
          requires_no_pattern<Transformer>>
void parallel_execution_omp::do_pipeline(
    Queue & input_queue,
    Transformer && transform_op,
    OtherTransformers && ...
other_ops) const
{
  using namespace std;
#if __cplusplus < 201703L
  using namespace experimental;
#endif
  using input_type = typename Queue::value_type;
  using input_value_type = typename input_type::first_type::value_type;
  using result_type =
      typename result_of<Transformer(input_value_type)>::type;
  using output_value_type = optional<result_type>;
  using output_type = pair<output_value_type,long>;
  decltype(auto) output_queue =
      get_output_queue<output_type>(other_ops...);

  // Intermediate transformer stage: runs in its own task and forwards the
  // end-of-stream marker (-1) to the next stage when done.
  #pragma omp task shared(transform_op, input_queue, output_queue)
  {
    for (;;) {
      auto item = input_queue.pop();
      if (!item.first) break;
      auto out = output_value_type{transform_op(*item.first)};
      output_queue.push(make_pair(out, item.second));
    }
    output_queue.push(make_pair(output_value_type{}, -1));
  }
  do_pipeline(output_queue,
      forward<OtherTransformers>(other_ops)...);
}

template <typename Queue, typename FarmTransformer,
          template <typename> class Farm,
          requires_farm<Farm<FarmTransformer>>>
void parallel_execution_omp::do_pipeline(
    Queue & input_queue,
    Farm<FarmTransformer> && farm_obj) const
{
  using namespace std;
#if __cplusplus < 201703L
  using namespace experimental;
#endif

  // Terminal farm: cardinality() consumer tasks share the input queue.
  for (int i=0; i<farm_obj.cardinality(); ++i) {
    #pragma omp task shared(farm_obj,input_queue)
    {
      auto item = input_queue.pop();
      while (item.first) {
        farm_obj(*item.first);
        item = input_queue.pop();
      }
      // Re-push the end marker so sibling workers also terminate.
      input_queue.push(item);
    }
  }
  #pragma omp taskwait
}

template <typename Queue, typename FarmTransformer,
          template <typename> class Farm,
          typename ... OtherTransformers,
          requires_farm<Farm<FarmTransformer>>>
void parallel_execution_omp::do_pipeline(
    Queue & input_queue,
    Farm<FarmTransformer> && farm_obj,
    OtherTransformers && ...
    other_transform_ops) const
{
  using namespace std;
#if __cplusplus < 201703L
  using namespace experimental;
#endif
  using input_type = typename Queue::value_type;
  using input_value_type = typename input_type::first_type::value_type;

  using result_type =
      typename stage_return_type<input_value_type, FarmTransformer>::type;
  using output_optional_type = optional<result_type>;
  using output_type = pair<output_optional_type,long>;

  decltype(auto) output_queue =
      get_output_queue<output_type>(other_transform_ops...);
  // auto output_queue = make_queue<output_type>();

  atomic<int> done_threads{0};
  int ntask = farm_obj.cardinality();

  // Each worker runs the single-stage pipeline over the shared queues.
  // The last worker to finish pushes the end marker downstream; the others
  // re-push an empty item so their siblings also see end of stream.
  for (int i=0; i<farm_obj.cardinality(); ++i) {
    #pragma omp task shared(done_threads,output_queue,farm_obj,input_queue,ntask)
    {
      do_pipeline(input_queue, farm_obj.transformer(), output_queue);
      done_threads++;
      if (done_threads == ntask){
        output_queue.push(make_pair(output_optional_type{}, -1));
      }else{
        input_queue.push(input_type{});
      }
    }
  }
  do_pipeline(output_queue,
      forward<OtherTransformers>(other_transform_ops)...);
  #pragma omp taskwait
}

// Terminal filter with no downstream stage: deliberately a no-op.
template <typename Queue, typename Predicate,
          template <typename> class Filter,
          requires_filter<Filter<Predicate>>>
void parallel_execution_omp::do_pipeline(
    Queue &,
    Filter<Predicate> &&) const
{
}

template <typename Queue, typename Predicate,
          template <typename> class Filter,
          typename ... OtherTransformers,
          requires_filter<Filter<Predicate>>>
void parallel_execution_omp::do_pipeline(
    Queue & input_queue,
    Filter<Predicate> && filter_obj,
    OtherTransformers && ...
other_transform_ops) const
{
  using namespace std;
  using input_type = typename Queue::value_type;
  using input_value_type = typename input_type::first_type;
  auto filter_queue = make_queue<input_type>();

  if (is_ordered()) {
    // Ordered mode: rejected items are forwarded as empty optionals so the
    // reorder task still sees a dense range of sequence numbers.
    auto filter_task = [&]() {
      {
        auto item{input_queue.pop()};
        while (item.first) {
          if(filter_obj(*item.first)) {
            filter_queue.push(item);
          }
          else {
            filter_queue.push(make_pair(input_value_type{} ,item.second));
          }
          item = input_queue.pop();
        }
        filter_queue.push(make_pair(input_value_type{}, -1));
      }
    };

    decltype(auto) output_queue =
        get_output_queue<input_type>(other_transform_ops...);

    // Restores arrival order and renumbers the surviving items with `order`.
    auto reorder_task = [&]() {
      vector<input_type> elements;
      int current = 0;
      long order = 0;
      auto item = filter_queue.pop();
      for (;;) {
        if (!item.first && item.second == -1) break;
        if (item.second == current) {
          if (item.first) {
            output_queue.push(make_pair(item.first, order++));
          }
          current++;
        }
        else {
          elements.push_back(item);
        }
        auto it = find_if(elements.begin(), elements.end(),
            [&](auto x) { return x.second== current; });
        if(it != elements.end()){
          if (it->first) {
            output_queue.push(make_pair(it->first,order));
            order++;
          }
          elements.erase(it);
          current++;
        }
        item = filter_queue.pop();
      }

      // Drain remaining buffered items, then forward the end marker.
      while (elements.size()>0) {
        auto it = find_if(elements.begin(), elements.end(),
            [&](auto x) { return x.second== current; });
        if(it != elements.end()){
          if (it->first) {
            output_queue.push(make_pair(it->first,order));
            order++;
          }
          elements.erase(it);
          current++;
        }
        item = filter_queue.pop();
      }
      output_queue.push(item);
    };

    #pragma omp task shared(filter_queue,filter_obj,input_queue)
    {
      filter_task();
    }
    #pragma omp task shared (output_queue,filter_queue)
    {
      reorder_task();
    }
    do_pipeline(output_queue,
        forward<OtherTransformers>(other_transform_ops)...);
    #pragma omp taskwait
  }
  else {
    // Unordered mode: simply drop rejected items.
    auto filter_task = [&]() {
      auto item = input_queue.pop();
      while (item.first) {
        if (filter_obj(*item.first)) {
          filter_queue.push(item);
        }
        item = input_queue.pop();
      }
      filter_queue.push(make_pair(input_value_type{}, -1));
    };

    #pragma omp task shared(filter_queue,filter_obj,input_queue)
    {
      filter_task();
    }
    do_pipeline(filter_queue,
        std::forward<OtherTransformers>(other_transform_ops)...);
    #pragma omp taskwait
  }
}

template <typename Queue, typename Combiner, typename Identity,
          template <typename C, typename I> class Reduce,
          typename ... OtherTransformers,
          requires_reduce<Reduce<Combiner,Identity>>>
void parallel_execution_omp::do_pipeline(
    Queue && input_queue,
    Reduce<Combiner,Identity> && reduce_obj,
    OtherTransformers && ... other_transform_ops) const
{
  using namespace std;
#if __cplusplus < 201703L
  using namespace experimental;
#endif
  using output_item_value_type = optional<decay_t<Identity>>;
  using output_item_type = pair<output_item_value_type,long>;
  decltype(auto) output_queue =
      get_output_queue<output_item_type>(other_transform_ops...);

  // Accumulates items into windows; emits one reduced value per full window.
  auto reduce_task = [&]() {
    auto item{input_queue.pop()};
    int order = 0;
    while (item.first) {
      reduce_obj.add_item(std::forward<Identity>(*item.first));
      item = input_queue.pop();
      if (reduce_obj.reduction_needed()) {
        constexpr sequential_execution seq;
        auto red = reduce_obj.reduce_window(seq);
        output_queue.push(make_pair(red, order++));
      }
    }
    output_queue.push(make_pair(output_item_value_type{}, -1));
  };
  #pragma omp task shared(reduce_obj,input_queue, output_queue)
  {
    reduce_task();
  }
  do_pipeline(output_queue,
      std::forward<OtherTransformers>(other_transform_ops)...);
  #pragma omp taskwait
}

template <typename Queue, typename Transformer, typename Predicate,
          template <typename T, typename P> class Iteration,
          typename ... OtherTransformers,
          requires_iteration<Iteration<Transformer,Predicate>>,
          requires_no_pattern<Transformer>>
void parallel_execution_omp::do_pipeline(
    Queue & input_queue,
    Iteration<Transformer,Predicate> && iteration_obj,
    OtherTransformers && ...
other_transform_ops) const { using namespace std; #if __cplusplus < 201703L using namespace experimental; #endif using input_item_type = typename decay_t<Queue>::value_type; decltype(auto) output_queue = get_output_queue<input_item_type>(other_transform_ops...); auto iteration_task = [&]() { for (;;) { auto item = input_queue.pop(); if (!item.first) break; auto value = iteration_obj.transform(*item.first); auto new_item = input_item_type{value,item.second}; if (iteration_obj.predicate(value)) { output_queue.push(new_item); } else { input_queue.push(new_item); } } while (!input_queue.empty()) { auto item = input_queue.pop(); auto value = iteration_obj.transform(*item.first); auto new_item = input_item_type{value,item.second}; if (iteration_obj.predicate(value)) { output_queue.push(new_item); } else { input_queue.push(new_item); } } output_queue.push(input_item_type{{},-1}); }; #pragma omp task shared(iteration_obj,input_queue,output_queue) { iteration_task(); } do_pipeline(output_queue, std::forward<OtherTransformers>(other_transform_ops)...); #pragma omp taskwait } template <typename Queue, typename Transformer, typename Predicate, template <typename T, typename P> class Iteration, typename ... OtherTransformers, requires_iteration<Iteration<Transformer,Predicate>>, requires_pipeline<Transformer>> void parallel_execution_omp::do_pipeline( Queue &, Iteration<Transformer,Predicate> &&, OtherTransformers && ...) const { static_assert(!is_pipeline<Transformer>, "Not implemented"); } template <typename Queue, typename ... Transformers, template <typename...> class Pipeline, typename ... OtherTransformers, requires_pipeline<Pipeline<Transformers...>>> void parallel_execution_omp::do_pipeline( Queue & input_queue, Pipeline<Transformers...> && pipeline_obj, OtherTransformers && ... 
other_transform_ops) const { do_pipeline_nested( input_queue, std::tuple_cat(pipeline_obj.transformers(), std::forward_as_tuple(other_transform_ops...)), std::make_index_sequence<sizeof...(Transformers)+sizeof...(OtherTransformers)>()); } template <typename Queue, typename ... Transformers, std::size_t ... I> void parallel_execution_omp::do_pipeline_nested( Queue & input_queue, std::tuple<Transformers...> && transform_ops, std::index_sequence<I...>) const { do_pipeline(input_queue, std::forward<Transformers>(std::get<I>(transform_ops))...); } template<typename T, typename... Others> void parallel_execution_omp::do_pipeline(mpmc_queue <T> &, mpmc_queue <T> &, Others &&...) const { } } // end namespace grppi #else // GRPPI_OMP undefined namespace grppi { /// Parallel execution policy. /// Empty type if GRPPI_OMP disabled. struct parallel_execution_omp {}; /** \brief Metafunction that determines if type E is parallel_execution_omp This metafunction evaluates to false if GRPPI_OMP is disabled. \tparam Execution policy type. */ template <typename E> constexpr bool is_parallel_execution_omp() { return false; } } #endif // GRPPI_OMP #endif
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 8; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
GB_binop__lxor_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__lxor_fp64 // A.*B function (eWiseMult): GB_AemultB__lxor_fp64 // A*D function (colscale): GB_AxD__lxor_fp64 // D*A function (rowscale): GB_DxB__lxor_fp64 // C+=B function (dense accum): GB_Cdense_accumB__lxor_fp64 // C+=b function (dense accum): GB_Cdense_accumb__lxor_fp64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lxor_fp64 // C=scalar+B GB_bind1st__lxor_fp64 // C=scalar+B' GB_bind1st_tran__lxor_fp64 // C=A+scalar GB_bind2nd__lxor_fp64 // C=A'+scalar GB_bind2nd_tran__lxor_fp64 // C type: double // A type: double // B,b type: double // BinaryOp: cij = ((aij != 0) != (bij != 0)) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ double bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define 
GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = ((x != 0) != (y != 0)) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LXOR || GxB_NO_FP64 || GxB_NO_LXOR_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__lxor_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__lxor_fp64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__lxor_fp64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__lxor_fp64 ( GrB_Matrix C, const GrB_Matrix A, bool 
A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *GB_RESTRICT Cx = (double *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__lxor_fp64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *GB_RESTRICT Cx = (double *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__lxor_fp64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = 
NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__lxor_fp64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__lxor_fp64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; double bij = Bx [p] ; Cx [p] = ((x != 0) != (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd 
//------------------------------------------------------------------------------ GrB_Info GB_bind2nd__lxor_fp64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = Ax [p] ; Cx [p] = ((aij != 0) != (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = ((x != 0) != (aij != 0)) ; \ } GrB_Info GB_bind1st_tran__lxor_fp64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = ((aij != 0) != (y != 0)) ; \ } GrB_Info GB_bind2nd_tran__lxor_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
calcv.c
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <math.h>
#ifdef _FOR_R
#include <R_ext/Print.h>
#define fprintf(f, message) REprintf(message)
#else
#include <stdio.h>
#endif

/* TODO: use qsort_s for argsorting, or switch to C++ std::sort */

/* A value paired with its original position, used for argsorting doubles. */
typedef struct indexed_double {double x; size_t ix;} indexed_double;

/* qsort comparator for indexed_double, ascending by value.
 * BUG FIX: the old comparator returned only 1 or -1 and never 0, so two
 * equal keys compared as simultaneously "greater" in both directions —
 * that violates the qsort comparator contract (must induce a consistent
 * ordering) and can corrupt the sort on some libc implementations. */
int comp_for_argsort_dbl(const void *a, const void *b)
{
    double xa = ((const indexed_double*)a)->x;
    double xb = ((const indexed_double*)b)->x;
    return (xa > xb) - (xa < xb);
}

/* qsort comparator for (value, index) size_t pairs, ascending by value.
 * Same equality fix as above. */
int comp_for_argsort_szt(const void *a, const void *b)
{
    size_t xa = ((const size_t*)a)[0];
    size_t xb = ((const size_t*)b)[0];
    return (xa > xb) - (xa < xb);
}

/* Writes into 'out' the permutation that sorts 'a' ascending.
 * 'buffer' must hold n indexed_double entries of scratch space.
 * Note: this is a rather inefficient procedure and can be improved with
 * e.g. C++'s sort. */
void argsort_naive_dbl(double a[], size_t out[], indexed_double buffer[], size_t n)
{
    for (size_t i = 0; i < n; i++) {
        buffer[i].x = a[i];
        buffer[i].ix = i;
    }
    qsort(buffer, n, sizeof(indexed_double), comp_for_argsort_dbl);
    for (size_t i = 0; i < n; i++) {
        out[i] = buffer[i].ix;
    }
}

/* Argsort for size_t arrays; 'buffer' must hold 2*n size_t (value, index
 * pairs laid out contiguously so qsort can move them as one element). */
void argsort_naive_szt(size_t a[], size_t out[], size_t buffer[], size_t n)
{
    for (size_t i = 0; i < n; i++) {
        buffer[i * 2] = a[i];
        buffer[i * 2 + 1] = i;
    }
    qsort(buffer, n, sizeof(size_t) * 2, comp_for_argsort_szt);
    for (size_t i = 0; i < n; i++) {
        out[i] = buffer[i * 2 + 1];
    }
}

/* Minimum of a[0..n-1]; returns +inf for n == 0. */
double find_min(double a[], size_t n)
{
    double out = HUGE_VAL;
    for (size_t i = 0; i < n; i++) {
        out = (out > a[i])? a[i] : out;
    }
    return out;
}

/* out[i] = row_C[inner_order[i]] - min(row_C): the row's costs, shifted so
 * the smallest is zero and laid out in ascending order. */
void calc_cost(double row_C[], double out[], size_t inner_order[], size_t ncol)
{
    double min_in_row = find_min(row_C, ncol);
    for (size_t i = 0; i < ncol; i++) {
        out[i] = row_C[inner_order[i]] - min_in_row;
    }
}

/* Successive differences of the (sorted) cost array; out has ncol-1 slots. */
void calc_rectangle_width(double cost[], double out[], size_t ncol)
{
    for (size_t i = 0; i < ncol - 1; i++) {
        out[i] = cost[i + 1] - cost[i];
    }
}

/* Permutes 'a' in place so that a_new[i] = a_old[ix[i]], via 'buffer'. */
void sort_by_ix(double a[], size_t ix[], double buffer[], size_t n)
{
    for (size_t i = 0; i < n; i++){
        buffer[i] = a[ix[i]];
    }
    memcpy(a, buffer, sizeof(double) * n);
}

/* Per-thread scratch buffers for calculate_V; 'threadprivate' gives each
 * OpenMP thread its own copy (file-scope pointers are zero-initialized, so
 * free() on a thread that never allocated is a harmless free(NULL)). */
size_t *inner_order;
size_t *out_order;
indexed_double *buffer_argsort_dbl;
size_t *buffer_argsort_szt;
double *cost_buffer;
double *rectangle_width_arr;
#pragma omp threadprivate(inner_order, out_order, buffer_argsort_dbl, buffer_argsort_szt, cost_buffer, rectangle_width_arr)

/* Computes V row-by-row from the cost matrix C (both nrow x ncol,
 * row-major).  Returns 0 on success, 1 on allocation failure. */
int calculate_V(double C[], double V[], size_t nrow, size_t ncol, int nthreads)
{
    if (nrow == 0 || ncol == 0)
        return 0; /* nothing to do; also avoids 'ncol - 1' wrap-around below */

    int out_of_mem = 0;
    /* Allocate the per-thread scratch space.
     * BUG FIX: this region previously ran with the default thread count
     * while the work loop below used num_threads(nthreads); if nthreads
     * exceeded the default, extra workers ran with NULL buffers.  Both
     * regions are now pinned to 'nthreads'.
     * Note: MSVC is stuck with an older version of OpenMP (17 years old at
     * the time of writing) which does not support 'max' reductions, so the
     * region stays serial there — that configuration still has the
     * mismatch above; TODO confirm whether MSVC+OpenMP is a supported
     * build. */
    #ifdef _OPENMP
    #if !defined(_MSC_VER) && _OPENMP>20080101
    #pragma omp parallel num_threads(nthreads) reduction(max:out_of_mem)
    #endif
    #endif
    {
        inner_order = (size_t*) malloc(sizeof(size_t) * ncol);
        out_order = (size_t*) malloc(sizeof(size_t) * ncol);
        buffer_argsort_dbl = (indexed_double*) malloc(sizeof(indexed_double) * ncol);
        buffer_argsort_szt = (size_t*) malloc(sizeof(size_t) * ncol * 2);
        cost_buffer = (double*) malloc(sizeof(double) * ncol);
        rectangle_width_arr = (double*) malloc(sizeof(double) * (ncol - 1));
        if (inner_order == NULL || out_order == NULL ||
            buffer_argsort_dbl == NULL || buffer_argsort_szt == NULL ||
            cost_buffer == NULL || rectangle_width_arr == NULL)
        {
            out_of_mem = 1;
        }
    }

    if (out_of_mem)
    {
        fprintf(stderr, "Error: Could not allocate memory for the procedure.\n");
        goto cleanup;
    }

    /* Rows are independent; unsigned loop index requires OpenMP >= 3.0. */
    #pragma omp parallel for schedule(static) num_threads(nthreads) firstprivate(C, V, nrow, ncol)
    for (size_t row = 0; row < nrow; row++)
    {
        argsort_naive_dbl(C + row * ncol, inner_order, buffer_argsort_dbl, ncol);
        calc_cost(C + row * ncol, cost_buffer, inner_order, ncol);
        argsort_naive_szt(inner_order, out_order, buffer_argsort_szt, ncol);
        calc_rectangle_width(cost_buffer, rectangle_width_arr, ncol);
        V[row * ncol] = 0;
        for (size_t col = 0; col < ncol - 1; col++)
            V[row * ncol + col + 1] = V[row * ncol + col]
                                      + rectangle_width_arr[col] / ((double) col + 1);
        /* Undo the sort so V lines up with C's original column order
         * (cost_buffer is reused here purely as scratch). */
        sort_by_ix(V + row * ncol, out_order, cost_buffer, ncol);
    }

cleanup:
    ; /* a label must precede a statement, not a pragma */
    /* Each thread frees its own threadprivate buffers.
     * BUG FIX: out_order and buffer_argsort_szt were never freed (leak). */
    #ifdef _OPENMP
    #pragma omp parallel num_threads(nthreads)
    #endif
    {
        free(inner_order);
        free(out_order);
        free(buffer_argsort_dbl);
        free(buffer_argsort_szt);
        free(cost_buffer);
        free(rectangle_width_arr);
    }
    return out_of_mem;
}
deconvolution_pack8to16.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Deconvolution (transposed convolution) with AVX-512: the input blob is
// packed 8 floats per spatial element and the output blob 16 floats per
// element (hence "pack8to16").  Each output pixel is computed by gathering,
// for every kernel tap, the input pixel that would have produced it under
// the forward convolution, and accumulating input-channel x weight products
// into one 16-lane vector.
// NOTE(review): weight_data_packed appears to be laid out as
// [outch][maxk][8 in][16 out] (128 floats per tap — see the `* 128` strides
// below) — confirm against the packing code elsewhere in the project.
static void deconvolution_pack8to16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packed, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // Effective kernel span once dilation gaps are included.
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    const int maxk = kernel_w * kernel_h;

    const float* bias_data_ptr = bias_data;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // Start each output element from its 16-wide bias (or zero).
                __m512 _sum = _mm512_setzero_ps();

                if (bias_data_ptr)
                {
                    _sum = _mm512_loadu_ps(bias_data_ptr + p * 16);
                }

                const float* kptr = weight_data_packed.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);

                    for (int y = 0; y < kernel_h; y++)
                    {
                        // Map output row i back to the input row that feeds it
                        // through kernel row y; skip taps that fall outside the
                        // input or between strides.
                        int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                        if (sys < 0 || sys % stride_h != 0)
                            continue;

                        int sy = sys / stride_h;
                        if (sy >= h)
                            continue;

                        for (int x = 0; x < kernel_w; x++)
                        {
                            // Same mapping for the column coordinate.
                            int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;

                            int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;

                            // 8 input channels packed at this spatial position.
                            const float* sptr = m.row(sy) + sx * 8;

                            // 128 = 8 input lanes * 16 output lanes per tap.
                            int k = (y * kernel_w + x) * 128;

                            // Broadcast each of the 8 input values ...
                            __m512 _val0 = _mm512_set1_ps(sptr[0]);
                            __m512 _val1 = _mm512_set1_ps(sptr[1]);
                            __m512 _val2 = _mm512_set1_ps(sptr[2]);
                            __m512 _val3 = _mm512_set1_ps(sptr[3]);
                            __m512 _val4 = _mm512_set1_ps(sptr[4]);
                            __m512 _val5 = _mm512_set1_ps(sptr[5]);
                            __m512 _val6 = _mm512_set1_ps(sptr[6]);
                            __m512 _val7 = _mm512_set1_ps(sptr[7]);

                            // ... and load the matching 16-output weight rows.
                            __m512 _w0 = _mm512_load_ps(kptr + k);
                            __m512 _w1 = _mm512_load_ps(kptr + k + 16);
                            __m512 _w2 = _mm512_load_ps(kptr + k + 16 * 2);
                            __m512 _w3 = _mm512_load_ps(kptr + k + 16 * 3);
                            __m512 _w4 = _mm512_load_ps(kptr + k + 16 * 4);
                            __m512 _w5 = _mm512_load_ps(kptr + k + 16 * 5);
                            __m512 _w6 = _mm512_load_ps(kptr + k + 16 * 6);
                            __m512 _w7 = _mm512_load_ps(kptr + k + 16 * 7);

                            // Fused multiply-add: sum += val_c * w_c for each
                            // of the 8 input channels.
                            _sum = _mm512_fmadd_ps(_val0, _w0, _sum);
                            _sum = _mm512_fmadd_ps(_val1, _w1, _sum);
                            _sum = _mm512_fmadd_ps(_val2, _w2, _sum);
                            _sum = _mm512_fmadd_ps(_val3, _w3, _sum);
                            _sum = _mm512_fmadd_ps(_val4, _w4, _sum);
                            _sum = _mm512_fmadd_ps(_val5, _w5, _sum);
                            _sum = _mm512_fmadd_ps(_val6, _w6, _sum);
                            _sum = _mm512_fmadd_ps(_val7, _w7, _sum);
                        }
                    }

                    // Advance to the next input channel's weight plane.
                    kptr += maxk * 128;
                }

                // Apply the configured activation, then emit the 16 outputs.
                _sum = activation_avx512(_sum, activation_type, activation_params);

                _mm512_storeu_ps(outptr, _sum);
                outptr += 16;
            }
        }
    }
}
lcs.c
/* Tempo Sequencial:
Length of Longest Common Substring is 14
real 0m3,049s  user 0m2,584s  sys 0m0,459s
Length of Longest Common Substring is 14
real 0m3,047s  user 0m2,704s  sys 0m0,336s
Length of Longest Common Substring is 14
real 0m3,046s  user 0m2,618s  sys 0m0,423s
Length of Longest Common Substring is 14
real 0m3,016s  user 0m2,602s  sys 0m0,409s
Length of Longest Common Substring is 14
real 0m3,012s  user 0m2,577s  sys 0m0,429s

Tempo paralelo
Length of Longest Common Substring is 14
real 0m1,598s  user 0m2,706s  sys 0m0,417s
Length of Longest Common Substring is 14
real 0m1,647s  user 0m2,736s  sys 0m0,469s
Length of Longest Common Substring is 14
real 0m1,631s  user 0m2,772s  sys 0m0,413s
Length of Longest Common Substring is 14
real 0m1,698s  user 0m2,809s  sys 0m0,463s
Length of Longest Common Substring is 14
real 0m1,652s  user 0m2,792s  sys 0m0,425s

Speedup ~= 1.9080100125156445
*/

/* Dynamic Programming solution to find length of the longest common substring
   Adapted from http://www.geeksforgeeks.org/longest-common-substring/ */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/* Reads the whole of `filename` into a freshly malloc'd, NUL-terminated
 * buffer; stores the byte count in *size.  Caller owns (frees) the buffer.
 * BUG FIXES: fopen/malloc/fread results were unchecked and the stream was
 * never closed. */
char *readFile(char *filename, int *size)
{
    char *buffer = NULL;
    *size = 0;

    FILE *fp = fopen(filename, "r");
    if (fp == NULL) {
        fprintf(stderr, "Error: could not open %s\n", filename);
        exit(EXIT_FAILURE);
    }

    /* Get the buffer size: seek to the end and ask where we are. */
    fseek(fp, 0, SEEK_END);
    *size = ftell(fp);
    rewind(fp);

    /* size + 1 byte for the terminating '\0' */
    buffer = malloc((*size + 1) * sizeof(*buffer));
    if (buffer == NULL) {
        fclose(fp);
        fprintf(stderr, "Error: out of memory reading %s\n", filename);
        exit(EXIT_FAILURE);
    }

    if (fread(buffer, 1, (size_t)*size, fp) != (size_t)*size) {
        fclose(fp);
        free(buffer);
        fprintf(stderr, "Error: short read on %s\n", filename);
        exit(EXIT_FAILURE);
    }

    buffer[*size] = '\0';
    fclose(fp); /* was previously leaked */
    return buffer;
}

/* A utility function to find maximum of two integers */
int max(int a, int b)
{
    return (a > b) ? a : b;
}

/* Returns length of longest common substring of X[0..m-1] and Y[0..n-1].
 *
 * LCSuff[i][j] holds the length of the longest common suffix of
 * X[0..i-1] and Y[0..j-1]; row/column 0 are padding with no logical
 * meaning, used only for simplicity of the program. */
int LCSubStr(char *x, char *y, int m, int n)
{
    int **LCSuff = (int **)malloc((m + 1) * sizeof(int *));
    for (int i = 0; i < m + 1; i++)
        LCSuff[i] = (int *)malloc((n + 1) * sizeof(int));

    int result = 0; // To store length of the longest common substring

    /* BUG FIX: the previous `target teams distribute parallel for
     * collapse(2)` parallelized BOTH loops, but LCSuff[i][j] reads
     * LCSuff[i-1][j-1] — a loop-carried dependence across i, so collapsing
     * created a data race and could produce wrong lengths.  Keeping the i
     * loop sequential and parallelizing only the j loop is safe: row i-1 is
     * fully written before any thread reads it for row i.  (The old
     * map(tofrom:LCSuff[0:m+1]) also only mapped the row-pointer array, not
     * the row data, so device offload never saw the table anyway.) */
    for (int i = 0; i <= m; i++) {
#pragma omp parallel for reduction(max:result) schedule(static)
        for (int j = 0; j <= n; j++) {
            if (i == 0 || j == 0)
                LCSuff[i][j] = 0;
            else if (x[i - 1] == y[j - 1]) {
                LCSuff[i][j] = LCSuff[i - 1][j - 1] + 1;
                result = max(result, LCSuff[i][j]);
            }
            else
                LCSuff[i][j] = 0;
        }
    }

    /* BUG FIX: the table was previously leaked. */
    for (int i = 0; i < m + 1; i++)
        free(LCSuff[i]);
    free(LCSuff);

    return result;
}

/* Driver program to test above function */
int main()
{
    int m, n;
    char *x = readFile("seqA.txt", &m);
    char *y = readFile("seqB.txt", &n);

    printf("\nLength of Longest Common Substring is %d\n", LCSubStr(x, y, m, n));

    free(x);
    free(y);
    return 0;
}
compare.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP AAA RRRR EEEEE % % C O O MM MM P P A A R R E % % C O O M M M PPPP AAAAA RRRR EEE % % C O O M M P A A R R E % % CCCC OOO M M P A A R R EEEEE % % % % % % MagickCore Image Comparison Methods % % % % Software Design % % John Cristy % % December 2003 % % % % % % Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/compare.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/statistic.h" #include "magick/thread-private.h" #include "magick/transform.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p a r e I m a g e C h a n n e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompareImageChannels() compares one or more image channels of an image % to a reconstructed image and returns the difference image. % % The format of the CompareImageChannels method is: % % Image *CompareImageChannels(const Image *image, % const Image *reconstruct_image,const ChannelType channel, % const MetricType metric,double *distortion,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o channel: the channel. % % o metric: the metric. % % o distortion: the computed distortion between the images. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image, const MetricType metric,double *distortion,ExceptionInfo *exception) { Image *highlight_image; highlight_image=CompareImageChannels(image,reconstruct_image, CompositeChannels,metric,distortion,exception); return(highlight_image); } MagickExport Image *CompareImageChannels(Image *image, const Image *reconstruct_image,const ChannelType channel, const MetricType metric,double *distortion,ExceptionInfo *exception) { CacheView *highlight_view, *image_view, *reconstruct_view; const char *artifact; Image *difference_image, *highlight_image; MagickBooleanType status; MagickPixelPacket highlight, lowlight, zero; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickSignature); assert(distortion != (double *) NULL); *distortion=0.0; if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((reconstruct_image->columns != image->columns) || (reconstruct_image->rows != image->rows)) ThrowImageException(ImageError,"ImageSizeDiffers"); status=GetImageChannelDistortion(image,reconstruct_image,channel,metric, distortion,exception); if (status == MagickFalse) return((Image *) NULL); difference_image=CloneImage(image,0,0,MagickTrue,exception); if (difference_image == (Image *) NULL) return((Image *) NULL); (void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel); highlight_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (highlight_image == (Image *) NULL) { difference_image=DestroyImage(difference_image); return((Image *) NULL); } if (SetImageStorageClass(highlight_image,DirectClass) == MagickFalse) { InheritException(exception,&highlight_image->exception); 
difference_image=DestroyImage(difference_image); highlight_image=DestroyImage(highlight_image); return((Image *) NULL); } (void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel); (void) QueryMagickColor("#f1001ecc",&highlight,exception); artifact=GetImageArtifact(image,"highlight-color"); if (artifact != (const char *) NULL) (void) QueryMagickColor(artifact,&highlight,exception); (void) QueryMagickColor("#ffffffcc",&lowlight,exception); artifact=GetImageArtifact(image,"lowlight-color"); if (artifact != (const char *) NULL) (void) QueryMagickColor(artifact,&lowlight,exception); if (highlight_image->colorspace == CMYKColorspace) { ConvertRGBToCMYK(&highlight); ConvertRGBToCMYK(&lowlight); } /* Generate difference image. */ status=MagickTrue; GetMagickPixelPacket(image,&zero); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); highlight_view=AcquireAuthenticCacheView(highlight_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,highlight_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel, reconstruct_pixel; register const IndexPacket *restrict indexes, *restrict reconstruct_indexes; register const PixelPacket *restrict p, *restrict q; register IndexPacket *restrict highlight_indexes; register PixelPacket *restrict r; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns, 1,exception); r=QueueCacheViewAuthenticPixels(highlight_view,0,y,highlight_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); 
reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view); highlight_indexes=GetCacheViewAuthenticIndexQueue(highlight_view); pixel=zero; reconstruct_pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { MagickStatusType difference; SetMagickPixelPacket(image,p,indexes+x,&pixel); SetMagickPixelPacket(reconstruct_image,q,reconstruct_indexes+x, &reconstruct_pixel); difference=MagickFalse; if (channel == CompositeChannels) { if (IsMagickColorSimilar(&pixel,&reconstruct_pixel) == MagickFalse) difference=MagickTrue; } else { if (((channel & RedChannel) != 0) && (GetPixelRed(p) != GetPixelRed(q))) difference=MagickTrue; if (((channel & GreenChannel) != 0) && (GetPixelGreen(p) != GetPixelGreen(q))) difference=MagickTrue; if (((channel & BlueChannel) != 0) && (GetPixelBlue(p) != GetPixelBlue(q))) difference=MagickTrue; if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse) && (GetPixelOpacity(p) != GetPixelOpacity(q))) difference=MagickTrue; if ((((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (reconstruct_image->colorspace == CMYKColorspace)) && (GetPixelIndex(indexes+x) != GetPixelIndex(reconstruct_indexes+x))) difference=MagickTrue; } if (difference != MagickFalse) SetPixelPacket(highlight_image,&highlight,r,highlight_indexes+x); else SetPixelPacket(highlight_image,&lowlight,r,highlight_indexes+x); p++; q++; r++; } sync=SyncCacheViewAuthenticPixels(highlight_view,exception); if (sync == MagickFalse) status=MagickFalse; } highlight_view=DestroyCacheView(highlight_view); reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); (void) CompositeImage(difference_image,image->compose,highlight_image,0,0); highlight_image=DestroyImage(highlight_image); if (status == MagickFalse) difference_image=DestroyImage(difference_image); return(difference_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l D i 
s t o r t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelDistortion() compares one or more image channels of an image % to a reconstructed image and returns the specified distortion metric. % % The format of the GetImageChannelDistortion method is: % % MagickBooleanType GetImageChannelDistortion(const Image *image, % const Image *reconstruct_image,const ChannelType channel, % const MetricType metric,double *distortion,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o channel: the channel. % % o metric: the metric. % % o distortion: the computed distortion between the images. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageDistortion(Image *image, const Image *reconstruct_image,const MetricType metric,double *distortion, ExceptionInfo *exception) { MagickBooleanType status; status=GetImageChannelDistortion(image,reconstruct_image,CompositeChannels, metric,distortion,exception); return(status); } static MagickBooleanType GetAbsoluteDistortion(const Image *image, const Image *reconstruct_image,const ChannelType channel,double *distortion, ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; MagickPixelPacket zero; ssize_t y; /* Compute the absolute difference in pixels between two images. 
*/ status=MagickTrue; GetMagickPixelPacket(image,&zero); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double channel_distortion[CompositeChannels+1]; MagickPixelPacket pixel, reconstruct_pixel; register const IndexPacket *restrict indexes, *restrict reconstruct_indexes; register const PixelPacket *restrict p, *restrict q; register ssize_t i, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view); pixel=zero; reconstruct_pixel=pixel; (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,p,indexes+x,&pixel); SetMagickPixelPacket(reconstruct_image,q,reconstruct_indexes+x, &reconstruct_pixel); if (IsMagickColorSimilar(&pixel,&reconstruct_pixel) == MagickFalse) { if ((channel & RedChannel) != 0) channel_distortion[RedChannel]++; if ((channel & GreenChannel) != 0) channel_distortion[GreenChannel]++; if ((channel & BlueChannel) != 0) channel_distortion[BlueChannel]++; if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) channel_distortion[OpacityChannel]++; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) channel_distortion[BlackChannel]++; channel_distortion[CompositeChannels]++; } p++; q++; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetAbsoluteError) #endif for 
(i=0; i <= (ssize_t) CompositeChannels; i++) distortion[i]+=channel_distortion[i]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); return(status); } static size_t GetNumberChannels(const Image *image, const ChannelType channel) { size_t channels; channels=0; if ((channel & RedChannel) != 0) channels++; if ((channel & GreenChannel) != 0) channels++; if ((channel & BlueChannel) != 0) channels++; if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) channels++; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) channels++; return(channels); } static MagickBooleanType GetFuzzDistortion(const Image *image, const Image *reconstruct_image,const ChannelType channel, double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; register ssize_t i; ssize_t y; status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double channel_distortion[CompositeChannels+1]; register const IndexPacket *restrict indexes, *restrict reconstruct_indexes; register const PixelPacket *restrict p, *restrict q; register ssize_t i, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view); (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) image->columns; x++) { 
MagickRealType distance, Da, Sa; Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) : (QuantumRange-OpaqueOpacity)); Da=QuantumScale*(reconstruct_image->matte != MagickFalse ? GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity)); if ((channel & RedChannel) != 0) { distance=QuantumScale*(Sa*GetPixelRed(p)-Da*GetPixelRed(q)); channel_distortion[RedChannel]+=distance*distance; channel_distortion[CompositeChannels]+=distance*distance; } if ((channel & GreenChannel) != 0) { distance=QuantumScale*(Sa*GetPixelGreen(p)-Da*GetPixelGreen(q)); channel_distortion[GreenChannel]+=distance*distance; channel_distortion[CompositeChannels]+=distance*distance; } if ((channel & BlueChannel) != 0) { distance=QuantumScale*(Sa*GetPixelBlue(p)-Da*GetPixelBlue(q)); channel_distortion[BlueChannel]+=distance*distance; channel_distortion[CompositeChannels]+=distance*distance; } if (((channel & OpacityChannel) != 0) && ((image->matte != MagickFalse) || (reconstruct_image->matte != MagickFalse))) { distance=QuantumScale*((image->matte != MagickFalse ? GetPixelOpacity(p) : OpaqueOpacity)- (reconstruct_image->matte != MagickFalse ? 
GetPixelOpacity(q): OpaqueOpacity)); channel_distortion[OpacityChannel]+=distance*distance; channel_distortion[CompositeChannels]+=distance*distance; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (reconstruct_image->colorspace == CMYKColorspace)) { distance=QuantumScale*(Sa*GetPixelIndex(indexes+x)- Da*GetPixelIndex(reconstruct_indexes+x)); channel_distortion[BlackChannel]+=distance*distance; channel_distortion[CompositeChannels]+=distance*distance; } p++; q++; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetFuzzDistortion) #endif for (i=0; i <= (ssize_t) CompositeChannels; i++) distortion[i]+=channel_distortion[i]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); for (i=0; i <= (ssize_t) CompositeChannels; i++) distortion[i]/=((double) image->columns*image->rows); if (((channel & OpacityChannel) != 0) && ((image->matte != MagickFalse) || (reconstruct_image->matte != MagickFalse))) distortion[CompositeChannels]/=(double) (GetNumberChannels(image,channel)-1); else distortion[CompositeChannels]/=(double) GetNumberChannels(image,channel); distortion[CompositeChannels]=sqrt(distortion[CompositeChannels]); return(status); } static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image, const Image *reconstruct_image,const ChannelType channel, double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; register ssize_t i; ssize_t y; status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double channel_distortion[CompositeChannels+1]; register const IndexPacket *restrict indexes, *restrict reconstruct_indexes; register const 
PixelPacket *restrict p, *restrict q; register ssize_t i, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y, reconstruct_image->columns,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view); (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType distance, Da, Sa; Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) : (QuantumRange-OpaqueOpacity)); Da=QuantumScale*(reconstruct_image->matte != MagickFalse ? GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity)); if ((channel & RedChannel) != 0) { distance=QuantumScale*fabs(Sa*GetPixelRed(p)-Da*GetPixelRed(q)); channel_distortion[RedChannel]+=distance; channel_distortion[CompositeChannels]+=distance; } if ((channel & GreenChannel) != 0) { distance=QuantumScale*fabs(Sa*GetPixelGreen(p)-Da*GetPixelGreen(q)); channel_distortion[GreenChannel]+=distance; channel_distortion[CompositeChannels]+=distance; } if ((channel & BlueChannel) != 0) { distance=QuantumScale*fabs(Sa*GetPixelBlue(p)-Da*GetPixelBlue(q)); channel_distortion[BlueChannel]+=distance; channel_distortion[CompositeChannels]+=distance; } if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) { distance=QuantumScale*fabs(GetPixelOpacity(p)-(double) GetPixelOpacity(q)); channel_distortion[OpacityChannel]+=distance; channel_distortion[CompositeChannels]+=distance; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { distance=QuantumScale*fabs(Sa*GetPixelIndex(indexes+x)-Da* GetPixelIndex(reconstruct_indexes+x)); channel_distortion[BlackChannel]+=distance; channel_distortion[CompositeChannels]+=distance; } p++; q++; } #if 
defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetMeanAbsoluteError) #endif for (i=0; i <= (ssize_t) CompositeChannels; i++) distortion[i]+=channel_distortion[i]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); for (i=0; i <= (ssize_t) CompositeChannels; i++) distortion[i]/=((double) image->columns*image->rows); distortion[CompositeChannels]/=(double) GetNumberChannels(image,channel); return(status); } static MagickBooleanType GetMeanErrorPerPixel(Image *image, const Image *reconstruct_image,const ChannelType channel,double *distortion, ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; MagickRealType area, maximum_error, mean_error; ssize_t y; status=MagickTrue; area=0.0; maximum_error=0.0; mean_error=0.0; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes, *restrict reconstruct_indexes; register const PixelPacket *restrict p, *restrict q; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL)) { status=MagickFalse; break; } indexes=GetCacheViewVirtualIndexQueue(image_view); reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view); for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType distance, Da, Sa; Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) : (QuantumRange-OpaqueOpacity)); Da=QuantumScale*(reconstruct_image->matte != MagickFalse ? 
GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity)); if ((channel & RedChannel) != 0) { distance=fabs(Sa*GetPixelRed(p)-Da*GetPixelRed(q)); distortion[RedChannel]+=distance; distortion[CompositeChannels]+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; area++; } if ((channel & GreenChannel) != 0) { distance=fabs(Sa*GetPixelGreen(p)-Da*GetPixelGreen(q)); distortion[GreenChannel]+=distance; distortion[CompositeChannels]+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; area++; } if ((channel & BlueChannel) != 0) { distance=fabs(Sa*GetPixelBlue(p)-Da*GetPixelBlue(q)); distortion[BlueChannel]+=distance; distortion[CompositeChannels]+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; area++; } if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) { distance=fabs((double) GetPixelOpacity(p)- GetPixelOpacity(q)); distortion[OpacityChannel]+=distance; distortion[CompositeChannels]+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; area++; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (reconstruct_image->colorspace == CMYKColorspace)) { distance=fabs(Sa*GetPixelIndex(indexes+x)-Da* GetPixelIndex(reconstruct_indexes+x)); distortion[BlackChannel]+=distance; distortion[CompositeChannels]+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; area++; } p++; q++; } } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); image->error.mean_error_per_pixel=distortion[CompositeChannels]/area; image->error.normalized_mean_error=QuantumScale*QuantumScale*mean_error/area; image->error.normalized_maximum_error=QuantumScale*maximum_error; return(status); } static MagickBooleanType GetMeanSquaredDistortion(const Image *image, const Image *reconstruct_image,const ChannelType 
channel, double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; register ssize_t i; ssize_t y; status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double channel_distortion[CompositeChannels+1]; register const IndexPacket *restrict indexes, *restrict reconstruct_indexes; register const PixelPacket *restrict p, *restrict q; register ssize_t i, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y, reconstruct_image->columns,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view); (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType distance, Da, Sa; Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) : (QuantumRange-OpaqueOpacity)); Da=QuantumScale*(reconstruct_image->matte != MagickFalse ? 
GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity)); if ((channel & RedChannel) != 0) { distance=QuantumScale*(Sa*GetPixelRed(p)-Da*GetPixelRed(q)); channel_distortion[RedChannel]+=distance*distance; channel_distortion[CompositeChannels]+=distance*distance; } if ((channel & GreenChannel) != 0) { distance=QuantumScale*(Sa*GetPixelGreen(p)-Da*GetPixelGreen(q)); channel_distortion[GreenChannel]+=distance*distance; channel_distortion[CompositeChannels]+=distance*distance; } if ((channel & BlueChannel) != 0) { distance=QuantumScale*(Sa*GetPixelBlue(p)-Da*GetPixelBlue(q)); channel_distortion[BlueChannel]+=distance*distance; channel_distortion[CompositeChannels]+=distance*distance; } if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) { distance=QuantumScale*(GetPixelOpacity(p)-(MagickRealType) GetPixelOpacity(q)); channel_distortion[OpacityChannel]+=distance*distance; channel_distortion[CompositeChannels]+=distance*distance; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (reconstruct_image->colorspace == CMYKColorspace)) { distance=QuantumScale*(Sa*GetPixelIndex(indexes+x)-Da* GetPixelIndex(reconstruct_indexes+x)); channel_distortion[BlackChannel]+=distance*distance; channel_distortion[CompositeChannels]+=distance*distance; } p++; q++; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetMeanSquaredError) #endif for (i=0; i <= (ssize_t) CompositeChannels; i++) distortion[i]+=channel_distortion[i]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); for (i=0; i <= (ssize_t) CompositeChannels; i++) distortion[i]/=((double) image->columns*image->rows); distortion[CompositeChannels]/=(double) GetNumberChannels(image,channel); return(status); } static MagickBooleanType GetNormalizedCrossCorrelationDistortion( const Image *image,const Image *reconstruct_image,const ChannelType channel, double *distortion,ExceptionInfo *exception) { #define SimilarityImageTag 
"Similarity/Image" CacheView *image_view, *reconstruct_view; ChannelStatistics *image_statistics, *reconstruct_statistics; MagickBooleanType status; MagickOffsetType progress; MagickRealType area; register ssize_t i; ssize_t y; /* Normalize to account for variation due to lighting and exposure condition. */ image_statistics=GetImageChannelStatistics(image,exception); reconstruct_statistics=GetImageChannelStatistics(reconstruct_image,exception); status=MagickTrue; progress=0; for (i=0; i <= (ssize_t) CompositeChannels; i++) distortion[i]=0.0; area=1.0/((MagickRealType) image->columns*image->rows-1); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes, *restrict reconstruct_indexes; register const PixelPacket *restrict p, *restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view); for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType Da, Sa; Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) : (QuantumRange-OpaqueOpacity)); Da=QuantumScale*(reconstruct_image->matte != MagickFalse ? 
GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity)); if ((channel & RedChannel) != 0) distortion[RedChannel]+=area*QuantumScale*(Sa*GetPixelRed(p)- image_statistics[RedChannel].mean)*(Da*GetPixelRed(q)- reconstruct_statistics[RedChannel].mean); if ((channel & GreenChannel) != 0) distortion[GreenChannel]+=area*QuantumScale*(Sa*GetPixelGreen(p)- image_statistics[GreenChannel].mean)*(Da*GetPixelGreen(q)- reconstruct_statistics[GreenChannel].mean); if ((channel & BlueChannel) != 0) distortion[BlueChannel]+=area*QuantumScale*(Sa*GetPixelBlue(p)- image_statistics[BlueChannel].mean)*(Da*GetPixelBlue(q)- reconstruct_statistics[BlueChannel].mean); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) distortion[OpacityChannel]+=area*QuantumScale*( GetPixelOpacity(p)-image_statistics[OpacityChannel].mean)* (GetPixelOpacity(q)-reconstruct_statistics[OpacityChannel].mean); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (reconstruct_image->colorspace == CMYKColorspace)) distortion[BlackChannel]+=area*QuantumScale*(Sa* GetPixelIndex(indexes+x)- image_statistics[OpacityChannel].mean)*(Da* GetPixelIndex(reconstruct_indexes+x)- reconstruct_statistics[OpacityChannel].mean); p++; q++; } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,SimilarityImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); /* Divide by the standard deviation. 
*/ for (i=0; i < (ssize_t) CompositeChannels; i++) { double gamma; gamma=image_statistics[i].standard_deviation* reconstruct_statistics[i].standard_deviation; gamma=PerceptibleReciprocal(gamma); distortion[i]=QuantumRange*gamma*distortion[i]; } distortion[CompositeChannels]=0.0; if ((channel & RedChannel) != 0) distortion[CompositeChannels]+=distortion[RedChannel]* distortion[RedChannel]; if ((channel & GreenChannel) != 0) distortion[CompositeChannels]+=distortion[GreenChannel]* distortion[GreenChannel]; if ((channel & BlueChannel) != 0) distortion[CompositeChannels]+=distortion[BlueChannel]* distortion[BlueChannel]; if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) distortion[CompositeChannels]+=distortion[OpacityChannel]* distortion[OpacityChannel]; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) distortion[CompositeChannels]+=distortion[BlackChannel]* distortion[BlackChannel]; distortion[CompositeChannels]=sqrt(distortion[CompositeChannels]/ GetNumberChannels(image,channel)); /* Free resources. 
*/ reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory( reconstruct_statistics); image_statistics=(ChannelStatistics *) RelinquishMagickMemory( image_statistics); return(status); } static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image, const Image *reconstruct_image,const ChannelType channel, double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; ssize_t y; status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double channel_distortion[CompositeChannels+1]; register const IndexPacket *restrict indexes, *restrict reconstruct_indexes; register const PixelPacket *restrict p, *restrict q; register ssize_t i, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y, reconstruct_image->columns,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view); (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType distance, Da, Sa; Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) : (QuantumRange-OpaqueOpacity)); Da=QuantumScale*(reconstruct_image->matte != MagickFalse ? 
GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity)); if ((channel & RedChannel) != 0) { distance=QuantumScale*fabs(Sa*GetPixelRed(p)-Da*GetPixelRed(q)); if (distance > channel_distortion[RedChannel]) channel_distortion[RedChannel]=distance; if (distance > channel_distortion[CompositeChannels]) channel_distortion[CompositeChannels]=distance; } if ((channel & GreenChannel) != 0) { distance=QuantumScale*fabs(Sa*GetPixelGreen(p)-Da*GetPixelGreen(q)); if (distance > channel_distortion[GreenChannel]) channel_distortion[GreenChannel]=distance; if (distance > channel_distortion[CompositeChannels]) channel_distortion[CompositeChannels]=distance; } if ((channel & BlueChannel) != 0) { distance=QuantumScale*fabs(Sa*GetPixelBlue(p)-Da*GetPixelBlue(q)); if (distance > channel_distortion[BlueChannel]) channel_distortion[BlueChannel]=distance; if (distance > channel_distortion[CompositeChannels]) channel_distortion[CompositeChannels]=distance; } if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) { distance=QuantumScale*fabs(GetPixelOpacity(p)-(double) GetPixelOpacity(q)); if (distance > channel_distortion[OpacityChannel]) channel_distortion[OpacityChannel]=distance; if (distance > channel_distortion[CompositeChannels]) channel_distortion[CompositeChannels]=distance; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (reconstruct_image->colorspace == CMYKColorspace)) { distance=QuantumScale*fabs(Sa*GetPixelIndex(indexes+x)-Da* GetPixelIndex(reconstruct_indexes+x)); if (distance > channel_distortion[BlackChannel]) channel_distortion[BlackChannel]=distance; if (distance > channel_distortion[CompositeChannels]) channel_distortion[CompositeChannels]=distance; } p++; q++; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetPeakAbsoluteError) #endif for (i=0; i <= (ssize_t) CompositeChannels; i++) if (channel_distortion[i] > distortion[i]) distortion[i]=channel_distortion[i]; } 
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
  GetPeakSignalToNoiseRatio() first computes the per-channel mean squared
  error, then rescales every requested channel (and the composite entry) to
  decibels via 20*log10(1/sqrt(MSE)).  NOTE(review): identical images yield
  MSE == 0 and therefore an infinite PSNR here -- confirm callers tolerate
  that.
*/
static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /* Start from the mean squared error, then convert to dB in place. */
  status=GetMeanSquaredDistortion(image,reconstruct_image,channel,distortion,
    exception);
  if ((channel & RedChannel) != 0)
    distortion[RedChannel]=20.0*log10((double) 1.0/sqrt(
      distortion[RedChannel]));
  if ((channel & GreenChannel) != 0)
    distortion[GreenChannel]=20.0*log10((double) 1.0/sqrt(
      distortion[GreenChannel]));
  if ((channel & BlueChannel) != 0)
    distortion[BlueChannel]=20.0*log10((double) 1.0/sqrt(
      distortion[BlueChannel]));
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    distortion[OpacityChannel]=20.0*log10((double) 1.0/sqrt(
      distortion[OpacityChannel]));
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    distortion[BlackChannel]=20.0*log10((double) 1.0/sqrt(
      distortion[BlackChannel]));
  distortion[CompositeChannels]=20.0*log10((double) 1.0/sqrt(
    distortion[CompositeChannels]));
  return(status);
}

/*
  GetRootMeanSquaredDistortion() computes the per-channel mean squared error
  and replaces each requested channel entry (and the composite entry) with
  its square root, i.e. the RMSE.
*/
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=GetMeanSquaredDistortion(image,reconstruct_image,channel,distortion,
    exception);
  if ((channel & RedChannel) != 0)
    distortion[RedChannel]=sqrt(distortion[RedChannel]);
  if ((channel & GreenChannel) != 0)
    distortion[GreenChannel]=sqrt(distortion[GreenChannel]);
  if ((channel & BlueChannel) != 0)
    distortion[BlueChannel]=sqrt(distortion[BlueChannel]);
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    distortion[OpacityChannel]=sqrt(distortion[OpacityChannel]);
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    distortion[BlackChannel]=sqrt(distortion[BlackChannel]);
  distortion[CompositeChannels]=sqrt(distortion[CompositeChannels]);
  return(status);
}

/*
  GetImageChannelDistortion() validates that the two images have matching
  dimensions, dispatches to the distortion routine selected by 'metric'
  (unknown metric values fall through to normalized cross correlation via
  the 'default' label), and returns the composite-channel result in
  *distortion.
*/
MagickExport MagickBooleanType GetImageChannelDistortion(Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((reconstruct_image->columns != image->columns) ||
      (reconstruct_image->rows != image->rows))
    ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename);
  /*
    Get image distortion.
  */
  length=CompositeChannels+1UL;
  /* One slot per channel plus the composite slot; freed before returning. */
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_distortion,0,length*
    sizeof(*channel_distortion));
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel,channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
  }
  *distortion=channel_distortion[CompositeChannels];
  channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e C h a n n e l D i s t o r t i o n s                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%  GetImageChannelDistortions() compares the image channels of an image to a
%  reconstructed image and returns the specified distortion metric for each
%  channel.
%
%  The format of the GetImageChannelDistortions method is:
%
%      double *GetImageChannelDistortions(const Image *image,
%        const Image *reconstruct_image,const MetricType metric,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o reconstruct_image: the reconstruct image.
%
%    o metric: the metric.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport double *GetImageChannelDistortions(Image *image,
  const Image *reconstruct_image,const MetricType metric,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((reconstruct_image->columns != image->columns) ||
      (reconstruct_image->rows != image->rows))
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ImageError,"ImageSizeDiffers","`%s'",image->filename);
      return((double *) NULL);
    }
  /*
    Get image distortion.
  */
  length=CompositeChannels+1UL;
  /* Caller owns the returned array; release with RelinquishMagickMemory(). */
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
  }
  if (status == MagickFalse)
    {
      /* On failure the partial results are discarded and NULL is returned. */
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  I s I m a g e s E q u a l                                                  %
%                                                                             %
%                                                                             %
% % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImagesEqual() measures the difference between colors at each pixel % location of two images. A value other than 0 means the colors match % exactly. Otherwise an error measure is computed by summing over all % pixels in an image the distance squared in RGB space between each image % pixel and its corresponding pixel in the reconstruct image. The error % measure is assigned to these image members: % % o mean_error_per_pixel: The mean error for any single pixel in % the image. % % o normalized_mean_error: The normalized mean quantization error for % any single pixel in the image. This distance measure is normalized to % a range between 0 and 1. It is independent of the range of red, green, % and blue values in the image. % % o normalized_maximum_error: The normalized maximum quantization % error for any single pixel in the image. This distance measure is % normalized to a range between 0 and 1. It is independent of the range % of red, green, and blue values in your image. % % A small normalized mean square error, accessed as % image->normalized_mean_error, suggests the images are very similar in % spatial layout and color. % % The format of the IsImagesEqual method is: % % MagickBooleanType IsImagesEqual(Image *image, % const Image *reconstruct_image) % % A description of each parameter follows. % % o image: the image. % % o reconstruct_image: the reconstruct image. 
%
*/
MagickExport MagickBooleanType IsImagesEqual(Image *image,
  const Image *reconstruct_image)
{
  CacheView
    *image_view,
    *reconstruct_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickRealType
    area,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickSignature);
  if ((reconstruct_image->columns != image->columns) ||
      (reconstruct_image->rows != image->rows))
    ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename);
  area=0.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  exception=(&image->exception);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,
      reconstruct_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      break;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        distance;

      /*
        Accumulate the per-channel absolute differences; 'area' counts the
        number of channel samples so the totals can be averaged below.
      */
      distance=fabs(GetPixelRed(p)-(double) GetPixelRed(q));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      area++;
      distance=fabs(GetPixelGreen(p)-(double) GetPixelGreen(q));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      area++;
      distance=fabs(GetPixelBlue(p)-(double) GetPixelBlue(q));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      area++;
      if (image->matte != MagickFalse)
        {
          distance=fabs(GetPixelOpacity(p)-(double) GetPixelOpacity(q));
          mean_error_per_pixel+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      if ((image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=fabs(GetPixelIndex(indexes+x)-(double)
            GetPixelIndex(reconstruct_indexes+x));
          mean_error_per_pixel+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      p++;
      q++;
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    NOTE(review): 'area' is zero for a zero-pixel image, making these
    divisions undefined in that case -- confirm callers never pass one.
  */
  image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area);
  image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale*
    mean_error/area);
  image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error);
  /* Equal only when no channel anywhere differed. */
  status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S i m i l a r i t y I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SimilarityImage() compares the reference image of the image and returns the
%  best match offset.  In addition, it returns a similarity image such that an
%  exact match location is completely white and if none of the pixels match,
%  black, otherwise some gray level in-between.
%
%  The format of the SimilarityImageImage method is:
%
%      Image *SimilarityImage(const Image *image,const Image *reference,
%        RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o reference: find an area of the image that closely resembles this image.
%
%    o the best match offset of the reference image within the image.
%
%    o similarity: the computed similarity between the images.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  GetSimilarityMetric() crops the candidate window at (x_offset,y_offset) out
  of 'image' and returns its distortion with respect to 'reference' under the
  given metric.  A failed crop yields 0.0.
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  double
    distortion;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  SetGeometry(reference,&geometry);
  geometry.x=x_offset;
  geometry.y=y_offset;
  similarity_image=CropImage(image,&geometry,exception);
  if (similarity_image == (Image *) NULL)
    return(0.0);
  distortion=0.0;
  status=GetImageDistortion(similarity_image,reference,metric,&distortion,
    exception);
  (void) status;  /* comparison status intentionally ignored */
  similarity_image=DestroyImage(similarity_image);
  return(distortion);
}

/*
  SimilarityImage() is a convenience wrapper that searches with the
  root-mean-squared-error metric.
*/
MagickExport Image *SimilarityImage(Image *image,const Image *reference,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
  Image
    *similarity_image;

  similarity_image=SimilarityMetricImage(image,reference,
    RootMeanSquaredErrorMetric,offset,similarity_metric,exception);
  return(similarity_image);
}

/*
  SimilarityMetricImage() slides 'reference' over every valid offset of
  'image', renders each offset's distortion into the returned similarity
  image (zero distortion renders white), and reports the best offset and
  score found.  The "compare:similarity-threshold" image artifact enables an
  early exit once the best score drops to or below the threshold.
*/
MagickExport Image *SimilarityMetricImage(Image *image,const Image *reference,
  const MetricType metric,RectangleInfo *offset,double *similarity_metric,
  ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *similarity_view;

  const char
    *artifact;

  double
    similarity_threshold;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  assert(offset != (RectangleInfo *) NULL);
  SetGeometry(reference,offset);
  *similarity_metric=1.0;
  if ((reference->columns > image->columns) || (reference->rows > image->rows))
    ThrowImageException(ImageError,"ImageSizeDiffers");
  /* One output pixel per candidate offset of the reference window. */
  similarity_image=CloneImage(image,image->columns-reference->columns+1,
    image->rows-reference->rows+1,MagickTrue,exception);
  if (similarity_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(similarity_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&similarity_image->exception);
      similarity_image=DestroyImage(similarity_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel);
  /*
    Measure similarity of reference image against image.
  */
  similarity_threshold=(-1.0);  /* negative: early exit disabled by default */
  artifact=GetImageArtifact(image,"compare:similarity-threshold");
  if (artifact != (const char *) NULL)
    similarity_threshold=StringToDouble(artifact,(char **) NULL);
  status=MagickTrue;
  progress=0;
  /*
    NOTE(review): pixels are written through this view below although it is
    acquired as a *virtual* cache view -- confirm an authentic view was not
    intended here.
  */
  similarity_view=AcquireVirtualCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    shared(progress,status,similarity_metric) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
  {
    double
      similarity;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp flush(similarity_metric)
#endif
    if (*similarity_metric <= similarity_threshold)
      continue;
    q=GetCacheViewAuthenticPixels(similarity_view,0,y,
      similarity_image->columns,1,exception);
    if (q == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
    {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp flush(similarity_metric)
#endif
      if (*similarity_metric <= similarity_threshold)
        break;
      similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_SimilarityImage)
#endif
      if (similarity < *similarity_metric)
        {
          *similarity_metric=similarity;
          offset->x=x;
          offset->y=y;
        }
      /* Map the score to intensity: zero distortion renders white. */
      SetPixelRed(q,ClampToQuantum(QuantumRange-QuantumRange*similarity));
      SetPixelGreen(q,GetPixelRed(q));
      SetPixelBlue(q,GetPixelRed(q));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SimilarityImage)
#endif
        proceed=SetImageProgress(image,SimilarityImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  similarity_view=DestroyCacheView(similarity_view);
  if (status == MagickFalse)
    similarity_image=DestroyImage(similarity_image);
  return(similarity_image);
}
kmp_sch_simd_runtime_static.c
// RUN: %libomp-compile && %libomp-run
// RUN: %libomp-run 1 && %libomp-run 2

// The test checks schedule(simd:runtime)
// in combination with OMP_SCHEDULE=static[,chunk]
#include <stdio.h>
#include <stdlib.h>
#include <string.h> // strlen/strcpy/strcat are used in main()
#include <omp.h>

#if defined(WIN32) || defined(_WIN32)
#include <windows.h>
#define delay() Sleep(1);
#define seten(a,b,c) _putenv_s((a),(b))
#else
#include <unistd.h>
#define delay() usleep(10);
#define seten(a,b,c) setenv((a),(b),(c))
#endif

#define SIMD_LEN 4
int err = 0;

// ---------------------------------------------------------------------------
// Various definitions copied from OpenMP RTL.
enum sched {
  kmp_sch_static_balanced_chunked = 45,
  kmp_sch_guided_simd = 46,
  kmp_sch_runtime_simd = 47,
};
typedef unsigned u32;
typedef long long i64;
typedef unsigned long long u64;
typedef struct {
  int reserved_1;
  int flags;
  int reserved_2;
  int reserved_3;
  char *psource;
} id;

#ifdef __cplusplus
extern "C" {
#endif
  int __kmpc_global_thread_num(id*);
  void __kmpc_barrier(id*, int gtid);
  void __kmpc_dispatch_init_4(id*, int, enum sched, int, int, int, int);
  void __kmpc_dispatch_init_8(id*, int, enum sched, i64, i64, i64, i64);
  int __kmpc_dispatch_next_4(id*, int, void*, void*, void*, void*);
  int __kmpc_dispatch_next_8(id*, int, void*, void*, void*, void*);
#ifdef __cplusplus
} // extern "C"
#endif
// End of definitions copied from OpenMP RTL.
// ---------------------------------------------------------------------------
static id loc = {0, 2, 0, 0, ";file;func;0;0;;"};

// ---------------------------------------------------------------------------
// Drive the runtime dispatch loop for schedule(simd:runtime) and verify that
// every chunk handed out respects the bounds, stride, and simd-chunk rules.
// Failures are counted in the global 'err'.
void run_loop(
  int loop_lb,   // Loop lower bound.
  int loop_ub,   // Loop upper bound.
  int loop_st,   // Loop stride.
  int lchunk     // Requested chunk size (0 = unchunked).
) {
  int lb;   // Chunk lower bound.
  int ub;   // Chunk upper bound.
  int st;   // Chunk stride.
  int nthreads = omp_get_num_threads();
  int tid = omp_get_thread_num();
  int gtid = __kmpc_global_thread_num(&loc);
  int last;
  int ch;
  int no_chunk = 0;
  if (lchunk == 0) {
    // Chunk size unspecified: smallest dispatch unit is one simd lane group.
    no_chunk = 1;
    lchunk = 1;
  }
  ch = lchunk * SIMD_LEN;
#if _DEBUG > 1
  printf("run_loop gtid %d tid %d (lb=%d, ub=%d, st=%d, ch=%d)\n",
         gtid, tid, (int)loop_lb, (int)loop_ub, (int)loop_st, lchunk);
#endif
  // Don't test degenerate cases that should have been discovered by codegen.
  // (This guard must stay before any division by loop_st.)
  if (loop_st == 0)
    return;
  if (loop_st > 0 ? loop_lb > loop_ub : loop_lb < loop_ub)
    return;
  __kmpc_dispatch_init_4(&loc, gtid, kmp_sch_runtime_simd,
                         loop_lb, loop_ub, loop_st, SIMD_LEN);
  {
    int chunk;     // No of current chunk.
    int last_ub;   // Upper bound of the last processed chunk.
    u64 cur;       // Number of iterations in current chunk.
    u64 max;       // Max allowed iterations for current chunk.
    int undersized = 0;

    last_ub = loop_ub;
    chunk = 0;
    max = (loop_ub - loop_lb) / loop_st + 1; // first chunk can consume all
    while (__kmpc_dispatch_next_4(&loc, gtid, &last, &lb, &ub, &st)) {
      ++chunk;
#if _DEBUG
      printf("th %d: chunk=%d, lb=%d, ub=%d ch %d\n",
             tid, chunk, (int)lb, (int)ub, (int)(ub - lb + 1));
#endif
      // Check if previous chunk (it is not the final chunk) is undersized.
      if (undersized)
        printf("Error with chunk %d, th %d, err %d\n", chunk, tid, ++err);
      if (loop_st > 0) {
        if (!(ub <= loop_ub))
          printf("Error with ub %d, %d, ch %d, err %d\n",
                 (int)ub, (int)loop_ub, chunk, ++err);
        if (!(lb <= ub))
          printf("Error with bounds %d, %d, %d, err %d\n",
                 (int)lb, (int)ub, chunk, ++err);
      } else {
        if (!(ub >= loop_ub))
          printf("Error with ub %d, %d, %d, err %d\n",
                 (int)ub, (int)loop_ub, chunk, ++err);
        if (!(lb >= ub))
          printf("Error with bounds %d, %d, %d, err %d\n",
                 (int)lb, (int)ub, chunk, ++err);
      }; // if
      // Stride should not change.
      if (!(st == loop_st))
        printf("Error with st %d, %d, ch %d, err %d\n",
               (int)st, (int)loop_st, chunk, ++err);
      cur = (ub - lb) / loop_st + 1;
      // Guided scheduling uses FP computations, so current chunk may
      // be a bit bigger (+1) than allowed maximum.
      // (cast to int: %d must not receive a 64-bit argument)
      if (!(cur <= max + 1))
        printf("Error with iter %d, %d, err %d\n",
               (int)cur, (int)max, ++err);
      // Update maximum for the next chunk.
      if (last) {
        if (!no_chunk && cur > ch && nthreads > 1)
          printf("Error: too big last chunk %d (%d), tid %d, err %d\n",
                 (int)cur, ch, tid, ++err);
      } else {
        if (cur % ch)
          printf("Error with chunk %d, %d, ch %d, tid %d, err %d\n",
                 chunk, (int)cur, ch, tid, ++err);
      }
      if (cur < max)
        max = cur;
      last_ub = ub;
      undersized = (cur < ch);
#if _DEBUG > 1
      if (last)
        printf("under%d cur %d, ch %d, tid %d, ub %d, lb %d, st %d =======\n",
               undersized, (int)cur, ch, tid, ub, lb, loop_st);
#endif
    } // while
    // Must have the right last iteration index.
    if (loop_st > 0) {
      if (!(last_ub <= loop_ub))
        printf("Error with last1 %d, %d, ch %d, err %d\n",
               (int)last_ub, (int)loop_ub, chunk, ++err);
      if (last && !(last_ub + loop_st > loop_ub))
        printf("Error with last2 %d, %d, %d, ch %d, err %d\n",
               (int)last_ub, (int)loop_st, (int)loop_ub, chunk, ++err);
    } else {
      if (!(last_ub >= loop_ub))
        printf("Error with last1 %d, %d, ch %d, err %d\n",
               (int)last_ub, (int)loop_ub, chunk, ++err);
      if (last && !(last_ub + loop_st < loop_ub))
        printf("Error with last2 %d, %d, %d, ch %d, err %d\n",
               (int)last_ub, (int)loop_st, (int)loop_ub, chunk, ++err);
    } // if
  }
  __kmpc_barrier(&loc, gtid);
} // run_loop

// ---------------------------------------------------------------------------
// Optionally takes the chunk size as argv[1]; sets OMP_SCHEDULE accordingly
// and runs the checked loop in parallel.  Exit status 0 = pass.
int main(int argc, char *argv[]) {
  int chunk = 0;
  if (argc > 1) {
    char *buf = malloc(8 + strlen(argv[1]));
    if (buf == NULL) {
      printf("failed, no memory for schedule string\n");
      return 1;
    }
    // expect chunk size as a parameter
    chunk = atoi(argv[1]);
    strcpy(buf, "static,");
    strcat(buf, argv[1]);
    seten("OMP_SCHEDULE", buf, 1);
    printf("Testing schedule(simd:%s)\n", buf);
    free(buf);
  } else {
    seten("OMP_SCHEDULE", "static", 1);
    printf("Testing schedule(simd:static)\n");
  }
#pragma omp parallel
  run_loop(0, 26, 1, chunk);
  if (err) {
    printf("failed, err = %d\n", err);
    return 1;
  } else {
    printf("passed\n");
    return 0;
  }
}
re_model_template.h
/*! * This file is part of GPBoost a C++ library for combining * boosting with Gaussian process and mixed effects models * * Copyright (c) 2020 Fabio Sigrist. All rights reserved. * * Licensed under the Apache License Version 2.0. See LICENSE file in the project root for license information. */ #ifndef GPB_RE_MODEL_TEMPLATE_H_ #define GPB_RE_MODEL_TEMPLATE_H_ #include <GPBoost/log.h> #include <GPBoost/type_defs.h> #include <GPBoost/re_comp.h> #include <GPBoost/sparse_matrix_utils.h> #include <GPBoost/Vecchia_utils.h> #include <GPBoost/GP_utils.h> //#include <Eigen/src/misc/lapack.h> #include <memory> #include <mutex> #include <vector> #include <algorithm> // std::shuffle #include <random> // std::default_random_engine //#include <typeinfo> // Only needed for debugging //#include <chrono> // Only needed for debugging //#include <thread> // Only needed for debugging //Log::Info("Fine here ");// Only for debugging //std::this_thread::sleep_for(std::chrono::milliseconds(20)); namespace GPBoost { /*! * \brief Template class used in the wrapper class REModel * The template parameters T1 and T2 can either be <sp_mat_t, chol_sp_mat_t> or <den_mat_t, chol_den_mat_t> */ template<typename T1, typename T2> class REModelTemplate { public: /*! \brief Null costructor */ REModelTemplate(); /*! * \brief Costructor * \param num_data Number of data points * \param cluster_ids_data IDs / labels indicating independent realizations of random effects / Gaussian processes (same values = same process realization) * \param re_group_data Labels of group levels for the grouped random effects in column-major format (i.e. first the levels for the first effect, then for the second, etc.). 
Every group label needs to end with the null character '\0' * \param num_re_group Number of grouped (intercept) random effects * \param re_group_rand_coef_data Covariate data for grouped random coefficients * \param ind_effect_group_rand_coef Indices that relate every random coefficients to a "base" intercept grouped random effect. Counting start at 1. * \param num_re_group_rand_coef Number of grouped random coefficient * \param num_gp Number of (intercept) Gaussian processes * \param gp_coords_data Coordinates (features) for Gaussian process * \param dim_gp_coords Dimension of the coordinates (=number of features) for Gaussian process * \param gp_rand_coef_data Covariate data for Gaussian process random coefficients * \param num_gp_rand_coef Number of Gaussian process random coefficients * \param cov_fct Type of covariance (kernel) function for Gaussian process. We follow the notation and parametrization of Diggle and Ribeiro (2007) except for the Matern covariance where we follow Rassmusen and Williams (2006) * \param cov_fct_shape Shape parameter of covariance function (=smoothness parameter for Matern covariance, irrelevant for some covariance functions such as the exponential or Gaussian) * \param vecchia_approx If true, the Veccia approximation is used for the Gaussian process * \param num_neighbors The number of neighbors used in the Vecchia approximation * \param vecchia_ordering Ordering used in the Vecchia approximation. "none" = no ordering, "random" = random ordering * \param vecchia_pred_type Type of Vecchia approximation for making predictions. 
"order_obs_first_cond_obs_only" = observed data is ordered first and neighbors are only observed points, "order_obs_first_cond_all" = observed data is ordered first and neighbors are selected among all points (observed + predicted), "order_pred_first" = predicted data is ordered first for making predictions, "latent_order_obs_first_cond_obs_only" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are only observed points, "latent_order_obs_first_cond_all" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are selected among all points * \param num_neighbors_pred The number of neighbors used in the Vecchia approximation for making predictions */ REModelTemplate(data_size_t num_data, const gp_id_t* cluster_ids_data = nullptr, const char* re_group_data = nullptr, data_size_t num_re_group = 0, const double* re_group_rand_coef_data = nullptr, const int32_t* ind_effect_group_rand_coef = nullptr, data_size_t num_re_group_rand_coef = 0, data_size_t num_gp = 0, const double* gp_coords_data = nullptr, int dim_gp_coords = 2, const double* gp_rand_coef_data = nullptr, data_size_t num_gp_rand_coef = 0, const char* cov_fct = nullptr, double cov_fct_shape = 0., bool vecchia_approx = false, int num_neighbors = 30, const char* vecchia_ordering = nullptr, const char* vecchia_pred_type = nullptr, int num_neighbors_pred = 30) { num_cov_par_ = 1; CHECK(num_data > 0); num_data_ = num_data; vecchia_approx_ = vecchia_approx; //Set up GP IDs SetUpGPIds(num_data_, cluster_ids_data, num_data_per_cluster_, data_indices_per_cluster_, unique_clusters_, num_clusters_); //Indices of parameters of individual components in joint parameter vector ind_par_.push_back(0);//0+1 is starting point of parameter for first component since the first parameter is the nugget effect variance num_comps_total_ = 0; //Do some checks for grouped RE components and set meta data (number of components etc.) 
std::vector<std::vector<string_t>> re_group_levels;//Matrix with group levels for the grouped random effects (re_group_levels[j] contains the levels for RE number j) if (num_re_group > 0) { if (vecchia_approx) { Log::Fatal("The Veccia approximation cannot be used when there are grouped random effects (in the current implementation)."); } num_re_group_ = num_re_group; CHECK(re_group_data != nullptr); if (num_re_group_rand_coef > 0) { num_re_group_rand_coef_ = num_re_group_rand_coef; CHECK(re_group_rand_coef_data != nullptr); CHECK(ind_effect_group_rand_coef != nullptr); for (int j = 0; j < num_re_group_rand_coef_; ++j) { CHECK(0 < ind_effect_group_rand_coef[j] && ind_effect_group_rand_coef[j] <= num_re_group_); } ind_effect_group_rand_coef_ = std::vector<int>(ind_effect_group_rand_coef, ind_effect_group_rand_coef + num_re_group_rand_coef_); } num_re_group_total_ = num_re_group_ + num_re_group_rand_coef_; num_cov_par_ += num_re_group_total_; num_comps_total_ += num_re_group_total_; //Add indices of parameters of individual components in joint parameter vector for (int j = 0; j < num_re_group_total_; ++j) { ind_par_.push_back(1 + j);//end points of parameter indices of components } // Convert characters in 'const char* re_group_data' to matrix (num_re_group_ x num_data_) with strings of group labels re_group_levels = std::vector<std::vector<string_t>>(num_re_group_, std::vector<string_t>(num_data_)); if (num_re_group_ > 0) { ConvertCharToStringGroupLevels(num_data_, num_re_group_, re_group_data, re_group_levels); } } //Do some checks for GP components and set meta data (number of components etc.) 
if (num_gp > 0) { if (num_gp > 2) { Log::Fatal("num_gp can only be either 0 or 1 in the current implementation"); } num_gp_ = num_gp; ind_intercept_gp_ = num_comps_total_; CHECK(dim_gp_coords > 0); CHECK(gp_coords_data != nullptr); CHECK(cov_fct != nullptr); dim_gp_coords_ = dim_gp_coords; cov_fct_ = std::string(cov_fct); cov_fct_shape_ = cov_fct_shape; if (vecchia_approx) { Log::Info("Starting nearest neighbor search for Vecchia approximation"); CHECK(num_neighbors > 0); num_neighbors_ = num_neighbors; CHECK(num_neighbors_pred > 0); num_neighbors_pred_ = num_neighbors_pred; if (vecchia_ordering == nullptr) { vecchia_ordering_ = "none"; } else { vecchia_ordering_ = std::string(vecchia_ordering); CHECK(vecchia_ordering_ == "none" || vecchia_ordering_ == "random"); } if (vecchia_pred_type == nullptr) { vecchia_pred_type_ = "order_obs_first_cond_obs_only"; } else { vecchia_pred_type_ = std::string(vecchia_pred_type); if (SUPPORTED_VECCHIA_PRED_TYPES_.find(vecchia_pred_type_) == SUPPORTED_VECCHIA_PRED_TYPES_.end()) { Log::Fatal("Prediction type '%s' is not supported for the Veccia approximation.", vecchia_pred_type_.c_str()); } } } if (num_gp_rand_coef > 0) {//Random slopes CHECK(gp_rand_coef_data != nullptr); num_gp_rand_coef_ = num_gp_rand_coef; } num_gp_total_ = num_gp_ + num_gp_rand_coef_; num_cov_par_ += (2 * num_gp_total_); num_comps_total_ += num_gp_total_; //Add indices of parameters of individual components in joint parameter vector for (int j = 0; j < num_gp_total_; ++j) { ind_par_.push_back(ind_par_.back() + 2);//end points of parameter indices of components } if (vecchia_approx) { double num_mem_d = ((double)num_gp_total_) * ((double)num_data_) * ((double)num_neighbors_) * ((double)num_neighbors_); int mem_size = (int)(num_mem_d * 8. / 1000000.); if (mem_size > 8000) { Log::Warning("The current implementation of the Vecchia approximation is not optimized for memory usage. In your case (num. obs. = %d and num. 
neighbors = %d), at least approximately %d mb of memory is needed. If this is a problem, contact the developer of this package and ask to implement this feature.", num_data_, num_neighbors_, mem_size); } } } if (num_re_group_ > 0 && num_gp_total_ == 0) { do_symbolic_decomposition_ = true;//Symbolic decompostion is only done if sparse matrices are used } else { do_symbolic_decomposition_ = false; } //Create RE/GP component models for (const auto& cluster_i : unique_clusters_) { ConstructI<T1>(cluster_i);//Idendity matrices needed for computing inverses of covariance matrices used in gradient descent std::vector<std::shared_ptr<RECompBase<T1>>> re_comps_cluster_i; if (vecchia_approx_) { std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_per_cluster_[cluster_i]); std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_per_cluster_[cluster_i]); std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_data_per_cluster_[cluster_i]); std::vector<Triplet_t> entries_init_B_cluster_i; std::vector<Triplet_t> entries_init_B_grad_cluster_i; std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_per_cluster_[cluster_i]); CreateREComponentsVecchia(num_data_, data_indices_per_cluster_, cluster_i, num_data_per_cluster_, gp_coords_data, dim_gp_coords_, gp_rand_coef_data, num_gp_rand_coef_, cov_fct_, cov_fct_shape_, re_comps_cluster_i, nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, entries_init_B_cluster_i, entries_init_B_grad_cluster_i, z_outer_z_obs_neighbors_cluster_i, vecchia_ordering_, num_neighbors_); nearest_neighbors_.insert({ cluster_i, nearest_neighbors_cluster_i }); dist_obs_neighbors_.insert({ cluster_i, dist_obs_neighbors_cluster_i }); dist_between_neighbors_.insert({ cluster_i, dist_between_neighbors_cluster_i }); entries_init_B_.insert({ cluster_i, entries_init_B_cluster_i }); entries_init_B_grad_.insert({ cluster_i, entries_init_B_grad_cluster_i }); 
z_outer_z_obs_neighbors_.insert({ cluster_i, z_outer_z_obs_neighbors_cluster_i }); Log::Info("Nearest neighbors for Vecchia approximation found"); } else { CreateREComponents(num_data_, num_re_group_, data_indices_per_cluster_, cluster_i, re_group_levels, num_data_per_cluster_, num_re_group_rand_coef_, re_group_rand_coef_data, ind_effect_group_rand_coef_, num_gp_, gp_coords_data, dim_gp_coords_, gp_rand_coef_data, num_gp_rand_coef_, cov_fct_, cov_fct_shape_, ind_intercept_gp_, re_comps_cluster_i); } re_comps_.insert({ cluster_i, re_comps_cluster_i }); } ////Following only prints stuff for testing. TODO: delete //Log::Info("********************** Meta data ********************************"); //Log::Info("num_data_ : %d", num_data_); //Log::Info("num_clusters_ : %d", num_clusters_); //Log::Info("num_re_group_ : %d", num_re_group_); //Log::Info("num_re_group_rand_coef_ : %d", num_re_group_rand_coef_); //Log::Info("num_re_group_total_ : %d", num_re_group_total_); //Log::Info("num_gp_rand_coef_ : %d", num_gp_rand_coef_); //Log::Info("num_gp_total_ : %d", num_gp_total_); //Log::Info("num_cov_par_: %d", num_cov_par_); //for (unsigned i = 0; i < ind_par_.size(); i++) { Log::Info("ind_par_[%d]: %d", i, ind_par_[i]); } //Log::Info("******************************************************"); //int ii = 0; //for (const auto& cluster_i : unique_clusters_) { // Log::Info("unique_clusters_[%d]: %d", ii, cluster_i); // Log::Info("num_data_per_cluster_[%d]: %d", cluster_i, num_data_per_cluster_[cluster_i]); // //for (int j = 0; j < std::min((int)data_indices_per_cluster_[cluster_i].size(), 10); ++j) { Log::Info("data_indices_per_cluster_[%d][%d]: %d", cluster_i, j, data_indices_per_cluster_[cluster_i][j]); } // if (num_re_group_ > 0) { // Log::Info("*********************** Grouped REs *******************************"); // //Log::Info("re_comps_[cluster_i] %s ", typeid(re_comps_[cluster_i]).name()); // //Log::Info("re_comps_[cluster_i].size(): %d", re_comps_[cluster_i].size()); // 
//for (const auto& re_comp : re_comps_[cluster_i]) { // for (int j = 0; j < re_comps_[cluster_i].size(); ++j) { // std::shared_ptr<RECompGroup<T1>> re_comp_group = std::dynamic_pointer_cast<RECompGroup<T1>>(re_comps_[cluster_i][j]); // //for (const auto& el : re_comp_group->group_data_) { Log::Info("re_comps_[%d][j].group_data_[i]: %d", cluster_i, el); } // if (!re_comp_group->is_rand_coef_) { // for (int i = 0; i < std::min((int)(*re_comp_group->group_data_).size(), 10); i++) { Log::Info("re_comps_[%d][%d].group_data_[%d]: %s", cluster_i, j, i, (*re_comp_group->group_data_)[i]); } // } // else if (re_comp_group->is_rand_coef_) { // for (int i = 0; i < std::min(num_data_per_cluster_[cluster_i], 10); i++) { Log::Info("re_comps_[%d][%d].group_data_ref_[%d]: %s", cluster_i, j, i, (*re_comp_group->group_data_)[i]); } // for (int i = 0; i < std::min(num_data_per_cluster_[cluster_i], 10); i++) { Log::Info("re_comps_[%d][%d].rand_coef_data_[%d]: %f", cluster_i, j, i, re_comp_group->rand_coef_data_[i]); } // } // } // } // ii++; //} } /*! \brief Destructor */ ~REModelTemplate() { } /*! \brief Disable copy */ REModelTemplate& operator=(const REModelTemplate&) = delete; /*! \brief Disable copy */ REModelTemplate(const REModelTemplate&) = delete; /*! * \brief Find parameters that minimize the negative log-ligelihood (=MLE) using (Nesterov accelerated) gradient descent * Note: You should pre-allocate memory for optim_cov_pars (length = number of covariance parameters) * \param y_data Response variable data * \param init_cov_pars Initial values for covariance parameters of RE components * \param[out] optim_cov_pars Optimal covariance parameters * \param[out] num_it Number of iterations * \param lr Learning rate * \param acc_rate_cov Acceleration rate for covariance parameters for Nesterov acceleration (only relevant if nesterov_schedule_version == 0). 
* \param momentum_offset Number of iterations for which no mometum is applied in the beginning * \param max_iter Maximal number of iterations * \param delta_rel_conv Convergence criterion: stop iteration if relative change in parameters is below this value * \param optimizer Options: "gradient_descent" or "fisher_scoring" * \param use_nesterov_acc Indicates whether Nesterov acceleration is used in the gradient descent for finding the covariance parameters. Default = true * \param nesterov_schedule_version Which version of Nesterov schedule should be used. Default = 0 * \param[out] std_dev_cov_par Standard deviations for the covariance parameters * \param calc_std_dev If true, asymptotic standard deviations for the MLE of the covariance parameters are calculated as the diagonal of the inverse Fisher information * \param cov_pars_lag_1 Covariance parameters from previous iteration used for Nesterov step (on transformed scale). Default = nullptr */ void OptimCovPar(const double* y_data, double* init_cov_pars, double* optim_cov_pars, int& num_it, double lr = 0.01, double acc_rate_cov = 0.5, int momentum_offset = 2, int max_iter = 1000, double delta_rel_conv = 1.0e-6, string_t optimizer = "fisher_scoring", bool use_nesterov_acc = true, int nesterov_schedule_version = 0, double* std_dev_cov_par = nullptr, bool calc_std_dev = false, double* cov_pars_lag_1 = nullptr) { if (SUPPORTED_OPTIM_COV_PAR_.find(optimizer) == SUPPORTED_OPTIM_COV_PAR_.end()) { Log::Fatal("Optimizer option '%s' is not supported for covariance parameters.", optimizer.c_str()); } SetY(y_data); vec_t cov_pars = Eigen::Map<vec_t>(init_cov_pars, num_cov_par_); vec_t cov_pars_lag1 = (cov_pars_lag_1 == nullptr) ? 
cov_pars : cov_pars_lag1; num_it = max_iter; Log::Debug("Initial covariance parameters"); for (int i = 0; i < (int)cov_pars.size(); ++i) { Log::Debug("cov_pars[%d]: %f", i, cov_pars[i]); } for (int it = 0; it < max_iter; ++it) { ApplyMomentumStep(it, cov_pars, cov_pars_lag1, use_nesterov_acc, acc_rate_cov, nesterov_schedule_version, true, momentum_offset); SetCovParsComps(cov_pars); CalcCovFactor(vecchia_approx_, true, 1., false);//Create covariance matrix and factorize it (and also calculate derivatives if Vecchia approximation is used) CalcYAux(); if (optimizer == "gradient_descent") {//gradient descent UpdateCovParGradOneIter(lr, cov_pars); } else if (optimizer == "fisher_scoring") {//Fisher scoring UpdateCovParFisherScoringOneIter(cov_pars); } CheckNaN(cov_pars); if (it < 10 || ((it + 1) % 10 == 0 && (it + 1) < 100) || ((it + 1) % 100 == 0 && (it + 1) < 1000) || ((it + 1) % 1000 == 0 && (it + 1) < 10000) || ((it + 1) % 10000 == 0)) { Log::Debug("Covariance parameter estimation: iteration number %d", it + 1); for (int i = 0; i < (int)cov_pars.size(); ++i) { Log::Debug("cov_pars[%d]: %f", i, cov_pars[i]); } } if ((cov_pars - cov_pars_lag1).norm() / cov_pars_lag1.norm() < delta_rel_conv) { num_it = it + 1; break; } } if (num_it == max_iter) { Log::Warning("Covariance parameter estimation: no convergence after the maximal number of iterations. If this is a problem, you might consider increasing the number of iterations or using a different learning rate."); } for (int i = 0; i < num_cov_par_; ++i) { optim_cov_pars[i] = cov_pars[i]; } if (calc_std_dev) { vec_t std_dev_cov(num_cov_par_); CalcStdDevCovPar(cov_pars, std_dev_cov); for (int i = 0; i < num_cov_par_; ++i) { std_dev_cov_par[i] = std_dev_cov[i]; } } has_covariates_ = false; } /*! 
* \brief Find linear regression coefficients and covariance parameters that minimize the negative log-likelihood (=MLE) using (Nesterov accelerated) gradient descent
* Note: You should pre-allocate memory for optim_cov_pars and optim_coef. Their length equal the number of covariance parameters and the number of regression coefficients
*		If calc_std_dev=true, you also need to pre-allocate memory for std_dev_cov_par and std_dev_coef of the same length for the standard deviations
* \param y_data Response variable data
* \param covariate_data Covariate data (=independent variables, features)
* \param num_covariates Number of covariates
* \param[out] optim_cov_pars Optimal covariance parameters
* \param[out] optim_coef Optimal regression coefficients
* \param[out] num_it Number of iterations
* \param init_cov_pars Initial values for covariance parameters of RE components
* \param init_coef Initial values for the regression coefficients
* \param lr_coef Learning rate for fixed-effect linear coefficients
* \param lr_cov Learning rate for covariance parameters
* \param acc_rate_coef Acceleration rate for coefficients for Nesterov acceleration (only relevant if nesterov_schedule_version == 0).
* \param acc_rate_cov Acceleration rate for covariance parameters for Nesterov acceleration (only relevant if nesterov_schedule_version == 0).
* \param momentum_offset Number of iterations for which no momentum is applied in the beginning
* \param max_iter Maximal number of iterations
* \param delta_rel_conv Convergence criterion: stop iteration if relative change in parameters is below this value
* \param use_nesterov_acc Indicates whether Nesterov acceleration is used in the gradient descent for finding the covariance parameters. Default = true
* \param nesterov_schedule_version Which version of Nesterov schedule should be used. Default = 0
* \param optimizer_cov Optimizer for covariance parameters. Options: "gradient_descent" or "fisher_scoring"
* \param optimizer_coef Optimizer for coefficients. Options: "gradient_descent" or "wls" (coordinate descent using weighted least squares)
* \param[out] std_dev_cov_par Standard deviations for the covariance parameters
* \param[out] std_dev_coef Standard deviations for the coefficients
* \param calc_std_dev If true, asymptotic standard deviations for the MLE of the covariance parameters are calculated as the diagonal of the inverse Fisher information
*/
void OptimLinRegrCoefCovPar(const double* y_data, const double* covariate_data, int num_covariates,
	double* optim_cov_pars, double* optim_coef, int& num_it, double* init_cov_pars,
	double* init_coef = nullptr, double lr_coef = 0.01, double lr_cov = 0.01,
	double acc_rate_coef = 0.1, double acc_rate_cov = 0.5, int momentum_offset = 2,
	int max_iter = 1000, double delta_rel_conv = 1.0e-6, bool use_nesterov_acc = true,
	int nesterov_schedule_version = 0, string_t optimizer_cov = "fisher_scoring",
	string_t optimizer_coef = "wls", double* std_dev_cov_par = nullptr,
	double* std_dev_coef = nullptr, bool calc_std_dev = false) {
	if (SUPPORTED_OPTIM_COV_PAR_.find(optimizer_cov) == SUPPORTED_OPTIM_COV_PAR_.end()) {
		Log::Fatal("Optimizer option '%s' is not supported for covariance parameters.", optimizer_cov.c_str());
	}
	if (SUPPORTED_OPTIM_COEF_.find(optimizer_coef) == SUPPORTED_OPTIM_COEF_.end()) {
		Log::Fatal("Optimizer option '%s' is not supported for regression coefficients.", optimizer_coef.c_str());
	}
	CHECK(covariate_data != nullptr);
	has_covariates_ = true;
	num_coef_ = num_covariates;
	X_ = Eigen::Map<const den_mat_t>(covariate_data, num_data_, num_coef_);
	//Check whether one of the columns contains only 1's and if not, give out warning
	vec_t vec_ones(num_data_);
	vec_ones.setOnes();
	bool has_intercept = false;
	for (int icol = 0; icol < num_coef_; ++icol) {
		if ((X_.col(icol) - vec_ones).cwiseAbs().sum() < 0.001) {
			has_intercept = true;
			break;
		}
	}
	if (!has_intercept) {
		Log::Warning("The covariate data contains no column of ones. This means that there is no intercept included.");
	}
	y_vec_ = Eigen::Map<const vec_t>(y_data, num_data_);
	vec_t cov_pars = Eigen::Map<const vec_t>(init_cov_pars, num_cov_par_);
	vec_t cov_pars_lag1 = cov_pars;
	vec_t beta(num_covariates);
	if (init_coef == nullptr) {
		beta.setZero();
	}
	else {
		beta = Eigen::Map<const vec_t>(init_coef, num_covariates);
	}
	vec_t beta_lag1 = beta;
	vec_t resid;
	num_it = max_iter;
	//Alternate between one update of the coefficients and one update of the covariance parameters per iteration
	for (int it = 0; it < max_iter; ++it) {
		if (it > 0) {
			//Momentum steps (lag-1 values are updated inside ApplyMomentumStep)
			ApplyMomentumStep(it, cov_pars, cov_pars_lag1, use_nesterov_acc, acc_rate_cov, nesterov_schedule_version, true, momentum_offset);
			if (optimizer_coef == "gradient_descent") {
				ApplyMomentumStep(it, beta, beta_lag1, use_nesterov_acc, acc_rate_coef, nesterov_schedule_version, false, momentum_offset);
			}
		}
		SetCovParsComps(cov_pars);
		CalcCovFactor(vecchia_approx_, true, 1., false);
		if (optimizer_coef == "gradient_descent") {//one step of gradient descent
			resid = y_vec_ - (X_ * beta);
			SetY(resid.data());
			CalcYAux();
			UpdateCoefGradOneIter(lr_coef, cov_pars[0], X_, beta);
		}
		else if (optimizer_coef == "wls") {//coordinate descent using generalized least squares
			SetY(y_vec_.data());
			CalcYAux();
			beta_lag1 = beta;
			UpdateCoefGLS(X_, beta);
		}
		//Recompute residuals with the updated coefficients before updating the covariance parameters
		resid = y_vec_ - (X_ * beta);
		SetY(resid.data());
		CalcYAux();
		if (optimizer_cov == "gradient_descent") {//one step of gradient descent
			UpdateCovParGradOneIter(lr_cov, cov_pars);
		}
		else if (optimizer_cov == "fisher_scoring") {//one step of Fisher scoring
			UpdateCovParFisherScoringOneIter(cov_pars);
		}
		CheckNaN(cov_pars);
		//Log progressively less often as the iteration count grows
		if (it < 10 || ((it + 1) % 10 == 0 && (it + 1) < 100) || ((it + 1) % 100 == 0 && (it + 1) < 1000) || ((it + 1) % 1000 == 0 && (it + 1) < 10000) || ((it + 1) % 10000 == 0)) {
			Log::Debug("Gradient descent iteration number %d", it + 1);
			for (int i = 0; i < (int)cov_pars.size(); ++i) { Log::Debug("cov_pars[%d]: %f", i, cov_pars[i]); }
			for (int i = 0; i < std::min((int)beta.size(), 3); ++i) { Log::Debug("beta[%d]: %f", i, beta[i]); }
		}
		//Converged only when BOTH coefficient and covariance-parameter relative changes are small
		if (((beta - beta_lag1).norm() / beta_lag1.norm() < delta_rel_conv) && ((cov_pars - cov_pars_lag1).norm() / cov_pars_lag1.norm() < delta_rel_conv)) {
			num_it = it + 1;
			break;
		}
	}
	if (num_it == max_iter) {
		Log::Warning("Covariance parameter estimation: no convergence after the maximal number of iterations");
	}
	for (int i = 0; i < num_cov_par_; ++i) {
		optim_cov_pars[i] = cov_pars[i];
	}
	if (calc_std_dev) {
		vec_t std_dev_cov(num_cov_par_);
		CalcStdDevCovPar(cov_pars, std_dev_cov);
		for (int i = 0; i < num_cov_par_; ++i) {
			std_dev_cov_par[i] = std_dev_cov[i];
		}
	}
	for (int i = 0; i < num_covariates; ++i) {
		optim_coef[i] = beta[i];
	}
	if (calc_std_dev) {
		vec_t std_dev_beta(num_covariates);
		CalcStdDevCoef(cov_pars, X_, std_dev_beta);
		for (int i = 0; i < num_covariates; ++i) {
			std_dev_coef[i] = std_dev_beta[i];
		}
	}
}

/*!
* \brief Set the data used for making predictions (useful if the same data is used repeatedly, e.g., in validation of GPBoost)
* \param num_data_pred Number of data points for which predictions are made
* \param cluster_ids_data_pred IDs / labels indicating independent realizations of Gaussian processes (same values = same process realization) for which predictions are to be made
* \param re_group_data_pred Labels of group levels for the grouped random effects in column-major format (i.e. first the levels for the first effect, then for the second, etc.).
Every group label needs to end with the null character '\0'
* \param re_group_rand_coef_data_pred Covariate data for grouped random coefficients
* \param gp_coords_data_pred Coordinates (features) for Gaussian process
* \param gp_rand_coef_data_pred Covariate data for Gaussian process random coefficients
* \param covariate_data_pred Covariate data (=independent variables, features) for prediction
*/
void SetPredictionData(int num_data_pred,
	const gp_id_t* cluster_ids_data_pred = nullptr, const char* re_group_data_pred = nullptr,
	const double* re_group_rand_coef_data_pred = nullptr, double* gp_coords_data_pred = nullptr,
	const double* gp_rand_coef_data_pred = nullptr, const double* covariate_data_pred = nullptr) {
	//For every optional input: a nullptr clears the previously saved data, otherwise the data is copied into the corresponding member
	if (cluster_ids_data_pred == nullptr) {
		cluster_ids_data_pred_.clear();
	}
	else {
		cluster_ids_data_pred_ = std::vector<gp_id_t>(cluster_ids_data_pred, cluster_ids_data_pred + num_data_pred);
	}
	if (re_group_data_pred == nullptr) {
		re_group_levels_pred_.clear();
		if (num_re_group_ > 0) {
			Log::Fatal("No group data is provided for making predictions");
		}
	}
	else {
		//For grouped random effects: create matrix 're_group_levels_pred' (vector of vectors, dimension: num_re_group_ x num_data_) with strings of group levels from characters in 'const char* re_group_data_pred'
		re_group_levels_pred_ = std::vector<std::vector<string_t>>(num_re_group_, std::vector<string_t>(num_data_pred));
		ConvertCharToStringGroupLevels(num_data_pred, num_re_group_, re_group_data_pred, re_group_levels_pred_);
	}
	if (re_group_rand_coef_data_pred == nullptr) {
		re_group_rand_coef_data_pred_.clear();
	}
	else {
		re_group_rand_coef_data_pred_ = std::vector<double>(re_group_rand_coef_data_pred, re_group_rand_coef_data_pred + num_data_pred * num_re_group_rand_coef_);
	}
	if (gp_coords_data_pred == nullptr) {
		gp_coords_data_pred_.clear();
	}
	else {
		gp_coords_data_pred_ = std::vector<double>(gp_coords_data_pred, gp_coords_data_pred + num_data_pred * dim_gp_coords_);
	}
	if (gp_rand_coef_data_pred == nullptr) {
		gp_rand_coef_data_pred_.clear();
	}
	else {
		gp_rand_coef_data_pred_ = std::vector<double>(gp_rand_coef_data_pred, gp_rand_coef_data_pred + num_data_pred * num_gp_rand_coef_);
	}
	if (covariate_data_pred == nullptr) {
		covariate_data_pred_.clear();
	}
	else {
		covariate_data_pred_ = std::vector<double>(covariate_data_pred, covariate_data_pred + num_data_pred * num_coef_);
	}
}

/*!
* \brief Make predictions: calculate conditional mean and covariance matrix
* Note: You should pre-allocate memory for out_predict
*		Its length is equal to num_data_pred if only the conditional mean is predicted (predict_cov_mat=false)
*		or num_data_pred * (1 + num_data_pred) if both the conditional mean and covariance matrix are predicted (predict_cov_mat=true)
* \param cov_pars_pred Covariance parameters of components
* \param y_obs Response variable for observed data
* \param num_data_pred Number of data points for which predictions are made
* \param[out] out_predict Conditional mean at prediction points (="predicted value") followed by (if predict_cov_mat=true) the conditional covariance matrix at in column-major format
* \param predict_cov_mat If true, the conditional covariance matrix is calculated (default=false)
* \param covariate_data_pred Covariate data (=independent variables, features) for prediction
* \param coef_pred Coefficients for linear covariates
* \param cluster_ids_data_pred IDs / labels indicating independent realizations of Gaussian processes (same values = same process realization) for which predictions are to be made
* \param re_group_data_pred Labels of group levels for the grouped random effects in column-major format (i.e. first the levels for the first effect, then for the second, etc.).
Every group label needs to end with the null character '\0' * \param re_group_rand_coef_data_pred Covariate data for grouped random coefficients * \param gp_coords_data_pred Coordinates (features) for Gaussian process * \param gp_rand_coef_data_pred Covariate data for Gaussian process random coefficients * \param use_saved_data If true, saved data is used and some arguments are ignored * \param vecchia_pred_type Type of Vecchia approximation for making predictions. "order_obs_first_cond_obs_only" = observed data is ordered first and neighbors are only observed points, "order_obs_first_cond_all" = observed data is ordered first and neighbors are selected among all points (observed + predicted), "order_pred_first" = predicted data is ordered first for making predictions, "latent_order_obs_first_cond_obs_only" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are only observed points, "latent_order_obs_first_cond_all" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are selected among all points * \param num_neighbors_pred The number of neighbors used in the Vecchia approximation for making predictions (-1 means that the value already set at initialization is used) */ void Predict(const double* cov_pars_pred, const double* y_obs, data_size_t num_data_pred, double* out_predict, bool predict_cov_mat = false, const double* covariate_data_pred = nullptr, const double* coef_pred = nullptr, const gp_id_t* cluster_ids_data_pred = nullptr, const char* re_group_data_pred = nullptr, const double* re_group_rand_coef_data_pred = nullptr, double* gp_coords_data_pred = nullptr, const double* gp_rand_coef_data_pred = nullptr, bool use_saved_data = false, const char* vecchia_pred_type = nullptr, int num_neighbors_pred = -1) { //Should previously set data be used? 
std::vector<std::vector<string_t>> re_group_levels_pred;//Matrix with group levels for the grouped random effects (re_group_levels_pred[j] contains the levels for RE number j) if (use_saved_data) { re_group_levels_pred = re_group_levels_pred_; if (cluster_ids_data_pred_.empty()) { cluster_ids_data_pred = nullptr; } else { cluster_ids_data_pred = cluster_ids_data_pred_.data(); } if (re_group_rand_coef_data_pred_.empty()) { re_group_rand_coef_data_pred = nullptr; } else { re_group_rand_coef_data_pred = re_group_rand_coef_data_pred_.data(); } if (gp_coords_data_pred_.empty()) { gp_coords_data_pred = nullptr; } else { gp_coords_data_pred = gp_coords_data_pred_.data(); } if (gp_rand_coef_data_pred_.empty()) { gp_rand_coef_data_pred = nullptr; } else { gp_rand_coef_data_pred = gp_rand_coef_data_pred_.data(); } if (covariate_data_pred_.empty()) { covariate_data_pred = nullptr; } else { covariate_data_pred = covariate_data_pred_.data(); } } else { if (num_re_group_ > 0) { if (re_group_data_pred == nullptr) { Log::Fatal("No group data is provided for making predictions"); } else { //For grouped random effecst: create matrix 're_group_levels_pred' (vector of vectors, dimension: num_re_group_ x num_data_) with strings of group levels from characters in 'const char* re_group_data_pred' re_group_levels_pred = std::vector<std::vector<string_t>>(num_re_group_, std::vector<string_t>(num_data_pred)); ConvertCharToStringGroupLevels(num_data_pred, num_re_group_, re_group_data_pred, re_group_levels_pred); } } } //Some checks CHECK(num_data_pred > 0); if (has_covariates_) { CHECK(covariate_data_pred != nullptr); CHECK(coef_pred != nullptr); } if (y_obs == nullptr) { if (y_.empty()) { Log::Fatal("Observed data is not provided and has not been set before"); } } //Check whether some data is missing if (re_group_rand_coef_data_pred == nullptr && num_re_group_rand_coef_ > 0) { Log::Fatal("No covariate data for grouped random coefficients is provided for making predictions"); } if 
(gp_coords_data_pred == nullptr && num_gp_ > 0) { Log::Warning("No coordinate data for the Gaussian process is provided for making predictions"); } if (gp_rand_coef_data_pred == nullptr && num_gp_rand_coef_ > 0) { Log::Warning("No covariate data for Gaussian process random coefficients is provided for making predictions"); } if (num_data_pred > 10000 && predict_cov_mat) { double num_mem_d = ((double)num_data_pred) * ((double)num_data_pred); int mem_size = (int)(num_mem_d * 8. / 1000000.); Log::Warning("The covariance matrix can be very large for large sample sizes which might lead to memory limitations. In your case (n = %d), the covariance needs at least approximately %d mb of memory. If you only need variances or covariances for linear combinations, contact the developer of this package and ask to implement this feature.", num_data_pred, mem_size); } if (vecchia_approx_) { if (vecchia_pred_type != nullptr) { string_t vecchia_pred_type_S = std::string(vecchia_pred_type); CHECK(vecchia_pred_type_S == "order_obs_first_cond_obs_only" || vecchia_pred_type_S == "order_obs_first_cond_all" || vecchia_pred_type_S == "order_pred_first" || vecchia_pred_type_S == "latent_order_obs_first_cond_obs_only" || vecchia_pred_type_S == "latent_order_obs_first_cond_all"); vecchia_pred_type_ = vecchia_pred_type_S; } if (num_neighbors_pred > 0) { num_neighbors_pred_ = num_neighbors_pred; } } vec_t coef; if (has_covariates_) { coef = Eigen::Map<const vec_t>(coef_pred, num_coef_); den_mat_t X_pred = Eigen::Map<const den_mat_t>(covariate_data_pred, num_data_pred, num_coef_); vec_t mu = X_pred * coef; #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_pred; ++i) { out_predict[i] = mu[i]; } } vec_t cov_pars = Eigen::Map<const vec_t>(cov_pars_pred, num_cov_par_); //Set up cluster IDs std::map<gp_id_t, int> num_data_per_cluster_pred; std::map<gp_id_t, std::vector<int>> data_indices_per_cluster_pred; std::vector<gp_id_t> unique_clusters_pred; data_size_t num_clusters_pred; 
SetUpGPIds(num_data_pred, cluster_ids_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred, unique_clusters_pred, num_clusters_pred); //Check whether predictions are made for existing clusters or if only for new independet clusters predictions are made bool pred_for_observed_data = false; for (const auto& cluster_i : unique_clusters_pred) { if (std::find(unique_clusters_.begin(), unique_clusters_.end(), cluster_i) != unique_clusters_.end()) { pred_for_observed_data = true; break; } } //Factorize covariance matrix and calculate Psi^{-1}y_obs (if required for prediction) if (pred_for_observed_data) {//TODO: this acutally needs to be done only for the GP realizations for which predictions are made (currently it is done for all of them in unique_clusters_pred) if (has_covariates_) { vec_t resid; if (y_obs != nullptr) { vec_t y = Eigen::Map<const vec_t>(y_obs, num_data_); resid = y - (X_ * coef); } else { resid = y_vec_ - (X_ * coef); } SetY(resid.data()); } else { if (y_obs != nullptr) { SetY(y_obs); } } SetCovParsComps(cov_pars); if (!vecchia_approx_) { CalcCovFactor(false, true, 1., false);//no need to do this for the Vecchia approximation, is done in the prediction steps CalcYAux(); } }//end if(pred_for_observed_data) //Initialize covariance matrix if (predict_cov_mat) {//TODO: avoid unnecessary initialization (only set to 0 for covariances accross different realizations of GPs) #pragma omp parallel for schedule(static) for (int i = 0; i < (num_data_pred * num_data_pred); ++i) { out_predict[i + num_data_pred] = 0.; } } for (const auto& cluster_i : unique_clusters_pred) { //no data observed for this Gaussian process with ID 'cluster_i'. 
Thus use prior mean (0) and prior covariance matrix if (std::find(unique_clusters_.begin(), unique_clusters_.end(), cluster_i) == unique_clusters_.end()) { if (!has_covariates_) { #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) { out_predict[data_indices_per_cluster_pred[cluster_i][i]] = 0.; } } if (predict_cov_mat) { T1 psi; std::vector<std::shared_ptr<RECompBase<T1>>> re_comps_cluster_i; if (vecchia_approx_) { std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]); std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]); std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]); std::vector<Triplet_t> entries_init_B_cluster_i; std::vector<Triplet_t> entries_init_B_grad_cluster_i; std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]); CreateREComponentsVecchia(num_data_pred, data_indices_per_cluster_pred, cluster_i, num_data_per_cluster_pred, gp_coords_data_pred, dim_gp_coords_, gp_rand_coef_data_pred, num_gp_rand_coef_, cov_fct_, cov_fct_shape_, re_comps_cluster_i, nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, entries_init_B_cluster_i, entries_init_B_grad_cluster_i, z_outer_z_obs_neighbors_cluster_i, "none", num_neighbors_pred_);//TODO: maybe also use ordering for making predictions? 
(need to check that there are not errors) for (int j = 0; j < num_comps_total_; ++j) { const vec_t pars = cov_pars.segment(ind_par_[j] + 1, ind_par_[j + 1] - ind_par_[j]); re_comps_cluster_i[j]->SetCovPars(pars); } sp_mat_t B_cluster_i; sp_mat_t D_inv_cluster_i; std::vector<sp_mat_t> B_grad_cluster_i;//not used, but needs to be passed to function std::vector<sp_mat_t> D_grad_cluster_i;//not used, but needs to be passed to function CalcCovFactorVecchia(num_data_per_cluster_pred[cluster_i], false, re_comps_cluster_i, nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, entries_init_B_cluster_i, entries_init_B_grad_cluster_i, z_outer_z_obs_neighbors_cluster_i, B_cluster_i, D_inv_cluster_i, B_grad_cluster_i, D_grad_cluster_i); //Calculate Psi sp_mat_t D_sqrt(num_data_per_cluster_pred[cluster_i], num_data_per_cluster_pred[cluster_i]); D_sqrt.setIdentity(); D_sqrt.diagonal().array() = D_inv_cluster_i.diagonal().array().pow(-0.5); sp_mat_t B_inv_D_sqrt; eigen_sp_Lower_sp_RHS_cs_solve(B_cluster_i, D_sqrt, B_inv_D_sqrt, true); psi = B_inv_D_sqrt * B_inv_D_sqrt.transpose(); }//end Vecchia else { psi.resize(num_data_per_cluster_pred[cluster_i], num_data_per_cluster_pred[cluster_i]); psi.setIdentity(); CreateREComponents(num_data_pred, num_re_group_, data_indices_per_cluster_pred, cluster_i, re_group_levels_pred, num_data_per_cluster_pred, num_re_group_rand_coef_, re_group_rand_coef_data_pred, ind_effect_group_rand_coef_, num_gp_, gp_coords_data_pred, dim_gp_coords_, gp_rand_coef_data_pred, num_gp_rand_coef_, cov_fct_, cov_fct_shape_, ind_intercept_gp_, re_comps_cluster_i); for (int j = 0; j < num_comps_total_; ++j) { const vec_t pars = cov_pars.segment(ind_par_[j] + 1, ind_par_[j + 1] - ind_par_[j]); re_comps_cluster_i[j]->SetCovPars(pars); re_comps_cluster_i[j]->CalcSigma(); psi += (*(re_comps_cluster_i[j]->GetZSigmaZt().get())); } }//end not Vecchia psi *= cov_pars[0]; //write on output #pragma omp parallel for schedule(static) for 
(int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {//column index for (int j = 0; j < num_data_per_cluster_pred[cluster_i]; ++j) {//row index out_predict[data_indices_per_cluster_pred[cluster_i][i] * num_data_pred + data_indices_per_cluster_pred[cluster_i][j] + num_data_pred] = psi.coeff(j, i); } } }//end predict_cov_mat }//end cluster_i with no observed data else {//there exists observed data for this cluster_i (= typical case) den_mat_t gp_coords_mat_pred; if (num_gp_ > 0) { std::vector<double> gp_coords_pred; for (int j = 0; j < dim_gp_coords_; ++j) { for (const auto& id : data_indices_per_cluster_pred[cluster_i]) { gp_coords_pred.push_back(gp_coords_data_pred[j * num_data_pred + id]); } } gp_coords_mat_pred = Eigen::Map<den_mat_t>(gp_coords_pred.data(), num_data_per_cluster_pred[cluster_i], dim_gp_coords_); } vec_t mean_pred_id(num_data_per_cluster_pred[cluster_i]); T1 cov_mat_pred_id; if (predict_cov_mat) { cov_mat_pred_id = T1(num_data_per_cluster_pred[cluster_i], num_data_per_cluster_pred[cluster_i]); } if (vecchia_approx_) { std::shared_ptr<RECompGP<T1>> re_comp = std::dynamic_pointer_cast<RECompGP<T1>>(re_comps_[cluster_i][ind_intercept_gp_]); int num_data_tot = num_data_per_cluster_[cluster_i] + num_data_per_cluster_pred[cluster_i]; double num_mem_d = ((double)num_neighbors_pred_) * ((double)num_neighbors_pred_) * (double)(num_data_tot)+(double)(num_neighbors_pred_) * (double)(num_data_tot); int mem_size = (int)(num_mem_d * 8. / 1000000.); if (mem_size > 4000) { Log::Warning("The current implementation of the Vecchia approximation needs a lot of memory if the number of neighbors is large. In your case (nb. of neighbors = %d, nb. of observations = %d, nb. of predictions = %d), this needs at least approximately %d mb of memory. 
If this is a problem for you, contact the developer of this package and ask to change this.", num_neighbors_pred_, num_data_per_cluster_[cluster_i], num_data_per_cluster_pred[cluster_i], mem_size); } if (vecchia_pred_type_ == "order_obs_first_cond_obs_only") { CalcPredVecchiaObservedFirstOrder(true, cluster_i, num_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred, re_comp->coords_, gp_coords_mat_pred, gp_rand_coef_data_pred, predict_cov_mat, mean_pred_id, cov_mat_pred_id); } else if (vecchia_pred_type_ == "order_obs_first_cond_all") { CalcPredVecchiaObservedFirstOrder(false, cluster_i, num_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred, re_comp->coords_, gp_coords_mat_pred, gp_rand_coef_data_pred, predict_cov_mat, mean_pred_id, cov_mat_pred_id); } else if (vecchia_pred_type_ == "order_pred_first") { CalcPredVecchiaPredictedFirstOrder(cluster_i, num_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred, re_comp->coords_, gp_coords_mat_pred, gp_rand_coef_data_pred, predict_cov_mat, mean_pred_id, cov_mat_pred_id); } else if (vecchia_pred_type_ == "latent_order_obs_first_cond_obs_only") { CalcPredVecchiaLatentObservedFirstOrder(true, cluster_i, num_data_per_cluster_pred, re_comp->coords_, gp_coords_mat_pred, predict_cov_mat, mean_pred_id, cov_mat_pred_id); } else if (vecchia_pred_type_ == "latent_order_obs_first_cond_all") { CalcPredVecchiaLatentObservedFirstOrder(false, cluster_i, num_data_per_cluster_pred, re_comp->coords_, gp_coords_mat_pred, predict_cov_mat, mean_pred_id, cov_mat_pred_id); } }//end Vecchia approximation else { CalcPred(cluster_i, num_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred, re_group_levels_pred, re_group_rand_coef_data_pred, gp_coords_mat_pred, gp_rand_coef_data_pred, predict_cov_mat, mean_pred_id, cov_mat_pred_id); }//end not Vecchia approximation //write on output #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) 
{ if (has_covariates_) { out_predict[data_indices_per_cluster_pred[cluster_i][i]] += mean_pred_id[i]; } else { out_predict[data_indices_per_cluster_pred[cluster_i][i]] = mean_pred_id[i]; } } if (predict_cov_mat) { cov_mat_pred_id *= cov_pars[0]; #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {//column index for (int j = 0; j < num_data_per_cluster_pred[cluster_i]; ++j) {//row index out_predict[data_indices_per_cluster_pred[cluster_i][i] * num_data_pred + data_indices_per_cluster_pred[cluster_i][j] + num_data_pred] = cov_mat_pred_id.coeff(j, i);//cov_mat_pred_id_den(j, i); } } } }//end cluster_i with data }//end loop over cluster } /*! * \brief Find "reasonable" default values for the intial values of the covariance parameters (on transformed scale) * Note: You should pre-allocate memory for optim_cov_pars (length = number of covariance parameters) * \param y_data Response variable data * \param[out] init_cov_pars Initial values for covariance parameters of RE components */ void FindInitCovPar(const double* y_data, double* init_cov_pars) { double mean = 0; for (int i = 0; i < num_data_; ++i) { mean += y_data[i]; } mean /= num_data_; double var = 0; for (int i = 0; i < num_data_; ++i) { var += (y_data[i] - mean) * (y_data[i] - mean); } var /= (num_data_ - 1); init_cov_pars[0] = var; int ind_par = 1; for (int j = 0; j < num_comps_total_; ++j) { int num_par_j = ind_par_[j + 1] - ind_par_[j]; vec_t pars = vec_t(num_par_j); re_comps_[unique_clusters_[0]][j]->FindInitCovPar(pars); for (int jj = 0; jj < num_par_j; ++jj) { init_cov_pars[ind_par] = pars[jj]; ind_par++; } } } int num_cov_par() { return(num_cov_par_); } //void CalcXTPsiInvX(const den_mat_t& X, den_mat_t& XT_psi_inv_X) { // if (num_clusters_ == 1 && vecchia_ordering_ == "none") { // if (vecchia_approx_) { // den_mat_t BX = B_[unique_clusters_[0]] * X; // XT_psi_inv_X = BX.transpose() * D_inv_[unique_clusters_[0]] * BX; // } // else { // XT_psi_inv_X = 
X.transpose() * chol_facts_solve_[unique_clusters_[0]].solve(X); // } // } // else { // XT_psi_inv_X = den_mat_t(X.cols(), X.cols()); // XT_psi_inv_X.setZero(); // den_mat_t BX; // for (const auto& cluster_i : unique_clusters_) { // if (vecchia_approx_) { // BX = B_[cluster_i] * X(data_indices_per_cluster_[cluster_i], Eigen::all); // XT_psi_inv_X += BX.transpose() * D_inv_[cluster_i] * BX; // } // else { // XT_psi_inv_X += ((den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all)).transpose() * chol_facts_solve_[cluster_i].solve((den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all)); // } // } // } //} /*! * \brief Calculate the leaf values when performing a Newton update step after the tree structure has been found in tree-boosting * Note: only used in GPBoost for tree-boosting (this is called from regression_objective). It is assume that 'CalcYAux' has been called before. * \param data_leaf_index Leaf index for every data point (array of size num_data) * \param num_leaves Number of leaves * \param[out] leaf_values Leaf values when performing a Newton update step (array of size num_leaves) * \param marg_variance The marginal variance. Default = 1. Can be used to multiply values by it since Newton updates do not depend on it but 'CalcYAux' might have been called using marg_variance!=1. */ void NewtonUpdateLeafValues(const int* data_leaf_index, const int num_leaves, double* leaf_values, double marg_variance = 1.) 
{ CHECK(y_aux_has_been_calculated_); den_mat_t HTPsiInvH(num_leaves, num_leaves); vec_t HTYAux(num_leaves); HTPsiInvH.setZero(); HTYAux.setZero(); for (const auto& cluster_i : unique_clusters_) { //Entries for matrix H_cluster_i = incidence matrix H that relates tree leaves to observations for cluster_i std::vector<Triplet_t> entries_H_cluster_i(num_data_per_cluster_[cluster_i]); #pragma omp parallel for schedule(static) for (int i = 0; i < num_data_per_cluster_[cluster_i]; ++i) { entries_H_cluster_i[i] = Triplet_t(i, data_leaf_index[data_indices_per_cluster_[cluster_i][i]], 1.); } if (vecchia_approx_) { sp_mat_t H_cluster_i(num_data_per_cluster_[cluster_i], num_leaves);//row major format is needed for Vecchia approx. H_cluster_i.setFromTriplets(entries_H_cluster_i.begin(), entries_H_cluster_i.end()); HTYAux -= H_cluster_i.transpose() * y_aux_[cluster_i];//minus sign since y_aux_ has been calculated on the gradient = F-y (and not y-F) sp_mat_t BH = B_[cluster_i] * H_cluster_i; den_mat_t HTPsiInvH_cluster_i = den_mat_t(BH.transpose() * D_inv_[cluster_i] * BH); HTPsiInvH += HTPsiInvH_cluster_i; } else { sp_mat_t H_cluster_i(num_data_per_cluster_[cluster_i], num_leaves); H_cluster_i.setFromTriplets(entries_H_cluster_i.begin(), entries_H_cluster_i.end()); HTYAux -= H_cluster_i.transpose() * y_aux_[cluster_i];//minus sign since y_aux_ has been calculated on the gradient = F-y (and not y-F) T1 PsiInvSqrtH; CalcPsiInvSqrtH(PsiInvSqrtH, H_cluster_i, cluster_i); den_mat_t HTPsiInvH_cluster_i = PsiInvSqrtH.transpose() * PsiInvSqrtH; HTPsiInvH += HTPsiInvH_cluster_i; /* Log::Info("H_cluster_i[:,0] = %f, %f, %f, %f, %f, %f", H_cluster_i.coeffRef(0, 0), H_cluster_i.coeffRef(1, 0), H_cluster_i.coeffRef(2, 0), H_cluster_i.coeffRef(3, 0), H_cluster_i.coeffRef(4, 0), H_cluster_i.coeffRef(5, 0)); Log::Info("H_cluster_i[:,1] = %f, %f, %f, %f, %f, %f", H_cluster_i.coeffRef(0, 1), H_cluster_i.coeffRef(1, 1), H_cluster_i.coeffRef(2, 1), H_cluster_i.coeffRef(3, 1), 
H_cluster_i.coeffRef(4, 1), H_cluster_i.coeffRef(5, 1));*/ } } //Log::Info("marg_variance: %f", marg_variance); HTYAux *= marg_variance; vec_t new_leaf_values = HTPsiInvH.llt().solve(HTYAux); for (int i = 0; i < num_leaves; ++i) { leaf_values[i] = new_leaf_values[i]; } //Log::Info("HTYAux[:] = %f, %f", HTYAux(0), HTYAux(1)); //Log::Info("HTPsiInvH[0,:] = %f, %f", HTPsiInvH(0, 0), HTPsiInvH(0, 1)); //Log::Info("HTPsiInvH[1,:] = %f, %f", HTPsiInvH(1, 0), HTPsiInvH(1, 1)); } private: /*! \brief Number of data points */ data_size_t num_data_; /*! \brief Keys: Labels of independent realizations of REs/GPs, values: vectors with indices for data points */ std::map<gp_id_t, std::vector<int>> data_indices_per_cluster_; /*! \brief Keys: Labels of independent realizations of REs/GPs, values: number of data points per independent realization */ std::map<gp_id_t, int> num_data_per_cluster_; /*! \brief Number of independent realizations of the REs/GPs */ data_size_t num_clusters_; /*! \brief Unique labels of independent realizations */ std::vector<gp_id_t> unique_clusters_; /*! \brief Number of grouped (intercept) random effects */ data_size_t num_re_group_ = 0; /*! \brief Number of grouped random coefficients */ data_size_t num_re_group_rand_coef_ = 0; /*! \brief Indices that relate every random coefficients to a "base" intercept grouped random effect. Counting starts at 1 (and ends at the number of base intercept random effects). Length of vector = num_re_group_rand_coef_. */ std::vector<int> ind_effect_group_rand_coef_; /*! \brief Total number of grouped random effects (random intercepts plus random coefficients (slopes)) */ data_size_t num_re_group_total_ = 0; /*! \brief 1 if there is a Gaussian process 0 otherwise */ data_size_t num_gp_ = 0; /*! \brief Type of GP. 0 = classical (spatial) GP, 1 = spatio-temporal GP */ //TODO: remove? int8_t GP_type_ = 0; /*! \brief Number of random coefficient GPs */ data_size_t num_gp_rand_coef_ = 0; /*! 
\brief Total number of GPs (random intercepts plus random coefficients) */ data_size_t num_gp_total_ = 0; /*! \brief Index in the vector of random effect components (in the values of 're_comps_') of the intercept GP associated with the random coefficient GPs */ int ind_intercept_gp_; /*! \brief Dimension of the coordinates (=number of features) for Gaussian process */ int dim_gp_coords_ = 2;//required to save since it is needed in the Predict() function when predictions are made for new independent realizations of GPs /*! \brief Type of covariance(kernel) function for Gaussian processes */ string_t cov_fct_ = "exponential";//required to also save here since it is needed in the Predict() function when predictions are made for new independent realizations of GPs /*! \brief Shape parameter of covariance function (=smoothness parameter for Matern covariance) */ double cov_fct_shape_ = 0.; /*! \brief Keys: labels of independent realizations of REs/GPs, values: vectors with individual RE/GP components */ std::map<gp_id_t, std::vector<std::shared_ptr<RECompBase<T1>>>> re_comps_; /*! \brief Indices of parameters of RE components in global parameter vector cov_pars. ind_par_[i] + 1 and ind_par_[i+1] are the indices of the first and last parameter of component number i */ std::vector<data_size_t> ind_par_; /*! \brief Number of covariance parameters */ data_size_t num_cov_par_; /*! \brief Total number of random effect components (grouped REs plus other GPs) */ data_size_t num_comps_total_ = 0; /*! \brief Key: labels of independent realizations of REs/GPs, values: Symbolic Cholesky decomposition of Psi matrices */ std::map<gp_id_t, T2> chol_facts_solve_; /*! \brief Key: labels of independent realizations of REs/GPs, values: Cholesky factors of Psi matrices */ //TODO: above needed or can pattern be saved somewhere else? std::map<gp_id_t, T1> chol_facts_; /*! \brief Key: labels of independent realizations of REs/GPs, values: **** */ //TODO: remove? std::map<gp_id_t, T1> Id_; /*! 
\brief Key: labels of independent realizations of REs/GPs, values: Idendity matrices used for calculation of inverse covariance matrix **** */ std::map<gp_id_t, cs> Id_cs_; /*! \brief Key: labels of independent realizations of REs/GPs, value: data y */ std::map<gp_id_t, vec_t> y_; /*! \brief Key: labels of independent realizations of REs/GPs, value: Psi^-1*y_ (used for various computations) */ std::map<gp_id_t, vec_t> y_aux_; /*! \brief Indicates whether y_aux_ has been calculated */ bool y_aux_has_been_calculated_ = false; /*! \brief Copy of response data (used only in case there are also linear covariates since then y_ is modified during the algorithm) */ vec_t y_vec_; /*! \brief Key: labels of independent realizations of REs/GPs, value: Psi^-1*y_ (used for various computations) */ bool do_symbolic_decomposition_ = true; /*! \brief If true, the model linearly incluses covariates */ bool has_covariates_ = false; /*! \brief Number of covariates */ int num_coef_; /*! \brief Covariate data */ den_mat_t X_; /*! \brief List of supported optimizers for covariance parameters */ const std::set<string_t> SUPPORTED_OPTIM_COV_PAR_{ "gradient_descent", "fisher_scoring" }; /*! \brief List of supported optimizers for regression coefficients */ const std::set<string_t> SUPPORTED_OPTIM_COEF_{ "gradient_descent", "wls" }; /*! \brief If true, the Veccia approximation is used for the Gaussian process */ bool vecchia_approx_ = false; /*! \brief The number of neighbors used in the Vecchia approximation */ int num_neighbors_; /*! \brief Ordering used in the Vecchia approximation. "none" = no ordering, "random" = random ordering */ string_t vecchia_ordering_ = "none"; /*! \brief The number of neighbors used in the Vecchia approximation for making predictions */ int num_neighbors_pred_; /*! \brief Ordering used in the Vecchia approximation for making predictions. 
"order_obs_first_cond_obs_only" = observed data is ordered first and neighbors are only observed points, "order_obs_first_cond_all" = observed data is ordered first and neighbors are selected among all points (observed + predicted), "order_pred_first" = predicted data is ordered first for making predictions */ string_t vecchia_pred_type_ = "order_obs_first_cond_obs_only";//This is saved here and not simply set in the prediction function since it needs to be used repeatedly in the GPBoost algorithm when making predictions in "regression_metric.hpp" and the way predictions are done for the Vecchia approximation should be decoupled from the boosting algorithm /*! \brief List of supported covariance functions */ const std::set<string_t> SUPPORTED_VECCHIA_PRED_TYPES_{ "order_obs_first_cond_obs_only", "order_obs_first_cond_all", "order_pred_first", "latent_order_obs_first_cond_obs_only", "latent_order_obs_first_cond_all" }; /*! \brief Collects indices of nearest neighbors (used for Vecchia approximation) */ std::map<gp_id_t, std::vector<std::vector<int>>> nearest_neighbors_; /*! \brief Distances between locations and their nearest neighbors (this is used only if the Vecchia approximation is used, otherwise the distances are saved directly in the base GP component) */ std::map<gp_id_t, std::vector<den_mat_t>> dist_obs_neighbors_; /*! \brief Distances between nearest neighbors for all locations (this is used only if the Vecchia approximation is used, otherwise the distances are saved directly in the base GP component) */ std::map<gp_id_t, std::vector<den_mat_t>> dist_between_neighbors_;//TODO: this contains duplicate information (i.e. distances might be saved reduntly several times). But there is a trade-off between storage and computational speed. I currently don't see a way for saving unique distances without copying them when using the^m. /*! \brief Outer product of covariate vector at observations and neighbors with itself. 
First index = cluster, second index = data point i, third index = GP number j (this is used only if the Vecchia approximation is used, this is handled saved directly in the GP component using Z_) */ std::map<gp_id_t, std::vector<std::vector<den_mat_t>>> z_outer_z_obs_neighbors_; /*! \brief Collects matrices B = I - A (=Cholesky factor of inverse covariance) for Vecchia approximation */ std::map<gp_id_t, sp_mat_t> B_; /*! \brief Collects diagonal matrices D^-1 for Vecchia approximation */ std::map<gp_id_t, sp_mat_t> D_inv_; /*! \brief Collects derivatives of matrices B ( = derivative of matrix -A) for Vecchia approximation */ std::map<gp_id_t, std::vector<sp_mat_t>> B_grad_; /*! \brief Collects derivatives of matrices D for Vecchia approximation */ std::map<gp_id_t, std::vector<sp_mat_t>> D_grad_; /*! \brief Triplets for intializing the matrices B */ std::map<gp_id_t, std::vector<Triplet_t>> entries_init_B_; /*! \brief Triplets for intializing the matrices B_grad */ std::map<gp_id_t, std::vector<Triplet_t>> entries_init_B_grad_; /*! \brief Variance of idiosyncratic error term (nugget effect) */ double sigma2_; /*! \brief Cluster IDs for prediction */ std::vector<gp_id_t> cluster_ids_data_pred_; /*! \brief Levels of grouped RE for prediction */ std::vector<std::vector<string_t>> re_group_levels_pred_; /*! \brief Covariate data for grouped random RE for prediction */ std::vector<double> re_group_rand_coef_data_pred_; /*! \brief Coordinates for GP for prediction */ std::vector<double> gp_coords_data_pred_; /*! \brief Covariate data for random GP for prediction */ std::vector<double> gp_rand_coef_data_pred_; /*! \brief Covariate data for linear regression term */ std::vector<double> covariate_data_pred_; /*! 
\brief Nesterov schedule */
	// Momentum factor for Nesterov-accelerated gradient descent at iteration 'iter'.
	// Returns 0 for the first 'momentum_offset' iterations (no acceleration), then either
	// a constant rate (version 0) or the classical 1 - 3/(6+iter) schedule (version 1).
	double NesterovSchedule(int iter, int momentum_schedule_version = 0,
		double nesterov_acc_rate = 0.5, int momentum_offset = 2) {
		if (iter < momentum_offset) {
			return(0.);
		}
		else {
			if (momentum_schedule_version == 0) {
				return(nesterov_acc_rate);
			}
			else if (momentum_schedule_version == 1) {
				return(1. - (3. / (6. + iter)));
			}
			else {
				return(0.);//unknown schedule version: fall back to no acceleration
			}
		}
	}
	/*! \brief mutex for threading safe call */
	std::mutex mutex_;
	/*! \brief Constructs identity matrices if sparse matrices are used (used for calculating inverse covariance matrix) */
	template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
	void ConstructI(gp_id_t cluster_i) {
		T3 I(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);//identity matrix for calculating precision matrix
		I.setIdentity();
		Id_.insert({ cluster_i, I });
		cs Id_cs = cs();//same identity matrix, viewed in CSparse 'cs' format
		//TODO: construct this independently of Id_ , but then care need to be taken for deleting the pointer objects.
		Id_cs.nzmax = num_data_per_cluster_[cluster_i];
		Id_cs.m = num_data_per_cluster_[cluster_i];
		Id_cs.n = num_data_per_cluster_[cluster_i];
		Id_[cluster_i].makeCompressed();
		// NOTE(review): Id_cs aliases the buffers owned by Id_[cluster_i]; it stays valid only as
		// long as Id_[cluster_i] is neither modified nor destroyed — confirm lifetimes at call sites.
		Id_cs.p = reinterpret_cast<csi*>(Id_[cluster_i].outerIndexPtr());
		Id_cs.i = reinterpret_cast<csi*>(Id_[cluster_i].innerIndexPtr());
		Id_cs.x = Id_[cluster_i].valuePtr();
		Id_cs.nz = -1;//-1 signals compressed-column format to CSparse
		Id_cs_.insert({ cluster_i, Id_cs });
	}
	/*! \brief Constructs identity matrices if dense matrices are used (used for calculating inverse covariance matrix) */
	template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
	void ConstructI(gp_id_t cluster_i) {
		T3 I(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);//identity matrix for calculating precision matrix
		I.setIdentity();
		Id_.insert({ cluster_i, I });
	}
	/*!
* \brief Set response variable data (y_) for RE model
	* \param y_data Response variable data (array of length num_data_)
	*/
	void SetY(const double* y_data) {
		if (num_clusters_ == 1 && vecchia_ordering_ == "none") {
			//single cluster, no reordering: data can be mapped in original order
			y_[unique_clusters_[0]] = Eigen::Map<const vec_t>(y_data, num_data_);
			//y_[unique_clusters_[0]] = vec_t(num_data_);
			//y_[unique_clusters_[0]].setZero();
		}
		else {
			//gather the response per cluster in cluster-internal order
			for (const auto& cluster_i : unique_clusters_) {
				y_[cluster_i] = vec_t(num_data_per_cluster_[cluster_i]);//TODO: Is there a more efficient way that avoids copying?
				for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
					y_[cluster_i][j] = y_data[data_indices_per_cluster_[cluster_i][j]];
				}
			}
		}
	}
	/*!
	* \brief Get y_aux = Psi^-1*y
	* \param[out] y_aux Psi^-1*y (=y_aux_). Array needs to be pre-allocated of length num_data_
	*/
	void GetYAux(double* y_aux) {
		CHECK(y_aux_has_been_calculated_);
		if (num_clusters_ == 1 && vecchia_ordering_ == "none") {
			for (int j = 0; j < num_data_; ++j) {
				y_aux[j] = y_aux_[unique_clusters_[0]][j];
			}
		}
		else {
			//scatter cluster-internal order back to original data order
			for (const auto& cluster_i : unique_clusters_) {
				for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
					y_aux[data_indices_per_cluster_[cluster_i][j]] = y_aux_[cluster_i][j];
				}
			}
		}
	}
	/*!
	* \brief Get y_aux = Psi^-1*y
	* \param[out] y_aux Psi^-1*y (=y_aux_). This vector needs to be pre-allocated of length num_data_
	*/
	void GetYAux(vec_t& y_aux) {
		CHECK(y_aux_has_been_calculated_);
		if (num_clusters_ == 1 && vecchia_ordering_ == "none") {
			y_aux = y_aux_[unique_clusters_[0]];
		}
		else {
			for (const auto& cluster_i : unique_clusters_) {
				y_aux(data_indices_per_cluster_[cluster_i]) = y_aux_[cluster_i];
			}
		}
	}
	/*!
\brief Do Cholesky decomposition if sparse matrices are used */
	template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
	void CalcChol(T3& psi, gp_id_t cluster_i, bool analyze_pattern) {
		if (analyze_pattern) {
			//symbolic decomposition: only needed when the sparsity pattern of psi changes
			chol_facts_solve_[cluster_i].analyzePattern(psi);
		}
		chol_facts_solve_[cluster_i].factorize(psi);
		chol_facts_[cluster_i] = chol_facts_solve_[cluster_i].matrixL();
		chol_facts_[cluster_i].makeCompressed();
	}
	/*! \brief Do Cholesky decomposition if dense matrices are used */
	template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
	void CalcChol(T3& psi, gp_id_t cluster_i, bool analyze_pattern) {
		if (analyze_pattern) {
			//dense factorizations have no symbolic phase; flag the unused request
			Log::Warning("Pattern of Cholesky factor is not analyzed when dense matrices are used.");
		}
		chol_facts_solve_[cluster_i].compute(psi);
		chol_facts_[cluster_i] = chol_facts_solve_[cluster_i].matrixL();
	}
	/*! \brief Calculate Psi^(-1) if sparse matrices are used */
	template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
	void CalcPsiInv(T3& psi_inv, gp_id_t cluster_i) {
		//Using CSparse function 'cs_spsolve'
		cs L_cs = cs();//Prepare LHS: a cs view aliasing the Eigen Cholesky factor's buffers
		L_cs.nzmax = (int)chol_facts_[cluster_i].nonZeros();
		L_cs.m = num_data_per_cluster_[cluster_i];
		L_cs.n = num_data_per_cluster_[cluster_i];
		L_cs.p = reinterpret_cast<csi*>(chol_facts_[cluster_i].outerIndexPtr());
		L_cs.i = reinterpret_cast<csi*>(chol_facts_[cluster_i].innerIndexPtr());
		L_cs.x = chol_facts_[cluster_i].valuePtr();
		L_cs.nz = -1;//compressed-column format
		sp_mat_t L_inv;
		//solve L * L_inv = I with sparse RHS, then Psi^(-1) = L_inv^T * L_inv
		sp_Lower_sp_RHS_cs_solve(&L_cs, &Id_cs_[cluster_i], L_inv, true);
		psi_inv = L_inv.transpose() * L_inv;
		////Version 2: doing sparse solving "by hand" but ignoring sparse RHS
		//const double* val = chol_facts_[cluster_i].valuePtr();
		//const int* row_idx = chol_facts_[cluster_i].innerIndexPtr();
		//const int* col_ptr = chol_facts_[cluster_i].outerIndexPtr();
		//den_mat_t L_inv_dens = den_mat_t(Id_[cluster_i]);
		//for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
		//	sp_L_solve(val, row_idx, col_ptr, num_data_per_cluster_[cluster_i], L_inv_dens.data() + j * num_data_per_cluster_[cluster_i]);
		//}
		//const sp_mat_t L_inv = L_inv_dens.sparseView();
		//psi_inv = L_inv.transpose() * L_inv;
		////Version 1
		//cpsi_inv = chol_facts_solve_[cluster_i].solve(Id_[cluster_i]);
	}
	/*! \brief Calculate Psi^(-1) if dense matrices are used */
	template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
	void CalcPsiInv(T3& psi_inv, gp_id_t cluster_i) {
		////Version 1
		//psi_inv = chol_facts_solve_[cluster_i].solve(Id_[cluster_i]);
		//Version 2: solving by hand; each column of L_inv is obtained by a triangular solve
		T3 L_inv = Id_[cluster_i];
#pragma omp parallel for schedule(static)//TODO: maybe sometimes faster without parallelization?
		for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
			L_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], L_inv.data() + j * num_data_per_cluster_[cluster_i]);
		}
		//chol_facts_[cluster_i].triangularView<Eigen::Lower>().solveInPlace(L_inv); //slower
		psi_inv = L_inv.transpose() * L_inv;
		// Using dpotri from LAPACK does not work since LAPACK is not installed
		//int info = 0;
		//int n = num_data_per_cluster_[cluster_i];
		//int lda = num_data_per_cluster_[cluster_i];
		//char* uplo = "L";
		//den_mat_t M = chol_facts_[cluster_i];
		//BLASFUNC(dpotri)(uplo, &n, M.data(), &lda, &info);
	}
	/*! \brief Calculate Psi^(-0.5)H if dense matrices are used. Used in 'NewtonUpdateLeafValues' */
	template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
	void CalcPsiInvSqrtH(T3& PsiInvSqrtH, sp_mat_t& H, gp_id_t cluster_i) {
		//PsiInvSqrtH = L^(-1) * H, obtained column-by-column via triangular solves
		PsiInvSqrtH = den_mat_t(H);
#pragma omp parallel for schedule(static)
		for (int j = 0; j < H.cols(); ++j) {
			L_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], PsiInvSqrtH.data() + j * num_data_per_cluster_[cluster_i]);
		}
	}
	/*! \brief Calculate Psi^(-0.5)H if sparse matrices are used.
Used in 'NewtonUpdateLeafValues' */ template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr > void CalcPsiInvSqrtH(T3& PsiInvSqrtH, sp_mat_t& H, gp_id_t cluster_i) { //Using CSparse function 'cs_spsolve' eigen_sp_Lower_sp_RHS_cs_solve(chol_facts_[cluster_i], H, PsiInvSqrtH, true); } ///*! //* \brief Caclulate X^TPsi^(-1)X //* \param X Covariate data matrix X //* \param[out] XT_psi_inv_X X^TPsi^(-1)X //*/ // template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr > // void CalcXTPsiInvX(const den_mat_t& X, den_mat_t& XT_psi_inv_X) { // den_mat_t BX; // if (num_clusters_ == 1) { // gp_id_t cluster0 = unique_clusters_[0]; // if (vecchia_approx_) { // BX = B_[cluster0] * X; // XT_psi_inv_X = BX.transpose() * D_inv_[cluster0] * BX; // } // else { // BX = X; // #pragma omp parallel for schedule(static) // for (int j = 0; j < num_data_per_cluster_[cluster0]; ++j) { // L_solve(chol_facts_[cluster0].data(), num_data_per_cluster_[cluster0], BX.data() + j * num_data_per_cluster_[cluster0]); // } // XT_psi_inv_X = BX.transpose() * BX; // } // } // else { // XT_psi_inv_X = den_mat_t(X.cols(), X.cols()); // XT_psi_inv_X.setZero(); // for (const auto& cluster_i : unique_clusters_) { // if (vecchia_approx_) { // BX = B_[cluster_i] * X(data_indices_per_cluster_[cluster_i], Eigen::all); // XT_psi_inv_X += BX.transpose() * D_inv_[cluster_i] * BX; // } // else { // BX = X(data_indices_per_cluster_[cluster_i], Eigen::all); // #pragma omp parallel for schedule(static) // for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) { // L_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], BX.data() + j * num_data_per_cluster_[cluster_i]); // } // XT_psi_inv_X += (BX.transpose() * BX); // } // } // } // } // //same for sparse matrices // template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr > // void CalcXTPsiInvX(const den_mat_t& X, den_mat_t& 
XT_psi_inv_X) { // den_mat_t BX; // if (num_clusters_ == 1) { // gp_id_t cluster0 = unique_clusters_[0]; // if (vecchia_approx_) { // BX = B_[cluster0] * X; // XT_psi_inv_X = BX.transpose() * D_inv_[cluster0] * BX; // } // else { // BX = X; // #pragma omp parallel for schedule(static) // for (int j = 0; j < num_data_per_cluster_[cluster0]; ++j) { // sp_L_solve(chol_facts_[cluster0].valuePtr(), chol_facts_[cluster0].innerIndexPtr(), chol_facts_[cluster0].outerIndexPtr(), // num_data_per_cluster_[cluster0], BX.data() + j * num_data_per_cluster_[cluster0]); // } // XT_psi_inv_X = BX.transpose() * BX; // } // } // else { // XT_psi_inv_X = den_mat_t(X.cols(), X.cols()); // XT_psi_inv_X.setZero(); // for (const auto& cluster_i : unique_clusters_) { // if (vecchia_approx_) { // BX = B_[cluster_i] * X(data_indices_per_cluster_[cluster_i], Eigen::all); // XT_psi_inv_X += BX.transpose() * D_inv_[cluster_i] * BX; // } // else { // BX = X(data_indices_per_cluster_[cluster_i], Eigen::all); // #pragma omp parallel for schedule(static) // for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) { // sp_L_solve(chol_facts_[cluster_i].valuePtr(), chol_facts_[cluster_i].innerIndexPtr(), chol_facts_[cluster_i].outerIndexPtr(), // num_data_per_cluster_[cluster_i], BX.data() + j * num_data_per_cluster_[cluster_i]); // } // XT_psi_inv_X += (BX.transpose() * BX); // } // } // } // } /*! 
* \brief Calculate X^TPsi^(-1)X
	* \param X Covariate data matrix X
	* \param[out] XT_psi_inv_X X^TPsi^(-1)X
	*/
	void CalcXTPsiInvX(const den_mat_t& X, den_mat_t& XT_psi_inv_X) {
		if (num_clusters_ == 1 && vecchia_ordering_ == "none") {
			//single cluster, no reordering: X can be used as-is
			if (vecchia_approx_) {
				//Psi^(-1) = B^T * D^(-1) * B for the Vecchia approximation
				den_mat_t BX = B_[unique_clusters_[0]] * X;
				XT_psi_inv_X = BX.transpose() * D_inv_[unique_clusters_[0]] * BX;
			}
			else {
				XT_psi_inv_X = X.transpose() * chol_facts_solve_[unique_clusters_[0]].solve(X);
			}
		}
		else {
			//accumulate contributions over independent cluster realizations
			XT_psi_inv_X = den_mat_t(X.cols(), X.cols());
			XT_psi_inv_X.setZero();
			den_mat_t BX;
			for (const auto& cluster_i : unique_clusters_) {
				if (vecchia_approx_) {
					BX = B_[cluster_i] * X(data_indices_per_cluster_[cluster_i], Eigen::all);
					XT_psi_inv_X += BX.transpose() * D_inv_[cluster_i] * BX;
				}
				else {
					XT_psi_inv_X += ((den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all)).transpose() * chol_facts_solve_[cluster_i].solve((den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all));
				}
			}
		}
	}
	/*!
	* \brief Initialize data structures for handling independent realizations of the Gaussian processes. Answers written on arguments.
* \param num_data Number of data points * \param cluster_ids_data IDs / labels indicating independent realizations of Gaussian processes (same values = same process realization) * \param[out] num_data_per_cluster Keys: labels of independent clusters, values: number of data points per independent realization * \param[out] data_indices_per_cluster Keys: labels of independent clusters, values: vectors with indices for data points that belong to the every cluster * \param[out] unique_clusters Unique labels of independent realizations * \param[out] num_clusters Number of independent clusters */ void SetUpGPIds(data_size_t num_data, const gp_id_t* cluster_ids_data, std::map<gp_id_t, int>& num_data_per_cluster, std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster, std::vector<gp_id_t>& unique_clusters, data_size_t& num_clusters) { if (cluster_ids_data != nullptr) { for (int i = 0; i < num_data; ++i) { if (num_data_per_cluster.find(cluster_ids_data[i]) == num_data_per_cluster.end()) {//first occurrence of cluster_ids_data[i] unique_clusters.push_back(cluster_ids_data[i]); num_data_per_cluster.insert({ cluster_ids_data[i], 1 }); std::vector<int> id; id.push_back(i); data_indices_per_cluster.insert({ cluster_ids_data[i], id }); } else { num_data_per_cluster[cluster_ids_data[i]] += 1; data_indices_per_cluster[cluster_ids_data[i]].push_back(i); } } num_clusters = (data_size_t)unique_clusters.size(); } else { unique_clusters.push_back(0); num_data_per_cluster.insert({ 0, num_data }); num_clusters = 1; std::vector<int> gp_id_vec(num_data); for (int i = 0; i < num_data; ++i) { gp_id_vec[i] = i; } data_indices_per_cluster.insert({ 0, gp_id_vec }); } } /*! 
* \brief Convert characters in 'const char* re_group_data' to matrix (num_re_group x num_data) with strings of group labels * \param num_data Number of data points * \param num_re_group Number of grouped random effects * \param re_group_data Labels of group levels for the grouped random effects in column-major format (i.e. first the levels for the first effect, then for the second, etc.). Every group label needs to end with the null character '\0' * \param[out] Matrix of dimension num_re_group x num_data with strings of group labels for levels of grouped random effects */ void ConvertCharToStringGroupLevels(data_size_t num_data, data_size_t num_re_group, const char* re_group_data, std::vector<std::vector<string_t>>& re_group_levels) { int char_start = 0; for (int ire = 0; ire < num_re_group; ++ire) {//TODO: catch / report potential error if format of re_group_data is not correct for (int id = 0; id < num_data; ++id) { int number_chars = 0; while (re_group_data[char_start + number_chars] != '\0') { number_chars++; } re_group_levels[ire][id] = std::string(re_group_data + char_start); char_start += number_chars + 1; } } } /*! 
* \brief Initialize individual component models and collect them in a container
	* \param num_data Number of data points
	* \param num_re_group Number of grouped random effects
	* \param data_indices_per_cluster Keys: Labels of independent realizations of REs/GPs, values: vectors with indices for data points
	* \param cluster_i Index / label of the realization of the Gaussian process for which the components should be constructed
	* \param re_group_levels Group levels for every grouped random effect
	* \param num_data_per_cluster Keys: Labels of independent realizations of REs/GPs, values: number of data points per independent realization
	* \param num_re_group_rand_coef Number of grouped random coefficients
	* \param re_group_rand_coef_data Covariate data for grouped random coefficients
	* \param ind_effect_group_rand_coef Indices that relate every random coefficients to a "base" intercept grouped random effect. Counting starts at 1.
	* \param num_gp Number of Gaussian processes (intercept only, random coefficients not counting)
	* \param gp_coords_data Coordinates (features) for Gaussian process
	* \param dim_gp_coords Dimension of the coordinates (=number of features) for Gaussian process
	* \param gp_rand_coef_data Covariate data for Gaussian process random coefficients
	* \param num_gp_rand_coef Number of Gaussian process random coefficients
	* \param cov_fct Type of covariance (kernel) function for Gaussian processes
	* \param cov_fct_shape Shape parameter of covariance function (=smoothness parameter for Matern covariance)
	* \param ind_intercept_gp Index in the vector of random effect components (in the values of 're_comps_') of the intercept GP associated with the random coefficient GPs
	* \param[out] re_comps_cluster_i Container that collects the individual component models
	*/
	void CreateREComponents(data_size_t num_data, data_size_t num_re_group,
		std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster, gp_id_t cluster_i,
		std::vector<std::vector<string_t>>& re_group_levels,
		std::map<gp_id_t, int>& num_data_per_cluster, data_size_t num_re_group_rand_coef,
		const double* re_group_rand_coef_data, std::vector<int>& ind_effect_group_rand_coef,
		data_size_t num_gp, const double* gp_coords_data, int dim_gp_coords,
		const double* gp_rand_coef_data, data_size_t num_gp_rand_coef,
		const string_t cov_fct, double cov_fct_shape, int ind_intercept_gp,
		std::vector<std::shared_ptr<RECompBase<T1>>>& re_comps_cluster_i) {
		//Grouped REs
		if (num_re_group > 0) {
			for (int j = 0; j < num_re_group; ++j) {
				//gather the group labels of this cluster's data points, in cluster-internal order
				std::vector<re_group_t> group_data;
				for (const auto& id : data_indices_per_cluster[cluster_i]) {
					group_data.push_back(re_group_levels[j][id]);//group_data_.push_back(std::string(re_group_data[j * num_data_ + id]));
				}
				re_comps_cluster_i.push_back(std::shared_ptr<RECompGroup<T1>>(new RECompGroup<T1>(group_data)));
			}
			//Random slopes
			if (num_re_group_rand_coef > 0) {
				for (int j = 0; j < num_re_group_rand_coef; ++j) {
					std::vector<double> rand_coef_data;
					for (const auto& id : data_indices_per_cluster[cluster_i]) {
						rand_coef_data.push_back(re_group_rand_coef_data[j * num_data + id]);
					}
					std::shared_ptr<RECompGroup<T1>> re_comp = std::dynamic_pointer_cast<RECompGroup<T1>>(re_comps_cluster_i[ind_effect_group_rand_coef[j] - 1]);//Subtract -1 since ind_effect_group_rand_coef[j] starts counting at 1 not 0
					//the slope component reuses the grouping of its base intercept component
					re_comps_cluster_i.push_back(std::shared_ptr<RECompGroup<T1>>(new RECompGroup<T1>(re_comp->group_data_, re_comp->map_group_label_index_, re_comp->num_group_, rand_coef_data)));
				}
			}
		}
		//GPs
		if (num_gp > 0) {
			//gather coordinates in column-major order for this cluster's data points
			std::vector<double> gp_coords;
			for (int j = 0; j < dim_gp_coords; ++j) {
				for (const auto& id : data_indices_per_cluster[cluster_i]) {
					gp_coords.push_back(gp_coords_data[j * num_data + id]);
				}
			}
			den_mat_t gp_coords_mat = Eigen::Map<den_mat_t>(gp_coords.data(), num_data_per_cluster[cluster_i], dim_gp_coords);
			re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T1>>(new RECompGP<T1>(gp_coords_mat, cov_fct, cov_fct_shape, true)));
			//Random slopes
			if (num_gp_rand_coef > 0)
			{
				for (int j = 0; j < num_gp_rand_coef; ++j) {
					std::vector<double> rand_coef_data;
					for (const auto& id : data_indices_per_cluster[cluster_i]) {
						rand_coef_data.push_back(gp_rand_coef_data[j * num_data + id]);
					}
					std::shared_ptr<RECompGP<T1>> re_comp = std::dynamic_pointer_cast<RECompGP<T1>>(re_comps_cluster_i[ind_intercept_gp]);
					//the slope GP reuses the distance matrix of the intercept GP
					re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T1>>(new RECompGP<T1>(re_comp->dist_, re_comp->has_Z_, &re_comp->Z_, rand_coef_data, cov_fct, cov_fct_shape)));
				}
			}
		}
	}
	/*!
	* \brief Initialize individual component models and collect them in a container when the Vecchia approximation is used
	* \param num_data Number of data points
	* \param data_indices_per_cluster Keys: Labels of independent realizations of REs/GPs, values: vectors with indices for data points
	* \param cluster_i Index / label of the realization of the Gaussian process for which the components should be constructed
	* \param num_data_per_cluster Keys: Labels of independent realizations of REs/GPs, values: number of data points per independent realization
	* \param gp_coords_data Coordinates (features) for Gaussian process
	* \param dim_gp_coords Dimension of the coordinates (=number of features) for Gaussian process
	* \param gp_rand_coef_data Covariate data for Gaussian process random coefficients
	* \param num_gp_rand_coef Number of Gaussian process random coefficients
	* \param cov_fct Type of covariance (kernel) function for Gaussian processes
	* \param cov_fct_shape Shape parameter of covariance function (=smoothness parameter for Matern covariance)
	* \param[out] re_comps_cluster_i Container that collects the individual component models
	* \param[out] nearest_neighbors_cluster_i Collects indices of nearest neighbors
	* \param[out] dist_obs_neighbors_cluster_i Distances between locations and their nearest neighbors
	* \param[out] dist_between_neighbors_cluster_i Distances between nearest neighbors for all locations
	* \param[out] entries_init_B_cluster_i Triplets for initializing
the matrices B * \param[out] entries_init_B_grad_cluster_i Triplets for intializing the matrices B_grad * \param[out] z_outer_z_obs_neighbors_cluster_i Outer product of covariate vector at observations and neighbors with itself for random coefficients. First index = data point i, second index = GP number j * \param vecchia_ordering Ordering used in the Vecchia approximation. "none" = no ordering, "random" = random ordering * \param num_neighbors The number of neighbors used in the Vecchia approximation */ void CreateREComponentsVecchia(data_size_t num_data, std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster, gp_id_t cluster_i, std::map<gp_id_t, int>& num_data_per_cluster, const double* gp_coords_data, int dim_gp_coords, const double* gp_rand_coef_data, data_size_t num_gp_rand_coef, const string_t cov_fct, double cov_fct_shape, std::vector<std::shared_ptr<RECompBase<T1>>>& re_comps_cluster_i, std::vector<std::vector<int>>& nearest_neighbors_cluster_i, std::vector<den_mat_t>& dist_obs_neighbors_cluster_i, std::vector<den_mat_t>& dist_between_neighbors_cluster_i, std::vector<Triplet_t >& entries_init_B_cluster_i, std::vector<Triplet_t >& entries_init_B_grad_cluster_i, std::vector<std::vector<den_mat_t>>& z_outer_z_obs_neighbors_cluster_i, string_t vecchia_ordering = "none", int num_neighbors = 30) { if (vecchia_ordering == "random") { unsigned seed = 0; std::shuffle(data_indices_per_cluster[cluster_i].begin(), data_indices_per_cluster[cluster_i].end(), std::default_random_engine(seed)); } std::vector<double> gp_coords; for (int j = 0; j < dim_gp_coords; ++j) { for (const auto& id : data_indices_per_cluster[cluster_i]) { gp_coords.push_back(gp_coords_data[j * num_data + id]); } } den_mat_t gp_coords_mat = Eigen::Map<den_mat_t>(gp_coords.data(), num_data_per_cluster[cluster_i], dim_gp_coords); re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T1>>(new RECompGP<T1>(gp_coords_mat, cov_fct, cov_fct_shape, false))); 
find_nearest_neighbors_Veccia_fast(gp_coords_mat, num_data_per_cluster[cluster_i], num_neighbors, nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, -1); for (int i = 0; i < num_data_per_cluster[cluster_i]; ++i) { for (int j = 0; j < (int)nearest_neighbors_cluster_i[i].size(); ++j) { entries_init_B_cluster_i.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][j], 0.)); entries_init_B_grad_cluster_i.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][j], 0.)); } entries_init_B_cluster_i.push_back(Triplet_t(i, i, 1.));//Put 1's on the diagonal since B = I - A } //Random coefficients if (num_gp_rand_coef > 0) { for (int j = 0; j < num_gp_rand_coef; ++j) { std::vector<double> rand_coef_data; for (const auto& id : data_indices_per_cluster[cluster_i]) { rand_coef_data.push_back(gp_rand_coef_data[j * num_data + id]); } re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T1>>(new RECompGP<T1>(rand_coef_data, cov_fct, cov_fct_shape))); //save random coefficient data in the form ot outer product matrices #pragma omp for schedule(static) for (int i = 0; i < num_data_per_cluster[cluster_i]; ++i) { z_outer_z_obs_neighbors_cluster_i[i] = std::vector<den_mat_t>(num_gp_rand_coef); int dim_z = (i == 0) ? 1 : ((int)nearest_neighbors_cluster_i[i].size() + 1); vec_t coef_vec(dim_z); coef_vec(0) = rand_coef_data[i]; if (i > 0) { for (int ii = 1; ii < dim_z; ++ii) { coef_vec(ii) = rand_coef_data[nearest_neighbors_cluster_i[i][ii - 1]]; } } //Log::Info("coef_vec * coef_vec.transpose(): %f", (coef_vec * coef_vec.transpose())(0,0)); //Log::Info("re_comps_[cluster_i] %s ", typeid(re_comps_[cluster_i]).name()); z_outer_z_obs_neighbors_cluster_i[i][j] = coef_vec * coef_vec.transpose(); } } } } /*! 
* \brief Set the covariance parameters of the components * \param cov_pars Covariance parameters */ void SetCovParsComps(const vec_t& cov_pars) { CHECK(cov_pars.size() == num_cov_par_); sigma2_ = cov_pars[0]; for (const auto& cluster_i : unique_clusters_) { for (int j = 0; j < num_comps_total_; ++j) { //const std::vector<double> pars = std::vector<double>(cov_pars.begin() + ind_par_[j] + 1, cov_pars.begin() + ind_par_[j + 1] + 1); const vec_t pars = cov_pars.segment(ind_par_[j] + 1, ind_par_[j + 1] - ind_par_[j]); re_comps_[cluster_i][j]->SetCovPars(pars); } } } /*! * \brief Transform the covariance parameters to the scake on which the MLE is found * \param cov_pars_trans Covariance parameters * \param[out] pars_trans Transformed covariance parameters */ void TransformCovPars(const vec_t& cov_pars, vec_t& cov_pars_trans) { CHECK(cov_pars.size() == num_cov_par_); cov_pars_trans = vec_t(num_cov_par_); cov_pars_trans[0] = cov_pars[0]; for (int j = 0; j < num_comps_total_; ++j) { const vec_t pars = cov_pars.segment(ind_par_[j] + 1, ind_par_[j + 1] - ind_par_[j]); vec_t pars_trans = pars; re_comps_[unique_clusters_[0]][j]->TransformCovPars(cov_pars[0], pars, pars_trans); cov_pars_trans.segment(ind_par_[j] + 1, ind_par_[j + 1] - ind_par_[j]) = pars_trans; } } /*! * \brief Back-transform the covariance parameters to the original scale * \param cov_pars Covariance parameters * \param[out] cov_pars_orig Back-transformed, original covariance parameters */ void TransformBackCovPars(const vec_t& cov_pars, vec_t& cov_pars_orig) { CHECK(cov_pars.size() == num_cov_par_); cov_pars_orig = vec_t(num_cov_par_); cov_pars_orig[0] = cov_pars[0]; for (int j = 0; j < num_comps_total_; ++j) { const vec_t pars = cov_pars.segment(ind_par_[j] + 1, ind_par_[j + 1] - ind_par_[j]); vec_t pars_orig = pars; re_comps_[unique_clusters_[0]][j]->TransformBackCovPars(cov_pars[0], pars, pars_orig); cov_pars_orig.segment(ind_par_[j] + 1, ind_par_[j + 1] - ind_par_[j]) = pars_orig; } } /*! 
* \brief Calculate covariance matrices of the components */ void CalcSigmaComps() { for (const auto& cluster_i : unique_clusters_) { for (int j = 0; j < num_comps_total_; ++j) { re_comps_[cluster_i][j]->CalcSigma(); } } } /*! * \brief Calculate matrices A and D_inv as well as their derivatives for the Vecchia approximation for one cluster (independent realization of GP) * \param num_data_cluster_i Number of data points * \param calc_gradient If true, the gradient also be calculated (only for Vecchia approximation) * \param re_comps_cluster_i Container that collects the individual component models * \param nearest_neighbors_cluster_i Collects indices of nearest neighbors * \param dist_obs_neighbors_cluster_i Distances between locations and their nearest neighbors * \param dist_between_neighbors_cluster_i Distances between nearest neighbors for all locations * \param entries_init_B_cluster_i Triplets for intializing the matrices B * \param entries_init_B_grad_cluster_i Triplets for intializing the matrices B_grad * \param z_outer_z_obs_neighbors_cluster_i Outer product of covariate vector at observations and neighbors with itself for random coefficients. First index = data point i, second index = GP number j * \param[out] B_cluster_i Matrix A = I - B (= Cholesky factor of inverse covariance) for Vecchia approximation * \param[out] D_inv_cluster_i Diagonal matrices D^-1 for Vecchia approximation * \param[out] B_grad_cluster_i Derivatives of matrices A ( = derivative of matrix -B) for Vecchia approximation * \param[out] D_grad_cluster_i Derivatives of matrices D for Vecchia approximation * \param transf_scale If true, the derivatives are taken on the transformed scale otherwise on the original scale. 
Default = true
 * \param nugget_var Nugget effect variance parameter sigma^2 (used only if transf_scale = false to transform back)
 * \param calc_gradient_nugget If true, derivatives are also taken with respect to the nugget / noise variance
 */
void CalcCovFactorVecchia(int num_data_cluster_i, bool calc_gradient,//TODO: make arguments const
	std::vector<std::shared_ptr<RECompBase<T1>>>& re_comps_cluster_i,
	std::vector<std::vector<int>>& nearest_neighbors_cluster_i,
	std::vector<den_mat_t>& dist_obs_neighbors_cluster_i,
	std::vector<den_mat_t>& dist_between_neighbors_cluster_i,
	std::vector<Triplet_t >& entries_init_B_cluster_i,
	std::vector<Triplet_t >& entries_init_B_grad_cluster_i,
	std::vector<std::vector<den_mat_t>>& z_outer_z_obs_neighbors_cluster_i,
	sp_mat_t& B_cluster_i,
	sp_mat_t& D_inv_cluster_i,
	std::vector<sp_mat_t>& B_grad_cluster_i,
	std::vector<sp_mat_t>& D_grad_cluster_i,
	bool transf_scale = true, double nugget_var = 1., bool calc_gradient_nugget = false) {
	//Number of covariance parameters per GP component and total number of gradient matrices
	//(one pair per parameter per GP, plus optionally one for the nugget variance)
	int num_par_comp = re_comps_cluster_i[ind_intercept_gp_]->num_cov_par_;
	int num_par_gp = num_par_comp * num_gp_total_ + calc_gradient_nugget;
	//Initialize matrices B = I - A and D^-1 as well as their derivatives (in order that the code below can be run in parallel)
	B_cluster_i = sp_mat_t(num_data_cluster_i, num_data_cluster_i);//B = I - A
	B_cluster_i.setFromTriplets(entries_init_B_cluster_i.begin(), entries_init_B_cluster_i.end());//Note: 1's are put on the diagonal
	D_inv_cluster_i = sp_mat_t(num_data_cluster_i, num_data_cluster_i);//D^-1. Note: we first calculate D, and then take the inverse below
	D_inv_cluster_i.setIdentity();//Put 1's on the diagonal for nugget effect (entries are not overriden but added below)
	if (!transf_scale) {
		D_inv_cluster_i.diagonal().array() *= nugget_var;//nugget effect is not 1 if not on transformed scale
	}
	if (calc_gradient) {
		B_grad_cluster_i = std::vector<sp_mat_t>(num_par_gp);//derivative of B = derivative of (-A)
		D_grad_cluster_i = std::vector<sp_mat_t>(num_par_gp);//derivative of D
		for (int ipar = 0; ipar < num_par_gp; ++ipar) {
			B_grad_cluster_i[ipar] = sp_mat_t(num_data_cluster_i, num_data_cluster_i);
			B_grad_cluster_i[ipar].setFromTriplets(entries_init_B_grad_cluster_i.begin(), entries_init_B_grad_cluster_i.end());
			D_grad_cluster_i[ipar] = sp_mat_t(num_data_cluster_i, num_data_cluster_i);
			D_grad_cluster_i[ipar].setIdentity();//Put 1 on the diagonal but entries are overriden below
		}
	}//end initialization
	//Every data point i is handled independently given its neighbors, hence the parallel loop
#pragma omp parallel for schedule(static)
	for (int i = 0; i < num_data_cluster_i; ++i) {
		int num_nn = (int)nearest_neighbors_cluster_i[i].size();
		//calculate covariance matrices between observations and neighbors and among neighbors as well as their derivatives
		den_mat_t cov_mat_obs_neighbors(1, num_nn);
		den_mat_t cov_mat_between_neighbors(num_nn, num_nn);
		std::vector<den_mat_t> cov_grad_mats_obs_neighbors(num_par_gp);//covariance matrix plus derivative wrt to every parameter
		std::vector<den_mat_t> cov_grad_mats_between_neighbors(num_par_gp);
		if (i > 0) {
			for (int j = 0; j < num_gp_total_; ++j) {
				int ind_first_par = j * num_par_comp;//index of first parameter (variance) of component j in gradient vectors
				if (j == 0) {
					re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
						cov_mat_obs_neighbors, cov_grad_mats_obs_neighbors[ind_first_par],
						cov_grad_mats_obs_neighbors[ind_first_par + 1], calc_gradient, transf_scale, nugget_var);//write on matrices directly for first GP component
					re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
						cov_mat_between_neighbors, cov_grad_mats_between_neighbors[ind_first_par],
						cov_grad_mats_between_neighbors[ind_first_par + 1], calc_gradient, transf_scale, nugget_var);
				}
				else {//random coefficient GPs
					den_mat_t cov_mat_obs_neighbors_j;
					den_mat_t cov_mat_between_neighbors_j;
					re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
						cov_mat_obs_neighbors_j, cov_grad_mats_obs_neighbors[ind_first_par],
						cov_grad_mats_obs_neighbors[ind_first_par + 1], calc_gradient, transf_scale, nugget_var);
					re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
						cov_mat_between_neighbors_j, cov_grad_mats_between_neighbors[ind_first_par],
						cov_grad_mats_between_neighbors[ind_first_par + 1], calc_gradient, transf_scale, nugget_var);
					//multiply by coefficient matrix
					cov_mat_obs_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();
					cov_mat_between_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
					cov_mat_obs_neighbors += cov_mat_obs_neighbors_j;
					cov_mat_between_neighbors += cov_mat_between_neighbors_j;
					if (calc_gradient) {
						cov_grad_mats_obs_neighbors[ind_first_par].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();
						cov_grad_mats_obs_neighbors[ind_first_par + 1].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();
						cov_grad_mats_between_neighbors[ind_first_par].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
						cov_grad_mats_between_neighbors[ind_first_par + 1].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
					}
				}
			}//end loop over components j
		}//end if (i > 0)
		//Calculate matrices B and D as well as their derivatives
		//1. add first summand of matrix D (ZCZ^T_{ii}) and its derivatives
		for (int j = 0; j < num_gp_total_; ++j) {
			double d_comp_j = re_comps_cluster_i[ind_intercept_gp_ + j]->cov_pars_[0];
			if (!transf_scale) {
				d_comp_j *= nugget_var;
			}
			if (j > 0) {//random coefficient
				d_comp_j *= z_outer_z_obs_neighbors_cluster_i[i][j - 1](0, 0);
			}
			D_inv_cluster_i.coeffRef(i, i) += d_comp_j;
			if (calc_gradient) {
				if (transf_scale) {
					D_grad_cluster_i[j * num_par_comp].coeffRef(i, i) = d_comp_j;//derivative of the covariance function wrt the variance. derivative of the covariance function wrt to range is zero on the diagonal
				}
				else {
					D_grad_cluster_i[j * num_par_comp].coeffRef(i, i) = 1.;//1's on the diagonal on the orignal scale
				}
			}
		}
		if (calc_gradient && calc_gradient_nugget) {
			D_grad_cluster_i[num_par_gp - 1].coeffRef(i, i) = 1.;
		}
		//2. remaining terms
		if (i > 0) {
			if (transf_scale) {
				cov_mat_between_neighbors.diagonal().array() += 1.;//add nugget effect
			}
			else {
				cov_mat_between_neighbors.diagonal().array() += nugget_var;
			}
			den_mat_t A_i(1, num_nn);
			den_mat_t cov_mat_between_neighbors_inv;
			den_mat_t A_i_grad_sigma2;
			if (calc_gradient) {
				//The explicit inverse is needed below for the gradient terms
				den_mat_t I(num_nn, num_nn);
				I.setIdentity();
				cov_mat_between_neighbors_inv = cov_mat_between_neighbors.llt().solve(I);
				A_i = cov_mat_obs_neighbors * cov_mat_between_neighbors_inv;
				if (calc_gradient_nugget) {
					A_i_grad_sigma2 = -A_i * cov_mat_between_neighbors_inv;
				}
			}
			else {
				//No gradient: a single triangular solve suffices
				A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose();
			}
			for (int inn = 0; inn < num_nn; ++inn) {
				B_cluster_i.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) = -A_i(0, inn);
			}
			D_inv_cluster_i.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
			if (calc_gradient) {
				den_mat_t A_i_grad(1, num_nn);
				for (int j = 0; j < num_gp_total_; ++j) {
					int ind_first_par = j * num_par_comp;
					for (int ipar = 0; ipar < num_par_comp; ++ipar) {
						A_i_grad = (cov_grad_mats_obs_neighbors[ind_first_par + ipar] * cov_mat_between_neighbors_inv) -
							(cov_mat_obs_neighbors * cov_mat_between_neighbors_inv * cov_grad_mats_between_neighbors[ind_first_par + ipar] * cov_mat_between_neighbors_inv);
						for (int inn = 0; inn < num_nn; ++inn) {
							B_grad_cluster_i[ind_first_par + ipar].coeffRef(i, nearest_neighbors_cluster_i[i][inn]) = -A_i_grad(0, inn);
						}
						if (ipar == 0) {
							D_grad_cluster_i[ind_first_par + ipar].coeffRef(i, i) -= ((A_i_grad * cov_mat_obs_neighbors.transpose())(0, 0) +
								(A_i * cov_grad_mats_obs_neighbors[ind_first_par + ipar].transpose())(0, 0));//add to derivative of diagonal elements for marginal variance
						}
						else {
							D_grad_cluster_i[ind_first_par + ipar].coeffRef(i, i) = -((A_i_grad * cov_mat_obs_neighbors.transpose())(0, 0) +
								(A_i * cov_grad_mats_obs_neighbors[ind_first_par + ipar].transpose())(0, 0));//don't add to existing values since derivative of diagonal is zero for range
						}
					}
				}
				if (calc_gradient_nugget) {
					for (int inn = 0; inn < num_nn; ++inn) {
						B_grad_cluster_i[num_par_gp - 1].coeffRef(i, nearest_neighbors_cluster_i[i][inn]) = -A_i_grad_sigma2(0, inn);
					}
					D_grad_cluster_i[num_par_gp - 1].coeffRef(i, i) -= (A_i_grad_sigma2 * cov_mat_obs_neighbors.transpose())(0, 0);
				}
			}//end calc_gradient
		}//end if i > 0
		D_inv_cluster_i.coeffRef(i, i) = 1. / D_inv_cluster_i.coeffRef(i, i);
	}//end loop over data i
}
/*!
 * \brief Create the covariance matrix Psi and factorize it (either calculate a Cholesky factor or the inverse covariance matrix)
 * \param calc_gradient If true, the gradient is also calculated (only for Vecchia approximation)
 * \param transf_scale If true, the derivatives are taken on the transformed scale otherwise on the original scale.
Default = true (only for Vecchia approximation)
 * \param nugget_var Nugget effect variance parameter sigma^2 (used only if transf_scale = false to transform back, normally this is equal to one, since the variance parameter is modelled separately and factored out)
 * \param calc_gradient_nugget If true, derivatives are also taken with respect to the nugget / noise variance (only for Vecchia approximation)
 */
void CalcCovFactor(bool calc_gradient = false, bool transf_scale = true, double nugget_var = 1., bool calc_gradient_nugget = false) {
	if (vecchia_approx_) {
		//Vecchia approximation: calculate B = I - A and D^-1 (and gradients) per cluster
		for (const auto& cluster_i : unique_clusters_) {
			int num_data_cl_i = num_data_per_cluster_[cluster_i];
			CalcCovFactorVecchia(num_data_cl_i, calc_gradient, re_comps_[cluster_i], nearest_neighbors_[cluster_i],
				dist_obs_neighbors_[cluster_i], dist_between_neighbors_[cluster_i],
				entries_init_B_[cluster_i], entries_init_B_grad_[cluster_i],
				z_outer_z_obs_neighbors_[cluster_i],
				B_[cluster_i], D_inv_[cluster_i], B_grad_[cluster_i], D_grad_[cluster_i],
				transf_scale, nugget_var, calc_gradient_nugget);
		}
	}
	else {
		//Standard computation: Psi = I + sum_j Z_j Sigma_j Z_j^T, then Cholesky factorization
		CalcSigmaComps();
		for (const auto& cluster_i : unique_clusters_) {
			T1 psi;
			psi.resize(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);
			psi.setIdentity();
			for (int j = 0; j < num_comps_total_; ++j) {
				psi += (*(re_comps_[cluster_i][j]->GetZSigmaZt()));
			}
			CalcChol<T1>(psi, cluster_i, do_symbolic_decomposition_);
		}
		do_symbolic_decomposition_ = false;//Symbolic decomposition done only once (if sparse matrices are used)
	}
}
/*!
 * \brief Calculate Psi^-1*y (=y_aux_) for RE model
 * \param marg_variance The marginal variance. Default = 1.
 */
void CalcYAux(double marg_variance = 1.) {
	for (const auto& cluster_i : unique_clusters_) {
		if (y_.find(cluster_i) == y_.end()) {
			Log::Fatal("Response variable data (y_) for random effects model has not been set. Call 'SetY' first.");
		}
		if (vecchia_approx_) {
			if (B_.find(cluster_i) == B_.end()) {
				Log::Fatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first.");
			}
			//Psi^-1 = B^T D^-1 B for the Vecchia approximation
			y_aux_[cluster_i] = B_[cluster_i].transpose() * D_inv_[cluster_i] * B_[cluster_i] * y_[cluster_i];
		}//end Vecchia
		else {
			if (chol_facts_.find(cluster_i) == chol_facts_.end()) {
				Log::Fatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first.");
			}
			//Version 1: let Eigen do the computation (a manual 'do-it-yourself' triangular solve via
			//sp_L_solve / sp_L_t_solve on the Cholesky factor would be an alternative)
			y_aux_[cluster_i] = chol_facts_solve_[cluster_i].solve(y_[cluster_i]);
		}//end non-Vecchia
		if (marg_variance != 1.) {
			y_aux_[cluster_i] /= marg_variance;
		}
	}
	y_aux_has_been_calculated_ = true;
}
/*!
 * \brief Calculate gradient for covariance parameters
 * \return Gradient for covariance parameters
 */
vec_t GetCovParGrad() {
	//Gradient is accumulated over all independent clusters; entry 0 (marginal variance) is excluded
	vec_t cov_grad = vec_t::Zero(num_cov_par_ - 1);
	for (const auto& cluster_i : unique_clusters_) {
		if (vecchia_approx_) {
			vec_t u(num_data_per_cluster_[cluster_i]);
			vec_t uk(num_data_per_cluster_[cluster_i]);
			u = D_inv_[cluster_i] * B_[cluster_i] * y_[cluster_i];//TODO: this is already calculated in CalcYAux -> save it there and re-use here?
			for (int j = 0; j < num_comps_total_; ++j) {
				int num_par_comp = re_comps_[cluster_i][j]->num_cov_par_;
				for (int ipar = 0; ipar < num_par_comp; ++ipar) {
					uk = B_grad_[cluster_i][num_par_comp * j + ipar] * y_[cluster_i];
					cov_grad[ind_par_[j] + ipar] += ((uk.dot(u) - 0.5 * u.dot(D_grad_[cluster_i][num_par_comp * j + ipar] * u)) / sigma2_ +
						0.5 * (D_inv_[cluster_i].diagonal()).dot(D_grad_[cluster_i][num_par_comp * j + ipar].diagonal()));
				}
			}
		}//end Vecchia
		else {
			//Standard calculation: needs Psi^-1 explicitly for the trace term below
			T1 psi_inv;
			CalcPsiInv(psi_inv, cluster_i);
			for (int j = 0; j < num_comps_total_; ++j) {
				for (int ipar = 0; ipar < re_comps_[cluster_i][j]->num_cov_par_; ++ipar) {
					std::shared_ptr<T1> gradPsi = re_comps_[cluster_i][j]->GetZSigmaZtGrad(ipar);
					//grad = -0.5 * y_aux^T dPsi y_aux / sigma2 + 0.5 * tr(Psi^-1 dPsi)
					cov_grad[ind_par_[j] + ipar] += -1. * ((double)(y_aux_[cluster_i].transpose() * (*gradPsi) * y_aux_[cluster_i])) / sigma2_ / 2. +
						((double)(((*gradPsi).cwiseProduct(psi_inv)).sum())) / 2.;
				}
			}
		}//end standard (non-Vecchia) calculation
	}// end loop over clusters
	return(cov_grad);
}
/*!
* \brief Apply a momentum step * \param it Iteration number * \param[out] pars Parameters * \param[out] pars_lag1 Parameters from last iteration * \param use_nesterov_acc Indicates whether Nesterov acceleration is used in the gradient descent for finding the covariance parameters. Default = true * \param nesterov_acc_rate Acceleration rate for Nesterov acceleration * \param nesterov_schedule_version Which version of Nesterov schedule should be used. Default = 0 * \param exclude_first_log_scale If true, no momentum is applied to the first value and the momentum step is done on the log-scale for the other values. Default = true * \param momentum_offset Number of iterations for which no mometum is applied in the beginning */ void ApplyMomentumStep(int it, vec_t& pars, vec_t& pars_lag1, bool use_nesterov_acc = true, double nesterov_acc_rate = 0.5, int nesterov_schedule_version = 0, bool exclude_first_log_scale = true, int momentum_offset = 2) { if (use_nesterov_acc) { double mu = NesterovSchedule(it, nesterov_schedule_version, nesterov_acc_rate, momentum_offset); int num_par = (int)pars.size(); vec_t pars_mom(num_par);//Covariance parameters plus a momentum step if (exclude_first_log_scale) { pars_mom.segment(1, num_par - 1) = ((mu + 1.) * (pars.segment(1, num_par - 1).array().log()) - mu * (pars_lag1.segment(1, num_par - 1).array().log())).exp().matrix();//Momentum is added on the log scale pars_mom[0] = pars[0]; } else { pars_mom = (mu + 1) * pars - mu * pars_lag1; } pars_lag1 = pars; pars = pars_mom; } else { pars_lag1 = pars; } } /*! 
* \brief Update covariance parameters doing one gradient descent step (except for the marginal variance which is updated using an explicit solution) * \param lr Learning rate * \param[out] cov_pars Covariance parameters */ void UpdateCovParGradOneIter(double lr, vec_t& cov_pars) { cov_pars[0] = 0.; for (const auto& cluster_i : unique_clusters_) { cov_pars[0] += (double)(y_[cluster_i].transpose() * y_aux_[cluster_i]); } cov_pars[0] /= num_data_; sigma2_ = cov_pars[0]; vec_t grad = GetCovParGrad(); cov_pars.segment(1, num_cov_par_ - 1) = (cov_pars.segment(1, num_cov_par_ - 1).array().log() - lr * grad.array()).exp().matrix(); //for (int i = 0; i < (int)grad.size(); ++i) { Log::Debug("grad[%d]: %f", i, grad[i]); }//For debugging only } /*! * \brief Update covariance parameters doing one step of Fisher scoring (except for the marginal variance which is updated using an explicit solution) * \param[out] cov_pars Covariance parameters */ void UpdateCovParFisherScoringOneIter(vec_t& cov_pars) { cov_pars[0] = 0.; for (const auto& cluster_i : unique_clusters_) { cov_pars[0] += (double)(y_[cluster_i].transpose() * y_aux_[cluster_i]); } cov_pars[0] /= num_data_; sigma2_ = cov_pars[0]; vec_t grad = GetCovParGrad(); den_mat_t FI; CalcFisherInformation(cov_pars, FI, true, false); vec_t update = FI.llt().solve(grad); cov_pars.segment(1, num_cov_par_ - 1) = (cov_pars.segment(1, num_cov_par_ - 1).array().log() - update.array()).exp().matrix();//make update on log-scale } /*! * \brief Update linear fixed-effect coefficients doing one gradient descent step * \param lr Learning rate * \param marg_var Marginal variance parameters sigma^2 * \param X Covariate data for linear fixed-effect * \param[out] beta Linear regression coefficients */ void UpdateCoefGradOneIter(double lr, double marg_var, den_mat_t& X, vec_t& beta) { vec_t y_aux(num_data_); GetYAux(y_aux); beta += lr * (1. / marg_var) * (X.transpose()) * y_aux; } /*! 
 * \brief Update linear fixed-effect coefficients using generalized least squares (GLS)
 * \param X Covariate data for linear fixed-effect
 * \param[out] beta Linear regression coefficients
 */
void UpdateCoefGLS(den_mat_t& X, vec_t& beta) {
	vec_t y_aux(num_data_);
	GetYAux(y_aux);
	den_mat_t XT_psi_inv_X;
	CalcXTPsiInvX(X, XT_psi_inv_X);
	//GLS solution: beta = (X^T Psi^-1 X)^-1 X^T Psi^-1 y
	beta = XT_psi_inv_X.llt().solve(X.transpose() * y_aux);
}
/*!
 * \brief Check whether NaN's are present
 * \param par Vector of parameters that should be checked
 */
void CheckNaN(vec_t& par) {
	if (std::isnan(par[0])) {
		Log::Fatal("NaN occurred. (if gradient descent is used, consider using a smaller learning rate)");
	}
}
/*!
 * \brief Calculate the Fisher information for covariance parameters. Note: you need to call CalcCovFactor first
 * \param cov_pars Covariance parameters
 * \param[out] FI Fisher information
 * \param transf_scale If true, the derivative is taken on the transformed scale otherwise on the original scale. Default = true
 * \param include_marg_var If true, the marginal variance parameter is also included, otherwise not
 */
void CalcFisherInformation(const vec_t& cov_pars, den_mat_t& FI, bool transf_scale = true, bool include_marg_var = false) {
	if (include_marg_var) {
		FI = den_mat_t(num_cov_par_, num_cov_par_);
	}
	else {
		FI = den_mat_t(num_cov_par_ - 1, num_cov_par_ - 1);
	}
	FI.setZero();
	for (const auto& cluster_i : unique_clusters_) {
		if (vecchia_approx_) {
			//Vecchia approximation: FI entries are computed from B, its gradients, and D
			sp_mat_t Identity(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);
			Identity.setIdentity();
			sp_mat_t B_inv;
			eigen_sp_Lower_sp_RHS_cs_solve(B_[cluster_i], Identity, B_inv, true);
			//D = diagonal matrix with the inverses of D_inv, D_inv_2 = D_inv squared (element-wise)
			sp_mat_t D = sp_mat_t(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);
			D.setIdentity();
			D.diagonal().array() = D_inv_[cluster_i].diagonal().array().pow(-1);
			sp_mat_t D_inv_2 = sp_mat_t(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);
			D_inv_2.setIdentity();
			D_inv_2.diagonal().array() = D_inv_[cluster_i].diagonal().array().pow(2);
			//Counters for the covariance parameters
			int par_i, par_j;
			int start_cov_pars = include_marg_var ? 1 : 0;
			sp_mat_t D_inv_B_grad_B_inv, B_grad_B_inv_D;
			if (include_marg_var) {
				//First for nugget effect / noise variance parameter
				int ind_grad_nugget = re_comps_[cluster_i][ind_intercept_gp_]->num_cov_par_ * num_gp_total_;
				D_inv_B_grad_B_inv = D_inv_[cluster_i] * B_grad_[cluster_i][ind_grad_nugget] * B_inv;
				B_grad_B_inv_D = B_grad_[cluster_i][ind_grad_nugget] * B_inv * D;
				double diag = (double)((D_inv_2.diagonal().array() * D_grad_[cluster_i][ind_grad_nugget].diagonal().array() * D_grad_[cluster_i][ind_grad_nugget].diagonal().array()).sum());
				FI(0, 0) += ((double)(B_grad_B_inv_D.cwiseProduct(D_inv_B_grad_B_inv)).sum() + diag / 2.);
				par_j = 0;
				for (int j = 0; j < num_comps_total_; ++j) {
					for (int jpar = 0; jpar < re_comps_[cluster_i][j]->num_cov_par_; ++jpar) {
						B_grad_B_inv_D = B_grad_[cluster_i][par_j] * B_inv * D;
						diag = (double)((D_inv_2.diagonal().array() * D_grad_[cluster_i][ind_grad_nugget].diagonal().array() * D_grad_[cluster_i][par_j].diagonal().array()).sum());
						FI(0, par_j + 1) += ((double)(B_grad_B_inv_D.cwiseProduct(D_inv_B_grad_B_inv)).sum() + diag / 2.);
						par_j++;
					}
				}
			}
			par_i = 0;
			//Remaining covariance parameters (only the upper triangle par_j >= par_i is filled here)
			for (int i = 0; i < num_comps_total_; ++i) {
				for (int ipar = 0; ipar < re_comps_[cluster_i][i]->num_cov_par_; ++ipar) {
					D_inv_B_grad_B_inv = D_inv_[cluster_i] * B_grad_[cluster_i][par_i] * B_inv;
					par_j = 0;
					for (int j = 0; j < num_comps_total_; ++j) {
						for (int jpar = 0; jpar < re_comps_[cluster_i][j]->num_cov_par_; ++jpar) {
							if (par_j >= par_i) {
								B_grad_B_inv_D = B_grad_[cluster_i][par_j] * B_inv * D;
								double diag = (double)((D_inv_2.diagonal().array() * D_grad_[cluster_i][par_i].diagonal().array() * D_grad_[cluster_i][par_j].diagonal().array()).sum());
								FI(par_i + start_cov_pars, par_j + start_cov_pars) += ((double)(B_grad_B_inv_D.cwiseProduct(D_inv_B_grad_B_inv)).sum() + diag / 2.);
							}
							par_j++;
						}
					}
					par_i++;
				}
			}//end loop over components
		}//end Vecchia approximation
		else {
			T1 psi_inv;
			CalcPsiInv(psi_inv, cluster_i);
			if (!transf_scale) {
				psi_inv /= cov_pars[0];//psi_inv has been calculated with a transformed parametrization, so we need to divide everything by cov_pars[0] to obtain the covariance matrix
			}
			int par_i = 0;
			int par_j;
			if (include_marg_var) {
				//First for nugget effect / noise variance parameter
				T1 psi_inv_grad_psi_sigma2 = psi_inv;//The gradient for the nugget variance is the identity matrix.
				FI(par_i, par_i) += ((double)(psi_inv_grad_psi_sigma2.cwiseProduct(psi_inv_grad_psi_sigma2)).sum()) / 2.;
				par_j = 1;
				for (int j = 0; j < num_comps_total_; ++j) {//there is currently no possibility to loop over the parameters directly
					for (int jpar = 0; jpar < re_comps_[cluster_i][j]->num_cov_par_; ++jpar) {
						T1 psi_inv_grad_psi_par_j = psi_inv * *(re_comps_[cluster_i][j]->GetZSigmaZtGrad(jpar, transf_scale, cov_pars[0]));
						FI(par_i, par_j) += ((double)(psi_inv_grad_psi_sigma2.cwiseProduct(psi_inv_grad_psi_par_j)).sum()) / 2.;
						par_j++;
					}
				}
				par_i = 1;
			}
			//Remaining covariance parameters (only the upper triangle par_j >= par_i is filled here)
			for (int i = 0; i < num_comps_total_; ++i) {
				for (int ipar = 0; ipar < re_comps_[cluster_i][i]->num_cov_par_; ++ipar) {
					T1 psi_inv_grad_psi_par_i = psi_inv * *(re_comps_[cluster_i][i]->GetZSigmaZtGrad(ipar, transf_scale, cov_pars[0]));
					T1 psi_inv_grad_psi_par_i_T = psi_inv_grad_psi_par_i.transpose();
					FI(par_i, par_i) += ((double)(psi_inv_grad_psi_par_i_T.cwiseProduct(psi_inv_grad_psi_par_i)).sum()) / 2.;
					psi_inv_grad_psi_par_i.resize(0, 0);//not needed anymore
					if (include_marg_var) {
						par_j = 1;
					}
					else {
						par_j = 0;
					}
					for (int j = 0; j < num_comps_total_; ++j) {//there is currently no possibility to loop over the parameters directly
						for (int jpar = 0; jpar < re_comps_[cluster_i][j]->num_cov_par_; ++jpar) {
							if (par_j > par_i) {
								T1 psi_inv_grad_psi_par_j = psi_inv * *(re_comps_[cluster_i][j]->GetZSigmaZtGrad(jpar, transf_scale, cov_pars[0]));
								FI(par_i, par_j) += ((double)(psi_inv_grad_psi_par_i_T.cwiseProduct(psi_inv_grad_psi_par_j)).sum()) / 2.;
							}
							par_j++;
						}
					}
					par_i++;
				}
			}//end loop over components
		}//end non-Vecchia approximation
	}//end loop over clusters
	//Mirror the upper triangle to the lower one to obtain the full symmetric matrix
	FI.triangularView<Eigen::StrictlyLower>() = FI.triangularView<Eigen::StrictlyUpper>().transpose();
}
/*!
 * \brief Calculate the standard deviations for the MLE of the covariance parameters as the diagonal of the inverse Fisher information (on the orignal scale and not the transformed scale used in the optimization)
 * \param cov_pars MLE of covariance parameters
 * \param[out] std_dev Standard deviations
 */
void CalcStdDevCovPar(const vec_t& cov_pars, vec_t& std_dev) {
	SetCovParsComps(cov_pars);
	CalcCovFactor(true, false, cov_pars[0], true);
	den_mat_t FI;
	CalcFisherInformation(cov_pars, FI, false, true);
	std_dev = FI.inverse().diagonal().array().sqrt().matrix();
}
/*!
 * \brief Calculate the standard deviations for the MLE of the regression coefficients as the diagonal of the inverse Fisher information
 * \param cov_pars MLE of covariance parameters
 * \param X Covariate data for linear fixed-effect
 * \param[out] std_dev Standard deviations
 */
void CalcStdDevCoef(vec_t& cov_pars, const den_mat_t& X, vec_t& std_dev) {
	if ((int)std_dev.size() >= num_data_) {
		Log::Warning("Sample size too small to calculate standard deviations for coefficients");
		for (int i = 0; i < (int)std_dev.size(); ++i) {
			std_dev[i] = std::numeric_limits<double>::quiet_NaN();
		}
	}
	else {
		SetCovParsComps(cov_pars);
		CalcCovFactor(false, true, 1., false);
		den_mat_t FI((int)X.cols(), (int)X.cols());
		CalcXTPsiInvX(X, FI);
		FI /= cov_pars[0];
		std_dev = FI.inverse().diagonal().array().sqrt().matrix();
	}
}
/*!
* \brief Calculate predictions (conditional mean and covariance matrix) for one cluster
* \param cluster_i Cluster index for which prediction are made
* \param num_data_pred Number of prediction locations
* \param num_data_per_cluster_pred Keys: Labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization
* \param data_indices_per_cluster_pred Keys: labels of independent clusters, values: vectors with indices for data points that belong to every cluster
* \param re_group_levels_pred Group levels for the grouped random effects (re_group_levels_pred[j] contains the levels for RE number j)
* \param re_group_rand_coef_data_pred Random coefficient data for grouped REs
* \param gp_coords_mat_pred Coordinates for prediction locations
* \param gp_rand_coef_data_pred Random coefficient data for GPs
* \param predict_cov_mat If true, the covariance matrix is also calculated
* \param[out] mean_pred_id Predicted mean
* \param[out] cov_mat_pred_id Predicted covariance matrix
*/
void CalcPred(gp_id_t cluster_i, int num_data_pred,
	std::map<gp_id_t, int>& num_data_per_cluster_pred,
	std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster_pred,
	const std::vector<std::vector<string_t>>& re_group_levels_pred,
	const double* re_group_rand_coef_data_pred,
	const den_mat_t& gp_coords_mat_pred,
	const double* gp_rand_coef_data_pred,
	bool predict_cov_mat, vec_t& mean_pred_id, T1& cov_mat_pred_id) {
	// Vector which contains covariance matrices needed for making predictions in the following order:
	// 0. Ztilde*Sigma*Z^T, 1. Zstar*Sigmatilde^T*Z^T, 2. Ztilde*Sigma*Ztilde^T, 3. Ztilde*Sigmatilde*Zstar^T, 4. Zstar*Sigmastar*Zstar^T
	std::vector<T1> pred_mats(5);
	//Define which covariance matrices are zero ('false') or non-zero ('true')
	std::vector<bool> active_mats{ false, false, false, false, false };
	if (num_re_group_total_ > 0) {//grouped REs contribute to matrices 0, 2, 4
		active_mats[0] = true;
		active_mats[2] = true;
		active_mats[4] = true;
	}
	if (num_gp_total_ > 0) {//GPs contribute to matrices 1, 4
		active_mats[1] = true;
		active_mats[4] = true;
	}
	//Initialize covariance matrices
	//matrices 0-1 are (n_pred x n_obs), matrices 2-4 (only needed for the
	//predictive covariance) are (n_pred x n_pred)
	for (int i = 0; i < 2; ++i) {
		if (active_mats[i]) {
			pred_mats[i].resize(num_data_per_cluster_pred[cluster_i], num_data_per_cluster_[cluster_i]);
			pred_mats[i].setZero();
		}
	}
	if (predict_cov_mat) {
		for (int i = 2; i < 5; ++i) {
			if (active_mats[i]) {
				pred_mats[i].resize(num_data_per_cluster_pred[cluster_i], num_data_per_cluster_pred[cluster_i]);
				pred_mats[i].setZero();
			}
		}
	}
	//Calculate covariance matrices: every RE/GP component adds its cross- and
	//prediction-covariances into pred_mats via AddPredCovMatrices
	int cn = 0;//component number
	if (num_re_group_ > 0) {
		//Grouped random effects
		for (int j = 0; j < num_re_group_; ++j) {
			std::shared_ptr<RECompGroup<T1>> re_comp = std::dynamic_pointer_cast<RECompGroup<T1>>(re_comps_[cluster_i][cn]);
			std::vector<re_group_t> group_data;
			for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {
				group_data.push_back(re_group_levels_pred[j][id]);
			}
			re_comp->AddPredCovMatrices(group_data, pred_mats, predict_cov_mat);
			cn += 1;
		}
		if (num_re_group_rand_coef_ > 0) {
			//Random coefficient grouped random effects
			for (int j = 0; j < num_re_group_rand_coef_; ++j) {
				std::shared_ptr<RECompGroup<T1>> re_comp = std::dynamic_pointer_cast<RECompGroup<T1>>(re_comps_[cluster_i][cn]);
				std::vector<re_group_t> group_data;
				std::vector<double> rand_coef_data;
				for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {
					rand_coef_data.push_back(re_group_rand_coef_data_pred[j * num_data_pred + id]);
					group_data.push_back(re_group_levels_pred[ind_effect_group_rand_coef_[j] - 1][id]);//subtract 1 since counting starts at one for this index
				}
				re_comp->AddPredCovMatrices(group_data, pred_mats, predict_cov_mat, rand_coef_data.data());
				cn += 1;
			}
		}
	}
	if (num_gp_ > 0) {
		//Gaussian process
		std::shared_ptr<RECompGP<T1>> re_comp_base = std::dynamic_pointer_cast<RECompGP<T1>>(re_comps_[cluster_i][cn]);
		re_comp_base->AddPredCovMatrices(re_comp_base->coords_, gp_coords_mat_pred, pred_mats, predict_cov_mat);
		cn += 1;
		if (num_gp_rand_coef_ > 0) {
			std::shared_ptr<RECompGP<T1>> re_comp;
			//Random coefficient Gaussian processes
			for (int j = 0; j < num_gp_rand_coef_; ++j) {
				re_comp = std::dynamic_pointer_cast<RECompGP<T1>>(re_comps_[cluster_i][cn]);
				std::vector<double> rand_coef_data;
				for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {
					rand_coef_data.push_back(gp_rand_coef_data_pred[j * num_data_pred + id]);
				}
				re_comp->AddPredCovMatrices(re_comp_base->coords_, gp_coords_mat_pred, pred_mats, predict_cov_mat, rand_coef_data.data());
				cn += 1;
			}
		}
	}
	T1 M_aux(num_data_per_cluster_pred[cluster_i], num_data_per_cluster_[cluster_i]);//Ztilde*Sigma*Z^T + Zstar*Sigmatilde^T*Z^T
	M_aux.setZero();
	for (int i = 0; i < 2; ++i) {
		if (active_mats[i]) {
			M_aux += pred_mats[i];
		}
	}
	//conditional mean: cross-covariance times psi^-1 * y (y_aux_)
	mean_pred_id = M_aux * y_aux_[cluster_i];
	if (predict_cov_mat) {
		//conditional covariance: prior covariance (incl. nugget via identity)
		//minus explained part M_aux * psi^-1 * M_aux^T
		cov_mat_pred_id.setIdentity();
		for (int i = 2; i < 5; ++i) {
			if (active_mats[i]) {
				cov_mat_pred_id += pred_mats[i];
				if (i == 3) {//Ztilde*Sigmatilde*Zstar^T enters also transposed
					cov_mat_pred_id += T1(pred_mats[i].transpose());
				}
			}
		}
		cov_mat_pred_id -= (M_aux * (chol_facts_solve_[cluster_i].solve(T1(M_aux.transpose()))));
	}
}
/*!
* \brief Calculate predictions (conditional mean and covariance matrix) using the Vecchia approximation for the covariance matrix of the observable process when observed locations appear first in the ordering
* \param CondObsOnly If true, the nearest neighbors for the predictions are found only among the observed data
* \param cluster_i Cluster index for which prediction are made
* \param num_data_pred Number of prediction locations
* \param num_data_per_cluster_pred Keys: Labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization
* \param data_indices_per_cluster_pred Keys: labels of independent clusters, values: vectors with indices for data points that belong to the every cluster
* \param gp_coords_mat_obs Coordinates for observed locations
* \param gp_coords_mat_pred Coordinates for prediction locations
* \param gp_rand_coef_data_pred Random coefficient data for GPs
* \param predict_cov_mat If true, the covariance matrix is also calculated
* \param[out] mean_pred_id Predicted mean
* \param[out] cov_mat_pred_id Predicted covariance matrix
*/
void CalcPredVecchiaObservedFirstOrder(bool CondObsOnly, gp_id_t cluster_i, int num_data_pred,
	std::map<gp_id_t, int>& num_data_per_cluster_pred,
	std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster_pred,
	const den_mat_t& gp_coords_mat_obs, const den_mat_t& gp_coords_mat_pred,
	const double* gp_rand_coef_data_pred,
	bool predict_cov_mat, vec_t& mean_pred_id, T1& cov_mat_pred_id) {
	int num_data_cli = num_data_per_cluster_[cluster_i];
	int num_data_pred_cli = num_data_per_cluster_pred[cluster_i];
	//Find nearest neighbors: ordering is observed locations first, then prediction locations
	den_mat_t coords_all(num_data_cli + num_data_pred_cli, dim_gp_coords_);
	coords_all << gp_coords_mat_obs, gp_coords_mat_pred;
	std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_pred_cli);
	std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_pred_cli);
	std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_data_pred_cli);
	if (CondObsOnly) {
		find_nearest_neighbors_Veccia_fast(coords_all, num_data_cli + num_data_pred_cli, num_neighbors_pred_,
			nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, num_data_cli, num_data_cli - 1);
	}
	else {//find neighbors among both the observed and prediction locations
		find_nearest_neighbors_Veccia_fast(coords_all, num_data_cli + num_data_pred_cli, num_neighbors_pred_,
			nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, num_data_cli, -1);
	}
	//Random coefficients
	std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_pred_cli);
	if (num_gp_rand_coef_ > 0) {
		for (int j = 0; j < num_gp_rand_coef_; ++j) {
			std::vector<double> rand_coef_data = re_comps_[cluster_i][ind_intercept_gp_ + j + 1]->rand_coef_data_;//First entries are the observed data, then the predicted data
			for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {//TODO: maybe do the following in parallel? (see CalcPredVecchiaPredictedFirstOrder)
				rand_coef_data.push_back(gp_rand_coef_data_pred[j * num_data_pred + id]);
			}
//NOTE(review): this 'omp for' is not enclosed in an 'omp parallel' region, so it
//runs sequentially (an orphaned worksharing construct) - confirm whether a
//'parallel for' was intended here (see the TODO above)
#pragma omp for schedule(static)
			for (int i = 0; i < num_data_pred_cli; ++i) {
				z_outer_z_obs_neighbors_cluster_i[i] = std::vector<den_mat_t>(num_gp_rand_coef_);
				int dim_z = (int)nearest_neighbors_cluster_i[i].size() + 1;
				vec_t coef_vec(dim_z);
				coef_vec(0) = rand_coef_data[num_data_cli + i];
				//NOTE(review): this condition is always true when there is observed data;
				//it presumably mirrors the 'if (i > 0)' guard in
				//CalcPredVecchiaPredictedFirstOrder - confirm intent
				if ((num_data_cli + i) > 0) {
					for (int ii = 1; ii < dim_z; ++ii) {
						coef_vec(ii) = rand_coef_data[nearest_neighbors_cluster_i[i][ii - 1]];
					}
				}
				z_outer_z_obs_neighbors_cluster_i[i][j] = coef_vec * coef_vec.transpose();
			}
		}
	}
	// Determine Triplet for initializing Bpo and Bp
	std::vector<Triplet_t> entries_init_Bpo, entries_init_Bp;
	for (int i = 0; i < num_data_pred_cli; ++i) {
		entries_init_Bp.push_back(Triplet_t(i, i, 1.));//Put 1 on the diagonal
		for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i].size(); ++inn) {
			if (nearest_neighbors_cluster_i[i][inn] < num_data_cli) {//nearest neighbor belongs to observed data
				entries_init_Bpo.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn], 0.));
			}
			else {//nearest neighbor belongs to predicted data
				entries_init_Bp.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn] - num_data_cli, 0.));
			}
		}
	}
	sp_mat_t Bpo(num_data_pred_cli, num_data_cli);
	sp_mat_t Bp(num_data_pred_cli, num_data_pred_cli);
	Bpo.setFromTriplets(entries_init_Bpo.begin(), entries_init_Bpo.end());//initialize matrices (in order that the code below can be run in parallel)
	Bp.setFromTriplets(entries_init_Bp.begin(), entries_init_Bp.end());
	sp_mat_t Dp(num_data_pred_cli, num_data_pred_cli);
	Dp.setIdentity();//Put 1 on the diagonal (for nugget effect)
#pragma omp parallel for schedule(static)
	for (int i = 0; i < num_data_pred_cli; ++i) {
		int num_nn = (int)nearest_neighbors_cluster_i[i].size();
		//define covariance and gradient matrices
		den_mat_t cov_mat_obs_neighbors(1, num_nn);//dim = 1 x nn
		den_mat_t cov_mat_between_neighbors(num_nn, num_nn);//dim = nn x nn
		den_mat_t cov_grad_mats_obs_neighbors, cov_grad_mats_between_neighbors; //not used, just as mock argument for functions below
		for (int j = 0; j < num_gp_total_; ++j) {
			if (j == 0) {
				re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i], cov_mat_obs_neighbors, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);//write on matrices directly for first GP component
				re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i], cov_mat_between_neighbors, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
			}
			else {//random coefficient GPs
				den_mat_t cov_mat_obs_neighbors_j;
				den_mat_t cov_mat_between_neighbors_j;
				re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i], cov_mat_obs_neighbors_j, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);
				re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i], cov_mat_between_neighbors_j, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
				//multiply by coefficient matrix
				cov_mat_obs_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();
				cov_mat_between_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
				cov_mat_obs_neighbors += cov_mat_obs_neighbors_j;
				cov_mat_between_neighbors += cov_mat_between_neighbors_j;
			}
		}//end loop over components j
		//Calculate matrices A and D as well as their derivatives
		//1. add first summand of matrix D (ZCZ^T_{ii})
		for (int j = 0; j < num_gp_total_; ++j) {
			double d_comp_j = re_comps_[cluster_i][ind_intercept_gp_ + j]->cov_pars_[0];
			if (j > 0) {//random coefficient
				d_comp_j *= z_outer_z_obs_neighbors_cluster_i[i][j - 1](0, 0);
			}
			Dp.coeffRef(i, i) += d_comp_j;
		}
		//2. remaining terms
		cov_mat_between_neighbors.diagonal().array() += 1.;//add nugget effect
		den_mat_t A_i(1, num_nn);//dim = 1 x nn
		A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose();
		for (int inn = 0; inn < num_nn; ++inn) {
			if (nearest_neighbors_cluster_i[i][inn] < num_data_cli) {//nearest neighbor belongs to observed data
				Bpo.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn);
			}
			else {
				Bp.coeffRef(i, nearest_neighbors_cluster_i[i][inn] - num_data_cli) -= A_i(0, inn);
			}
		}
		Dp.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
	}//end loop over data i
	mean_pred_id = -Bpo * y_[cluster_i];
	if (!CondObsOnly) {
		//solve Bp * x = -Bpo * y in place (Bp is lower triangular)
		sp_L_solve(Bp.valuePtr(), Bp.innerIndexPtr(), Bp.outerIndexPtr(), num_data_pred_cli, mean_pred_id.data());
	}
	if (predict_cov_mat) {
		if (CondObsOnly) {
			cov_mat_pred_id = Dp;
		}
		else {
			sp_mat_t Identity(num_data_pred_cli, num_data_pred_cli);
			Identity.setIdentity();
			sp_mat_t Bp_inv;
			eigen_sp_Lower_sp_RHS_cs_solve(Bp, Identity, Bp_inv, true);
			cov_mat_pred_id = T1(Bp_inv * Dp * Bp_inv.transpose());
		}
	}
}
/*!
* \brief Calculate predictions (conditional mean and covariance matrix) using the Vecchia approximation for the covariance matrix of the observable process when prediction locations appear first in the ordering
* \param cluster_i Cluster index for which prediction are made
* \param num_data_pred Number of prediction locations
* \param num_data_per_cluster_pred Keys: Labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization
* \param data_indices_per_cluster_pred Keys: labels of independent clusters, values: vectors with indices for data points that belong to the every cluster
* \param gp_coords_mat_obs Coordinates for observed locations
* \param gp_coords_mat_pred Coordinates for prediction locations
* \param gp_rand_coef_data_pred Random coefficient data for GPs
* \param predict_cov_mat If true, the covariance matrix is also calculated
* \param[out] mean_pred_id Predicted mean
* \param[out] cov_mat_pred_id Predicted covariance matrix
*/
void CalcPredVecchiaPredictedFirstOrder(gp_id_t cluster_i, int num_data_pred,
	std::map<gp_id_t, int>& num_data_per_cluster_pred,
	std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster_pred,
	const den_mat_t& gp_coords_mat_obs, const den_mat_t& gp_coords_mat_pred,
	const double* gp_rand_coef_data_pred,
	bool predict_cov_mat, vec_t& mean_pred_id, T1& cov_mat_pred_id) {
	int num_data_cli = num_data_per_cluster_[cluster_i];
	int num_data_pred_cli = num_data_per_cluster_pred[cluster_i];
	int num_data_tot = num_data_cli + num_data_pred_cli;
	//Find nearest neighbors: ordering is prediction locations first, then observed locations
	den_mat_t coords_all(num_data_tot, dim_gp_coords_);
	coords_all << gp_coords_mat_pred, gp_coords_mat_obs;
	std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_tot);
	std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_tot);
	std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_data_tot);
	find_nearest_neighbors_Veccia_fast(coords_all, num_data_tot, num_neighbors_pred_,
		nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, -1);
	//Prepare data for random coefficients
	std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_tot);
	if (num_gp_rand_coef_ > 0) {
		for (int j = 0; j < num_gp_rand_coef_; ++j) {
			std::vector<double> rand_coef_data(num_data_tot);//First entries are the predicted data, then the observed data
//NOTE(review): the following 'omp for' directives are not enclosed in an
//'omp parallel' region and therefore run sequentially - confirm intent
#pragma omp for schedule(static)
			for (int i = 0; i < num_data_pred_cli; ++i) {
				rand_coef_data[i] = gp_rand_coef_data_pred[j * num_data_pred + data_indices_per_cluster_pred[cluster_i][i]];
			}
#pragma omp for schedule(static)
			for (int i = 0; i < num_data_cli; ++i) {
				rand_coef_data[num_data_pred_cli + i] = re_comps_[cluster_i][ind_intercept_gp_ + j + 1]->rand_coef_data_[i];
			}
			//re_comps_[cluster_i][ind_intercept_gp_ + j + 1]->rand_coef_data_
			//for (int i = 0; i < rand_coef_data.size(); ++i) {
			//	Log::Info("rand_coef_data[%d]: %f", i, rand_coef_data[i]);
			//}
#pragma omp for schedule(static)
			for (int i = 0; i < num_data_tot; ++i) {
				z_outer_z_obs_neighbors_cluster_i[i] = std::vector<den_mat_t>(num_gp_rand_coef_);
				int dim_z = (int)nearest_neighbors_cluster_i[i].size() + 1;
				vec_t coef_vec(dim_z);
				coef_vec(0) = rand_coef_data[i];
				if (i > 0) {//the first point in the ordering has no neighbors
					for (int ii = 1; ii < dim_z; ++ii) {
						coef_vec(ii) = rand_coef_data[nearest_neighbors_cluster_i[i][ii - 1]];
					}
				}
				z_outer_z_obs_neighbors_cluster_i[i][j] = coef_vec * coef_vec.transpose();
			}
		}
	}
	// Determine Triplet for initializing Bo, Bop, and Bp
	std::vector<Triplet_t> entries_init_Bo, entries_init_Bop, entries_init_Bp;
	for (int i = 0; i < num_data_pred_cli; ++i) {
		entries_init_Bp.push_back(Triplet_t(i, i, 1.));//Put 1 on the diagonal
		for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i].size(); ++inn) {
			entries_init_Bp.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn], 0.));
		}
	}
	for (int i = 0; i < num_data_cli; ++i) {
		entries_init_Bo.push_back(Triplet_t(i, i, 1.));//Put 1 on the diagonal
		for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i + num_data_pred_cli].size(); ++inn) {
			if (nearest_neighbors_cluster_i[i + num_data_pred_cli][inn] < num_data_pred_cli) {//nearest neighbor belongs to predicted data
				entries_init_Bop.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i + num_data_pred_cli][inn], 0.));
			}
			else {//nearest neighbor belongs to observed data
				entries_init_Bo.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i + num_data_pred_cli][inn] - num_data_pred_cli, 0.));
			}
		}
	}
	sp_mat_t Bo(num_data_cli, num_data_cli);
	sp_mat_t Bop(num_data_cli, num_data_pred_cli);
	sp_mat_t Bp(num_data_pred_cli, num_data_pred_cli);
	Bo.setFromTriplets(entries_init_Bo.begin(), entries_init_Bo.end());//initialize matrices (in order that the code below can be run in parallel)
	Bop.setFromTriplets(entries_init_Bop.begin(), entries_init_Bop.end());
	Bp.setFromTriplets(entries_init_Bp.begin(), entries_init_Bp.end());
	sp_mat_t Do_inv(num_data_cli, num_data_cli);
	sp_mat_t Dp_inv(num_data_pred_cli, num_data_pred_cli);
	Do_inv.setIdentity();//Put 1 on the diagonal (for nugget effect)
	Dp_inv.setIdentity();
#pragma omp parallel for schedule(static)
	for (int i = 0; i < num_data_tot; ++i) {
		int num_nn = (int)nearest_neighbors_cluster_i[i].size();
		//define covariance and gradient matrices
		den_mat_t cov_mat_obs_neighbors(1, num_nn);//dim = 1 x nn
		den_mat_t cov_mat_between_neighbors(num_nn, num_nn);//dim = nn x nn
		den_mat_t cov_grad_mats_obs_neighbors, cov_grad_mats_between_neighbors; //not used, just as mock argument for functions below
		if (i > 0) {
			for (int j = 0; j < num_gp_total_; ++j) {
				if (j == 0) {
					re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i], cov_mat_obs_neighbors, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);//write on matrices directly for first GP component
					re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i], cov_mat_between_neighbors, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
				}
				else {//random coefficient GPs
					den_mat_t cov_mat_obs_neighbors_j;
					den_mat_t cov_mat_between_neighbors_j;
					re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i], cov_mat_obs_neighbors_j, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);
					re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i], cov_mat_between_neighbors_j, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
					//multiply by coefficient matrix
					cov_mat_obs_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();
					cov_mat_between_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
					cov_mat_obs_neighbors += cov_mat_obs_neighbors_j;
					cov_mat_between_neighbors += cov_mat_between_neighbors_j;
				}
			}//end loop over components j
		}
		//Calculate matrices A and D as well as their derivatives
		//1. add first summand of matrix D (ZCZ^T_{ii})
		for (int j = 0; j < num_gp_total_; ++j) {
			double d_comp_j = re_comps_[cluster_i][ind_intercept_gp_ + j]->cov_pars_[0];
			if (j > 0) {//random coefficient
				d_comp_j *= z_outer_z_obs_neighbors_cluster_i[i][j - 1](0, 0);
			}
			if (i < num_data_pred_cli) {//diagonal entry belongs to the prediction block
				Dp_inv.coeffRef(i, i) += d_comp_j;
			}
			else {//diagonal entry belongs to the observed block
				Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli) += d_comp_j;
			}
		}
		//2. remaining terms
		if (i > 0) {
			cov_mat_between_neighbors.diagonal().array() += 1.;//add nugget effect
			den_mat_t A_i(1, num_nn);//dim = 1 x nn
			A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose();
			for (int inn = 0; inn < num_nn; ++inn) {
				if (i < num_data_pred_cli) {
					Bp.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn);
				}
				else {
					if (nearest_neighbors_cluster_i[i][inn] < num_data_pred_cli) {//nearest neighbor belongs to predicted data
						Bop.coeffRef(i - num_data_pred_cli, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn);
					}
					else {
						Bo.coeffRef(i - num_data_pred_cli, nearest_neighbors_cluster_i[i][inn] - num_data_pred_cli) -= A_i(0, inn);
					}
				}
			}
			if (i < num_data_pred_cli) {
				Dp_inv.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
			}
			else {
				Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
			}
		}
		//invert the diagonal in place: Dp_inv / Do_inv now hold D^-1
		if (i < num_data_pred_cli) {
			Dp_inv.coeffRef(i, i) = 1 / Dp_inv.coeffRef(i, i);
		}
		else {
			Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli) = 1 / Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli);
		}
	}//end loop over data i
	//conditional precision of the predicted observations
	sp_mat_t cond_prec = Bp.transpose() * Dp_inv * Bp + Bop.transpose() * Do_inv * Bop;
	chol_sp_mat_t CholFact;
	CholFact.compute(cond_prec);
	if (predict_cov_mat) {
		sp_mat_t Identity(num_data_pred_cli, num_data_pred_cli);
		Identity.setIdentity();
		sp_mat_t cond_prec_chol = CholFact.matrixL();
		sp_mat_t cond_prec_chol_inv;
		eigen_sp_Lower_sp_RHS_cs_solve(cond_prec_chol, Identity, cond_prec_chol_inv, true);
		//conditional covariance = (cond_prec)^-1 = L^-T * L^-1
		cov_mat_pred_id = T1(cond_prec_chol_inv.transpose() * cond_prec_chol_inv);
		mean_pred_id = -cov_mat_pred_id * Bop.transpose() * Do_inv * Bo * y_[cluster_i];
	}
	else {
		mean_pred_id = -CholFact.solve(Bop.transpose() * Do_inv * Bo * y_[cluster_i]);
	}
}
/*!
* \brief Calculate predictions (conditional mean and covariance matrix) using the Vecchia approximation for the latent process when observed locations appear first in the ordering
* \param CondObsOnly If true, the nearest neighbors for the predictions are found only among the observed data
* \param cluster_i Cluster index for which prediction are made
* \param num_data_per_cluster_pred Keys: Labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization
* \param gp_coords_mat_obs Coordinates for observed locations
* \param gp_coords_mat_pred Coordinates for prediction locations
* \param predict_cov_mat If true, the covariance matrix is also calculated
* \param[out] mean_pred_id Predicted mean
* \param[out] cov_mat_pred_id Predicted covariance matrix
*/
void CalcPredVecchiaLatentObservedFirstOrder(bool CondObsOnly, gp_id_t cluster_i,
	std::map<gp_id_t, int>& num_data_per_cluster_pred,
	const den_mat_t& gp_coords_mat_obs, const den_mat_t& gp_coords_mat_pred,
	bool predict_cov_mat, vec_t& mean_pred_id, T1& cov_mat_pred_id) {
	if (num_gp_rand_coef_ > 0) {
		Log::Fatal("The Vecchia approximation for the latent process is currently not implemented when having random coefficients");
	}
	int num_data_cli = num_data_per_cluster_[cluster_i];
	int num_data_pred_cli = num_data_per_cluster_pred[cluster_i];
	int num_data_tot = num_data_cli + num_data_pred_cli;
	//Find nearest neighbors: ordering is observed locations first, then prediction locations
	den_mat_t coords_all(num_data_cli + num_data_pred_cli, dim_gp_coords_);
	coords_all << gp_coords_mat_obs, gp_coords_mat_pred;
	//Determine number of unique observation locations
	std::vector<int> uniques;//unique points
	std::vector<int> unique_idx;//used for constructing incidence matrix Z_ if there are duplicates
	DetermineUniqueDuplicateCoords(gp_coords_mat_obs, num_data_cli, uniques, unique_idx);
	int num_coord_unique_obs = (int)uniques.size();
	//Determine unique locations (observed and predicted)
	DetermineUniqueDuplicateCoords(coords_all, num_data_tot, uniques, unique_idx);
	int num_coord_unique = (int)uniques.size();
	den_mat_t coords_all_unique;
	if ((int)uniques.size() == num_data_tot) {//no multiple observations at the same locations -> no incidence matrix needed
		coords_all_unique = coords_all;
	}
	else {
		coords_all_unique = coords_all(uniques, Eigen::all);
	}
	//Determine incidence matrices: Z_o maps unique locations to observed data,
	//Z_p maps unique locations to prediction data
	sp_mat_t Z_o = sp_mat_t(num_data_cli, uniques.size());
	sp_mat_t Z_p = sp_mat_t(num_data_pred_cli, uniques.size());
	for (int i = 0; i < num_data_tot; ++i) {
		if (i < num_data_cli) {
			Z_o.insert(i, unique_idx[i]) = 1.;
		}
		else {
			Z_p.insert(i - num_data_cli, unique_idx[i]) = 1.;
		}
	}
	std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_coord_unique);
	std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_coord_unique);
	std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_coord_unique);
	if (CondObsOnly) {//find neighbors among the observed locations only
		find_nearest_neighbors_Veccia_fast(coords_all_unique, num_coord_unique, num_neighbors_pred_,
			nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, num_coord_unique_obs - 1);
	}
	else {//find neighbors among both the observed and prediction locations
		find_nearest_neighbors_Veccia_fast(coords_all_unique, num_coord_unique, num_neighbors_pred_,
			nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, -1);
	}
	// Determine Triplet for initializing B
	std::vector<Triplet_t> entries_init_B;
	for (int i = 0; i < num_coord_unique; ++i) {
		entries_init_B.push_back(Triplet_t(i, i, 1.));//Put 1 on the diagonal
		for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i].size(); ++inn) {
			entries_init_B.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn], 0.));
		}
	}
	sp_mat_t B(num_coord_unique, num_coord_unique);
	B.setFromTriplets(entries_init_B.begin(), entries_init_B.end());//initialize matrices (in order that the code below can be run in parallel)
	sp_mat_t D(num_coord_unique, num_coord_unique);
	D.setIdentity();
	D.diagonal().array() = 0.;//latent process: no nugget on the diagonal of D
#pragma omp parallel for schedule(static)
	for (int i = 0; i < num_coord_unique; ++i) {
		int num_nn = (int)nearest_neighbors_cluster_i[i].size();
		//define covariance and gradient matrices
		den_mat_t cov_mat_obs_neighbors(1, num_nn);//dim = 1 x nn
		den_mat_t cov_mat_between_neighbors(num_nn, num_nn);//dim = nn x nn
		den_mat_t cov_grad_mats_obs_neighbors, cov_grad_mats_between_neighbors; //not used, just as mock argument for functions below
		if (i > 0) {
			re_comps_[cluster_i][ind_intercept_gp_]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i], cov_mat_obs_neighbors, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);//write on matrices directly for first GP component
			re_comps_[cluster_i][ind_intercept_gp_]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i], cov_mat_between_neighbors, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
		}
		//Calculate matrices A and D as well as their derivatives
		//1. add first summand of matrix D (ZCZ^T_{ii})
		D.coeffRef(i, i) = re_comps_[cluster_i][ind_intercept_gp_]->cov_pars_[0];
		//2. remaining terms
		if (i > 0) {
			den_mat_t A_i(1, num_nn);//dim = 1 x nn
			A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose();
			for (int inn = 0; inn < num_nn; ++inn) {
				B.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn);
			}
			D.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
		}
	}//end loop over data i
	//Calculate D_inv and B_inv in order to calculate Sigma and Sigma^-1
	sp_mat_t D_inv(num_coord_unique, num_coord_unique);
	D_inv.setIdentity();
	D_inv.diagonal().array() = D.diagonal().array().pow(-1);
	sp_mat_t Identity_all(num_coord_unique, num_coord_unique);
	Identity_all.setIdentity();
	sp_mat_t B_inv;
	eigen_sp_Lower_sp_RHS_cs_solve(B, Identity_all, B_inv, true);
	//Calculate inverse of covariance matrix for observed data using the Woodbury identity
	sp_mat_t Z_o_T = Z_o.transpose();
	sp_mat_t M_aux_Woodbury = B.transpose() * D_inv * B + Z_o_T * Z_o;
	chol_sp_mat_t CholFac_M_aux_Woodbury;
	CholFac_M_aux_Woodbury.compute(M_aux_Woodbury);
	if (predict_cov_mat) {
		//Using Eigen's solver
		sp_mat_t M_aux_Woodbury2 = CholFac_M_aux_Woodbury.solve(Z_o_T);
		sp_mat_t Identity_obs(num_data_cli, num_data_cli);
		Identity_obs.setIdentity();
		//(Z_o*Sigma*Z_o^T + I)^-1 via the Woodbury identity
		sp_mat_t ZoSigmaZoT_plusI_Inv = -Z_o * M_aux_Woodbury2 + Identity_obs;
		sp_mat_t ZpSigmaZoT = Z_p * B_inv * D * B_inv.transpose() * Z_o_T;
		sp_mat_t M_aux = ZpSigmaZoT * ZoSigmaZoT_plusI_Inv;
		mean_pred_id = M_aux * y_[cluster_i];
		sp_mat_t Identity_pred(num_data_pred_cli, num_data_pred_cli);
		Identity_pred.setIdentity();
		cov_mat_pred_id = T1(Z_p * B_inv * D * B_inv.transpose() * Z_p.transpose() + Identity_pred - M_aux * ZpSigmaZoT.transpose());
	}
	else {
		//mean only: apply the Woodbury identity directly to y
		vec_t resp_aux = Z_o_T * y_[cluster_i];
		vec_t resp_aux2 = CholFac_M_aux_Woodbury.solve(resp_aux);
		resp_aux = y_[cluster_i] - Z_o * resp_aux2;
		mean_pred_id = Z_p * B_inv * D * B_inv.transpose() * Z_o_T * resp_aux;
	}
}
friend class REModel;
};
} // namespace GPBoost
#endif // GPB_RE_MODEL_TEMPLATE_H_
GB_unop__identity_uint32_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_uint32_fc64)
// op(A') function:  GB (_unop_tran__identity_uint32_fc64)

// C type:   uint32_t
// A type:   GxB_FC64_t
// cast:     uint32_t cij = GB_cast_to_uint32_t (creal (aij))
// unaryop:  cij = aij

// entry type of the input matrix A (double complex)
#define GB_ATYPE \
    GxB_FC64_t

// entry type of the output matrix C
#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: output equals the casted input)
#define GB_OP(z, x) \
    z = x ;

// casting: take the real part of the complex entry and saturate to uint32_t
#define GB_CAST(z, aij) \
    uint32_t z = GB_cast_to_uint32_t (creal (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint32_t z = GB_cast_to_uint32_t (creal (aij)) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise kernel: for each of the anz entries, cast the real part of the
// double-complex value in Ax to uint32_t and store it in Cx.  Cx and Ax may
// alias since each output depends only on the same input position.
GrB_Info GB (_unop_apply__identity_uint32_fc64)
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: every position 0..anz-1 holds a live entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            uint32_t z = GB_cast_to_uint32_t (creal (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions whose bitmap bit is clear (no entry present)
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            uint32_t z = GB_cast_to_uint32_t (creal (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose algorithm lives in GB_unop_transpose.c and is specialized
// for this type pair via the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_uint32_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
fibonacci.h
/* BSD 3-Clause License * Copyright (c) 2019-2021, contributors * All rights reserved. * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <iostream> #include <chrono> #include <omp.h> #include "xitao.h" using namespace std; // enable a known trick to avoid redundant recursion for evaluated cases //#define MEMOIZE // the maximum number of Fibonacci terms that can fit in unsigned 64 bit const uint32_t MAX_FIB = 92; // a global variable to manage the granularity of TAO creation (coarsening level) uint32_t grain_size; // declare the class class FibTAO; // init the memoization array of TAOs FibTAO* fib_taos[MAX_FIB + 1]; // basic Fibonacci implementation size_t fib(uint32_t num) { // return 0 for 0 and negative terms (undefined) if(num <= 0) return 0; // return 1 for the term 1 else if(num == 1) return 1; // recursively find the result return fib(num - 1) + fib(num - 2); } // basic Fibonacci implementation size_t fib_omp(uint32_t num) { // return 0 for 0 and negative terms (undefined) if(num <= 0) return 0; // return 1 for the term 1 else if(num == 1) return 1; // recursively find the result #pragma omp task if (num > grain_size) auto num_1 = fib_omp(num - 1); #pragma omp task if (num > grain_size) auto num_2 = fib_omp(num - 2); #pragma omp taskwait return num_1 + num_2; } // the Fibonacci TAO (Every TAO class must inherit from AssemblyTask) class FibTAO : public AssemblyTask { public: // the n - 1 tao FibTAO* prev1; // the n - 2 tao FibTAO* prev2; // the term number uint32_t term; // the Fib value for the TAO size_t val; // the tao construction. 
resource hint 1 FibTAO(int _term): term(_term), AssemblyTask(1) { } // the work function void execute(int nthread) { // calculate locally if at required granularity if(term <= grain_size) val = fib(term); // if this is not a terminal term else if(term > 1) // calculate the value val = prev1->val + prev2->val; } void cleanup(){ } }; // build the DAG by reversing the recursion tree FibTAO* buildDAG(uint32_t term) { // gaurd against negative terms if(term < 0) term = 0; // if this is terminal term if(term <= 1) { // create the terminal tao fib_taos[term] = new FibTAO(term); // push the tao xitao_push(fib_taos[term]); // return the tao return fib_taos[term]; } #ifdef MEMOIZE // if this TAO has already been created (avoid redundant calculation) if(fib_taos[term]) return fib_taos[term]; #endif // construct the tao fib_taos[term] = new FibTAO(term); // create TAOs as long as you are above the grain size if(term > grain_size) { // build DAG of n - 1 term fib_taos[term]->prev1 = buildDAG(term - 1); // make edge to current fib_taos[term]->prev1->make_edge(fib_taos[term]); // build DAG of n - 1 term fib_taos[term]->prev2 = buildDAG(term - 2); // make edge to current fib_taos[term]->prev2->make_edge(fib_taos[term]); } else { // you have reached a terminal TAO // push the TAO to fire the DAG execution xitao_push(fib_taos[term]); } // return the current tao (the head of the DAG) return fib_taos[term]; }
omp_task_linkedlist.c
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>

#define LIST_ITERATION 20

typedef struct lnklist LNKLIST;
struct lnklist {
    int num;
    LNKLIST *next;
} *ll_head, *ll_tail;   /* global head/tail of the singly linked list */

void make_lnklist(void);
LNKLIST *append_lnklist(LNKLIST *l);
void walk_lnklist(void);

int main(void)
{
#pragma omp parallel
    {
        /* one thread builds the list and spawns one task per node;
         * the remaining threads in the team execute the tasks */
#pragma omp single
        {
            make_lnklist();
            walk_lnklist(); /* traverse the linked list */
        }
    }
    return 0;
}

/* Build a list of LIST_ITERATION nodes behind a sentinel head (num == -1). */
void make_lnklist(void)
{
    int i;
    LNKLIST *list;

    ll_tail = ll_head = calloc(1, sizeof(LNKLIST));
    if (ll_head == NULL) {          /* abort on allocation failure */
        perror("calloc");
        exit(EXIT_FAILURE);
    }
    ll_head->num = -1;              /* sentinel value */
    /* fixed: was "ll_head->next = ll_tail", which made the sentinel point
     * at itself; with no appended nodes walk_lnklist() would loop forever */
    ll_head->next = NULL;

    for (i = 0; i < LIST_ITERATION; i++) {
        list = calloc(1, sizeof(LNKLIST));
        if (list == NULL) {
            perror("calloc");
            exit(EXIT_FAILURE);
        }
        list->num = i;
        append_lnklist(list);
    }
}

/* Append a node at the tail; returns the node just appended. */
LNKLIST *append_lnklist(LNKLIST *list)
{
    ll_tail->next = list;   /* hook the new node behind the current tail */
    ll_tail = list;         /* update the tail pointer */
    list->next = NULL;
    return list;
}

/* Traverse the list, spawning one task per node. */
void walk_lnklist(void)
{
    int i;
    LNKLIST *list;

    for (i = 0, list = ll_head; list != NULL; i++, list = list->next) {
        /* i and list are private to the traversing thread, so they default
         * to firstprivate inside the task: each task sees a stable snapshot
         * of the node it was created for */
#pragma omp task firstprivate(i)
        {
#ifdef _OPENMP
            /* %p requires a void* argument per the C standard */
            printf("[%02d : %d] (%p -> %p)\n", i, omp_get_thread_num(),
                   (void *)list, (void *)list->next);
#else
            printf("[%02d] (%p -> %p)\n", i, (void *)list, (void *)list->next);
#endif
            sleep(1);
        }
    }
}
ConvolutionUnfold.h
#pragma once

#include <string.h>
#include <math.h>
#include <algorithm>

#include "General.h"
#include "TensorRef.h"
#include "Vector-inl.h"

// Exported C entry points; the template functions below do the actual work.
OPS_API int TS_Unfolded_Copy(
	TensorRef* finput,
	TensorRef* input,
	int kW, int kH, int dW, int dH, int padW, int padH,
	int nInputPlane, int inputWidth, int inputHeight, int outputWidth, int outputHeight);

OPS_API int TS_Unfolded_Acc(
	TensorRef *finput,
	TensorRef *input,
	int kW, int kH, int dW, int dH, int padW, int padH,
	int nInputPlane, int inputWidth, int inputHeight, int outputWidth, int outputHeight);

OPS_API int TS_Softmax(
	TensorRef* out_,
	TensorRef* in_,
	int rows, int cols);

OPS_API int TS_SoftmaxGrad(
	TensorRef* grad_,
	TensorRef* adj_,
	TensorRef* val_,
	int rows, int cols, bool addGrad);

// Row-wise softmax: out[j,i] = exp(in[j,i] - rowmax) / sum(exp(...)).
// Subtracting the row maximum keeps the exponentials from overflowing.
// Assumes out/in buffers are contiguous rows x cols arrays of T.
// NOTE(review): expf computes in float even when T is double — consider
// std::exp for full precision; left unchanged here.
template<typename T>
void Softmax(TensorRef* out, TensorRef* in, int rows, int cols)
{
	T * pOut = (T*)out->buffer;
	T * pIn = (T*)in->buffer;
	for (int j = 0; j < rows; ++j)
	{
		T * so = pOut + j * cols;
		T * sp = pIn + j * cols;
		// find the row maximum for numerical stability
		T max = sp[0];
		for (int i = 1; i < cols; ++i)
			max = std::max(max, sp[i]);
		// exponentiate and accumulate the normalizer
		T sum = 0.f;
		for (int i = 0; i < cols; ++i)
		{
			T ex = expf(sp[i] - max);
			so[i] = ex;
			sum += ex;
		}
		// normalize so each row sums to 1
		for (int i = 0; i < cols; ++i)
		{
			so[i] /= sum;
		}
	}
}

// Row-wise softmax backward: grad[j,i] (+)= val[j,i] * (adj[j,i] - dot(val_row, adj_row)).
// val holds the forward softmax outputs, adj the incoming gradient;
// addGrad selects accumulate (+=) vs overwrite (=) semantics.
template<typename T>
void SoftmaxGrad(TensorRef* grad_, TensorRef* adj_, TensorRef* val_, int rows, int cols, bool addGrad)
{
	T * grad = (T*)grad_->buffer;
	T * adj = (T*)adj_->buffer;
	T * val = (T*)val_->buffer;
	for (int j = 0; j < rows; ++j)
	{
		T * gradRow = grad + j * cols;
		T * adjRow = adj + j * cols;
		T * valRow = val + j * cols;
		// row-wise dot product of softmax outputs and incoming gradient
		T sum = 0.f;
		for (int i = 0; i < cols; ++i)
		{
			sum += valRow[i] * adjRow[i];
		}
		for (int i = 0; i < cols; ++i)
		{
			if (addGrad)
			{
				gradRow[i] += valRow[i] * (adjRow[i] - sum);
			}
			else
			{
				gradRow[i] = valRow[i] * (adjRow[i] - sum);
			}
		}
	}
}

// Fold the unfolded (im2col) matrix `finput` back into `input`, summing
// overlapping contributions (col2im).  Inverse of unfolded_copy.
// note: due to write issues, this one cannot be parallelized as well as unfolded_copy
// (parallelizing over planes avoids concurrent writes to the same input pixel).
template<typename T>
void unfolded_acc(
	TensorRef *finput,
	TensorRef *input,
	int kW, int kH, int dW, int dH, int padW, int padH,
	int nInputPlane, int inputWidth, int inputHeight, int outputWidth, int outputHeight)
{
	size_t nip;
	T *input_data = (T*)input->buffer;
	T *finput_data = (T*)finput->buffer;
#pragma omp parallel for private(nip)
	for (nip = 0; nip < nInputPlane; nip++)
	{
		size_t kw, kh, y, x;
		__int64 ix = 0, iy = 0;  // signed: may go negative for padded borders
		for (kh = 0; kh < kH; kh++)
		{
			for (kw = 0; kw < kW; kw++)
			{
				// src: row of the unfolded matrix for (plane, kh, kw);
				// dst: the plane's image in the input tensor
				T *src = finput_data + nip*(kH*kW*outputHeight*outputWidth) + kh*(kW*outputHeight*outputWidth) + kw*(outputHeight*outputWidth);
				T *dst = input_data + nip*(inputHeight*inputWidth);
				if (padW > 0 || padH > 0)
				{
					size_t lpad, rpad;
					for (y = 0; y < outputHeight; y++)
					{
						iy = (__int64)(y*dH - padH + kh);
						if (iy < 0 || iy >= inputHeight)
						{
							// source row falls entirely in the padding: nothing to accumulate
						}
						else
						{
							if (dW == 1)
							{
								// stride 1: accumulate a whole contiguous row at once,
								// clipped by the left/right padding
								ix = (__int64)(0 - padW + kw);
								// NOTE(review): padW - kw mixes int with size_t, so the
								// subtraction wraps when kw > padW — verify lpad/rpad
								// against the original Torch int arithmetic
								lpad = std::max(size_t(0), (padW - kw));
								rpad = std::max(size_t(0), (padW - (kW - kw - 1)));
								Vector_add<T>(dst + (size_t)(iy*inputWidth + ix + lpad), src + (size_t)(y*outputWidth + lpad), 1, outputWidth - lpad - rpad);
							}
							else
							{
								// general stride: accumulate element by element,
								// skipping columns that land in the padding
								for (x = 0; x<outputWidth; x++)
								{
									ix = (__int64)(x*dW - padW + kw);
									if (ix < 0 || ix >= inputWidth)
									{
									}
									else
										Vector_add<T>(dst + (size_t)(iy*inputWidth + ix), src + (size_t)(y*outputWidth + x), 1, 1);
								}
							}
						}
					}
				}
				else
				{
					// no padding: every position maps inside the input image
					for (y = 0; y < outputHeight; y++)
					{
						iy = (__int64)(y*dH + kh);
						ix = (__int64)(0 + kw);
						if (dW == 1)
							Vector_add<T>(dst + (size_t)(iy*inputWidth + ix), src + (size_t)(y*outputWidth), 1, outputWidth);
						else
						{
							for (x = 0; x < outputWidth; x++)
								Vector_add<T>(dst + (size_t)(iy*inputWidth + ix + x*dW), src + (size_t)(y*outputWidth + x), 1, 1);
						}
					}
				}
			}
		}
	}
}

// Unfold `input` into the im2col matrix `finput`: each (plane, kh, kw)
// triple gets one outputHeight x outputWidth row block, with out-of-bounds
// (padding) positions zero-filled.  Parallelized over the row blocks since
// every iteration writes a disjoint region of finput.
template<typename T>
void unfolded_copy(TensorRef *finput, TensorRef *input,
	int kW, int kH, int dW, int dH, int padW, int padH,
	int nInputPlane, int inputWidth, int inputHeight, int outputWidth, int outputHeight)
{
	long k;
	T *input_data = (T*)input->buffer;
	T *finput_data = (T*)finput->buffer;
#pragma omp parallel for private(k)
	for (k = 0; k < nInputPlane*kH*kW; k++)
	{
		// decode the flat index into (plane, kernel row, kernel col)
		size_t nip = k / (kH*kW);
		size_t rest = k % (kH*kW);
		size_t kh = rest / kW;
		size_t kw = rest % kW;
		size_t x, y;
		__int64 ix, iy;  // signed: may go negative for padded borders
		T *dst = finput_data + nip*(kH*kW*outputHeight*outputWidth) + kh*(kW*outputHeight*outputWidth) + kw*(outputHeight*outputWidth);
		T *src = input_data + nip*(inputHeight*inputWidth);
		if (padW > 0 || padH > 0)
		{
			size_t lpad, rpad;
			for (y = 0; y < outputHeight; y++)
			{
				iy = (__int64)(y*dH - padH + kh);
				if (iy < 0 || iy >= inputHeight)
				{
					// whole row comes from the padding: zero-fill it
					memset(dst + y*outputWidth, 0, sizeof(T)*outputWidth);
				}
				else
				{
					if (dW == 1)
					{
						// stride 1: one contiguous memcpy with zeroed pad fringes
						ix = (__int64)(0 - padW + kw);
						// NOTE(review): padW - kw mixes int with size_t, so the
						// subtraction wraps when kw > padW; the unsigned
						// "outputWidth - rpad - lpad <= 0" guard below can then
						// never trigger — verify against the original int version
						lpad = std::max(size_t(0), (padW - kw));
						rpad = std::max(size_t(0), (padW - (kW - kw - 1)));
						if (outputWidth - rpad - lpad <= 0)
						{
							memset(dst + (size_t)(y*outputWidth), 0, sizeof(T)*outputWidth);
						}
						else
						{
							if (lpad > 0) memset(dst + y*outputWidth, 0, sizeof(T)*lpad);
							memcpy(dst + (size_t)(y*outputWidth + lpad), src + (size_t)(iy*inputWidth + ix + lpad), sizeof(T)*(outputWidth - rpad - lpad));
							if (rpad > 0) memset(dst + y*outputWidth + outputWidth - rpad, 0, sizeof(T)*rpad);
						}
					}
					else
					{
						// general stride: copy element by element, zeroing
						// columns that fall in the padding
						for (x = 0; x<outputWidth; x++)
						{
							ix = (__int64)(x*dW - padW + kw);
							if (ix < 0 || ix >= inputWidth)
								memset(dst + (size_t)(y*outputWidth + x), 0, sizeof(T) * 1);
							else
								memcpy(dst + (size_t)(y*outputWidth + x), src + (size_t)(iy*inputWidth + ix), sizeof(T)*(1));
						}
					}
				}
			}
		}
		else
		{
			// no padding: every position maps inside the input image
			for (y = 0; y < outputHeight; y++)
			{
				iy = (__int64)(y*dH + kh);
				ix = (__int64)(0 + kw);
				if (dW == 1)
					memcpy(dst + (size_t)(y*outputWidth), src + (size_t)(iy*inputWidth + ix), sizeof(T)*outputWidth);
				else
				{
					for (x = 0; x<outputWidth; x++)
						memcpy(dst + (size_t)(y*outputWidth + x), src + (size_t)(iy*inputWidth + ix + x*dW), sizeof(T)*(1));
				}
			}
		}
	}
}
CPhotoconsistencyOdometryBiObjective.h
/* * Photoconsistency-Visual-Odometry * Multiscale Photoconsistency Visual Odometry from RGBD Images * Copyright (c) 2012-2013, Miguel Algaba Borrego * * http://code.google.com/p/photoconsistency-visual-odometry/ * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the holder(s) nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ #ifndef _CPHOTOCONSISTENCY_ODOMETRY_BIOBJECTIVE_ #define _CPHOTOCONSISTENCY_ODOMETRY_BIOBJECTIVE_ #define ENABLE_GAUSSIAN_BLUR 1 #define ENABLE_BOX_FILTER_BLUR 0 #define ENABLE_OPENMP_MULTITHREADING 0 #define ENABLE_PRINT_CONSOLE_OPTIMIZATION_PROGRESS 0 #include "CPhotoconsistencyOdometry.h" #include "opencv2/highgui/highgui.hpp" #include "opencv2/contrib/contrib.hpp" //TickMeter #include <iostream> namespace phovo { namespace Analytic { /*!This class computes the rigid (6DoF) transformation that best aligns a pair of RGBD frames using a photoconsistency maximization approach. To estimate the rigid transformation, this class implements a coarse to fine approach minimizing the photometric and depth error simultaneously. Thus, the algorithm starts finding a first pose approximation at a low resolution level and uses the estimate to initialize the optimization at greater image scales. Both the residuals and jacobians are computed analytically.*/ template< class TPixel, class TCoordinate > class CPhotoconsistencyOdometryBiObjective : public CPhotoconsistencyOdometry< TPixel, TCoordinate > { public: typedef CPhotoconsistencyOdometry< TPixel, TCoordinate > Superclass; typedef typename Superclass::CoordinateType CoordinateType; typedef typename Superclass::IntensityImageType IntensityImageType; typedef typename Superclass::DepthImageType DepthImageType; typedef typename Superclass::Matrix33Type Matrix33Type; typedef typename Superclass::Matrix44Type Matrix44Type; typedef typename Superclass::Vector6Type Vector6Type; typedef typename Superclass::Vector4Type Vector4Type; private: typedef DepthImageType InternalIntensityImageType; typedef std::vector< InternalIntensityImageType > InternalIntensityImageContainerType; typedef std::vector< DepthImageType > DepthImageContainerType; typedef std::vector< CoordinateType > CoordinateContainerType; typedef std::vector< int > IntegerContainerType; /*!Intensity (gray), depth and gradient image pyramids. 
Each pyramid has 'numOptimizationLevels' levels.*/ InternalIntensityImageContainerType m_IntensityPyramid0; InternalIntensityImageContainerType m_IntensityPyramid1; DepthImageContainerType m_DepthPyramid0; DepthImageContainerType m_DepthPyramid1; InternalIntensityImageContainerType m_IntensityGradientXPyramid1; InternalIntensityImageContainerType m_IntensityGradientYPyramid1; DepthImageContainerType m_DepthGradientXPyramid1; DepthImageContainerType m_DepthGradientYPyramid1; /*!Camera matrix (intrinsic parameters).*/ Matrix33Type m_IntrinsicMatrix; /*!Current optimization level. Level 0 corresponds to the higher image resolution.*/ int m_OptimizationLevel; /*!Number of optimization levels.*/ int m_NumOptimizationLevels; /*!Scaling factor to update the state vector (at each level).*/ CoordinateContainerType m_LambdaOptimizationSteps; /*!Size (in pixels) of the blur filter (at each level).*/ IntegerContainerType m_BlurFilterSizes; /*!Scaling factor applied to the image gradients (at each level).*/ CoordinateContainerType m_ImageGradientsScalingFactors; /*!Maximum number of iterations for the Gauss-Newton algorithm (at each level).*/ IntegerContainerType m_MaxNumIterations; /*!Minimum gradient norm of the jacobian (at each level).*/ CoordinateContainerType m_MinGradientNorms; /*!Enable the visualization of the optimization process (only for debug).*/ bool m_VisualizeIterations; /*!State vector.*/ Vector6Type m_StateVector; //Parameter vector (x y z yaw pitch roll) /*!Gradient of the error function.*/ Vector6Type m_Gradients; /*!Current iteration at the current optimization level.*/ int m_Iteration; /*!Minimum allowed depth to consider a depth pixel valid.*/ CoordinateType m_MinDepth; /*!Maximum allowed depth to consider a depth pixel valid.*/ CoordinateType m_MaxDepth; /*!Depth component gain. 
This variable is used to scale the depth values so that depth components are similar to intensity values.*/ CoordinateType m_DepthComponentGain; template< class TImage > void BuildPyramid( const TImage & img, std::vector< TImage > & pyramid, const int levels, const bool applyBlur ) { typedef TImage ImageType; //Create space for all the images pyramid.resize( levels ); double factor = 1.; for( int level=0; level<levels; level++ ) { //Create an auxiliar image of factor times the size of the original image ImageType imgAux; if( level!=0 ) { cv::resize( img, imgAux, cv::Size(0,0), factor, factor ); } else { imgAux = img; } //Blur the resized image with different filter size depending on the current pyramid level if( applyBlur ) { int blurFilterSize = m_BlurFilterSizes[level]; #if ENABLE_GAUSSIAN_BLUR if( blurFilterSize>0 ) { cv::GaussianBlur( imgAux, imgAux, cv::Size( blurFilterSize, blurFilterSize ), 3 ); cv::GaussianBlur( imgAux, imgAux, cv::Size( blurFilterSize, blurFilterSize ), 3 ); } #elif ENABLE_BOX_FILTER_BLUR if( blurFilterSize>0 ) { cv::blur( imgAux, imgAux, cv::Size( blurFilterSize, blurFilterSize ) ); cv::blur( imgAux, imgAux, cv::Size( blurFilterSize, blurFilterSize ) ); } #endif } //Assign the resized image to the current level of the pyramid pyramid[level] = imgAux; factor = factor/2; } } void BuildIntensityDerivativesPyramids( InternalIntensityImageContainerType & imagePyramid, InternalIntensityImageContainerType & derXPyramid, InternalIntensityImageContainerType & derYPyramid) { //Compute image gradients double delta = 0.0; int ddepth = m_IntensityPyramid0[0].type(); //Create space for all the derivatives images derXPyramid.resize(imagePyramid.size()); derYPyramid.resize(imagePyramid.size()); for( size_t level=0; level<imagePyramid.size(); level++ ) { // Compute the gradient in x InternalIntensityImageType imgGray1_grad_x; cv::Scharr( imagePyramid[level], derXPyramid[level], ddepth, 1, 0, m_ImageGradientsScalingFactors[level], delta, cv::BORDER_DEFAULT 
); // Compute the gradient in y InternalIntensityImageType imgGray1_grad_y; cv::Scharr( imagePyramid[level], derYPyramid[level],ddepth, 0, 1, m_ImageGradientsScalingFactors[level], delta, cv::BORDER_DEFAULT ); } } CoordinateType MaxDepthValue( const DepthImageType & image ) const { CoordinateType maxDepth = 0; for( int r=0; r<image.rows; r++ ) { for( int c=0; c<image.cols; c++ ) { if( image( r, c ) > maxDepth ) { maxDepth = image( r, c ); } } } return maxDepth; } void BuildDepthDerivativesPyramids( DepthImageContainerType & imagePyramid, DepthImageContainerType & derXPyramid, DepthImageContainerType & derYPyramid) { //Compute image gradients double delta = 0.0; int ddepth = m_DepthPyramid0[0].type(); //Create space for all the derivatives images derXPyramid.resize(imagePyramid.size()); derYPyramid.resize(imagePyramid.size()); for( size_t level=0; level<imagePyramid.size(); level++ ) { DepthImageType imgNormalizedDepth; imagePyramid[level].convertTo( imgNormalizedDepth, ddepth, 1./m_MaxDepth ); // Compute the gradient in x cv::Scharr( imgNormalizedDepth, derXPyramid[level], ddepth, 1, 0, m_ImageGradientsScalingFactors[level], delta, cv::BORDER_DEFAULT ); // Compute the gradient in y DepthImageType imgGray1_grad_y; cv::Scharr( imgNormalizedDepth, derYPyramid[level],ddepth, 0, 1, m_ImageGradientsScalingFactors[level], delta, cv::BORDER_DEFAULT ); } } //Separated jacobians void ComputeResidualsAndJacobians( const InternalIntensityImageType & source_grayImg, const DepthImageType & source_depthImg, const InternalIntensityImageType & target_grayImg, const InternalIntensityImageType & target_depthImg, const InternalIntensityImageType & target_intensityGradXImg, const InternalIntensityImageType & target_intensityGradYImg, const DepthImageType & target_depthGradXImg, const DepthImageType & target_depthGradYImg, Numeric::RowDynamicMatrixColMajor< CoordinateType, 1 > & residuals, Numeric::RowDynamicMatrixColMajor< CoordinateType, 6 > & jacobians, InternalIntensityImageType & 
warped_source_grayImage) { int nRows = source_grayImg.rows; int nCols = source_grayImg.cols; CoordinateType scaleFactor = 1.0/pow(2,m_OptimizationLevel); CoordinateType fx = m_IntrinsicMatrix(0,0)*scaleFactor; CoordinateType fy = m_IntrinsicMatrix(1,1)*scaleFactor; CoordinateType ox = m_IntrinsicMatrix(0,2)*scaleFactor; CoordinateType oy = m_IntrinsicMatrix(1,2)*scaleFactor; CoordinateType inv_fx = 1.f/fx; CoordinateType inv_fy = 1.f/fy; CoordinateType x = m_StateVector(0); CoordinateType y = m_StateVector(1); CoordinateType z = m_StateVector(2); CoordinateType yaw = m_StateVector(3); CoordinateType pitch = m_StateVector(4); CoordinateType roll = m_StateVector(5); //Compute the rigid transformation matrix from the parameters Matrix44Type Rt = Matrix44Type::Identity(); CoordinateType sin_yaw = sin(yaw); CoordinateType cos_yaw = cos(yaw); CoordinateType sin_pitch = sin(pitch); CoordinateType cos_pitch = cos(pitch); CoordinateType sin_roll = sin(roll); CoordinateType cos_roll = cos(roll); Rt(0,0) = cos_yaw * cos_pitch; Rt(0,1) = cos_yaw * sin_pitch * sin_roll - sin_yaw * cos_roll; Rt(0,2) = cos_yaw * sin_pitch * cos_roll + sin_yaw * sin_roll; Rt(0,3) = x; Rt(1,0) = sin_yaw * cos_pitch; Rt(1,1) = sin_yaw * sin_pitch * sin_roll + cos_yaw * cos_roll; Rt(1,2) = sin_yaw * sin_pitch * cos_roll - cos_yaw * sin_roll; Rt(1,3) = y; Rt(2,0) = -sin_pitch; Rt(2,1) = cos_pitch * sin_roll; Rt(2,2) = cos_pitch * cos_roll; Rt(2,3) = z; Rt(3,0) = 0.0; Rt(3,1) = 0.0; Rt(3,2) = 0.0; Rt(3,3) = 1.0; m_DepthComponentGain = cv::mean( target_grayImg ).val[0] / cv::mean( target_depthImg ).val[0]; #if ENABLE_OPENMP_MULTITHREADING #pragma omp parallel for #endif for (int r=0;r<nRows;r++) { for (int c=0;c<nCols;c++) { int i = nCols*r+c; //vector index //Compute the 3D coordinates of the pij of the source frame Vector4Type point3D; point3D(2) = source_depthImg( r, c ); if( m_MinDepth < point3D(2) && point3D(2) < m_MaxDepth) //Compute the jacobian only for the valid points { point3D(0) = (c - ox) * 
point3D(2) * inv_fx; point3D(1) = (r - oy) * point3D(2) * inv_fy; point3D(3) = 1.0; CoordinateType px = point3D(0); CoordinateType py = point3D(1); CoordinateType pz = point3D(2); //Transform the 3D point using the transformation matrix Rt Vector4Type transformedPoint3D = Rt*point3D; //Project the 3D point to the 2D plane CoordinateType inv_transformedPz = 1.0 / transformedPoint3D(2); CoordinateType transformed_c = (transformedPoint3D(0) * fx) * inv_transformedPz + ox; //transformed x (2D) CoordinateType transformed_r = (transformedPoint3D(1) * fy) * inv_transformedPz + oy; //transformed y (2D) int transformed_r_int = static_cast< int >( round( transformed_r ) ); int transformed_c_int = static_cast< int >( round( transformed_c ) ); //Asign the intensity value to the warped image and compute the difference between the transformed //pixel of frame 1 and the corresponding pixel of frame 2. Compute the error function if( ( transformed_r_int >= 0 && transformed_r_int < nRows ) & ( transformed_c_int >= 0 && transformed_c_int < nCols ) ) { //Obtain the pixel values that will be used to compute the intensity residual CoordinateType intensity1; //Intensity value of the pixel(r,c) of the warped frame 1 CoordinateType intensity2; //Intensity value of the pixel(r,c) of frame 2 intensity1 = source_grayImg( r, c ); intensity2 = target_grayImg( transformed_r_int, transformed_c_int ); //Obtain the depth values that will be used to the compute the depth residual CoordinateType depth1; //Depth value of the pixel(r,c) of the warped frame 1 CoordinateType depth2; //Depth value of the pixel(r,c) of frame 2 depth1 = source_depthImg( r, c ); depth2 = target_depthImg( transformed_r_int, transformed_c_int ); //Compute the rigid transformation jacobian Numeric::FixedMatrixRowMajor< CoordinateType, 3, 6 > jacobianRt; //Derivative with respect to x jacobianRt(0,0) = 1.; jacobianRt(1,0) = 0.; jacobianRt(2,0) = 0.; //Derivative with respect to y jacobianRt(0,1) = 0.; jacobianRt(1,1) = 1.; 
jacobianRt(2,1) = 0.; //Derivative with respect to z jacobianRt(0,2) = 0.; jacobianRt(1,2) = 0.; jacobianRt(2,2) = 1.; //Derivative with respect to yaw jacobianRt(0,3) = py*(-sin(pitch)*sin(roll)*sin(yaw)-cos(roll)*cos(yaw))+pz*(sin(roll)*cos(yaw)-sin(pitch)*cos(roll)*sin(yaw))-cos(pitch)*px*sin(yaw); jacobianRt(1,3) = pz*(sin(roll)*sin(yaw)+sin(pitch)*cos(roll)*cos(yaw))+py*(sin(pitch)*sin(roll)*cos(yaw)-cos(roll)*sin(yaw))+cos(pitch)*px*cos(yaw); jacobianRt(2,3) = 0.; //Derivative with respect to pitch jacobianRt(0,4) = cos(pitch)*py*sin(roll)*cos(yaw)+cos(pitch)*pz*cos(roll)*cos(yaw)-sin(pitch)*px*cos(yaw); jacobianRt(1,4) = cos(pitch)*py*sin(roll)*sin(yaw)+cos(pitch)*pz*cos(roll)*sin(yaw)-sin(pitch)*px*sin(yaw); jacobianRt(2,4) = -sin(pitch)*py*sin(roll)-sin(pitch)*pz*cos(roll)-cos(pitch)*px; //Derivative with respect to roll jacobianRt(0,5) = py*(sin(roll)*sin(yaw)+sin(pitch)*cos(roll)*cos(yaw))+pz*(cos(roll)*sin(yaw)-sin(pitch)*sin(roll)*cos(yaw)); jacobianRt(1,5) = pz*(-sin(pitch)*sin(roll)*sin(yaw)-cos(roll)*cos(yaw))+py*(sin(pitch)*cos(roll)*sin(yaw)-sin(roll)*cos(yaw)); jacobianRt(2,5) = cos(pitch)*py*cos(roll)-cos(pitch)*pz*sin(roll); //Compute the proyective transformation jacobian Numeric::FixedMatrixRowMajor< CoordinateType, 2, 3 > jacobianProy; //Derivative with respect to x jacobianProy(0,0) = fx*inv_transformedPz; jacobianProy(1,0) = 0.; //Derivative with respect to y jacobianProy(0,1) = 0.; jacobianProy(1,1) = fy*inv_transformedPz; //Derivative with respect to z jacobianProy(0,2) = -(fx*transformedPoint3D(0))*inv_transformedPz*inv_transformedPz; jacobianProy(1,2) = -(fy*transformedPoint3D(1))*inv_transformedPz*inv_transformedPz; //Intensity jacobian: //Apply the chain rule to compound the intensity gradients with the projective+RigidTransform jacobians Numeric::FixedRowVector< CoordinateType, 2 > target_intensityGradient; target_intensityGradient(0,0) = target_intensityGradXImg(i); target_intensityGradient(0,1) = target_intensityGradYImg(i); 
Numeric::FixedRowVector< CoordinateType, 6 > jacobianItensity = target_intensityGradient*jacobianProy*jacobianRt; //Depth jacobian: //Apply the chain rule to compound the depth gradients with the projective+RigidTransform jacobians Numeric::FixedRowVector< CoordinateType, 2 > target_depthGradient; target_depthGradient(0,0) = target_depthGradXImg(i); target_depthGradient(0,1) = target_depthGradYImg(i); Numeric::FixedRowVector< CoordinateType, 6 > jacobianRt_z; jacobianRt_z(0,0) = jacobianRt(2,0); jacobianRt_z(0,1) = jacobianRt(2,1); jacobianRt_z(0,2) = jacobianRt(2,2); jacobianRt_z(0,3) = jacobianRt(2,3); jacobianRt_z(0,4) = jacobianRt(2,4); jacobianRt_z(0,5) = jacobianRt(2,5); Numeric::FixedRowVector< CoordinateType, 6 > jacobianDepth = m_DepthComponentGain * ( target_depthGradient * jacobianProy * jacobianRt - jacobianRt_z ); //Assign the pixel residual and jacobian to its corresponding row //Assign intensity jacobians jacobians(i,0) = jacobianItensity(0,0); jacobians(i,1) = jacobianItensity(0,1); jacobians(i,2) = jacobianItensity(0,2); jacobians(i,3) = jacobianItensity(0,3); jacobians(i,4) = jacobianItensity(0,4); jacobians(i,5) = jacobianItensity(0,5); //Assign intensity residuals residuals( nCols * transformed_r_int + transformed_c_int , 0 ) = intensity2 - intensity1; //Assign depth jacobians jacobians( 2*i, 0 ) = jacobianDepth(0,0); jacobians( 2*i, 1 ) = jacobianDepth(0,1); jacobians( 2*i, 2 ) = jacobianDepth(0,2); jacobians( 2*i, 3 ) = jacobianDepth(0,3); jacobians( 2*i, 4 ) = jacobianDepth(0,4); jacobians( 2*i, 5 ) = jacobianDepth(0,5); //Assign depth residuals residuals (nCols * 2 * transformed_r_int + 2 * transformed_c_int, 0 ) = m_DepthComponentGain * ( depth2 - depth1 ); if( m_VisualizeIterations ) { warped_source_grayImage( transformed_r_int, transformed_c_int ) = intensity1; } } } } } } enum TerminationCriteriaType { NonTerminated = -1, MaxIterationsReached = 0, GradientNormLowerThanThreshold = 1 }; bool TestTerminationCriteria() const { bool 
// NOTE(review): this chunk is the interior of class CPhotoconsistencyOdometryBiObjective;
// the class header and the signature of TestTerminationCriteria() lie outside this view.
// Code below is unchanged; only comments/formatting were touched.

// --- tail of TestTerminationCriteria(): decides whether the iterative optimization
// --- at the current pyramid level should stop, and optionally reports why.
optimizationFinished = false;
CoordinateType gradientNorm = m_Gradients.norm();
TerminationCriteriaType terminationCriteria = NonTerminated;
// Stop either when the iteration budget for this level is spent...
if( m_Iteration >= m_MaxNumIterations[ m_OptimizationLevel ] )
{
  terminationCriteria = MaxIterationsReached;
  optimizationFinished = true;
}
// ...or when the gradient is small enough to call it converged.
else if( gradientNorm < m_MinGradientNorms[ m_OptimizationLevel ] )
{
  terminationCriteria = GradientNormLowerThanThreshold;
  optimizationFinished = true;
}
if( optimizationFinished )
{
#if ENABLE_PRINT_CONSOLE_OPTIMIZATION_PROGRESS
  std::cout << "----------------------------------------" << std::endl;
  std::cout << "Optimization level: " << m_OptimizationLevel << std::endl;
  std::cout << "Termination criteria: ";
#endif
  switch( terminationCriteria )
  {
  case MaxIterationsReached:
#if ENABLE_PRINT_CONSOLE_OPTIMIZATION_PROGRESS
    // NOTE(review): stray second ';' after std::endl below — harmless, should be removed.
    std::cout << " Max number of iterations reached (" << m_MaxNumIterations[ m_OptimizationLevel ] << ")" << std::endl;;
#endif
    break;
  case GradientNormLowerThanThreshold:
#if ENABLE_PRINT_CONSOLE_OPTIMIZATION_PROGRESS
    std::cout << " Gradient norm is lower than threshold (" << m_MinGradientNorms[ m_OptimizationLevel ] << ")" << std::endl;
#endif
    break;
  default :
    break;
  }
#if ENABLE_PRINT_CONSOLE_OPTIMIZATION_PROGRESS
  std::cout << "Number iterations: " << m_Iteration << std::endl;
  std::cout << "gradient norm: " << gradientNorm << std::endl;
  std::cout << "----------------------------------------" << std::endl;
#endif
}
return optimizationFinished;
}

public:

// Constructor: sets per-pyramid-level defaults. Levels 0 and 1 get 0 iterations
// (i.e. they are skipped); coarser levels 2..4 get 5/20/50 iterations.
CPhotoconsistencyOdometryBiObjective() :
  m_MinDepth( 0.3 ),
  m_MaxDepth( 5.0 )
{
  m_StateVector.setZero();
  m_NumOptimizationLevels = 5;
  m_BlurFilterSizes.resize( m_NumOptimizationLevels, 0 );
  m_ImageGradientsScalingFactors.resize( m_NumOptimizationLevels, 0.0625 );
  m_LambdaOptimizationSteps.resize( m_NumOptimizationLevels, 1. );
  m_MaxNumIterations.resize( m_NumOptimizationLevels, 0 );
  m_MaxNumIterations[ 2 ] = 5;
  m_MaxNumIterations[ 3 ] = 20;
  m_MaxNumIterations[ 4 ] = 50;
  m_MinGradientNorms.resize( m_NumOptimizationLevels, 300. );
  m_VisualizeIterations = false;
}

~CPhotoconsistencyOdometryBiObjective(){};

/*!Sets the minimum depth distance (m) to consider a certain pixel valid.*/
void SetMinDepth( const CoordinateType minD )
{
  m_MinDepth = minD;
}

/*!Sets the maximum depth distance (m) to consider a certain pixel valid.*/
void SetMaxDepth( const CoordinateType maxD )
{
  m_MaxDepth = maxD;
}

/*!Sets the 3x3 intrinsic camera matrix*/
void SetIntrinsicMatrix( const Matrix33Type & intrinsicMatrix )
{
  m_IntrinsicMatrix = intrinsicMatrix;
}

/*!Sets the source (Intensity+Depth) frame.*/
void SetSourceFrame( const IntensityImageType & intensityImage,
                     const DepthImageType & depthImage )
{
  // Create an auxiliary image from the input image (converted to the depth
  // image's element type and rescaled from [0,255] to [0,1]).
  InternalIntensityImageType intensityImageAux;
  intensityImage.convertTo( intensityImageAux, depthImage.type(), 1./255 );

  // Compute image pyramids for the grayscale and depth images
  BuildPyramid( intensityImageAux, m_IntensityPyramid0, m_NumOptimizationLevels, true );
  BuildPyramid( depthImage, m_DepthPyramid0, m_NumOptimizationLevels, false ); //TODO: Do not apply low-pass filtering to depth image
}

/*!Sets the target (Intensity+Depth) frame. Also precomputes the target
   intensity/depth gradient pyramids used by the optimization.*/
// NOTE(review): the original doc comment here said "Sets the source frame.
// Depth image is ignored", which contradicts the code: this sets the TARGET
// frame and does build pyramids from the depth image.
void SetTargetFrame( const IntensityImageType & intensityImage,
                     const DepthImageType & depthImage )
{
  // Create an auxiliary image from the input image
  InternalIntensityImageType intensityImageAux;
  intensityImage.convertTo( intensityImageAux, depthImage.type(), 1./255 );

  // Compute image pyramids for the grayscale and depth images
  BuildPyramid( intensityImageAux, m_IntensityPyramid1, m_NumOptimizationLevels, true );
  BuildPyramid( depthImage, m_DepthPyramid1, m_NumOptimizationLevels, false ); //TODO: Do not apply low-pass filtering to depth image

  // Compute image pyramids for the gradients images
  BuildIntensityDerivativesPyramids( m_IntensityPyramid1, m_IntensityGradientXPyramid1, m_IntensityGradientYPyramid1 );
  BuildDepthDerivativesPyramids( m_DepthPyramid1, m_DepthGradientXPyramid1, m_DepthGradientYPyramid1 );
}

/*!Initializes the state vector to a certain value. The optimization process uses
   the initial state vector as the initial estimate.*/
void SetInitialStateVector( const Vector6Type & initialStateVector )
{
  m_StateVector = initialStateVector;
}

/*!Launches the least-squares optimization process to find the configuration of the
   state vector parameters that maximizes the photoconsistency between the source and
   target frame. Iterates coarse-to-fine over the pyramid levels; at each level runs
   Gauss-Newton-style updates until TestTerminationCriteria() fires.*/
void Optimize()
{
  // Coarse-to-fine: start at the smallest pyramid level and refine downwards.
  for( m_OptimizationLevel = m_NumOptimizationLevels-1; m_OptimizationLevel >= 0; m_OptimizationLevel-- )
  {
    int nRows = m_IntensityPyramid0[ m_OptimizationLevel ].rows;
    int nCols = m_IntensityPyramid0[ m_OptimizationLevel ].cols;
    int nPoints = nRows * nCols;
    m_Iteration = 0;
    while(true)
    {
#if ENABLE_PRINT_CONSOLE_OPTIMIZATION_PROGRESS
      cv::TickMeter tm;tm.start();
#endif
      InternalIntensityImageType warpedSourceIntensityImage;
      if( m_VisualizeIterations )
        warpedSourceIntensityImage = InternalIntensityImageType::zeros( nRows, nCols );

      // Two residuals per pixel (intensity + depth), hence 2*nPoints rows.
      Numeric::RowDynamicMatrixColMajor< CoordinateType, 1 > residuals;
      residuals.resize( 2*nPoints, Eigen::NoChange );
      residuals.setZero();
      Numeric::RowDynamicMatrixColMajor< CoordinateType, 6 > jacobians;
      jacobians.resize( 2*nPoints, Eigen::NoChange );
      jacobians.setZero();

      if( m_MaxNumIterations[ m_OptimizationLevel ] > 0 ) //compute only if the number of maximum iterations are greater than 0
      {
        ComputeResidualsAndJacobians( m_IntensityPyramid0[ m_OptimizationLevel ],
                                      m_DepthPyramid0[ m_OptimizationLevel ],
                                      m_IntensityPyramid1[ m_OptimizationLevel ],
                                      m_DepthPyramid1[ m_OptimizationLevel ],
                                      m_IntensityGradientXPyramid1[ m_OptimizationLevel ],
                                      m_IntensityGradientYPyramid1[ m_OptimizationLevel ],
                                      m_DepthGradientXPyramid1[ m_OptimizationLevel ],
                                      m_DepthGradientYPyramid1[ m_OptimizationLevel ],
                                      residuals, jacobians,
                                      warpedSourceIntensityImage );

        // Gauss-Newton step scaled by the per-level lambda:
        //   x <- x - lambda * (J^T J)^-1 * J^T r
        // NOTE(review): (J^T J).inverse() with no regularization/rank check —
        // presumably fine for well-textured frames; verify for degenerate input.
        m_Gradients = jacobians.transpose()*residuals;
        m_StateVector = m_StateVector -
          m_LambdaOptimizationSteps[ m_OptimizationLevel ] *
          ((jacobians.transpose()*jacobians).inverse() * m_Gradients );

#if ENABLE_PRINT_CONSOLE_OPTIMIZATION_PROGRESS
        tm.stop(); std::cout << "Iteration time = " << tm.getTimeSec() << " sec." << std::endl;
#endif
      }

      m_Iteration++;
      if( TestTerminationCriteria() ){break;}

      if( m_VisualizeIterations )
      {
        InternalIntensityImageType imgDiff = InternalIntensityImageType::zeros( nRows, nCols );
        cv::absdiff( m_IntensityPyramid1[ m_OptimizationLevel ], warpedSourceIntensityImage, imgDiff );
        cv::imshow("optimize::imgDiff",imgDiff);
        cv::waitKey(0);
      }
    }
  }

  //After all the optimization process the optimization level is 0
  m_OptimizationLevel = 0;
}

/*!Returns the optimal state vector. This method has to be called after calling the Optimize() method.*/
Vector6Type GetOptimalStateVector() const
{
  return m_StateVector;
}

/*!Returns the optimal 4x4 rigid transformation matrix between the source and target frame.
   This method has to be called after calling the Optimize() method.*/
Matrix44Type GetOptimalRigidTransformationMatrix() const
{
  Matrix44Type Rt;
  eigenPose( m_StateVector(0), m_StateVector(1), m_StateVector(2),
             m_StateVector(3), m_StateVector(4), m_StateVector(5), Rt );
  return Rt;
}

/*!Reads the configuration parameters from a .yml file.*/
void ReadConfigurationFile( const std::string & fileName )
{
  cv::FileStorage fs( fileName, cv::FileStorage::READ );

  //Read the number of optimization levels
  fs["numOptimizationLevels"] >> m_NumOptimizationLevels;

#if ENABLE_GAUSSIAN_BLUR || ENABLE_BOX_FILTER_BLUR
  //Read the blur filter size at every pyramid level
  fs["blurFilterSize (at each level)"] >> m_BlurFilterSizes;
#endif

  //Read the scaling factor for each gradient image at each level
  fs["imageGradientsScalingFactor (at each level)"] >> m_ImageGradientsScalingFactors;

  //Read the lambda factor to change the optimization step
  fs["lambda_optimization_step (at each level)"] >> m_LambdaOptimizationSteps;

  //Read the number of Levenberg-Marquardt iterations at each optimization level
  fs["max_num_iterations (at each level)"] >> m_MaxNumIterations;

  //Read optimizer minimum gradient norm at each level
  fs["min_gradient_norm (at each level)"] >> m_MinGradientNorms;

  //Read the boolean value to determine if visualize the progress images or not
  fs["visualizeIterations"] >> m_VisualizeIterations;
}

};

} //end namespace Analytic
} //end namespace phovo

#endif
jacobi-sse.c
#include <immintrin.h> void kernel(double* v1, double * v2, int m) { __m128d alpha = _mm_set1_pd(0.25); // __m128d phi_e = _mm_loadu_pd (v1 + 1 ); __m128d phi_w = _mm_loadu_pd (v1 - 1 ); __m128d phi_n = _mm_loadu_pd (v1 + m); __m128d phi_s = _mm_loadu_pd (v1 - m); // phi_e = _mm_add_pd(phi_e, phi_s); phi_e = _mm_add_pd(phi_e, phi_n); phi_e = _mm_add_pd(phi_e, phi_w); phi_e = _mm_mul_pd(alpha, phi_e); // _mm_storeu_pd(v2, phi_e); } void laplacian(double* v1, double* v2, int dim_m, int dim_n) { int m = dim_m; // #pragma omp parallel for //schedule(static) for (int j = 1; j < dim_m - 1; ++j ) { for (int i = 1; i < dim_n - 1 - (dim_n - 1)%2; i = i + 2) { kernel(v1 + j*dim_m + i, v2 + j*dim_m + i, dim_m); } } }
ast-dump-openmp-task.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -fopenmp-version=50 -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s typedef unsigned long omp_event_handle_t; void test() { omp_event_handle_t evt; #pragma omp task detach(evt) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: `-FunctionDecl {{.*}} <line:4:1, line:8:1> line:4:6 test 'void ()' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:8:1> // CHECK: `-OMPTaskDirective {{.*}} <line:6:1, col:29> // CHECK-NEXT: |-OMPDetachClause {{.+}} <col:18, col:28> // CHECK-NEXT: | `-DeclRefExpr {{.+}} <col:25> 'omp_event_handle_t':'unsigned long' lvalue Var {{.+}} 'evt' 'omp_event_handle_t':'unsigned long' // CHECK-NEXT: |-OMPFirstprivateClause {{.+}} <<invalid sloc>> <implicit> // CHECK-NEXT: | `-DeclRefExpr {{.+}} <col:25> 'omp_event_handle_t':'unsigned long' lvalue Var {{.+}} 'evt' 'omp_event_handle_t':'unsigned long' // CHECK-NEXT: `-CapturedStmt {{.*}} <line:7:3> // CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: |-NullStmt {{.*}} <col:3> // CHECK-NEXT: |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <line:6:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-task.c:6:1) *const restrict'
libomp_interface.h
// This file does not contain any code; it just contains additional text and formatting // for doxygen. //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /*! @mainpage LLVM&nbsp; OpenMP* Runtime Library Interface @section sec_intro Introduction This document describes the interface provided by the LLVM &nbsp;OpenMP\other runtime library to the compiler. Routines that are directly called as simple functions by user code are not currently described here, since their definition is in the OpenMP specification available from http://openmp.org The aim here is to explain the interface from the compiler to the runtime. The overall design is described, and each function in the interface has its own description. (At least, that's the ambition, we may not be there yet). @section sec_building Quickly Building the Runtime For the impatient, we cover building the runtime as the first topic here. CMake is used to build the OpenMP runtime. For details and a full list of options for the CMake build system, see <tt>README.rst</tt> in the source code repository. These instructions will provide the most typical build. In-LLVM-tree build:. 
@code $ cd where-you-want-to-live Check out openmp into llvm/projects $ cd where-you-want-to-build $ mkdir build && cd build $ cmake path/to/llvm -DCMAKE_C_COMPILER=<C compiler> -DCMAKE_CXX_COMPILER=<C++ compiler> $ make omp @endcode Out-of-LLVM-tree build: @code $ cd where-you-want-to-live Check out openmp $ cd where-you-want-to-live/openmp $ mkdir build && cd build $ cmake path/to/openmp -DCMAKE_C_COMPILER=<C compiler> -DCMAKE_CXX_COMPILER=<C++ compiler> $ make @endcode @section sec_supported Supported RTL Build Configurations The architectures supported are IA-32 architecture, Intel&reg;&nbsp; 64, and Intel&reg;&nbsp; Many Integrated Core Architecture. The build configurations supported are shown in the table below. <table border=1> <tr><th> <th>icc/icl<th>gcc<th>clang <tr><td>Linux\other OS<td>Yes(1,5)<td>Yes(2,4)<td>Yes(4,6,7) <tr><td>FreeBSD\other<td>Yes(1,5)<td>Yes(2,4)<td>Yes(4,6,7,8) <tr><td>OS X\other<td>Yes(1,3,4)<td>No<td>Yes(4,6,7) <tr><td>Windows\other OS<td>Yes(1,4)<td>No<td>No </table> (1) On IA-32 architecture and Intel&reg;&nbsp; 64, icc/icl versions 12.x are supported (12.1 is recommended).<br> (2) gcc version 4.7 is supported.<br> (3) For icc on OS X\other, OS X\other version 10.5.8 is supported.<br> (4) Intel&reg;&nbsp; Many Integrated Core Architecture not supported.<br> (5) On Intel&reg;&nbsp; Many Integrated Core Architecture, icc/icl versions 13.0 or later are required.<br> (6) Clang\other version 3.3 is supported.<br> (7) Clang\other currently does not offer a software-implemented 128 bit extended precision type. Thus, all entry points reliant on this type are removed from the library and cannot be called in the user program. The following functions are not available: @code __kmpc_atomic_cmplx16_* __kmpc_atomic_float16_* __kmpc_atomic_*_fp @endcode (8) Community contribution provided AS IS, not tested by Intel. 
Supported Architectures: IBM(R) Power 7 and Power 8 <table border=1> <tr><th> <th>gcc<th>clang <tr><td>Linux\other OS<td>Yes(1,2)<td>Yes(3,4) </table> (1) On Power 7, gcc version 4.8.2 is supported.<br> (2) On Power 8, gcc version 4.8.2 is supported.<br> (3) On Power 7, clang version 3.7 is supported.<br> (4) On Power 8, clang version 3.7 is supported.<br> @section sec_frontend Front-end Compilers that work with this RTL The following compilers are known to do compatible code generation for this RTL: icc/icl, gcc. Code generation is discussed in more detail later in this document. @section sec_outlining Outlining The runtime interface is based on the idea that the compiler "outlines" sections of code that are to run in parallel into separate functions that can then be invoked in multiple threads. For instance, simple code like this @code void foo() { #pragma omp parallel { ... do something ... } } @endcode is converted into something that looks conceptually like this (where the names used are merely illustrative; the real library function names will be used later after we've discussed some more issues...) @code static void outlinedFooBody() { ... do something ... } void foo() { __OMP_runtime_fork(outlinedFooBody, (void*)0); // Not the real function name! } @endcode @subsection SEC_SHAREDVARS Addressing shared variables In real uses of the OpenMP\other API there are normally references from the outlined code to shared variables that are in scope in the containing function. Therefore the containing function must be able to address these variables. The runtime supports two alternate ways of doing this. @subsubsection SEC_SEC_OT Current Technique The technique currently supported by the runtime library is to receive a separate pointer to each shared variable that can be accessed from the outlined function. This is what is shown in the example below. We hope soon to provide an alternative interface to support the alternate implementation described in the next section. 
The alternative implementation has performance advantages for small parallel regions that have many shared variables. @subsubsection SEC_SEC_PT Future Technique The idea is to treat the outlined function as though it were a lexically nested function, and pass it a single argument which is the pointer to the parent's stack frame. Provided that the compiler knows the layout of the parent frame when it is generating the outlined function it can then access the up-level variables at appropriate offsets from the parent frame. This is a classical compiler technique from the 1960s to support languages like Algol (and its descendants) that support lexically nested functions. The main benefit of this technique is that there is no code required at the fork point to marshal the arguments to the outlined function. Since the runtime knows statically how many arguments must be passed to the outlined function, it can easily copy them to the thread's stack frame. Therefore the performance of the fork code is independent of the number of shared variables that are accessed by the outlined function. If it is hard to determine the stack layout of the parent while generating the outlined code, it is still possible to use this approach by collecting all of the variables in the parent that are accessed from outlined functions into a single `struct` which is placed on the stack, and whose address is passed to the outlined functions. In this way the offsets of the shared variables are known (since they are inside the struct) without needing to know the complete layout of the parent stack-frame. From the point of view of the runtime either of these techniques is equivalent, since in either case it only has to pass a single argument to the outlined function to allow it to access shared variables. A scheme like this is how gcc\other generates outlined functions. 
@section SEC_INTERFACES Library Interfaces The library functions used for specific parts of the OpenMP\other language implementation are documented in different modules. - @ref BASIC_TYPES fundamental types used by the runtime in many places - @ref DEPRECATED functions that are in the library but are no longer required - @ref STARTUP_SHUTDOWN functions for initializing and finalizing the runtime - @ref PARALLEL functions for implementing `omp parallel` - @ref THREAD_STATES functions for supporting thread state inquiries - @ref WORK_SHARING functions for work sharing constructs such as `omp for`, `omp sections` - @ref THREADPRIVATE functions to support thread private data, copyin etc - @ref SYNCHRONIZATION functions to support `omp critical`, `omp barrier`, `omp master`, reductions etc - @ref ATOMIC_OPS functions to support atomic operations - @ref STATS_GATHERING macros to support developer profiling of libomp - Documentation on tasking has still to be written... @section SEC_EXAMPLES Examples @subsection SEC_WORKSHARING_EXAMPLE Work Sharing Example This example shows the code generated for a parallel for with reduction and dynamic scheduling. @code extern float foo( void ); int main () { int i; float r = 0.0; #pragma omp parallel for schedule(dynamic) reduction(+:r) for ( i = 0; i < 10; i ++ ) { r += foo(); } } @endcode The transformed code looks like this. @code extern float foo( void ); int main () { static int zero = 0; auto int gtid; auto float r = 0.0; __kmpc_begin( & loc3, 0 ); // The gtid is not actually required in this example so could be omitted; // We show its initialization here because it is often required for calls into // the runtime and should be locally cached like this. 
gtid = __kmpc_global_thread_num( & loc3 );
__kmpc_fork_call( & loc7, 1, main_7_parallel_3, & r );
__kmpc_end( & loc0 );
return 0;
}

struct main_10_reduction_t_5 { float r_10_rpr; };

static kmp_critical_name lck = { 0 };
static ident_t loc10; // loc10.flags should contain KMP_IDENT_ATOMIC_REDUCE bit set
                      // if compiler has generated an atomic reduction.

void main_7_parallel_3( int *gtid, int *btid, float *r_7_shp )
{
    auto int i_7_pr;
    auto int lower, upper, liter, incr;
    auto struct main_10_reduction_t_5 reduce;
    reduce.r_10_rpr = 0.F;
    liter = 0;
    __kmpc_dispatch_init_4( & loc7,*gtid, 35, 0, 9, 1, 1 );
    while ( __kmpc_dispatch_next_4( & loc7, *gtid, & liter, & lower, & upper, & incr ) )
    {
        for( i_7_pr = lower; upper >= i_7_pr; i_7_pr ++ )
            reduce.r_10_rpr += foo();
    }
    switch( __kmpc_reduce_nowait( & loc10, *gtid, 1, 4, & reduce, main_10_reduce_5, & lck ) )
    {
    case 1:
        *r_7_shp += reduce.r_10_rpr;
        __kmpc_end_reduce_nowait( & loc10, *gtid, & lck );
        break;
    case 2:
        __kmpc_atomic_float4_add( & loc10, *gtid, r_7_shp, reduce.r_10_rpr );
        break;
    default:;
    }
}

void main_10_reduce_5( struct main_10_reduction_t_5 *reduce_lhs,
                       struct main_10_reduction_t_5 *reduce_rhs )
{
    reduce_lhs->r_10_rpr += reduce_rhs->r_10_rpr;
}
@endcode

@defgroup BASIC_TYPES Basic Types
Types that are used throughout the runtime.

@defgroup DEPRECATED Deprecated Functions
Functions in this group are for backwards compatibility only, and
should not be used in new code.

@defgroup STARTUP_SHUTDOWN Startup and Shutdown
These functions are for library initialization and shutdown.

@defgroup PARALLEL Parallel (fork/join)
These functions are used for implementing <tt>\#pragma omp parallel</tt>.

@defgroup THREAD_STATES Thread Information
These functions return information about the currently executing thread.

@defgroup WORK_SHARING Work Sharing
These functions are used for implementing
<tt>\#pragma omp for</tt>, <tt>\#pragma omp sections</tt>, <tt>\#pragma omp single</tt> and
<tt>\#pragma omp master</tt> constructs.
When handling loops, there are different functions for each of the signed and unsigned 32 and 64 bit integer types which have the name suffixes `_4`, `_4u`, `_8` and `_8u`. The semantics of each of the functions is the same, so they are only described once. Static loop scheduling is handled by @ref __kmpc_for_static_init_4 and friends. Only a single call is needed, since the iterations to be executed by any give thread can be determined as soon as the loop parameters are known. Dynamic scheduling is handled by the @ref __kmpc_dispatch_init_4 and @ref __kmpc_dispatch_next_4 functions. The init function is called once in each thread outside the loop, while the next function is called each time that the previous chunk of work has been exhausted. @defgroup SYNCHRONIZATION Synchronization These functions are used for implementing barriers. @defgroup THREADPRIVATE Thread private data support These functions support copyin/out and thread private data. @defgroup STATS_GATHERING Statistics Gathering from OMPTB These macros support profiling the libomp library. Use --stats=on when building with build.pl to enable and then use the KMP_* macros to profile (through counts or clock ticks) libomp during execution of an OpenMP program. @section sec_stats_env_vars Environment Variables This section describes the environment variables relevant to stats-gathering in libomp @code KMP_STATS_FILE @endcode This environment variable is set to an output filename that will be appended *NOT OVERWRITTEN* if it exists. If this environment variable is undefined, the statistics will be output to stderr @code KMP_STATS_THREADS @endcode This environment variable indicates to print thread-specific statistics as well as aggregate statistics. Each thread's statistics will be shown as well as the collective sum of all threads. The values "true", "on", "1", "yes" will all indicate to print per thread statistics. @defgroup TASKING Tasking support These functions support tasking constructs. 
@defgroup USER User visible functions These functions can be called directly by the user, but are runtime library specific, rather than being OpenMP interfaces. */
omp-axpygpu.c
//
// omp-axpy.c
//
//
// Created by Yaying Shi on 10/2/19.
//
/* Manual implementation of the following AXPY OpenMP offloading version using LLVM OpenMP runtime
#include "omp-axpy.h"
void axpy(int N, float *Y, float *X, float a) {
    int i,j;
    #pragma omp target map(to:X[0:N]) map(tofrom:Y[0:N])
    #pragma omp parallel for
    for (i = 0; i < N; ++i){
        Y[i] += a * X[i];
        printf("this a tset: %f %f\n",X[i],Y[i]);
    }
}
int main(int argc, char*argv[]){
    int N = 100;
    float Y[N], X[N];
    float x = 5.0;
    for (int i = 0; i <N; i++){
        Y[i] = (((float)rand()/(float)(10)) * x);
        X[i] = (((float)rand()/(float)(10)) * x);
        printf("this is Y: %f\n",Y[i]);
    }
    float a = 0.5;
    axpy(N,&Y[0],&X[0],a);
    return 0;
}
*/
#include "omp-axpycpu.h"
//extern void __kmpc_fork_call(ident_t *, kmp_int32, kmpc_micro, ...);
//extern void __kmpc_for_static_init_4(ident_t *, kmp_int32,kmp_int32,kmp_int32 *,kmp_int32 *,kmp_int32 *,kmp_int32 *,kmp_int32,kmp_int32);
//extern void __kmpc_for_static_fini(ident_t *, kmp_int32);
//extern void __kmpc_global_thread_num(ident_t *);

// Mirror of libomptarget's offload-entry descriptor layout.
// NOTE(review): the member types below use the struct tags without the
// `struct` keyword, so this file only compiles as C++, not C — confirm the
// intended language.
struct __tgt_offload_entry { void *addr; char *name; size_t size; int32_t flags; int32_t reserved; };
struct __tgt_device_image { void *ImageStart; void *ImageEnd; __tgt_offload_entry *EntriesBegin; __tgt_offload_entry *EntriesEnd; };
struct __tgt_bin_desc { int32_t NumDeviceImages; __tgt_device_image *DeviceImages; __tgt_offload_entry *HostEntriesBegin; __tgt_offload_entry *HostEntriesEnd; };

// Outlined parallel-for body handed to __kmpc_fork_call; gtid/btid are the
// implicit thread ids, the rest are the forwarded shared arguments.
void __kmp_axpy_microtask(int *gtid, int *btid, int N, float *a, float *Y, float *X){
__attribute__((visibility("hidden"))) __tgt_offload_entry *__start_omp_offloading_entries;
__attribute__((visibility("hidden"))) __tgt_offload_entry *__stop_omp_offloading_entries;
// NOTE(review): the next statement has NO initializer — this file does not
// compile as-is; the device-image bytes were presumably elided.
const char imag[] = ;
// NOTE(review): `Image0` is never declared, and this aggregate initializer is
// missing its closing `};` — another compile error left in place.
static const __tgt_device_image Images[] = {
{ Image0, /*ImageStart*/ Image0 + sizeof(Image0), /*ImageEnd*/ __start_omp_offloading_entries, /*EntriesBegin*/ __stop_omp_offloading_entries /*EntriesEnd*/ }
// NOTE(review): this initializer is also missing its closing `};`.
static const __tgt_bin_desc BinDesc = { sizeof(Images) / sizeof(Images[0]), /*NumDeviceImages*/ Images, /*DeviceImages*/ __start_omp_offloading_entries, /*HostEntriesBegin*/ __stop_omp_offloading_entries /*HostEntriesEnd*/ }
// K&R-era `auto` storage class; in C23 `auto` means type inference instead.
auto int last,upper,lower,inc;
//last = N;
//lower = 0;
//upper = N;
//inc = 1;
// Schedule kind 33 = static; NOTE(review): passing NULL as the ident_t* source
// location may not be accepted by all libomp builds — verify.
__kmpc_for_static_init_4(NULL, *gtid, 33, &last,&lower,&upper,&inc,1,1);
// NOTE(review): the loop ignores the lower/upper bounds computed above and
// iterates the full [0,N) range in every thread — duplicated work.
for (int i = 0; i < N; ++i){
Y[i] += (*a) * X[i];
printf("this a tset: %f %f\n",X[i],Y[i]);
}
__kmpc_for_static_fini(NULL,*gtid);
}

int main(int argc, char*argv[]){
int N = 100;
float Y[N], X[N];
float x = 5.0;
for (int i = 0; i <N; i++){
//Y[i] = (((float)rand()/(float)(10)) * x);
//X[i] = (((float)rand()/(float)(10)) * x);
Y[i]=1.0;
X[i]=1.0;
printf("this is Y: %f\n",Y[i]);
}
float a = 0.5;
auto int gtid;
__kmpc_begin(NULL, 0);
gtid = __kmpc_global_thread_num(NULL);
// Forwards 4 extra args (N, &a, Y, X) matching the microtask's signature.
__kmpc_fork_call(NULL, 4, __kmp_axpy_microtask,N,&a,&Y[0],&X[0]);
__kmpc_end(NULL);
return 0;
}
laplace2d.c
/* * Copyright 2012 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <math.h> #include <string.h> #include "timer.h" #include <stdio.h> #ifdef _OPENACCM #include <openacc.h> #endif #ifndef VERIFICATION #define VERIFICATION 0 #endif #ifndef NN #define NN 1024 #endif #ifdef _OPENARC_ #if NN == 1024 #pragma openarc #define NN 1024 #elif NN == 2048 #pragma openarc #define NN 2048 #elif NN == 3072 #pragma openarc #define NN 3072 #elif NN == 3072 #pragma openarc #define NN 3072 #elif NN == 4096 #pragma openarc #define NN 4096 #endif #endif #if VERIFICATION == 1 double A_CPU[NN][NN]; double Anew_CPU[NN][NN]; #endif int main(int argc, char** argv) { int n = NN; int m = n; int iter_max = 10; double tol = 1.0e-6; double error = 1.0; int i, j; int iter = 0; double runtime; double (*A)[NN] = (double (*)[NN])malloc(sizeof(double)*n*n); double (*Anew)[NN] = (double (*)[NN])malloc(sizeof(double)*n*n); memset(A, 0, n * m * sizeof(double)); memset(Anew, 0, n * m * sizeof(double)); #if VERIFICATION == 1 memset(A_CPU, 0, n * m * sizeof(double)); memset(Anew_CPU, 0, n * m * sizeof(double)); #endif for (j = 0; j < n; j++) { A[j][0] = 1.0; Anew[j][0] = 1.0; #if VERIFICATION == 1 A_CPU[j][0] = 1.0; Anew_CPU[j][0] = 1.0; #endif } printf("Jacobi relaxation Calculation: %d x %d mesh\n", n, m); StartTimer(); #pragma aspen enter modelregion #ifdef _OPENACCM acc_init(acc_device_default); #endif //aspen_param_whilecnt = 1000 for NN = NN = 4096 //aspen_param_whilecnt = 1000 for NN 
= NN = 8192 #pragma aspen declare param(aspen_param_whilecnt:10) #pragma aspen control loop(aspen_param_whilecnt) #pragma acc data copy(A[0:n][0:n]), create(Anew[0:n][0:n]) while ( error > tol && iter < iter_max ) { error = 0.0; //#pragma omp parallel for shared(m, n, Anew, A) #pragma acc parallel num_gangs(16) num_workers(32) reduction(max:error) private(j) { double lerror = 0.0; #pragma acc loop gang for( j = 1; j < n-1; j++) { #pragma acc loop worker reduction(max:lerror) for( i = 1; i < m-1; i++ ) { Anew[j][i] = 0.25 * ( A[j][i+1] + A[j][i-1] + A[j-1][i] + A[j+1][i]); lerror = fmax( lerror, fabs(Anew[j][i] - A[j][i])); } //[DEBUG] intentionally ignore to flatten nested map constructs. #pragma aspen control ignore error = fmax(error, lerror); } } //#pragma omp parallel for shared(m, n, Anew, A) #pragma acc kernels loop gang for( j = 1; j < n-1; j++) { #pragma acc loop worker for( i = 1; i < m-1; i++ ) { A[j][i] = Anew[j][i]; } } if(iter % 100 == 0) printf("%5d, %0.6f\n", iter, error); iter++; } #ifdef _OPENACCM acc_shutdown(acc_device_default); #endif #pragma aspen exit modelregion printf("iter: %d\n", iter); runtime = GetTimer(); printf("Accelerator Elapsed time %f s\n", runtime / 1000); #if VERIFICATION == 1 { StartTimer(); error = 1.0; iter = 0; while ( error > tol && iter < iter_max ) { error = 0.0; { #pragma omp parallel for private(j, i) for( j = 1; j < n-1; j++) { double lerror = 0.0; for( i = 1; i < m-1; i++ ) { Anew_CPU[j][i] = 0.25 * ( A_CPU[j][i+1] + A_CPU[j][i-1] + A_CPU[j-1][i] + A_CPU[j+1][i]); lerror = fmax( lerror, fabs(Anew_CPU[j][i] - A_CPU[j][i])); } #pragma omp critical error = fmax(error,lerror); } } #pragma omp parallel for private(j, i) for( j = 1; j < n-1; j++) { for( i = 1; i < m-1; i++ ) { A_CPU[j][i] = Anew_CPU[j][i]; } } if(iter % 100 == 0) printf("%5d, %0.6f\n", iter, error); iter++; } runtime = GetTimer(); printf("CPU Elapsed time %f s\n", runtime / 1000); { double cpu_sum = 0.0f; double gpu_sum = 0.0f; double rel_err = 0.0f; for (i 
= 1; i < m-1; i++) { cpu_sum += A_CPU[i][i]*A_CPU[i][i]; gpu_sum += A[i][i]*A[i][i]; } cpu_sum = sqrt(cpu_sum); gpu_sum = sqrt(gpu_sum); rel_err = (cpu_sum-gpu_sum)/cpu_sum; if(rel_err < 1e-6) { printf("Verification Successful err = %e\n", rel_err); } else { printf("Verification Fail err = %e\n", rel_err); } } } #endif }
FastTree.c
/* * FastTree -- inferring approximately-maximum-likelihood trees for large * multiple sequence alignments. * * Morgan N. Price * http://www.microbesonline.org/fasttree/ * * Thanks to Jim Hester of the Cleveland Clinic Foundation for * providing the first parallel (OpenMP) code, Siavash Mirarab of * UT Austin for implementing the WAG option, Samuel Shepard * at the CDC for suggesting and helping with the -quote option, and * Aaron Darling (University of Technology, Sydney) for numerical changes * for wide alignments of closely-related sequences. * * Copyright (C) 2008-2015 The Regents of the University of California * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * or visit http://www.gnu.org/copyleft/gpl.html * * Disclaimer * * NEITHER THE UNITED STATES NOR THE UNITED STATES DEPARTMENT OF ENERGY, * NOR ANY OF THEIR EMPLOYEES, MAKES ANY WARRANTY, EXPRESS OR IMPLIED, * OR ASSUMES ANY LEGAL LIABILITY OR RESPONSIBILITY FOR THE ACCURACY, * COMPLETENESS, OR USEFULNESS OF ANY INFORMATION, APPARATUS, PRODUCT, * OR PROCESS DISCLOSED, OR REPRESENTS THAT ITS USE WOULD NOT INFRINGE * PRIVATELY OWNED RIGHTS. 
*/ /* * To compile FastTree, do: * gcc -Wall -O3 -finline-functions -funroll-loops -o FastTree -lm FastTree.c * Use -DNO_SSE to turn off use of SSE3 instructions * (should not be necessary because compiler should not set __SSE__ if * not available, and modern mallocs should return 16-byte-aligned values) * Use -DOPENMP -fopenmp to use multiple threads (note, old versions of gcc * may not support -fopenmp) * Use -DTRACK_MEMORY if you want detailed reports of memory usage, * but results are not correct above 4GB because mallinfo stores int values. * It also makes FastTree run significantly slower. * * To get usage guidance, do: * FastTree -help * * FastTree uses profiles instead of a distance matrix, and computes * support values for each split from the profiles of the 4 nodes * around the split. It stores a profile for each node and a average * profile over all active nodes (the "out-profile" for computing the * total sum of distance to other nodes). The neighbor joining phase * requires O(N*L*a) space, where N is the number of sequences, L is * the alignment width, and a is the alphabet size. The top-hits * heuristic requires an additional O(N sqrt(N)) memory. After * neighbor-joining, FastTree improves the topology with * nearest-neighbor interchanges (NNIs) and subtree-prune-regraft * moves (SPRs), which does not have a significant additional memory * requirement. (We need only store "up-profiles" on the path from our * current traversal point to the root.) These take O(NLa) time per * round, and with default settings, O(N log(N) L a) time total. * FastTree further improves the topology with maximum-likelihood * NNIs, using similar data structures and complexity, but with a * higher constant factor, and now the "profiles" are actually * posterior distributions for that subtree. Finally, FastTree * resamples the site likelihoods around each NNI and uses * the Shimodaira Hasegawa test to estimate the reliability of each split. 
* * Overview of the neighbor-joining phase: * * Although FastTree uses a log correction on profile distances to * account for multiple substitutions when doing NNIs and SPRs, the * operations on the profiles themselves involve "additive" distances * -- either %different (for nucleotide) or by using an amino acid * similarity matrix (for proteins). If we are using %different as * our distance matrix then * * Profile_distance(A,B) = 1 - sum over characters of freq(A)*freq(B) * * and we can average this value over positions. Positions with gaps * are weighted by %ungapped(A) * %ungapped(B). * * If we are using an amino acid dissimilarity matrix D(i,j) then at * each position * * Profile_distance(A,B) = sum(i,j) freq(A==i) * freq(B==j) * D(i,j) * = sum(k) Ak * Bk * Lambda(k) * * where k iterates over 20 eigenvectors, Lambda(k) is the eigenvalue, * and if A==i, then Ak is the kth column of the inverse of the * eigenvector matrix. * * The exhaustive approach (-slow) takes O(N**3*L*a) time, but * this can be reduced to as little as O(N**(3/2)*log(N)*L*a) time * by using heuristics. * * It uses a combination of three heuristics: a visible set similar to * that of FastTree (Elias & Lagergren 2005), a local hill-climbing * search for a better join (as in relaxed neighbor-joining, Evans et * al. 2006), and a top-hit list to reduce the search space (see * below). 
* * The "visible" set stores, for each node, the best join for that * node, as identified at some point in the past * * If top-hits are not being used, then the neighbor-joining phase can * be summarized as: * * Compute the out-profile by averaging the leaves * Compute the out-distance of each leaf quickly, using the out-profile * Compute the visible set (or approximate it using top-hits, see below) * Until we're down to 3 active nodes: * Find the best join in the visible set * (This involves recomputing the neighbor-joining criterion, * as out-distances and #active nodes may have changed) * Follow a chain of best hits (again recomputing the criterion) * until we find a locally best join, as in relaxed neighbor joining * Create a profile of the parent node, either using simple averages (default) * or using weighted joining as in BIONJ (if -bionj was specified) * Update the out-profile and the out-distances * Update the visible set: * find the best join for the new joined node * replace hits to the joined children with hits to the parent * if we stumble across a join for the new node that is better * than the corresponding entry in the visible set, "reset" * that entry. * * For each iteration, this method does * O(N) work to find the best hit in the visible set * O(L*N*a*log(N)) work to do the local search, where log(N) * is a pessimistic estimate of the number of iterations. In * practice, we average <1 iteration for 2,000 sequences. * With -fastest, this step is omitted. * O(N*a) work to compute the joined profile and update the out-profile * O(L*N*a) work to update the out-distances * O(L*N*a) work to compare the joined profile to the other nodes * (to find the new entry in the visible set) * * and there are N-3 iterations, so it takes O(N**2 * L * log(N) * a) time. * * The profile distances give exactly the same result as matrix * distances in neighbor-joining or BIONJ would if there are no gaps * in the alignment. 
If there are gaps, then it is an
 * approximation. To get the same result we also store a "diameter"
 * for each node (diameter is 0 for leaves).
 *
 * In the simpler case (NJ rather than BIONJ), when we join A and B to
 * give a new node AB,
 *
 * Profile(AB) = (A+B)/2
 * Profile_distance(AB,C) = (Profile_distance(A,C)+Profile_distance(B,C))/2
 * because the formulas above are linear
 *
 * And according to the neighbor-joining rule,
 * d(AB,C) = (d(A,C)+d(B,C)-d(A,B))/2
 *
 * and we can achieve the same value by writing
 * diameter(AB) = pd(A,B)/2
 * diameter(leaf) = 0
 * d(A,B) = pd(A,B) - diameter(A) - diameter(B)
 *
 * because
 * d(AB,C) = (d(A,C)+d(B,C)-d(A,B))/2
 * = (pd(A,C)-diam(A)-diam(C)+pd(B,C)-diam(B)-diam(C)-pd(A,B)+diam(A)+diam(B))/2
 * = (pd(A,C)+pd(B,C))/2 - diam(C) - pd(A,B)/2
 * = pd(AB,C) - diam(AB) - diam(C)
 *
 * If we are using BIONJ, with weight lambda for the join:
 * Profile(AB) = lambda*A + (1-lambda)*B
 * then a similar argument gives
 * diam(AB) = lambda*diam(A) + (1-lambda)*diam(B) + lambda*d(A,AB) + (1-lambda)*d(B,AB),
 *
 * where, as in neighbor joining,
 * d(A,AB) = d(A,B) + (total out_distance(A) - total out_distance(B))/(n-2)
 *
 * A similar recursion formula works for the "variance" matrix of BIONJ,
 * var(AB,C) = lambda*var(A,C) + (1-lambda)*var(B,C) - lambda*(1-lambda)*var(A,B)
 * is equivalent to
 * var(A,B) = pv(A,B) - vd(A) - vd(B), where
 * pv(A,B) = pd(A,B)
 * vd(A) = 0 for leaves
 * vd(AB) = lambda*vd(A) + (1-lambda)*vd(B) + lambda*(1-lambda)*var(A,B)
 *
 * The top-hits heuristic to reduce the work below O(N**2*L) stores a top-hit
 * list of size m=sqrt(N) for each active node.
 *
 * The list can be initialized for all the leaves in sub (N**2 * L) time as follows:
 * Pick a "seed" sequence and compare it to all others
 * Store the top m hits of the seed as its top-hit list
 * Take "close" hits of the seed (within the top m, and see the "close" parameter),
 * and assume that their top m hits lie within the top 2*m hits of the seed.
So, compare them to the seed's neighbors (if they do not already
 * have a top hit list) and set their top hits.
 *
 * This method does O(N*L) work for each seed, or O(N**(3/2)*L) work total.
 *
 * To avoid doing O(N*L) work at each iteration, we need to avoid
 * updating the visible set and the out-distances. So, we use "stale"
 * out-distances, and when searching the visible set for the best hit,
 * we only inspect the top m=sqrt(N) entries. We then update those
 * out-distances (up to 2*m*L*a work) and then find the best hit.
 *
 * To avoid searching the entire visible set, FastTree keeps
 * and updates a list of the top sqrt(N) entries in the visible set.
 * This costs O(sqrt(N)) time per join to find the best entry and to
 * update, or O(N sqrt(N)) time overall.
 *
 * Similarly, when doing the local hill-climbing, we avoid O(N*L) work
 * by only considering the top-hits for the current node. So this adds
 * O(m*a*log(N)) work per iteration.
 *
 * When we join two nodes, we compute profiles and update the
 * out-profile as before. We need to compute the best hits of the node
 * -- we merge the lists for the children and select the best up-to-m
 * hits. If the top hit list contains a stale node we replace it with
 * its parent. If we still have <m/2 entries, we do a "refresh".
 *
 * In a "refresh", similar to the fast top-hit computation above, we
 * compare the "seed", in this case the new joined node, to all other
 * nodes. We compare its close neighbors (the top m hits) to all
 * neighbors (the top 2*m hits) and update the top-hit lists of all
 * neighbors (by merging to give a list of 3*m entries and then
 * selecting the best m entries).
 *
 * Finally, during these processes we update the visible sets for
 * other nodes with better hits if we find them, and we set the
 * visible entry for the new joined node to the best entry in its
 * top-hit list. (And whenever we update a visible entry, we
 * do O(sqrt(N)) work to update the top-visible list.)
 * These updates are not common so they do not alter the
 * O(N sqrt(N) log(N) L a) total running time for the joining phase.
 *
 * Second-level top hits
 *
 * With -fastest or with -2nd, FastTree uses an additional "2nd-level" top hits
 * heuristic to reduce the running time for the top-hits phase to
 * O(N**1.25 L) and for the neighbor-joining phase to O(N**1.25 L a).
 * This also reduces the memory usage for the top-hits lists to
 * O(N**1.25), which is important for alignments with a million
 * sequences. The key idea is to store just q = sqrt(m) top hits for
 * most sequences.
 *
 * Given the neighbors of A -- either for a seed or for a neighbor
 * from the top-hits heuristic, if B is within the top q hits of A, we
 * set top-hits(B) from the top 3*q top-hits of A. And, we record that
 * A is the "source" of the hits for B, so if we run low on hits for
 * B, instead of doing a full refresh, we can do top-hits(B) :=
 * top-hits(B) union top-hits(active_ancestor(A)).
 * During a refresh, these "2nd-level" top hits are updated just as
 * normal, but the source is maintained and only q entries are stored,
 * until we near the end of the neighbor joining phase (until the
 * root has 2*m children or less).
 *
 * Parallel execution with OpenMP
 *
 * If you compile FastTree with OpenMP support, it will take
 * advantage of multiple CPUs on one machine. It will parallelize:
 *
 *    The top hits phase
 *    Comparing one node to many others during the NJ phase (the simplest kind of join)
 *    The refresh phase
 *    Optimizing likelihoods for 3 alternate topologies during ML NNIs and ML supports
 *    (only 3 threads can be used)
 *
 * This accounts for most of the O(N L a) or slower steps except for
 * minimum-evolution NNIs (which are fast anyway), minimum-evolution SPRs,
 * selecting per-site rates, and optimizing branch lengths outside of ML NNIs.
 *
 * Parallelizing the top hits phase may lead to a slight change in the tree,
 * as some top hits are computed from different (and potentially less optimal) sources.
* This means that results on repeated runs may not be 100% identical. * However, this should not have any significant effect on tree quality * after the NNIs and SPRs. * * The OpenMP code also turns off the star-topology test during ML * NNIs, which may lead to slight improvements in likelihood. */ #include <stdio.h> #include <stdbool.h> #include <string.h> #include <assert.h> #include <math.h> #include <stdlib.h> #include <sys/time.h> #include <ctype.h> #include <unistd.h> #ifdef TRACK_MEMORY /* malloc.h apparently doesn't exist on MacOS */ #include <malloc.h> #endif /* Compile with -DOPENMP to turn on multithreading */ #ifdef OPENMP #include <omp.h> #endif /* By default, tries to compile with SSE instructions for greater speed. But if compiled with -DUSE_DOUBLE, uses double precision instead of single-precision floating point (2x memory required), does not use SSE, and allows much shorter branch lengths. */ #ifdef __SSE__ #if !defined(NO_SSE) && !defined(USE_DOUBLE) #define USE_SSE3 #endif #endif #ifdef USE_DOUBLE #define SSE_STRING "Double precision (No SSE3)" typedef double numeric_t; #define ScanNumericSpec "%lf" #else typedef float numeric_t; #define ScanNumericSpec "%f" #endif #ifdef USE_SSE3 #define SSE_STRING "SSE3" #define ALIGNED __attribute__((aligned(16))) #define IS_ALIGNED(X) ((((unsigned long) new) & 15L) == 0L) #include <xmmintrin.h> #else #define ALIGNED #define IS_ALIGNED(X) 1 #ifndef USE_DOUBLE #define SSE_STRING "No SSE3" #endif #endif /* USE_SSE3 */ #define FT_VERSION "2.1.11" char *usage = " FastTree protein_alignment > tree\n" " FastTree < protein_alignment > tree\n" " FastTree -out tree protein_alignment\n" " FastTree -nt nucleotide_alignment > tree\n" " FastTree -nt -gtr < nucleotide_alignment > tree\n" " FastTree < nucleotide_alignment > tree\n" "FastTree accepts alignments in fasta or phylip interleaved formats\n" "\n" "Common options (must be before the alignment file):\n" " -quiet to suppress reporting information\n" " -nopr to 
suppress progress indicator\n" " -log logfile -- save intermediate trees, settings, and model details\n" " -fastest -- speed up the neighbor joining phase & reduce memory usage\n" " (recommended for >50,000 sequences)\n" " -n <number> to analyze multiple alignments (phylip format only)\n" " (use for global bootstrap, with seqboot and CompareToBootstrap.pl)\n" " -nosupport to not compute support values\n" " -intree newick_file to set the starting tree(s)\n" " -intree1 newick_file to use this starting tree for all the alignments\n" " (for faster global bootstrap on huge alignments)\n" " -pseudo to use pseudocounts (recommended for highly gapped sequences)\n" " -gtr -- generalized time-reversible model (nucleotide alignments only)\n" " -lg -- Le-Gascuel 2008 model (amino acid alignments only)\n" " -wag -- Whelan-And-Goldman 2001 model (amino acid alignments only)\n" " -quote -- allow spaces and other restricted characters (but not ' ) in\n" " sequence names and quote names in the output tree (fasta input only;\n" " FastTree will not be able to read these trees back in)\n" " -noml to turn off maximum-likelihood\n" " -nome to turn off minimum-evolution NNIs and SPRs\n" " (recommended if running additional ML NNIs with -intree)\n" " -nome -mllen with -intree to optimize branch lengths for a fixed topology\n" " -cat # to specify the number of rate categories of sites (default 20)\n" " or -nocat to use constant rates\n" " -gamma -- after optimizing the tree under the CAT approximation,\n" " rescale the lengths to optimize the Gamma20 likelihood\n" " -constraints constraintAlignment to constrain the topology search\n" " constraintAlignment should have 1s or 0s to indicates splits\n" " -expert -- see more options\n" "For more information, see http://www.microbesonline.org/fasttree/\n"; char *expertUsage = "FastTree [-nt] [-n 100] [-quote] [-pseudo | -pseudo 1.0]\n" " [-boot 1000 | -nosupport]\n" " [-intree starting_trees_file | -intree1 starting_tree_file]\n" " [-quiet | 
-nopr]\n" " [-nni 10] [-spr 2] [-noml | -mllen | -mlnni 10]\n" " [-mlacc 2] [-cat 20 | -nocat] [-gamma]\n" " [-slow | -fastest] [-2nd | -no2nd] [-slownni] [-seed 1253] \n" " [-top | -notop] [-topm 1.0 [-close 0.75] [-refresh 0.8]]\n" " [-gtr] [-gtrrates ac ag at cg ct gt] [-gtrfreq A C G T]\n" " [ -lg | -wag | -trans transitionmatrixfile ]\n" " [-matrix Matrix | -nomatrix] [-nj | -bionj]\n" " [ -constraints constraintAlignment [ -constraintWeight 100.0 ] ]\n" " [-log logfile]\n" " [ alignment_file ]\n" " [ -out output_newick_file | > newick_tree]\n" "\n" "or\n" "\n" "FastTree [-nt] [-matrix Matrix | -nomatrix] [-rawdist] -makematrix [alignment]\n" " [-n 100] > phylip_distance_matrix\n" "\n" " FastTree supports fasta or phylip interleaved alignments\n" " By default FastTree expects protein alignments, use -nt for nucleotides\n" " FastTree reads standard input if no alignment file is given\n" "\n" "Input/output options:\n" " -n -- read in multiple alignments in. This only\n" " works with phylip interleaved format. For example, you can\n" " use it with the output from phylip's seqboot. 
If you use -n, FastTree\n" " will write 1 tree per line to standard output.\n" " -intree newickfile -- read the starting tree in from newickfile.\n" " Any branch lengths in the starting trees are ignored.\n" " -intree with -n will read a separate starting tree for each alignment.\n" " -intree1 newickfile -- read the same starting tree for each alignment\n" " -quiet -- do not write to standard error during normal operation (no progress\n" " indicator, no options summary, no likelihood values, etc.)\n" " -nopr -- do not write the progress indicator to stderr\n" " -log logfile -- save intermediate trees so you can extract\n" " the trees and restart long-running jobs if they crash\n" " -log also reports the per-site rates (1 means slowest category)\n" " -quote -- quote sequence names in the output and allow spaces, commas,\n" " parentheses, and colons in them but not ' characters (fasta files only)\n" "\n" "Distances:\n" " Default: For protein sequences, log-corrected distances and an\n" " amino acid dissimilarity matrix derived from BLOSUM45\n" " or for nucleotide sequences, Jukes-Cantor distances\n" " To specify a different matrix, use -matrix FilePrefix or -nomatrix\n" " Use -rawdist to turn the log-correction off\n" " or to use %different instead of Jukes-Cantor\n" " (These options affect minimum-evolution computations only;\n" " use -trans to affect maximum-likelihoood computations)\n" "\n" " -pseudo [weight] -- Use pseudocounts to estimate distances between\n" " sequences with little or no overlap. (Off by default.) Recommended\n" " if analyzing the alignment has sequences with little or no overlap.\n" " If the weight is not specified, it is 1.0\n" "\n" "Topology refinement:\n" " By default, FastTree tries to improve the tree with up to 4*log2(N)\n" " rounds of minimum-evolution nearest-neighbor interchanges (NNI),\n" " where N is the number of unique sequences, 2 rounds of\n" " subtree-prune-regraft (SPR) moves (also min. 
evo.), and\n" " up to 2*log(N) rounds of maximum-likelihood NNIs.\n" " Use -nni to set the number of rounds of min. evo. NNIs,\n" " and -spr to set the rounds of SPRs.\n" " Use -noml to turn off both min-evo NNIs and SPRs (useful if refining\n" " an approximately maximum-likelihood tree with further NNIs)\n" " Use -sprlength set the maximum length of a SPR move (default 10)\n" " Use -mlnni to set the number of rounds of maximum-likelihood NNIs\n" " Use -mlacc 2 or -mlacc 3 to always optimize all 5 branches at each NNI,\n" " and to optimize all 5 branches in 2 or 3 rounds\n" " Use -mllen to optimize branch lengths without ML NNIs\n" " Use -mllen -nome with -intree to optimize branch lengths on a fixed topology\n" " Use -slownni to turn off heuristics to avoid constant subtrees (affects both\n" " ML and ME NNIs)\n" "\n" "Maximum likelihood model options:\n" " -lg -- Le-Gascuel 2008 model instead of (default) Jones-Taylor-Thorton 1992 model (a.a. only)\n" " -wag -- Whelan-And-Goldman 2001 model instead of (default) Jones-Taylor-Thorton 1992 model (a.a. only)\n" " -gtr -- generalized time-reversible instead of (default) Jukes-Cantor (nt only)\n" " -cat # -- specify the number of rate categories of sites (default 20)\n" " -nocat -- no CAT model (just 1 category)\n" " - trans filename -- use the transition matrix from filename\n" " This is supported for amino acid alignments only\n" " The file must be tab-delimited with columns in the order ARNDCQEGHILKMFPSTWYV*\n" " The additional column named * is for the stationary distribution\n" " Each row must have a row name in the same order ARNDCQEGHILKMFPSTWYV\n" " -gamma -- after the final round of optimizing branch lengths with the CAT model,\n" " report the likelihood under the discrete gamma model with the same\n" " number of categories. FastTree uses the same branch lengths but\n" " optimizes the gamma shape parameter and the scale of the lengths.\n" " The final tree will have rescaled lengths. 
Used with -log, this\n" " also generates per-site likelihoods for use with CONSEL, see\n" " GammaLogToPaup.pl and documentation on the FastTree web site.\n" "\n" "Support value options:\n" " By default, FastTree computes local support values by resampling the site\n" " likelihoods 1,000 times and the Shimodaira Hasegawa test. If you specify -nome,\n" " it will compute minimum-evolution bootstrap supports instead\n" " In either case, the support values are proportions ranging from 0 to 1\n" "\n" " Use -nosupport to turn off support values or -boot 100 to use just 100 resamples\n" " Use -seed to initialize the random number generator\n" "\n" "Searching for the best join:\n" " By default, FastTree combines the 'visible set' of fast neighbor-joining with\n" " local hill-climbing as in relaxed neighbor-joining\n" " -slow -- exhaustive search (like NJ or BIONJ, but different gap handling)\n" " -slow takes half an hour instead of 8 seconds for 1,250 proteins\n" " -fastest -- search the visible set (the top hit for each node) only\n" " Unlike the original fast neighbor-joining, -fastest updates visible(C)\n" " after joining A and B if join(AB,C) is better than join(C,visible(C))\n" " -fastest also updates out-distances in a very lazy way,\n" " -fastest sets -2nd on as well, use -fastest -no2nd to avoid this\n" "\n" "Top-hit heuristics:\n" " By default, FastTree uses a top-hit list to speed up search\n" " Use -notop (or -slow) to turn this feature off\n" " and compare all leaves to each other,\n" " and all new joined nodes to each other\n" " -topm 1.0 -- set the top-hit list size to parameter*sqrt(N)\n" " FastTree estimates the top m hits of a leaf from the\n" " top 2*m hits of a 'close' neighbor, where close is\n" " defined as d(seed,close) < 0.75 * d(seed, hit of rank 2*m),\n" " and updates the top-hits as joins proceed\n" " -close 0.75 -- modify the close heuristic, lower is more conservative\n" " -refresh 0.8 -- compare a joined node to all other nodes if its\n" " 
top-hit list is less than 80% of the desired length,\n" " or if the age of the top-hit list is log2(m) or greater\n" " -2nd or -no2nd to turn 2nd-level top hits heuristic on or off\n" " This reduces memory usage and running time but may lead to\n" " marginal reductions in tree quality.\n" " (By default, -fastest turns on -2nd.)\n" "\n" "Join options:\n" " -nj: regular (unweighted) neighbor-joining (default)\n" " -bionj: weighted joins as in BIONJ\n" " FastTree will also weight joins during NNIs\n" "\n" "Constrained topology search options:\n" " -constraints alignmentfile -- an alignment with values of 0, 1, and -\n" " Not all sequences need be present. A column of 0s and 1s defines a\n" " constrained split. Some constraints may be violated\n" " (see 'violating constraints:' in standard error).\n" " -constraintWeight -- how strongly to weight the constraints. A value of 1\n" " means a penalty of 1 in tree length for violating a constraint\n" " Default: 100.0\n" "\n" "For more information, see http://www.microbesonline.org/fasttree/\n" " or the comments in the source code\n"; ; #define MAXCODES 20 #define NOCODE 127 /* Note -- sequence lines longer than BUFFER_SIZE are allowed, but FASTA header lines must be within this limit */ #define BUFFER_SIZE 5000 #define MIN(X,Y) ((X) < (Y) ? (X) : (Y)) #define MAX(X,Y) ((X) > (Y) ? (X) : (Y)) typedef struct { int nPos; int nSeq; char **names; char **seqs; int nSaved; /* actual allocated size of names and seqs */ } alignment_t; /* For each position in a profile, we have a weight (% non-gapped) and a frequency vector. (If using a matrix, the frequency vector is in eigenspace). 
We also store codes for simple profile positions (all gaps or only 1 value) If weight[pos] > 0 && codes[pos] == NOCODE then we store the vector vectors itself is sets of nCodes long, so the vector for the ith nonconstant position starts at &vectors[nCodes*i] To speed up comparison of outprofile to a sequence or other simple profile, we also (for outprofiles) store codeDist[iPos*nCodes+k] = dist(k,profile[iPos]) For constraints, we store a vector of nOn and nOff If not using constraints, those will be NULL */ typedef struct { /* alignment profile */ numeric_t *weights; unsigned char *codes; numeric_t *vectors; /* NULL if no non-constant positions, e.g. for leaves */ int nVectors; numeric_t *codeDist; /* Optional -- distance to each code at each position */ /* constraint profile */ int *nOn; int *nOff; } profile_t; /* A visible node is a pair of nodes i, j such that j is the best hit of i, using the neighbor-joining criterion, at the time the comparison was made, or approximately so since then. Note that variance = dist because in BIONJ, constant factors of variance do not matter, and because we weight ungapped sequences higher naturally when averaging profiles, so we do not take this into account in the computation of "lambda" for BIONJ. For the top-hit list heuristic, if the top hit list becomes "too short", we store invalid entries with i=j=-1 and dist/criterion very high. */ typedef struct { int i, j; numeric_t weight; /* Total product of weights (maximum value is nPos) This is needed for weighted joins and for pseudocounts, but not in most other places. 
For example, it is not maintained by the top hits code */ numeric_t dist; /* The uncorrected distance (includes diameter correction) */ numeric_t criterion; /* changes when we update the out-profile or change nActive */ } besthit_t; typedef struct { int nChild; int child[3]; } children_t; typedef struct { /* Distances between amino acids */ numeric_t distances[MAXCODES][MAXCODES]; /* Inverse of the eigenvalue matrix, for rotating a frequency vector into eigenspace so that profile similarity computations are O(alphabet) not O(alphabet*alphabet) time. */ numeric_t eigeninv[MAXCODES][MAXCODES]; numeric_t eigenval[MAXCODES]; /* eigenvalues */ /* eigentot=eigeninv times the all-1s frequency vector useful for normalizing rotated frequency vectors */ numeric_t eigentot[MAXCODES]; /* codeFreq is the transpose of the eigeninv matrix is the rotated frequency vector for each code */ numeric_t codeFreq[MAXCODES][MAXCODES]; numeric_t gapFreq[MAXCODES]; } distance_matrix_t; /* A transition matrix gives the instantaneous rate of change of frequencies df/dt = M . f which is solved by f(t) = exp(M) . f(0) and which is not a symmetric matrix because of non-uniform stationary frequencies stat, so that M stat = 0 M(i,j) is instantaneous rate of j -> i, not of i -> j S = diag(sqrt(stat)) is a correction so that M' = S**-1 M S is symmetric Let W L W**-1 = M' be an eigendecomposition of M' Because M' is symmetric, W can be a rotation, and W**-1 = t(W) Set V = S*W M = V L V**-1 is an eigendecomposition of M Note V**-1 = W**-1 S**-1 = t(W) S**-1 Evolution by time t is given by exp(M*t) = V exp(L*t) V**-1 P(A & B | t) = B . exp(M*t) . (A * stat) note this is *not* the same as P(A->B | t) and we can reduce some of the computations from O(a**2) to O(a) time, where a is the alphabet size, by storing frequency vectors as t(V) . f = t(W) . t(S) . f Then P(f0 & f1 | t) = f1 . exp(M*t) . f0 * (f0 . 
stat) = sum(r0j * r1j * exp(l_j*t)) where r0 and r1 are the transformed vectors Posterior distribution of P given children f0 and f1 is given by P(i | f0, f1, t0, t1) = stat * P(i->f0 | t0) * P(i->f1 | t1) = P(i & f0 | t0) * P(i & f1 | t1) / stat ~ (V . exp(t0*L) . r0) * (V . exp(t1*L) . r1) / stat When normalize this posterior distribution (to sum to 1), divide by stat, and transform by t(V) -- this is the "profile" of internal nodes To eliminate the O(N**2) step of transforming by t(V), if the posterior distribution of an amino acid is near 1 then we can approximate it by P(i) ~= (i==A) * w + nearP(i) * (1-w), where w is fit so that P(i==A) is correct nearP = Posterior(i | i, i, 0.1, 0.1) [0.1 is an arbitrary choice] and we confirm that the approximation works well before we use it. Given this parameter w we can set rotated_posterior = rotation(w * (i==A)/stat + (1-w) * nearP/stat) = codeFreq(A) * w/stat(A) + nearFreq(A) * (1-w) */ typedef struct { numeric_t stat[MAXCODES]; /* The stationary distribution */ numeric_t statinv[MAXCODES]; /* 1/stat */ /* the eigenmatrix, with the eigenvectors as columns and rotations of individual characters as rows. 
Also includes a NOCODE entry for gaps */ numeric_t codeFreq[NOCODE+1][MAXCODES]; numeric_t eigeninv[MAXCODES][MAXCODES]; /* Inverse of eigenmatrix */ numeric_t eigeninvT[MAXCODES][MAXCODES]; /* transpose of eigeninv */ numeric_t eigenval[MAXCODES]; /* Eigenvalues */ /* These are for approximate posteriors (off by default) */ numeric_t nearP[MAXCODES][MAXCODES]; /* nearP[i][j] = P(parent=j | both children are i, both lengths are 0.1 */ numeric_t nearFreq[MAXCODES][MAXCODES]; /* rotation of nearP/stat */ } transition_matrix_t; typedef struct { int nRateCategories; numeric_t *rates; /* 1 per rate category */ unsigned int *ratecat; /* 1 category per position */ } rates_t; typedef struct { /* The input */ int nSeq; int nPos; char **seqs; /* the aligment sequences array (not reallocated) */ distance_matrix_t *distance_matrix; /* a pointer (not reallocated), or NULL if using %identity distance */ transition_matrix_t *transmat; /* a pointer (is allocated), or NULL for Jukes-Cantor */ /* Topological constraints are represented for each sequence as binary characters with values of '0', '1', or '-' (for missing data) Sequences that have no constraint may have a NULL string */ int nConstraints; char **constraintSeqs; /* The profile data structures */ int maxnode; /* The next index to allocate */ int maxnodes; /* Space allocated in data structures below */ profile_t **profiles; /* Profiles of leaves and intermediate nodes */ numeric_t *diameter; /* To correct for distance "up" from children (if any) */ numeric_t *varDiameter; /* To correct variances for distance "up" */ numeric_t *selfdist; /* Saved for use in some formulas */ numeric_t *selfweight; /* Saved for use in some formulas */ /* Average profile of all active nodes, the "outprofile" * If all inputs are ungapped, this has weight 1 (not nSequences) at each position * The frequencies all sum to one (or that is implied by the eigen-representation) */ profile_t *outprofile; double totdiam; /* We sometimes use stale 
out-distances, so we remember what nActive was */ numeric_t *outDistances; /* Sum of distances to other active (parent==-1) nodes */ int *nOutDistActive; /* What nActive was when this outDistance was computed */ /* the inferred tree */ int root; /* index of the root. Unlike other internal nodes, it has 3 children */ int *parent; /* -1 or index of parent */ children_t *child; numeric_t *branchlength; /* Distance to parent */ numeric_t *support; /* 1 for high-confidence nodes */ /* auxilliary data for maximum likelihood (defaults to 1 category of rate=1.0) */ rates_t rates; } NJ_t; /* Uniquify sequences in an alignment -- map from indices in the alignment to unique indicies in a NJ_t */ typedef struct { int nSeq; int nUnique; int *uniqueFirst; /* iUnique -> iAln */ int *alnNext; /* iAln -> next, or -1 */ int *alnToUniq; /* iAln -> iUnique, or -1 if another was the exemplar */ char **uniqueSeq; /* indexed by iUniq -- points to strings allocated elsewhere */ } uniquify_t; /* Describes which switch to do */ typedef enum {ABvsCD,ACvsBD,ADvsBC} nni_t; /* A list of these describes a chain of NNI moves in a rooted tree, making up, in total, an SPR move */ typedef struct { int nodes[2]; double deltaLength; /* change in tree length for this step (lower is better) */ } spr_step_t; /* Keep track of hits for the top-hits heuristic without wasting memory j = -1 means empty If j is an inactive node, this may be replaced by that node's parent (and dist recomputed) */ typedef struct { int j; numeric_t dist; } hit_t; typedef struct { int nHits; /* the allocated and desired size; some of them may be empty */ hit_t *hits; int hitSource; /* where to refresh hits from if a 2nd-level top-hit list, or -1 */ int age; /* number of joins since a refresh */ } top_hits_list_t; typedef struct { int m; /* size of a full top hits list, usually sqrt(N) */ int q; /* size of a 2nd-level top hits, usually sqrt(m) */ int maxnodes; top_hits_list_t *top_hits_lists; /* one per node */ hit_t *visible; /* 
the "visible" (very best) hit for each node */ /* The top-visible set is a subset, usually of size m, of the visible set -- it is the set of joins to select from Each entry is either a node whose visible set entry has a good (low) criterion, or -1 for empty, or is an obsolete node (which is effectively the same). Whenever we update the visible set, should also call UpdateTopVisible() which ensures that none of the topvisible set are stale (that is, they all point to an active node). */ int nTopVisible; /* nTopVisible = m * topvisibleMult */ int *topvisible; int topvisibleAge; /* joins since the top-visible list was recomputed */ #ifdef OPENMP /* 1 lock to read or write any top hits list, no thread grabs more than one */ omp_lock_t *locks; #endif } top_hits_t; /* Global variables */ /* Options */ int verbose = 1; int showProgress = 1; int slow = 0; int fastest = 0; bool useTopHits2nd = false; /* use the second-level top hits heuristic? */ int bionj = 0; double tophitsMult = 1.0; /* 0 means compare nodes to all other nodes */ double tophitsClose = -1.0; /* Parameter for how close is close; also used as a coverage req. */ double topvisibleMult = 1.5; /* nTopVisible = m * topvisibleMult; 1 or 2 did not make much difference in either running time or accuracy so I chose a compromise. */ double tophitsRefresh = 0.8; /* Refresh if fraction of top-hit-length drops to this */ double tophits2Mult = 1.0; /* Second-level top heuristic -- only with -fastest */ int tophits2Safety = 3; /* Safety factor for second level of top-hits heuristic */ double tophits2Refresh = 0.6; /* Refresh 2nd-level top hits if drops down to this fraction of length */ double staleOutLimit = 0.01; /* nActive changes by at most this amount before we recompute an out-distance. 
(Only applies if using the top-hits heuristic) */
double fResetOutProfile = 0.02;	/* Recompute out profile from scratch if nActive has changed
				   by more than this proportion, and */
int nResetOutProfile = 200;	/* nActive has also changed more than this amount */
int nCodes=20;			/* 20 if protein, 4 if nucleotide */
bool useMatrix=true;		/* If false, use %different as the uncorrected distance */
bool logdist = true;		/* If true, do a log-correction (scoredist-like or Jukes-Cantor)
				   but only during NNIs and support values, not during neighbor-joining */
double pseudoWeight = 0.0;	/* The weight of pseudocounts to avoid artificial long branches when
				   nearby sequences in the tree have little or no overlap
				   (off by default). The prior distance is based on
				   all overlapping positions among the quartet or triplet under
				   consideration. The log correction takes place after the
				   pseudocount is used. */
double constraintWeight = 100.0;/* Cost of violation of a topological constraint in evolutionary distance
				   or likelihood */
double MEMinDelta = 1.0e-4;	/* Changes of less than this in tree-length are discounted for
				   purposes of identifying fixed subtrees */
bool fastNNI = true;
bool gammaLogLk = false;	/* compute gamma likelihood without reoptimizing branch lengths? */

/* Maximum likelihood options and constants */

/* These are used to rescale likelihood values and avoid taking a logarithm at each position */
const double LkUnderflow = 1.0e-4;
const double LkUnderflowInv = 1.0e4;
const double LogLkUnderflow = 9.21034037197618; /* -log(LkUnderflow) == +log(LkUnderflowInv) */
const double Log2 = 0.693147180559945;

/* These are used to limit the optimization of branch lengths.
   Also very short branch lengths can create numerical problems.
   In version 2.1.7, the minimum branch lengths (MLMinBranchLength and
   MLMinRelBranchLength) were increased to prevent numerical problems in rare cases.
In version 2.1.8, to provide useful branch lengths for genome-wide alignments, the minimum branch lengths were dramatically decreased if USE_DOUBLE is defined. */ #ifndef USE_DOUBLE const double MLMinBranchLengthTolerance = 1.0e-4; /* absolute tolerance for optimizing branch lengths */ const double MLFTolBranchLength = 0.001; /* fractional tolerance for optimizing branch lengths */ const double MLMinBranchLength = 5.0e-4; /* minimum value for branch length */ const double MLMinRelBranchLength = 2.5e-4; /* minimum of rate * length */ const double fPostTotalTolerance = 1.0e-10; /* posterior vector must sum to at least this before rescaling */ #else const double MLMinBranchLengthTolerance = 1.0e-9; const double MLFTolBranchLength = 0.001; const double MLMinBranchLength = 5.0e-9; const double MLMinRelBranchLength = 2.5e-9; const double fPostTotalTolerance = 1.0e-20; #endif int mlAccuracy = 1; /* Rounds of optimization of branch lengths; 1 means do 2nd round only if close */ double closeLogLkLimit = 5.0; /* If partial optimization of an NNI looks like it would decrease the log likelihood by this much or more then do not optimize it further */ double treeLogLkDelta = 0.1; /* Give up if tree log-lk changes by less than this; NNIs that change likelihood by less than this also are considered unimportant by some heuristics */ bool exactML = true; /* Exact or approximate posterior distributions for a.a.s */ double approxMLminf = 0.95; /* Only try to approximate posterior distributions if max. 
value is at least this high */ double approxMLminratio = 2/3.0;/* Ratio of approximated/true posterior values must be at least this high */ double approxMLnearT = 0.2; /* 2nd component of near-constant posterior distribution uses this time scale */ const int nDefaultRateCats = 20; /* Performance and memory usage */ long profileOps = 0; /* Full profile-based distance operations */ long outprofileOps = 0; /* How many of profileOps are comparisons to outprofile */ long seqOps = 0; /* Faster leaf-based distance operations */ long profileAvgOps = 0; /* Number of profile-average steps */ long nHillBetter = 0; /* Number of hill-climbing steps */ long nCloseUsed = 0; /* Number of "close" neighbors we avoid full search for */ long nClose2Used = 0; /* Number of "close" neighbors we use 2nd-level top hits for */ long nRefreshTopHits = 0; /* Number of full-blown searches (interior nodes) */ long nVisibleUpdate = 0; /* Number of updates of the visible set */ long nNNI = 0; /* Number of NNI changes performed */ long nSPR = 0; /* Number of SPR changes performed */ long nML_NNI = 0; /* Number of max-lik. 
NNI changes performed */ long nSuboptimalSplits = 0; /* # of splits that are rejected given final tree (during bootstrap) */ long nSuboptimalConstrained = 0; /* Bad splits that are due to constraints */ long nConstraintViolations = 0; /* Number of constraint violations */ long nProfileFreqAlloc = 0; long nProfileFreqAvoid = 0; long szAllAlloc = 0; long mymallocUsed = 0; /* useful allocations by mymalloc */ long maxmallocHeap = 0; /* Maximum of mi.arena+mi.hblkhd from mallinfo (actual mem usage) */ long nLkCompute = 0; /* # of likelihood computations for pairs of probability vectors */ long nPosteriorCompute = 0; /* # of computations of posterior probabilities */ long nAAPosteriorExact = 0; /* # of times compute exact AA posterior */ long nAAPosteriorRough = 0; /* # of times use rough approximation */ long nStarTests = 0; /* # of times we use star test to avoid testing an NNI */ /* Protein character set */ unsigned char *codesStringAA = (unsigned char*) "ARNDCQEGHILKMFPSTWYV"; unsigned char *codesStringNT = (unsigned char*) "ACGT"; unsigned char *codesString = NULL; distance_matrix_t *ReadDistanceMatrix(char *prefix); void SetupDistanceMatrix(/*IN/OUT*/distance_matrix_t *); /* set eigentot, codeFreq, gapFreq */ void ReadMatrix(char *filename, /*OUT*/numeric_t codes[MAXCODES][MAXCODES], bool check_codes); void ReadVector(char *filename, /*OUT*/numeric_t codes[MAXCODES]); alignment_t *ReadAlignment(/*READ*/FILE *fp, bool bQuote); /* Returns a list of strings (exits on failure) */ alignment_t *FreeAlignment(alignment_t *); /* returns NULL */ void FreeAlignmentSeqs(/*IN/OUT*/alignment_t *); /* Takes as input the transpose of the matrix V, with i -> j This routine takes care of setting the diagonals */ transition_matrix_t *CreateTransitionMatrix(/*IN*/double matrix[MAXCODES][MAXCODES], /*IN*/double stat[MAXCODES]); transition_matrix_t *CreateGTR(double *gtrrates/*ac,ag,at,cg,ct,gt*/, double *gtrfreq/*ACGT*/); transition_matrix_t *ReadAATransitionMatrix(/*IN*/char 
*filename); /* For converting profiles from 1 rotation to another, or converts NULL to NULL */ distance_matrix_t *TransMatToDistanceMat(transition_matrix_t *transmat); /* Allocates memory, initializes leaf profiles */ NJ_t *InitNJ(char **sequences, int nSeqs, int nPos, /*IN OPTIONAL*/char **constraintSeqs, int nConstraints, /*IN OPTIONAL*/distance_matrix_t *, /*IN OPTIONAL*/transition_matrix_t *); NJ_t *FreeNJ(NJ_t *NJ); /* returns NULL */ void FastNJ(/*IN/OUT*/NJ_t *NJ); /* Does the joins */ void ReliabilityNJ(/*IN/OUT*/NJ_t *NJ, int nBootstrap); /* Estimates the reliability of the joins */ /* nni_stats_t is meaningless for leaves and root, so all of those entries will just be high (for age) or 0 (for delta) */ typedef struct { int age; /* number of rounds since this node was modified by an NNI */ int subtreeAge; /* number of rounds since self or descendent had a significant improvement */ double delta; /* improvement in score for this node (or 0 if no change) */ double support; /* improvement of score for self over better of alternatives */ } nni_stats_t; /* One round of nearest-neighbor interchanges according to the minimum-evolution or approximate maximum-likelihood criterion. If doing maximum likelihood then this modifies the branch lengths. 
age is the # of rounds since a node was NNId
   Returns the # of topological changes performed */
int NNI(/*IN/OUT*/NJ_t *NJ, int iRound, int nRounds, bool useML,
	/*IN/OUT*/nni_stats_t *stats,
	/*OUT*/double *maxDeltaCriterion);
nni_stats_t *InitNNIStats(NJ_t *NJ);
nni_stats_t *FreeNNIStats(nni_stats_t *, NJ_t *NJ); /* returns NULL */

/* One round of subtree-prune-regraft moves (minimum evolution) */
void SPR(/*IN/OUT*/NJ_t *NJ, int maxSPRLength, int iRound, int nRounds);

/* Recomputes all branch lengths by minimum evolution criterion*/
void UpdateBranchLengths(/*IN/OUT*/NJ_t *NJ);

/* Recomputes all branch lengths and, optionally, internal profiles */
double TreeLength(/*IN/OUT*/NJ_t *NJ, bool recomputeProfiles);

/* Counters for split/constraint agreement between the final tree and the data */
typedef struct {
  int nBadSplits;
  int nConstraintViolations;
  int nBadBoth;
  int nSplits;
  /* How much length would be reduced or likelihood would be increased by the
     best NNI we find (the worst "miss") */
  double dWorstDeltaUnconstrained;
  double dWorstDeltaConstrained;
} SplitCount_t;

void TestSplitsMinEvo(NJ_t *NJ, /*OUT*/SplitCount_t *splitcount);

/* Sets SH-like support values if nBootstrap>0 */
void TestSplitsML(/*IN/OUT*/NJ_t *NJ, /*OUT*/SplitCount_t *splitcount, int nBootstrap);

/* Pick columns for resampling, stored as returned_vector[iBoot*nPos + j] */
int *ResampleColumns(int nPos, int nBootstrap);

/* Use out-profile and NJ->totdiam to recompute out-distance for node iNode
   Only does this computation if the out-distance is "stale" (nOutDistActive[iNode] != nActive)
   Note "IN/UPDATE" for NJ always means that we may update out-distances but
   otherwise make no changes.
*/ void SetOutDistance(/*IN/UPDATE*/NJ_t *NJ, int iNode, int nActive); /* Always sets join->criterion; may update NJ->outDistance and NJ->nOutDistActive, assumes join's weight and distance are already set, and that the constraint penalty (if any) is included in the distance */ void SetCriterion(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/besthit_t *join); /* Computes weight and distance (which includes the constraint penalty) and then sets the criterion (maybe update out-distances) */ void SetDistCriterion(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/besthit_t *join); /* If join->i or join->j are inactive nodes, replaces them with their active ancestors. After doing this, if i == j, or either is -1, sets weight to 0 and dist and criterion to 1e20 and returns false (not a valid join) Otherwise, if i or j changed, recomputes the distance and criterion. Note that if i and j are unchanged then the criterion could be stale If bUpdateDist is false, and i or j change, then it just sets dist to a negative number */ bool UpdateBestHit(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/besthit_t *join, bool bUpdateDist); /* This recomputes the criterion, or returns false if the visible node is no longer active. */ bool GetVisible(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/top_hits_t *tophits, int iNode, /*OUT*/besthit_t *visible); int ActiveAncestor(/*IN*/NJ_t *NJ, int node); /* Compute the constraint penalty for a join. This is added to the "distance" by SetCriterion */ int JoinConstraintPenalty(/*IN*/NJ_t *NJ, int node1, int node2); int JoinConstraintPenaltyPiece(NJ_t *NJ, int node1, int node2, int iConstraint); /* Helper function for computing the number of constraints violated by a split, represented as counts of on and off on each side */ int SplitConstraintPenalty(int nOn1, int nOff1, int nOn2, int nOff2); /* Reports the (min. evo.) support for the (1,2) vs. 
(3,4) split
   col[iBoot*nPos+j] is column j for bootstrap iBoot */
double SplitSupport(profile_t *p1, profile_t *p2, profile_t *p3, profile_t *p4,
		    /*OPTIONAL*/distance_matrix_t *dmat,
		    int nPos,
		    int nBootstrap,
		    int *col);

/* Returns SH-like support given resampling spec. (in col) and site likelihoods
   for the three quartets */
/* NOTE(review): the parameter name "nBoostrap" is a typo for "nBootstrap";
   harmless in a prototype (parameter names are insignificant in C declarations) */
double SHSupport(int nPos, int nBoostrap, int *col, double loglk[3], double *site_likelihoods[3]);

profile_t *SeqToProfile(/*IN/OUT*/NJ_t *NJ,
			char *seq, int nPos,
			/*OPTIONAL*/char *constraintSeqs, int nConstraints,
			int iNode,
			unsigned long counts[256]);

/* ProfileDist and SeqDist only set the dist and weight fields
   If using an outprofile, use the second argument of ProfileDist
   for better performance.
   These produce uncorrected distances. */
void ProfileDist(profile_t *profile1, profile_t *profile2, int nPos,
		 /*OPTIONAL*/distance_matrix_t *distance_matrix,
		 /*OUT*/besthit_t *hit);
void SeqDist(unsigned char *codes1, unsigned char *codes2, int nPos,
	     /*OPTIONAL*/distance_matrix_t *distance_matrix,
	     /*OUT*/besthit_t *hit);

/* Computes all pairs of profile distances, applies pseudocounts if pseudoWeight > 0,
   and applies log-correction if logdist is true.
   The lower index is compared to the higher index,
   e.g. for profiles A,B,C,D the comparison will be as in quartet_pair_t */
typedef enum {qAB,qAC,qAD,qBC,qBD,qCD} quartet_pair_t;
void CorrectedPairDistances(profile_t **profiles, int nProfiles,
			    /*OPTIONAL*/distance_matrix_t *distance_matrix,
			    int nPos,
			    /*OUT*/double *distances);

/* output is indexed by nni_t
   To ensure good behavior while evaluating a subtree-prune-regraft move
   as a series of nearest-neighbor interchanges, this uses a distance-ish model
   of constraints, as given by PairConstraintDistance(), rather than
   counting the number of violated splits (which is what FastTree does
   during neighbor-joining). Thus, penalty values may well be >0 even if no
   constraints are violated, but the relative scores for the three NNIs will be correct.
*/ void QuartetConstraintPenalties(profile_t *profiles[4], int nConstraints, /*OUT*/double d[3]); double PairConstraintDistance(int nOn1, int nOff1, int nOn2, int nOff2); /* the split is consistent with the constraint if any of the profiles have no data or if three of the profiles have the same uniform value (all on or all off) or if AB|CD = 00|11 or 11|00 (all uniform) */ bool SplitViolatesConstraint(profile_t *profiles[4], int iConstraint); /* If false, no values were set because this constraint was not relevant. output is for the 3 splits */ bool QuartetConstraintPenaltiesPiece(profile_t *profiles[4], int iConstraint, /*OUT*/double penalty[3]); /* Apply Jukes-Cantor or scoredist-like log(1-d) transform to correct the distance for multiple substitutions. */ double LogCorrect(double distance); /* AverageProfile is used to do a weighted combination of nodes when doing a join. If weight is negative, then the value is ignored and the profiles are averaged. The weight is *not* adjusted for the gap content of the nodes. Also, the weight does not affect the representation of the constraints */ profile_t *AverageProfile(profile_t *profile1, profile_t *profile2, int nPos, int nConstraints, distance_matrix_t *distance_matrix, double weight1); /* PosteriorProfile() is like AverageProfile() but it computes posterior probabilities rather than an average */ profile_t *PosteriorProfile(profile_t *profile1, profile_t *profile2, double len1, double len2, /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates, int nPos, int nConstraints); /* Set a node's profile from its children. Deletes the previous profile if it exists Use -1.0 for a balanced join Fails unless the node has two children (e.g., no leaves or root) */ void SetProfile(/*IN/OUT*/NJ_t *NJ, int node, double weight1); /* OutProfile does an unweighted combination of nodes to create the out-profile. It always sets code to NOCODE so that UpdateOutProfile can work. 
*/ profile_t *OutProfile(profile_t **profiles, int nProfiles, int nPos, int nConstraints, distance_matrix_t *distance_matrix); void UpdateOutProfile(/*UPDATE*/profile_t *out, profile_t *old1, profile_t *old2, profile_t *new, int nActiveOld, int nPos, int nConstraints, distance_matrix_t *distance_matrix); profile_t *NewProfile(int nPos, int nConstraints); /* returned has no vectors */ profile_t *FreeProfile(profile_t *profile, int nPos, int nConstraints); /* returns NULL */ void AllocRateCategories(/*IN/OUT*/rates_t *rates, int nRateCategories, int nPos); /* f1 can be NULL if code1 != NOCODE, and similarly for f2 Or, if (say) weight1 was 0, then can have code1==NOCODE *and* f1==NULL In that case, returns an arbitrary large number. */ double ProfileDistPiece(unsigned int code1, unsigned int code2, numeric_t *f1, numeric_t *f2, /*OPTIONAL*/distance_matrix_t *dmat, /*OPTIONAL*/numeric_t *codeDist2); /* Adds (or subtracts, if weight is negative) fIn/codeIn from fOut fOut is assumed to exist (as from an outprofile) do not call unless weight of input profile > 0 */ void AddToFreq(/*IN/OUT*/numeric_t *fOut, double weight, unsigned int codeIn, /*OPTIONAL*/numeric_t *fIn, /*OPTIONAL*/distance_matrix_t *dmat); /* Divide the vector (of length nCodes) by a constant so that the total (unrotated) frequency is 1.0 */ void NormalizeFreq(/*IN/OUT*/numeric_t *freq, distance_matrix_t *distance_matrix); /* Allocate, if necessary, and recompute the codeDist*/ void SetCodeDist(/*IN/OUT*/profile_t *profile, int nPos, distance_matrix_t *dmat); /* The allhits list contains the distances of the node to all other active nodes This is useful for the "reset" improvement to the visible set Note that the following routines do not handle the tophits heuristic and assume that out-distances are up to date. 
*/ void SetBestHit(int node, NJ_t *NJ, int nActive, /*OUT*/besthit_t *bestjoin, /*OUT OPTIONAL*/besthit_t *allhits); void ExhaustiveNJSearch(NJ_t *NJ, int nActive, /*OUT*/besthit_t *bestjoin); /* Searches the visible set */ void FastNJSearch(NJ_t *NJ, int nActive, /*UPDATE*/besthit_t *visible, /*OUT*/besthit_t *bestjoin); /* Subroutines for handling the tophits heuristic */ top_hits_t *InitTopHits(NJ_t *NJ, int m); top_hits_t *FreeTopHits(top_hits_t *tophits); /* returns NULL */ /* Before we do any joins -- sets tophits and visible NJ may be modified by setting out-distances */ void SetAllLeafTopHits(/*IN/UPDATE*/NJ_t *NJ, /*IN/OUT*/top_hits_t *tophits); /* Find the best join to do. */ void TopHitNJSearch(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/top_hits_t *tophits, /*OUT*/besthit_t *bestjoin); /* Returns the best hit within top hits NJ may be modified because it updates out-distances if they are too stale Does *not* update visible set */ void GetBestFromTopHits(int iNode, /*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN*/top_hits_t *tophits, /*OUT*/besthit_t *bestjoin); /* visible set is modifiable so that we can reset it more globally when we do a "refresh", but we also set the visible set for newnode and do any "reset" updates too. And, we update many outdistances. 
*/ void TopHitJoin(int newnode, /*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/top_hits_t *tophits); /* Sort the input besthits by criterion and save the best nOut hits as a new array in top_hits_lists Does not update criterion or out-distances Ignores (silently removes) hit to self Saved list may be shorter than requested if there are insufficient entries */ void SortSaveBestHits(int iNode, /*IN/SORT*/besthit_t *besthits, int nIn, int nOut, /*IN/OUT*/top_hits_t *tophits); /* Given candidate hits from one node, "transfer" them to another node: Stores them in a new place in the same order searches up to active nodes if hits involve non-active nodes If update flag is set, it also recomputes distance and criterion (and ensures that out-distances are updated); otherwise it sets dist to -1e20 and criterion to 1e20 */ void TransferBestHits(/*IN/UPDATE*/NJ_t *NJ, int nActive, int iNode, /*IN*/besthit_t *oldhits, int nOldHits, /*OUT*/besthit_t *newhits, bool updateDistance); /* Create best hit objects from 1 or more hits. Do not update out-distances or set criteria */ void HitsToBestHits(/*IN*/hit_t *hits, int nHits, int iNode, /*OUT*/besthit_t *newhits); besthit_t HitToBestHit(int i, hit_t hit); /* Given a set of besthit entries, look for improvements to the visible set of the j entries. Updates out-distances as it goes. Also replaces stale nodes with this node, because a join is usually how this happens (i.e. it does not need to walk up to ancestors). 
Note this calls UpdateTopVisible() on any change */ void UpdateVisible(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN*/besthit_t *tophitsNode, int nTopHits, /*IN/OUT*/top_hits_t *tophits); /* Update the top-visible list to perhaps include this hit (O(sqrt(N)) time) */ void UpdateTopVisible(/*IN*/NJ_t * NJ, int nActive, int iNode, /*IN*/hit_t *hit, /*IN/OUT*/top_hits_t *tophits); /* Recompute the top-visible subset of the visible set */ void ResetTopVisible(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/top_hits_t *tophits); /* Make a shorter list with only unique entries. Replaces any "dead" hits to nodes that have parents with their active ancestors and ignores any that become dead. Updates all criteria. Combined gets sorted by i & j The returned list is allocated to nCombined even though only *nUniqueOut entries are filled */ besthit_t *UniqueBestHits(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/SORT*/besthit_t *combined, int nCombined, /*OUT*/int *nUniqueOut); nni_t ChooseNNI(profile_t *profiles[4], /*OPTIONAL*/distance_matrix_t *dmat, int nPos, int nConstraints, /*OUT*/double criteria[3]); /* The three internal branch lengths or log likelihoods*/ /* length[] is ordered as described by quartet_length_t, but after we do the swap of B with C (to give AC|BD) or B with D (to get AD|BC), if that is the returned choice bFast means do not consider NNIs if AB|CD is noticeably better than the star topology (as implemented by MLQuartetOptimize). 
If there are constraints, then the constraint penalty is included in criteria[] */
nni_t MLQuartetNNI(profile_t *profiles[4],
		   /*OPTIONAL*/transition_matrix_t *transmat,
		   rates_t *rates,
		   int nPos, int nConstraints,
		   /*OUT*/double criteria[3], /* The three potential quartet log-likelihoods */
		   /*IN/OUT*/numeric_t length[5],
		   bool bFast);

void OptimizeAllBranchLengths(/*IN/OUT*/NJ_t *NJ);

double TreeLogLk(/*IN*/NJ_t *NJ, /*OPTIONAL OUT*/double *site_loglk);

double MLQuartetLogLk(profile_t *pA, profile_t *pB, profile_t *pC, profile_t *pD,
		      int nPos, /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates,
		      /*IN*/double branch_lengths[5],
		      /*OPTIONAL OUT*/double *site_likelihoods);

/* Given a topology and branch lengths, estimate rates & recompute profiles */
void SetMLRates(/*IN/OUT*/NJ_t *NJ, int nRateCategories);

/* Returns a set of nRateCategories potential rates; the caller must free it */
numeric_t *MLSiteRates(int nRateCategories);

/* returns site_loglk so that site_loglk[nPos*iRate + j] is the log likelihood
   of site j with rate iRate
   The caller must free it. */
double *MLSiteLikelihoodsByRate(/*IN*/NJ_t *NJ, /*IN*/numeric_t *rates, int nRateCategories);

/* State for rescaling per-site likelihoods under the gamma model */
typedef struct {
  double mult;			/* multiplier for the rates / divisor for the tree-length */
  double alpha;
  int nPos;
  int nRateCats;
  numeric_t *rates;
  double *site_loglk;
} siteratelk_t;

double GammaLogLk(/*IN*/siteratelk_t *s, /*OPTIONAL OUT*/double *gamma_loglk_sites);

/* Input site_loglk must be for each rate.
   Note that FastTree does not reoptimize the branch lengths under the Gamma model --
   it optimizes the overall scale.
   Reports the gamma log likelihood (and logs site likelihoods if fpLog is set),
   and reports the rescaling value.
*/ double RescaleGammaLogLk(int nPos, int nRateCats, /*IN*/numeric_t *rates, /*IN*/double *site_loglk, /*OPTIONAL*/FILE *fpLog); /* P(value<=x) for the gamma distribution with shape parameter alpha and scale 1/alpha */ double PGamma(double x, double alpha); /* Given a topology and branch lengths, optimize GTR rates and quickly reoptimize branch lengths If gtrfreq is NULL, then empirical frequencies are used */ void SetMLGtr(/*IN/OUT*/NJ_t *NJ, /*OPTIONAL IN*/double *gtrfreq, /*OPTIONAL WRITE*/FILE *fpLog); /* P(A & B | len) = P(B | A, len) * P(A) If site_likelihoods is present, multiplies those values by the site likelihood at each point (Note it does not handle underflow) */ double PairLogLk(/*IN*/profile_t *p1, /*IN*/profile_t *p2, double length, int nPos, /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates, /*OPTIONAL IN/OUT*/double *site_likelihoods); /* Branch lengths for 4-taxon tree ((A,B),C,D); I means internal */ typedef enum {LEN_A,LEN_B,LEN_C,LEN_D,LEN_I} quartet_length_t; typedef struct { int nPos; transition_matrix_t *transmat; rates_t *rates; int nEval; /* number of likelihood evaluations */ /* The pair to optimize */ profile_t *pair1; profile_t *pair2; } quartet_opt_t; double PairNegLogLk(double x, void *data); /* data must be a quartet_opt_t */ typedef struct { NJ_t *NJ; double freq[4]; double rates[6]; int iRate; /* which rate to set x from */ FILE *fpLog; /* OPTIONAL WRITE */ } gtr_opt_t; /* Returns -log_likelihood for the tree with the given rates data must be a gtr_opt_t and x is used to set rate iRate Does not recompute profiles -- assumes that the caller will */ double GTRNegLogLk(double x, void *data); /* Returns the resulting log likelihood. Optionally returns whether other topologies should be abandoned, based on the difference between AB|CD and the "star topology" (AB|CD with a branch length of MLMinBranchLength) exceeding closeLogLkLimit. If bStarTest is passed in, it only optimized the internal branch if the star test is true. 
Otherwise, it optimized all 5 branch lengths in turn. */ double MLQuartetOptimize(profile_t *pA, profile_t *pB, profile_t *pC, profile_t *pD, int nPos, /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates, /*IN/OUT*/double branch_lengths[5], /*OPTIONAL OUT*/bool *pStarTest, /*OPTIONAL OUT*/double *site_likelihoods); /* Returns the resulting log likelihood */ double MLPairOptimize(profile_t *pA, profile_t *pB, int nPos, /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates, /*IN/OUT*/double *branch_length); /* Returns the number of steps considered, with the actual steps in steps[] Modifies the tree by this chain of NNIs */ int FindSPRSteps(/*IN/OUT*/NJ_t *NJ, int node, int parent, /* sibling or parent of node to NNI to start the chain */ /*IN/OUT*/profile_t **upProfiles, /*OUT*/spr_step_t *steps, int maxSteps, bool bFirstAC); /* Undo a single NNI */ void UnwindSPRStep(/*IN/OUT*/NJ_t *NJ, /*IN*/spr_step_t *step, /*IN/OUT*/profile_t **upProfiles); /* Update the profile of node and its ancestor, and delete nearby out-profiles */ void UpdateForNNI(/*IN/OUT*/NJ_t *NJ, int node, /*IN/OUT*/profile_t **upProfiles, bool useML); /* Sets NJ->parent[newchild] and replaces oldchild with newchild in the list of children of parent */ void ReplaceChild(/*IN/OUT*/NJ_t *NJ, int parent, int oldchild, int newchild); int CompareHitsByCriterion(const void *c1, const void *c2); int CompareHitsByIJ(const void *c1, const void *c2); int NGaps(NJ_t *NJ, int node); /* only handles leaf sequences */ /* node is the parent of AB, sibling of C node cannot be root or a leaf If node is the child of root, then D is the other sibling of node, and the 4th profile is D's profile. 
Otherwise, D is the parent of node, and we use its upprofile Call this with profiles=NULL to get the nodes, without fetching or computing profiles */ void SetupABCD(NJ_t *NJ, int node, /* the 4 profiles for ABCD; the last one is an upprofile */ /*OPTIONAL OUT*/profile_t *profiles[4], /*OPTIONAL IN/OUT*/profile_t **upProfiles, /*OUT*/int nodeABCD[4], bool useML); int Sibling(NJ_t *NJ, int node); /* At root, no unique sibling so returns -1 */ void RootSiblings(NJ_t *NJ, int node, /*OUT*/int sibs[2]); /* JC probability of nucleotide not changing, for each rate category */ double *PSameVector(double length, rates_t *rates); /* JC probability of nucleotide not changing, for each rate category */ double *PDiffVector(double *pSame, rates_t *rates); /* expeigen[iRate*nCodes + j] = exp(length * rate iRate * eigenvalue j) */ numeric_t *ExpEigenRates(double length, transition_matrix_t *transmat, rates_t *rates); /* Print a progress report if more than 0.1 second has gone by since the progress report */ /* Format should include 0-4 %d references and no newlines */ void ProgressReport(char *format, int iArg1, int iArg2, int iArg3, int iArg4); void LogTree(char *format, int round, /*OPTIONAL WRITE*/FILE *fp, NJ_t *NJ, char **names, uniquify_t *unique, bool bQuote); void LogMLRates(/*OPTIONAL WRITE*/FILE *fpLog, NJ_t *NJ); void *mymalloc(size_t sz); /* Prints "Out of memory" and exits on failure */ void *myfree(void *, size_t sz); /* Always returns NULL */ /* One-dimensional minimization using brent's function, with a fractional and an absolute tolerance */ double onedimenmin(double xmin, double xguess, double xmax, double (*f)(double,void*), void *data, double ftol, double atol, /*OUT*/double *fx, /*OUT*/double *f2x); double brent(double ax, double bx, double cx, double (*f)(double, void *), void *data, double ftol, double atol, double *foptx, double *f2optx, double fax, double fbx, double fcx); /* Vector operations, either using SSE3 or not Code assumes that vectors are a 
multiple of 4 in size */ void vector_multiply(/*IN*/numeric_t *f1, /*IN*/numeric_t *f2, int n, /*OUT*/numeric_t *fOut); numeric_t vector_multiply_sum(/*IN*/numeric_t *f1, /*IN*/numeric_t *f2, int n); void vector_add_mult(/*IN/OUT*/numeric_t *f, /*IN*/numeric_t *add, numeric_t weight, int n); /* multiply the transpose of a matrix by a vector */ void matrixt_by_vector4(/*IN*/numeric_t mat[4][MAXCODES], /*IN*/numeric_t vec[4], /*OUT*/numeric_t out[4]); /* sum(f1*fBy)*sum(f2*fBy) */ numeric_t vector_dot_product_rot(/*IN*/numeric_t *f1, /*IN*/numeric_t *f2, /*IN*/numeric_t* fBy, int n); /* sum(f1*f2*f3) */ numeric_t vector_multiply3_sum(/*IN*/numeric_t *f1, /*IN*/numeric_t *f2, /*IN*/numeric_t* f3, int n); numeric_t vector_sum(/*IN*/numeric_t *f1, int n); void vector_multiply_by(/*IN/OUT*/numeric_t *f, /*IN*/numeric_t fBy, int n); double clockDiff(/*IN*/struct timeval *clock_start); int timeval_subtract (/*OUT*/struct timeval *result, /*IN*/struct timeval *x, /*IN*/struct timeval *y); char *OpenMPString(void); void ran_start(long seed); double knuth_rand(); /* Random number between 0 and 1 */ void tred2 (double *a, const int n, const int np, double *d, double *e); double pythag(double a, double b); void tqli(double *d, double *e, int n, int np, double *z); /* Like mymalloc; duplicates the input (returns NULL if given NULL) */ void *mymemdup(void *data, size_t sz); void *myrealloc(void *data, size_t szOld, size_t szNew, bool bCopy); double pnorm(double z); /* Probability(value <=z) */ /* Hashtable functions */ typedef struct { char *string; int nCount; /* number of times this entry was seen */ int first; /* index of first entry with this value */ } hashbucket_t; typedef struct { int nBuckets; /* hashvalue -> bucket. 
Or look in bucket + 1, +2, etc., till you hit a NULL string */ hashbucket_t *buckets; } hashstrings_t; typedef int hashiterator_t; hashstrings_t *MakeHashtable(char **strings, int nStrings); hashstrings_t *FreeHashtable(hashstrings_t* hash); /*returns NULL*/ hashiterator_t FindMatch(hashstrings_t *hash, char *string); /* Return NULL if we have run out of values */ char *GetHashString(hashstrings_t *hash, hashiterator_t hi); int HashCount(hashstrings_t *hash, hashiterator_t hi); int HashFirst(hashstrings_t *hash, hashiterator_t hi); void PrintNJ(/*WRITE*/FILE *, NJ_t *NJ, char **names, uniquify_t *unique, bool bShowSupport, bool bQuoteNames); /* Print topology using node indices as node names */ void PrintNJInternal(/*WRITE*/FILE *, NJ_t *NJ, bool useLen); uniquify_t *UniquifyAln(/*IN*/alignment_t *aln); uniquify_t *FreeUniquify(uniquify_t *); /* returns NULL */ /* Convert a constraint alignment to a list of sequences. The returned array is indexed by iUnique and points to values in the input alignment */ char **AlnToConstraints(alignment_t *constraints, uniquify_t *unique, hashstrings_t *hashnames); /* ReadTree ignores non-unique leaves after the first instance. At the end, it prunes the tree to ignore empty children and it unroots the tree if necessary. */ void ReadTree(/*IN/OUT*/NJ_t *NJ, /*IN*/uniquify_t *unique, /*IN*/hashstrings_t *hashnames, /*READ*/FILE *fpInTree); char *ReadTreeToken(/*READ*/FILE *fp); /* returns a static array, or NULL on EOF */ void ReadTreeAddChild(int parent, int child, /*IN/OUT*/int *parents, /*IN/OUT*/children_t *children); /* Do not add the leaf if we already set this unique-set to another parent */ void ReadTreeMaybeAddLeaf(int parent, char *name, hashstrings_t *hashnames, uniquify_t *unique, /*IN/OUT*/int *parents, /*IN/OUT*/children_t *children); void ReadTreeRemove(/*IN/OUT*/int *parents, /*IN/OUT*/children_t *children, int node); /* Routines to support tree traversal and prevent visiting a node >1 time (esp. 
if topology changes). */ typedef bool *traversal_t; traversal_t InitTraversal(NJ_t*); void SkipTraversalInto(int node, /*IN/OUT*/traversal_t traversal); traversal_t FreeTraversal(traversal_t, NJ_t*); /*returns NULL*/ /* returns new node, or -1 if nothing left to do. Use root for the first call. Will return every node and then root. Uses postorder tree traversal (depth-first search going down to leaves first) Keeps track of which nodes are visited, so even after an NNI that swaps a visited child with an unvisited uncle, the next call will visit the was-uncle-now-child. (However, after SPR moves, there is no such guarantee.) If pUp is not NULL, then, if going "back up" through a previously visited node (presumably due to an NNI), then it will return the node another time, with *pUp = true. */ int TraversePostorder(int lastnode, NJ_t *NJ, /*IN/OUT*/traversal_t, /*OUT OPTIONAL*/bool *pUp); /* Routines to support storing up-profiles during tree traversal Eventually these should be smart enough to do weighted joins and to minimize memory usage */ profile_t **UpProfiles(NJ_t *NJ); profile_t *GetUpProfile(/*IN/OUT*/profile_t **upProfiles, NJ_t *NJ, int node, bool useML); profile_t *DeleteUpProfile(/*IN/OUT*/profile_t **upProfiles, NJ_t *NJ, int node); /* returns NULL */ profile_t **FreeUpProfiles(profile_t **upProfiles, NJ_t *NJ); /* returns NULL */ /* Recomputes the profile for a node, presumably to reflect topology changes If bionj is set, does a weighted join -- which requires using upProfiles If useML is set, computes the posterior probability instead of averaging */ void RecomputeProfile(/*IN/OUT*/NJ_t *NJ, /*IN/OUT*/profile_t **upProfiles, int node, bool useML); /* Recompute profiles going up from the leaves, using the provided distance matrix and unweighted joins */ void RecomputeProfiles(/*IN/OUT*/NJ_t *NJ, /*OPTIONAL*/distance_matrix_t *dmat); void RecomputeMLProfiles(/*IN/OUT*/NJ_t *NJ); /* If bionj is set, computes the weight to be given to A when computing the 
profile for the ancestor of A and B. C and D are the other profiles in the quartet If bionj is not set, returns -1 (which means unweighted in AverageProfile). (A and B are the first two profiles in the array) */ double QuartetWeight(profile_t *profiles[4], distance_matrix_t *dmat, int nPos); /* Returns a list of nodes, starting with node and ending with root */ int *PathToRoot(NJ_t *NJ, int node, /*OUT*/int *depth); int *FreePath(int *path, NJ_t *NJ); /* returns NULL */ /* The default amino acid distance matrix, derived from the BLOSUM45 similarity matrix */ distance_matrix_t matrixBLOSUM45; /* The default amino acid transition matrix (Jones Taylor Thorton 1992) */ double matrixJTT92[MAXCODES][MAXCODES]; double statJTT92[MAXCODES]; /* The Le-Gascuel 2008 amino acid transition matrix */ double matrixLG08[MAXCODES][MAXCODES]; double statLG08[MAXCODES]; /* The WAG amino acid transition matrix (Whelan-And-Goldman 2001) */ double matrixWAG01[MAXCODES][MAXCODES]; double statWAG01[MAXCODES]; int main(int argc, char **argv) { int nAlign = 1; /* number of alignments to read */ int iArg; char *matrixPrefix = NULL; char *transitionFile = NULL; distance_matrix_t *distance_matrix = NULL; bool make_matrix = false; char *constraintsFile = NULL; char *intreeFile = NULL; bool intree1 = false; /* the same starting tree each round */ int nni = -1; /* number of rounds of NNI, defaults to 4*log2(n) */ int spr = 2; /* number of rounds of SPR */ int maxSPRLength = 10; /* maximum distance to move a node */ int MLnni = -1; /* number of rounds of ML NNI, defaults to 2*log2(n) */ bool MLlen = false; /* optimize branch lengths; no topology changes */ int nBootstrap = 1000; /* If set, number of replicates of local bootstrap to do */ int nRateCats = nDefaultRateCats; char *logfile = NULL; bool bUseGtr = false; bool bUseLg = false; bool bUseWag = false; bool bUseGtrRates = false; double gtrrates[6] = {1,1,1,1,1,1}; bool bUseGtrFreq = false; double gtrfreq[4] = {0.25,0.25,0.25,0.25}; bool bQuote 
= false; FILE *fpOut = stdout; if (isatty(STDIN_FILENO) && argc == 1) { fprintf(stderr,"Usage for FastTree version %s %s%s:\n%s", FT_VERSION, SSE_STRING, OpenMPString(), usage); #if (defined _WIN32 || defined WIN32 || defined WIN64 || defined _WIN64) fprintf(stderr, "Windows users: Please remember to run this inside a command shell\n"); fprintf(stderr,"Hit return to continue\n"); fgetc(stdin); #endif exit(0); } for (iArg = 1; iArg < argc; iArg++) { if (strcmp(argv[iArg],"-makematrix") == 0) { make_matrix = true; } else if (strcmp(argv[iArg],"-logdist") == 0) { fprintf(stderr, "Warning: logdist is now on by default and obsolete\n"); } else if (strcmp(argv[iArg],"-rawdist") == 0) { logdist = false; } else if (strcmp(argv[iArg],"-verbose") == 0 && iArg < argc-1) { verbose = atoi(argv[++iArg]); } else if (strcmp(argv[iArg],"-quiet") == 0) { verbose = 0; showProgress = 0; } else if (strcmp(argv[iArg],"-nopr") == 0) { showProgress = 0; } else if (strcmp(argv[iArg],"-slow") == 0) { slow = 1; } else if (strcmp(argv[iArg],"-fastest") == 0) { fastest = 1; tophitsRefresh = 0.5; useTopHits2nd = true; } else if (strcmp(argv[iArg],"-2nd") == 0) { useTopHits2nd = true; } else if (strcmp(argv[iArg],"-no2nd") == 0) { useTopHits2nd = false; } else if (strcmp(argv[iArg],"-slownni") == 0) { fastNNI = false; } else if (strcmp(argv[iArg], "-matrix") == 0 && iArg < argc-1) { iArg++; matrixPrefix = argv[iArg]; } else if (strcmp(argv[iArg], "-nomatrix") == 0) { useMatrix = false; } else if (strcmp(argv[iArg], "-n") == 0 && iArg < argc-1) { iArg++; nAlign = atoi(argv[iArg]); if (nAlign < 1) { fprintf(stderr, "-n argument for #input alignments must be > 0 not %s\n", argv[iArg]); exit(1); } } else if (strcmp(argv[iArg], "-quote") == 0) { bQuote = true; } else if (strcmp(argv[iArg], "-nt") == 0) { nCodes = 4; } else if (strcmp(argv[iArg], "-intree") == 0 && iArg < argc-1) { iArg++; intreeFile = argv[iArg]; } else if (strcmp(argv[iArg], "-intree1") == 0 && iArg < argc-1) { iArg++; intreeFile = 
argv[iArg]; intree1 = true; } else if (strcmp(argv[iArg], "-nj") == 0) { bionj = 0; } else if (strcmp(argv[iArg], "-bionj") == 0) { bionj = 1; } else if (strcmp(argv[iArg], "-boot") == 0 && iArg < argc-1) { iArg++; nBootstrap = atoi(argv[iArg]); } else if (strcmp(argv[iArg], "-noboot") == 0 || strcmp(argv[iArg], "-nosupport") == 0) { nBootstrap = 0; } else if (strcmp(argv[iArg], "-seed") == 0 && iArg < argc-1) { iArg++; long seed = atol(argv[iArg]); ran_start(seed); } else if (strcmp(argv[iArg],"-top") == 0) { if(tophitsMult < 0.01) tophitsMult = 1.0; } else if (strcmp(argv[iArg],"-notop") == 0) { tophitsMult = 0.0; } else if (strcmp(argv[iArg], "-topm") == 0 && iArg < argc-1) { iArg++; tophitsMult = atof(argv[iArg]); } else if (strcmp(argv[iArg], "-close") == 0 && iArg < argc-1) { iArg++; tophitsClose = atof(argv[iArg]); if (tophitsMult <= 0) { fprintf(stderr, "Cannot use -close unless -top is set above 0\n"); exit(1); } if (tophitsClose <= 0 || tophitsClose >= 1) { fprintf(stderr, "-close argument must be between 0 and 1\n"); exit(1); } } else if (strcmp(argv[iArg], "-refresh") == 0 && iArg < argc-1) { iArg++; tophitsRefresh = atof(argv[iArg]); if (tophitsMult <= 0) { fprintf(stderr, "Cannot use -refresh unless -top is set above 0\n"); exit(1); } if (tophitsRefresh <= 0 || tophitsRefresh >= 1) { fprintf(stderr, "-refresh argument must be between 0 and 1\n"); exit(1); } } else if (strcmp(argv[iArg],"-nni") == 0 && iArg < argc-1) { iArg++; nni = atoi(argv[iArg]); if (nni == 0) spr = 0; } else if (strcmp(argv[iArg],"-spr") == 0 && iArg < argc-1) { iArg++; spr = atoi(argv[iArg]); } else if (strcmp(argv[iArg],"-sprlength") == 0 && iArg < argc-1) { iArg++; maxSPRLength = atoi(argv[iArg]); } else if (strcmp(argv[iArg],"-mlnni") == 0 && iArg < argc-1) { iArg++; MLnni = atoi(argv[iArg]); } else if (strcmp(argv[iArg],"-noml") == 0) { MLnni = 0; } else if (strcmp(argv[iArg],"-mllen") == 0) { MLnni = 0; MLlen = true; } else if (strcmp(argv[iArg],"-nome") == 0) { spr = 0; nni 
= 0; } else if (strcmp(argv[iArg],"-help") == 0) { fprintf(stderr,"FastTree %s %s%s:\n%s", FT_VERSION, SSE_STRING, OpenMPString(), usage); exit(0); } else if (strcmp(argv[iArg],"-expert") == 0) { fprintf(stderr, "Detailed usage for FastTree %s %s%s:\n%s", FT_VERSION, SSE_STRING, OpenMPString(), expertUsage); exit(0); } else if (strcmp(argv[iArg],"-pseudo") == 0) { if (iArg < argc-1 && isdigit(argv[iArg+1][0])) { iArg++; pseudoWeight = atof(argv[iArg]); if (pseudoWeight < 0.0) { fprintf(stderr,"Illegal argument to -pseudo: %s\n", argv[iArg]); exit(1); } } else { pseudoWeight = 1.0; } } else if (strcmp(argv[iArg],"-constraints") == 0 && iArg < argc-1) { iArg++; constraintsFile = argv[iArg]; } else if (strcmp(argv[iArg],"-constraintWeight") == 0 && iArg < argc-1) { iArg++; constraintWeight = atof(argv[iArg]); if (constraintWeight <= 0.0) { fprintf(stderr, "Illegal argument to -constraintWeight (must be greater than zero): %s\n", argv[iArg]); exit(1); } } else if (strcmp(argv[iArg],"-mlacc") == 0 && iArg < argc-1) { iArg++; mlAccuracy = atoi(argv[iArg]); if (mlAccuracy < 1) { fprintf(stderr, "Illlegal -mlacc argument: %s\n", argv[iArg]); exit(1); } } else if (strcmp(argv[iArg],"-exactml") == 0 || strcmp(argv[iArg],"-mlexact") == 0) { fprintf(stderr,"-exactml is not required -- exact posteriors is the default now\n"); } else if (strcmp(argv[iArg],"-approxml") == 0 || strcmp(argv[iArg],"-mlapprox") == 0) { exactML = false; } else if (strcmp(argv[iArg],"-cat") == 0 && iArg < argc-1) { iArg++; nRateCats = atoi(argv[iArg]); if (nRateCats < 1) { fprintf(stderr, "Illlegal argument to -ncat (must be greater than zero): %s\n", argv[iArg]); exit(1); } } else if (strcmp(argv[iArg],"-nocat") == 0) { nRateCats = 1; } else if (strcmp(argv[iArg], "-lg") == 0) { bUseLg = true; } else if (strcmp(argv[iArg], "-wag") == 0) { bUseWag = true; } else if (strcmp(argv[iArg], "-gtr") == 0) { bUseGtr = true; } else if (strcmp(argv[iArg], "-trans") == 0 && iArg < argc-1) { iArg++; transitionFile 
= argv[iArg]; } else if (strcmp(argv[iArg], "-gtrrates") == 0 && iArg < argc-6) { bUseGtr = true; bUseGtrRates = true; int i; for (i = 0; i < 6; i++) { gtrrates[i] = atof(argv[++iArg]); if (gtrrates[i] < 1e-5) { fprintf(stderr, "Illegal or too small value of GTR rate: %s\n", argv[iArg]); exit(1); } } } else if (strcmp(argv[iArg],"-gtrfreq") == 0 && iArg < argc-4) { bUseGtr = true; bUseGtrFreq = true; int i; double sum = 0; for (i = 0; i < 4; i++) { gtrfreq[i] = atof(argv[++iArg]); sum += gtrfreq[i]; if (gtrfreq[i] < 1e-5) { fprintf(stderr, "Illegal or too small value of GTR frequency: %s\n", argv[iArg]); exit(1); } } if (fabs(1.0-sum) > 0.01) { fprintf(stderr, "-gtrfreq values do not sum to 1\n"); exit(1); } for (i = 0; i < 4; i++) gtrfreq[i] /= sum; } else if (strcmp(argv[iArg],"-log") == 0 && iArg < argc-1) { iArg++; logfile = argv[iArg]; } else if (strcmp(argv[iArg],"-gamma") == 0) { gammaLogLk = true; } else if (strcmp(argv[iArg],"-out") == 0 && iArg < argc-1) { iArg++; fpOut = fopen(argv[iArg],"w"); if(fpOut==NULL) { fprintf(stderr,"Cannot write to %s\n",argv[iArg]); exit(1); } } else if (argv[iArg][0] == '-') { fprintf(stderr, "Unknown or incorrect use of option %s\n%s", argv[iArg], usage); exit(1); } else break; } if(iArg < argc-1) { fprintf(stderr, "%s", usage); exit(1); } codesString = nCodes == 20 ? codesStringAA : codesStringNT; if (nCodes == 4 && matrixPrefix == NULL) useMatrix = false; /* no default nucleotide matrix */ if (transitionFile && nCodes != 20) { fprintf(stderr, "The -trans option is only supported for amino acid alignments\n"); exit(1); } #ifndef USE_DOUBLE if (transitionFile) fprintf(stderr, "Warning: custom matrices may create numerical problems for single-precision FastTree.\n" "You may want to recompile with -DUSE_DOUBLE\n"); #endif char *fileName = iArg == (argc-1) ? 
argv[argc-1] : NULL; if (slow && fastest) { fprintf(stderr,"Cannot be both slow and fastest\n"); exit(1); } if (slow && tophitsMult > 0) { tophitsMult = 0.0; } FILE *fpLog = NULL; if (logfile != NULL) { fpLog = fopen(logfile, "w"); if (fpLog == NULL) { fprintf(stderr, "Cannot write to: %s\n", logfile); exit(1); } fprintf(fpLog, "Command:"); int i; for (i=0; i < argc; i++) fprintf(fpLog, " %s", argv[i]); fprintf(fpLog,"\n"); fflush(fpLog); } int i; FILE *fps[2] = {NULL,NULL}; int nFPs = 0; if (verbose) fps[nFPs++] = stderr; if (fpLog != NULL) fps[nFPs++] = fpLog; if (!make_matrix) { /* Report settings */ char tophitString[100] = "no"; char tophitsCloseStr[100] = "default"; if(tophitsClose > 0) sprintf(tophitsCloseStr,"%.2f",tophitsClose); if(tophitsMult>0) sprintf(tophitString,"%.2f*sqrtN close=%s refresh=%.2f", tophitsMult, tophitsCloseStr, tophitsRefresh); char supportString[100] = "none"; if (nBootstrap>0) { if (MLnni != 0 || MLlen) sprintf(supportString, "SH-like %d", nBootstrap); else sprintf(supportString,"Local boot %d",nBootstrap); } char nniString[100] = "(no NNI)"; if (nni > 0) sprintf(nniString, "+NNI (%d rounds)", nni); if (nni == -1) strcpy(nniString, "+NNI"); char sprString[100] = "(no SPR)"; if (spr > 0) sprintf(sprString, "+SPR (%d rounds range %d)", spr, maxSPRLength); char mlnniString[100] = "(no ML-NNI)"; if(MLnni > 0) sprintf(mlnniString, "+ML-NNI (%d rounds)", MLnni); else if (MLnni == -1) sprintf(mlnniString, "+ML-NNI"); else if (MLlen) sprintf(mlnniString, "+ML branch lengths"); if ((MLlen || MLnni != 0) && !exactML) strcat(mlnniString, " approx"); if (MLnni != 0) sprintf(mlnniString+strlen(mlnniString), " opt-each=%d",mlAccuracy); for (i = 0; i < nFPs; i++) { FILE *fp = fps[i]; fprintf(fp,"FastTree Version %s %s%s\nAlignment: %s", FT_VERSION, SSE_STRING, OpenMPString(), fileName != NULL ? fileName : "standard input"); if (nAlign>1) fprintf(fp, " (%d alignments)", nAlign); fprintf(fp,"\n%s distances: %s Joins: %s Support: %s\n", nCodes == 20 ? 
"Amino acid" : "Nucleotide", matrixPrefix ? matrixPrefix : (useMatrix? "BLOSUM45" : (nCodes==4 && logdist ? "Jukes-Cantor" : "%different")), bionj ? "weighted" : "balanced" , supportString); if (intreeFile == NULL) fprintf(fp, "Search: %s%s %s %s %s\nTopHits: %s\n", slow?"Exhaustive (slow)" : (fastest ? "Fastest" : "Normal"), useTopHits2nd ? "+2nd" : "", nniString, sprString, mlnniString, tophitString); else fprintf(fp, "Start at tree from %s %s %s\n", intreeFile, nniString, sprString); if (MLnni != 0 || MLlen) { fprintf(fp, "ML Model: %s,", (nCodes == 4) ? (bUseGtr ? "Generalized Time-Reversible" : "Jukes-Cantor") : (transitionFile ? transitionFile : (bUseLg ? "Le-Gascuel 2008" : (bUseWag ? "Whelan-And-Goldman" : "Jones-Taylor-Thorton")))); if (nRateCats == 1) fprintf(fp, " No rate variation across sites"); else fprintf(fp, " CAT approximation with %d rate categories", nRateCats); fprintf(fp, "\n"); if (nCodes == 4 && bUseGtrRates) fprintf(fp, "GTR rates(ac ag at cg ct gt) %.4f %.4f %.4f %.4f %.4f %.4f\n", gtrrates[0],gtrrates[1],gtrrates[2],gtrrates[3],gtrrates[4],gtrrates[5]); if (nCodes == 4 && bUseGtrFreq) fprintf(fp, "GTR frequencies(A C G T) %.4f %.4f %.4f %.4f\n", gtrfreq[0],gtrfreq[1],gtrfreq[2],gtrfreq[3]); } if (constraintsFile != NULL) fprintf(fp, "Constraints: %s Weight: %.3f\n", constraintsFile, constraintWeight); if (pseudoWeight > 0) fprintf(fp, "Pseudocount weight for comparing sequences with little overlap: %.3lf\n",pseudoWeight); fflush(fp); } } if (matrixPrefix != NULL) { if (!useMatrix) { fprintf(stderr,"Cannot use both -matrix and -nomatrix arguments!"); exit(1); } distance_matrix = ReadDistanceMatrix(matrixPrefix); } else if (useMatrix) { /* use default matrix */ assert(nCodes==20); distance_matrix = &matrixBLOSUM45; SetupDistanceMatrix(distance_matrix); } else { distance_matrix = NULL; } int iAln; FILE *fpIn = fileName != NULL ? 
fopen(fileName, "r") : stdin; if (fpIn == NULL) { fprintf(stderr, "Cannot read %s\n", fileName); exit(1); } FILE *fpConstraints = NULL; if (constraintsFile != NULL) { fpConstraints = fopen(constraintsFile, "r"); if (fpConstraints == NULL) { fprintf(stderr, "Cannot read %s\n", constraintsFile); exit(1); } } FILE *fpInTree = NULL; if (intreeFile != NULL) { fpInTree = fopen(intreeFile,"r"); if (fpInTree == NULL) { fprintf(stderr, "Cannot read %s\n", intreeFile); exit(1); } } for(iAln = 0; iAln < nAlign; iAln++) { alignment_t *aln = ReadAlignment(fpIn, bQuote); if (aln->nSeq < 1) { fprintf(stderr, "No alignment sequences\n"); exit(1); } if (fpLog) { fprintf(fpLog, "Read %d sequences, %d positions\n", aln->nSeq, aln->nPos); fflush(fpLog); } struct timeval clock_start; gettimeofday(&clock_start,NULL); ProgressReport("Read alignment",0,0,0,0); /* Check that all names in alignment are unique */ hashstrings_t *hashnames = MakeHashtable(aln->names, aln->nSeq); int i; for (i=0; i<aln->nSeq; i++) { hashiterator_t hi = FindMatch(hashnames,aln->names[i]); if (HashCount(hashnames,hi) != 1) { fprintf(stderr,"Non-unique name '%s' in the alignment\n",aln->names[i]); exit(1); } } /* Make a list of unique sequences -- note some lists are bigger than required */ ProgressReport("Hashed the names",0,0,0,0); if (make_matrix) { NJ_t *NJ = InitNJ(aln->seqs, aln->nSeq, aln->nPos, /*constraintSeqs*/NULL, /*nConstraints*/0, distance_matrix, /*transmat*/NULL); printf(" %d\n",aln->nSeq); int i,j; for(i = 0; i < NJ->nSeq; i++) { printf("%s",aln->names[i]); for (j = 0; j < NJ->nSeq; j++) { besthit_t hit; SeqDist(NJ->profiles[i]->codes,NJ->profiles[j]->codes,NJ->nPos,NJ->distance_matrix,/*OUT*/&hit); if (logdist) hit.dist = LogCorrect(hit.dist); /* Make sure -0 prints as 0 */ printf(" %f", hit.dist <= 0.0 ? 
0.0 : hit.dist); } printf("\n"); } } else { /* reset counters*/ profileOps = 0; outprofileOps = 0; seqOps = 0; profileAvgOps = 0; nHillBetter = 0; nCloseUsed = 0; nClose2Used = 0; nRefreshTopHits = 0; nVisibleUpdate = 0; nNNI = 0; nML_NNI = 0; nProfileFreqAlloc = 0; nProfileFreqAvoid = 0; szAllAlloc = 0; mymallocUsed = 0; maxmallocHeap = 0; nLkCompute = 0; nPosteriorCompute = 0; nAAPosteriorExact = 0; nAAPosteriorRough = 0; nStarTests = 0; uniquify_t *unique = UniquifyAln(aln); ProgressReport("Identified unique sequences",0,0,0,0); /* read constraints */ alignment_t *constraints = NULL; char **uniqConstraints = NULL; if (constraintsFile != NULL) { constraints = ReadAlignment(fpConstraints, bQuote); if (constraints->nSeq < 4) { fprintf(stderr, "Warning: constraints file with less than 4 sequences ignored:\nalignment #%d in %s\n", iAln+1, constraintsFile); constraints = FreeAlignment(constraints); } else { uniqConstraints = AlnToConstraints(constraints, unique, hashnames); ProgressReport("Read the constraints",0,0,0,0); } } /* end load constraints */ transition_matrix_t *transmat = NULL; if (nCodes == 20) { transmat = transitionFile? ReadAATransitionMatrix(transitionFile) : (bUseLg? CreateTransitionMatrix(matrixLG08,statLG08) : (bUseWag? CreateTransitionMatrix(matrixWAG01,statWAG01) : CreateTransitionMatrix(matrixJTT92,statJTT92))); } else if (nCodes == 4 && bUseGtr && (bUseGtrRates || bUseGtrFreq)) { transmat = CreateGTR(gtrrates,gtrfreq); } NJ_t *NJ = InitNJ(unique->uniqueSeq, unique->nUnique, aln->nPos, uniqConstraints, uniqConstraints != NULL ? constraints->nPos : 0, /* nConstraints */ distance_matrix, transmat); if (verbose>2) fprintf(stderr, "read %s seqs %d (%d unique) positions %d nameLast %s seqLast %s\n", fileName ? 
fileName : "standard input", aln->nSeq, unique->nUnique, aln->nPos, aln->names[aln->nSeq-1], aln->seqs[aln->nSeq-1]); FreeAlignmentSeqs(/*IN/OUT*/aln); /*no longer needed*/ if (fpInTree != NULL) { if (intree1) fseek(fpInTree, 0L, SEEK_SET); ReadTree(/*IN/OUT*/NJ, /*IN*/unique, /*IN*/hashnames, /*READ*/fpInTree); if (verbose > 2) fprintf(stderr, "Read tree from %s\n", intreeFile); if (verbose > 2) PrintNJ(stderr, NJ, aln->names, unique, /*support*/false, bQuote); } else { FastNJ(NJ); } LogTree("NJ", 0, fpLog, NJ, aln->names, unique, bQuote); /* profile-frequencies for the "up-profiles" in ReliabilityNJ take only diameter(Tree)*L*a space not N*L*a space, because we can free them as we go. And up-profile by their nature tend to be complicated. So save the profile-frequency memory allocation counters now to exclude later results. */ #ifdef TRACK_MEMORY long svProfileFreqAlloc = nProfileFreqAlloc; long svProfileFreqAvoid = nProfileFreqAvoid; #endif int nniToDo = nni == -1 ? (int)(0.5 + 4.0 * log(NJ->nSeq)/log(2)) : nni; int sprRemaining = spr; int MLnniToDo = (MLnni != -1) ? 
MLnni : (int)(0.5 + 2.0*log(NJ->nSeq)/log(2)); if(verbose>0) { if (fpInTree == NULL) fprintf(stderr, "Initial topology in %.2f seconds\n", clockDiff(&clock_start)); if (spr > 0 || nniToDo > 0 || MLnniToDo > 0) fprintf(stderr,"Refining topology: %d rounds ME-NNIs, %d rounds ME-SPRs, %d rounds ML-NNIs\n", nniToDo, spr, MLnniToDo); } if (nniToDo>0) { int i; bool bConverged = false; nni_stats_t *nni_stats = InitNNIStats(NJ); for (i=0; i < nniToDo; i++) { double maxDelta; if (!bConverged) { int nChange = NNI(/*IN/OUT*/NJ, i, nniToDo, /*use ml*/false, /*IN/OUT*/nni_stats, /*OUT*/&maxDelta); LogTree("ME_NNI%d",i+1, fpLog, NJ, aln->names, unique, bQuote); if (nChange == 0) { bConverged = true; if (verbose>1) fprintf(stderr, "Min_evolution NNIs converged at round %d -- skipping some rounds\n", i+1); if (fpLog) fprintf(fpLog, "Min_evolution NNIs converged at round %d -- skipping some rounds\n", i+1); } } /* Interleave SPRs with NNIs (typically 1/3rd NNI, SPR, 1/3rd NNI, SPR, 1/3rd NNI */ if (sprRemaining > 0 && (nniToDo/(spr+1) > 0 && ((i+1) % (nniToDo/(spr+1))) == 0)) { SPR(/*IN/OUT*/NJ, maxSPRLength, spr-sprRemaining, spr); LogTree("ME_SPR%d",spr-sprRemaining+1, fpLog, NJ, aln->names, unique, bQuote); sprRemaining--; /* Restart the NNIs -- set all ages to 0, etc. */ bConverged = false; nni_stats = FreeNNIStats(nni_stats, NJ); nni_stats = InitNNIStats(NJ); } } nni_stats = FreeNNIStats(nni_stats, NJ); } while(sprRemaining > 0) { /* do any remaining SPR rounds */ SPR(/*IN/OUT*/NJ, maxSPRLength, spr-sprRemaining, spr); LogTree("ME_SPR%d",spr-sprRemaining+1, fpLog, NJ, aln->names, unique, bQuote); sprRemaining--; } /* In minimum-evolution mode, update branch lengths, even if no NNIs or SPRs, so that they are log-corrected, do not include penalties from constraints, and avoid errors due to approximation of out-distances. If doing maximum-likelihood NNIs, then we'll also use these to get estimates of starting distances for quartets, etc. 
*/ UpdateBranchLengths(/*IN/OUT*/NJ); LogTree("ME_Lengths",0, fpLog, NJ, aln->names, unique, bQuote); double total_len = 0; int iNode; for (iNode = 0; iNode < NJ->maxnode; iNode++) total_len += fabs(NJ->branchlength[iNode]); if (verbose>0) { fprintf(stderr, "Total branch-length %.3f after %.2f sec\n", total_len, clockDiff(&clock_start)); fflush(stderr); } if (fpLog) { fprintf(fpLog, "Total branch-length %.3f after %.2f sec\n", total_len, clockDiff(&clock_start)); fflush(stderr); } #ifdef TRACK_MEMORY if (verbose>1) { struct mallinfo mi = mallinfo(); fprintf(stderr, "Memory @ end of ME phase: %.2f MB (%.1f byte/pos) useful %.2f expected %.2f\n", (mi.arena+mi.hblkhd)/1.0e6, (mi.arena+mi.hblkhd)/(double)(NJ->nSeq*(double)NJ->nPos), mi.uordblks/1.0e6, mymallocUsed/1e6); } #endif SplitCount_t splitcount = {0,0,0,0,0.0,0.0}; if (MLnniToDo > 0 || MLlen) { bool warn_len = total_len/NJ->maxnode < 0.001 && MLMinBranchLengthTolerance > 1.0/aln->nPos; bool warn = warn_len || (total_len/NJ->maxnode < 0.001 && aln->nPos >= 10000); if (warn) fprintf(stderr, "\nWARNING! This alignment consists of closely-related and very-long sequences.\n"); if (warn_len) fprintf(stderr, "This version of FastTree may not report reasonable branch lengths!\n" #ifdef USE_DOUBLE "Consider changing MLMinBranchLengthTolerance.\n" #else "Consider recompiling FastTree with -DUSE_DOUBLE.\n" #endif "For more information, visit\n" "http://www.microbesonline.org/fasttree/#BranchLen\n\n"); if (warn) fprintf(stderr, "WARNING! 
FastTree (or other standard maximum-likelihood tools)\n" "may not be appropriate for aligments of very closely-related sequences\n" "like this one, as FastTree does not account for recombination or gene conversion\n\n"); /* Do maximum-likelihood computations */ /* Convert profiles to use the transition matrix */ distance_matrix_t *tmatAsDist = TransMatToDistanceMat(/*OPTIONAL*/NJ->transmat); RecomputeProfiles(NJ, /*OPTIONAL*/tmatAsDist); tmatAsDist = myfree(tmatAsDist, sizeof(distance_matrix_t)); double lastloglk = -1e20; nni_stats_t *nni_stats = InitNNIStats(NJ); bool resetGtr = nCodes == 4 && bUseGtr && !bUseGtrRates; if (MLlen) { int iRound; int maxRound = (int)(0.5 + log(NJ->nSeq)/log(2)); double dLastLogLk = -1e20; for (iRound = 1; iRound <= maxRound; iRound++) { int node; numeric_t *oldlength = (numeric_t*)mymalloc(sizeof(numeric_t)*NJ->maxnodes); for (node = 0; node < NJ->maxnode; node++) oldlength[node] = NJ->branchlength[node]; OptimizeAllBranchLengths(/*IN/OUT*/NJ); LogTree("ML_Lengths",iRound, fpLog, NJ, aln->names, unique, bQuote); double dMaxChange = 0; /* biggest change in branch length */ for (node = 0; node < NJ->maxnode; node++) { double d = fabs(oldlength[node] - NJ->branchlength[node]); if (dMaxChange < d) dMaxChange = d; } oldlength = myfree(oldlength, sizeof(numeric_t)*NJ->maxnodes); double loglk = TreeLogLk(NJ, /*site_likelihoods*/NULL); bool bConverged = iRound > 1 && (dMaxChange < 0.001 || loglk < (dLastLogLk+treeLogLkDelta)); if (verbose) fprintf(stderr, "%d rounds ML lengths: LogLk %s= %.3lf Max-change %.4lf%s Time %.2f\n", iRound, exactML || nCodes != 20 ? "" : "~", loglk, dMaxChange, bConverged ? " (converged)" : "", clockDiff(&clock_start)); if (fpLog) fprintf(fpLog, "TreeLogLk\tLength%d\t%.4lf\tMaxChange\t%.4lf\n", iRound, loglk, dMaxChange); if (iRound == 1) { if (resetGtr) SetMLGtr(/*IN/OUT*/NJ, bUseGtrFreq ? 
gtrfreq : NULL, fpLog); SetMLRates(/*IN/OUT*/NJ, nRateCats); LogMLRates(fpLog, NJ); } if (bConverged) break; } } if (MLnniToDo > 0) { /* This may help us converge faster, and is fast */ OptimizeAllBranchLengths(/*IN/OUT*/NJ); LogTree("ML_Lengths%d",1, fpLog, NJ, aln->names, unique, bQuote); } int iMLnni; double maxDelta; bool bConverged = false; for (iMLnni = 0; iMLnni < MLnniToDo; iMLnni++) { int changes = NNI(/*IN/OUT*/NJ, iMLnni, MLnniToDo, /*use ml*/true, /*IN/OUT*/nni_stats, /*OUT*/&maxDelta); LogTree("ML_NNI%d",iMLnni+1, fpLog, NJ, aln->names, unique, bQuote); double loglk = TreeLogLk(NJ, /*site_likelihoods*/NULL); bool bConvergedHere = (iMLnni > 0) && ((loglk < lastloglk + treeLogLkDelta) || maxDelta < treeLogLkDelta); if (verbose) fprintf(stderr, "ML-NNI round %d: LogLk %s= %.3f NNIs %d max delta %.2f Time %.2f%s\n", iMLnni+1, exactML || nCodes != 20 ? "" : "~", loglk, changes, maxDelta, clockDiff(&clock_start), bConverged ? " (final)" : ""); if (fpLog) fprintf(fpLog, "TreeLogLk\tML_NNI%d\t%.4lf\tMaxChange\t%.4lf\n", iMLnni+1, loglk, maxDelta); if (bConverged) break; /* we did our extra round */ if (bConvergedHere) bConverged = true; if (bConverged || iMLnni == MLnniToDo-2) { /* last round uses high-accuracy seettings -- reset NNI stats to tone down heuristics */ nni_stats = FreeNNIStats(nni_stats, NJ); nni_stats = InitNNIStats(NJ); if (verbose) fprintf(stderr, "Turning off heuristics for final round of ML NNIs%s\n", bConvergedHere? " (converged)" : ""); if (fpLog) fprintf(fpLog, "Turning off heuristics for final round of ML NNIs%s\n", bConvergedHere? " (converged)" : ""); } lastloglk = loglk; if (iMLnni == 0 && NJ->rates.nRateCategories == 1) { if (resetGtr) SetMLGtr(/*IN/OUT*/NJ, bUseGtrFreq ? 
gtrfreq : NULL, fpLog); SetMLRates(/*IN/OUT*/NJ, nRateCats); LogMLRates(fpLog, NJ); } } nni_stats = FreeNNIStats(nni_stats, NJ); /* This does not take long and improves the results */ if (MLnniToDo > 0) { OptimizeAllBranchLengths(/*IN/OUT*/NJ); LogTree("ML_Lengths%d",2, fpLog, NJ, aln->names, unique, bQuote); if (verbose || fpLog) { double loglk = TreeLogLk(NJ, /*site_likelihoods*/NULL); if (verbose) fprintf(stderr, "Optimize all lengths: LogLk %s= %.3f Time %.2f\n", exactML || nCodes != 20 ? "" : "~", loglk, clockDiff(&clock_start)); if (fpLog) { fprintf(fpLog, "TreeLogLk\tML_Lengths%d\t%.4f\n", 2, loglk); fflush(fpLog); } } } /* Count bad splits and compute SH-like supports if desired */ if ((MLnniToDo > 0 && !fastest) || nBootstrap > 0) TestSplitsML(NJ, /*OUT*/&splitcount, nBootstrap); /* Compute gamma-based likelihood? */ if (gammaLogLk && nRateCats > 1) { numeric_t *rates = MLSiteRates(nRateCats); double *site_loglk = MLSiteLikelihoodsByRate(NJ, rates, nRateCats); double scale = RescaleGammaLogLk(NJ->nPos, nRateCats, rates, /*IN*/site_loglk, /*OPTIONAL*/fpLog); rates = myfree(rates, sizeof(numeric_t) * nRateCats); site_loglk = myfree(site_loglk, sizeof(double) * nRateCats * NJ->nPos); for (i = 0; i < NJ->maxnodes; i++) NJ->branchlength[i] *= scale; } } else { /* Minimum evolution supports */ TestSplitsMinEvo(NJ, /*OUT*/&splitcount); if (nBootstrap > 0) ReliabilityNJ(NJ, nBootstrap); } for (i = 0; i < nFPs; i++) { FILE *fp = fps[i]; fprintf(fp, "Total time: %.2f seconds Unique: %d/%d Bad splits: %d/%d", clockDiff(&clock_start), NJ->nSeq, aln->nSeq, splitcount.nBadSplits, splitcount.nSplits); if (splitcount.dWorstDeltaUnconstrained > 0) fprintf(fp, " Worst %sdelta-%s %.3f", uniqConstraints != NULL ? "unconstrained " : "", (MLnniToDo > 0 || MLlen) ? 
"LogLk" : "Len", splitcount.dWorstDeltaUnconstrained); fprintf(fp,"\n"); if (NJ->nSeq > 3 && NJ->nConstraints > 0) { fprintf(fp, "Violating constraints: %d both bad: %d", splitcount.nConstraintViolations, splitcount.nBadBoth); if (splitcount.dWorstDeltaConstrained > 0) fprintf(fp, " Worst delta-%s due to constraints: %.3f", (MLnniToDo > 0 || MLlen) ? "LogLk" : "Len", splitcount.dWorstDeltaConstrained); fprintf(fp,"\n"); } if (verbose > 1 || fp == fpLog) { double dN2 = NJ->nSeq*(double)NJ->nSeq; fprintf(fp, "Dist/N**2: by-profile %.3f (out %.3f) by-leaf %.3f avg-prof %.3f\n", profileOps/dN2, outprofileOps/dN2, seqOps/dN2, profileAvgOps/dN2); if (nCloseUsed>0 || nClose2Used > 0 || nRefreshTopHits>0) fprintf(fp, "Top hits: close neighbors %ld/%d 2nd-level %ld refreshes %ld", nCloseUsed, NJ->nSeq, nClose2Used, nRefreshTopHits); if(!slow) fprintf(fp, " Hill-climb: %ld Update-best: %ld\n", nHillBetter, nVisibleUpdate); if (nniToDo > 0 || spr > 0 || MLnniToDo > 0) fprintf(fp, "NNI: %ld SPR: %ld ML-NNI: %ld\n", nNNI, nSPR, nML_NNI); if (MLnniToDo > 0) { fprintf(fp, "Max-lk operations: lk %ld posterior %ld", nLkCompute, nPosteriorCompute); if (nAAPosteriorExact > 0 || nAAPosteriorRough > 0) fprintf(fp, " approximate-posteriors %.2f%%", (100.0*nAAPosteriorRough)/(double)(nAAPosteriorExact+nAAPosteriorRough)); if (mlAccuracy < 2) fprintf(fp, " star-only %ld", nStarTests); fprintf(fp, "\n"); } } #ifdef TRACK_MEMORY fprintf(fp, "Memory: %.2f MB (%.1f byte/pos) ", maxmallocHeap/1.0e6, maxmallocHeap/(double)(aln->nSeq*(double)aln->nPos)); /* Only report numbers from before we do reliability estimates */ fprintf(fp, "profile-freq-alloc %ld avoided %.2f%%\n", svProfileFreqAlloc, svProfileFreqAvoid > 0 ? 
100.0*svProfileFreqAvoid/(double)(svProfileFreqAlloc+svProfileFreqAvoid) : 0);
#endif
      fflush(fp);
    }
    /* Emit the final tree (with support values if any bootstrapping was requested) */
    PrintNJ(fpOut, NJ, aln->names, unique, /*support*/nBootstrap > 0, bQuote);
    fflush(fpOut);
    if (fpLog) {
      fprintf(fpLog,"TreeCompleted\n");
      fflush(fpLog);
    }
    /* Release all per-tree state before the next alignment */
    FreeNJ(NJ);
    if (uniqConstraints != NULL)
      uniqConstraints = myfree(uniqConstraints, sizeof(char*) * unique->nUnique);
    constraints = FreeAlignment(constraints);
    unique = FreeUniquify(unique);
  } /* end build tree */
  hashnames = FreeHashtable(hashnames);
  aln = FreeAlignment(aln);
  } /* end loop over alignments */
  if (fpLog != NULL)
    fclose(fpLog);
  if (fpOut != stdout)
    fclose(fpOut);
  exit(0);
}

/* Print one progress line to stderr, throttled to at most roughly one
   report per ~0.1 s of wall clock (the throttle is bypassed when
   verbose > 1).  Silently returns unless the global showProgress flag is
   set.  format is a printf-style string that may consume up to the four
   int arguments i1..i4 (callers pass 0 for unused slots).
   NOTE(review): relies on function-local static state for timing, so it
   is not reentrant/thread-safe -- presumably only called from the
   single-threaded driver; confirm if that ever changes. */
void ProgressReport(char *format, int i1, int i2, int i3, int i4) {
  static bool time_set = false;     /* true once time_begin/time_last are initialized */
  static struct timeval time_last;  /* wall clock at the last printed report */
  static struct timeval time_begin; /* wall clock at the first call ever */
  if (!showProgress)
    return;
  static struct timeval time_now;
  gettimeofday(&time_now,NULL);
  if (!time_set) {
    time_begin = time_last = time_now;
    time_set = true;
  }
  static struct timeval elapsed;
  timeval_subtract(&elapsed,&time_now,&time_last);
  /* throttle: only report if more than 1 s, or more than 100 ms, has
     passed since the previous report (or we are being verbose) */
  if (elapsed.tv_sec > 1 || elapsed.tv_usec > 100*1000 || verbose > 1) {
    timeval_subtract(&elapsed,&time_now,&time_begin);
    /* seconds.centiseconds elapsed since the very first report */
    fprintf(stderr, "%7i.%2.2i seconds: ", (int)elapsed.tv_sec, (int)(elapsed.tv_usec/10000));
    /* format originates from string literals at the call sites, not user input */
    fprintf(stderr, format, i1, i2, i3, i4);
    if (verbose > 1 || !isatty(STDERR_FILENO)) {
      fprintf(stderr, "\n");
    } else {
      /* interactive terminal: carriage-return so the next report overwrites this line */
      fprintf(stderr, " \r");
    }
    fflush(stderr);
    time_last = time_now;
  }
}

/* If fpLog is non-NULL, append the fitted rate-category model to the log:
   the number of categories, each category's rate, and the 1-based rate
   category assigned to every alignment position.  No-op when fpLog is NULL. */
void LogMLRates(/*OPTIONAL WRITE*/FILE *fpLog, NJ_t *NJ) {
  if (fpLog != NULL) {
    rates_t *rates = &NJ->rates;
    fprintf(fpLog, "NCategories\t%d\nRates",rates->nRateCategories);
    assert(rates->nRateCategories > 0);
    int iRate;
    for (iRate = 0; iRate < rates->nRateCategories; iRate++)
      fprintf(fpLog, " %f", rates->rates[iRate]);
    fprintf(fpLog,"\nSiteCategories");
    int iPos;
    for (iPos = 0; iPos < NJ->nPos; iPos++) {
      iRate = rates->ratecat[iPos];
      /* log categories 1-based for human readers; ratecat[] is 0-based */
      fprintf(fpLog," %d",iRate+1);
    }
    fprintf(fpLog,"\n");
    fflush(fpLog);
  }
}

/* Write the current tree to the log under a printf-style label built from
   (format, i); body continues in the next chunk. */
void LogTree(char *format, int i,
/*OPTIONAL WRITE*/FILE *fpLog, NJ_t *NJ, char **names, uniquify_t *unique, bool bQuote) { if(fpLog != NULL) { fprintf(fpLog, format, i); fprintf(fpLog, "\t"); PrintNJ(fpLog, NJ, names, unique, /*support*/false, bQuote); fflush(fpLog); } } NJ_t *InitNJ(char **sequences, int nSeq, int nPos, /*OPTIONAL*/char **constraintSeqs, int nConstraints, /*OPTIONAL*/distance_matrix_t *distance_matrix, /*OPTIONAL*/transition_matrix_t *transmat) { int iNode; NJ_t *NJ = (NJ_t*)mymalloc(sizeof(NJ_t)); NJ->root = -1; /* set at end of FastNJ() */ NJ->maxnode = NJ->nSeq = nSeq; NJ->nPos = nPos; NJ->maxnodes = 2*nSeq; NJ->seqs = sequences; NJ->distance_matrix = distance_matrix; NJ->transmat = transmat; NJ->nConstraints = nConstraints; NJ->constraintSeqs = constraintSeqs; NJ->profiles = (profile_t **)mymalloc(sizeof(profile_t*) * NJ->maxnodes); unsigned long counts[256]; int i; for (i = 0; i < 256; i++) counts[i] = 0; for (iNode = 0; iNode < NJ->nSeq; iNode++) { NJ->profiles[iNode] = SeqToProfile(NJ, NJ->seqs[iNode], nPos, constraintSeqs != NULL ? constraintSeqs[iNode] : NULL, nConstraints, iNode, /*IN/OUT*/counts); } unsigned long totCount = 0; for (i = 0; i < 256; i++) totCount += counts[i]; /* warnings about unknown characters */ for (i = 0; i < 256; i++) { if (counts[i] == 0 || i == '.' || i == '-') continue; unsigned char *codesP; bool bMatched = false; for (codesP = codesString; *codesP != '\0'; codesP++) { if (*codesP == i || tolower(*codesP) == i) { bMatched = true; break; } } if (!bMatched) fprintf(stderr, "Ignored unknown character %c (seen %lu times)\n", i, counts[i]); } /* warnings about the counts */ double fACGTUN = (counts['A'] + counts['C'] + counts['G'] + counts['T'] + counts['U'] + counts['N'] + counts['a'] + counts['c'] + counts['g'] + counts['t'] + counts['u'] + counts['n']) / (double)(totCount - counts['-'] - counts['.']); if (nCodes == 4 && fACGTUN < 0.9) fprintf(stderr, "WARNING! 
ONLY %.1f%% NUCLEOTIDE CHARACTERS -- IS THIS REALLY A NUCLEOTIDE ALIGNMENT?\n", 100.0 * fACGTUN); else if (nCodes == 20 && fACGTUN >= 0.9) fprintf(stderr, "WARNING! %.1f%% NUCLEOTIDE CHARACTERS -- IS THIS REALLY A PROTEIN ALIGNMENT?\n", 100.0 * fACGTUN); if(verbose>10) fprintf(stderr,"Made sequence profiles\n"); for (iNode = NJ->nSeq; iNode < NJ->maxnodes; iNode++) NJ->profiles[iNode] = NULL; /* not yet exists */ NJ->outprofile = OutProfile(NJ->profiles, NJ->nSeq, NJ->nPos, NJ->nConstraints, NJ->distance_matrix); if(verbose>10) fprintf(stderr,"Made out-profile\n"); NJ->totdiam = 0.0; NJ->diameter = (numeric_t *)mymalloc(sizeof(numeric_t)*NJ->maxnodes); for (iNode = 0; iNode < NJ->maxnodes; iNode++) NJ->diameter[iNode] = 0; NJ->varDiameter = (numeric_t *)mymalloc(sizeof(numeric_t)*NJ->maxnodes); for (iNode = 0; iNode < NJ->maxnodes; iNode++) NJ->varDiameter[iNode] = 0; NJ->selfdist = (numeric_t *)mymalloc(sizeof(numeric_t)*NJ->maxnodes); for (iNode = 0; iNode < NJ->maxnodes; iNode++) NJ->selfdist[iNode] = 0; NJ->selfweight = (numeric_t *)mymalloc(sizeof(numeric_t)*NJ->maxnodes); for (iNode = 0; iNode < NJ->nSeq; iNode++) NJ->selfweight[iNode] = NJ->nPos - NGaps(NJ,iNode); NJ->outDistances = (numeric_t *)mymalloc(sizeof(numeric_t)*NJ->maxnodes); NJ->nOutDistActive = (int *)mymalloc(sizeof(int)*NJ->maxnodes); for (iNode = 0; iNode < NJ->maxnodes; iNode++) NJ->nOutDistActive[iNode] = NJ->nSeq * 10; /* unreasonably high value */ NJ->parent = NULL; /* so SetOutDistance ignores it */ for (iNode = 0; iNode < NJ->nSeq; iNode++) SetOutDistance(/*IN/UPDATE*/NJ, iNode, /*nActive*/NJ->nSeq); if (verbose>2) { for (iNode = 0; iNode < 4 && iNode < NJ->nSeq; iNode++) fprintf(stderr, "Node %d outdist %f\n", iNode, NJ->outDistances[iNode]); } NJ->parent = (int *)mymalloc(sizeof(int)*NJ->maxnodes); for (iNode = 0; iNode < NJ->maxnodes; iNode++) NJ->parent[iNode] = -1; NJ->branchlength = (numeric_t *)mymalloc(sizeof(numeric_t)*NJ->maxnodes); /* distance to parent */ for (iNode = 0; 
iNode < NJ->maxnodes; iNode++) NJ->branchlength[iNode] = 0;
  /* support values default to -1 (meaning: not computed) */
  NJ->support = (numeric_t *)mymalloc(sizeof(numeric_t)*NJ->maxnodes);
  for (iNode = 0; iNode < NJ->maxnodes; iNode++) NJ->support[iNode] = -1.0;
  NJ->child = (children_t*)mymalloc(sizeof(children_t)*NJ->maxnodes);
  /* NOTE(review): the child array holds maxnodes entries but only the first
     maxnode (== nSeq at this point) are zeroed here; entries for internal
     nodes are presumably initialized when each node is created (FastNJ sets
     nChild explicitly for every new node) -- confirm no path reads an
     uninitialized entry */
  for (iNode= 0; iNode < NJ->maxnode; iNode++) NJ->child[iNode].nChild = 0;
  /* rate categories start empty, then a single unit-rate category */
  NJ->rates.nRateCategories = 0;
  NJ->rates.rates = NULL;
  NJ->rates.ratecat = NULL;
  AllocRateCategories(&NJ->rates, 1, NJ->nPos);
  return(NJ);
}

/* Free all storage owned by NJ (per-node profiles, the out-profile, every
   per-node array, the transition matrix, and the rate categories), then NJ
   itself; returns NULL.  Safe to call with NULL.  Does not free the
   sequences or constraint strings, which are owned by the caller. */
NJ_t *FreeNJ(NJ_t *NJ) {
  if (NJ==NULL)
    return(NJ);
  int i;
  /* only nodes up to maxnode ever received a profile */
  for (i=0; i < NJ->maxnode; i++)
    NJ->profiles[i] = FreeProfile(NJ->profiles[i], NJ->nPos, NJ->nConstraints);
  NJ->profiles = myfree(NJ->profiles, sizeof(profile_t*) * NJ->maxnodes);
  NJ->outprofile = FreeProfile(NJ->outprofile, NJ->nPos, NJ->nConstraints);
  NJ->diameter = myfree(NJ->diameter, sizeof(numeric_t)*NJ->maxnodes);
  NJ->varDiameter = myfree(NJ->varDiameter, sizeof(numeric_t)*NJ->maxnodes);
  NJ->selfdist = myfree(NJ->selfdist, sizeof(numeric_t)*NJ->maxnodes);
  NJ->selfweight = myfree(NJ->selfweight, sizeof(numeric_t)*NJ->maxnodes);
  NJ->outDistances = myfree(NJ->outDistances, sizeof(numeric_t)*NJ->maxnodes);
  NJ->nOutDistActive = myfree(NJ->nOutDistActive, sizeof(int)*NJ->maxnodes);
  NJ->parent = myfree(NJ->parent, sizeof(int)*NJ->maxnodes);
  NJ->branchlength = myfree(NJ->branchlength, sizeof(numeric_t)*NJ->maxnodes);
  NJ->support = myfree(NJ->support, sizeof(numeric_t)*NJ->maxnodes);
  NJ->child = myfree(NJ->child, sizeof(children_t)*NJ->maxnodes);
  NJ->transmat = myfree(NJ->transmat, sizeof(transition_matrix_t));
  /* nRateCategories == 0 means "deallocate only" */
  AllocRateCategories(&NJ->rates, 0, NJ->nPos);
  return(myfree(NJ, sizeof(NJ_t)));
}

/* Allocate or reallocate the rate categories, and set every position
   to category 0 and every category's rate to 1.0
   If nRateCategories=0, just deallocate
*/
void AllocRateCategories(/*IN/OUT*/rates_t *rates, int nRateCategories, int nPos) {
  assert(nRateCategories >= 0);
  /* release any previous arrays first (presumably myfree tolerates NULL,
     as it is called on the initially-NULL fields -- TODO confirm) */
  rates->rates = myfree(rates->rates, sizeof(numeric_t)*rates->nRateCategories);
  rates->ratecat = myfree(rates->ratecat, sizeof(unsigned int)*nPos);
  rates->nRateCategories = nRateCategories;
  if (rates->nRateCategories > 0) {
    rates->rates = (numeric_t*)mymalloc(sizeof(numeric_t)*rates->nRateCategories);
    int i;
    for (i = 0; i < nRateCategories; i++)
      rates->rates[i] = 1.0;
    rates->ratecat = (unsigned int *)mymalloc(sizeof(unsigned int)*nPos);
    for (i = 0; i < nPos; i++)
      rates->ratecat[i] = 0;
  }
}

/* Build the tree by neighbor joining.  nSeq < 3 is handled trivially
   (all leaves hang off the root); otherwise joins are chosen per
   iteration by one of three strategies: exhaustive search (slow mode),
   the visible-set heuristic, or top-hits lists (m > 0). */
void FastNJ(NJ_t *NJ) {
  int iNode;

  assert(NJ->nSeq >= 1);
  if (NJ->nSeq < 3) {
    /* 1 or 2 sequences: attach every leaf directly to a new root */
    NJ->root = NJ->maxnode++;
    NJ->child[NJ->root].nChild = NJ->nSeq;
    for (iNode = 0; iNode < NJ->nSeq; iNode++) {
      NJ->parent[iNode] = NJ->root;
      NJ->child[NJ->root].child[iNode] = iNode;
    }
    if (NJ->nSeq == 1) {
      NJ->branchlength[0] = 0;
    } else {
      assert (NJ->nSeq == 2);
      besthit_t hit;
      SeqDist(NJ->profiles[0]->codes,NJ->profiles[1]->codes,NJ->nPos,NJ->distance_matrix,/*OUT*/&hit);
      /* split the pairwise distance evenly across the two branches */
      NJ->branchlength[0] = hit.dist/2.0;
      NJ->branchlength[1] = hit.dist/2.0;
    }
    return;
  }
  /* else 3 or more sequences */

  /* The visible set stores the best hit of each node
     (unless using top hits, in which case it is handled by the top hits routines) */
  besthit_t *visible = NULL;	/* Not used if doing top hits */
  besthit_t *besthitNew = NULL;	/* All hits of new node -- not used if doing top-hits */

  /* The top-hits lists, with the key parameter m = length of each top-hit list */
  top_hits_t *tophits = NULL;
  int m = 0;			/* maximum length of a top-hits list */
  if (tophitsMult > 0) {
    /* heuristic list size ~ tophitsMult * sqrt(N), rounded to nearest int */
    m = (int)(0.5 + tophitsMult*sqrt(NJ->nSeq));
    if(m<4 || 2*m >= NJ->nSeq) {
      /* lists would cover most of the nodes anyway -- not worth it */
      m=0;
      if(verbose>1) fprintf(stderr,"Too few leaves, turning off top-hits\n");
    } else {
      if(verbose>2) fprintf(stderr,"Top-hit-list size = %d of %d\n", m, NJ->nSeq);
    }
  }
  assert(!(slow && m>0));	/* slow mode and top-hits are mutually exclusive */

  /* Initialize top-hits or visible set */
  if (m>0) {
    tophits = InitTopHits(NJ, m);
    SetAllLeafTopHits(/*IN/UPDATE*/NJ, /*OUT*/tophits);
    ResetTopVisible(/*IN/UPDATE*/NJ, /*nActive*/NJ->nSeq, /*IN/OUT*/tophits);
  } else if (!slow) {
    visible =
(besthit_t*)mymalloc(sizeof(besthit_t)*NJ->maxnodes); besthitNew = (besthit_t*)mymalloc(sizeof(besthit_t)*NJ->maxnodes); for (iNode = 0; iNode < NJ->nSeq; iNode++) SetBestHit(iNode, NJ, /*nActive*/NJ->nSeq, /*OUT*/&visible[iNode], /*OUT IGNORED*/NULL); } /* Iterate over joins */ int nActiveOutProfileReset = NJ->nSeq; int nActive; for (nActive = NJ->nSeq; nActive > 3; nActive--) { int nJoinsDone = NJ->nSeq - nActive; if (nJoinsDone > 0 && (nJoinsDone % 100) == 0) ProgressReport("Joined %6d of %6d", nJoinsDone, NJ->nSeq-3, 0, 0); besthit_t join; /* the join to do */ if (slow) { ExhaustiveNJSearch(NJ,nActive,/*OUT*/&join); } else if (m>0) { TopHitNJSearch(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/tophits, /*OUT*/&join); } else { FastNJSearch(NJ, nActive, /*IN/OUT*/visible, /*OUT*/&join); } if (verbose>2) { double penalty = constraintWeight * (double)JoinConstraintPenalty(NJ, join.i, join.j); if (penalty > 0.001) { fprintf(stderr, "Constraint violation during neighbor-joining %d %d into %d penalty %.3f\n", join.i, join.j, NJ->maxnode, penalty); int iC; for (iC = 0; iC < NJ->nConstraints; iC++) { int local = JoinConstraintPenaltyPiece(NJ, join.i, join.j, iC); if (local > 0) fprintf(stderr, "Constraint %d piece %d %d/%d %d/%d %d/%d\n", iC, local, NJ->profiles[join.i]->nOn[iC], NJ->profiles[join.i]->nOff[iC], NJ->profiles[join.j]->nOn[iC], NJ->profiles[join.j]->nOff[iC], NJ->outprofile->nOn[iC] - NJ->profiles[join.i]->nOn[iC] - NJ->profiles[join.j]->nOn[iC], NJ->outprofile->nOff[iC] - NJ->profiles[join.i]->nOff[iC] - NJ->profiles[join.j]->nOff[iC]); } } } /* because of the stale out-distance heuristic, make sure that these are up-to-date */ SetOutDistance(NJ, join.i, nActive); SetOutDistance(NJ, join.j, nActive); /* Make sure weight is set and criterion is up to date */ SetDistCriterion(NJ, nActive, /*IN/OUT*/&join); assert(NJ->nOutDistActive[join.i] == nActive); assert(NJ->nOutDistActive[join.j] == nActive); int newnode = NJ->maxnode++; NJ->parent[join.i] = newnode; 
NJ->parent[join.j] = newnode; NJ->child[newnode].nChild = 2; NJ->child[newnode].child[0] = join.i < join.j ? join.i : join.j; NJ->child[newnode].child[1] = join.i > join.j ? join.i : join.j; double rawIJ = join.dist + NJ->diameter[join.i] + NJ->diameter[join.j]; double distIJ = join.dist; double deltaDist = (NJ->outDistances[join.i]-NJ->outDistances[join.j])/(double)(nActive-2); NJ->branchlength[join.i] = (distIJ + deltaDist)/2; NJ->branchlength[join.j] = (distIJ - deltaDist)/2; double bionjWeight = 0.5; /* IJ = bionjWeight*I + (1-bionjWeight)*J */ double varIJ = rawIJ - NJ->varDiameter[join.i] - NJ->varDiameter[join.j]; if (bionj && join.weight > 0.01 && varIJ > 0.001) { /* Set bionjWeight according to the BIONJ formula, where the variance matrix is approximated by Vij = ProfileVar(i,j) - varDiameter(i) - varDiameter(j) ProfileVar(i,j) = distance(i,j) = top(i,j)/weight(i,j) (The node's distance diameter does not affect the variances.) The BIONJ formula is equation 9 from Gascuel 1997: bionjWeight = 1/2 + sum(k!=i,j) (Vjk - Vik) / ((nActive-2)*Vij) sum(k!=i,j) (Vjk - Vik) = sum(k!=i,j) Vik - varDiameter(j) + varDiameter(i) = sum(k!=i,j) ProfileVar(j,k) - sum(k!=i,j) ProfileVar(i,k) + (nActive-2)*(varDiameter(i)-varDiameter(j)) sum(k!=i,j) ProfileVar(i,k) ~= (sum(k!=i,j) distance(i,k) * weight(i,k))/(mean(k!=i,j) weight(i,k)) ~= (N-2) * top(i, Out-i-j) / weight(i, Out-i-j) weight(i, Out-i-j) = N*weight(i,Out) - weight(i,i) - weight(i,j) top(i, Out-i-j) = N*top(i,Out) - top(i,i) - top(i,j) */ besthit_t outI; besthit_t outJ; ProfileDist(NJ->profiles[join.i],NJ->outprofile,NJ->nPos,NJ->distance_matrix,/*OUT*/&outI); ProfileDist(NJ->profiles[join.j],NJ->outprofile,NJ->nPos,NJ->distance_matrix,/*OUT*/&outJ); outprofileOps += 2; double varIWeight = (nActive * outI.weight - NJ->selfweight[join.i] - join.weight); double varJWeight = (nActive * outJ.weight - NJ->selfweight[join.j] - join.weight); double varITop = outI.dist * outI.weight * nActive - NJ->selfdist[join.i] * 
NJ->selfweight[join.i] - rawIJ * join.weight; double varJTop = outJ.dist * outJ.weight * nActive - NJ->selfdist[join.j] * NJ->selfweight[join.j] - rawIJ * join.weight; double deltaProfileVarOut = (nActive-2) * (varJTop/varJWeight - varITop/varIWeight); double deltaVarDiam = (nActive-2)*(NJ->varDiameter[join.i] - NJ->varDiameter[join.j]); if (varJWeight > 0.01 && varIWeight > 0.01) bionjWeight = 0.5 + (deltaProfileVarOut+deltaVarDiam)/(2*(nActive-2)*varIJ); if(bionjWeight<0) bionjWeight=0; if(bionjWeight>1) bionjWeight=1; if (verbose>2) fprintf(stderr,"dVarO %f dVarDiam %f varIJ %f from dist %f weight %f (pos %d) bionjWeight %f %f\n", deltaProfileVarOut, deltaVarDiam, varIJ, join.dist, join.weight, NJ->nPos, bionjWeight, 1-bionjWeight); if (verbose>3 && (newnode%5) == 0) { /* Compare weight estimated from outprofiles from weight made by summing over other nodes */ double deltaProfileVarTot = 0; for (iNode = 0; iNode < newnode; iNode++) { if (NJ->parent[iNode] < 0) { /* excludes join.i, join.j */ besthit_t di, dj; ProfileDist(NJ->profiles[join.i],NJ->profiles[iNode],NJ->nPos,NJ->distance_matrix,/*OUT*/&di); ProfileDist(NJ->profiles[join.j],NJ->profiles[iNode],NJ->nPos,NJ->distance_matrix,/*OUT*/&dj); deltaProfileVarTot += dj.dist - di.dist; } } double lambdaTot = 0.5 + (deltaProfileVarTot+deltaVarDiam)/(2*(nActive-2)*varIJ); if (lambdaTot < 0) lambdaTot = 0; if (lambdaTot > 1) lambdaTot = 1; if (fabs(bionjWeight-lambdaTot) > 0.01 || verbose > 4) fprintf(stderr, "deltaProfileVar actual %.6f estimated %.6f lambda actual %.3f estimated %.3f\n", deltaProfileVarTot,deltaProfileVarOut,lambdaTot,bionjWeight); } } if (verbose > 2) fprintf(stderr, "Join\t%d\t%d\t%.6f\tlambda\t%.6f\tselfw\t%.3f\t%.3f\tnew\t%d\n", join.i < join.j ? join.i : join.j, join.i < join.j ? join.j : join.i, join.criterion, bionjWeight, NJ->selfweight[join.i < join.j ? join.i : join.j], NJ->selfweight[join.i < join.j ? 
join.j : join.i], newnode); NJ->diameter[newnode] = bionjWeight * (NJ->branchlength[join.i] + NJ->diameter[join.i]) + (1-bionjWeight) * (NJ->branchlength[join.j] + NJ->diameter[join.j]); NJ->varDiameter[newnode] = bionjWeight * NJ->varDiameter[join.i] + (1-bionjWeight) * NJ->varDiameter[join.j] + bionjWeight * (1-bionjWeight) * varIJ; NJ->profiles[newnode] = AverageProfile(NJ->profiles[join.i],NJ->profiles[join.j], NJ->nPos, NJ->nConstraints, NJ->distance_matrix, bionj ? bionjWeight : /*noweight*/-1.0); /* Update out-distances and total diameters */ int changedActiveOutProfile = nActiveOutProfileReset - (nActive-1); if (changedActiveOutProfile >= nResetOutProfile && changedActiveOutProfile >= fResetOutProfile * nActiveOutProfileReset) { /* Recompute the outprofile from scratch to avoid roundoff error */ profile_t **activeProfiles = (profile_t**)mymalloc(sizeof(profile_t*)*(nActive-1)); int nSaved = 0; NJ->totdiam = 0; for (iNode=0;iNode<NJ->maxnode;iNode++) { if (NJ->parent[iNode]<0) { assert(nSaved < nActive-1); activeProfiles[nSaved++] = NJ->profiles[iNode]; NJ->totdiam += NJ->diameter[iNode]; } } assert(nSaved==nActive-1); FreeProfile(NJ->outprofile, NJ->nPos, NJ->nConstraints); if(verbose>2) fprintf(stderr,"Recomputing outprofile %d %d\n",nActiveOutProfileReset,nActive-1); NJ->outprofile = OutProfile(activeProfiles, nSaved, NJ->nPos, NJ->nConstraints, NJ->distance_matrix); activeProfiles = myfree(activeProfiles, sizeof(profile_t*)*(nActive-1)); nActiveOutProfileReset = nActive-1; } else { UpdateOutProfile(/*OUT*/NJ->outprofile, NJ->profiles[join.i], NJ->profiles[join.j], NJ->profiles[newnode], nActive, NJ->nPos, NJ->nConstraints, NJ->distance_matrix); NJ->totdiam += NJ->diameter[newnode] - NJ->diameter[join.i] - NJ->diameter[join.j]; } /* Store self-dist for use in other computations */ besthit_t selfdist; ProfileDist(NJ->profiles[newnode],NJ->profiles[newnode],NJ->nPos,NJ->distance_matrix,/*OUT*/&selfdist); NJ->selfdist[newnode] = selfdist.dist; 
NJ->selfweight[newnode] = selfdist.weight; /* Find the best hit of the joined node IJ */ if (m>0) { TopHitJoin(newnode, /*IN/UPDATE*/NJ, nActive-1, /*IN/OUT*/tophits); } else { /* Not using top-hits, so we update all out-distances */ for (iNode = 0; iNode < NJ->maxnode; iNode++) { if (NJ->parent[iNode] < 0) { /* True nActive is now nActive-1 */ SetOutDistance(/*IN/UPDATE*/NJ, iNode, nActive-1); } } if(visible != NULL) { SetBestHit(newnode, NJ, nActive-1, /*OUT*/&visible[newnode], /*OUT OPTIONAL*/besthitNew); if (verbose>2) fprintf(stderr,"Visible %d %d %f %f\n", visible[newnode].i, visible[newnode].j, visible[newnode].dist, visible[newnode].criterion); if (besthitNew != NULL) { /* Use distances to new node to update visible set entries that are non-optimal */ for (iNode = 0; iNode < NJ->maxnode; iNode++) { if (NJ->parent[iNode] >= 0 || iNode == newnode) continue; int iOldVisible = visible[iNode].j; assert(iOldVisible>=0); assert(visible[iNode].i == iNode); /* Update the criterion; use nActive-1 because haven't decremented nActive yet */ if (NJ->parent[iOldVisible] < 0) SetCriterion(/*IN/OUT*/NJ, nActive-1, &visible[iNode]); if (NJ->parent[iOldVisible] >= 0 || besthitNew[iNode].criterion < visible[iNode].criterion) { if(verbose>3) fprintf(stderr,"Visible %d reset from %d to %d (%f vs. 
%f)\n", iNode, iOldVisible, newnode, visible[iNode].criterion, besthitNew[iNode].criterion); if(NJ->parent[iOldVisible] < 0) nVisibleUpdate++; visible[iNode].j = newnode; visible[iNode].dist = besthitNew[iNode].dist; visible[iNode].criterion = besthitNew[iNode].criterion; } } /* end loop over all nodes */ } /* end if recording all hits of new node */ } /* end if keeping a visible set */ } /* end else (m==0) */ } /* end loop over nActive */ #ifdef TRACK_MEMORY if (verbose>1) { struct mallinfo mi = mallinfo(); fprintf(stderr, "Memory @ end of FastNJ(): %.2f MB (%.1f byte/pos) useful %.2f expected %.2f\n", (mi.arena+mi.hblkhd)/1.0e6, (mi.arena+mi.hblkhd)/(double)(NJ->nSeq*(double)NJ->nPos), mi.uordblks/1.0e6, mymallocUsed/1e6); } #endif /* We no longer need the tophits, visible set, etc. */ if (visible != NULL) visible = myfree(visible,sizeof(besthit_t)*NJ->maxnodes); if (besthitNew != NULL) besthitNew = myfree(besthitNew,sizeof(besthit_t)*NJ->maxnodes); tophits = FreeTopHits(tophits); /* Add a root for the 3 remaining nodes */ int top[3]; int nTop = 0; for (iNode = 0; iNode < NJ->maxnode; iNode++) { if (NJ->parent[iNode] < 0) { assert(nTop <= 2); top[nTop++] = iNode; } } assert(nTop==3); NJ->root = NJ->maxnode++; NJ->child[NJ->root].nChild = 3; for (nTop = 0; nTop < 3; nTop++) { NJ->parent[top[nTop]] = NJ->root; NJ->child[NJ->root].child[nTop] = top[nTop]; } besthit_t dist01, dist02, dist12; ProfileDist(NJ->profiles[top[0]], NJ->profiles[top[1]], NJ->nPos, NJ->distance_matrix, /*OUT*/&dist01); ProfileDist(NJ->profiles[top[0]], NJ->profiles[top[2]], NJ->nPos, NJ->distance_matrix, /*OUT*/&dist02); ProfileDist(NJ->profiles[top[1]], NJ->profiles[top[2]], NJ->nPos, NJ->distance_matrix, /*OUT*/&dist12); double d01 = dist01.dist - NJ->diameter[top[0]] - NJ->diameter[top[1]]; double d02 = dist02.dist - NJ->diameter[top[0]] - NJ->diameter[top[2]]; double d12 = dist12.dist - NJ->diameter[top[1]] - NJ->diameter[top[2]]; NJ->branchlength[top[0]] = (d01 + d02 - d12)/2; 
NJ->branchlength[top[1]] = (d01 + d12 - d02)/2;
  NJ->branchlength[top[2]] = (d02 + d12 - d01)/2;

  /* Check how accurate the outprofile is */
  if (verbose>2) {
    /* rebuild an out-profile from just the 3 remaining nodes and compare
       against the incrementally-maintained one to measure roundoff drift */
    profile_t *p[3] = {NJ->profiles[top[0]], NJ->profiles[top[1]], NJ->profiles[top[2]]};
    profile_t *out = OutProfile(p, 3, NJ->nPos, NJ->nConstraints, NJ->distance_matrix);
    int i;
    double freqerror = 0;
    double weighterror = 0;
    for (i=0;i<NJ->nPos;i++) {
      weighterror += fabs(out->weights[i] - NJ->outprofile->weights[i]);
      int k;
      for(k=0;k<nCodes;k++)
        freqerror += fabs(out->vectors[nCodes*i+k] - NJ->outprofile->vectors[nCodes*i+k]);
    }
    fprintf(stderr,"Roundoff error in outprofile@end: WeightError %f FreqError %f\n", weighterror, freqerror);
    FreeProfile(out, NJ->nPos, NJ->nConstraints);
  }
  return;
}

/* Scan every pair of active nodes (parent < 0) for the join with the lowest
   criterion -- O(maxnode^2) per call; used in slow mode.  Asserts that at
   least one active pair exists. */
void ExhaustiveNJSearch(NJ_t *NJ, int nActive, /*OUT*/besthit_t *join) {
  join->i = -1;
  join->j = -1;
  join->weight = 0;
  join->dist = 1e20;
  join->criterion = 1e20;
  double bestCriterion = 1e20;
  int i, j;
  for (i = 0; i < NJ->maxnode-1; i++) {
    if (NJ->parent[i] < 0) {	/* i is still active */
      for (j = i+1; j < NJ->maxnode; j++) {
        if (NJ->parent[j] < 0) {	/* j is still active */
          besthit_t hit;
          hit.i = i;
          hit.j = j;
          SetDistCriterion(NJ, nActive, /*IN/OUT*/&hit);
          if (hit.criterion < bestCriterion) {
            *join = hit;
            bestCriterion = hit.criterion;
          }
        }
      }
    }
  }
  assert (join->i >= 0 && join->j >= 0);
}

/* Choose the next join from the visible set (besthits, one entry per node),
   then -- unless in fastest mode -- hill-climb: recompute the true best hit
   of each endpoint and move to it when better, repeating until the chosen
   pair is mutually best. */
void FastNJSearch(NJ_t *NJ, int nActive, /*IN/OUT*/besthits_t *besthits, /*OUT*/besthit_t *join) {
  join->i = -1;
  join->j = -1;
  join->dist = 1e20;
  join->weight = 0;
  join->criterion = 1e20;
  int iNode;
  for (iNode = 0; iNode < NJ->maxnode; iNode++) {
    int jNode = besthits[iNode].j;
    if (NJ->parent[iNode] < 0 && NJ->parent[jNode] < 0) { /* both i and j still active */
      /* recompute criterion to reflect the current out-distances */
      SetCriterion(NJ, nActive, /*IN/OUT*/&besthits[iNode]);
      if (besthits[iNode].criterion < join->criterion)
        *join = besthits[iNode];
    }
  }
  if(!fastest) {
    int changed;
    do {
      changed = 0;
      assert(join->i >= 0 && join->j >= 0);
      SetBestHit(join->i, NJ, nActive, /*OUT*/&besthits[join->i], /*OUT IGNORED*/NULL);
      if (besthits[join->i].j != join->j) {
        changed = 1;
        if (verbose>2)
          fprintf(stderr,"BetterI\t%d\t%d\t%d\t%d\t%f\t%f\n",
                  join->i,join->j,besthits[join->i].i,besthits[join->i].j,
                  join->criterion,besthits[join->i].criterion);
      }
      /* Save the best hit either way, because the out-distance has probably changed
         since we started the computation. */
      join->j = besthits[join->i].j;
      join->weight = besthits[join->i].weight;
      join->dist = besthits[join->i].dist;
      join->criterion = besthits[join->i].criterion;
      SetBestHit(join->j, NJ, nActive, /*OUT*/&besthits[join->j], /*OUT IGNORE*/NULL);
      if (besthits[join->j].j != join->i) {
        changed = 1;
        if (verbose>2)
          fprintf(stderr,"BetterJ\t%d\t%d\t%d\t%d\t%f\t%f\n",
                  join->i,join->j,besthits[join->j].i,besthits[join->j].j,
                  join->criterion,besthits[join->j].criterion);
        join->i = besthits[join->j].j;
        join->weight = besthits[join->j].weight;
        join->dist = besthits[join->j].dist;
        join->criterion = besthits[join->j].criterion;
      }
      if(changed)
        nHillBetter++;
    } while(changed);
  }
}

/* A token is one of ():;, or an alphanumeric string without whitespace
   Any whitespace between tokens is ignored
   Returns a pointer to a static buffer (overwritten by the next call),
   or NULL at end of file. */
char *ReadTreeToken(FILE *fp) {
  static char buf[BUFFER_SIZE];
  int len = 0;
  int c;
  for (c = fgetc(fp); c != EOF; c = fgetc(fp)) {
    if (c == '(' || c == ')' || c == ':' || c == ';' || c == ',') {
      /* standalone token */
      if (len == 0) {
        buf[len++] = c;
        buf[len] = '\0';
        return(buf);
      } else {
        /* punctuation ends the current word; push it back for the next call */
        ungetc(c, fp);
        buf[len] = '\0';
        return(buf);
      }
    } else if (isspace(c)) {
      if (len > 0) {
        buf[len] = '\0';
        return(buf);
      }
      /* else ignore whitespace at beginning of token */
    } else {
      /* not whitespace or standalone token */
      buf[len++] = c;
      if (len >= BUFFER_SIZE) {
        buf[BUFFER_SIZE-1] = '\0';
        fprintf(stderr, "Token too long in tree file, token begins with\n%s\n", buf);
        exit(1);
      }
    }
  }
  if (len > 0) {
    /* return the token we have so far */
    buf[len] = '\0';
    return(buf);
  }
  /* else */
  return(NULL);
}

/* Report a tree-parsing failure and exit(1); token may be NULL at EOF */
void ReadTreeError(char *err, char *token) {
  fprintf(stderr, "Tree parse error: unexpected token '%s' -- %s\n",
          token == NULL ? "(End of file)" : token,
          err);
  exit(1);
}

/* Link child under parent in the scratch parse arrays; child must not
   already have a parent, and parent must have room (< 3 children) */
void ReadTreeAddChild(int parent, int child, /*IN/OUT*/int *parents, /*IN/OUT*/children_t *children) {
  assert(parent >= 0);
  assert(child >= 0);
  assert(parents[child] < 0);
  assert(children[parent].nChild < 3);
  parents[child] = parent;
  children[parent].child[children[parent].nChild++] = child;
}

/* Look up a leaf name from the input tree; if the corresponding unique
   sequence has not been placed yet, attach it under parent.  Duplicate
   alignment names map to the same unique id, so repeats are skipped.
   Errors out (via ReadTreeError) if the name is not a known sequence. */
void ReadTreeMaybeAddLeaf(int parent, char *name,
                          hashstrings_t *hashnames, uniquify_t *unique,
                          /*IN/OUT*/int *parents, /*IN/OUT*/children_t *children) {
  hashiterator_t hi = FindMatch(hashnames,name);
  if (HashCount(hashnames,hi) != 1)
    ReadTreeError("not recognized as a sequence name", name);
  int iSeqNonunique = HashFirst(hashnames,hi);
  assert(iSeqNonunique >= 0 && iSeqNonunique < unique->nSeq);
  int iSeqUnique = unique->alnToUniq[iSeqNonunique];
  assert(iSeqUnique >= 0 && iSeqUnique < unique->nUnique);
  /* Either record this leaves' parent (if it is -1) or ignore this leaf (if already seen) */
  if (parents[iSeqUnique] < 0) {
    ReadTreeAddChild(parent, iSeqUnique, /*IN/OUT*/parents, /*IN/OUT*/children);
    if(verbose > 5)
      fprintf(stderr, "Found leaf uniq%d name %s child of %d\n", iSeqUnique, name, parent);
  } else {
    if (verbose > 5)
      fprintf(stderr, "Skipped redundant leaf uniq%d name %s\n", iSeqUnique, name);
  }
}

/* Splice node out of the scratch parse tree: remove it from its parent's
   child list (shifting later siblings down) and re-attach node's own
   children (at most 2) to that parent */
void ReadTreeRemove(/*IN/OUT*/int *parents, /*IN/OUT*/children_t *children, int node) {
  if(verbose > 5)
    fprintf(stderr,"Removing node %d parent %d\n", node, parents[node]);
  assert(parents[node] >= 0);
  int parent = parents[node];
  parents[node] = -1;
  children_t *pc = &children[parent];
  int oldn;
  for (oldn = 0; oldn < pc->nChild; oldn++) {
    if (pc->child[oldn] == node)
      break;
  }
  assert(oldn < pc->nChild);
  /* move successor nodes back in child list and shorten list */
  int i;
  for (i = oldn; i < pc->nChild-1; i++)
    pc->child[i] = pc->child[i+1];
  pc->nChild--;
  /* add its children to parent's child list */
  children_t *nc = &children[node];
  if (nc->nChild > 0) {
    assert(nc->nChild<=2);
assert(pc->nChild < 3); assert(pc->nChild + nc->nChild <= 3); int j; for (j = 0; j < nc->nChild; j++) { if(verbose > 5) fprintf(stderr,"Repointing parent %d to child %d\n", parent, nc->child[j]); pc->child[pc->nChild++] = nc->child[j]; parents[nc->child[j]] = parent; } nc->nChild = 0; } } void ReadTree(/*IN/OUT*/NJ_t *NJ, /*IN*/uniquify_t *unique, /*IN*/hashstrings_t *hashnames, /*READ*/FILE *fpInTree) { assert(NJ->nSeq == unique->nUnique); /* First, do a preliminary parse of the tree to with non-unique leaves ignored We need to store this separately from NJ because it may have too many internal nodes (matching sequences show up once in the NJ but could be in multiple places in the tree) Will use iUnique as the index of nodes, as in the NJ structure */ int maxnodes = unique->nSeq*2; int maxnode = unique->nSeq; int *parent = (int*)mymalloc(sizeof(int)*maxnodes); children_t *children = (children_t *)mymalloc(sizeof(children_t)*maxnodes); int root = maxnode++; int i; for (i = 0; i < maxnodes; i++) { parent[i] = -1; children[i].nChild = 0; } /* The stack is the current path to the root, with the root at the first (top) position */ int stack_size = 1; int *stack = (int*)mymalloc(sizeof(int)*maxnodes); stack[0] = root; int nDown = 0; int nUp = 0; char *token; token = ReadTreeToken(fpInTree); if (token == NULL || *token != '(') ReadTreeError("No '(' at start", token); /* nDown is still 0 because we have created the root */ while ((token = ReadTreeToken(fpInTree)) != NULL) { if (nDown > 0) { /* In a stream of parentheses */ if (*token == '(') nDown++; else if (*token == ',' || *token == ';' || *token == ':' || *token == ')') ReadTreeError("while reading parentheses", token); else { /* Add intermediate nodes if nDown was > 1 (for nDown=1, the only new node is the leaf) */ while (nDown-- > 0) { int new = maxnode++; assert(new < maxnodes); ReadTreeAddChild(stack[stack_size-1], new, /*IN/OUT*/parent, /*IN/OUT*/children); if(verbose > 5) fprintf(stderr, "Added internal child %d 
of %d, stack size increase to %d\n", new, stack[stack_size-1],stack_size+1); stack[stack_size++] = new; assert(stack_size < maxnodes); } ReadTreeMaybeAddLeaf(stack[stack_size-1], token, hashnames, unique, /*IN/OUT*/parent, /*IN/OUT*/children); } } else if (nUp > 0) { if (*token == ';') { /* end the tree? */ if (nUp != stack_size) ReadTreeError("unbalanced parentheses", token); else break; } else if (*token == ')') nUp++; else if (*token == '(') ReadTreeError("unexpected '(' after ')'", token); else if (*token == ':') { token = ReadTreeToken(fpInTree); /* Read the branch length and ignore it */ if (token == NULL || (*token != '-' && !isdigit(*token))) ReadTreeError("not recognized as a branch length", token); } else if (*token == ',') { /* Go back up the stack the correct #times */ while (nUp-- > 0) { stack_size--; if(verbose > 5) fprintf(stderr, "Up to nUp=%d stack size %d at %d\n", nUp, stack_size, stack[stack_size-1]); if (stack_size <= 0) ReadTreeError("too many ')'", token); } nUp = 0; } else if (*token == '-' || isdigit(*token)) ; /* ignore bootstrap value */ else fprintf(stderr, "Warning while parsing tree: non-numeric label %s for internal node\n", token); } else if (*token == '(') { nDown = 1; } else if (*token == ')') { nUp = 1; } else if (*token == ':') { token = ReadTreeToken(fpInTree); if (token == NULL || (*token != '-' && !isdigit(*token))) ReadTreeError("not recognized as a branch length", token); } else if (*token == ',') { ; /* do nothing */ } else if (*token == ';') ReadTreeError("unexpected token", token); else ReadTreeMaybeAddLeaf(stack[stack_size-1], token, hashnames, unique, /*IN/OUT*/parent, /*IN/OUT*/children); } /* Verify that all sequences were seen */ for (i = 0; i < unique->nUnique; i++) { if (parent[i] < 0) { fprintf(stderr, "Alignment sequence %d (unique %d) absent from input tree\n" "The starting tree (the argument to -intree) must include all sequences in the alignment!\n", unique->uniqueFirst[i], i); exit(1); } } /* Simplify the 
tree -- remove all internal nodes with < 2 children Keep trying until no nodes get removed */ int nRemoved; do { nRemoved = 0; /* Here stack is the list of nodes we haven't visited yet while doing a tree traversal */ stack_size = 1; stack[0] = root; while (stack_size > 0) { int node = stack[--stack_size]; if (node >= unique->nUnique) { /* internal node */ if (children[node].nChild <= 1) { if (node != root) { ReadTreeRemove(/*IN/OUT*/parent,/*IN/OUT*/children,node); nRemoved++; } else if (node == root && children[node].nChild == 1) { int newroot = children[node].child[0]; parent[newroot] = -1; children[root].nChild = 0; nRemoved++; if(verbose > 5) fprintf(stderr,"Changed root from %d to %d\n",root,newroot); root = newroot; stack[stack_size++] = newroot; } } else { int j; for (j = 0; j < children[node].nChild; j++) { assert(stack_size < maxnodes); stack[stack_size++] = children[node].child[j]; if(verbose > 5) fprintf(stderr,"Added %d to stack\n", stack[stack_size-1]); } } } } } while (nRemoved > 0); /* Simplify the root node to 3 children if it has 2 */ if (children[root].nChild == 2) { for (i = 0; i < 2; i++) { int child = children[root].child[i]; assert(child >= 0 && child < maxnodes); if (children[child].nChild == 2) { ReadTreeRemove(parent,children,child); /* replace root -> child -> A,B with root->A,B */ break; } } } for (i = 0; i < maxnodes; i++) if(verbose > 5) fprintf(stderr,"Simplfied node %d has parent %d nchild %d\n", i, parent[i], children[i].nChild); /* Map the remaining internal nodes to NJ nodes */ int *map = (int*)mymalloc(sizeof(int)*maxnodes); for (i = 0; i < unique->nUnique; i++) map[i] = i; for (i = unique->nUnique; i < maxnodes; i++) map[i] = -1; stack_size = 1; stack[0] = root; while (stack_size > 0) { int node = stack[--stack_size]; if (node >= unique->nUnique) { /* internal node */ assert(node == root || children[node].nChild > 1); map[node] = NJ->maxnode++; for (i = 0; i < children[node].nChild; i++) { assert(stack_size < maxnodes); 
stack[stack_size++] = children[node].child[i]; } } } for (i = 0; i < maxnodes; i++) if(verbose > 5) fprintf(stderr,"Map %d to %d (parent %d nchild %d)\n", i, map[i], parent[i], children[i].nChild); /* Set NJ->parent, NJ->children, NJ->root */ NJ->root = map[root]; int node; for (node = 0; node < maxnodes; node++) { int njnode = map[node]; if (njnode >= 0) { NJ->child[njnode].nChild = children[node].nChild; for (i = 0; i < children[node].nChild; i++) { assert(children[node].child[i] >= 0 && children[node].child[i] < maxnodes); NJ->child[njnode].child[i] = map[children[node].child[i]]; } if (parent[node] >= 0) NJ->parent[njnode] = map[parent[node]]; } } /* Make sure that parent/child relationships match */ for (i = 0; i < NJ->maxnode; i++) { children_t *c = &NJ->child[i]; int j; for (j = 0; j < c->nChild;j++) assert(c->child[j] >= 0 && c->child[j] < NJ->maxnode && NJ->parent[c->child[j]] == i); } assert(NJ->parent[NJ->root] < 0); map = myfree(map,sizeof(int)*maxnodes); stack = myfree(stack,sizeof(int)*maxnodes); children = myfree(children,sizeof(children_t)*maxnodes); parent = myfree(parent,sizeof(int)*maxnodes); /* Compute profiles as balanced -- the NNI stage will recompute these profiles anyway */ traversal_t traversal = InitTraversal(NJ); node = NJ->root; while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) { if (node >= NJ->nSeq && node != NJ->root) SetProfile(/*IN/OUT*/NJ, node, /*noweight*/-1.0); } traversal = FreeTraversal(traversal,NJ); } /* Print topology using node indices as node names */ void PrintNJInternal(FILE *fp, NJ_t *NJ, bool useLen) { if (NJ->nSeq < 4) { return; } typedef struct { int node; int end; } stack_t; stack_t *stack = (stack_t *)mymalloc(sizeof(stack_t)*NJ->maxnodes); int stackSize = 1; stack[0].node = NJ->root; stack[0].end = 0; while(stackSize>0) { stack_t *last = &stack[stackSize-1]; stackSize--; /* Save last, as we are about to overwrite it */ int node = last->node; int end = last->end; if (node < 
NJ->nSeq) { if (NJ->child[NJ->parent[node]].child[0] != node) fputs(",",fp); fprintf(fp, "%d", node); if (useLen) fprintf(fp, ":%.4f", NJ->branchlength[node]); } else if (end) { fprintf(fp, ")%d", node); if (useLen) fprintf(fp, ":%.4f", NJ->branchlength[node]); } else { if (node != NJ->root && NJ->child[NJ->parent[node]].child[0] != node) fprintf(fp, ","); fprintf(fp, "("); stackSize++; stack[stackSize-1].node = node; stack[stackSize-1].end = 1; children_t *c = &NJ->child[node]; /* put children on in reverse order because we use the last one first */ int i; for (i = c->nChild-1; i >=0; i--) { stackSize++; stack[stackSize-1].node = c->child[i]; stack[stackSize-1].end = 0; } } } fprintf(fp, ";\n"); stack = myfree(stack, sizeof(stack_t)*NJ->maxnodes); } void PrintNJ(FILE *fp, NJ_t *NJ, char **names, uniquify_t *unique, bool bShowSupport, bool bQuote) { /* And print the tree: depth first search * The stack contains * list of remaining children with their depth * parent node, with a flag of -1 so I know to print right-paren */ if (NJ->nSeq==1 && unique->alnNext[unique->uniqueFirst[0]] >= 0) { /* Special case -- otherwise we end up with double parens */ int first = unique->uniqueFirst[0]; assert(first >= 0 && first < unique->nSeq); fprintf(fp, bQuote ? "('%s':0.0" : "(%s:0.0", names[first]); int iName = unique->alnNext[first]; while (iName >= 0) { assert(iName < unique->nSeq); fprintf(fp, bQuote ? 
",'%s':0.0" : ",%s:0.0", names[iName]); iName = unique->alnNext[iName]; } fprintf(fp,");\n"); return; } typedef struct { int node; int end; } stack_t; stack_t *stack = (stack_t *)mymalloc(sizeof(stack_t)*NJ->maxnodes); int stackSize = 1; stack[0].node = NJ->root; stack[0].end = 0; while(stackSize>0) { stack_t *last = &stack[stackSize-1]; stackSize--; /* Save last, as we are about to overwrite it */ int node = last->node; int end = last->end; if (node < NJ->nSeq) { if (NJ->child[NJ->parent[node]].child[0] != node) fputs(",",fp); int first = unique->uniqueFirst[node]; assert(first >= 0 && first < unique->nSeq); /* Print the name, or the subtree of duplicate names */ if (unique->alnNext[first] == -1) { fprintf(fp, bQuote ? "'%s'" : "%s", names[first]); } else { fprintf(fp, bQuote ? "('%s':0.0" : "(%s:0.0", names[first]); int iName = unique->alnNext[first]; while (iName >= 0) { assert(iName < unique->nSeq); fprintf(fp, bQuote ? ",'%s':0.0" : ",%s:0.0", names[iName]); iName = unique->alnNext[iName]; } fprintf(fp,")"); } /* Print the branch length */ #ifdef USE_DOUBLE #define FP_FORMAT "%.9f" #else #define FP_FORMAT "%.5f" #endif fprintf(fp, ":" FP_FORMAT, NJ->branchlength[node]); } else if (end) { if (node == NJ->root) fprintf(fp, ")"); else if (bShowSupport) fprintf(fp, ")%.3f:" FP_FORMAT, NJ->support[node], NJ->branchlength[node]); else fprintf(fp, "):" FP_FORMAT, NJ->branchlength[node]); } else { if (node != NJ->root && NJ->child[NJ->parent[node]].child[0] != node) fprintf(fp, ","); fprintf(fp, "("); stackSize++; stack[stackSize-1].node = node; stack[stackSize-1].end = 1; children_t *c = &NJ->child[node]; /* put children on in reverse order because we use the last one first */ int i; for (i = c->nChild-1; i >=0; i--) { stackSize++; stack[stackSize-1].node = c->child[i]; stack[stackSize-1].end = 0; } } } fprintf(fp, ";\n"); stack = myfree(stack, sizeof(stack_t)*NJ->maxnodes); } alignment_t *ReadAlignment(/*IN*/FILE *fp, bool bQuote) { /* bQuote supports the -quote 
option */ int nSeq = 0; int nPos = 0; char **names = NULL; char **seqs = NULL; char buf[BUFFER_SIZE] = ""; if (fgets(buf,sizeof(buf),fp) == NULL) { fprintf(stderr, "Error reading header line\n"); exit(1); } int nSaved = 100; if (buf[0] == '>') { /* FASTA, truncate names at any of these */ char *nameStop = bQuote ? "'\t\r\n" : "(),: \t\r\n"; char *seqSkip = " \t\r\n"; /* skip these characters in the sequence */ seqs = (char**)mymalloc(sizeof(char*) * nSaved); names = (char**)mymalloc(sizeof(char*) * nSaved); do { /* loop over lines */ if (buf[0] == '>') { /* truncate the name */ char *p, *q; for (p = buf+1; *p != '\0'; p++) { for (q = nameStop; *q != '\0'; q++) { if (*p == *q) { *p = '\0'; break; } } if (*p == '\0') break; } /* allocate space for another sequence */ nSeq++; if (nSeq > nSaved) { int nNewSaved = nSaved*2; seqs = myrealloc(seqs,sizeof(char*)*nSaved,sizeof(char*)*nNewSaved, /*copy*/false); names = myrealloc(names,sizeof(char*)*nSaved,sizeof(char*)*nNewSaved, /*copy*/false); nSaved = nNewSaved; } names[nSeq-1] = (char*)mymemdup(buf+1,strlen(buf)); seqs[nSeq-1] = NULL; } else { /* count non-space characters and append to sequence */ int nKeep = 0; char *p, *q; for (p=buf; *p != '\0'; p++) { for (q=seqSkip; *q != '\0'; q++) { if (*p == *q) break; } if (*p != *q) nKeep++; } int nOld = (seqs[nSeq-1] == NULL) ? 
0 : strlen(seqs[nSeq-1]); seqs[nSeq-1] = (char*)myrealloc(seqs[nSeq-1], nOld, nOld+nKeep+1, /*copy*/false); if (nOld+nKeep > nPos) nPos = nOld + nKeep; char *out = seqs[nSeq-1] + nOld; for (p=buf; *p != '\0'; p++) { for (q=seqSkip; *q != '\0'; q++) { if (*p == *q) break; } if (*p != *q) { *out = *p; out++; } } assert(out-seqs[nSeq-1] == nKeep + nOld); *out = '\0'; } } while(fgets(buf,sizeof(buf),fp) != NULL); if (seqs[nSeq-1] == NULL) { fprintf(stderr, "No sequence data for last entry %s\n",names[nSeq-1]); exit(1); } names = myrealloc(names,sizeof(char*)*nSaved,sizeof(char*)*nSeq, /*copy*/false); seqs = myrealloc(seqs,sizeof(char*)*nSaved,sizeof(char*)*nSeq, /*copy*/false); } else { /* PHYLIP interleaved-like format Allow arbitrary length names, require spaces between names and sequences Allow multiple alignments, either separated by a single empty line (e.g. seqboot output) or not. */ if (buf[0] == '\n' || buf[0] == '\r') { if (fgets(buf,sizeof(buf),fp) == NULL) { fprintf(stderr, "Empty header line followed by EOF\n"); exit(1); } } if (sscanf(buf, "%d%d", &nSeq, &nPos) != 2 || nSeq < 1 || nPos < 1) { fprintf(stderr, "Error parsing header line:%s\n", buf); exit(1); } names = (char **)mymalloc(sizeof(char*) * nSeq); seqs = (char **)mymalloc(sizeof(char*) * nSeq); nSaved = nSeq; int i; for (i = 0; i < nSeq; i++) { names[i] = NULL; seqs[i] = (char *)mymalloc(nPos+1); /* null-terminate */ seqs[i][0] = '\0'; } int iSeq = 0; while(fgets(buf,sizeof(buf),fp)) { if ((buf[0] == '\n' || buf[0] == '\r') && (iSeq == nSeq || iSeq == 0)) { iSeq = 0; } else { int j = 0; /* character just past end of name */ if (buf[0] == ' ') { if (names[iSeq] == NULL) { fprintf(stderr, "No name in phylip line %s", buf); exit(1); } } else { while (buf[j] != '\n' && buf[j] != '\0' && buf[j] != ' ') j++; if (buf[j] != ' ' || j == 0) { fprintf(stderr, "No sequence in phylip line %s", buf); exit(1); } if (iSeq >= nSeq) { fprintf(stderr, "No empty line between sequence blocks (is the sequence count 
wrong?)\n"); exit(1); } if (names[iSeq] == NULL) { /* save the name */ names[iSeq] = (char *)mymalloc(j+1); int k; for (k = 0; k < j; k++) names[iSeq][k] = buf[k]; names[iSeq][j] = '\0'; } else { /* check the name */ int k; int match = 1; for (k = 0; k < j; k++) { if (names[iSeq][k] != buf[k]) { match = 0; break; } } if (!match || names[iSeq][j] != '\0') { fprintf(stderr, "Wrong name in phylip line %s\nExpected %s\n", buf, names[iSeq]); exit(1); } } } int seqlen = strlen(seqs[iSeq]); for (; buf[j] != '\n' && buf[j] != '\0'; j++) { if (buf[j] != ' ') { if (seqlen >= nPos) { fprintf(stderr, "Too many characters (expected %d) for sequence named %s\nSo far have:\n%s\n", nPos, names[iSeq], seqs[iSeq]); exit(1); } seqs[iSeq][seqlen++] = toupper(buf[j]); } } seqs[iSeq][seqlen] = '\0'; /* null-terminate */ if(verbose>10) fprintf(stderr,"Read iSeq %d name %s seqsofar %s\n", iSeq, names[iSeq], seqs[iSeq]); iSeq++; if (iSeq == nSeq && strlen(seqs[0]) == nPos) break; /* finished alignment */ } /* end else non-empty phylip line */ } if (iSeq != nSeq && iSeq != 0) { fprintf(stderr, "Wrong number of sequences: expected %d\n", nSeq); exit(1); } } /* Check lengths of sequences */ int i; for (i = 0; i < nSeq; i++) { int seqlen = strlen(seqs[i]); if (seqlen != nPos) { fprintf(stderr, "Wrong number of characters for %s: expected %d but have %d instead.\n" "This sequence may be truncated, or another sequence may be too long.\n", names[i], nPos, seqlen); exit(1); } } /* Replace "." with "-" and warn if we find any */ /* If nucleotide sequences, replace U with T and N with X */ bool findDot = false; for (i = 0; i < nSeq; i++) { char *p; for (p = seqs[i]; *p != '\0'; p++) { if (*p == '.') { findDot = true; *p = '-'; } if (nCodes == 4 && *p == 'U') *p = 'T'; if (nCodes == 4 && *p == 'N') *p = 'X'; } } if (findDot) fprintf(stderr, "Warning! Found \".\" character(s). 
These are treated as gaps\n"); if (ferror(fp)) { fprintf(stderr, "Error reading input file\n"); exit(1); } alignment_t *align = (alignment_t*)mymalloc(sizeof(alignment_t)); align->nSeq = nSeq; align->nPos = nPos; align->names = names; align->seqs = seqs; align->nSaved = nSaved; return(align); } void FreeAlignmentSeqs(/*IN/OUT*/alignment_t *aln) { assert(aln != NULL); int i; for (i = 0; i < aln->nSeq; i++) aln->seqs[i] = myfree(aln->seqs[i], aln->nPos+1); } alignment_t *FreeAlignment(alignment_t *aln) { if(aln==NULL) return(NULL); int i; for (i = 0; i < aln->nSeq; i++) { aln->names[i] = myfree(aln->names[i],strlen(aln->names[i])+1); aln->seqs[i] = myfree(aln->seqs[i], aln->nPos+1); } aln->names = myfree(aln->names, sizeof(char*)*aln->nSaved); aln->seqs = myfree(aln->seqs, sizeof(char*)*aln->nSaved); myfree(aln, sizeof(alignment_t)); return(NULL); } char **AlnToConstraints(alignment_t *constraints, uniquify_t *unique, hashstrings_t *hashnames) { /* look up constraints as names and map to unique-space */ char ** uniqConstraints = (char**)mymalloc(sizeof(char*) * unique->nUnique); int i; for (i = 0; i < unique->nUnique; i++) uniqConstraints[i] = NULL; for (i = 0; i < constraints->nSeq; i++) { char *name = constraints->names[i]; char *constraintSeq = constraints->seqs[i]; hashiterator_t hi = FindMatch(hashnames,name); if (HashCount(hashnames,hi) != 1) { fprintf(stderr, "Sequence %s from constraints file is not in the alignment\n", name); exit(1); } int iSeqNonunique = HashFirst(hashnames,hi); assert(iSeqNonunique >= 0 && iSeqNonunique < unique->nSeq); int iSeqUnique = unique->alnToUniq[iSeqNonunique]; assert(iSeqUnique >= 0 && iSeqUnique < unique->nUnique); if (uniqConstraints[iSeqUnique] != NULL) { /* Already set a constraint for this group of sequences! 
Warn that we are ignoring this one unless the constraints match */ if (strcmp(uniqConstraints[iSeqUnique],constraintSeq) != 0) { fprintf(stderr, "Warning: ignoring constraints for %s:\n%s\n" "Another sequence has the same sequence but different constraints\n", name, constraintSeq); } } else { uniqConstraints[iSeqUnique] = constraintSeq; } } return(uniqConstraints); } profile_t *SeqToProfile(/*IN/OUT*/NJ_t *NJ, char *seq, int nPos, /*OPTIONAL*/char *constraintSeq, int nConstraints, int iNode, unsigned long counts[256]) { static unsigned char charToCode[256]; static int codeSet = 0; int c, i; if (!codeSet) { for (c = 0; c < 256; c++) { charToCode[c] = nCodes; } for (i = 0; codesString[i]; i++) { charToCode[codesString[i]] = i; charToCode[tolower(codesString[i])] = i; } charToCode['-'] = NOCODE; codeSet=1; } assert(strlen(seq) == nPos); profile_t *profile = NewProfile(nPos,nConstraints); for (i = 0; i < nPos; i++) { unsigned int character = (unsigned int) seq[i]; counts[character]++; c = charToCode[character]; if(verbose>10 && i < 2) fprintf(stderr,"pos %d char %c code %d\n", i, seq[i], c); /* treat unknowns as gaps */ if (c == nCodes || c == NOCODE) { profile->codes[i] = NOCODE; profile->weights[i] = 0.0; } else { profile->codes[i] = c; profile->weights[i] = 1.0; } } if (nConstraints > 0) { for (i = 0; i < nConstraints; i++) { profile->nOn[i] = 0; profile->nOff[i] = 0; } bool bWarn = false; if (constraintSeq != NULL) { assert(strlen(constraintSeq) == nConstraints); for (i = 0; i < nConstraints; i++) { if (constraintSeq[i] == '1') { profile->nOn[i] = 1; } else if (constraintSeq[i] == '0') { profile->nOff[i] = 1; } else if (constraintSeq[i] != '-') { if (!bWarn) { fprintf(stderr, "Constraint characters in unique sequence %d replaced with gap:", iNode+1); bWarn = true; } fprintf(stderr, " %c%d", constraintSeq[i], i+1); /* For the benefit of ConstraintSequencePenalty -- this is a bit of a hack, as this modifies the value read from the alignment */ constraintSeq[i] = '-'; 
}
  }
  if (bWarn)
    fprintf(stderr, "\n");
  }
}
return profile;
}

/* SeqDist -- distance between two coded sequences.
   codes1, codes2 -- arrays of nPos codes; NOCODE marks a gap/unknown.
   dmat -- optional distance matrix; if NULL, distance is the fraction of
           mismatches over the mutually ungapped positions.
   hit  -- output: hit->weight = number of comparable positions,
           hit->dist = average per-position distance
           (set to 1.0 if no positions were comparable). */
void SeqDist(unsigned char *codes1, unsigned char *codes2, int nPos,
	     distance_matrix_t *dmat,
	     /*OUT*/besthit_t *hit) {
  double top = 0;		/* summed over positions */
  int nUse = 0;
  int i;
  if (dmat==NULL) {
    /* No matrix: count mismatches at positions where neither side is a gap */
    int nDiff = 0;
    for (i = 0; i < nPos; i++) {
      if (codes1[i] != NOCODE && codes2[i] != NOCODE) {
	nUse++;
	if (codes1[i] != codes2[i])
	  nDiff++;
      }
    }
    top = (double)nDiff;
  } else {
    /* Matrix: sum the tabulated code-vs-code distance for comparable pairs */
    for (i = 0; i < nPos; i++) {
      if (codes1[i] != NOCODE && codes2[i] != NOCODE) {
	nUse++;
	top += dmat->distances[(unsigned int)codes1[i]][(unsigned int)codes2[i]];
      }
    }
  }
  hit->weight = (double)nUse;
  hit->dist = nUse > 0 ? top/(double)nUse : 1.0;
  seqOps++;			/* bump the sequence-comparison counter */
}

/* CorrectedPairDistances -- all pairwise distances among up to 4 profiles,
   stored in distances[] in i<j order (01, 02, 03, 12, 13, 23).
   If pseudoWeight > 0, each distance is blended toward a shared prior
   estimated from the weighted mean of the raw distances; if logdist is set,
   distances are then log-corrected (both steps continue past this chunk). */
void CorrectedPairDistances(profile_t **profiles, int nProfiles,
			    /*OPTIONAL*/distance_matrix_t *distance_matrix,
			    int nPos,
			    /*OUT*/double *distances) {
  assert(distances != NULL);
  assert(profiles != NULL);
  assert(nProfiles>1 && nProfiles <= 4);
  besthit_t hit[6];		/* enough for all pairs of 4 profiles */
  int iHit,i,j;
  for (iHit=0, i=0; i < nProfiles; i++) {
    for (j=i+1; j < nProfiles; j++, iHit++) {
      ProfileDist(profiles[i],profiles[j],nPos,distance_matrix,/*OUT*/&hit[iHit]);
      distances[iHit] = hit[iHit].dist;
    }
  }
  if (pseudoWeight > 0) {
    /* Estimate the prior distance as the weighted mean of the raw distances */
    double dTop = 0;
    double dBottom = 0;
    for (iHit=0; iHit < (nProfiles*(nProfiles-1))/2; iHit++) {
      dTop += hit[iHit].dist * hit[iHit].weight;
      dBottom += hit[iHit].weight;
    }
    /* fall back to an arbitrary large prior when total weight is negligible */
    double prior = (dBottom > 0.01) ?
dTop/dBottom : 3.0; for (iHit=0; iHit < (nProfiles*(nProfiles-1))/2; iHit++) distances[iHit] = (distances[iHit] * hit[iHit].weight + prior * pseudoWeight) / (hit[iHit].weight + pseudoWeight); } if (logdist) { for (iHit=0; iHit < (nProfiles*(nProfiles-1))/2; iHit++) distances[iHit] = LogCorrect(distances[iHit]); } } /* During the neighbor-joining phase, a join only violates our constraints if node1, node2, and other are all represented in the constraint and if one of the 3 is split and the other two do not agree */ int JoinConstraintPenalty(/*IN*/NJ_t *NJ, int node1, int node2) { if (NJ->nConstraints == 0) return(0.0); int penalty = 0; int iC; for (iC = 0; iC < NJ->nConstraints; iC++) penalty += JoinConstraintPenaltyPiece(NJ, node1, node2, iC); return(penalty); } int JoinConstraintPenaltyPiece(NJ_t *NJ, int node1, int node2, int iC) { profile_t *pOut = NJ->outprofile; profile_t *p1 = NJ->profiles[node1]; profile_t *p2 = NJ->profiles[node2]; int nOn1 = p1->nOn[iC]; int nOff1 = p1->nOff[iC]; int nOn2 = p2->nOn[iC]; int nOff2 = p2->nOff[iC]; int nOnOut = pOut->nOn[iC] - nOn1 - nOn2; int nOffOut = pOut->nOff[iC] - nOff1 - nOff2; if ((nOn1+nOff1) > 0 && (nOn2+nOff2) > 0 && (nOnOut+nOffOut) > 0) { /* code is -1 for split, 0 for off, 1 for on */ int code1 = (nOn1 > 0 && nOff1 > 0) ? -1 : (nOn1 > 0 ? 1 : 0); int code2 = (nOn2 > 0 && nOff2 > 0) ? -1 : (nOn2 > 0 ? 1 : 0); int code3 = (nOnOut > 0 && nOffOut) > 0 ? -1 : (nOnOut > 0 ? 1 : 0); int nSplit = (code1 == -1 ? 1 : 0) + (code2 == -1 ? 1 : 0) + (code3 == -1 ? 1 : 0); int nOn = (code1 == 1 ? 1 : 0) + (code2 == 1 ? 1 : 0) + (code3 == 1 ? 
1 : 0); if (nSplit == 1 && nOn == 1) return(SplitConstraintPenalty(nOn1+nOn2, nOff1+nOff2, nOnOut, nOffOut)); } /* else */ return(0); } void QuartetConstraintPenalties(profile_t *profiles[4], int nConstraints, /*OUT*/double penalty[3]) { int i; for (i=0; i < 3; i++) penalty[i] = 0.0; if(nConstraints == 0) return; int iC; for (iC = 0; iC < nConstraints; iC++) { double part[3]; if (QuartetConstraintPenaltiesPiece(profiles, iC, /*OUT*/part)) { for (i=0;i<3;i++) penalty[i] += part[i]; if (verbose>2 && (fabs(part[ABvsCD]-part[ACvsBD]) > 0.001 || fabs(part[ABvsCD]-part[ADvsBC]) > 0.001)) fprintf(stderr, "Constraint Penalties at %d: ABvsCD %.3f ACvsBD %.3f ADvsBC %.3f %d/%d %d/%d %d/%d %d/%d\n", iC, part[ABvsCD], part[ACvsBD], part[ADvsBC], profiles[0]->nOn[iC], profiles[0]->nOff[iC], profiles[1]->nOn[iC], profiles[1]->nOff[iC], profiles[2]->nOn[iC], profiles[2]->nOff[iC], profiles[3]->nOn[iC], profiles[3]->nOff[iC]); } } if (verbose>2) fprintf(stderr, "Total Constraint Penalties: ABvsCD %.3f ACvsBD %.3f ADvsBC %.3f\n", penalty[ABvsCD], penalty[ACvsBD], penalty[ADvsBC]); } double PairConstraintDistance(int nOn1, int nOff1, int nOn2, int nOff2) { double f1 = nOn1/(double)(nOn1+nOff1); double f2 = nOn2/(double)(nOn2+nOff2); /* 1 - f1 * f2 - (1-f1)*(1-f2) = 1 - f1 * f2 - 1 + f1 + f2 - f1 * f2 */ return(f1 + f2 - 2.0 * f1 * f2); } bool QuartetConstraintPenaltiesPiece(profile_t *profiles[4], int iC, /*OUT*/double piece[3]) { int nOn[4]; int nOff[4]; int i; int nSplit = 0; int nPlus = 0; int nMinus = 0; for (i=0; i < 4; i++) { nOn[i] = profiles[i]->nOn[iC]; nOff[i] = profiles[i]->nOff[iC]; if (nOn[i] + nOff[i] == 0) return(false); /* ignore */ else if (nOn[i] > 0 && nOff[i] > 0) nSplit++; else if (nOn[i] > 0) nPlus++; else nMinus++; } /* If just one of them is split or on the other side and the others all agree, also ignore */ if (nPlus >= 3 || nMinus >= 3) return(false); piece[ABvsCD] = constraintWeight * (PairConstraintDistance(nOn[0],nOff[0],nOn[1],nOff[1]) + 
PairConstraintDistance(nOn[2],nOff[2],nOn[3],nOff[3]));
  piece[ACvsBD] = constraintWeight
    * (PairConstraintDistance(nOn[0],nOff[0],nOn[2],nOff[2])
       + PairConstraintDistance(nOn[1],nOff[1],nOn[3],nOff[3]));
  piece[ADvsBC] = constraintWeight
    * (PairConstraintDistance(nOn[0],nOff[0],nOn[3],nOff[3])
       + PairConstraintDistance(nOn[2],nOff[2],nOn[1],nOff[1]));
  return(true);
}

/* Minimum number of constrained leaves that need to be moved
   to satisfy the constraint (or 0 if constraint is satisfied)
   Defining it this way should ensure that SPR moves that break
   constraints get a penalty
 */
/* Side 1 has nOn1 on / nOff1 off; side 2 has nOn2 on / nOff2 off.
   Returns min(min(nOn1,nOff2), min(nOn2,nOff1)) -- the cheaper of the two
   ways to make each side pure ("on" one side, "off" the other). */
int SplitConstraintPenalty(int nOn1, int nOff1, int nOn2, int nOff2) {
  return(nOn1 + nOff2 < nOn2 + nOff1 ?
	 (nOn1 < nOff2 ? nOn1 : nOff2)
	 : (nOn2 < nOff1 ? nOn2 : nOff1));
}

/* True if the split AB|CD (profiles[0],profiles[1] vs. the rest)
   is incompatible with constraint iConstraint. */
bool SplitViolatesConstraint(profile_t *profiles[4], int iConstraint) {
  int i;
  int codes[4]; /* 0 for off, 1 for on, -1 for split (quit if not constrained at all) */
  for (i = 0; i < 4; i++) {
    if (profiles[i]->nOn[iConstraint] + profiles[i]->nOff[iConstraint] == 0)
      return(false);		/* some subtree is unconstrained: cannot violate */
    else if (profiles[i]->nOn[iConstraint] > 0 && profiles[i]->nOff[iConstraint] == 0)
      codes[i] = 1;
    else if (profiles[i]->nOn[iConstraint] == 0 && profiles[i]->nOff[iConstraint] > 0)
      codes[i] = 0;
    else
      codes[i] = -1;
  }
  int n0 = 0;
  int n1 = 0;
  for (i = 0; i < 4; i++) {
    if (codes[i] == 0)
      n0++;
    else if (codes[i] == 1)
      n1++;
  }
  /* 3 on one side means no violation, even if other is code -1
     otherwise must have code != -1 and agreement on the split
   */
  if (n0 >= 3 || n1 >= 3)
    return(false);
  if (n0==2 && n1==2 && codes[0] == codes[1] && codes[2] == codes[3])
    return(false);
  return(true);
}

/* LogCorrect -- convert a raw fractional distance into an evolutionary
   distance, capped at maxscore. Jukes-Cantor for nucleotides without a
   matrix; a scoredist-like correction otherwise. */
double LogCorrect(double dist) {
  const double maxscore = 3.0;
  if (nCodes == 4 && !useMatrix) { /* Jukes-Cantor */
    /* formula diverges as dist -> 0.75, so cap beyond 0.74 */
    dist = dist < 0.74 ? -0.75*log(1.0 - dist * 4.0/3.0) : maxscore;
  } else {			/* scoredist-like */
    dist = dist < 0.99 ? -1.3*log(1.0 - dist) : maxscore;
  }
  return (dist < maxscore ?
dist : maxscore); } /* A helper function -- f1 and f2 can be NULL if the corresponding code != NOCODE */ double ProfileDistPiece(unsigned int code1, unsigned int code2, numeric_t *f1, numeric_t *f2, /*OPTIONAL*/distance_matrix_t *dmat, /*OPTIONAL*/numeric_t *codeDist2) { if (dmat) { if (code1 != NOCODE && code2 != NOCODE) { /* code1 vs code2 */ return(dmat->distances[code1][code2]); } else if (codeDist2 != NULL && code1 != NOCODE) { /* code1 vs. codeDist2 */ return(codeDist2[code1]); } else { /* f1 vs f2 */ if (f1 == NULL) { if(code1 == NOCODE) return(10.0); f1 = &dmat->codeFreq[code1][0]; } if (f2 == NULL) { if(code2 == NOCODE) return(10.0); f2 = &dmat->codeFreq[code2][0]; } return(vector_multiply3_sum(f1,f2,dmat->eigenval,nCodes)); } } else { /* no matrix */ if (code1 != NOCODE) { if (code2 != NOCODE) { return(code1 == code2 ? 0.0 : 1.0); /* code1 vs code2 */ } else { if(f2 == NULL) return(10.0); return(1.0 - f2[code1]); /* code1 vs. f2 */ } } else { if (code2 != NOCODE) { if(f1 == NULL) return(10.0); return(1.0 - f1[code2]); /* f1 vs code2 */ } else { /* f1 vs. f2 */ if (f1 == NULL || f2 == NULL) return(10.0); double piece = 1.0; int k; for (k = 0; k < nCodes; k++) { piece -= f1[k] * f2[k]; } return(piece); } } } assert(0); } /* E.g. GET_FREQ(profile,iPos,iVector) Gets the next element of the vectors (and updates iVector), or returns NULL if we didn't store a vector */ #define GET_FREQ(P,I,IVECTOR) \ (P->weights[I] > 0 && P->codes[I] == NOCODE ? 
&P->vectors[nCodes*(IVECTOR++)] : NULL) void ProfileDist(profile_t *profile1, profile_t *profile2, int nPos, /*OPTIONAL*/distance_matrix_t *dmat, /*OUT*/besthit_t *hit) { double top = 0; double denom = 0; int iFreq1 = 0; int iFreq2 = 0; int i = 0; for (i = 0; i < nPos; i++) { numeric_t *f1 = GET_FREQ(profile1,i,/*IN/OUT*/iFreq1); numeric_t *f2 = GET_FREQ(profile2,i,/*IN/OUT*/iFreq2); if (profile1->weights[i] > 0 && profile2->weights[i] > 0) { double weight = profile1->weights[i] * profile2->weights[i]; denom += weight; double piece = ProfileDistPiece(profile1->codes[i],profile2->codes[i],f1,f2,dmat, profile2->codeDist ? &profile2->codeDist[i*nCodes] : NULL); top += weight * piece; } } assert(iFreq1 == profile1->nVectors); assert(iFreq2 == profile2->nVectors); hit->weight = denom > 0 ? denom : 0.01; /* 0.01 is an arbitrarily low value of weight (normally >>1) */ hit->dist = denom > 0 ? top/denom : 1; profileOps++; } /* This should not be called if the update weight is 0, as in that case code==NOCODE and in=NULL is possible, and then it will fail. 
*/ void AddToFreq(/*IN/OUT*/numeric_t *fOut, double weight, unsigned int codeIn, /*OPTIONAL*/numeric_t *fIn, /*OPTIONAL*/distance_matrix_t *dmat) { assert(fOut != NULL); if (fIn != NULL) { vector_add_mult(fOut, fIn, weight, nCodes); } else if (dmat) { assert(codeIn != NOCODE); vector_add_mult(fOut, dmat->codeFreq[codeIn], weight, nCodes); } else { assert(codeIn != NOCODE); fOut[codeIn] += weight; } } void SetProfile(/*IN/OUT*/NJ_t *NJ, int node, double weight1) { children_t *c = &NJ->child[node]; assert(c->nChild == 2); assert(NJ->profiles[c->child[0]] != NULL); assert(NJ->profiles[c->child[1]] != NULL); if (NJ->profiles[node] != NULL) FreeProfile(NJ->profiles[node], NJ->nPos, NJ->nConstraints); NJ->profiles[node] = AverageProfile(NJ->profiles[c->child[0]], NJ->profiles[c->child[1]], NJ->nPos, NJ->nConstraints, NJ->distance_matrix, weight1); } /* bionjWeight is the weight of the first sequence (between 0 and 1), or -1 to do the average. */ profile_t *AverageProfile(profile_t *profile1, profile_t *profile2, int nPos, int nConstraints, distance_matrix_t *dmat, double bionjWeight) { int i; if (bionjWeight < 0) { bionjWeight = 0.5; } /* First, set codes and weights and see how big vectors will be */ profile_t *out = NewProfile(nPos, nConstraints); for (i = 0; i < nPos; i++) { out->weights[i] = bionjWeight * profile1->weights[i] + (1-bionjWeight) * profile2->weights[i]; out->codes[i] = NOCODE; if (out->weights[i] > 0) { if (profile1->weights[i] > 0 && profile1->codes[i] != NOCODE && (profile2->weights[i] <= 0 || profile1->codes[i] == profile2->codes[i])) { out->codes[i] = profile1->codes[i]; } else if (profile1->weights[i] <= 0 && profile2->weights[i] > 0 && profile2->codes[i] != NOCODE) { out->codes[i] = profile2->codes[i]; } if (out->codes[i] == NOCODE) out->nVectors++; } } /* Allocate and set the vectors */ out->vectors = (numeric_t*)mymalloc(sizeof(numeric_t)*nCodes*out->nVectors); for (i = 0; i < nCodes * out->nVectors; i++) out->vectors[i] = 0; nProfileFreqAlloc 
+= out->nVectors; nProfileFreqAvoid += nPos - out->nVectors; int iFreqOut = 0; int iFreq1 = 0; int iFreq2 = 0; for (i=0; i < nPos; i++) { numeric_t *f = GET_FREQ(out,i,/*IN/OUT*/iFreqOut); numeric_t *f1 = GET_FREQ(profile1,i,/*IN/OUT*/iFreq1); numeric_t *f2 = GET_FREQ(profile2,i,/*IN/OUT*/iFreq2); if (f != NULL) { if (profile1->weights[i] > 0) AddToFreq(/*IN/OUT*/f, profile1->weights[i] * bionjWeight, profile1->codes[i], f1, dmat); if (profile2->weights[i] > 0) AddToFreq(/*IN/OUT*/f, profile2->weights[i] * (1.0-bionjWeight), profile2->codes[i], f2, dmat); NormalizeFreq(/*IN/OUT*/f, dmat); } /* end if computing f */ if (verbose > 10 && i < 5) { fprintf(stderr,"Average profiles: pos %d in-w1 %f in-w2 %f bionjWeight %f to weight %f code %d\n", i, profile1->weights[i], profile2->weights[i], bionjWeight, out->weights[i], out->codes[i]); if (f!= NULL) { int k; for (k = 0; k < nCodes; k++) fprintf(stderr, "\t%c:%f", codesString[k], f ? f[k] : -1.0); fprintf(stderr,"\n"); } } } /* end loop over positions */ assert(iFreq1 == profile1->nVectors); assert(iFreq2 == profile2->nVectors); assert(iFreqOut == out->nVectors); /* compute total constraints */ for (i = 0; i < nConstraints; i++) { out->nOn[i] = profile1->nOn[i] + profile2->nOn[i]; out->nOff[i] = profile1->nOff[i] + profile2->nOff[i]; } profileAvgOps++; return(out); } /* Make the (unrotated) frequencies sum to 1 Simply dividing by total_weight is not ideal because of roundoff error So compute total_freq instead */ void NormalizeFreq(/*IN/OUT*/numeric_t *freq, distance_matrix_t *dmat) { double total_freq = 0; int k; if (dmat != NULL) { /* The total frequency is dot_product(true_frequencies, 1) So we rotate the 1 vector by eigeninv (stored in eigentot) */ total_freq = vector_multiply_sum(freq, dmat->eigentot, nCodes); } else { for (k = 0; k < nCodes; k++) total_freq += freq[k]; } if (total_freq > fPostTotalTolerance) { numeric_t inverse_weight = 1.0/total_freq; vector_multiply_by(/*IN/OUT*/freq, inverse_weight, nCodes); } 
else { /* This can happen if we are in a very low-weight region, e.g. if a mostly-gap position gets weighted down repeatedly; just set them all to arbitrary but legal values */ if (dmat == NULL) { for (k = 0; k < nCodes; k++) freq[k] = 1.0/nCodes; } else { for (k = 0; k < nCodes; k++) freq[k] = dmat->codeFreq[0][k]; } } } /* OutProfile() computes the out-profile */ profile_t *OutProfile(profile_t **profiles, int nProfiles, int nPos, int nConstraints, distance_matrix_t *dmat) { int i; /* position */ int in; /* profile */ profile_t *out = NewProfile(nPos, nConstraints); double inweight = 1.0/(double)nProfiles; /* The maximal output weight is 1.0 */ /* First, set weights -- code is always NOCODE, prevent weight=0 */ for (i = 0; i < nPos; i++) { out->weights[i] = 0; for (in = 0; in < nProfiles; in++) out->weights[i] += profiles[in]->weights[i] * inweight; if (out->weights[i] <= 0) out->weights[i] = 1e-20; /* always store a vector */ out->nVectors++; out->codes[i] = NOCODE; /* outprofile is normally complicated */ } /* Initialize the frequencies to 0 */ out->vectors = (numeric_t*)mymalloc(sizeof(numeric_t)*nCodes*out->nVectors); for (i = 0; i < nCodes*out->nVectors; i++) out->vectors[i] = 0; /* Add up the weights, going through each sequence in turn */ for (in = 0; in < nProfiles; in++) { int iFreqOut = 0; int iFreqIn = 0; for (i = 0; i < nPos; i++) { numeric_t *fIn = GET_FREQ(profiles[in],i,/*IN/OUT*/iFreqIn); numeric_t *fOut = GET_FREQ(out,i,/*IN/OUT*/iFreqOut); if (profiles[in]->weights[i] > 0) AddToFreq(/*IN/OUT*/fOut, profiles[in]->weights[i], profiles[in]->codes[i], fIn, dmat); } assert(iFreqOut == out->nVectors); assert(iFreqIn == profiles[in]->nVectors); } /* And normalize the frequencies to sum to 1 */ int iFreqOut = 0; for (i = 0; i < nPos; i++) { numeric_t *fOut = GET_FREQ(out,i,/*IN/OUT*/iFreqOut); if (fOut) NormalizeFreq(/*IN/OUT*/fOut, dmat); } assert(iFreqOut == out->nVectors); if (verbose > 10) fprintf(stderr,"Average %d profiles\n", nProfiles); if(dmat) 
SetCodeDist(/*IN/OUT*/out, nPos, dmat); /* Compute constraints */ for (i = 0; i < nConstraints; i++) { out->nOn[i] = 0; out->nOff[i] = 0; for (in = 0; in < nProfiles; in++) { out->nOn[i] += profiles[in]->nOn[i]; out->nOff[i] += profiles[in]->nOff[i]; } } return(out); } void UpdateOutProfile(/*IN/OUT*/profile_t *out, profile_t *old1, profile_t *old2, profile_t *new, int nActiveOld, int nPos, int nConstraints, distance_matrix_t *dmat) { int i, k; int iFreqOut = 0; int iFreq1 = 0; int iFreq2 = 0; int iFreqNew = 0; assert(nActiveOld > 0); for (i = 0; i < nPos; i++) { numeric_t *fOut = GET_FREQ(out,i,/*IN/OUT*/iFreqOut); numeric_t *fOld1 = GET_FREQ(old1,i,/*IN/OUT*/iFreq1); numeric_t *fOld2 = GET_FREQ(old2,i,/*IN/OUT*/iFreq2); numeric_t *fNew = GET_FREQ(new,i,/*IN/OUT*/iFreqNew); assert(out->codes[i] == NOCODE && fOut != NULL); /* No no-vector optimization for outprofiles */ if (verbose > 3 && i < 3) { fprintf(stderr,"Updating out-profile position %d weight %f (mult %f)\n", i, out->weights[i], out->weights[i]*nActiveOld); } double originalMult = out->weights[i]*nActiveOld; double newMult = originalMult + new->weights[i] - old1->weights[i] - old2->weights[i]; out->weights[i] = newMult/(nActiveOld-1); if (out->weights[i] <= 0) out->weights[i] = 1e-20; /* always use the vector */ for (k = 0; k < nCodes; k++) fOut[k] *= originalMult; if (old1->weights[i] > 0) AddToFreq(/*IN/OUT*/fOut, -old1->weights[i], old1->codes[i], fOld1, dmat); if (old2->weights[i] > 0) AddToFreq(/*IN/OUT*/fOut, -old2->weights[i], old2->codes[i], fOld2, dmat); if (new->weights[i] > 0) AddToFreq(/*IN/OUT*/fOut, new->weights[i], new->codes[i], fNew, dmat); /* And renormalize */ NormalizeFreq(/*IN/OUT*/fOut, dmat); if (verbose > 2 && i < 3) { fprintf(stderr,"Updated out-profile position %d weight %f (mult %f)", i, out->weights[i], out->weights[i]*nActiveOld); if(out->weights[i] > 0) for (k=0;k<nCodes;k++) fprintf(stderr, " %c:%f", dmat?'?':codesString[k], fOut[k]); fprintf(stderr,"\n"); } } 
assert(iFreqOut == out->nVectors); assert(iFreq1 == old1->nVectors); assert(iFreq2 == old2->nVectors); assert(iFreqNew == new->nVectors); if(dmat) SetCodeDist(/*IN/OUT*/out,nPos,dmat); /* update constraints -- note in practice this should be a no-op */ for (i = 0; i < nConstraints; i++) { out->nOn[i] += new->nOn[i] - old1->nOn[i] - old2->nOn[i]; out->nOff[i] += new->nOff[i] - old1->nOff[i] - old2->nOff[i]; } } void SetCodeDist(/*IN/OUT*/profile_t *profile, int nPos, distance_matrix_t *dmat) { if (profile->codeDist == NULL) profile->codeDist = (numeric_t*)mymalloc(sizeof(numeric_t)*nPos*nCodes); int i; int iFreq = 0; for (i = 0; i < nPos; i++) { numeric_t *f = GET_FREQ(profile,i,/*IN/OUT*/iFreq); int k; for (k = 0; k < nCodes; k++) profile->codeDist[i*nCodes+k] = ProfileDistPiece(/*code1*/profile->codes[i], /*code2*/k, /*f1*/f, /*f2*/NULL, dmat, NULL); } assert(iFreq==profile->nVectors); } void SetBestHit(int node, NJ_t *NJ, int nActive, /*OUT*/besthit_t *bestjoin, /*OUT OPTIONAL*/besthit_t *allhits) { assert(NJ->parent[node] < 0); bestjoin->i = node; bestjoin->j = -1; bestjoin->dist = 1e20; bestjoin->criterion = 1e20; int j; besthit_t tmp; #ifdef OPENMP /* Note -- if we are already in a parallel region, this will be ignored */ #pragma omp parallel for schedule(dynamic, 50) #endif for (j = 0; j < NJ->maxnode; j++) { besthit_t *sv = allhits != NULL ? &allhits[j] : &tmp; sv->i = node; sv->j = j; if (NJ->parent[j] >= 0) { sv->i = -1; /* illegal/empty join */ sv->weight = 0.0; sv->criterion = sv->dist = 1e20; continue; } /* Note that we compute self-distances (allow j==node) because the top-hit heuristic expects self to be within its top hits, but we exclude those from the bestjoin that we return... 
*/ SetDistCriterion(NJ, nActive, /*IN/OUT*/sv); if (sv->criterion < bestjoin->criterion && node != j) *bestjoin = *sv; } if (verbose>5) { fprintf(stderr, "SetBestHit %d %d %f %f\n", bestjoin->i, bestjoin->j, bestjoin->dist, bestjoin->criterion); } } void ReadMatrix(char *filename, /*OUT*/numeric_t codes[MAXCODES][MAXCODES], bool checkCodes) { char buf[BUFFER_SIZE] = ""; FILE *fp = fopen(filename, "r"); if (fp == NULL) { fprintf(stderr, "Cannot read %s\n",filename); exit(1); } if (fgets(buf,sizeof(buf),fp) == NULL) { fprintf(stderr, "Error reading header line for %s:\n%s\n", filename, buf); exit(1); } if (checkCodes) { int i; int iBufPos; for (iBufPos=0,i=0;i<nCodes;i++,iBufPos++) { if(buf[iBufPos] != codesString[i]) { fprintf(stderr,"Header line\n%s\nin file %s does not have expected code %c # %d in %s\n", buf, filename, codesString[i], i, codesString); exit(1); } iBufPos++; if(buf[iBufPos] != '\n' && buf[iBufPos] != '\r' && buf[iBufPos] != '\0' && buf[iBufPos] != '\t') { fprintf(stderr, "Header line in %s should be tab-delimited\n", filename); exit(1); } if (buf[iBufPos] == '\0' && i < nCodes-1) { fprintf(stderr, "Header line in %s ends prematurely\n",filename); exit(1); } } /* end loop over codes */ /* Should be at end, but allow \n because of potential DOS \r\n */ if(buf[iBufPos] != '\0' && buf[iBufPos] != '\n' && buf[iBufPos] != '\r') { fprintf(stderr, "Header line in %s has too many entries\n", filename); exit(1); } } int iLine; for (iLine = 0; iLine < nCodes; iLine++) { buf[0] = '\0'; if (fgets(buf,sizeof(buf),fp) == NULL) { fprintf(stderr, "Cannot read line %d from file %s\n", iLine+2, filename); exit(1); } char *field = strtok(buf,"\t\r\n"); field = strtok(NULL, "\t"); /* ignore first column */ int iColumn; for (iColumn = 0; iColumn < nCodes && field != NULL; iColumn++, field = strtok(NULL,"\t")) { if(sscanf(field,ScanNumericSpec,&codes[iLine][iColumn]) != 1) { fprintf(stderr,"Cannot parse field %s in file %s\n", field, filename); exit(1); } } } } void 
ReadVector(char *filename, /*OUT*/numeric_t codes[MAXCODES]) {
  /* Read nCodes whitespace-separated numeric values from filename into
     codes[]. Exits with a message on open, parse, or close failure. */
  FILE *fp = fopen(filename,"r");
  if (fp == NULL) {
    fprintf(stderr, "Cannot read %s\n",filename);
    exit(1);
  }
  int i;
  for (i = 0; i < nCodes; i++) {
    if (fscanf(fp,ScanNumericSpec,&codes[i]) != 1) {
      fprintf(stderr,"Cannot read %d entry of %s\n",i+1,filename);
      exit(1);
    }
  }
  if (fclose(fp) != 0) {
    fprintf(stderr, "Error reading %s\n",filename);
    exit(1);
  }
}

/* Load a distance matrix from <prefix>.distances, its eigen-inverse from
   <prefix>.inverses, and its eigenvalues from <prefix>.eigenvalues, then
   derive the remaining fields via SetupDistanceMatrix.
   The returned structure is allocated with mymalloc; the caller owns it.
   Exits on any error. */
distance_matrix_t *ReadDistanceMatrix(char *prefix) {
  char buffer[BUFFER_SIZE];
  distance_matrix_t *dmat = (distance_matrix_t*)mymalloc(sizeof(distance_matrix_t));
  /* 20 leaves room for the longest suffix (".eigenvalues") plus the NUL */
  if(strlen(prefix) > BUFFER_SIZE-20) {
    fprintf(stderr,"Filename %s too long\n", prefix);
    exit(1);
  }
  strcpy(buffer, prefix);
  strcat(buffer, ".distances");
  ReadMatrix(buffer, /*OUT*/dmat->distances, /*checkCodes*/true);
  strcpy(buffer, prefix);
  strcat(buffer, ".inverses");
  ReadMatrix(buffer, /*OUT*/dmat->eigeninv, /*checkCodes*/false);
  strcpy(buffer, prefix);
  strcat(buffer, ".eigenvalues");
  ReadVector(buffer, /*OUT*/dmat->eigenval);
  if(verbose>1)
    fprintf(stderr, "Read distance matrix from %s\n",prefix);
  SetupDistanceMatrix(/*IN/OUT*/dmat);
  return(dmat);
}

void SetupDistanceMatrix(/*IN/OUT*/distance_matrix_t *dmat) {
  /* Check that the eigenvalues and eigen-inverse are consistent with the
     distance matrix and that the matrix is symmetric */
  int i,j,k;
  for (i = 0; i < nCodes; i++) {
    for (j = 0; j < nCodes; j++) {
      if(fabs(dmat->distances[i][j]-dmat->distances[j][i]) > 1e-6) {
        fprintf(stderr,"Distance matrix not symmetric for %d,%d: %f vs %f\n",
                i+1,j+1,
                dmat->distances[i][j],
                dmat->distances[j][i]);
        exit(1);
      }
      /* Verify distances[i][j] == sum_k eigenval[k]*eigeninv[k][i]*eigeninv[k][j] */
      double total = 0.0;
      for (k = 0; k < nCodes; k++)
        total += dmat->eigenval[k] * dmat->eigeninv[k][i] * dmat->eigeninv[k][j];
      if(fabs(total - dmat->distances[i][j]) > 1e-6) {
        fprintf(stderr,"Distance matrix entry %d,%d should be %f but eigen-representation gives %f\n",
                i+1,j+1,dmat->distances[i][j],total);
        exit(1);
      }
    }
  }
  /* And compute eigentot: row sums of the eigen-inverse */
  for (k = 0; k < nCodes; k++) {
    dmat->eigentot[k] = 0.;
    int j;
    for (j = 0; j < nCodes; j++)
      dmat->eigentot[k] += dmat->eigeninv[k][j];
  }
  /* And compute codeFreq: codeFreq[code] is column `code` of eigeninv */
  int code;
  for(code = 0; code < nCodes; code++) {
    for (k = 0; k < nCodes; k++) {
      dmat->codeFreq[code][k] = dmat->eigeninv[k][code];
    }
  }
  /* And gapFreq: per-component average of codeFreq over all codes */
  for(code = 0; code < nCodes; code++) {
    double gapFreq = 0.0;
    for (k = 0; k < nCodes; k++)
      gapFreq += dmat->codeFreq[k][code];
    dmat->gapFreq[code] = gapFreq / nCodes;
  }
  if(verbose>10) fprintf(stderr, "Made codeFreq\n");
}

/* Choose the best of the three topologies around an internal edge
   (AB|CD, AC|BD, AD|BC) by corrected pairwise distances plus constraint
   penalties (smaller criterion is better). Writes all three scores into
   criteria[] (indexed as nni_t) and returns the chosen topology. */
nni_t ChooseNNI(profile_t *profiles[4],
                /*OPTIONAL*/distance_matrix_t *dmat,
                int nPos, int nConstraints,
                /*OUT*/double criteria[3]) {
  double d[6];
  CorrectedPairDistances(profiles, 4, dmat, nPos, /*OUT*/d);
  double penalty[3];		/* indexed as nni_t */
  QuartetConstraintPenalties(profiles, nConstraints, /*OUT*/penalty);
  criteria[ABvsCD] = d[qAB] + d[qCD] + penalty[ABvsCD];
  criteria[ACvsBD] = d[qAC] + d[qBD] + penalty[ACvsBD];
  criteria[ADvsBC] = d[qAD] + d[qBC] + penalty[ADvsBC];
  nni_t choice = ABvsCD;
  if (criteria[ACvsBD] < criteria[ABvsCD] && criteria[ACvsBD] <= criteria[ADvsBC]) {
    choice = ACvsBD;
  } else if (criteria[ADvsBC] < criteria[ABvsCD] && criteria[ADvsBC] <= criteria[ACvsBD]) {
    choice = ADvsBC;
  }
  if (verbose > 1 && penalty[choice] > penalty[ABvsCD] + 1e-6) {
    /* Report rearrangements that worsen the constraint penalty */
    fprintf(stderr, "Worsen constraint: from %.3f to %.3f distance %.3f to %.3f: ",
            penalty[ABvsCD], penalty[choice],
            criteria[ABvsCD], choice == ACvsBD ?
criteria[ACvsBD] : criteria[ADvsBC]);
    /* list each constraint that the chosen topology violates worse than AB|CD */
    int iC;
    for (iC = 0; iC < nConstraints; iC++) {
      double ppart[3];
      if (QuartetConstraintPenaltiesPiece(profiles, iC, /*OUT*/ppart)) {
        double old_penalty = ppart[ABvsCD];
        double new_penalty = ppart[choice];
        if (new_penalty > old_penalty + 1e-6)
          fprintf(stderr, " %d (%d/%d %d/%d %d/%d %d/%d)", iC,
                  profiles[0]->nOn[iC], profiles[0]->nOff[iC],
                  profiles[1]->nOn[iC], profiles[1]->nOff[iC],
                  profiles[2]->nOn[iC], profiles[2]->nOff[iC],
                  profiles[3]->nOn[iC], profiles[3]->nOff[iC]);
      }
    }
    fprintf(stderr,"\n");
  }
  if (verbose > 3)
    fprintf(stderr, "NNI scores ABvsCD %.5f ACvsBD %.5f ADvsBC %.5f choice %s\n",
            criteria[ABvsCD], criteria[ACvsBD], criteria[ADvsBC],
            choice == ABvsCD ? "AB|CD" : (choice == ACvsBD ? "AC|BD" : "AD|BC"));
  return(choice);
}

/* Compute the posterior profile of the (inferred) parent of p1 and p2,
   at distances len1 and len2 respectively, as a freshly allocated profile.
   transmat==NULL selects the Jukes-Cantor path (requires nCodes==4);
   otherwise the eigen-decomposed transition matrix is used.
   The caller owns the result (free with FreeProfile). */
profile_t *PosteriorProfile(profile_t *p1, profile_t *p2,
                            double len1, double len2,
                            /*OPTIONAL*/transition_matrix_t *transmat,
                            rates_t *rates,
                            int nPos, int nConstraints) {
  /* clamp branch lengths up to the ML minimum */
  if (len1 < MLMinBranchLength)
    len1 = MLMinBranchLength;
  if (len2 < MLMinBranchLength)
    len2 = MLMinBranchLength;
  int i,j,k;
  profile_t *out = NewProfile(nPos, nConstraints);
  /* initialize every position to code NOCODE with weight 1; positions that
     need a full frequency vector are filled in below */
  for (i = 0; i < nPos; i++) {
    out->codes[i] = NOCODE;
    out->weights[i] = 1.0;
  }
  out->nVectors = nPos;
  out->vectors = (numeric_t*)mymalloc(sizeof(numeric_t)*nCodes*out->nVectors);
  for (i = 0; i < nCodes * out->nVectors; i++) out->vectors[i] = 0;
  int iFreqOut = 0;
  int iFreq1 = 0;
  int iFreq2 = 0;
  numeric_t *expeigenRates1 = NULL, *expeigenRates2 = NULL;
  if (transmat != NULL) {
    expeigenRates1 = ExpEigenRates(len1, transmat, rates);
    expeigenRates2 = ExpEigenRates(len2, transmat, rates);
  }
  if (transmat == NULL) {	/* Jukes-Cantor */
    assert(nCodes == 4);
    double *PSame1 = PSameVector(len1, rates);
    double *PDiff1 = PDiffVector(PSame1, rates);
    double *PSame2 = PSameVector(len2, rates);
    double *PDiff2 = PDiffVector(PSame2, rates);
    numeric_t mix1[4], mix2[4];
    for (i=0; i < nPos; i++) {
      int iRate = rates->ratecat[i];
      double w1 = p1->weights[i];
      double w2 = p2->weights[i];
      int
code1 = p1->codes[i]; int code2 = p2->codes[i]; numeric_t *f1 = GET_FREQ(p1,i,/*IN/OUT*/iFreq1); numeric_t *f2 = GET_FREQ(p2,i,/*IN/OUT*/iFreq2); /* First try to store a simple profile */ if (f1 == NULL && f2 == NULL) { if (code1 == NOCODE && code2 == NOCODE) { out->codes[i] = NOCODE; out->weights[i] = 0.0; continue; } else if (code1 == NOCODE) { /* Posterior(parent | character & gap, len1, len2) = Posterior(parent | character, len1) = PSame() for matching characters and 1-PSame() for the rest = (pSame - pDiff) * character + (1-(pSame-pDiff)) * gap */ out->codes[i] = code2; out->weights[i] = w2 * (PSame2[iRate] - PDiff2[iRate]); continue; } else if (code2 == NOCODE) { out->codes[i] = code1; out->weights[i] = w1 * (PSame1[iRate] - PDiff1[iRate]); continue; } else if (code1 == code2) { out->codes[i] = code1; double f12code = (w1*PSame1[iRate] + (1-w1)*0.25) * (w2*PSame2[iRate] + (1-w2)*0.25); double f12other = (w1*PDiff1[iRate] + (1-w1)*0.25) * (w2*PDiff2[iRate] + (1-w2)*0.25); /* posterior probability of code1/code2 after scaling */ double pcode = f12code/(f12code+3*f12other); /* Now f = w * (code ? 
1 : 0) + (1-w) * 0.25, so to get pcode we need fcode = 1/4 + w1*3/4 or w = (f-1/4)*4/3 */ out->weights[i] = (pcode - 0.25) * 4.0/3.0; /* This can be zero because of numerical problems, I think */ if (out->weights[i] < 1e-6) { if (verbose > 1) fprintf(stderr, "Replaced weight %f with %f from w1 %f w2 %f PSame %f %f f12code %f f12other %f\n", out->weights[i], 1e-6, w1, w2, PSame1[iRate], PSame2[iRate], f12code, f12other); out->weights[i] = 1e-6; } continue; } } /* if we did not compute a simple profile, then do the full computation and store the full vector */ if (f1 == NULL) { for (j = 0; j < 4; j++) mix1[j] = (1-w1)*0.25; if(code1 != NOCODE) mix1[code1] += w1; f1 = mix1; } if (f2 == NULL) { for (j = 0; j < 4; j++) mix2[j] = (1-w2)*0.25; if(code2 != NOCODE) mix2[code2] += w2; f2 = mix2; } out->codes[i] = NOCODE; out->weights[i] = 1.0; numeric_t *f = GET_FREQ(out,i,/*IN/OUT*/iFreqOut); double lkAB = 0; for (j = 0; j < 4; j++) { f[j] = (f1[j] * PSame1[iRate] + (1.0-f1[j]) * PDiff1[iRate]) * (f2[j] * PSame2[iRate] + (1.0-f2[j]) * PDiff2[iRate]); lkAB += f[j]; } double lkABInv = 1.0/lkAB; for (j = 0; j < 4; j++) f[j] *= lkABInv; } PSame1 = myfree(PSame1, sizeof(double) * rates->nRateCategories); PSame2 = myfree(PSame2, sizeof(double) * rates->nRateCategories); PDiff1 = myfree(PDiff1, sizeof(double) * rates->nRateCategories); PDiff2 = myfree(PDiff2, sizeof(double) * rates->nRateCategories); } else if (nCodes == 4) { /* matrix model on nucleotides */ numeric_t *fGap = &transmat->codeFreq[NOCODE][0]; numeric_t f1mix[4], f2mix[4]; for (i=0; i < nPos; i++) { if (p1->codes[i] == NOCODE && p2->codes[i] == NOCODE && p1->weights[i] == 0 && p2->weights[i] == 0) { /* aligning gap with gap -- just output a gap out->codes[i] is already set to NOCODE so need not set that */ out->weights[i] = 0; continue; } int iRate = rates->ratecat[i]; numeric_t *expeigen1 = &expeigenRates1[iRate*4]; numeric_t *expeigen2 = &expeigenRates2[iRate*4]; numeric_t *f1 = GET_FREQ(p1,i,/*IN/OUT*/iFreq1); 
numeric_t *f2 = GET_FREQ(p2,i,/*IN/OUT*/iFreq2); numeric_t *fOut = GET_FREQ(out,i,/*IN/OUT*/iFreqOut); assert(fOut != NULL); if (f1 == NULL) { f1 = &transmat->codeFreq[p1->codes[i]][0]; /* codeFreq includes an entry for NOCODE */ double w = p1->weights[i]; if (w > 0.0 && w < 1.0) { for (j = 0; j < 4; j++) f1mix[j] = w * f1[j] + (1.0-w) * fGap[j]; f1 = f1mix; } } if (f2 == NULL) { f2 = &transmat->codeFreq[p2->codes[i]][0]; double w = p2->weights[i]; if (w > 0.0 && w < 1.0) { for (j = 0; j < 4; j++) f2mix[j] = w * f2[j] + (1.0-w) * fGap[j]; f2 = f2mix; } } numeric_t fMult1[4] ALIGNED; /* rotated1 * expeigen1 */ numeric_t fMult2[4] ALIGNED; /* rotated2 * expeigen2 */ #if 0 /* SSE3 is slower */ vector_multiply(f1, expeigen1, 4, /*OUT*/fMult1); vector_multiply(f2, expeigen2, 4, /*OUT*/fMult2); #else for (j = 0; j < 4; j++) { fMult1[j] = f1[j]*expeigen1[j]; fMult2[j] = f2[j]*expeigen2[j]; } #endif numeric_t fPost[4] ALIGNED; /* in unrotated space */ for (j = 0; j < 4; j++) { #if 0 /* SSE3 is slower */ fPost[j] = vector_dot_product_rot(fMult1, fMult2, &transmat->codeFreq[j][0], 4) * transmat->statinv[j]; */ #else double out1 = 0; double out2 = 0; for (k = 0; k < 4; k++) { out1 += fMult1[k] * transmat->codeFreq[j][k]; out2 += fMult2[k] * transmat->codeFreq[j][k]; } fPost[j] = out1*out2*transmat->statinv[j]; #endif } double fPostTot = 0; for (j = 0; j < 4; j++) fPostTot += fPost[j]; assert(fPostTot > fPostTotalTolerance); double fPostInv = 1.0/fPostTot; #if 0 /* SSE3 is slower */ vector_multiply_by(fPost, fPostInv, 4); #else for (j = 0; j < 4; j++) fPost[j] *= fPostInv; #endif /* and finally, divide by stat again & rotate to give the new frequencies */ matrixt_by_vector4(transmat->eigeninvT, fPost, /*OUT*/fOut); } /* end loop over position i */ } else if (nCodes == 20) { /* matrix model on amino acids */ numeric_t *fGap = &transmat->codeFreq[NOCODE][0]; numeric_t f1mix[20] ALIGNED; numeric_t f2mix[20] ALIGNED; for (i=0; i < nPos; i++) { if (p1->codes[i] == NOCODE && 
p2->codes[i] == NOCODE && p1->weights[i] == 0 && p2->weights[i] == 0) { /* aligning gap with gap -- just output a gap out->codes[i] is already set to NOCODE so need not set that */ out->weights[i] = 0; continue; } int iRate = rates->ratecat[i]; numeric_t *expeigen1 = &expeigenRates1[iRate*20]; numeric_t *expeigen2 = &expeigenRates2[iRate*20]; numeric_t *f1 = GET_FREQ(p1,i,/*IN/OUT*/iFreq1); numeric_t *f2 = GET_FREQ(p2,i,/*IN/OUT*/iFreq2); numeric_t *fOut = GET_FREQ(out,i,/*IN/OUT*/iFreqOut); assert(fOut != NULL); if (f1 == NULL) { f1 = &transmat->codeFreq[p1->codes[i]][0]; /* codeFreq includes an entry for NOCODE */ double w = p1->weights[i]; if (w > 0.0 && w < 1.0) { for (j = 0; j < 20; j++) f1mix[j] = w * f1[j] + (1.0-w) * fGap[j]; f1 = f1mix; } } if (f2 == NULL) { f2 = &transmat->codeFreq[p2->codes[i]][0]; double w = p2->weights[i]; if (w > 0.0 && w < 1.0) { for (j = 0; j < 20; j++) f2mix[j] = w * f2[j] + (1.0-w) * fGap[j]; f2 = f2mix; } } numeric_t fMult1[20] ALIGNED; /* rotated1 * expeigen1 */ numeric_t fMult2[20] ALIGNED; /* rotated2 * expeigen2 */ vector_multiply(f1, expeigen1, 20, /*OUT*/fMult1); vector_multiply(f2, expeigen2, 20, /*OUT*/fMult2); numeric_t fPost[20] ALIGNED; /* in unrotated space */ for (j = 0; j < 20; j++) { numeric_t value = vector_dot_product_rot(fMult1, fMult2, &transmat->codeFreq[j][0], 20) * transmat->statinv[j]; /* Added this logic try to avoid rare numerical problems */ fPost[j] = value >= 0 ? 
value : 0; } double fPostTot = vector_sum(fPost, 20); assert(fPostTot > fPostTotalTolerance); double fPostInv = 1.0/fPostTot; vector_multiply_by(/*IN/OUT*/fPost, fPostInv, 20); int ch = -1; /* the dominant character, if any */ if (!exactML) { for (j = 0; j < 20; j++) { if (fPost[j] >= approxMLminf) { ch = j; break; } } } /* now, see if we can use the approximation fPost ~= (1 or 0) * w + nearP * (1-w) to avoid rotating */ double w = 0; if (ch >= 0) { w = (fPost[ch] - transmat->nearP[ch][ch]) / (1.0 - transmat->nearP[ch][ch]); for (j = 0; j < 20; j++) { if (j != ch) { double fRough = (1.0-w) * transmat->nearP[ch][j]; if (fRough < fPost[j] * approxMLminratio) { ch = -1; /* give up on the approximation */ break; } } } } if (ch >= 0) { nAAPosteriorRough++; double wInvStat = w * transmat->statinv[ch]; for (j = 0; j < 20; j++) fOut[j] = wInvStat * transmat->codeFreq[ch][j] + (1.0-w) * transmat->nearFreq[ch][j]; } else { /* and finally, divide by stat again & rotate to give the new frequencies */ nAAPosteriorExact++; for (j = 0; j < 20; j++) fOut[j] = vector_multiply_sum(fPost, &transmat->eigeninv[j][0], 20); } } /* end loop over position i */ } else { assert(0); /* illegal nCodes */ } if (transmat != NULL) { expeigenRates1 = myfree(expeigenRates1, sizeof(numeric_t) * rates->nRateCategories * nCodes); expeigenRates2 = myfree(expeigenRates2, sizeof(numeric_t) * rates->nRateCategories * nCodes); } /* Reallocate out->vectors to be the right size */ out->nVectors = iFreqOut; if (out->nVectors == 0) out->vectors = (numeric_t*)myfree(out->vectors, sizeof(numeric_t)*nCodes*nPos); else out->vectors = (numeric_t*)myrealloc(out->vectors, /*OLDSIZE*/sizeof(numeric_t)*nCodes*nPos, /*NEWSIZE*/sizeof(numeric_t)*nCodes*out->nVectors, /*copy*/true); /* try to save space */ nProfileFreqAlloc += out->nVectors; nProfileFreqAvoid += nPos - out->nVectors; /* compute total constraints */ for (i = 0; i < nConstraints; i++) { out->nOn[i] = p1->nOn[i] + p2->nOn[i]; out->nOff[i] = p1->nOff[i] + 
p2->nOff[i]; } nPosteriorCompute++; return(out); } double *PSameVector(double length, rates_t *rates) { double *pSame = mymalloc(sizeof(double) * rates->nRateCategories); int iRate; for (iRate = 0; iRate < rates->nRateCategories; iRate++) pSame[iRate] = 0.25 + 0.75 * exp((-4.0/3.0) * fabs(length*rates->rates[iRate])); return(pSame); } double *PDiffVector(double *pSame, rates_t *rates) { double *pDiff = mymalloc(sizeof(double) * rates->nRateCategories); int iRate; for (iRate = 0; iRate < rates->nRateCategories; iRate++) pDiff[iRate] = (1.0 - pSame[iRate])/3.0; return(pDiff); } numeric_t *ExpEigenRates(double length, transition_matrix_t *transmat, rates_t *rates) { numeric_t *expeigen = mymalloc(sizeof(numeric_t) * nCodes * rates->nRateCategories); int iRate, j; for (iRate = 0; iRate < rates->nRateCategories; iRate++) { for (j = 0; j < nCodes; j++) { double relLen = length * rates->rates[iRate]; /* very short branch lengths lead to numerical problems so prevent them */ if (relLen < MLMinRelBranchLength) relLen = MLMinRelBranchLength; expeigen[iRate*nCodes + j] = exp(relLen * transmat->eigenval[j]); } } return(expeigen); } double PairLogLk(profile_t *pA, profile_t *pB, double length, int nPos, /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates, /*OPTIONAL IN/OUT*/double *site_likelihoods) { double lk = 1.0; double loglk = 0.0; /* stores underflow of lk during the loop over positions */ int i,j; assert(rates != NULL && rates->nRateCategories > 0); numeric_t *expeigenRates = NULL; if (transmat != NULL) expeigenRates = ExpEigenRates(length, transmat, rates); if (transmat == NULL) { /* Jukes-Cantor */ assert (nCodes == 4); double *pSame = PSameVector(length, rates); double *pDiff = PDiffVector(pSame, rates); int iFreqA = 0; int iFreqB = 0; for (i = 0; i < nPos; i++) { int iRate = rates->ratecat[i]; double wA = pA->weights[i]; double wB = pB->weights[i]; int codeA = pA->codes[i]; int codeB = pB->codes[i]; numeric_t *fA = GET_FREQ(pA,i,/*IN/OUT*/iFreqA); numeric_t 
*fB = GET_FREQ(pB,i,/*IN/OUT*/iFreqB); double lkAB = 0; if (fA == NULL && fB == NULL) { if (codeA == NOCODE) { /* A is all gaps */ /* gap to gap is sum(j) 0.25 * (0.25 * pSame + 0.75 * pDiff) = sum(i) 0.25*0.25 = 0.25 gap to any character gives the same result */ lkAB = 0.25; } else if (codeB == NOCODE) { /* B is all gaps */ lkAB = 0.25; } else if (codeA == codeB) { /* A and B match */ lkAB = pSame[iRate] * wA*wB + 0.25 * (1-wA*wB); } else { /* codeA != codeB */ lkAB = pDiff[iRate] * wA*wB + 0.25 * (1-wA*wB); } } else if (fA == NULL) { /* Compare codeA to profile of B */ if (codeA == NOCODE) lkAB = 0.25; else lkAB = wA * (pDiff[iRate] + fB[codeA] * (pSame[iRate]-pDiff[iRate])) + (1.0-wA) * 0.25; /* because lkAB = wA * P(codeA->B) + (1-wA) * 0.25 P(codeA -> B) = sum(j) P(B==j) * (j==codeA ? pSame : pDiff) = sum(j) P(B==j) * pDiff + = pDiff + P(B==codeA) * (pSame-pDiff) */ } else if (fB == NULL) { /* Compare codeB to profile of A */ if (codeB == NOCODE) lkAB = 0.25; else lkAB = wB * (pDiff[iRate] + fA[codeB] * (pSame[iRate]-pDiff[iRate])) + (1.0-wB) * 0.25; } else { /* both are full profiles */ for (j = 0; j < 4; j++) lkAB += fB[j] * (fA[j] * pSame[iRate] + (1-fA[j])* pDiff[iRate]); /* P(A|B) */ } assert(lkAB > 0); lk *= lkAB; while (lk < LkUnderflow) { lk *= LkUnderflowInv; loglk -= LogLkUnderflow; } if (site_likelihoods != NULL) site_likelihoods[i] *= lkAB; } pSame = myfree(pSame, sizeof(double) * rates->nRateCategories); pDiff = myfree(pDiff, sizeof(double) * rates->nRateCategories); } else if (nCodes == 4) { /* matrix model on nucleotides */ int iFreqA = 0; int iFreqB = 0; numeric_t fAmix[4], fBmix[4]; numeric_t *fGap = &transmat->codeFreq[NOCODE][0]; for (i = 0; i < nPos; i++) { int iRate = rates->ratecat[i]; numeric_t *expeigen = &expeigenRates[iRate*4]; double wA = pA->weights[i]; double wB = pB->weights[i]; if (wA == 0 && wB == 0 && pA->codes[i] == NOCODE && pB->codes[i] == NOCODE) { /* Likelihood of A vs B is 1, so nothing changes Do not need to advance 
iFreqA or iFreqB */ continue; } numeric_t *fA = GET_FREQ(pA,i,/*IN/OUT*/iFreqA); numeric_t *fB = GET_FREQ(pB,i,/*IN/OUT*/iFreqB); if (fA == NULL) fA = &transmat->codeFreq[pA->codes[i]][0]; if (wA > 0.0 && wA < 1.0) { for (j = 0; j < 4; j++) fAmix[j] = wA*fA[j] + (1.0-wA)*fGap[j]; fA = fAmix; } if (fB == NULL) fB = &transmat->codeFreq[pB->codes[i]][0]; if (wB > 0.0 && wB < 1.0) { for (j = 0; j < 4; j++) fBmix[j] = wB*fB[j] + (1.0-wB)*fGap[j]; fB = fBmix; } /* SSE3 instructions do not speed this step up: numeric_t lkAB = vector_multiply3_sum(expeigen, fA, fB); */ // dsp this is where check for <=0 was added in 2.1.1.LG double lkAB = 0; for (j = 0; j < 4; j++) lkAB += expeigen[j]*fA[j]*fB[j]; assert(lkAB > 0); if (site_likelihoods != NULL) site_likelihoods[i] *= lkAB; lk *= lkAB; while (lk < LkUnderflow) { lk *= LkUnderflowInv; loglk -= LogLkUnderflow; } while (lk > LkUnderflowInv) { lk *= LkUnderflow; loglk += LogLkUnderflow; } } } else if (nCodes == 20) { /* matrix model on amino acids */ int iFreqA = 0; int iFreqB = 0; numeric_t fAmix[20], fBmix[20]; numeric_t *fGap = &transmat->codeFreq[NOCODE][0]; for (i = 0; i < nPos; i++) { int iRate = rates->ratecat[i]; numeric_t *expeigen = &expeigenRates[iRate*20]; double wA = pA->weights[i]; double wB = pB->weights[i]; if (wA == 0 && wB == 0 && pA->codes[i] == NOCODE && pB->codes[i] == NOCODE) { /* Likelihood of A vs B is 1, so nothing changes Do not need to advance iFreqA or iFreqB */ continue; } numeric_t *fA = GET_FREQ(pA,i,/*IN/OUT*/iFreqA); numeric_t *fB = GET_FREQ(pB,i,/*IN/OUT*/iFreqB); if (fA == NULL) fA = &transmat->codeFreq[pA->codes[i]][0]; if (wA > 0.0 && wA < 1.0) { for (j = 0; j < 20; j++) fAmix[j] = wA*fA[j] + (1.0-wA)*fGap[j]; fA = fAmix; } if (fB == NULL) fB = &transmat->codeFreq[pB->codes[i]][0]; if (wB > 0.0 && wB < 1.0) { for (j = 0; j < 20; j++) fBmix[j] = wB*fB[j] + (1.0-wB)*fGap[j]; fB = fBmix; } numeric_t lkAB = vector_multiply3_sum(expeigen, fA, fB, 20); if (!(lkAB > 0)) { /* If this happens, it 
indicates a numerical problem that needs to be addressed elsewhere, so report all the details */ fprintf(stderr, "# FastTree.c::PairLogLk -- numerical problem!\n"); fprintf(stderr, "# This block is intended for loading into R\n"); fprintf(stderr, "lkAB = %.8g\n", lkAB); fprintf(stderr, "Branch_length= %.8g\nalignment_position=%d\nnCodes=%d\nrate_category=%d\nrate=%.8g\n", length, i, nCodes, iRate, rates->rates[iRate]); fprintf(stderr, "wA=%.8g\nwB=%.8g\n", wA, wB); fprintf(stderr, "codeA = %d\ncodeB = %d\n", pA->codes[i], pB->codes[i]); fprintf(stderr, "fA = c("); for (j = 0; j < nCodes; j++) fprintf(stderr, "%s %.8g", j==0?"":",", fA[j]); fprintf(stderr,")\n"); fprintf(stderr, "fB = c("); for (j = 0; j < nCodes; j++) fprintf(stderr, "%s %.8g", j==0?"":",", fB[j]); fprintf(stderr,")\n"); fprintf(stderr, "stat = c("); for (j = 0; j < nCodes; j++) fprintf(stderr, "%s %.8g", j==0?"":",", transmat->stat[j]); fprintf(stderr,")\n"); fprintf(stderr, "eigenval = c("); for (j = 0; j < nCodes; j++) fprintf(stderr, "%s %.8g", j==0?"":",", transmat->eigenval[j]); fprintf(stderr,")\n"); fprintf(stderr, "expeigen = c("); for (j = 0; j < nCodes; j++) fprintf(stderr, "%s %.8g", j==0?"":",", expeigen[j]); fprintf(stderr,")\n"); int k; fprintf(stderr, "codeFreq = c("); for (j = 0; j < nCodes; j++) for(k = 0; k < nCodes; k++) fprintf(stderr, "%s %.8g", j==0 && k==0?"":",", transmat->codeFreq[j][k]); fprintf(stderr,")\n"); fprintf(stderr, "eigeninv = c("); for (j = 0; j < nCodes; j++) for(k = 0; k < nCodes; k++) fprintf(stderr, "%s %.8g", j==0 && k==0?"":",", transmat->eigeninv[j][k]); fprintf(stderr,")\n"); fprintf(stderr, "# Transform into matrices and compute un-rotated vectors for profiles A and B\n"); fprintf(stderr, "codeFreq = matrix(codeFreq,nrow=20);\n"); fprintf(stderr, "eigeninv = matrix(eigeninv,nrow=20);\n"); fputs("unrotA = stat * (eigeninv %*% fA)\n", stderr); fputs("unrotB = stat * (eigeninv %*% fB)\n", stderr); fprintf(stderr,"# End of R block\n"); } assert(lkAB > 0); 
if (site_likelihoods != NULL) site_likelihoods[i] *= lkAB; lk *= lkAB; while (lk < LkUnderflow) { lk *= LkUnderflowInv; loglk -= LogLkUnderflow; } while (lk > LkUnderflowInv) { lk *= LkUnderflow; loglk += LogLkUnderflow; } } } else { assert(0); /* illegal nCodes */ } if (transmat != NULL) expeigenRates = myfree(expeigenRates, sizeof(numeric_t) * rates->nRateCategories * 20); loglk += log(lk); nLkCompute++; return(loglk); } double MLQuartetLogLk(profile_t *pA, profile_t *pB, profile_t *pC, profile_t *pD, int nPos, /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates, /*IN*/double branch_lengths[5], /*OPTIONAL OUT*/double *site_likelihoods) { profile_t *pAB = PosteriorProfile(pA, pB, branch_lengths[0], branch_lengths[1], transmat, rates, nPos, /*nConstraints*/0); profile_t *pCD = PosteriorProfile(pC, pD, branch_lengths[2], branch_lengths[3], transmat, rates, nPos, /*nConstraints*/0); if (site_likelihoods != NULL) { int i; for (i = 0; i < nPos; i++) site_likelihoods[i] = 1.0; } /* Roughly, P(A,B,C,D) = P(A) P(B|A) P(D|C) P(AB | CD) */ double loglk = PairLogLk(pA, pB, branch_lengths[0]+branch_lengths[1], nPos, transmat, rates, /*OPTIONAL IN/OUT*/site_likelihoods) + PairLogLk(pC, pD, branch_lengths[2]+branch_lengths[3], nPos, transmat, rates, /*OPTIONAL IN/OUT*/site_likelihoods) + PairLogLk(pAB, pCD, branch_lengths[4], nPos, transmat, rates, /*OPTIONAL IN/OUT*/site_likelihoods); pAB = FreeProfile(pAB, nPos, /*nConstraints*/0); pCD = FreeProfile(pCD, nPos, /*nConstraints*/0); return(loglk); } double PairNegLogLk(double x, void *data) { quartet_opt_t *qo = (quartet_opt_t *)data; assert(qo != NULL); assert(qo->pair1 != NULL && qo->pair2 != NULL); qo->nEval++; double loglk = PairLogLk(qo->pair1, qo->pair2, x, qo->nPos, qo->transmat, qo->rates, /*site_lk*/NULL); assert(loglk < 1e100); if (verbose > 5) fprintf(stderr, "PairLogLk(%.4f) = %.4f\n", x, loglk); return(-loglk); } double MLQuartetOptimize(profile_t *pA, profile_t *pB, profile_t *pC, profile_t *pD, int nPos, 
/*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates, /*IN/OUT*/double branch_lengths[5], /*OPTIONAL OUT*/bool *pStarTest, /*OPTIONAL OUT*/double *site_likelihoods) { int j; double start_length[5]; for (j = 0; j < 5; j++) { start_length[j] = branch_lengths[j]; if (branch_lengths[j] < MLMinBranchLength) branch_lengths[j] = MLMinBranchLength; } quartet_opt_t qopt = { nPos, transmat, rates, /*nEval*/0, /*pair1*/NULL, /*pair2*/NULL }; double f2x, negloglk; if (pStarTest != NULL) *pStarTest = false; /* First optimize internal branch, then branch to A, B, C, D, in turn May use star test to quit after internal branch */ profile_t *pAB = PosteriorProfile(pA, pB, branch_lengths[LEN_A], branch_lengths[LEN_B], transmat, rates, nPos, /*nConstraints*/0); profile_t *pCD = PosteriorProfile(pC, pD, branch_lengths[LEN_C], branch_lengths[LEN_D], transmat, rates, nPos, /*nConstraints*/0); qopt.pair1 = pAB; qopt.pair2 = pCD; branch_lengths[LEN_I] = onedimenmin(/*xmin*/MLMinBranchLength, /*xguess*/branch_lengths[LEN_I], /*xmax*/6.0, PairNegLogLk, /*data*/&qopt, /*ftol*/MLFTolBranchLength, /*atol*/MLMinBranchLengthTolerance, /*OUT*/&negloglk, /*OUT*/&f2x); if (pStarTest != NULL) { assert(site_likelihoods == NULL); double loglkStar = -PairNegLogLk(MLMinBranchLength, &qopt); if (loglkStar < -negloglk - closeLogLkLimit) { *pStarTest = true; double off = PairLogLk(pA, pB, branch_lengths[LEN_A] + branch_lengths[LEN_B], qopt.nPos, qopt.transmat, qopt.rates, /*site_lk*/NULL) + PairLogLk(pC, pD, branch_lengths[LEN_C] + branch_lengths[LEN_D], qopt.nPos, qopt.transmat, qopt.rates, /*site_lk*/NULL); pAB = FreeProfile(pAB, nPos, /*nConstraints*/0); pCD = FreeProfile(pCD, nPos, /*nConstraints*/0); return (-negloglk + off); } } pAB = FreeProfile(pAB, nPos, /*nConstraints*/0); profile_t *pBCD = PosteriorProfile(pB, pCD, branch_lengths[LEN_B], branch_lengths[LEN_I], transmat, rates, nPos, /*nConstraints*/0); qopt.pair1 = pA; qopt.pair2 = pBCD; branch_lengths[LEN_A] = 
onedimenmin(/*xmin*/MLMinBranchLength, /*xguess*/branch_lengths[LEN_A], /*xmax*/6.0, PairNegLogLk, /*data*/&qopt, /*ftol*/MLFTolBranchLength, /*atol*/MLMinBranchLengthTolerance, /*OUT*/&negloglk, /*OUT*/&f2x); pBCD = FreeProfile(pBCD, nPos, /*nConstraints*/0); profile_t *pACD = PosteriorProfile(pA, pCD, branch_lengths[LEN_A], branch_lengths[LEN_I], transmat, rates, nPos, /*nConstraints*/0); qopt.pair1 = pB; qopt.pair2 = pACD; branch_lengths[LEN_B] = onedimenmin(/*xmin*/MLMinBranchLength, /*xguess*/branch_lengths[LEN_B], /*xmax*/6.0, PairNegLogLk, /*data*/&qopt, /*ftol*/MLFTolBranchLength, /*atol*/MLMinBranchLengthTolerance, /*OUT*/&negloglk, /*OUT*/&f2x); pACD = FreeProfile(pACD, nPos, /*nConstraints*/0); pCD = FreeProfile(pCD, nPos, /*nConstraints*/0); pAB = PosteriorProfile(pA, pB, branch_lengths[LEN_A], branch_lengths[LEN_B], transmat, rates, nPos, /*nConstraints*/0); profile_t *pABD = PosteriorProfile(pAB, pD, branch_lengths[LEN_I], branch_lengths[LEN_D], transmat, rates, nPos, /*nConstraints*/0); qopt.pair1 = pC; qopt.pair2 = pABD; branch_lengths[LEN_C] = onedimenmin(/*xmin*/MLMinBranchLength, /*xguess*/branch_lengths[LEN_C], /*xmax*/6.0, PairNegLogLk, /*data*/&qopt, /*ftol*/MLFTolBranchLength, /*atol*/MLMinBranchLengthTolerance, /*OUT*/&negloglk, /*OUT*/&f2x); pABD = FreeProfile(pABD, nPos, /*nConstraints*/0); profile_t *pABC = PosteriorProfile(pAB, pC, branch_lengths[LEN_I], branch_lengths[LEN_C], transmat, rates, nPos, /*nConstraints*/0); qopt.pair1 = pD; qopt.pair2 = pABC; branch_lengths[LEN_D] = onedimenmin(/*xmin*/MLMinBranchLength, /*xguess*/branch_lengths[LEN_D], /*xmax*/6.0, PairNegLogLk, /*data*/&qopt, /*ftol*/MLFTolBranchLength, /*atol*/MLMinBranchLengthTolerance, /*OUT*/&negloglk, /*OUT*/&f2x); /* Compute the total quartet likelihood PairLogLk(ABC,D) + PairLogLk(AB,C) + PairLogLk(A,B) */ double loglkABCvsD = -negloglk; if (site_likelihoods) { for (j = 0; j < nPos; j++) site_likelihoods[j] = 1.0; PairLogLk(pABC, pD, branch_lengths[LEN_D], qopt.nPos, 
qopt.transmat, qopt.rates, /*IN/OUT*/site_likelihoods); } double quartetloglk = loglkABCvsD + PairLogLk(pAB, pC, branch_lengths[LEN_I] + branch_lengths[LEN_C], qopt.nPos, qopt.transmat, qopt.rates, /*IN/OUT*/site_likelihoods) + PairLogLk(pA, pB, branch_lengths[LEN_A] + branch_lengths[LEN_B], qopt.nPos, qopt.transmat, qopt.rates, /*IN/OUT*/site_likelihoods); pABC = FreeProfile(pABC, nPos, /*nConstraints*/0); pAB = FreeProfile(pAB, nPos, /*nConstraints*/0); if (verbose > 3) { double loglkStart = MLQuartetLogLk(pA, pB, pC, pD, nPos, transmat, rates, start_length, /*site_lk*/NULL); fprintf(stderr, "Optimize loglk from %.5f to %.5f eval %d lengths from\n" " %.5f %.5f %.5f %.5f %.5f to\n" " %.5f %.5f %.5f %.5f %.5f\n", loglkStart, quartetloglk, qopt.nEval, start_length[0], start_length[1], start_length[2], start_length[3], start_length[4], branch_lengths[0], branch_lengths[1], branch_lengths[2], branch_lengths[3], branch_lengths[4]); } return(quartetloglk); } nni_t MLQuartetNNI(profile_t *profiles[4], /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates, int nPos, int nConstraints, /*OUT*/double criteria[3], /* The three potential quartet log-likelihoods */ /*IN/OUT*/numeric_t len[5], bool bFast) { int i; double lenABvsCD[5] = {len[LEN_A], len[LEN_B], len[LEN_C], len[LEN_D], len[LEN_I]}; double lenACvsBD[5] = {len[LEN_A], len[LEN_C], len[LEN_B], len[LEN_D], len[LEN_I]}; /* Swap B & C */ double lenADvsBC[5] = {len[LEN_A], len[LEN_D], len[LEN_C], len[LEN_B], len[LEN_I]}; /* Swap B & D */ bool bConsiderAC = true; bool bConsiderAD = true; int iRound; int nRounds = mlAccuracy < 2 ? 
2 : mlAccuracy; double penalty[3]; QuartetConstraintPenalties(profiles, nConstraints, /*OUT*/penalty); if (penalty[ABvsCD] > penalty[ACvsBD] || penalty[ABvsCD] > penalty[ADvsBC]) bFast = false; #ifdef OPENMP bFast = false; /* turn off star topology test */ #endif for (iRound = 0; iRound < nRounds; iRound++) { bool bStarTest = false; { #ifdef OPENMP #pragma omp parallel #pragma omp sections #endif { #ifdef OPENMP #pragma omp section #endif { criteria[ABvsCD] = MLQuartetOptimize(profiles[0], profiles[1], profiles[2], profiles[3], nPos, transmat, rates, /*IN/OUT*/lenABvsCD, bFast ? &bStarTest : NULL, /*site_likelihoods*/NULL) - penalty[ABvsCD]; /* subtract penalty b/c we are trying to maximize log lk */ } #ifdef OPENMP #pragma omp section #else if (bStarTest) { nStarTests++; criteria[ACvsBD] = -1e20; criteria[ADvsBC] = -1e20; len[LEN_I] = lenABvsCD[LEN_I]; return(ABvsCD); } #endif { if (bConsiderAC) criteria[ACvsBD] = MLQuartetOptimize(profiles[0], profiles[2], profiles[1], profiles[3], nPos, transmat, rates, /*IN/OUT*/lenACvsBD, NULL, /*site_likelihoods*/NULL) - penalty[ACvsBD]; } #ifdef OPENMP #pragma omp section #endif { if (bConsiderAD) criteria[ADvsBC] = MLQuartetOptimize(profiles[0], profiles[3], profiles[2], profiles[1], nPos, transmat, rates, /*IN/OUT*/lenADvsBC, NULL, /*site_likelihoods*/NULL) - penalty[ADvsBC]; } } } /* end parallel sections */ if (mlAccuracy < 2) { /* If clearly worse then ABvsCD, or have short internal branch length and worse, then give up */ if (criteria[ACvsBD] < criteria[ABvsCD] - closeLogLkLimit || (lenACvsBD[LEN_I] <= 2.0*MLMinBranchLength && criteria[ACvsBD] < criteria[ABvsCD])) bConsiderAC = false; if (criteria[ADvsBC] < criteria[ABvsCD] - closeLogLkLimit || (lenADvsBC[LEN_I] <= 2.0*MLMinBranchLength && criteria[ADvsBC] < criteria[ABvsCD])) bConsiderAD = false; if (!bConsiderAC && !bConsiderAD) break; /* If clearly better than either alternative, then give up (Comparison is probably biased in favor of ABvsCD anyway) */ if 
(criteria[ACvsBD] > criteria[ABvsCD] + closeLogLkLimit
	  && criteria[ACvsBD] > criteria[ADvsBC] + closeLogLkLimit)
	break;
      if (criteria[ADvsBC] > criteria[ABvsCD] + closeLogLkLimit
	  && criteria[ADvsBC] > criteria[ACvsBD] + closeLogLkLimit)
	break;
    }
  } /* end loop over rounds */
  if (verbose > 2) {
    fprintf(stderr, "Optimized quartet for %d rounds: ABvsCD %.5f ACvsBD %.5f ADvsBC %.5f\n",
	    iRound, criteria[ABvsCD], criteria[ACvsBD], criteria[ADvsBC]);
  }
  /* return the topology with the largest criterion, copying its optimized
     branch lengths back into len[]; ties fall through to AB|CD */
  if (criteria[ACvsBD] > criteria[ABvsCD] && criteria[ACvsBD] > criteria[ADvsBC]) {
    for (i = 0; i < 5; i++) len[i] = lenACvsBD[i];
    return(ACvsBD);
  } else if (criteria[ADvsBC] > criteria[ABvsCD] && criteria[ADvsBC] > criteria[ACvsBD]) {
    for (i = 0; i < 5; i++) len[i] = lenADvsBC[i];
    return(ADvsBC);
  } else {
    for (i = 0; i < 5; i++) len[i] = lenABvsCD[i];
    return(ABvsCD);
  }
}

/* Sum of all branch lengths in the tree. Optionally recomputes the profiles
   of internal nodes first (via SetProfile with weight -1.0); always refreshes
   branch lengths with UpdateBranchLengths before summing. */
double TreeLength(/*IN/OUT*/NJ_t *NJ, bool recomputeProfiles) {
  if (recomputeProfiles) {
    traversal_t traversal2 = InitTraversal(NJ);
    int j = NJ->root;
    while((j = TraversePostorder(j, NJ, /*IN/OUT*/traversal2, /*pUp*/NULL)) >= 0) {
      /* nothing to do for leaves or root */
      if (j >= NJ->nSeq && j != NJ->root)
	SetProfile(/*IN/OUT*/NJ, j, /*noweight*/-1.0);
    }
    traversal2 = FreeTraversal(traversal2,NJ);
  }
  UpdateBranchLengths(/*IN/OUT*/NJ);
  double total_len = 0;
  int iNode;
  for (iNode = 0; iNode < NJ->maxnode; iNode++)
    total_len += NJ->branchlength[iNode];
  return(total_len);
}

/* Log-likelihood of the whole tree, accumulated over a postorder traversal.
   If site_loglk is non-NULL it receives the per-site log-likelihoods. */
double TreeLogLk(/*IN*/NJ_t *NJ, /*OPTIONAL OUT*/double *site_loglk) {
  int i;
  if (NJ->nSeq < 2)
    return(0.0);
  double loglk = 0.0;
  double *site_likelihood = NULL;
  if (site_loglk != NULL) {
    site_likelihood = mymalloc(sizeof(double)*NJ->nPos);
    for (i = 0; i < NJ->nPos; i++) {
      site_likelihood[i] = 1.0;
      site_loglk[i] = 0.0;
    }
  }
  traversal_t traversal = InitTraversal(NJ);
  int node = NJ->root;
  while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) {
    int nChild = NJ->child[node].nChild;
    if (nChild == 0)
      continue;			/* leaves contribute via their parents */
    assert(nChild >= 2);
    int *children = NJ->child[node].child;
    double
loglkchild = PairLogLk(NJ->profiles[children[0]], NJ->profiles[children[1]], NJ->branchlength[children[0]]+NJ->branchlength[children[1]], NJ->nPos, NJ->transmat, &NJ->rates, /*IN/OUT*/site_likelihood); loglk += loglkchild; if (site_likelihood != NULL) { /* prevent underflows */ for (i = 0; i < NJ->nPos; i++) { while(site_likelihood[i] < LkUnderflow) { site_likelihood[i] *= LkUnderflowInv; site_loglk[i] -= LogLkUnderflow; } } } if (verbose > 2) fprintf(stderr, "At %d: LogLk(%d:%.4f,%d:%.4f) = %.3f\n", node, children[0], NJ->branchlength[children[0]], children[1], NJ->branchlength[children[1]], loglkchild); if (NJ->child[node].nChild == 3) { assert(node == NJ->root); /* Infer the common parent of the 1st two to define the third... */ profile_t *pAB = PosteriorProfile(NJ->profiles[children[0]], NJ->profiles[children[1]], NJ->branchlength[children[0]], NJ->branchlength[children[1]], NJ->transmat, &NJ->rates, NJ->nPos, /*nConstraints*/0); double loglkup = PairLogLk(pAB, NJ->profiles[children[2]], NJ->branchlength[children[2]], NJ->nPos, NJ->transmat, &NJ->rates, /*IN/OUT*/site_likelihood); loglk += loglkup; if (verbose > 2) fprintf(stderr, "At root %d: LogLk((%d/%d),%d:%.3f) = %.3f\n", node, children[0], children[1], children[2], NJ->branchlength[children[2]], loglkup); pAB = FreeProfile(pAB, NJ->nPos, NJ->nConstraints); } } traversal = FreeTraversal(traversal,NJ); if (site_likelihood != NULL) { for (i = 0; i < NJ->nPos; i++) { site_loglk[i] += log(site_likelihood[i]); } site_likelihood = myfree(site_likelihood, sizeof(double)*NJ->nPos); } /* For Jukes-Cantor, with a tree of size 4, if the children of the root are (A,B), C, and D, then P(ABCD) = P(A) P(B|A) P(C|AB) P(D|ABC) Above we compute P(B|A) P(C|AB) P(D|ABC) -- note P(B|A) is at the child of root and P(C|AB) P(D|ABC) is at root. 
Similarly if the children of the root are C, D, and (A,B), then P(ABCD) = P(C|D) P(A|B) P(AB|CD) P(D), and above we compute that except for P(D) So we need to multiply by P(A) = 0.25, so we pay log(4) at each position (if ungapped). Each gapped position in any sequence reduces the payment by log(4) For JTT or GTR, we are computing P(A & B) and the posterior profiles are scaled to take the prior into account, so we do not need any correction. codeFreq[NOCODE] is scaled x higher so that P(-) = 1 not P(-)=1/nCodes, so gaps do not need to be corrected either. */ if (nCodes == 4 && NJ->transmat == NULL) { int nGaps = 0; double logNCodes = log((double)nCodes); for (i = 0; i < NJ->nPos; i++) { int nGapsThisPos = 0; for (node = 0; node < NJ->nSeq; node++) { unsigned char *codes = NJ->profiles[node]->codes; if (codes[i] == NOCODE) nGapsThisPos++; } nGaps += nGapsThisPos; if (site_loglk != NULL) { site_loglk[i] += nGapsThisPos * logNCodes; if (nCodes == 4 && NJ->transmat == NULL) site_loglk[i] -= logNCodes; } } loglk -= NJ->nPos * logNCodes; loglk += nGaps * logNCodes; /* do not pay for gaps -- only Jukes-Cantor */ } return(loglk); } void SetMLGtr(/*IN/OUT*/NJ_t *NJ, /*OPTIONAL IN*/double *freq_in, /*OPTIONAL WRITE*/FILE *fpLog) { int i; assert(nCodes==4); gtr_opt_t gtr; gtr.NJ = NJ; gtr.fpLog = fpLog; if (freq_in != NULL) { for (i=0; i<4; i++) gtr.freq[i]=freq_in[i]; } else { /* n[] and sum were int in FastTree 2.1.9 and earlier -- this caused gtr analyses to fail on analyses with >2e9 positions */ long n[4] = {1,1,1,1}; /* pseudocounts */ for (i=0; i<NJ->nSeq; i++) { unsigned char *codes = NJ->profiles[i]->codes; int iPos; for (iPos=0; iPos<NJ->nPos; iPos++) if (codes[iPos] < 4) n[codes[iPos]]++; } long sum = n[0]+n[1]+n[2]+n[3]; for (i=0; i<4; i++) gtr.freq[i] = n[i]/(double)sum; } for (i=0; i<6; i++) gtr.rates[i] = 1.0; int nRounds = mlAccuracy < 2 ? 
2 : mlAccuracy; for (i = 0; i < nRounds; i++) { for (gtr.iRate = 0; gtr.iRate < 6; gtr.iRate++) { ProgressReport("Optimizing GTR model, step %d of %d", i*6+gtr.iRate+1, 12, 0, 0); double negloglk, f2x; gtr.rates[gtr.iRate] = onedimenmin(/*xmin*/0.05, /*xguess*/gtr.rates[gtr.iRate], /*xmax*/20.0, GTRNegLogLk, /*data*/&gtr, /*ftol*/0.001, /*atol*/0.0001, /*OUT*/&negloglk, /*OUT*/&f2x); } } /* normalize gtr so last rate is 1 -- specifying that rate separately is useful for optimization only */ for (i = 0; i < 5; i++) gtr.rates[i] /= gtr.rates[5]; gtr.rates[5] = 1.0; if (verbose) { fprintf(stderr, "GTR Frequencies: %.4f %.4f %.4f %.4f\n", gtr.freq[0], gtr.freq[1], gtr.freq[2], gtr.freq[3]); fprintf(stderr, "GTR rates(ac ag at cg ct gt) %.4f %.4f %.4f %.4f %.4f %.4f\n", gtr.rates[0],gtr.rates[1],gtr.rates[2],gtr.rates[3],gtr.rates[4],gtr.rates[5]); } if (fpLog != NULL) { fprintf(fpLog, "GTRFreq\t%.4f\t%.4f\t%.4f\t%.4f\n", gtr.freq[0], gtr.freq[1], gtr.freq[2], gtr.freq[3]); fprintf(fpLog, "GTRRates\t%.4f\t%.4f\t%.4f\t%.4f\t%.4f\t%.4f\n", gtr.rates[0],gtr.rates[1],gtr.rates[2],gtr.rates[3],gtr.rates[4],gtr.rates[5]); } myfree(NJ->transmat, sizeof(transition_matrix_t)); NJ->transmat = CreateGTR(gtr.rates, gtr.freq); RecomputeMLProfiles(/*IN/OUT*/NJ); OptimizeAllBranchLengths(/*IN/OUT*/NJ); } double GTRNegLogLk(double x, void *data) { gtr_opt_t *gtr = (gtr_opt_t*)data; assert(nCodes == 4); assert(gtr->NJ != NULL); assert(gtr->iRate >= 0 && gtr->iRate < 6); assert(x > 0); transition_matrix_t *old = gtr->NJ->transmat; double rates[6]; int i; for (i = 0; i < 6; i++) rates[i] = gtr->rates[i]; rates[gtr->iRate] = x; FILE *fpLog = gtr->fpLog; if (fpLog) fprintf(fpLog, "GTR_Opt\tfreq %.5f %.5f %.5f %.5f rates %.5f %.5f %.5f %.5f %.5f %.5f\n", gtr->freq[0], gtr->freq[1], gtr->freq[2], gtr->freq[3], rates[0], rates[1], rates[2], rates[3], rates[4], rates[5]); gtr->NJ->transmat = CreateGTR(rates, gtr->freq); RecomputeMLProfiles(/*IN/OUT*/gtr->NJ); double loglk = TreeLogLk(gtr->NJ, 
/*site_loglk*/NULL);
  /* Restore the previous transition matrix; profiles computed under the
     trial matrix are deliberately left for the caller to recompute */
  myfree(gtr->NJ->transmat, sizeof(transition_matrix_t));
  gtr->NJ->transmat = old;
  /* Do not recompute profiles -- assume the caller will do that */
  if (verbose > 2)
    fprintf(stderr, "GTR LogLk(%.5f %.5f %.5f %.5f %.5f %.5f) = %f\n",
	    rates[0], rates[1], rates[2], rates[3], rates[4], rates[5], loglk);
  if (fpLog)
    fprintf(fpLog, "GTR_Opt\tGTR LogLk(%.5f %.5f %.5f %.5f %.5f %.5f) = %f\n",
	    rates[0], rates[1], rates[2], rates[3], rates[4], rates[5], loglk);
  /* Negated because the one-dimensional optimizer minimizes */
  return(-loglk);
}

/* Caller must free the resulting vector of n rates */
numeric_t *MLSiteRates(int nRateCategories) {
  /* Even spacing from 1/nRate to nRate */
  /* i.e., evenly spaced on a log scale, symmetric about rate 1.0 */
  double logNCat = log((double)nRateCategories);
  double logMinRate = -logNCat;
  double logMaxRate = logNCat;
  double logd = (logMaxRate-logMinRate)/(double)(nRateCategories-1);

  numeric_t *rates = mymalloc(sizeof(numeric_t)*nRateCategories);
  int i;
  for (i = 0; i < nRateCategories; i++)
    rates[i] = exp(logMinRate + logd*(double)i);
  return(rates);
}

/* Per-site log likelihoods with every site forced to each trial rate in turn.
   Returns a malloc'd array of nPos*nRateCategories entries, indexed as
   [iRate*nPos + iPos]; caller must free it.
   Temporarily overwrites NJ->rates.rates (and the ML profiles); both are
   restored before returning. */
double *MLSiteLikelihoodsByRate(/*IN*/NJ_t *NJ, /*IN*/numeric_t *rates, int nRateCategories) {
  double *site_loglk = mymalloc(sizeof(double)*NJ->nPos*nRateCategories);

  /* save the original rates */
  assert(NJ->rates.nRateCategories > 0);
  numeric_t *oldRates = NJ->rates.rates;
  NJ->rates.rates = mymalloc(sizeof(numeric_t) * NJ->rates.nRateCategories);

  /* Compute site likelihood for each rate */
  int iPos;
  int iRate;
  for (iRate = 0; iRate < nRateCategories; iRate++) {
    int i;
    /* Force every existing rate category to the single trial rate */
    for (i = 0; i < NJ->rates.nRateCategories; i++)
      NJ->rates.rates[i] = rates[iRate];
    RecomputeMLProfiles(/*IN/OUT*/NJ);
    double loglk = TreeLogLk(NJ, /*OUT*/&site_loglk[NJ->nPos*iRate]);
    ProgressReport("Site likelihoods with rate category %d of %d", iRate+1, nRateCategories, 0, 0);
    if(verbose > 2) {
      fprintf(stderr, "Rate %.3f Loglk %.3f SiteLogLk", rates[iRate], loglk);
      for (iPos = 0; iPos < NJ->nPos; iPos++)
	fprintf(stderr,"\t%.3f", site_loglk[NJ->nPos*iRate + iPos]);
      fprintf(stderr,"\n");
    }
  }

  /* restore original rates and profiles */
myfree(NJ->rates.rates, sizeof(numeric_t) * NJ->rates.nRateCategories);
  NJ->rates.rates = oldRates;
  RecomputeMLProfiles(/*IN/OUT*/NJ);
  return(site_loglk);
}

/* Assign one of nRateCategories rates to each site (the CAT approximation):
   for each position, pick the rate maximizing the site log likelihood plus a
   gamma-shaped log prior, then rescale the chosen rates to average 1.0 and
   recompute the ML profiles under the new per-site rates. */
void SetMLRates(/*IN/OUT*/NJ_t *NJ, int nRateCategories) {
  assert(nRateCategories > 0);
  AllocRateCategories(/*IN/OUT*/&NJ->rates, 1, NJ->nPos); /* set to 1 category of rate 1 */
  if (nRateCategories == 1) {
    RecomputeMLProfiles(/*IN/OUT*/NJ);
    return;
  }
  numeric_t *rates = MLSiteRates(nRateCategories);
  double *site_loglk = MLSiteLikelihoodsByRate(/*IN*/NJ, /*IN*/rates, nRateCategories);

  /* Select best rate for each site, correcting for the prior
     For a prior, use a gamma distribution with shape parameter 3, scale 1/3, so
     Prior(rate) ~ rate**2 * exp(-3*rate)
     log Prior(rate) = C + 2 * log(rate) - 3 * rate
  */
  double sumRates = 0;
  int iPos;
  int iRate;
  for (iPos = 0; iPos < NJ->nPos; iPos++) {
    int iBest = -1;
    double dBest = -1e20;
    for (iRate = 0; iRate < nRateCategories; iRate++) {
      double site_loglk_with_prior = site_loglk[NJ->nPos*iRate + iPos]
	+ 2.0 * log(rates[iRate]) - 3.0 * rates[iRate];
      if (site_loglk_with_prior > dBest) {
	iBest = iRate;
	dBest = site_loglk_with_prior;
      }
    }
    if (verbose > 2)
      fprintf(stderr, "Selected rate category %d rate %.3f for position %d\n",
	      iBest, rates[iBest], iPos+1);
    NJ->rates.ratecat[iPos] = iBest;
    sumRates += rates[iBest];
  }
  site_loglk = myfree(site_loglk, sizeof(double)*NJ->nPos*nRateCategories);

  /* Force the rates to average to 1 */
  double avgRate = sumRates/NJ->nPos;
  for (iRate = 0; iRate < nRateCategories; iRate++)
    rates[iRate] /= avgRate;

  /* Save the rates */
  NJ->rates.rates = myfree(NJ->rates.rates, sizeof(numeric_t) * NJ->rates.nRateCategories);
  NJ->rates.rates = rates;
  NJ->rates.nRateCategories = nRateCategories;

  /* Update profiles based on rates */
  RecomputeMLProfiles(/*IN/OUT*/NJ);

  if (verbose) {
    fprintf(stderr, "Switched to using %d rate categories (CAT approximation)\n", nRateCategories);
    fprintf(stderr, "Rate categories were divided by %.3f so that average rate = 1.0\n",
avgRate);
    fprintf(stderr, "CAT-based log-likelihoods may not be comparable across runs\n");
    if (!gammaLogLk)
      fprintf(stderr, "Use -gamma for approximate but comparable Gamma(20) log-likelihoods\n");
  }
}

/* Discrete-gamma log likelihood of the alignment, given per-site log
   likelihoods for each rate category (s->site_loglk).
   Each category's weight is the gamma(s->alpha) probability mass between the
   midpoints of adjacent scaled rates (s->mult * s->rates[]).
   If gamma_loglk_sites is non-NULL it receives the per-site values. */
double GammaLogLk(/*IN*/siteratelk_t *s, /*OPTIONAL OUT*/double *gamma_loglk_sites) {
  int iRate, iPos;
  double *dRate = mymalloc(sizeof(double) * s->nRateCats);
  for (iRate = 0; iRate < s->nRateCats; iRate++) {
    /* The probability density for each rate is approximated by the
       total density between the midpoints */
    double pMin = iRate == 0 ? 0.0 :
      PGamma(s->mult * (s->rates[iRate-1] + s->rates[iRate])/2.0, s->alpha);
    double pMax = iRate == s->nRateCats-1 ? 1.0 :
      PGamma(s->mult * (s->rates[iRate]+s->rates[iRate+1])/2.0, s->alpha);
    dRate[iRate] = pMax-pMin;
  }

  double loglk = 0.0;
  for (iPos = 0; iPos < s->nPos; iPos++) {
    /* Prevent underflow on large trees by comparing to maximum loglk */
    double maxloglk = -1e20;
    for (iRate = 0; iRate < s->nRateCats; iRate++) {
      double site_loglk = s->site_loglk[s->nPos*iRate + iPos];
      if (site_loglk > maxloglk)
	maxloglk = site_loglk;
    }
    double rellk = 0; /* likelihood scaled by exp(maxloglk) */
    for (iRate = 0; iRate < s->nRateCats; iRate++) {
      double lk = exp(s->site_loglk[s->nPos*iRate + iPos] - maxloglk);
      rellk += lk * dRate[iRate];
    }
    double loglk_site = maxloglk + log(rellk);
    loglk += loglk_site;
    if (gamma_loglk_sites != NULL)
      gamma_loglk_sites[iPos] = loglk_site;
  }
  dRate = myfree(dRate, sizeof(double)*s->nRateCats);
  return(loglk);
}

/* Objective for one-dimensional optimization of the gamma shape parameter */
double OptAlpha(double alpha, void *data) {
  siteratelk_t *s = (siteratelk_t *)data;
  s->alpha = alpha;
  return(-GammaLogLk(s, NULL));
}

/* Objective for one-dimensional optimization of the overall rate multiplier */
double OptMult(double mult, void *data) {
  siteratelk_t *s = (siteratelk_t *)data;
  s->mult = mult;
  return(-GammaLogLk(s, NULL));
}

/* Input site_loglk must be for each rate */
double RescaleGammaLogLk(int nPos, int nRateCats, /*IN*/numeric_t *rates, /*IN*/double *site_loglk,
			 /*OPTIONAL*/FILE *fpLog) {
  siteratelk_t s = { /*mult*/1.0, /*alpha*/1.0, nPos, nRateCats, rates, site_loglk
}; double fx, f2x; int i; fx = -GammaLogLk(&s, NULL); if (verbose>2) fprintf(stderr, "Optimizing alpha, starting at loglk %.3f\n", -fx); for (i = 0; i < 10; i++) { ProgressReport("Optimizing alpha round %d", i+1, 0, 0, 0); double start = fx; s.alpha = onedimenmin(0.01, s.alpha, 10.0, OptAlpha, &s, 0.001, 0.001, &fx, &f2x); if (verbose>2) fprintf(stderr, "Optimize alpha round %d to %.3f lk %.3f\n", i+1, s.alpha, -fx); s.mult = onedimenmin(0.01, s.mult, 10.0, OptMult, &s, 0.001, 0.001, &fx, &f2x); if (verbose>2) fprintf(stderr, "Optimize mult round %d to %.3f lk %.3f\n", i+1, s.mult, -fx); if (fx > start - 0.001) { if (verbose>2) fprintf(stderr, "Optimizing alpha & mult converged\n"); break; } } double *gamma_loglk_sites = mymalloc(sizeof(double) * nPos); double gammaLogLk = GammaLogLk(&s, /*OUT*/gamma_loglk_sites); if (verbose > 0) fprintf(stderr, "Gamma(%d) LogLk = %.3f alpha = %.3f rescaling lengths by %.3f\n", nRateCats, gammaLogLk, s.alpha, 1/s.mult); if (fpLog) { int iPos; int iRate; fprintf(fpLog, "Gamma%dLogLk\t%.3f\tApproximate\tAlpha\t%.3f\tRescale\t%.3f\n", nRateCats, gammaLogLk, s.alpha, 1/s.mult); fprintf(fpLog, "Gamma%d\tSite\tLogLk", nRateCats); for (iRate = 0; iRate < nRateCats; iRate++) fprintf(fpLog, "\tr=%.3f", rates[iRate]/s.mult); fprintf(fpLog,"\n"); for (iPos = 0; iPos < nPos; iPos++) { fprintf(fpLog, "Gamma%d\t%d\t%.3f", nRateCats, iPos, gamma_loglk_sites[iPos]); for (iRate = 0; iRate < nRateCats; iRate++) fprintf(fpLog, "\t%.3f", site_loglk[nPos*iRate + iPos]); fprintf(fpLog,"\n"); } } gamma_loglk_sites = myfree(gamma_loglk_sites, sizeof(double) * nPos); return(1.0/s.mult); } double MLPairOptimize(profile_t *pA, profile_t *pB, int nPos, /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates, /*IN/OUT*/double *branch_length) { quartet_opt_t qopt = { nPos, transmat, rates, /*nEval*/0, /*pair1*/pA, /*pair2*/pB }; double f2x,negloglk; *branch_length = onedimenmin(/*xmin*/MLMinBranchLength, /*xguess*/*branch_length, /*xmax*/6.0, PairNegLogLk, 
/*data*/&qopt, /*ftol*/MLFTolBranchLength, /*atol*/MLMinBranchLengthTolerance, /*OUT*/&negloglk, /*OUT*/&f2x); return(-negloglk); /* the log likelihood */ } void OptimizeAllBranchLengths(/*IN/OUT*/NJ_t *NJ) { if (NJ->nSeq < 2) return; if (NJ->nSeq == 2) { int parent = NJ->root; assert(NJ->child[parent].nChild==2); int nodes[2] = { NJ->child[parent].child[0], NJ->child[parent].child[1] }; double length = 1.0; (void)MLPairOptimize(NJ->profiles[nodes[0]], NJ->profiles[nodes[1]], NJ->nPos, NJ->transmat, &NJ->rates, /*IN/OUT*/&length); NJ->branchlength[nodes[0]] = length/2.0; NJ->branchlength[nodes[1]] = length/2.0; return; }; traversal_t traversal = InitTraversal(NJ); profile_t **upProfiles = UpProfiles(NJ); int node = NJ->root; int iDone = 0; while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) { int nChild = NJ->child[node].nChild; if (nChild > 0) { if ((iDone % 100) == 0) ProgressReport("ML Lengths %d of %d splits", iDone+1, NJ->maxnode - NJ->nSeq, 0, 0); iDone++; /* optimize the branch lengths between self, parent, and children, with two iterations */ assert(nChild == 2 || nChild == 3); int nodes[3] = { NJ->child[node].child[0], NJ->child[node].child[1], nChild == 3 ? NJ->child[node].child[2] : node }; profile_t *profiles[3] = { NJ->profiles[nodes[0]], NJ->profiles[nodes[1]], nChild == 3 ? 
NJ->profiles[nodes[2]] : GetUpProfile(/*IN/OUT*/upProfiles, NJ, node, /*useML*/true) }; int iter; for (iter = 0; iter < 2; iter++) { int i; for (i = 0; i < 3; i++) { profile_t *pA = profiles[i]; int b1 = (i+1) % 3; int b2 = (i+2) % 3; profile_t *pB = PosteriorProfile(profiles[b1], profiles[b2], NJ->branchlength[nodes[b1]], NJ->branchlength[nodes[b2]], NJ->transmat, &NJ->rates, NJ->nPos, /*nConstraints*/0); double len = NJ->branchlength[nodes[i]]; if (len < MLMinBranchLength) len = MLMinBranchLength; (void)MLPairOptimize(pA, pB, NJ->nPos, NJ->transmat, &NJ->rates, /*IN/OUT*/&len); NJ->branchlength[nodes[i]] = len; pB = FreeProfile(pB, NJ->nPos, /*nConstraints*/0); if (verbose>3) fprintf(stderr, "Optimize length for %d to %.3f\n", nodes[i], NJ->branchlength[nodes[i]]); } } if (node != NJ->root) { RecomputeProfile(/*IN/OUT*/NJ, /*IN/OUT*/upProfiles, node, /*useML*/true); DeleteUpProfile(upProfiles, NJ, node); } } } traversal = FreeTraversal(traversal,NJ); upProfiles = FreeUpProfiles(upProfiles,NJ); } void RecomputeMLProfiles(/*IN/OUT*/NJ_t *NJ) { traversal_t traversal = InitTraversal(NJ); int node = NJ->root; while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) { if (NJ->child[node].nChild == 2) { NJ->profiles[node] = FreeProfile(NJ->profiles[node], NJ->nPos, NJ->nConstraints); int *children = NJ->child[node].child; NJ->profiles[node] = PosteriorProfile(NJ->profiles[children[0]], NJ->profiles[children[1]], NJ->branchlength[children[0]], NJ->branchlength[children[1]], NJ->transmat, &NJ->rates, NJ->nPos, NJ->nConstraints); } } traversal = FreeTraversal(traversal, NJ); } void RecomputeProfiles(/*IN/OUT*/NJ_t *NJ, /*OPTIONAL*/distance_matrix_t *dmat) { traversal_t traversal = InitTraversal(NJ); int node = NJ->root; while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) { if (NJ->child[node].nChild == 2) { int *child = NJ->child[node].child; NJ->profiles[node] = FreeProfile(NJ->profiles[node], NJ->nPos, 
NJ->nConstraints); NJ->profiles[node] = AverageProfile(NJ->profiles[child[0]], NJ->profiles[child[1]], NJ->nPos, NJ->nConstraints, dmat, /*unweighted*/-1.0); } } traversal = FreeTraversal(traversal,NJ); } int NNI(/*IN/OUT*/NJ_t *NJ, int iRound, int nRounds, bool useML, /*IN/OUT*/nni_stats_t *stats, /*OUT*/double *dMaxDelta) { /* For each non-root node N, with children A,B, sibling C, and uncle D, we compare the current topology AB|CD to the alternate topologies AC|BD and AD|BC, by using the 4 relevant profiles. If useML is true, it uses quartet maximum likelihood, and it updates branch lengths as it goes. If useML is false, it uses the minimum-evolution criterion with log-corrected distances on profiles. (If logdist is false, then the log correction is not done.) If useML is false, then NNI() does NOT modify the branch lengths. Regardless of whether it changes the topology, it recomputes the profile for the node, using the pairwise distances and BIONJ-like weightings (if bionj is set). The parent's profile has changed, but recomputing it is not necessary because we will visit it before we need it (we use postorder, so we may visit the sibling and its children before we visit the parent, but we never consider an ancestor's profile, so that is OK). When we change the parent's profile, this alters the uncle's up-profile, so we remove that. Finally, if the topology has changed, we remove the up-profiles of the nodes. If we do an NNI during post-order traversal, the result is a bit tricky. E.g. if we are at node N, and have visited its children A and B but not its uncle C, and we do an NNI that swaps B & C, then the post-order traversal will visit C, and its children, but then on the way back up, it will skip N, as it has already visited it. So, the profile of N will not be recomputed: any changes beneath C will not be reflected in the profile of N, and the profile of N will be slightly stale. This will be corrected on the next round of NNIs. 
*/ double supportThreshold = useML ? treeLogLkDelta : MEMinDelta; int i; *dMaxDelta = 0.0; int nNNIThisRound = 0; if (NJ->nSeq <= 3) return(0); /* nothing to do */ if (verbose > 2) { fprintf(stderr, "Beginning round %d of NNIs with ml? %d\n", iRound, useML?1:0); PrintNJInternal(/*WRITE*/stderr, NJ, /*useLen*/useML && iRound > 0 ? 1 : 0); } /* For each node the upProfile or NULL */ profile_t **upProfiles = UpProfiles(NJ); traversal_t traversal = InitTraversal(NJ); /* Identify nodes we can skip traversing into */ int node; if (fastNNI) { for (node = 0; node < NJ->maxnode; node++) { if (node != NJ->root && node >= NJ->nSeq && stats[node].age >= 2 && stats[node].subtreeAge >= 2 && stats[node].support > supportThreshold) { int nodeABCD[4]; SetupABCD(NJ, node, NULL, NULL, /*OUT*/nodeABCD, useML); for (i = 0; i < 4; i++) if (stats[nodeABCD[i]].age == 0 && stats[nodeABCD[i]].support > supportThreshold) break; if (i == 4) { SkipTraversalInto(node, /*IN/OUT*/traversal); if (verbose > 2) fprintf(stderr, "Skipping subtree at %d: child %d %d parent %d age %d subtreeAge %d support %.3f\n", node, nodeABCD[0], nodeABCD[1], NJ->parent[node], stats[node].age, stats[node].subtreeAge, stats[node].support); } } } } int iDone = 0; bool bUp; node = NJ->root; while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, &bUp)) >= 0) { if (node < NJ->nSeq || node == NJ->root) continue; /* nothing to do for leaves or root */ if (bUp) { if(verbose > 2) fprintf(stderr, "Going up back to node %d\n", node); /* No longer needed */ for (i = 0; i < NJ->child[node].nChild; i++) DeleteUpProfile(upProfiles, NJ, NJ->child[node].child[i]); DeleteUpProfile(upProfiles, NJ, node); RecomputeProfile(/*IN/OUT*/NJ, /*IN/OUT*/upProfiles, node, useML); continue; } if ((iDone % 100) == 0) { char buf[100]; sprintf(buf, "%s NNI round %%d of %%d, %%d of %%d splits", useML ? 
"ML" : "ME"); if (iDone > 0) sprintf(buf+strlen(buf), ", %d changes", nNNIThisRound); if (nNNIThisRound > 0) sprintf(buf+strlen(buf), " (max delta %.3f)", *dMaxDelta); ProgressReport(buf, iRound+1, nRounds, iDone+1, NJ->maxnode - NJ->nSeq); } iDone++; profile_t *profiles[4]; int nodeABCD[4]; /* Note -- during the first round of ML NNIs, we use the min-evo-based branch lengths, which may be suboptimal */ SetupABCD(NJ, node, /*OUT*/profiles, /*IN/OUT*/upProfiles, /*OUT*/nodeABCD, useML); /* Given our 4 profiles, consider doing a swap */ int nodeA = nodeABCD[0]; int nodeB = nodeABCD[1]; int nodeC = nodeABCD[2]; int nodeD = nodeABCD[3]; nni_t choice = ABvsCD; if (verbose > 2) fprintf(stderr,"Considering NNI around %d: Swap A=%d B=%d C=%d D=up(%d) or parent %d\n", node, nodeA, nodeB, nodeC, nodeD, NJ->parent[node]); if (verbose > 3 && useML) { double len[5] = { NJ->branchlength[nodeA], NJ->branchlength[nodeB], NJ->branchlength[nodeC], NJ->branchlength[nodeD], NJ->branchlength[node] }; for (i=0; i < 5; i++) if (len[i] < MLMinBranchLength) len[i] = MLMinBranchLength; fprintf(stderr, "Starting quartet likelihood %.3f len %.3f %.3f %.3f %.3f %.3f\n", MLQuartetLogLk(profiles[0],profiles[1],profiles[2],profiles[3],NJ->nPos,NJ->transmat,&NJ->rates,len, /*site_lk*/NULL), len[0], len[1], len[2], len[3], len[4]); } numeric_t newlength[5]; double criteria[3]; if (useML) { for (i = 0; i < 4; i++) newlength[i] = NJ->branchlength[nodeABCD[i]]; newlength[4] = NJ->branchlength[node]; bool bFast = mlAccuracy < 2 && stats[node].age > 0; choice = MLQuartetNNI(profiles, NJ->transmat, &NJ->rates, NJ->nPos, NJ->nConstraints, /*OUT*/criteria, /*IN/OUT*/newlength, bFast); } else { choice = ChooseNNI(profiles, NJ->distance_matrix, NJ->nPos, NJ->nConstraints, /*OUT*/criteria); /* invert criteria so that higher is better, as in ML case, to simplify code below */ for (i = 0; i < 3; i++) criteria[i] = -criteria[i]; } if (choice == ACvsBD) { /* swap B and C */ ReplaceChild(/*IN/OUT*/NJ, node, nodeB, 
nodeC); ReplaceChild(/*IN/OUT*/NJ, NJ->parent[node], nodeC, nodeB); } else if (choice == ADvsBC) { /* swap A and C */ ReplaceChild(/*IN/OUT*/NJ, node, nodeA, nodeC); ReplaceChild(/*IN/OUT*/NJ, NJ->parent[node], nodeC, nodeA); } if (useML) { /* update branch length for the internal branch, and of any branches that lead to leaves, b/c those will not are not the internal branch for NNI and would not otherwise be set. */ if (choice == ADvsBC) { /* For ADvsBC, MLQuartetNNI swaps B with D, but we swap A with C */ double length2[5] = { newlength[LEN_C], newlength[LEN_D], newlength[LEN_A], newlength[LEN_B], newlength[LEN_I] }; int i; for (i = 0; i < 5; i++) newlength[i] = length2[i]; /* and swap A and C */ double tmp = newlength[LEN_A]; newlength[LEN_A] = newlength[LEN_C]; newlength[LEN_C] = tmp; } else if (choice == ACvsBD) { /* swap B and C */ double tmp = newlength[LEN_B]; newlength[LEN_B] = newlength[LEN_C]; newlength[LEN_C] = tmp; } NJ->branchlength[node] = newlength[LEN_I]; NJ->branchlength[nodeA] = newlength[LEN_A]; NJ->branchlength[nodeB] = newlength[LEN_B]; NJ->branchlength[nodeC] = newlength[LEN_C]; NJ->branchlength[nodeD] = newlength[LEN_D]; } if (verbose>2 && (choice != ABvsCD || verbose > 2)) fprintf(stderr,"NNI around %d: Swap A=%d B=%d C=%d D=out(C) -- choose %s %s %.4f\n", node, nodeA, nodeB, nodeC, choice == ACvsBD ? "AC|BD" : (choice == ABvsCD ? "AB|CD" : "AD|BC"), useML ? "delta-loglk" : "-deltaLen", criteria[choice] - criteria[ABvsCD]); if(verbose >= 3 && slow && useML) fprintf(stderr, "Old tree lk -- %.4f\n", TreeLogLk(NJ, /*site_likelihoods*/NULL)); /* update stats, *dMaxDelta, etc. 
*/ if (choice == ABvsCD) { stats[node].age++; } else { if (useML) nML_NNI++; else nNNI++; nNNIThisRound++; stats[node].age = 0; stats[nodeA].age = 0; stats[nodeB].age = 0; stats[nodeC].age = 0; stats[nodeD].age = 0; } stats[node].delta = criteria[choice] - criteria[ABvsCD]; /* 0 if ABvsCD */ if (stats[node].delta > *dMaxDelta) *dMaxDelta = stats[node].delta; /* support is improvement of score for self over better of alternatives */ stats[node].support = 1e20; for (i = 0; i < 3; i++) if (choice != i && criteria[choice]-criteria[i] < stats[node].support) stats[node].support = criteria[choice]-criteria[i]; /* subtreeAge is the number of rounds since self or descendent had a significant improvement */ if (stats[node].delta > supportThreshold) stats[node].subtreeAge = 0; else { stats[node].subtreeAge++; for (i = 0; i < 2; i++) { int child = NJ->child[node].child[i]; if (stats[node].subtreeAge > stats[child].subtreeAge) stats[node].subtreeAge = stats[child].subtreeAge; } } /* update profiles and free up unneeded up-profiles */ if (choice == ABvsCD) { /* No longer needed */ DeleteUpProfile(upProfiles, NJ, nodeA); DeleteUpProfile(upProfiles, NJ, nodeB); DeleteUpProfile(upProfiles, NJ, nodeC); RecomputeProfile(/*IN/OUT*/NJ, /*IN/OUT*/upProfiles, node, useML); if(slow && useML) UpdateForNNI(NJ, node, upProfiles, useML); } else { UpdateForNNI(NJ, node, upProfiles, useML); } if(verbose > 2 && slow && useML) { /* Note we recomputed profiles back up to root already if slow */ PrintNJInternal(/*WRITE*/stderr, NJ, /*useLen*/true); fprintf(stderr, "New tree lk -- %.4f\n", TreeLogLk(NJ, /*site_likelihoods*/NULL)); } } /* end postorder traversal */ traversal = FreeTraversal(traversal,NJ); if (verbose>=2) { int nUp = 0; for (i = 0; i < NJ->maxnodes; i++) if (upProfiles[i] != NULL) nUp++; fprintf(stderr, "N up profiles at end of NNI: %d\n", nUp); } upProfiles = FreeUpProfiles(upProfiles,NJ); return(nNNIThisRound); } nni_stats_t *InitNNIStats(NJ_t *NJ) { nni_stats_t *stats = 
mymalloc(sizeof(nni_stats_t)*NJ->maxnode);
  const int LargeAge = 1000000;
  int i;
  for (i = 0; i < NJ->maxnode; i++) {
    stats[i].delta = 0;
    stats[i].support = 0;
    /* Leaves and the root never pivot an NNI, so mark them as very old;
       internal nodes start fresh */
    if (i == NJ->root || i < NJ->nSeq) {
      stats[i].age = LargeAge;
      stats[i].subtreeAge = LargeAge;
    } else {
      stats[i].age = 0;
      stats[i].subtreeAge = 0;
    }
  }
  return(stats);
}

nni_stats_t *FreeNNIStats(nni_stats_t *stats, NJ_t *NJ) {
  return(myfree(stats, sizeof(nni_stats_t)*NJ->maxnode));
}

/* Perform a chain of up to maxSteps NNIs that drag nodeMove through the tree,
   recording each swap and its change in tree length in steps[]; returns the
   number of steps actually taken. bFirstAC selects which alternate topology
   the first NNI uses; after that the better of the two alternates is chosen. */
int FindSPRSteps(/*IN/OUT*/NJ_t *NJ,
		 int nodeMove,	 /* the node to move multiple times */
		 int nodeAround, /* sibling or parent of node to NNI to start the chain */
		 /*IN/OUT*/profile_t **upProfiles,
		 /*OUT*/spr_step_t *steps,
		 int maxSteps,
		 bool bFirstAC) {
  int iStep;
  for (iStep = 0; iStep < maxSteps; iStep++) {
    if (NJ->child[nodeAround].nChild != 2)
      break;			/* no further to go */

    /* Consider the NNIs around nodeAround */
    profile_t *profiles[4];
    int nodeABCD[4];
    SetupABCD(NJ, nodeAround, /*OUT*/profiles, /*IN/OUT*/upProfiles, /*OUT*/nodeABCD, /*useML*/false);
    double criteria[3];
    (void) ChooseNNI(profiles, NJ->distance_matrix, NJ->nPos, NJ->nConstraints,
		     /*OUT*/criteria);

    /* Do & save the swap */
    spr_step_t *step = &steps[iStep];
    if (iStep == 0 ?
bFirstAC : criteria[ACvsBD] < criteria[ADvsBC]) {
      /* swap B & C to put AC together */
      step->deltaLength = criteria[ACvsBD] - criteria[ABvsCD];
      step->nodes[0] = nodeABCD[1];
      step->nodes[1] = nodeABCD[2];
    } else {
      /* swap AC to put AD together */
      step->deltaLength = criteria[ADvsBC] - criteria[ABvsCD];
      step->nodes[0] = nodeABCD[0];
      step->nodes[1] = nodeABCD[2];
    }

    if (verbose>3) {
      fprintf(stderr, "SPR chain step %d for %d around %d swap %d %d deltaLen %.5f\n",
	      iStep+1, nodeAround, nodeMove, step->nodes[0], step->nodes[1], step->deltaLength);
      if (verbose>4)
	PrintNJInternal(stderr, NJ, /*useLen*/false);
    }
    /* Apply the swap and refresh profiles around the pivot */
    ReplaceChild(/*IN/OUT*/NJ, nodeAround, step->nodes[0], step->nodes[1]);
    ReplaceChild(/*IN/OUT*/NJ, NJ->parent[nodeAround], step->nodes[1], step->nodes[0]);
    UpdateForNNI(/*IN/OUT*/NJ, nodeAround, /*IN/OUT*/upProfiles, /*useML*/false);

    /* set the new nodeAround -- either parent(nodeMove) or sibling(nodeMove) --
       so that it different from current nodeAround
     */
    int newAround[2] = { NJ->parent[nodeMove], Sibling(NJ, nodeMove) };
    if (NJ->parent[nodeMove] == NJ->root)
      RootSiblings(NJ, nodeMove, /*OUT*/newAround);
    assert(newAround[0] == nodeAround || newAround[1] == nodeAround);
    assert(newAround[0] != newAround[1]);
    nodeAround = newAround[newAround[0] == nodeAround ?
1 : 0];
  }
  return(iStep);
}

/* Undo one recorded SPR step: swap the two saved nodes back (by now each is a
   child of a different parent) and refresh profiles around the lower parent */
void UnwindSPRStep(/*IN/OUT*/NJ_t *NJ,
		   /*IN*/spr_step_t *step,
		   /*IN/OUT*/profile_t **upProfiles) {
  int parents[2];
  int i;
  for (i = 0; i < 2; i++) {
    assert(step->nodes[i] >= 0 && step->nodes[i] < NJ->maxnodes);
    parents[i] = NJ->parent[step->nodes[i]];
    assert(parents[i] >= 0);
  }
  assert(parents[0] != parents[1]);
  ReplaceChild(/*IN/OUT*/NJ, parents[0], step->nodes[0], step->nodes[1]);
  ReplaceChild(/*IN/OUT*/NJ, parents[1], step->nodes[1], step->nodes[0]);

  /* Update the profile of the parent that is lower in the tree (the one whose
     own parent is the other parent) */
  int iYounger = 0;
  if (NJ->parent[parents[0]] == parents[1]) {
    iYounger = 0;
  } else {
    assert(NJ->parent[parents[1]] == parents[0]);
    iYounger = 1;
  }
  UpdateForNNI(/*IN/OUT*/NJ, parents[iYounger], /*IN/OUT*/upProfiles, /*useML*/false);
}

/* Update the profile of node and its ancestor, and delete nearby out-profiles */
void UpdateForNNI(/*IN/OUT*/NJ_t *NJ, int node, /*IN/OUT*/profile_t **upProfiles,
		  bool useML) {
  int i;
  if (slow) {
    /* exhaustive update */
    for (i = 0; i < NJ->maxnodes; i++)
      DeleteUpProfile(upProfiles, NJ, i);

    /* update profiles back to root */
    int ancestor;
    for (ancestor = node; ancestor >= 0; ancestor = NJ->parent[ancestor])
      RecomputeProfile(/*IN/OUT*/NJ, upProfiles, ancestor, useML);

    /* remove any up-profiles made while doing that*/
    for (i = 0; i < NJ->maxnodes; i++)
      DeleteUpProfile(upProfiles, NJ, i);
  } else {
    /* if fast, only update around self
       note that upProfile(parent) is still OK after an NNI, but
       up-profiles of uncles may not be */
    DeleteUpProfile(upProfiles, NJ, node);
    for (i = 0; i < NJ->child[node].nChild; i++)
      DeleteUpProfile(upProfiles, NJ, NJ->child[node].child[i]);
    assert(node != NJ->root);
    int parent = NJ->parent[node];
    int neighbors[2] = { parent, Sibling(NJ, node) };
    if (parent == NJ->root)
      RootSiblings(NJ, node, /*OUT*/neighbors);
    DeleteUpProfile(upProfiles, NJ, neighbors[0]);
    DeleteUpProfile(upProfiles, NJ, neighbors[1]);
    int uncle = Sibling(NJ, parent);
    if (uncle >= 0)
      DeleteUpProfile(upProfiles, NJ, uncle);
    RecomputeProfile(/*IN/OUT*/NJ, upProfiles, node,
useML); RecomputeProfile(/*IN/OUT*/NJ, upProfiles, parent, useML); } } void SPR(/*IN/OUT*/NJ_t *NJ, int maxSPRLength, int iRound, int nRounds) { /* Given a non-root node N with children A,B, sibling C, and uncle D, we can try to move A by doing three types of moves (4 choices): "down" -- swap A with a child of B (if B is not a leaf) [2 choices] "over" -- swap B with C "up" -- swap A with D We follow down moves with down moves, over moves with down moves, and up moves with either up or over moves. (Other choices are just backing up and hence useless.) As with NNIs, we keep track of up-profiles as we go. However, some of the regular profiles may also become "stale" so it is a bit trickier. We store the traversal before we do SPRs to avoid any possible infinite loop */ double last_tot_len = 0.0; if (NJ->nSeq <= 3 || maxSPRLength < 1) return; if (slow) last_tot_len = TreeLength(NJ, /*recomputeLengths*/true); int *nodeList = mymalloc(sizeof(int) * NJ->maxnodes); int nodeListLen = 0; traversal_t traversal = InitTraversal(NJ); int node = NJ->root; while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) { nodeList[nodeListLen++] = node; } assert(nodeListLen == NJ->maxnode); traversal = FreeTraversal(traversal,NJ); profile_t **upProfiles = UpProfiles(NJ); spr_step_t *steps = mymalloc(sizeof(spr_step_t) * maxSPRLength); /* current chain of SPRs */ int i; for (i = 0; i < nodeListLen; i++) { node = nodeList[i]; if ((i % 100) == 0) ProgressReport("SPR round %3d of %3d, %d of %d nodes", iRound+1, nRounds, i+1, nodeListLen); if (node == NJ->root) continue; /* nothing to do for root */ /* The nodes to NNI around */ int nodeAround[2] = { NJ->parent[node], Sibling(NJ, node) }; if (NJ->parent[node] == NJ->root) { /* NNI around both siblings instead */ RootSiblings(NJ, node, /*OUT*/nodeAround); } bool bChanged = false; int iAround; for (iAround = 0; iAround < 2 && bChanged == false; iAround++) { int ACFirst; for (ACFirst = 0; ACFirst < 2 && bChanged == 
false; ACFirst++) { if(verbose > 3) PrintNJInternal(stderr, NJ, /*useLen*/false); int chainLength = FindSPRSteps(/*IN/OUT*/NJ, node, nodeAround[iAround], upProfiles, /*OUT*/steps, maxSPRLength, (bool)ACFirst); double dMinDelta = 0.0; int iCBest = -1; double dTotDelta = 0.0; int iC; for (iC = 0; iC < chainLength; iC++) { dTotDelta += steps[iC].deltaLength; if (dTotDelta < dMinDelta) { dMinDelta = dTotDelta; iCBest = iC; } } if (verbose>3) { fprintf(stderr, "SPR %s %d around %d chainLength %d of %d deltaLength %.5f swaps:", iCBest >= 0 ? "move" : "abandoned", node,nodeAround[iAround],iCBest+1,chainLength,dMinDelta); for (iC = 0; iC < chainLength; iC++) fprintf(stderr, " (%d,%d)%.4f", steps[iC].nodes[0], steps[iC].nodes[1], steps[iC].deltaLength); fprintf(stderr,"\n"); } for (iC = chainLength - 1; iC > iCBest; iC--) UnwindSPRStep(/*IN/OUT*/NJ, /*IN*/&steps[iC], /*IN/OUT*/upProfiles); if(verbose > 3) PrintNJInternal(stderr, NJ, /*useLen*/false); while (slow && iCBest >= 0) { double expected_tot_len = last_tot_len + dMinDelta; double new_tot_len = TreeLength(NJ, /*recompute*/true); if (verbose > 2) fprintf(stderr, "Total branch-length is now %.4f was %.4f expected %.4f\n", new_tot_len, last_tot_len, expected_tot_len); if (new_tot_len < last_tot_len) { last_tot_len = new_tot_len; break; /* no rewinding necessary */ } if (verbose > 2) fprintf(stderr, "Rewinding SPR to %d\n",iCBest); UnwindSPRStep(/*IN/OUT*/NJ, /*IN*/&steps[iCBest], /*IN/OUT*/upProfiles); dMinDelta -= steps[iCBest].deltaLength; iCBest--; } if (iCBest >= 0) bChanged = true; } /* loop over which step to take at 1st NNI */ } /* loop over which node to pivot around */ if (bChanged) { nSPR++; /* the SPR move is OK */ /* make sure all the profiles are OK */ int j; for (j = 0; j < NJ->maxnodes; j++) DeleteUpProfile(upProfiles, NJ, j); int ancestor; for (ancestor = NJ->parent[node]; ancestor >= 0; ancestor = NJ->parent[ancestor]) RecomputeProfile(/*IN/OUT*/NJ, upProfiles, ancestor, /*useML*/false); } } /* end loop 
over subtrees to prune & regraft */
  steps = myfree(steps, sizeof(spr_step_t) * maxSPRLength);
  upProfiles = FreeUpProfiles(upProfiles,NJ);
  nodeList = myfree(nodeList, sizeof(int) * NJ->maxnodes);
}

/* Recompute the profile of internal node 'node' from its two children.
   If useML (or not bionj), the children are combined directly; otherwise
   SetupABCD() gathers the quartet and QuartetWeight() gives the BIONJ-style
   weighting of child 0. Leaves and the root are left untouched.
   The old profile is freed and replaced in NJ->profiles[node]. */
void RecomputeProfile(/*IN/OUT*/NJ_t *NJ, /*IN/OUT*/profile_t **upProfiles, int node,
                      bool useML) {
  if (node < NJ->nSeq || node == NJ->root)
    return;                     /* no profile to compute */
  assert(NJ->child[node].nChild==2);

  profile_t *profiles[4];
  double weight = 0.5;          /* even weighting unless BIONJ weighting applies */
  if (useML || !bionj) {
    profiles[0] = NJ->profiles[NJ->child[node].child[0]];
    profiles[1] = NJ->profiles[NJ->child[node].child[1]];
  } else {
    int nodeABCD[4];
    SetupABCD(NJ, node, /*OUT*/profiles, /*IN/OUT*/upProfiles, /*OUT*/nodeABCD, useML);
    weight = QuartetWeight(profiles, NJ->distance_matrix, NJ->nPos);
  }
  if (verbose>3) {
    if (useML) {
      fprintf(stderr, "Recompute %d from %d %d lengths %.4f %.4f\n",
              node, NJ->child[node].child[0], NJ->child[node].child[1],
              NJ->branchlength[NJ->child[node].child[0]],
              NJ->branchlength[NJ->child[node].child[1]]);
    } else {
      fprintf(stderr, "Recompute %d from %d %d weight %.3f\n",
              node, NJ->child[node].child[0], NJ->child[node].child[1], weight);
    }
  }
  /* free the stale profile, then rebuild it from the children */
  NJ->profiles[node] = FreeProfile(NJ->profiles[node], NJ->nPos, NJ->nConstraints);
  if (useML) {
    NJ->profiles[node] = PosteriorProfile(profiles[0], profiles[1],
                                          NJ->branchlength[NJ->child[node].child[0]],
                                          NJ->branchlength[NJ->child[node].child[1]],
                                          NJ->transmat, &NJ->rates,
                                          NJ->nPos, NJ->nConstraints);
  } else {
    NJ->profiles[node] = AverageProfile(profiles[0], profiles[1],
                                        NJ->nPos, NJ->nConstraints,
                                        NJ->distance_matrix, weight);
  }
}

/* The BIONJ-like formula for the weight of A when building a profile for AB is
     1/2 + (avgD(B,CD) - avgD(A,CD))/(2*d(A,B))
   Returns -1.0 (meaning even weighting) if bionj is off or d(A,B) is tiny. */
double QuartetWeight(profile_t *profiles[4], distance_matrix_t *dmat, int nPos) {
  if (!bionj)
    return(-1.0); /* even weighting */
  double d[6];
  CorrectedPairDistances(profiles, 4, dmat, nPos, /*OUT*/d);
  if (d[qAB] < 0.01)
    return -1.0;
  double weight = 0.5 +
((d[qBC]+d[qBD])-(d[qAC]+d[qAD]))/(4*d[qAB]);
  /* clamp the weight into [0,1] */
  if (weight < 0)
    weight = 0;
  if (weight > 1)
    weight = 1;
  return (weight);
}

/* Resets the children entry of parent and also the parent entry of newchild.
   Asserts if oldchild is not actually a child of parent. */
void ReplaceChild(/*IN/OUT*/NJ_t *NJ, int parent, int oldchild, int newchild) {
  NJ->parent[newchild] = parent;
  int iChild;
  for (iChild = 0; iChild < NJ->child[parent].nChild; iChild++) {
    if (NJ->child[parent].child[iChild] == oldchild) {
      NJ->child[parent].child[iChild] = newchild;
      return;
    }
  }
  assert(0);
}

/* Recomputes all branch lengths

   For internal branches such as (A,B) vs. (C,D), uses the formula

   length(AB|CD) = (d(A,C)+d(A,D)+d(B,C)+d(B,D))/4 - d(A,B)/2 - d(C,D)/2

   (where all distances are profile distances - diameters).

   For external branches (e.g. to leaves) A vs. (B,C), use the formula

   length(A|BC) = (d(A,B)+d(A,C)-d(B,C))/2
*/
void UpdateBranchLengths(/*IN/OUT*/NJ_t *NJ) {
  if (NJ->nSeq < 2)
    return;
  else if (NJ->nSeq == 2) {
    /* trivial 2-taxon tree: split the (possibly log-corrected) distance evenly */
    int root = NJ->root;
    int nodeA = NJ->child[root].child[0];
    int nodeB = NJ->child[root].child[1];
    besthit_t h;
    ProfileDist(NJ->profiles[nodeA],NJ->profiles[nodeB],
                NJ->nPos, NJ->distance_matrix, /*OUT*/&h);
    if (logdist)
      h.dist = LogCorrect(h.dist);
    NJ->branchlength[nodeA] = h.dist/2.0;
    NJ->branchlength[nodeB] = h.dist/2.0;
    return;
  }

  profile_t **upProfiles = UpProfiles(NJ);
  traversal_t traversal = InitTraversal(NJ);
  int node = NJ->root;

  while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) {
    /* reset branch length of node (distance to its parent) */
    if (node == NJ->root)
      continue; /* no branch length to set */
    if (node < NJ->nSeq) { /* a leaf */
      profile_t *profileA = NJ->profiles[node];
      profile_t *profileB = NULL;
      profile_t *profileC = NULL;

      int sib = Sibling(NJ,node);
      if (sib == -1) { /* at root, have 2 siblings */
        int sibs[2];
        RootSiblings(NJ, node, /*OUT*/sibs);
        profileB = NJ->profiles[sibs[0]];
        profileC = NJ->profiles[sibs[1]];
      } else {
        profileB = NJ->profiles[sib];
        profileC = GetUpProfile(/*IN/OUT*/upProfiles,
NJ, NJ->parent[node], /*useML*/false);
      }
      profile_t *profiles[3] = {profileA,profileB,profileC};
      double d[3]; /*AB,AC,BC*/
      CorrectedPairDistances(profiles, 3, NJ->distance_matrix, NJ->nPos, /*OUT*/d);
      /* d(A,BC) = (dAB+dAC-dBC)/2 */
      NJ->branchlength[node] = (d[0]+d[1]-d[2])/2.0;
    } else {
      /* internal branch: use the quartet formula from the header comment */
      profile_t *profiles[4];
      int nodeABCD[4];
      SetupABCD(NJ, node, /*OUT*/profiles, /*IN/OUT*/upProfiles, /*OUT*/nodeABCD, /*useML*/false);
      double d[6];
      CorrectedPairDistances(profiles, 4, NJ->distance_matrix, NJ->nPos, /*OUT*/d);
      NJ->branchlength[node] = (d[qAC]+d[qAD]+d[qBC]+d[qBD])/4.0
        - (d[qAB]+d[qCD])/2.0;

      /* no longer needed */
      DeleteUpProfile(upProfiles, NJ, nodeABCD[0]);
      DeleteUpProfile(upProfiles, NJ, nodeABCD[1]);
    }
  }
  traversal = FreeTraversal(traversal,NJ);
  upProfiles = FreeUpProfiles(upProfiles,NJ);
}

/* Pick columns for resampling, stored as returned_vector[iBoot*nPos + j] */
int *ResampleColumns(int nPos, int nBootstrap) {
  long lPos = nPos; /* to prevent overflow on very long alignments when
                       multiplying nPos * nBootstrap */
  int *col = (int*)mymalloc(sizeof(int)*lPos*(size_t)nBootstrap);
  int i;
  for (i = 0; i < nBootstrap; i++) {
    int j;
    for (j = 0; j < nPos; j++) {
      /* clamp defensively to [0, nPos-1]; assumes knuth_rand() is ~[0,1] —
         TODO confirm its range */
      int pos = (int)(knuth_rand() * nPos);
      if (pos<0)
        pos = 0;
      else if (pos == nPos)
        pos = nPos-1;
      col[i*lPos + j] = pos;
    }
  }
  if (verbose > 5) {
    /* dump the first few replicates for debugging */
    for (i=0; i < 3 && i < nBootstrap; i++) {
      fprintf(stderr,"Boot%d",i);
      int j;
      for (j = 0; j < nPos; j++) {
        fprintf(stderr,"\t%d",col[i*lPos+j]);
      }
      fprintf(stderr,"\n");
    }
  }
  return(col);
}

void ReliabilityNJ(/*IN/OUT*/NJ_t *NJ, int nBootstrap) {
  /* For each non-root node N, with children A,B, parent P, sibling C, and
     grandparent G, we test the reliability of the split (A,B) versus rest
     by comparing the profiles of A, B, C, and the "up-profile" of P.

     Each node's upProfile is the average of its sibling's (down)-profile
     + its parent's up-profile
     (If node's parent is the root, then there are two siblings and we
     don't need an up-profile)

     To save memory, we do depth-first-search down from the root, and
     we only keep up-profiles for nodes in the active path. */
  if (NJ->nSeq <= 3 || nBootstrap <= 0)
    return;                     /* nothing to do */
  int *col = ResampleColumns(NJ->nPos, nBootstrap);

  profile_t **upProfiles = UpProfiles(NJ);
  traversal_t traversal = InitTraversal(NJ);
  int node = NJ->root;
  int iNodesDone = 0;
  while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) {
    if (node < NJ->nSeq || node == NJ->root)
      continue; /* nothing to do for leaves or root */

    if(iNodesDone > 0 && (iNodesDone % 100) == 0)
      ProgressReport("Local bootstrap for %6d of %6d internal splits", iNodesDone, NJ->nSeq-3, 0, 0);
    iNodesDone++;

    profile_t *profiles[4];
    int nodeABCD[4];
    SetupABCD(NJ, node, /*OUT*/profiles, /*IN/OUT*/upProfiles, /*OUT*/nodeABCD, /*useML*/false);

    NJ->support[node] = SplitSupport(profiles[0], profiles[1], profiles[2], profiles[3],
                                     NJ->distance_matrix, NJ->nPos, nBootstrap, col);

    /* no longer needed */
    DeleteUpProfile(upProfiles, NJ, nodeABCD[0]);
    DeleteUpProfile(upProfiles, NJ, nodeABCD[1]);
    DeleteUpProfile(upProfiles, NJ, nodeABCD[2]);
  }
  traversal = FreeTraversal(traversal,NJ);
  upProfiles = FreeUpProfiles(upProfiles,NJ);
  col = myfree(col, sizeof(int)*((size_t)NJ->nPos)*nBootstrap);
}

/* Allocate an empty profile: weights/codes arrays sized for nPos;
   vectors and codeDist start NULL; constraint counters (nOn/nOff) are
   allocated only when nConstraints > 0. Caller owns the result
   (free with FreeProfile). */
profile_t *NewProfile(int nPos, int nConstraints) {
  profile_t *profile = (profile_t *)mymalloc(sizeof(profile_t));
  profile->weights = mymalloc(sizeof(numeric_t)*nPos);
  profile->codes = mymalloc(sizeof(unsigned char)*nPos);
  profile->vectors = NULL;
  profile->nVectors = 0;
  profile->codeDist = NULL;
  if (nConstraints == 0) {
    profile->nOn = NULL;
    profile->nOff = NULL;
  } else {
    profile->nOn = mymalloc(sizeof(int)*nConstraints);
    profile->nOff = mymalloc(sizeof(int)*nConstraints);
  }
  return(profile);
}

profile_t *FreeProfile(profile_t
*profile, int nPos, int nConstraints) {
  /* Free every array owned by the profile, then the profile itself;
     returns NULL so callers can write p = FreeProfile(p, ...). */
  if(profile==NULL)
    return(NULL);
  myfree(profile->codes, nPos);
  myfree(profile->weights, nPos);
  myfree(profile->vectors, sizeof(numeric_t)*nCodes*profile->nVectors);
  myfree(profile->codeDist, sizeof(numeric_t)*nCodes*nPos);
  if (nConstraints > 0) {
    myfree(profile->nOn, sizeof(int)*nConstraints);
    myfree(profile->nOff, sizeof(int)*nConstraints);
  }
  return(myfree(profile, sizeof(profile_t)));
}

/* Gather the quartet around internal node 'node':
   A,B = node's children; C = node's sibling (or the first root sibling);
   D = the parent's up-profile (or the second root sibling when the parent
   is the root). If profiles is NULL, only nodeABCD is filled in. */
void SetupABCD(NJ_t *NJ, int node,
               /* the 4 profiles; the last one is an outprofile */
               /*OPTIONAL OUT*/profile_t *profiles[4],
               /*OPTIONAL IN/OUT*/profile_t **upProfiles,
               /*OUT*/int nodeABCD[4],
               bool useML) {
  int parent = NJ->parent[node];
  assert(parent >= 0);
  assert(NJ->child[node].nChild == 2);
  nodeABCD[0] = NJ->child[node].child[0]; /*A*/
  nodeABCD[1] = NJ->child[node].child[1]; /*B*/

  profile_t *profile4 = NULL;
  if (parent == NJ->root) {
    int sibs[2];
    RootSiblings(NJ, node, /*OUT*/sibs);
    nodeABCD[2] = sibs[0];
    nodeABCD[3] = sibs[1];
    if (profiles == NULL)
      return;
    profile4 = NJ->profiles[sibs[1]];
  } else {
    nodeABCD[2] = Sibling(NJ,node);
    assert(nodeABCD[2] >= 0);
    nodeABCD[3] = parent;
    if (profiles == NULL)
      return;
    profile4 = GetUpProfile(upProfiles,NJ,parent,useML);
  }
  assert(upProfiles != NULL);
  int i;
  for (i = 0; i < 3; i++)
    profiles[i] = NJ->profiles[nodeABCD[i]];
  profiles[3] = profile4;
}

/* Returns node's sibling, or -1 when node has no parent or the parent is the
   root (the root is trifurcating — see RootSiblings — so there is no unique
   sibling there). */
int Sibling(NJ_t *NJ, int node) {
  int parent = NJ->parent[node];
  if (parent < 0 || parent == NJ->root)
    return(-1);
  int iChild;
  for(iChild=0;iChild<NJ->child[parent].nChild;iChild++) {
    if(NJ->child[parent].child[iChild] != node)
      return (NJ->child[parent].child[iChild]);
  }
  assert(0);
  return(-1);
}

/* Sets sibs[] to the two children of the (3-child) root other than node;
   node must itself be a child of the root. */
void RootSiblings(NJ_t *NJ, int node, /*OUT*/int sibs[2]) {
  assert(NJ->parent[node] == NJ->root);
  assert(NJ->child[NJ->root].nChild == 3);

  int nSibs = 0;
  int iChild;
  for(iChild=0; iChild < NJ->child[NJ->root].nChild; iChild++) {
    int child = NJ->child[NJ->root].child[iChild];
    if (child != node)
      sibs[nSibs++] = child;
  }
  assert(nSibs==2);
}

void
TestSplitsML(/*IN/OUT*/NJ_t *NJ, /*OUT*/SplitCount_t *splitcount, int nBootstrap) {
  /* For each internal split AB|CD, compare its log-likelihood against the
     two NNI alternatives (AC|BD, AD|BC), tally bad splits and constraint
     violations into splitcount, and (if nBootstrap > 0) store resampling
     support in NJ->support[node]. */
  const double tolerance = 1e-6;
  splitcount->nBadSplits = 0;
  splitcount->nConstraintViolations = 0;
  splitcount->nBadBoth = 0;
  splitcount->nSplits = 0;
  splitcount->dWorstDeltaUnconstrained = 0;
  splitcount->dWorstDeltaConstrained = 0;

  profile_t **upProfiles = UpProfiles(NJ);
  traversal_t traversal = InitTraversal(NJ);
  int node = NJ->root;
  int *col = nBootstrap > 0 ? ResampleColumns(NJ->nPos, nBootstrap) : NULL;
  double *site_likelihoods[3];
  int choice;
  for (choice = 0; choice < 3; choice++)
    site_likelihoods[choice] = mymalloc(sizeof(double)*NJ->nPos);

  int iNodesDone = 0;
  while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) {
    if (node < NJ->nSeq || node == NJ->root)
      continue; /* nothing to do for leaves or root */

    if(iNodesDone > 0 && (iNodesDone % 100) == 0)
      ProgressReport("ML split tests for %6d of %6d internal splits", iNodesDone, NJ->nSeq-3, 0, 0);
    iNodesDone++;

    profile_t *profiles[4];
    int nodeABCD[4];
    SetupABCD(NJ, node, /*OUT*/profiles, /*IN/OUT*/upProfiles, /*OUT*/nodeABCD, /*useML*/true);
    double loglk[3];
    double len[5];
    int i;
    for (i = 0; i < 4; i++)
      len[i] = NJ->branchlength[nodeABCD[i]];
    len[4] = NJ->branchlength[node];
    double lenABvsCD[5] = {len[LEN_A], len[LEN_B], len[LEN_C], len[LEN_D], len[LEN_I]};
    double lenACvsBD[5] = {len[LEN_A], len[LEN_C], len[LEN_B], len[LEN_D], len[LEN_I]}; /* Swap B & C */
    double lenADvsBC[5] = {len[LEN_A], len[LEN_D], len[LEN_C], len[LEN_B], len[LEN_I]}; /* Swap B & D */

    /* evaluate the three topologies (in parallel sections when OPENMP) */
    {
#ifdef OPENMP
#pragma omp parallel
#pragma omp sections
#endif
      {
#ifdef OPENMP
#pragma omp section
#endif
        {
          /* Lengths are already optimized for ABvsCD */
          loglk[ABvsCD] = MLQuartetLogLk(profiles[0], profiles[1], profiles[2], profiles[3],
                                         NJ->nPos, NJ->transmat, &NJ->rates,
                                         /*IN/OUT*/lenABvsCD,
                                         /*OUT*/site_likelihoods[ABvsCD]);
        }
#ifdef OPENMP
#pragma omp section
#endif
        {
          loglk[ACvsBD] = MLQuartetOptimize(profiles[0], profiles[2], profiles[1], profiles[3],
                                            NJ->nPos, NJ->transmat, &NJ->rates,
                                            /*IN/OUT*/lenACvsBD, /*pStarTest*/NULL,
                                            /*OUT*/site_likelihoods[ACvsBD]);
        }
#ifdef OPENMP
#pragma omp section
#endif
        {
          loglk[ADvsBC] = MLQuartetOptimize(profiles[0], profiles[3], profiles[2], profiles[1],
                                            NJ->nPos, NJ->transmat, &NJ->rates,
                                            /*IN/OUT*/lenADvsBC, /*pStarTest*/NULL,
                                            /*OUT*/site_likelihoods[ADvsBC]);
        }
      }
    }

    /* do a second pass on the better alternative if it is close */
    if (loglk[ACvsBD] > loglk[ADvsBC]) {
      if (mlAccuracy > 1 || loglk[ACvsBD] > loglk[ABvsCD] - closeLogLkLimit) {
        loglk[ACvsBD] = MLQuartetOptimize(profiles[0], profiles[2], profiles[1], profiles[3],
                                          NJ->nPos, NJ->transmat, &NJ->rates,
                                          /*IN/OUT*/lenACvsBD, /*pStarTest*/NULL,
                                          /*OUT*/site_likelihoods[ACvsBD]);
      }
    } else {
      if (mlAccuracy > 1 || loglk[ADvsBC] > loglk[ABvsCD] - closeLogLkLimit) {
        loglk[ADvsBC] = MLQuartetOptimize(profiles[0], profiles[3], profiles[2], profiles[1],
                                          NJ->nPos, NJ->transmat, &NJ->rates,
                                          /*IN/OUT*/lenADvsBC, /*pStarTest*/NULL,
                                          /*OUT*/site_likelihoods[ADvsBC]);
      }
    }

    /* pick the most likely of the three topologies */
    if (loglk[ABvsCD] >= loglk[ACvsBD] && loglk[ABvsCD] >= loglk[ADvsBC])
      choice = ABvsCD;
    else if (loglk[ACvsBD] >= loglk[ABvsCD] && loglk[ACvsBD] >= loglk[ADvsBC])
      choice = ACvsBD;
    else
      choice = ADvsBC;
    bool badSplit = loglk[choice] > loglk[ABvsCD] + treeLogLkDelta; /* ignore small changes in likelihood */

    /* constraint penalties, indexed by nni_t (lower is better) */
    double p[3];
    QuartetConstraintPenalties(profiles, NJ->nConstraints, /*OUT*/p);
    bool bBadConstr = p[ABvsCD] > p[ACvsBD] + tolerance || p[ABvsCD] > p[ADvsBC] + tolerance;
    bool violateConstraint = false;
    int iC;
    for (iC=0; iC < NJ->nConstraints; iC++) {
      if (SplitViolatesConstraint(profiles, iC)) {
        violateConstraint = true;
        break;
      }
    }
    splitcount->nSplits++;
    if (violateConstraint)
      splitcount->nConstraintViolations++;
    if (badSplit)
      splitcount->nBadSplits++;
    if (badSplit && bBadConstr)
      splitcount->nBadBoth++;
    if (badSplit) {
      double delta = loglk[choice] - loglk[ABvsCD];
      /* If ABvsCD is favored over the more likely NNI by constraints,
         then this is probably a bad split because of the constraint */
      if (p[choice] > p[ABvsCD] + tolerance)
        splitcount->dWorstDeltaConstrained = MAX(delta, splitcount->dWorstDeltaConstrained);
      else
        splitcount->dWorstDeltaUnconstrained = MAX(delta, splitcount->dWorstDeltaUnconstrained);
    }
    if (nBootstrap>0)
      NJ->support[node] = badSplit ? 0.0 : SHSupport(NJ->nPos, nBootstrap, col, loglk, site_likelihoods);

    /* No longer needed */
    DeleteUpProfile(upProfiles, NJ, nodeABCD[0]);
    DeleteUpProfile(upProfiles, NJ, nodeABCD[1]);
    DeleteUpProfile(upProfiles, NJ, nodeABCD[2]);
  }
  traversal = FreeTraversal(traversal,NJ);
  upProfiles = FreeUpProfiles(upProfiles,NJ);
  if (nBootstrap>0)
    col = myfree(col, sizeof(int)*((size_t)NJ->nPos)*nBootstrap);
  for (choice = 0; choice < 3; choice++)
    site_likelihoods[choice] = myfree(site_likelihoods[choice], sizeof(double)*NJ->nPos);
}

/* Minimum-evolution analogue of TestSplitsML: for each internal split,
   compare the distance-based score of AB|CD to the two NNI alternatives
   and tally bad splits / constraint violations into splitcount. */
void TestSplitsMinEvo(NJ_t *NJ, /*OUT*/SplitCount_t *splitcount) {
  const double tolerance = 1e-6;
  splitcount->nBadSplits = 0;
  splitcount->nConstraintViolations = 0;
  splitcount->nBadBoth = 0;
  splitcount->nSplits = 0;
  splitcount->dWorstDeltaUnconstrained = 0.0;
  splitcount->dWorstDeltaConstrained = 0.0;

  profile_t **upProfiles = UpProfiles(NJ);
  traversal_t traversal = InitTraversal(NJ);
  int node = NJ->root;

  while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) {
    if (node < NJ->nSeq || node == NJ->root)
      continue; /* nothing to do for leaves or root */
    profile_t *profiles[4];
    int nodeABCD[4];
    SetupABCD(NJ, node, /*OUT*/profiles, /*IN/OUT*/upProfiles, /*OUT*/nodeABCD, /*useML*/false);

    if (verbose>2)
      fprintf(stderr,"Testing Split around %d: A=%d B=%d C=%d D=up(%d) or node parent %d\n",
              node, nodeABCD[0], nodeABCD[1], nodeABCD[2], nodeABCD[3], NJ->parent[node]);

    double d[6]; /* distances, perhaps log-corrected distances, no constraint penalties */
    CorrectedPairDistances(profiles, 4, NJ->distance_matrix, NJ->nPos, /*OUT*/d);

    /* alignment-based scores for each split (lower is better) */
    double sABvsCD = d[qAB] + d[qCD];
    double sACvsBD = d[qAC] + d[qBD];
    double sADvsBC = d[qAD] + d[qBC];

    /* constraint penalties, indexed by nni_t (lower is better) */
    double p[3];
    QuartetConstraintPenalties(profiles, NJ->nConstraints, /*OUT*/p);

    int nConstraintsViolated = 0;
    int iC;
    for (iC=0; iC < NJ->nConstraints; iC++) {
      if (SplitViolatesConstraint(profiles, iC)) {
        nConstraintsViolated++;
        if (verbose > 2) {
          double penalty[3] = {0.0,0.0,0.0};
          (void)QuartetConstraintPenaltiesPiece(profiles, iC, /*OUT*/penalty);
          fprintf(stderr, "Violate constraint %d at %d (children %d %d) penalties %.3f %.3f %.3f %d/%d %d/%d %d/%d %d/%d\n",
                  iC, node, NJ->child[node].child[0], NJ->child[node].child[1],
                  penalty[ABvsCD], penalty[ACvsBD], penalty[ADvsBC],
                  profiles[0]->nOn[iC], profiles[0]->nOff[iC],
                  profiles[1]->nOn[iC], profiles[1]->nOff[iC],
                  profiles[2]->nOn[iC], profiles[2]->nOff[iC],
                  profiles[3]->nOn[iC], profiles[3]->nOff[iC]);
        }
      }
    }

    double delta = sABvsCD - MIN(sACvsBD,sADvsBC);
    bool bBadDist = delta > tolerance;
    bool bBadConstr = p[ABvsCD] > p[ACvsBD] + tolerance || p[ABvsCD] > p[ADvsBC] + tolerance;

    splitcount->nSplits++;
    if (bBadDist) {
      nni_t choice = sACvsBD < sADvsBC ? ACvsBD : ADvsBC;
      /* If ABvsCD is favored over the shorter NNI by constraints,
         then this is probably a bad split because of the constraint */
      if (p[choice] > p[ABvsCD] + tolerance)
        splitcount->dWorstDeltaConstrained = MAX(delta, splitcount->dWorstDeltaConstrained);
      else
        splitcount->dWorstDeltaUnconstrained = MAX(delta, splitcount->dWorstDeltaUnconstrained);
    }
    if (nConstraintsViolated > 0)
      splitcount->nConstraintViolations++; /* count splits with any violations, not #constraints in a splits */
    if (bBadDist)
      splitcount->nBadSplits++;
    if (bBadDist && bBadConstr)
      splitcount->nBadBoth++;
    if (bBadConstr && verbose > 2) {
      /* Which NNI would be better */
      double dist_advantage = 0;
      double constraint_penalty = 0;
      if (p[ACvsBD] < p[ADvsBC]) {
        dist_advantage = sACvsBD - sABvsCD;
        constraint_penalty = p[ABvsCD] - p[ACvsBD];
      } else {
        dist_advantage = sADvsBC - sABvsCD;
        constraint_penalty = p[ABvsCD] - p[ADvsBC];
      }
      fprintf(stderr, "Violate constraints %d distance_advantage %.3f constraint_penalty %.3f (children %d %d):",
              node, dist_advantage, constraint_penalty,
              NJ->child[node].child[0], NJ->child[node].child[1]);
      /* list the constraints with a penalty, meaning that ABCD all have
         non-zero values and that AB|CD worse than others */
      for (iC = 0; iC < NJ->nConstraints; iC++) {
        double ppart[6];
        if (QuartetConstraintPenaltiesPiece(profiles, iC, /*OUT*/ppart)) {
          if (ppart[qAB] + ppart[qCD] > ppart[qAD] + ppart[qBC] + tolerance
              || ppart[qAB] + ppart[qCD] > ppart[qAC] + ppart[qBD] + tolerance)
            fprintf(stderr, " %d (%d/%d %d/%d %d/%d %d/%d)", iC,
                    profiles[0]->nOn[iC], profiles[0]->nOff[iC],
                    profiles[1]->nOn[iC], profiles[1]->nOff[iC],
                    profiles[2]->nOn[iC], profiles[2]->nOff[iC],
                    profiles[3]->nOn[iC], profiles[3]->nOff[iC]);
        }
      }
      fprintf(stderr, "\n");
    }

    /* no longer needed */
    DeleteUpProfile(upProfiles, NJ, nodeABCD[0]);
    DeleteUpProfile(upProfiles, NJ, nodeABCD[1]);
  }
  traversal = FreeTraversal(traversal,NJ);
  upProfiles = FreeUpProfiles(upProfiles,NJ);
}

/* Computes support for (A,B),(C,D)
compared to that for (A,C),(B,D) and (A,D),(B,C) */
double SplitSupport(profile_t *pA, profile_t *pB, profile_t *pC, profile_t *pD,
                    /*OPTIONAL*/distance_matrix_t *dmat,
                    int nPos,
                    int nBootstrap,
                    int *col) {
  /* Precompute per-position weighted distance pieces for all 6 pairs, then
     for each bootstrap replicate (columns from col) recompute the 6 pair
     distances and count replicates in which AB|CD beats both alternatives.
     Returns the supporting fraction in [0,1]. */
  int i,j;
  long lPos = nPos; /* to avoid overflow when multiplying */

  /* Note distpieces are weighted */
  double *distpieces[6];
  double *weights[6];
  for (j = 0; j < 6; j++) {
    distpieces[j] = (double*)mymalloc(sizeof(double)*nPos);
    weights[j] = (double*)mymalloc(sizeof(double)*nPos);
  }

  int iFreqA = 0;
  int iFreqB = 0;
  int iFreqC = 0;
  int iFreqD = 0;
  for (i = 0; i < nPos; i++) {
    numeric_t *fA = GET_FREQ(pA, i, /*IN/OUT*/iFreqA);
    numeric_t *fB = GET_FREQ(pB, i, /*IN/OUT*/iFreqB);
    numeric_t *fC = GET_FREQ(pC, i, /*IN/OUT*/iFreqC);
    numeric_t *fD = GET_FREQ(pD, i, /*IN/OUT*/iFreqD);

    weights[qAB][i] = pA->weights[i] * pB->weights[i];
    weights[qAC][i] = pA->weights[i] * pC->weights[i];
    weights[qAD][i] = pA->weights[i] * pD->weights[i];
    weights[qBC][i] = pB->weights[i] * pC->weights[i];
    weights[qBD][i] = pB->weights[i] * pD->weights[i];
    weights[qCD][i] = pC->weights[i] * pD->weights[i];

    distpieces[qAB][i] = weights[qAB][i] * ProfileDistPiece(pA->codes[i], pB->codes[i], fA, fB, dmat, NULL);
    distpieces[qAC][i] = weights[qAC][i] * ProfileDistPiece(pA->codes[i], pC->codes[i], fA, fC, dmat, NULL);
    distpieces[qAD][i] = weights[qAD][i] * ProfileDistPiece(pA->codes[i], pD->codes[i], fA, fD, dmat, NULL);
    distpieces[qBC][i] = weights[qBC][i] * ProfileDistPiece(pB->codes[i], pC->codes[i], fB, fC, dmat, NULL);
    distpieces[qBD][i] = weights[qBD][i] * ProfileDistPiece(pB->codes[i], pD->codes[i], fB, fD, dmat, NULL);
    distpieces[qCD][i] = weights[qCD][i] * ProfileDistPiece(pC->codes[i], pD->codes[i], fC, fD, dmat, NULL);
  }
  assert(iFreqA == pA->nVectors);
  assert(iFreqB == pB->nVectors);
  assert(iFreqC == pC->nVectors);
  assert(iFreqD == pD->nVectors);

  double totpieces[6];
  double totweights[6];
  double dists[6];
  for (j = 0; j < 6; j++) {
    totpieces[j] = 0.0;
    totweights[j] = 0.0;
    for (i = 0; i < nPos; i++) {
      totpieces[j] += distpieces[j][i];
      totweights[j] += weights[j][i];
    }
    /* near-zero overlap => fall back to a large distance of 3.0 */
    dists[j] = totweights[j] > 0.01 ? totpieces[j]/totweights[j] : 3.0;
    if (logdist)
      dists[j] = LogCorrect(dists[j]);
  }

  /* Support1 = Support(AB|CD over AC|BD) = d(A,C)+d(B,D)-d(A,B)-d(C,D)
     Support2 = Support(AB|CD over AD|BC) = d(A,D)+d(B,C)-d(A,B)-d(C,D) */
  double support1 = dists[qAC] + dists[qBD] - dists[qAB] - dists[qCD];
  double support2 = dists[qAD] + dists[qBC] - dists[qAB] - dists[qCD];

  if (support1 < 0 || support2 < 0) {
    nSuboptimalSplits++; /* Another split seems superior */
  }

  assert(nBootstrap > 0);
  int nSupport = 0;

  int iBoot;
  for (iBoot=0;iBoot<nBootstrap;iBoot++) {
    int *colw = &col[lPos*iBoot];

    for (j = 0; j < 6; j++) {
      double totp = 0;
      double totw = 0;
      double *d = distpieces[j];
      double *w = weights[j];
      for (i=0; i<nPos; i++) {
        int c = colw[i];
        totp += d[c];
        totw += w[c];
      }
      dists[j] = totw > 0.01 ? totp/totw : 3.0;
      if (logdist)
        dists[j] = LogCorrect(dists[j]);
    }
    support1 = dists[qAC] + dists[qBD] - dists[qAB] - dists[qCD];
    support2 = dists[qAD] + dists[qBC] - dists[qAB] - dists[qCD];
    if (support1 > 0 && support2 > 0)
      nSupport++;
  } /* end loop over bootstrap replicates */

  for (j = 0; j < 6; j++) {
    distpieces[j] = myfree(distpieces[j], sizeof(double)*nPos);
    weights[j] = myfree(weights[j], sizeof(double)*nPos);
  }
  return( nSupport/(double)nBootstrap );
}

/* Resampling (SH-like) support for topology 0 (AB|CD) given per-site
   likelihoods for all 3 topologies: delta is the observed advantage of
   topology 0 over the best alternative; each replicate recomputes centered
   per-topology log-likelihood sums and counts cases where the resampled
   advantage stays below delta. Returns the supporting fraction. */
double SHSupport(int nPos, int nBootstrap, int *col, double loglk[3], double *site_likelihoods[3]) {
  long lPos = nPos; /* to avoid overflow when multiplying */
  assert(nBootstrap>0);
  double delta1 = loglk[0]-loglk[1];
  double delta2 = loglk[0]-loglk[2];
  double delta = delta1 < delta2 ? delta1 : delta2;

  double *siteloglk[3];
  int i,j;
  for (i = 0; i < 3; i++) {
    siteloglk[i] = mymalloc(sizeof(double)*nPos);
    for (j = 0; j < nPos; j++)
      siteloglk[i][j] = log(site_likelihoods[i][j]);
  }
  int nSupport = 0;
  int iBoot;
  for (iBoot = 0; iBoot < nBootstrap; iBoot++) {
    /* resampled[i] is centered: (resampled loglk) - (original loglk) */
    double resampled[3];
    for (i = 0; i < 3; i++)
      resampled[i] = -loglk[i];
    for (j = 0; j < nPos; j++) {
      int pos = col[iBoot*lPos+j];
      for (i = 0; i < 3; i++)
        resampled[i] += siteloglk[i][pos];
    }
    int iBest = 0;
    for (i = 1; i < 3; i++)
      if (resampled[i] > resampled[iBest])
        iBest = i;
    double resample1 = resampled[iBest] - resampled[(iBest+1)%3];
    double resample2 = resampled[iBest] - resampled[(iBest+2)%3];
    double resampleDelta = resample1 < resample2 ? resample1 : resample2;
    if (resampleDelta < delta)
      nSupport++;
  }
  for (i=0;i<3;i++)
    siteloglk[i] = myfree(siteloglk[i], sizeof(double)*nPos);
  return(nSupport/(double)nBootstrap);
}

/* Set hit->dist to the (diameter-corrected) distance plus constraint penalty
   for the pair (hit->i, hit->j), then set the join criterion via SetCriterion. */
void SetDistCriterion(/*IN/OUT*/NJ_t *NJ, int nActive, /*IN/OUT*/besthit_t *hit) {
  if (hit->i < NJ->nSeq && hit->j < NJ->nSeq) {
    /* leaf-leaf pair: use the sequence-level distance */
    SeqDist(NJ->profiles[hit->i]->codes,
            NJ->profiles[hit->j]->codes,
            NJ->nPos, NJ->distance_matrix, /*OUT*/hit);
  } else {
    ProfileDist(NJ->profiles[hit->i],
                NJ->profiles[hit->j],
                NJ->nPos, NJ->distance_matrix, /*OUT*/hit);
    hit->dist -= (NJ->diameter[hit->i] + NJ->diameter[hit->j]);
  }
  hit->dist += constraintWeight
    * (double)JoinConstraintPenalty(NJ, hit->i, hit->j);
  SetCriterion(NJ,nActive,/*IN/OUT*/hit);
}

/* Compute join->criterion = dist - (out(i)+out(j))/(nActive-2), refreshing
   stale out-distances first (how stale is allowed depends on tophitsMult
   and staleOutLimit) and rescaling out-distances computed at a different
   nActive. No-op if either node is already joined. */
void SetCriterion(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/besthit_t *join) {
  if(join->i < 0
     || join->j < 0
     || NJ->parent[join->i] >= 0
     || NJ->parent[join->j] >= 0)
    return;
  assert(NJ->nOutDistActive[join->i] >= nActive);
  assert(NJ->nOutDistActive[join->j] >= nActive);

  int nDiffAllow = tophitsMult > 0 ? (int)(nActive*staleOutLimit) : 0;
  if (NJ->nOutDistActive[join->i] - nActive > nDiffAllow)
    SetOutDistance(NJ, join->i, nActive);
  if (NJ->nOutDistActive[join->j] - nActive > nDiffAllow)
    SetOutDistance(NJ, join->j, nActive);
  double outI = NJ->outDistances[join->i];
  if (NJ->nOutDistActive[join->i] != nActive)
    outI *= (nActive-1)/(double)(NJ->nOutDistActive[join->i]-1);
  double outJ = NJ->outDistances[join->j];
  if (NJ->nOutDistActive[join->j] != nActive)
    outJ *= (nActive-1)/(double)(NJ->nOutDistActive[join->j]-1);
  join->criterion = join->dist - (outI+outJ)/(double)(nActive-2);
  if (verbose > 2 && nActive <= 5) {
    fprintf(stderr, "Set Criterion to join %d %d with nActive=%d dist+penalty %.3f criterion %.3f\n",
            join->i, join->j, nActive, join->dist, join->criterion);
  }
}

void SetOutDistance(NJ_t *NJ, int iNode, int nActive) {
  if (NJ->nOutDistActive[iNode] == nActive)
    return;

  /* May be called by InitNJ before we have parents */
  assert(iNode>=0 && (NJ->parent == NULL || NJ->parent[iNode]<0));
  besthit_t dist;
  ProfileDist(NJ->profiles[iNode], NJ->outprofile, NJ->nPos, NJ->distance_matrix, &dist);
  outprofileOps++;

  /* out(A) = sum(X!=A) d(A,X)
     = sum(X!=A) (profiledist(A,X) - diam(A) - diam(X))
     = sum(X!=A) profiledist(A,X) - (N-1)*diam(A) - (totdiam - diam(A))

     in the absence of gaps:
     profiledist(A,out) = mean profiledist(A, all active nodes)
     sum(X!=A) profiledist(A,X) = N * profiledist(A,out) - profiledist(A,A)

     With gaps, we need to take the weights of the comparisons into account, where
     w(Ai) is the weight of position i in profile A:
     w(A,B) = sum_i w(Ai) * w(Bi)
     d(A,B) = sum_i w(Ai) * w(Bi) * d(Ai,Bi) / w(A,B)

     sum(X!=A) profiledist(A,X) ~= (N-1) * profiledist(A, Out w/o A)
     profiledist(A, Out w/o A) = sum_X!=A sum_i d(Ai,Xi) * w(Ai) * w(Bi) / ( sum_X!=A sum_i w(Ai) * w(Bi) )
     d(A, Out) = sum_A sum_i d(Ai,Xi) * w(Ai) * w(Bi) / ( sum_X sum_i w(Ai) * w(Bi) )
     and so we get
     profiledist(A,out w/o A) = (top of d(A,Out) - top of d(A,A)) / (weight of d(A,Out) - weight of d(A,A))
     top = dist *
weight with another correction of nActive because the weight of the out-profile is the average weight not the total weight. */ double top = (nActive-1) * (dist.dist * dist.weight * nActive - NJ->selfweight[iNode] * NJ->selfdist[iNode]); double bottom = (dist.weight * nActive - NJ->selfweight[iNode]); double pdistOutWithoutA = top/bottom; NJ->outDistances[iNode] = bottom > 0.01 ? pdistOutWithoutA - NJ->diameter[iNode] * (nActive-1) - (NJ->totdiam - NJ->diameter[iNode]) : 3.0; NJ->nOutDistActive[iNode] = nActive; if(verbose>3 && iNode < 5) fprintf(stderr,"NewOutDist for %d %f from dist %f selfd %f diam %f totdiam %f newActive %d\n", iNode, NJ->outDistances[iNode], dist.dist, NJ->selfdist[iNode], NJ->diameter[iNode], NJ->totdiam, nActive); if (verbose>6 && (iNode % 10) == 0) { /* Compute the actual out-distance and compare */ double total = 0.0; double total_pd = 0.0; int j; for (j=0;j<NJ->maxnode;j++) { if (j!=iNode && (NJ->parent==NULL || NJ->parent[j]<0)) { besthit_t bh; ProfileDist(NJ->profiles[iNode], NJ->profiles[j], NJ->nPos, NJ->distance_matrix, /*OUT*/&bh); total_pd += bh.dist; total += bh.dist - (NJ->diameter[iNode] + NJ->diameter[j]); } } fprintf(stderr,"OutDist for Node %d %f truth %f profiled %f truth %f pd_err %f\n", iNode, NJ->outDistances[iNode], total, pdistOutWithoutA, total_pd,fabs(pdistOutWithoutA-total_pd)); } } top_hits_t *FreeTopHits(top_hits_t *tophits) { if (tophits == NULL) return(NULL); int iNode; for (iNode = 0; iNode < tophits->maxnodes; iNode++) { top_hits_list_t *l = &tophits->top_hits_lists[iNode]; if (l->hits != NULL) l->hits = myfree(l->hits, sizeof(hit_t) * l->nHits); } tophits->top_hits_lists = myfree(tophits->top_hits_lists, sizeof(top_hits_list_t) * tophits->maxnodes); tophits->visible = myfree(tophits->visible, sizeof(hit_t*) * tophits->maxnodes); tophits->topvisible = myfree(tophits->topvisible, sizeof(int) * tophits->nTopVisible); #ifdef OPENMP for (iNode = 0; iNode < tophits->maxnodes; iNode++) 
omp_destroy_lock(&tophits->locks[iNode]); tophits->locks = myfree(tophits->locks, sizeof(omp_lock_t) * tophits->maxnodes); #endif return(myfree(tophits, sizeof(top_hits_t))); } top_hits_t *InitTopHits(NJ_t *NJ, int m) { int iNode; assert(m > 0); top_hits_t *tophits = mymalloc(sizeof(top_hits_t)); tophits->m = m; tophits->q = (int)(0.5 + tophits2Mult * sqrt(tophits->m)); if (!useTopHits2nd || tophits->q >= tophits->m) tophits->q = 0; tophits->maxnodes = NJ->maxnodes; tophits->top_hits_lists = mymalloc(sizeof(top_hits_list_t) * tophits->maxnodes); tophits->visible = mymalloc(sizeof(hit_t) * tophits->maxnodes); tophits->nTopVisible = (int)(0.5 + topvisibleMult*m); tophits->topvisible = mymalloc(sizeof(int) * tophits->nTopVisible); #ifdef OPENMP tophits->locks = mymalloc(sizeof(omp_lock_t) * tophits->maxnodes); for (iNode = 0; iNode < tophits->maxnodes; iNode++) omp_init_lock(&tophits->locks[iNode]); #endif int i; for (i = 0; i < tophits->nTopVisible; i++) tophits->topvisible[i] = -1; /* empty */ tophits->topvisibleAge = 0; for (iNode = 0; iNode < tophits->maxnodes; iNode++) { top_hits_list_t *l = &tophits->top_hits_lists[iNode]; l->nHits = 0; l->hits = NULL; l->hitSource = -1; l->age = 0; hit_t *v = &tophits->visible[iNode]; v->j = -1; v->dist = 1e20; } return(tophits); } /* Helper function for sorting in SetAllLeafTopHits, and the global variables it needs */ NJ_t *CompareSeedNJ = NULL; int *CompareSeedGaps = NULL; int CompareSeeds(const void *c1, const void *c2) { int seed1 = *(int *)c1; int seed2 = *(int *)c2; int gapdiff = CompareSeedGaps[seed1] - CompareSeedGaps[seed2]; if (gapdiff != 0) return(gapdiff); /* fewer gaps is better */ double outdiff = CompareSeedNJ->outDistances[seed1] - CompareSeedNJ->outDistances[seed2]; if(outdiff < 0) return(-1); /* closer to more nodes is better */ if(outdiff > 0) return(1); return(0); } /* Using the seed heuristic and the close global variable */ void SetAllLeafTopHits(/*IN/UPDATE*/NJ_t *NJ, /*IN/OUT*/top_hits_t *tophits) { 
double close = tophitsClose; if (close < 0) { if (fastest && NJ->nSeq >= 50000) { close = 0.99; } else { double logN = log((double)NJ->nSeq)/log(2.0); close = logN/(logN+2.0); } } /* Sort the potential seeds, by a combination of nGaps and NJ->outDistances We don't store nGaps so we need to compute that */ int *nGaps = (int*)mymalloc(sizeof(int)*NJ->nSeq); int iNode; for(iNode=0; iNode<NJ->nSeq; iNode++) { nGaps[iNode] = (int)(0.5 + NJ->nPos - NJ->selfweight[iNode]); } int *seeds = (int*)mymalloc(sizeof(int)*NJ->nSeq); for (iNode=0; iNode<NJ->nSeq; iNode++) seeds[iNode] = iNode; CompareSeedNJ = NJ; CompareSeedGaps = nGaps; qsort(/*IN/OUT*/seeds, NJ->nSeq, sizeof(int), CompareSeeds); CompareSeedNJ = NULL; CompareSeedGaps = NULL; /* For each seed, save its top 2*m hits and then look for close neighbors */ assert(2 * tophits->m <= NJ->nSeq); int iSeed; int nHasTopHits = 0; #ifdef OPENMP #pragma omp parallel for schedule(dynamic, 50) #endif for(iSeed=0; iSeed < NJ->nSeq; iSeed++) { int seed = seeds[iSeed]; if (iSeed > 0 && (iSeed % 100) == 0) { #ifdef OPENMP #pragma omp critical #endif ProgressReport("Top hits for %6d of %6d seqs (at seed %6d)", nHasTopHits, NJ->nSeq, iSeed, 0); } if (tophits->top_hits_lists[seed].nHits > 0) { if(verbose>2) fprintf(stderr, "Skipping seed %d\n", seed); continue; } besthit_t *besthitsSeed = (besthit_t*)mymalloc(sizeof(besthit_t)*NJ->nSeq); besthit_t *besthitsNeighbor = (besthit_t*)mymalloc(sizeof(besthit_t) * 2 * tophits->m); besthit_t bestjoin; if(verbose>2) fprintf(stderr,"Trying seed %d\n", seed); SetBestHit(seed, NJ, /*nActive*/NJ->nSeq, /*OUT*/&bestjoin, /*OUT*/besthitsSeed); /* sort & save top hits of self. besthitsSeed is now sorted. 
*/ SortSaveBestHits(seed, /*IN/SORT*/besthitsSeed, /*IN-SIZE*/NJ->nSeq, /*OUT-SIZE*/tophits->m, /*IN/OUT*/tophits); nHasTopHits++; /* find "close" neighbors and compute their top hits */ double neardist = besthitsSeed[2 * tophits->m - 1].dist * close; /* must have at least average weight, rem higher is better and allow a bit more than average, e.g. if we are looking for within 30% away, 20% more gaps than usual seems OK Alternatively, have a coverage requirement in case neighbor is short If fastest, consider the top q/2 hits to be close neighbors, regardless */ double nearweight = 0; int iClose; for (iClose = 0; iClose < 2 * tophits->m; iClose++) nearweight += besthitsSeed[iClose].weight; nearweight = nearweight/(2.0 * tophits->m); /* average */ nearweight *= (1.0-2.0*neardist/3.0); double nearcover = 1.0 - neardist/2.0; if(verbose>2) fprintf(stderr,"Distance limit for close neighbors %f weight %f ungapped %d\n", neardist, nearweight, NJ->nPos-nGaps[seed]); for (iClose = 0; iClose < tophits->m; iClose++) { besthit_t *closehit = &besthitsSeed[iClose]; int closeNode = closehit->j; if (tophits->top_hits_lists[closeNode].nHits > 0) continue; /* If within close-distance, or identical, use as close neighbor */ bool close = closehit->dist <= neardist && (closehit->weight >= nearweight || closehit->weight >= (NJ->nPos-nGaps[closeNode])*nearcover); bool identical = closehit->dist < 1e-6 && fabs(closehit->weight - (NJ->nPos - nGaps[seed])) < 1e-5 && fabs(closehit->weight - (NJ->nPos - nGaps[closeNode])) < 1e-5; if (useTopHits2nd && iClose < tophits->q && (close || identical)) { nHasTopHits++; nClose2Used++; int nUse = MIN(tophits->q * tophits2Safety, 2 * tophits->m); besthit_t *besthitsClose = mymalloc(sizeof(besthit_t) * nUse); TransferBestHits(NJ, /*nActive*/NJ->nSeq, closeNode, /*IN*/besthitsSeed, /*SIZE*/nUse, /*OUT*/besthitsClose, /*updateDistance*/true); SortSaveBestHits(closeNode, /*IN/SORT*/besthitsClose, /*IN-SIZE*/nUse, /*OUT-SIZE*/tophits->q, /*IN/OUT*/tophits); 
tophits->top_hits_lists[closeNode].hitSource = seed; besthitsClose = myfree(besthitsClose, sizeof(besthit_t) * nUse); } else if (close || identical || (fastest && iClose < (tophits->q+1)/2)) { nHasTopHits++; nCloseUsed++; if(verbose>2) fprintf(stderr, "Near neighbor %d (rank %d weight %f ungapped %d %d)\n", closeNode, iClose, besthitsSeed[iClose].weight, NJ->nPos-nGaps[seed], NJ->nPos-nGaps[closeNode]); /* compute top 2*m hits */ TransferBestHits(NJ, /*nActive*/NJ->nSeq, closeNode, /*IN*/besthitsSeed, /*SIZE*/2 * tophits->m, /*OUT*/besthitsNeighbor, /*updateDistance*/true); SortSaveBestHits(closeNode, /*IN/SORT*/besthitsNeighbor, /*IN-SIZE*/2 * tophits->m, /*OUT-SIZE*/tophits->m, /*IN/OUT*/tophits); /* And then try for a second level of transfer. We assume we are in a good area, because of the 1st level of transfer, and in a small neighborhood, because q is small (32 for 1 million sequences), so we do not make any close checks. */ int iClose2; for (iClose2 = 0; iClose2 < tophits->q && iClose2 < 2 * tophits->m; iClose2++) { int closeNode2 = besthitsNeighbor[iClose2].j; assert(closeNode2 >= 0); if (tophits->top_hits_lists[closeNode2].hits == NULL) { nClose2Used++; nHasTopHits++; int nUse = MIN(tophits->q * tophits2Safety, 2 * tophits->m); besthit_t *besthitsClose2 = mymalloc(sizeof(besthit_t) * nUse); TransferBestHits(NJ, /*nActive*/NJ->nSeq, closeNode2, /*IN*/besthitsNeighbor, /*SIZE*/nUse, /*OUT*/besthitsClose2, /*updateDistance*/true); SortSaveBestHits(closeNode2, /*IN/SORT*/besthitsClose2, /*IN-SIZE*/nUse, /*OUT-SIZE*/tophits->q, /*IN/OUT*/tophits); tophits->top_hits_lists[closeNode2].hitSource = closeNode; besthitsClose2 = myfree(besthitsClose2, sizeof(besthit_t) * nUse); } /* end if should do 2nd-level transfer */ } } } /* end loop over close candidates */ besthitsSeed = myfree(besthitsSeed, sizeof(besthit_t)*NJ->nSeq); besthitsNeighbor = myfree(besthitsNeighbor, sizeof(besthit_t) * 2 * tophits->m); } /* end loop over seeds */ for (iNode=0; iNode<NJ->nSeq; 
iNode++) { top_hits_list_t *l = &tophits->top_hits_lists[iNode]; assert(l->hits != NULL); assert(l->hits[0].j >= 0); assert(l->hits[0].j < NJ->nSeq); assert(l->hits[0].j != iNode); tophits->visible[iNode] = l->hits[0]; } if (verbose >= 2) fprintf(stderr, "#Close neighbors among leaves: 1st-level %ld 2nd-level %ld seeds %ld\n", nCloseUsed, nClose2Used, NJ->nSeq-nCloseUsed-nClose2Used); nGaps = myfree(nGaps, sizeof(int)*NJ->nSeq); seeds = myfree(seeds, sizeof(int)*NJ->nSeq); /* Now add a "checking phase" where we ensure that the q or 2*sqrt(m) hits of i are represented in j (if they should be) */ long lReplace = 0; int nCheck = tophits->q > 0 ? tophits->q : (int)(0.5 + 2.0*sqrt(tophits->m)); for (iNode = 0; iNode < NJ->nSeq; iNode++) { if ((iNode % 100) == 0) ProgressReport("Checking top hits for %6d of %6d seqs", iNode+1, NJ->nSeq, 0, 0); top_hits_list_t *lNode = &tophits->top_hits_lists[iNode]; int iHit; for (iHit = 0; iHit < nCheck && iHit < lNode->nHits; iHit++) { besthit_t bh = HitToBestHit(iNode, lNode->hits[iHit]); SetCriterion(NJ, /*nActive*/NJ->nSeq, /*IN/OUT*/&bh); top_hits_list_t *lTarget = &tophits->top_hits_lists[bh.j]; /* If this criterion is worse than the nCheck-1 entry of the target, then skip the check. This logic is based on assuming that the list is sorted, which is true initially but may not be true later. Still, is a good heuristic. 
*/ assert(nCheck > 0); assert(nCheck <= lTarget->nHits); besthit_t bhCheck = HitToBestHit(bh.j, lTarget->hits[nCheck-1]); SetCriterion(NJ, /*nActive*/NJ->nSeq, /*IN/OUT*/&bhCheck); if (bhCheck.criterion < bh.criterion) continue; /* no check needed */ /* Check if this is present in the top-hit list */ int iHit2; bool bFound = false; for (iHit2 = 0; iHit2 < lTarget->nHits && !bFound; iHit2++) if (lTarget->hits[iHit2].j == iNode) bFound = true; if (!bFound) { /* Find the hit with the worst criterion and replace it with this one */ int iWorst = -1; double dWorstCriterion = -1e20; for (iHit2 = 0; iHit2 < lTarget->nHits; iHit2++) { besthit_t bh2 = HitToBestHit(bh.j, lTarget->hits[iHit2]); SetCriterion(NJ, /*nActive*/NJ->nSeq, /*IN/OUT*/&bh2); if (bh2.criterion > dWorstCriterion) { iWorst = iHit2; dWorstCriterion = bh2.criterion; } } if (dWorstCriterion > bh.criterion) { assert(iWorst >= 0); lTarget->hits[iWorst].j = iNode; lTarget->hits[iWorst].dist = bh.dist; lReplace++; /* and perhaps update visible */ besthit_t v; bool bSuccess = GetVisible(NJ, /*nActive*/NJ->nSeq, tophits, bh.j, /*OUT*/&v); assert(bSuccess); if (bh.criterion < v.criterion) tophits->visible[bh.j] = lTarget->hits[iWorst]; } } } } if (verbose >= 2) fprintf(stderr, "Replaced %ld top hit entries\n", lReplace); } /* Updates out-distances but does not reset or update visible set */ void GetBestFromTopHits(int iNode, /*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN*/top_hits_t *tophits, /*OUT*/besthit_t *bestjoin) { assert(iNode >= 0); assert(NJ->parent[iNode] < 0); top_hits_list_t *l = &tophits->top_hits_lists[iNode]; assert(l->nHits > 0); assert(l->hits != NULL); if(!fastest) SetOutDistance(NJ, iNode, nActive); /* ensure out-distances are not stale */ bestjoin->i = -1; bestjoin->j = -1; bestjoin->dist = 1e20; bestjoin->criterion = 1e20; int iBest; for(iBest=0; iBest < l->nHits; iBest++) { besthit_t bh = HitToBestHit(iNode, l->hits[iBest]); if (UpdateBestHit(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/&bh, /*update 
dist*/true)) { SetCriterion(/*IN/OUT*/NJ, nActive, /*IN/OUT*/&bh); /* make sure criterion is correct */ if (bh.criterion < bestjoin->criterion) *bestjoin = bh; } } assert(bestjoin->j >= 0); /* a hit was found */ assert(bestjoin->i == iNode); } int ActiveAncestor(/*IN*/NJ_t *NJ, int iNode) { if (iNode < 0) return(iNode); while(NJ->parent[iNode] >= 0) iNode = NJ->parent[iNode]; return(iNode); } bool UpdateBestHit(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/besthit_t *hit, bool bUpdateDist) { int i = ActiveAncestor(/*IN*/NJ, hit->i); int j = ActiveAncestor(/*IN*/NJ, hit->j); if (i < 0 || j < 0 || i == j) { hit->i = -1; hit->j = -1; hit->weight = 0; hit->dist = 1e20; hit->criterion = 1e20; return(false); } if (i != hit->i || j != hit->j) { hit->i = i; hit->j = j; if (bUpdateDist) { SetDistCriterion(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/hit); } else { hit->dist = -1e20; hit->criterion = 1e20; } } return(true); } bool GetVisible(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/top_hits_t *tophits, int iNode, /*OUT*/besthit_t *visible) { if (iNode < 0 || NJ->parent[iNode] >= 0) return(false); hit_t *v = &tophits->visible[iNode]; if (v->j < 0 || NJ->parent[v->j] >= 0) return(false); *visible = HitToBestHit(iNode, *v); SetCriterion(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/visible); return(true); } besthit_t *UniqueBestHits(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/SORT*/besthit_t *combined, int nCombined, /*OUT*/int *nUniqueOut) { int iHit; for (iHit = 0; iHit < nCombined; iHit++) { besthit_t *hit = &combined[iHit]; UpdateBestHit(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/hit, /*update*/false); } qsort(/*IN/OUT*/combined, nCombined, sizeof(besthit_t), CompareHitsByIJ); besthit_t *uniqueList = (besthit_t*)mymalloc(sizeof(besthit_t)*nCombined); int nUnique = 0; int iSavedLast = -1; /* First build the new list */ for (iHit = 0; iHit < nCombined; iHit++) { besthit_t *hit = &combined[iHit]; if (hit->i < 0 || hit->j < 0) continue; if (iSavedLast >= 0) { /* toss out duplicates */ besthit_t *saved 
= &combined[iSavedLast]; if (saved->i == hit->i && saved->j == hit->j) continue; } assert(nUnique < nCombined); assert(hit->j >= 0 && NJ->parent[hit->j] < 0); uniqueList[nUnique++] = *hit; iSavedLast = iHit; } *nUniqueOut = nUnique; /* Then do any updates to the criterion or the distances in parallel */ #ifdef OPENMP #pragma omp parallel for schedule(dynamic, 50) #endif for (iHit = 0; iHit < nUnique; iHit++) { besthit_t *hit = &uniqueList[iHit]; if (hit->dist < 0.0) SetDistCriterion(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/hit); else SetCriterion(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/hit); } return(uniqueList); } /* Create a top hit list for the new node, either from children (if there are enough best hits left) or by a "refresh" Also set visible set for newnode Also update visible set for other nodes if we stumble across a "better" hit */ void TopHitJoin(int newnode, /*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/top_hits_t *tophits) { long startProfileOps = profileOps; long startOutProfileOps = outprofileOps; assert(NJ->child[newnode].nChild == 2); top_hits_list_t *lNew = &tophits->top_hits_lists[newnode]; assert(lNew->hits == NULL); /* Copy the hits */ int i; top_hits_list_t *lChild[2]; for (i = 0; i< 2; i++) { lChild[i] = &tophits->top_hits_lists[NJ->child[newnode].child[i]]; assert(lChild[i]->hits != NULL && lChild[i]->nHits > 0); } int nCombined = lChild[0]->nHits + lChild[1]->nHits; besthit_t *combinedList = (besthit_t*)mymalloc(sizeof(besthit_t)*nCombined); HitsToBestHits(lChild[0]->hits, lChild[0]->nHits, NJ->child[newnode].child[0], /*OUT*/combinedList); HitsToBestHits(lChild[1]->hits, lChild[1]->nHits, NJ->child[newnode].child[1], /*OUT*/combinedList + lChild[0]->nHits); int nUnique; /* UniqueBestHits() replaces children (used in the calls to HitsToBestHits) with active ancestors, so all distances & criteria will be recomputed */ besthit_t *uniqueList = UniqueBestHits(/*IN/UPDATE*/NJ, nActive, /*IN/SORT*/combinedList, nCombined, /*OUT*/&nUnique); int 
nUniqueAlloc = nCombined; combinedList = myfree(combinedList, sizeof(besthit_t)*nCombined); /* Forget the top-hit lists of the joined nodes */ for (i = 0; i < 2; i++) { lChild[i]->hits = myfree(lChild[i]->hits, sizeof(hit_t) * lChild[i]->nHits); lChild[i]->nHits = 0; } /* Use the average age, rounded up, by 1 Versions 2.0 and earlier used the maximum age, which leads to more refreshes without improving the accuracy of the NJ phase. Intuitively, if one of them was just refreshed then another refresh is unlikely to help. */ lNew->age = (lChild[0]->age+lChild[1]->age+1)/2 + 1; /* If top hit ages always match (perfectly balanced), then a limit of log2(m) would mean a refresh after m joins, which is about what we want. */ int tophitAgeLimit = MAX(1, (int)(0.5 + log((double)tophits->m)/log(2.0))); /* Either use the merged list as candidate top hits, or move from 2nd level to 1st level, or do a refresh UniqueBestHits eliminates hits to self, so if nUnique==nActive-1, we've already done the exhaustive search. Either way, we set tophits, visible(newnode), update visible of its top hits, and modify topvisible: if we do a refresh, then we reset it, otherwise we update */ bool bSecondLevel = lChild[0]->hitSource >= 0 && lChild[1]->hitSource >= 0; bool bUseUnique = nUnique==nActive-1 || (lNew->age <= tophitAgeLimit && nUnique >= (bSecondLevel ? (int)(0.5 + tophits2Refresh * tophits->q) : (int)(0.5 + tophits->m * tophitsRefresh) )); if (bUseUnique && verbose > 2) fprintf(stderr,"Top hits for %d from combined %d nActive=%d tophitsage %d %s\n", newnode,nUnique,nActive,lNew->age, bSecondLevel ? "2ndlevel" : "1stlevel"); if (!bUseUnique && bSecondLevel && lNew->age <= tophitAgeLimit) { int source = ActiveAncestor(NJ, lChild[0]->hitSource); if (source == newnode) source = ActiveAncestor(NJ, lChild[1]->hitSource); /* In parallel mode, it is possible that we would select a node as the hit-source and then over-write that top hit with a short list. So we need this sanity check. 
*/ if (source != newnode && source >= 0 && tophits->top_hits_lists[source].hitSource < 0) { /* switch from 2nd-level to 1st-level top hits -- compute top hits list of node from what we have so far plus the active source plus its top hits */ top_hits_list_t *lSource = &tophits->top_hits_lists[source]; assert(lSource->hitSource < 0); assert(lSource->nHits > 0); int nMerge = 1 + lSource->nHits + nUnique; besthit_t *mergeList = mymalloc(sizeof(besthit_t) * nMerge); memcpy(/*to*/mergeList, /*from*/uniqueList, nUnique * sizeof(besthit_t)); int iMerge = nUnique; mergeList[iMerge].i = newnode; mergeList[iMerge].j = source; SetDistCriterion(NJ, nActive, /*IN/OUT*/&mergeList[iMerge]); iMerge++; HitsToBestHits(lSource->hits, lSource->nHits, newnode, /*OUT*/mergeList+iMerge); for (i = 0; i < lSource->nHits; i++) { SetDistCriterion(NJ, nActive, /*IN/OUT*/&mergeList[iMerge]); iMerge++; } assert(iMerge == nMerge); uniqueList = myfree(uniqueList, nUniqueAlloc * sizeof(besthit_t)); uniqueList = UniqueBestHits(/*IN/UPDATE*/NJ, nActive, /*IN/SORT*/mergeList, nMerge, /*OUT*/&nUnique); nUniqueAlloc = nMerge; mergeList = myfree(mergeList, sizeof(besthit_t)*nMerge); assert(nUnique > 0); bUseUnique = nUnique >= (int)(0.5 + tophits->m * tophitsRefresh); bSecondLevel = false; if (bUseUnique && verbose > 2) fprintf(stderr, "Top hits for %d from children and source %d's %d hits, nUnique %d\n", newnode, source, lSource->nHits, nUnique); } } if (bUseUnique) { if (bSecondLevel) { /* pick arbitrarily */ lNew->hitSource = lChild[0]->hitSource; } int nSave = MIN(nUnique, bSecondLevel ? 
tophits->q : tophits->m); assert(nSave>0); if (verbose > 2) fprintf(stderr, "Combined %d ops so far %ld\n", nUnique, profileOps - startProfileOps); SortSaveBestHits(newnode, /*IN/SORT*/uniqueList, /*nIn*/nUnique, /*nOut*/nSave, /*IN/OUT*/tophits); assert(lNew->hits != NULL); /* set by sort/save */ tophits->visible[newnode] = lNew->hits[0]; UpdateTopVisible(/*IN*/NJ, nActive, newnode, &tophits->visible[newnode], /*IN/OUT*/tophits); UpdateVisible(/*IN/UPDATE*/NJ, nActive, /*IN*/uniqueList, nSave, /*IN/OUT*/tophits); } else { /* need to refresh: set top hits for node and for its top hits */ if(verbose > 2) fprintf(stderr,"Top hits for %d by refresh (%d unique age %d) nActive=%d\n", newnode,nUnique,lNew->age,nActive); nRefreshTopHits++; lNew->age = 0; int iNode; /* ensure all out-distances are up to date ahead of time to avoid any data overwriting issues. */ #ifdef OPENMP #pragma omp parallel for schedule(dynamic, 50) #endif for (iNode = 0; iNode < NJ->maxnode; iNode++) { if (NJ->parent[iNode] < 0) { if (fastest) { besthit_t bh; bh.i = iNode; bh.j = iNode; bh.dist = 0; SetCriterion(/*IN/UPDATE*/NJ, nActive, &bh); } else { SetOutDistance(/*IN/UDPATE*/NJ, iNode, nActive); } } } /* exhaustively get the best 2*m hits for newnode, set visible, and save the top m */ besthit_t *allhits = (besthit_t*)mymalloc(sizeof(besthit_t)*NJ->maxnode); assert(2 * tophits->m <= NJ->maxnode); besthit_t bh; SetBestHit(newnode, NJ, nActive, /*OUT*/&bh, /*OUT*/allhits); qsort(/*IN/OUT*/allhits, NJ->maxnode, sizeof(besthit_t), CompareHitsByCriterion); SortSaveBestHits(newnode, /*IN/SORT*/allhits, /*nIn*/NJ->maxnode, /*nOut*/tophits->m, /*IN/OUT*/tophits); /* Do not need to call UpdateVisible because we set visible below */ /* And use the top 2*m entries to expand other best-hit lists, but only for top m */ int iHit; #ifdef OPENMP #pragma omp parallel for schedule(dynamic, 50) #endif for (iHit=0; iHit < tophits->m; iHit++) { if (allhits[iHit].i < 0) continue; int iNode = allhits[iHit].j; 
assert(iNode>=0); if (NJ->parent[iNode] >= 0) continue; top_hits_list_t *l = &tophits->top_hits_lists[iNode]; int nHitsOld = l->nHits; assert(nHitsOld <= tophits->m); l->age = 0; /* Merge: old hits into 0->nHitsOld and hits from iNode above that */ besthit_t *bothList = (besthit_t*)mymalloc(sizeof(besthit_t) * 3 * tophits->m); HitsToBestHits(/*IN*/l->hits, nHitsOld, iNode, /*OUT*/bothList); /* does not compute criterion */ for (i = 0; i < nHitsOld; i++) SetCriterion(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/&bothList[i]); if (nActive <= 2 * tophits->m) l->hitSource = -1; /* abandon the 2nd-level top-hits heuristic */ int nNewHits = l->hitSource >= 0 ? tophits->q : tophits->m; assert(nNewHits > 0); TransferBestHits(/*IN/UPDATE*/NJ, nActive, iNode, /*IN*/allhits, /*nOldHits*/2 * nNewHits, /*OUT*/&bothList[nHitsOld], /*updateDist*/false); /* rely on UniqueBestHits to update dist and/or criterion */ int nUnique2; besthit_t *uniqueList2 = UniqueBestHits(/*IN/UPDATE*/NJ, nActive, /*IN/SORT*/bothList, nHitsOld + 2 * nNewHits, /*OUT*/&nUnique2); assert(nUnique2 > 0); bothList = myfree(bothList,3 * tophits->m * sizeof(besthit_t)); /* Note this will overwrite l, but we saved nHitsOld */ SortSaveBestHits(iNode, /*IN/SORT*/uniqueList2, /*nIn*/nUnique2, /*nOut*/nNewHits, /*IN/OUT*/tophits); /* will update topvisible below */ tophits->visible[iNode] = tophits->top_hits_lists[iNode].hits[0]; uniqueList2 = myfree(uniqueList2, (nHitsOld + 2 * tophits->m) * sizeof(besthit_t)); } ResetTopVisible(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/tophits); /* outside of the parallel phase */ allhits = myfree(allhits,sizeof(besthit_t)*NJ->maxnode); } uniqueList = myfree(uniqueList, nUniqueAlloc * sizeof(besthit_t)); if (verbose > 2) { fprintf(stderr, "New top-hit list for %d profile-ops %ld (out-ops %ld): source %d age %d members ", newnode, profileOps - startProfileOps, outprofileOps - startOutProfileOps, lNew->hitSource, lNew->age); int i; for (i = 0; i < lNew->nHits; i++) fprintf(stderr, " %d", 
lNew->hits[i].j); fprintf(stderr,"\n"); } } void UpdateVisible(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN*/besthit_t *tophitsNode, int nTopHits, /*IN/OUT*/top_hits_t *tophits) { int iHit; for(iHit = 0; iHit < nTopHits; iHit++) { besthit_t *hit = &tophitsNode[iHit]; if (hit->i < 0) continue; /* possible empty entries */ assert(NJ->parent[hit->i] < 0); assert(hit->j >= 0 && NJ->parent[hit->j] < 0); besthit_t visible; bool bSuccess = GetVisible(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/tophits, hit->j, /*OUT*/&visible); if (!bSuccess || hit->criterion < visible.criterion) { if (bSuccess) nVisibleUpdate++; hit_t *v = &tophits->visible[hit->j]; v->j = hit->i; v->dist = hit->dist; UpdateTopVisible(NJ, nActive, hit->j, v, /*IN/OUT*/tophits); if(verbose>5) fprintf(stderr,"NewVisible %d %d %f\n", hit->j,v->j,v->dist); } } /* end loop over hits */ } /* Update the top-visible list to perhaps include visible[iNode] */ void UpdateTopVisible(/*IN*/NJ_t * NJ, int nActive, int iIn, /*IN*/hit_t *hit, /*IN/OUT*/top_hits_t *tophits) { assert(tophits != NULL); bool bIn = false; /* placed in the list */ int i; /* First, if the list is not full, put it in somewhere */ for (i = 0; i < tophits->nTopVisible && !bIn; i++) { int iNode = tophits->topvisible[i]; if (iNode == iIn) { /* this node is already in the top hit list */ bIn = true; } else if (iNode < 0 || NJ->parent[iNode] >= 0) { /* found an empty spot */ bIn = true; tophits->topvisible[i] = iIn; } } int iPosWorst = -1; double dCriterionWorst = -1e20; if (!bIn) { /* Search for the worst hit */ for (i = 0; i < tophits->nTopVisible && !bIn; i++) { int iNode = tophits->topvisible[i]; assert(iNode >= 0 && NJ->parent[iNode] < 0 && iNode != iIn); besthit_t visible; if (!GetVisible(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/tophits, iNode, /*OUT*/&visible)) { /* found an empty spot */ tophits->topvisible[i] = iIn; bIn = true; } else if (visible.i == hit->j && visible.j == iIn) { /* the reverse hit is already in the top hit list */ bIn = true; } else if 
(visible.criterion >= dCriterionWorst) { iPosWorst = i; dCriterionWorst = visible.criterion; } } } if (!bIn && iPosWorst >= 0) { besthit_t visible = HitToBestHit(iIn, *hit); SetCriterion(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/&visible); if (visible.criterion < dCriterionWorst) { if (verbose > 2) { int iOld = tophits->topvisible[iPosWorst]; fprintf(stderr, "TopVisible replace %d=>%d with %d=>%d\n", iOld, tophits->visible[iOld].j, visible.i, visible.j); } tophits->topvisible[iPosWorst] = iIn; } } if (verbose > 2) { fprintf(stderr, "Updated TopVisible: "); for (i = 0; i < tophits->nTopVisible; i++) { int iNode = tophits->topvisible[i]; if (iNode >= 0 && NJ->parent[iNode] < 0) { besthit_t bh = HitToBestHit(iNode, tophits->visible[iNode]); SetDistCriterion(NJ, nActive, &bh); fprintf(stderr, " %d=>%d:%.4f", bh.i, bh.j, bh.criterion); } } fprintf(stderr,"\n"); } } /* Recompute the topvisible list */ void ResetTopVisible(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/top_hits_t *tophits) { besthit_t *visibleSorted = mymalloc(sizeof(besthit_t)*nActive); int nVisible = 0; /* #entries in visibleSorted */ int iNode; for (iNode = 0; iNode < NJ->maxnode; iNode++) { /* skip joins involving stale nodes */ if (NJ->parent[iNode] >= 0) continue; besthit_t v; if (GetVisible(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/tophits, iNode, /*OUT*/&v)) { assert(nVisible < nActive); visibleSorted[nVisible++] = v; } } assert(nVisible > 0); qsort(/*IN/OUT*/visibleSorted,nVisible,sizeof(besthit_t),CompareHitsByCriterion); /* Only keep the top m items, and try to avoid duplicating i->j with j->i Note that visible(i) -> j does not necessarily imply visible(j) -> i, so we store what the pairing was (or -1 for not used yet) */ int *inTopVisible = malloc(sizeof(int) * NJ->maxnodes); int i; for (i = 0; i < NJ->maxnodes; i++) inTopVisible[i] = -1; if (verbose > 2) fprintf(stderr, "top-hit search: nActive %d nVisible %d considering up to %d items\n", nActive, nVisible, tophits->m); /* save the sorted indices in 
topvisible */ int iSave = 0; for (i = 0; i < nVisible && iSave < tophits->nTopVisible; i++) { besthit_t *v = &visibleSorted[i]; if (inTopVisible[v->i] != v->j) { /* not seen already */ tophits->topvisible[iSave++] = v->i; inTopVisible[v->i] = v->j; inTopVisible[v->j] = v->i; } } while(iSave < tophits->nTopVisible) tophits->topvisible[iSave++] = -1; myfree(visibleSorted, sizeof(besthit_t)*nActive); myfree(inTopVisible, sizeof(int) * NJ->maxnodes); tophits->topvisibleAge = 0; if (verbose > 2) { fprintf(stderr, "Reset TopVisible: "); for (i = 0; i < tophits->nTopVisible; i++) { int iNode = tophits->topvisible[i]; if (iNode < 0) break; fprintf(stderr, " %d=>%d", iNode, tophits->visible[iNode].j); } fprintf(stderr,"\n"); } } /* Find best hit to do in O(N*log(N) + m*L*log(N)) time, by copying and sorting the visible list updating out-distances for the top (up to m) candidates selecting the best hit if !fastest then local hill-climbing for a better join, using best-hit lists only, and updating all out-distances in every best-hit list */ void TopHitNJSearch(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/top_hits_t *tophits, /*OUT*/besthit_t *join) { /* first, do we have at least m/2 candidates in topvisible? 
And remember the best one */ int nCandidate = 0; int iNodeBestCandidate = -1; double dBestCriterion = 1e20; int i; for (i = 0; i < tophits->nTopVisible; i++) { int iNode = tophits->topvisible[i]; besthit_t visible; if (GetVisible(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/tophits, iNode, /*OUT*/&visible)) { nCandidate++; if (iNodeBestCandidate < 0 || visible.criterion < dBestCriterion) { iNodeBestCandidate = iNode; dBestCriterion = visible.criterion; } } } tophits->topvisibleAge++; /* Note we may have only nActive/2 joins b/c we try to store them once */ if (2 * tophits->topvisibleAge > tophits->m || (3*nCandidate < tophits->nTopVisible && 3*nCandidate < nActive)) { /* recompute top visible */ if (verbose > 2) fprintf(stderr, "Resetting the top-visible list at nActive=%d\n",nActive); /* If age is low, then our visible set is becoming too sparse, because we have recently recomputed the top visible subset. This is very rare but can happen with -fastest. A quick-and-dirty solution is to walk up the parents to get additional entries in top hit lists. To ensure that the visible set becomes full, pick an arbitrary node if walking up terminates at self. 
*/ if (tophits->topvisibleAge <= 2) { if (verbose > 2) fprintf(stderr, "Expanding visible set by walking up to active nodes at nActive=%d\n", nActive); int iNode; for (iNode = 0; iNode < NJ->maxnode; iNode++) { if (NJ->parent[iNode] >= 0) continue; hit_t *v = &tophits->visible[iNode]; int newj = ActiveAncestor(NJ, v->j); if (newj >= 0 && newj != v->j) { if (newj == iNode) { /* pick arbitrarily */ newj = 0; while (NJ->parent[newj] >= 0 || newj == iNode) newj++; } assert(newj >= 0 && newj < NJ->maxnodes && newj != iNode && NJ->parent[newj] < 0); /* Set v to point to newj */ besthit_t bh = { iNode, newj, -1e20, -1e20, -1e20 }; SetDistCriterion(NJ, nActive, /*IN/OUT*/&bh); v->j = newj; v->dist = bh.dist; } } } ResetTopVisible(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/tophits); /* and recurse to try again */ TopHitNJSearch(NJ, nActive, tophits, join); return; } if (verbose > 2) fprintf(stderr, "Top-visible list size %d (nActive %d m %d)\n", nCandidate, nActive, tophits->m); assert(iNodeBestCandidate >= 0 && NJ->parent[iNodeBestCandidate] < 0); bool bSuccess = GetVisible(NJ, nActive, tophits, iNodeBestCandidate, /*OUT*/join); assert(bSuccess); assert(join->i >= 0 && NJ->parent[join->i] < 0); assert(join->j >= 0 && NJ->parent[join->j] < 0); if(fastest) return; int changed; do { changed = 0; besthit_t bestI; GetBestFromTopHits(join->i, NJ, nActive, tophits, /*OUT*/&bestI); assert(bestI.i == join->i); if (bestI.j != join->j && bestI.criterion < join->criterion) { changed = 1; if (verbose>2) fprintf(stderr,"BetterI\t%d\t%d\t%d\t%d\t%f\t%f\n", join->i,join->j,bestI.i,bestI.j, join->criterion,bestI.criterion); *join = bestI; } besthit_t bestJ; GetBestFromTopHits(join->j, NJ, nActive, tophits, /*OUT*/&bestJ); assert(bestJ.i == join->j); if (bestJ.j != join->i && bestJ.criterion < join->criterion) { changed = 1; if (verbose>2) fprintf(stderr,"BetterJ\t%d\t%d\t%d\t%d\t%f\t%f\n", join->i,join->j,bestJ.i,bestJ.j, join->criterion,bestJ.criterion); *join = bestJ; } if(changed) 
nHillBetter++;
  } while(changed);
}

/* Counts gap positions (NOCODE) in the profile of leaf iNode.
   Only valid for leaves (iNode < nSeq). */
int NGaps(/*IN*/NJ_t *NJ, int iNode) {
  assert(iNode < NJ->nSeq);
  int nGaps = 0;
  int p;
  for(p=0; p<NJ->nPos; p++) {
    if (NJ->profiles[iNode]->codes[p] == NOCODE)
      nGaps++;
  }
  return(nGaps);
}

/* qsort comparator: ascending by join criterion (smaller = better join) */
int CompareHitsByCriterion(const void *c1, const void *c2) {
  const besthit_t *hit1 = (besthit_t*)c1;
  const besthit_t *hit2 = (besthit_t*)c2;
  if (hit1->criterion < hit2->criterion) return(-1);
  if (hit1->criterion > hit2->criterion) return(1);
  return(0);
}

/* qsort comparator: ascending by (i, j) pair, used to group duplicates.
   NOTE(review): uses subtraction of int indices; fine here because node
   indices are small non-negative values, so the difference cannot overflow. */
int CompareHitsByIJ(const void *c1, const void *c2) {
  const besthit_t *hit1 = (besthit_t*)c1;
  const besthit_t *hit2 = (besthit_t*)c2;
  return hit1->i != hit2->i ? hit1->i - hit2->i : hit1->j - hit2->j;
}

/* Sorts besthits by criterion and stores the best nOut distinct hits
   (skipping self-hits, invalid entries, and immediate duplicates) into
   iNode's top-hit list, replacing any previous list.  Under OpenMP the
   per-node lock serializes the list replacement. */
void SortSaveBestHits(int iNode, /*IN/SORT*/besthit_t *besthits, int nIn, int nOut,
                      /*IN/OUT*/top_hits_t *tophits) {
  assert(nIn > 0);
  assert(nOut > 0);
  top_hits_list_t *l = &tophits->top_hits_lists[iNode];
  /*  */
  qsort(/*IN/OUT*/besthits,nIn,sizeof(besthit_t),CompareHitsByCriterion);

  /* First count how many we will save
     Not sure if removing duplicates is actually necessary.
*/ int nSave = 0; int jLast = -1; int iBest; for (iBest = 0; iBest < nIn && nSave < nOut; iBest++) { if (besthits[iBest].i < 0) continue; assert(besthits[iBest].i == iNode); int j = besthits[iBest].j; if (j != iNode && j != jLast && j >= 0) { nSave++; jLast = j; } } assert(nSave > 0); #ifdef OPENMP omp_set_lock(&tophits->locks[iNode]); #endif if (l->hits != NULL) { l->hits = myfree(l->hits, l->nHits * sizeof(hit_t)); l->nHits = 0; } l->hits = mymalloc(sizeof(hit_t) * nSave); l->nHits = nSave; int iSave = 0; jLast = -1; for (iBest = 0; iBest < nIn && iSave < nSave; iBest++) { int j = besthits[iBest].j; if (j != iNode && j != jLast && j >= 0) { l->hits[iSave].j = j; l->hits[iSave].dist = besthits[iBest].dist; iSave++; jLast = j; } } #ifdef OPENMP omp_unset_lock(&tophits->locks[iNode]); #endif assert(iSave == nSave); } void TransferBestHits(/*IN/UPDATE*/NJ_t *NJ, int nActive, int iNode, /*IN*/besthit_t *oldhits, int nOldHits, /*OUT*/besthit_t *newhits, bool updateDistances) { assert(iNode >= 0); assert(NJ->parent[iNode] < 0); int iBest; for(iBest = 0; iBest < nOldHits; iBest++) { besthit_t *old = &oldhits[iBest]; besthit_t *new = &newhits[iBest]; new->i = iNode; new->j = ActiveAncestor(/*IN*/NJ, old->j); new->dist = old->dist; /* may get reset below */ new->weight = old->weight; new->criterion = old->criterion; if(new->j < 0 || new->j == iNode) { new->weight = 0; new->dist = -1e20; new->criterion = 1e20; } else if (new->i != old->i || new->j != old->j) { if (updateDistances) SetDistCriterion(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/new); else { new->dist = -1e20; new->criterion = 1e20; } } else { if (updateDistances) SetCriterion(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/new); else new->criterion = 1e20; /* leave dist alone */ } } } void HitsToBestHits(/*IN*/hit_t *hits, int nHits, int iNode, /*OUT*/besthit_t *newhits) { int i; for (i = 0; i < nHits; i++) { hit_t *hit = &hits[i]; besthit_t *bh = &newhits[i]; bh->i = iNode; bh->j = hit->j; bh->dist = hit->dist; bh->criterion = 
1e20;
    bh->weight = -1;		/* not the true value -- we compute these directly when needed */
  }
}

/* Convert one stored hit into a besthit_t for node i. The distance is copied;
   criterion and weight are placeholders (recomputed directly when needed). */
besthit_t HitToBestHit(int i, hit_t hit) {
  besthit_t bh;
  bh.i = i;
  bh.j = hit.j;
  bh.dist = hit.dist;
  bh.criterion = 1e20;
  bh.weight = -1;
  return(bh);
}

/* Short description of the OpenMP configuration for status messages, or the
   empty string if not compiled with OpenMP.
   Note: returns a pointer to a static buffer -- not reentrant. */
char *OpenMPString(void) {
#ifdef OPENMP
  static char buf[100];
  sprintf(buf, ", OpenMP (%d threads)", omp_get_max_threads());
  return(buf);
#else
  return("");
#endif
}

/* Standard normal cumulative distribution function P(Z <= x).
   Algorithm 26.2.17 from Abromowitz and Stegun, Handbook of Mathematical Functions
   Absolute accuracy of only about 1e-7, which is enough for us
*/
double pnorm(double x)
{
  double b1 = 0.319381530;
  double b2 = -0.356563782;
  double b3 = 1.781477937;
  double b4 = -1.821255978;
  double b5 = 1.330274429;
  double p = 0.2316419;
  double c = 0.39894228;

  if(x >= 0.0) {
    double t = 1.0 / ( 1.0 + p * x );
    /* Horner evaluation of the degree-5 polynomial in t */
    return (1.0 - c * exp( -x * x / 2.0 ) * t *
	    ( t *( t * ( t * ( t * b5 + b4 ) + b3 ) + b2 ) + b1 ));
  }
  /*else*/
  double t = 1.0 / ( 1.0 - p * x );
  return ( c * exp( -x * x / 2.0 ) * t *
	   ( t *( t * ( t * ( t * b5 + b4 ) + b3 ) + b2 ) + b1 ));
}

/* malloc wrapper: returns NULL only for sz == 0, exits on allocation failure,
   and tracks cumulative (szAllAlloc) and current (mymallocUsed) usage,
   plus peak heap if TRACK_MEMORY is defined. */
void *mymalloc(size_t sz) {
  if (sz == 0)
    return(NULL);
  void *new = malloc(sz);
  if (new == NULL) {
    fprintf(stderr, "Out of memory\n");
    exit(1);
  }
  szAllAlloc += sz;
  mymallocUsed += sz;
#ifdef TRACK_MEMORY
  struct mallinfo mi = mallinfo();
  if (mi.arena+mi.hblkhd > maxmallocHeap)
    maxmallocHeap = mi.arena+mi.hblkhd;
#endif
  /* gcc malloc should always return 16-byte-aligned values...
*/ assert(IS_ALIGNED(new)); return (new); } void *mymemdup(void *data, size_t sz) { if(data==NULL) return(NULL); void *new = mymalloc(sz); memcpy(/*to*/new, /*from*/data, sz); return(new); } void *myrealloc(void *data, size_t szOld, size_t szNew, bool bCopy) { if (data == NULL && szOld == 0) return(mymalloc(szNew)); if (data == NULL || szOld == 0 || szNew == 0) { fprintf(stderr,"Empty myrealloc\n"); exit(1); } if (szOld == szNew) return(data); void *new = NULL; if (bCopy) { /* Try to reduce memory fragmentation by allocating anew and copying Seems to help in practice */ new = mymemdup(data, szNew); myfree(data, szOld); } else { new = realloc(data,szNew); if (new == NULL) { fprintf(stderr, "Out of memory\n"); exit(1); } assert(IS_ALIGNED(new)); szAllAlloc += (szNew-szOld); mymallocUsed += (szNew-szOld); #ifdef TRACK_MEMORY struct mallinfo mi = mallinfo(); if (mi.arena+mi.hblkhd > maxmallocHeap) maxmallocHeap = mi.arena+mi.hblkhd; #endif } return(new); } void *myfree(void *p, size_t sz) { if(p==NULL) return(NULL); free(p); mymallocUsed -= sz; return(NULL); } /******************************************************************************/ /* Minimization of a 1-dimensional function by Brent's method (Numerical Recipes) * Borrowed from Tree-Puzzle 5.1 util.c under GPL * Modified by M.N.P to pass in the accessory data for the optimization function, * to use 2x bounds around the starting guess and expand them if necessary, * and to use both a fractional and an absolute tolerance */ #define ITMAX 100 #define CGOLD 0.3819660 #define TINY 1.0e-20 #define ZEPS 1.0e-10 #define SHFT(a,b,c,d) (a)=(b);(b)=(c);(c)=(d); #define SIGN(a,b) ((b) >= 0.0 ? 
fabs(a) : -fabs(a)) /* Brents method in one dimension */ double brent(double ax, double bx, double cx, double (*f)(double, void *), void *data, double ftol, double atol, double *foptx, double *f2optx, double fax, double fbx, double fcx) { int iter; double a,b,d=0,etemp,fu,fv,fw,fx,p,q,r,tol1,tol2,u,v,w,x,xm; double xw,wv,vx; double e=0.0; a=(ax < cx ? ax : cx); b=(ax > cx ? ax : cx); x=bx; fx=fbx; if (fax < fcx) { w=ax; fw=fax; v=cx; fv=fcx; } else { w=cx; fw=fcx; v=ax; fv=fax; } for (iter=1;iter<=ITMAX;iter++) { xm=0.5*(a+b); tol1=ftol*fabs(x); tol2=2.0*(tol1+ZEPS); if (fabs(x-xm) <= (tol2-0.5*(b-a)) || fabs(a-b) < atol) { *foptx = fx; xw = x-w; wv = w-v; vx = v-x; *f2optx = 2.0*(fv*xw + fx*wv + fw*vx)/ (v*v*xw + x*x*wv + w*w*vx); return x; } if (fabs(e) > tol1) { r=(x-w)*(fx-fv); q=(x-v)*(fx-fw); p=(x-v)*q-(x-w)*r; q=2.0*(q-r); if (q > 0.0) p = -p; q=fabs(q); etemp=e; e=d; if (fabs(p) >= fabs(0.5*q*etemp) || p <= q*(a-x) || p >= q*(b-x)) d=CGOLD*(e=(x >= xm ? a-x : b-x)); else { d=p/q; u=x+d; if (u-a < tol2 || b-u < tol2) d=SIGN(tol1,xm-x); } } else { d=CGOLD*(e=(x >= xm ? a-x : b-x)); } u=(fabs(d) >= tol1 ? 
x+d : x+SIGN(tol1,d)); fu=(*f)(u,data); if (fu <= fx) { if (u >= x) a=x; else b=x; SHFT(v,w,x,u) SHFT(fv,fw,fx,fu) } else { if (u < x) a=u; else b=u; if (fu <= fw || w == x) { v=w; w=u; fv=fw; fw=fu; } else if (fu <= fv || v == x || v == w) { v=u; fv=fu; } } } *foptx = fx; xw = x-w; wv = w-v; vx = v-x; *f2optx = 2.0*(fv*xw + fx*wv + fw*vx)/ (v*v*xw + x*x*wv + w*w*vx); return x; } /* brent */ #undef ITMAX #undef CGOLD #undef ZEPS #undef SHFT #undef SIGN /* one-dimensional minimization - as input a lower and an upper limit and a trial value for the minimum is needed: xmin < xguess < xmax the function and a fractional tolerance has to be specified onedimenmin returns the optimal x value and the value of the function and its second derivative at this point */ double onedimenmin(double xmin, double xguess, double xmax, double (*f)(double,void*), void *data, double ftol, double atol, /*OUT*/double *fx, /*OUT*/double *f2x) { double optx, ax, bx, cx, fa, fb, fc; /* first attempt to bracketize minimum */ if (xguess == xmin) { ax = xmin; bx = 2.0*xguess; cx = 10.0*xguess; } else if (xguess <= 2.0 * xmin) { ax = xmin; bx = xguess; cx = 5.0*xguess; } else { ax = 0.5*xguess; bx = xguess; cx = 2.0*xguess; } if (cx > xmax) cx = xmax; if (bx >= cx) bx = 0.5*(ax+cx); if (verbose > 4) fprintf(stderr, "onedimenmin lo %.4f guess %.4f hi %.4f range %.4f %.4f\n", ax, bx, cx, xmin, xmax); /* ideally this range includes the true minimum, i.e., fb < fa and fb < fc if not, we gradually expand the boundaries until it does, or we near the boundary of the allowed range and use that */ fa = (*f)(ax,data); fb = (*f)(bx,data); fc = (*f)(cx,data); while(fa < fb && ax > xmin) { ax = (ax+xmin)/2.0; if (ax < 2.0*xmin) /* give up on shrinking the region */ ax = xmin; fa = (*f)(ax,data); } while(fc < fb && cx < xmax) { cx = (cx+xmax)/2.0; if (cx > xmax * 0.95) cx = xmax; fc = (*f)(cx,data); } optx = brent(ax, bx, cx, f, data, ftol, atol, fx, f2x, fa, fb, fc); if (verbose > 4) fprintf(stderr, 
"onedimenmin reaches optimum f(%.4f) = %.4f f2x %.4f\n", optx, *fx, *f2x); return optx; /* return optimal x */ } /* onedimenmin */ /* Numerical code for the gamma distribution is modified from the PhyML 3 code (GNU public license) of Stephane Guindon */ double LnGamma (double alpha) { /* returns ln(gamma(alpha)) for alpha>0, accurate to 10 decimal places. Stirling's formula is used for the central polynomial part of the procedure. Pike MC & Hill ID (1966) Algorithm 291: Logarithm of the gamma function. Communications of the Association for Computing Machinery, 9:684 */ double x=alpha, f=0, z; if (x<7) { f=1; z=x-1; while (++z<7) f*=z; x=z; f=-(double)log(f); } z = 1/(x*x); return f + (x-0.5)*(double)log(x) - x + .918938533204673 + (((-.000595238095238*z+.000793650793651)*z-.002777777777778)*z +.083333333333333)/x; } double IncompleteGamma(double x, double alpha, double ln_gamma_alpha) { /* returns the incomplete gamma ratio I(x,alpha) where x is the upper limit of the integration and alpha is the shape parameter. returns (-1) if in error ln_gamma_alpha = ln(Gamma(alpha)), is almost redundant. (1) series expansion if (alpha>x || x<=1) (2) continued fraction otherwise RATNEST FORTRAN by Bhattacharjee GP (1970) The incomplete gamma integral. 
Applied Statistics, 19: 285-287 (AS32) */ int i; double p=alpha, g=ln_gamma_alpha; double accurate=1e-8, overflow=1e30; double factor, gin=0, rn=0, a=0,b=0,an=0,dif=0, term=0, pn[6]; if (x==0) return (0); if (x<0 || p<=0) return (-1); factor=(double)exp(p*(double)log(x)-x-g); if (x>1 && x>=p) goto l30; /* (1) series expansion */ gin=1; term=1; rn=p; l20: rn++; term*=x/rn; gin+=term; if (term > accurate) goto l20; gin*=factor/p; goto l50; l30: /* (2) continued fraction */ a=1-p; b=a+x+1; term=0; pn[0]=1; pn[1]=x; pn[2]=x+1; pn[3]=x*b; gin=pn[2]/pn[3]; l32: a++; b+=2; term++; an=a*term; for (i=0; i<2; i++) pn[i+4]=b*pn[i+2]-an*pn[i]; if (pn[5] == 0) goto l35; rn=pn[4]/pn[5]; dif=fabs(gin-rn); if (dif>accurate) goto l34; if (dif<=accurate*rn) goto l42; l34: gin=rn; l35: for (i=0; i<4; i++) pn[i]=pn[i+2]; if (fabs(pn[4]) < overflow) goto l32; for (i=0; i<4; i++) pn[i]/=overflow; goto l32; l42: gin=1-factor*gin; l50: return (gin); } double PGamma(double x, double alpha) { /* scale = 1/alpha */ return IncompleteGamma(x*alpha,alpha,LnGamma(alpha)); } /* helper function to subtract timval structures */ /* Subtract the `struct timeval' values X and Y, storing the result in RESULT. Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract (struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/
  return x->tv_sec < y->tv_sec;
}

/* Return the elapsed wall-clock time, in seconds, since *clock_start */
double clockDiff(/*IN*/struct timeval *clock_start) {
  struct timeval time_now, elapsed;
  gettimeofday(/*OUT*/&time_now,NULL);
  timeval_subtract(/*OUT*/&elapsed,/*IN*/&time_now,/*IN*/clock_start);
  return(elapsed.tv_sec + elapsed.tv_usec*1e-6);
}

/* The random number generator is taken from D E Knuth
   http://www-cs-faculty.stanford.edu/~knuth/taocp.html
*/
/*    This program by D E Knuth is in the public domain and freely copyable.
 *    It is explained in Seminumerical Algorithms, 3rd edition, Section 3.6
 *    (or in the errata to the 2nd edition --- see
 *        http://www-cs-faculty.stanford.edu/~knuth/taocp.html
 *    in the changes to Volume 2 on pages 171 and following).              */

/*    N.B. The MODIFICATIONS introduced in the 9th printing (2002) are
      included here; there's no backwards compatibility with the original. */

/*    This version also adopts Brendan McKay's suggestion to
      accommodate naive users who forget to call ran_start(seed).          */

/*    If you find any bugs, please report them immediately to
 *                 taocp@cs.stanford.edu
 *    (and you will be rewarded if the bug is genuine). Thanks!            */

/************ see the book for explanations and caveats!
*******************/ /************ in particular, you need two's complement arithmetic **********/ #define KK 100 /* the long lag */ #define LL 37 /* the short lag */ #define MM (1L<<30) /* the modulus */ #define mod_diff(x,y) (((x)-(y))&(MM-1)) /* subtraction mod MM */ long ran_x[KK]; /* the generator state */ #ifdef __STDC__ void ran_array(long aa[],int n) #else void ran_array(aa,n) /* put n new random numbers in aa */ long *aa; /* destination */ int n; /* array length (must be at least KK) */ #endif { register int i,j; for (j=0;j<KK;j++) aa[j]=ran_x[j]; for (;j<n;j++) aa[j]=mod_diff(aa[j-KK],aa[j-LL]); for (i=0;i<LL;i++,j++) ran_x[i]=mod_diff(aa[j-KK],aa[j-LL]); for (;i<KK;i++,j++) ran_x[i]=mod_diff(aa[j-KK],ran_x[i-LL]); } /* the following routines are from exercise 3.6--15 */ /* after calling ran_start, get new randoms by, e.g., "x=ran_arr_next()" */ #define QUALITY 1009 /* recommended quality level for high-res use */ long ran_arr_buf[QUALITY]; long ran_arr_dummy=-1, ran_arr_started=-1; long *ran_arr_ptr=&ran_arr_dummy; /* the next random number, or -1 */ #define TT 70 /* guaranteed separation between streams */ #define is_odd(x) ((x)&1) /* units bit of x */ #ifdef __STDC__ void ran_start(long seed) #else void ran_start(seed) /* do this before using ran_array */ long seed; /* selector for different streams */ #endif { register int t,j; long x[KK+KK-1]; /* the preparation buffer */ register long ss=(seed+2)&(MM-2); for (j=0;j<KK;j++) { x[j]=ss; /* bootstrap the buffer */ ss<<=1; if (ss>=MM) ss-=MM-2; /* cyclic shift 29 bits */ } x[1]++; /* make x[1] (and only x[1]) odd */ for (ss=seed&(MM-1),t=TT-1; t; ) { for (j=KK-1;j>0;j--) x[j+j]=x[j], x[j+j-1]=0; /* "square" */ for (j=KK+KK-2;j>=KK;j--) x[j-(KK-LL)]=mod_diff(x[j-(KK-LL)],x[j]), x[j-KK]=mod_diff(x[j-KK],x[j]); if (is_odd(ss)) { /* "multiply by z" */ for (j=KK;j>0;j--) x[j]=x[j-1]; x[0]=x[KK]; /* shift the buffer cyclically */ x[LL]=mod_diff(x[LL],x[KK]); } if (ss) ss>>=1; else t--; } for (j=0;j<LL;j++) 
ran_x[j+KK-LL]=x[j]; for (;j<KK;j++) ran_x[j-LL]=x[j]; for (j=0;j<10;j++) ran_array(x,KK+KK-1); /* warm things up */ ran_arr_ptr=&ran_arr_started; } #define ran_arr_next() (*ran_arr_ptr>=0? *ran_arr_ptr++: ran_arr_cycle()) long ran_arr_cycle() { if (ran_arr_ptr==&ran_arr_dummy) ran_start(314159L); /* the user forgot to initialize */ ran_array(ran_arr_buf,QUALITY); ran_arr_buf[KK]=-1; ran_arr_ptr=ran_arr_buf+1; return ran_arr_buf[0]; } /* end of code from Knuth */ double knuth_rand() { return(9.31322574615479e-10 * ran_arr_next()); /* multiply by 2**-30 */ } hashstrings_t *MakeHashtable(char **strings, int nStrings) { hashstrings_t *hash = (hashstrings_t*)mymalloc(sizeof(hashstrings_t)); hash->nBuckets = 8*nStrings; hash->buckets = (hashbucket_t*)mymalloc(sizeof(hashbucket_t) * hash->nBuckets); int i; for (i=0; i < hash->nBuckets; i++) { hash->buckets[i].string = NULL; hash->buckets[i].nCount = 0; hash->buckets[i].first = -1; } for (i=0; i < nStrings; i++) { hashiterator_t hi = FindMatch(hash, strings[i]); if (hash->buckets[hi].string == NULL) { /* save a unique entry */ assert(hash->buckets[hi].nCount == 0); hash->buckets[hi].string = strings[i]; hash->buckets[hi].nCount = 1; hash->buckets[hi].first = i; } else { /* record a duplicate entry */ assert(hash->buckets[hi].string != NULL); assert(strcmp(hash->buckets[hi].string, strings[i]) == 0); assert(hash->buckets[hi].first >= 0); hash->buckets[hi].nCount++; } } return(hash); } hashstrings_t *FreeHashtable(hashstrings_t* hash) { if (hash != NULL) { myfree(hash->buckets, sizeof(hashbucket_t) * hash->nBuckets); myfree(hash, sizeof(hashstrings_t)); } return(NULL); } #define MAXADLER 65521 hashiterator_t FindMatch(hashstrings_t *hash, char *string) { /* Adler-32 checksum */ unsigned int hashA = 1; unsigned int hashB = 0; char *p; for (p = string; *p != '\0'; p++) { hashA = ((unsigned int)*p + hashA); hashB = hashA+hashB; } hashA %= MAXADLER; hashB %= MAXADLER; hashiterator_t hi = (hashB*65536+hashA) % hash->nBuckets; 
/* linear probing: advance (with wraparound) until we hit an empty bucket
     or the bucket holding this exact string */
  while(hash->buckets[hi].string != NULL
	&& strcmp(hash->buckets[hi].string, string) != 0) {
    hi++;
    if (hi >= hash->nBuckets)
      hi = 0;
  }
  return(hi);
}

/* The stored string for this bucket, or NULL if the bucket is empty */
char *GetHashString(hashstrings_t *hash, hashiterator_t hi) {
  return(hash->buckets[hi].string);
}

/* How many input strings matched this bucket (0 if empty) */
int HashCount(hashstrings_t *hash, hashiterator_t hi) {
  return(hash->buckets[hi].nCount);
}

/* Index of the first input string stored in this bucket (-1 if empty) */
int HashFirst(hashstrings_t *hash, hashiterator_t hi) {
  return(hash->buckets[hi].first);
}

/* Collapse identical sequences in the alignment. Builds:
     uniqueSeq[iUnique]   -> pointer to the representative sequence
     uniqueFirst[iUnique] -> index in aln of the first occurrence
     alnNext[i]           -> next duplicate of sequence i in aln, or -1
     alnToUniq[i]         -> unique index for alignment entry i
   The returned uniquify_t and its arrays are mymalloc'd; sequence strings
   are shared with aln (not copied). */
uniquify_t *UniquifyAln(alignment_t *aln) {
  int nUniqueSeq = 0;
  char **uniqueSeq = (char**)mymalloc(aln->nSeq * sizeof(char*)); /* iUnique -> seq */
  int *uniqueFirst = (int*)mymalloc(aln->nSeq * sizeof(int)); /* iUnique -> iFirst in aln */
  int *alnNext = (int*)mymalloc(aln->nSeq * sizeof(int)); /* i in aln -> next, or -1 */
  int *alnToUniq = (int*)mymalloc(aln->nSeq * sizeof(int)); /* i in aln -> iUnique; many -> -1 */
  int i;
  for (i = 0; i < aln->nSeq; i++) {
    uniqueSeq[i] = NULL;
    uniqueFirst[i] = -1;
    alnNext[i] = -1;
    alnToUniq[i] = -1;
  }
  hashstrings_t *hashseqs = MakeHashtable(aln->seqs, aln->nSeq);
  for (i=0; i<aln->nSeq; i++) {
    hashiterator_t hi = FindMatch(hashseqs,aln->seqs[i]);
    int first = HashFirst(hashseqs,hi);
    if (first == i) {
      /* first occurrence: register a new unique sequence */
      uniqueSeq[nUniqueSeq] = aln->seqs[i];
      uniqueFirst[nUniqueSeq] = i;
      alnToUniq[i] = nUniqueSeq;
      nUniqueSeq++;
    } else {
      /* duplicate: append to the linked list rooted at the first occurrence */
      int last = first;
      while (alnNext[last] != -1)
	last = alnNext[last];
      assert(last>=0);
      alnNext[last] = i;
      assert(alnToUniq[last] >= 0 && alnToUniq[last] < nUniqueSeq);
      alnToUniq[i] = alnToUniq[last];
    }
  }
  assert(nUniqueSeq>0);
  hashseqs = FreeHashtable(hashseqs);

  uniquify_t *uniquify = (uniquify_t*)mymalloc(sizeof(uniquify_t));
  uniquify->nSeq = aln->nSeq;
  uniquify->nUnique = nUniqueSeq;
  uniquify->uniqueFirst = uniqueFirst;
  uniquify->alnNext = alnNext;
  uniquify->alnToUniq = alnToUniq;
  uniquify->uniqueSeq = uniqueSeq;
  return(uniquify);
}

/* Free a uniquify_t and its arrays (sequence strings are owned by the
   alignment and are not freed here). Always returns NULL. */
uniquify_t *FreeUniquify(uniquify_t *unique) {
  if (unique != NULL) {
    myfree(unique->uniqueFirst, sizeof(int)*unique->nSeq);
    myfree(unique->alnNext,
sizeof(int)*unique->nSeq); myfree(unique->alnToUniq, sizeof(int)*unique->nSeq); myfree(unique->uniqueSeq, sizeof(char*)*unique->nSeq); myfree(unique,sizeof(uniquify_t)); unique = NULL; } return(unique); } traversal_t InitTraversal(NJ_t *NJ) { traversal_t worked = (bool*)mymalloc(sizeof(bool)*NJ->maxnodes); int i; for (i=0; i<NJ->maxnodes; i++) worked[i] = false; return(worked); } void SkipTraversalInto(int node, /*IN/OUT*/traversal_t traversal) { traversal[node] = true; } int TraversePostorder(int node, NJ_t *NJ, /*IN/OUT*/traversal_t traversal, /*OPTIONAL OUT*/bool *pUp) { if (pUp) *pUp = false; while(1) { assert(node >= 0); /* move to a child if possible */ bool found = false; int iChild; for (iChild=0; iChild < NJ->child[node].nChild; iChild++) { int child = NJ->child[node].child[iChild]; if (!traversal[child]) { node = child; found = true; break; } } if (found) continue; /* keep moving down */ if (!traversal[node]) { traversal[node] = true; return(node); } /* If we've already done this node, need to move up */ if (node == NJ->root) return(-1); /* nowhere to go -- done traversing */ node = NJ->parent[node]; /* If we go up to someplace that was already marked as visited, this is due to a change in topology, so return it marked as "up" */ if (pUp && traversal[node]) { *pUp = true; return(node); } } } traversal_t FreeTraversal(traversal_t traversal, NJ_t *NJ) { myfree(traversal, sizeof(bool)*NJ->maxnodes); return(NULL); } profile_t **UpProfiles(NJ_t *NJ) { profile_t **upProfiles = (profile_t**)mymalloc(sizeof(profile_t*)*NJ->maxnodes); int i; for (i=0; i<NJ->maxnodes; i++) upProfiles[i] = NULL; return(upProfiles); } profile_t *GetUpProfile(/*IN/OUT*/profile_t **upProfiles, NJ_t *NJ, int outnode, bool useML) { assert(outnode != NJ->root && outnode >= NJ->nSeq); /* not for root or leaves */ if (upProfiles[outnode] != NULL) return(upProfiles[outnode]); int depth; int *pathToRoot = PathToRoot(NJ, outnode, /*OUT*/&depth); int i; /* depth-1 is root */ for (i = depth-2; 
i>=0; i--) { int node = pathToRoot[i]; if (upProfiles[node] == NULL) { /* Note -- SetupABCD may call GetUpProfile, but it should do it farther up in the path to the root */ profile_t *profiles[4]; int nodeABCD[4]; SetupABCD(NJ, node, /*OUT*/profiles, /*IN/OUT*/upProfiles, /*OUT*/nodeABCD, useML); if (useML) { /* If node is a child of root, then the 4th profile is of the 2nd root-sibling of node Otherwise, the 4th profile is the up-profile of the parent of node, and that is the branch-length we need */ double lenC = NJ->branchlength[nodeABCD[2]]; double lenD = NJ->branchlength[nodeABCD[3]]; if (verbose > 3) { fprintf(stderr, "Computing UpProfile for node %d with lenC %.4f lenD %.4f pair-loglk %.3f\n", node, lenC, lenD, PairLogLk(profiles[2],profiles[3],lenC+lenD,NJ->nPos,NJ->transmat,&NJ->rates, /*site_lk*/NULL)); PrintNJInternal(stderr, NJ, /*useLen*/true); } upProfiles[node] = PosteriorProfile(/*C*/profiles[2], /*D*/profiles[3], lenC, lenD, NJ->transmat, &NJ->rates, NJ->nPos, NJ->nConstraints); } else { profile_t *profilesCDAB[4] = { profiles[2], profiles[3], profiles[0], profiles[1] }; double weight = QuartetWeight(profilesCDAB, NJ->distance_matrix, NJ->nPos); if (verbose>3) fprintf(stderr, "Compute upprofile of %d from %d and parents (vs. 
children %d %d) with weight %.3f\n", node, nodeABCD[2], nodeABCD[0], nodeABCD[1], weight); upProfiles[node] = AverageProfile(profiles[2], profiles[3], NJ->nPos, NJ->nConstraints, NJ->distance_matrix, weight); } } } FreePath(pathToRoot,NJ); assert(upProfiles[outnode] != NULL); return(upProfiles[outnode]); } profile_t *DeleteUpProfile(/*IN/OUT*/profile_t **upProfiles, NJ_t *NJ, int node) { assert(node>=0 && node < NJ->maxnodes); if (upProfiles[node] != NULL) upProfiles[node] = FreeProfile(upProfiles[node], NJ->nPos, NJ->nConstraints); /* returns NULL */ return(NULL); } profile_t **FreeUpProfiles(profile_t **upProfiles, NJ_t *NJ) { int i; int nUsed = 0; for (i=0; i < NJ->maxnodes; i++) { if (upProfiles[i] != NULL) nUsed++; DeleteUpProfile(upProfiles, NJ, i); } myfree(upProfiles, sizeof(profile_t*)*NJ->maxnodes); if (verbose >= 3) fprintf(stderr,"FreeUpProfiles -- freed %d\n", nUsed); return(NULL); } int *PathToRoot(NJ_t *NJ, int node, /*OUT*/int *outDepth) { int *pathToRoot = (int*)mymalloc(sizeof(int)*NJ->maxnodes); int depth = 0; int ancestor = node; while(ancestor >= 0) { pathToRoot[depth] = ancestor; ancestor = NJ->parent[ancestor]; depth++; } *outDepth = depth; return(pathToRoot); } int *FreePath(int *path, NJ_t *NJ) { myfree(path, sizeof(int)*NJ->maxnodes); return(NULL); } transition_matrix_t *CreateGTR(double *r/*ac ag at cg ct gt*/, double *f/*acgt*/) { double matrix[4][MAXCODES]; assert(nCodes==4); int i, j; /* Place rates onto a symmetric matrix, but correct by f(target), so that stationary distribution f[] is maintained Leave diagonals as 0 (CreateTransitionMatrix will fix them) */ int imat = 0; for (i = 0; i < nCodes; i++) { matrix[i][i] = 0; for (j = i+1; j < nCodes; j++) { double rate = r[imat++]; assert(rate > 0); /* Want t(matrix) * f to be 0 */ matrix[i][j] = rate * f[i]; matrix[j][i] = rate * f[j]; } } /* Compute average mutation rate */ double total_rate = 0; for (i = 0; i < nCodes; i++) for (j = 0; j < nCodes; j++) total_rate += f[i] * 
matrix[i][j]; assert(total_rate > 1e-6); double inv = 1.0/total_rate; for (i = 0; i < nCodes; i++) for (j = 0; j < nCodes; j++) matrix[i][j] *= inv; return(CreateTransitionMatrix(matrix,f)); } transition_matrix_t *CreateTransitionMatrix(/*IN*/double matrix[MAXCODES][MAXCODES], /*IN*/double stat[MAXCODES]) { int i,j,k; transition_matrix_t *transmat = mymalloc(sizeof(transition_matrix_t)); double sqrtstat[20]; for (i = 0; i < nCodes; i++) { transmat->stat[i] = stat[i]; transmat->statinv[i] = 1.0/stat[i]; sqrtstat[i] = sqrt(stat[i]); } double sym[20*20]; /* symmetrized matrix M' */ /* set diagonals so columns sums are 0 before symmetrization */ for (i = 0; i < nCodes; i++) for (j = 0; j < nCodes; j++) sym[nCodes*i+j] = matrix[i][j]; for (j = 0; j < nCodes; j++) { double sum = 0; sym[nCodes*j+j] = 0; for (i = 0; i < nCodes; i++) sum += sym[nCodes*i+j]; sym[nCodes*j+j] = -sum; } /* M' = S**-1 M S */ for (i = 0; i < nCodes; i++) for (j = 0; j < nCodes; j++) sym[nCodes*i+j] *= sqrtstat[j]/sqrtstat[i]; /* eigen decomposition of M' -- note that eigenW is the transpose of what we want, which is eigenvectors in columns */ double eigenW[20*20], eval[20], e[20]; for (i = 0; i < nCodes*nCodes; i++) eigenW[i] = sym[i]; tred2(eigenW, nCodes, nCodes, eval, e); tqli(eval, e, nCodes , nCodes, eigenW); /* save eigenvalues */ for (i = 0; i < nCodes; i++) transmat->eigenval[i] = eval[i]; /* compute eigen decomposition of M into t(codeFreq): V = S*W */ /* compute inverse of V in eigeninv: V**-1 = t(W) S**-1 */ for (i = 0; i < nCodes; i++) { for (j = 0; j < nCodes; j++) { transmat->eigeninv[i][j] = eigenW[nCodes*i+j] / sqrtstat[j]; transmat->eigeninvT[j][i] = transmat->eigeninv[i][j]; } } for (i = 0; i < nCodes; i++) for (j = 0; j < nCodes; j++) transmat->codeFreq[i][j] = eigenW[j*nCodes+i] * sqrtstat[i]; /* codeFreq[NOCODE] is the rotation of (1,1,...) 
not (1/nCodes,1/nCodes,...), which gives correct posterior probabilities */ for (j = 0; j < nCodes; j++) { transmat->codeFreq[NOCODE][j] = 0.0; for (i = 0; i < nCodes; i++) transmat->codeFreq[NOCODE][j] += transmat->codeFreq[i][j]; } /* save some posterior probabilities for approximating later: first, we compute P(B | A, t) for t = approxMLnearT, by using V * exp(L*t) * V**-1 */ double expvalues[MAXCODES]; for (i = 0; i < nCodes; i++) expvalues[i] = exp(approxMLnearT * transmat->eigenval[i]); double LVinv[MAXCODES][MAXCODES]; /* exp(L*t) * V**-1 */ for (i = 0; i < nCodes; i++) { for (j = 0; j < nCodes; j++) LVinv[i][j] = transmat->eigeninv[i][j] * expvalues[i]; } /* matrix transform for converting A -> B given t: transt[i][j] = P(j->i | t) */ double transt[MAXCODES][MAXCODES]; for (i = 0; i < nCodes; i++) { for (j = 0; j < nCodes; j++) { transt[i][j] = 0; for (k = 0; k < nCodes; k++) transt[i][j] += transmat->codeFreq[i][k] * LVinv[k][j]; } } /* nearP[i][j] = P(parent = j | both children are i) = P(j | i,i) ~ stat(j) * P(j->i | t)**2 */ for (i = 0; i < nCodes; i++) { double nearP[MAXCODES]; double tot = 0; for (j = 0; j < nCodes; j++) { assert(transt[j][i] > 0); assert(transmat->stat[j] > 0); nearP[j] = transmat->stat[j] * transt[i][j] * transt[i][j]; tot += nearP[j]; } assert(tot > 0); for (j = 0; j < nCodes; j++) nearP[j] *= 1.0/tot; /* save nearP in transmat->nearP[i][] */ for (j = 0; j < nCodes; j++) transmat->nearP[i][j] = nearP[j]; /* multiply by 1/stat and rotate nearP */ for (j = 0; j < nCodes; j++) nearP[j] /= transmat->stat[j]; for (j = 0; j < nCodes; j++) { double rot = 0; for (k = 0; k < nCodes; k++) rot += nearP[k] * transmat->codeFreq[i][j]; transmat->nearFreq[i][j] = rot; } } return(transmat); assert(0); } distance_matrix_t *TransMatToDistanceMat(transition_matrix_t *transmat) { if (transmat == NULL) return(NULL); distance_matrix_t *dmat = mymalloc(sizeof(distance_matrix_t)); int i, j; for (i=0; i<nCodes; i++) { for (j=0; j<nCodes; j++) { 
dmat->distances[i][j] = 0; /* never actually used */ dmat->eigeninv[i][j] = transmat->eigeninv[i][j]; dmat->codeFreq[i][j] = transmat->codeFreq[i][j]; } } /* eigentot . rotated-vector is the total frequency of the unrotated vector (used to normalize in NormalizeFreq() For transition matrices, we rotate by transpose of eigenvectors, so we need to multiply by the inverse matrix by 1....1 to get this vector, or in other words, sum the columns */ for(i = 0; i<nCodes; i++) { dmat->eigentot[i] = 0.0; for (j = 0; j<nCodes; j++) dmat->eigentot[i] += transmat->eigeninv[i][j]; } return(dmat); } /* Numerical recipes code for eigen decomposition (actually taken from RAxML rev_functions.c) */ void tred2 (double *a, const int n, const int np, double *d, double *e) { #define a(i,j) a[(j-1)*np + (i-1)] #define e(i) e[i-1] #define d(i) d[i-1] int i, j, k, l; double f, g, h, hh, scale; for (i = n; i > 1; i--) { l = i-1; h = 0; scale = 0; if ( l > 1 ) { for ( k = 1; k <= l; k++ ) scale += fabs(a(i,k)); if (scale == 0) e(i) = a(i,l); else { for (k = 1; k <= l; k++) { a(i,k) /= scale; h += a(i,k) * a(i,k); } f = a(i,l); g = -sqrt(h); if (f < 0) g = -g; e(i) = scale *g; h -= f*g; a(i,l) = f-g; f = 0; for (j = 1; j <=l ; j++) { a(j,i) = a(i,j) / h; g = 0; for (k = 1; k <= j; k++) g += a(j,k)*a(i,k); for (k = j+1; k <= l; k++) g += a(k,j)*a(i,k); e(j) = g/h; f += e(j)*a(i,j); } hh = f/(h+h); for (j = 1; j <= l; j++) { f = a(i,j); g = e(j) - hh * f; e(j) = g; for (k = 1; k <= j; k++) a(j,k) -= f*e(k) + g*a(i,k); } } } else e(i) = a(i,l); d(i) = h; } d(1) = 0; e(1) = 0; for (i = 1; i <= n; i++) { l = i-1; if (d(i) != 0) { for (j = 1; j <=l; j++) { g = 0; for (k = 1; k <= l; k++) g += a(i,k)*a(k,j); for (k=1; k <=l; k++) a(k,j) -= g * a(k,i); } } d(i) = a(i,i); a(i,i) = 1; for (j=1; j<=l; j++) a(i,j) = a(j,i) = 0; } return; #undef a #undef e #undef d } double pythag(double a, double b) { double absa = fabs(a), absb = fabs(b); return (absa > absb) ? 
absa * sqrt(1+ (absb/absa)*(absb/absa)) : absb == 0 ? 0 : absb * sqrt(1+ (absa/absb)*(absa/absb)); } void tqli(double *d, double *e, int n, int np, double *z) { #define z(i,j) z[(j-1)*np + (i-1)] #define e(i) e[i-1] #define d(i) d[i-1] int i = 0, iter = 0, k = 0, l = 0, m = 0; double b = 0, c = 0, dd = 0, f = 0, g = 0, p = 0, r = 0, s = 0; for(i=2; i<=n; i++) e(i-1) = e(i); e(n) = 0; for (l = 1; l <= n; l++) { iter = 0; labelExtra: for (m = l; (m < n); m++) { dd = fabs(d(m))+fabs(d(m+1)); if (fabs(e(m))+dd == dd) break; } if (m != l) { assert(iter < 30); iter++; g = (d(l+1)-d(l))/(2*e(l)); r = pythag(g,1.); g = d(m)-d(l)+e(l)/(g+(g<0?-r:r)); s = 1; c = 1; p = 0; for (i = m-1; i>=l; i--) { f = s*e(i); b = c*e(i); r = pythag(f,g); e(i+1) = r; if (r == 0) { d (i+1) -= p; e (m) = 0; goto labelExtra; } s = f/r; c = g/r; g = d(i+1)-p; r = (d(i)-g)*s + 2*c*b; p = s*r; d(i+1) = g + p; g = c*r - b; for (k=1; k <= n; k++) { f = z(k,i+1); z(k,i+1) = s * z(k,i) + c*f; z(k,i) = c * z(k,i) - s*f; } } d(l) -= p; e(l) = g; e(m) = 0; goto labelExtra; } } return; #undef z #undef e #undef d } #ifdef USE_SSE3 inline float mm_sum(register __m128 sum) { #if 1 /* stupider but faster */ float f[4] ALIGNED; _mm_store_ps(f,sum); return(f[0]+f[1]+f[2]+f[3]); #else /* first we get sum[0]+sum[1], sum[2]+sum[3] by selecting 0/1 and 2/3 */ sum = _mm_add_ps(sum,_mm_shuffle_ps(sum,sum,_MM_SHUFFLE(0,1,2,3))); /* then get sum[0]+sum[1]+sum[2]+sum[3] by selecting 0/1 and 0/1 */ sum = _mm_add_ps(sum,_mm_shuffle_ps(sum,sum,_MM_SHUFFLE(0,1,0,1))); float f; _mm_store_ss(&f, sum); /* save the lowest word */ return(f); #endif } #endif void vector_multiply(/*IN*/numeric_t *f1, /*IN*/numeric_t *f2, int n, /*OUT*/numeric_t *fOut) { #ifdef USE_SSE3 int i; for (i = 0; i < n; i += 4) { __m128 a, b, c; a = _mm_load_ps(f1+i); b = _mm_load_ps(f2+i); c = _mm_mul_ps(a, b); _mm_store_ps(fOut+i,c); } #else int i; for (i = 0; i < n; i++) fOut[i] = f1[i]*f2[i]; #endif } numeric_t vector_multiply_sum(/*IN*/numeric_t *f1, 
/*IN*/numeric_t *f2, int n) {
#ifdef USE_SSE3
  /* SSE path: assumes n is a multiple of 4 and the arrays are 16-byte
     aligned (required by _mm_load_ps) -- TODO confirm allocator guarantees */
  if (n == 4)
    return(f1[0]*f2[0]+f1[1]*f2[1]+f1[2]*f2[2]+f1[3]*f2[3]);
  __m128 sum = _mm_setzero_ps();
  int i;
  for (i = 0; i < n; i += 4) {
    __m128 a, b, c;
    a = _mm_load_ps(f1+i);
    b = _mm_load_ps(f2+i);
    c = _mm_mul_ps(a, b);
    sum = _mm_add_ps(c, sum);
  }
  return(mm_sum(sum));
#else
  int i;
  numeric_t out = 0.0;
  for (i=0; i < n; i++)
    out += f1[i]*f2[i];
  return(out);
#endif
}

/* sum(f1*f2*f3) */
numeric_t vector_multiply3_sum(/*IN*/numeric_t *f1, /*IN*/numeric_t *f2, /*IN*/numeric_t* f3, int n) {
#ifdef USE_SSE3
  __m128 sum = _mm_setzero_ps();
  int i;
  for (i = 0; i < n; i += 4) {
    __m128 a1, a2, a3;
    a1 = _mm_load_ps(f1+i);
    a2 = _mm_load_ps(f2+i);
    a3 = _mm_load_ps(f3+i);
    sum = _mm_add_ps(_mm_mul_ps(_mm_mul_ps(a1,a2),a3),sum);
  }
  return(mm_sum(sum));
#else
  int i;
  numeric_t sum = 0.0;
  for (i = 0; i < n; i++)
    sum += f1[i]*f2[i]*f3[i];
  return(sum);
#endif
}

/* Returns dot(f1,fBy) * dot(f2,fBy), i.e. the product of the two
   dot products against the same rotation vector fBy */
numeric_t vector_dot_product_rot(/*IN*/numeric_t *f1, /*IN*/numeric_t *f2, /*IN*/numeric_t *fBy, int n) {
#ifdef USE_SSE3
  __m128 sum1 = _mm_setzero_ps();
  __m128 sum2 = _mm_setzero_ps();
  int i;
  for (i = 0; i < n; i += 4) {
    __m128 a1, a2, aBy;
    a1 = _mm_load_ps(f1+i);
    a2 = _mm_load_ps(f2+i);
    aBy = _mm_load_ps(fBy+i);
    sum1 = _mm_add_ps(_mm_mul_ps(a1, aBy), sum1);
    sum2 = _mm_add_ps(_mm_mul_ps(a2, aBy), sum2);
  }
  return(mm_sum(sum1)*mm_sum(sum2));
#else
  int i;
  numeric_t out1 = 0.0;
  numeric_t out2 = 0.0;
  for (i=0; i < n; i++) {
    out1 += f1[i]*fBy[i];
    out2 += f2[i]*fBy[i];
  }
  return(out1*out2);
#endif
}

/* sum of the n entries of f1 */
numeric_t vector_sum(/*IN*/numeric_t *f1, int n) {
#ifdef USE_SSE3
  if (n==4)
    return(f1[0]+f1[1]+f1[2]+f1[3]);
  __m128 sum = _mm_setzero_ps();
  int i;
  for (i = 0; i < n; i+=4) {
    __m128 a;
    a = _mm_load_ps(f1+i);
    sum = _mm_add_ps(a, sum);
  }
  return(mm_sum(sum));
#else
  numeric_t out = 0.0;
  int i;
  for (i = 0; i < n; i++)
    out += f1[i];
  return(out);
#endif
}

/* In-place scaling: f[i] *= fBy for all n entries */
void vector_multiply_by(/*IN/OUT*/numeric_t *f, /*IN*/numeric_t fBy, int n) {
  int i;
#ifdef USE_SSE3
  __m128 c = _mm_set1_ps(fBy);
  for (i = 0; i < n; i +=
4) {
    __m128 a, b;
    a = _mm_load_ps(f+i);
    b = _mm_mul_ps(a,c);
    _mm_store_ps(f+i,b);	/* aligned store back in place */
  }
#else
  for (i = 0; i < n; i++)
    f[i] *= fBy;
#endif
}

/* Weighted accumulate: fTot[i] += fAdd[i] * weight for i in [0,n). */
void vector_add_mult(/*IN/OUT*/numeric_t *fTot, /*IN*/numeric_t *fAdd, numeric_t weight, int n) {
#ifdef USE_SSE3
  int i;
  __m128 w = _mm_set1_ps(weight);
  /* NOTE(review): assumes n is a multiple of 4 and 16-byte-aligned inputs
     (_mm_load_ps / _mm_store_ps) -- confirm callers guarantee this. */
  for (i = 0; i < n; i += 4) {
    __m128 tot, add;
    tot = _mm_load_ps(fTot+i);
    add = _mm_load_ps(fAdd+i);
    _mm_store_ps(fTot+i, _mm_add_ps(tot, _mm_mul_ps(add,w)));
  }
#else
  int i;
  for (i = 0; i < n; i++)
    fTot[i] += fAdd[i] * weight;
#endif
}

/* out[j] = sum_k vec[k] * mat[k][j] -- the row-vector vec times mat
   (equivalently mat-transpose applied to vec, hence the "matrixt" name).
   Only the first 4 columns of each MAXCODES-wide row are used. */
void matrixt_by_vector4(/*IN*/numeric_t mat[4][MAXCODES], /*IN*/numeric_t vec[4], /*OUT*/numeric_t out[4]) {
#ifdef USE_SSE3
  /*__m128 v = _mm_load_ps(vec);*/
  __m128 o = _mm_setzero_ps();
  int j;
  /* result is a sum of vectors: sum(k) v[k] * mat[k][] */
  for (j = 0; j < 4; j++) {
    __m128 m = _mm_load_ps(&mat[j][0]);
    __m128 vj = _mm_load1_ps(&vec[j]);	/* is it faster to shuffle v? */
    o = _mm_add_ps(o, _mm_mul_ps(vj,m));
  }
  _mm_store_ps(out, o);
#else
  int j,k;
  for (j = 0; j < 4; j++) {
    double sum = 0;
    for (k = 0; k < 4; k++)
      sum += vec[k] * mat[k][j];
    out[j] = sum;
  }
#endif
}

/* Reads a tab-delimited 20x20 amino-acid rate matrix plus stationary
   frequencies from filename. Expected format:
     - header line: the 20 amino-acid codes (codesStringAA) separated by
       tabs, then "*" and a newline, matched exactly;
     - 20 data lines: the amino-acid letter, 20 transition rates, then the
       stationary frequency for that amino acid, tab separated.
   After parsing, the matrix is validated: frequencies positive and summing
   to 1; diagonal entries negative with dot(stat, diagonal) == -1 (i.e. the
   matrix is normalized to one expected substitution per unit time);
   off-diagonal entries nonnegative; every column summing to 0.
   Any failure prints a message and exit(1)s.
   NOTE(review): matrix/buf are function-static, so this is not reentrant;
   atof() cannot report parse errors (malformed numeric fields read as 0);
   and no fclose(fp) appears in the visible body -- verify the descriptor
   is released (or that this is only called once at startup). */
transition_matrix_t *ReadAATransitionMatrix(/*IN*/char *filename) {
  assert(nCodes==20);
  double stat[20];
  static double matrix[MAXCODES][MAXCODES];	/* static: keeps this off the stack */
  static char buf[BUFFER_SIZE];
  FILE *fp = fopen(filename, "r");
  if (fp == NULL) {
    fprintf(stderr, "Cannot read transition matrix file %s\n", filename);
    exit(1);
  }
  /* Build the exact header line we require: code, tab, ..., "*", newline */
  char expected[2*MAXCODES+20];
  int posE = 0;
  int i, j;
  for (i = 0; i < 20; i++) {
    expected[posE++] = codesStringAA[i];
    expected[posE++] = '\t';
  }
  expected[posE++] = '*';
  expected[posE++] = '\n';
  expected[posE++] = '\0';
  if (fgets(buf, sizeof(buf), fp) == NULL) {
    fprintf(stderr, "Error reading header line from transition matrix file\n");
    exit(1);
  }
  if (strcmp(buf, expected) != 0) {
    fprintf(stderr, "Invalid header line in transition matrix file, it must match:\n%s\n", expected);
    exit(1);
  }
  /* One line per amino acid: letter, 20 rates, stationary frequency */
  for (i = 0; i < 20; i++) {
    if (fgets(buf, sizeof(buf), fp) == NULL) {
      fprintf(stderr, "Error reading matrix line\n");
      exit(1);
    }
    char *field = strtok(buf,"\t\r\n");
    if (field == NULL || strlen(field) != 1 || field[0] != codesStringAA[i]) {
      fprintf(stderr, "Line for amino acid %c does not have the expected beginning\n", codesStringAA[i]);
      exit(1);
    }
    for (j = 0; j < 20; j++) {
      field = strtok(NULL, "\t\r\n");
      if (field == NULL) {
        fprintf(stderr, "Not enough fields for amino acid %c\n", codesStringAA[i]);
        exit(1);
      }
      matrix[i][j] = atof(field);
    }
    /* 22nd field on the row is the stationary frequency */
    field = strtok(NULL, "\t\r\n");
    if (field == NULL) {
      fprintf(stderr, "Not enough fields for amino acid %c\n", codesStringAA[i]);
      exit(1);
    }
    stat[i] = atof(field);
  }
  double tol = 1e-5;
  /* Verify that stat is positive and sums to 1 */
  double statTot = 0;
  for (i = 0; i < 20; i++) {
    if (stat[i] < tol) {
      fprintf(stderr, "stationary frequency for amino acid %c must be positive\n", codesStringAA[i]);
      exit(1);
    }
    statTot += stat[i];
  }
  if (fabs(statTot - 1) > tol) {
    fprintf(stderr, "stationary frequencies must sum to 1 -- actual sum is %g\n", statTot);
    exit(1);
  }
  /* Verify that diagonals are negative and dot product of stat and diagonals
     is -1 (unit substitution rate) */
  double totRate = 0;
  for (i = 0; i < 20; i++) {
    double diag = matrix[i][i];
    if (diag > -tol) {
      fprintf(stderr, "transition rate(%c,%c) must be negative\n", codesStringAA[i], codesStringAA[i]);
      exit(1);
    }
    totRate += stat[i] * diag;
  }
  if (fabs(totRate + 1) > tol) {
    fprintf(stderr, "Dot product of matrix diagonal and stationary frequencies must be -1 -- actual dot product is %g\n", totRate);
    exit(1);
  }
  /* Verify that each off-diagonal entry is nonnegative and that each column sums to 0 */
  for (j = 0; j < 20; j++) {
    double colSum = 0;
    for (i = 0; i < 20; i++) {
      double value = matrix[i][j];
      colSum += value;
      if (i != j && value < 0) {
        fprintf(stderr, "Off-diagonal matrix entry for (%c,%c) is negative\n", codesStringAA[i], codesStringAA[j]);
        exit(1);
      }
    }
    if (fabs(colSum) > tol) {
      fprintf(stderr, "Sum of column %c must be zero -- actual sum is %g\n", codesStringAA[j], colSum);
      exit(1);
    }
  }
return CreateTransitionMatrix(matrix, stat); } distance_matrix_t matrixBLOSUM45 = { /*distances*/ { {0, 1.31097856157468, 1.06573001937323, 1.2682782988532, 0.90471293383305, 1.05855446876905, 1.05232790675508, 0.769574440593014, 1.27579668305679, 0.964604099952603, 0.987178199640556, 1.05007594438157, 1.05464162250736, 1.1985987403937, 0.967404475245526, 0.700490199584332, 0.880060189098976, 1.09748548316685, 1.28141710375267, 0.800038509951648}, {1.31097856157468, 0, 0.8010890222701, 0.953340718498495, 1.36011107208122, 0.631543775840481, 0.791014908659279, 1.15694899265629, 0.761152570032029, 1.45014917711188, 1.17792001455227, 0.394661075648738, 0.998807558909651, 1.135143404599, 1.15432562628921, 1.05309036790541, 1.05010474413616, 1.03938321130789, 0.963216908696184, 1.20274751778601}, {1.06573001937323, 0.8010890222701, 0, 0.488217214273568, 1.10567116937273, 0.814970207038261, 0.810176440932339, 0.746487413974582, 0.61876156253224, 1.17886558630004, 1.52003670190022, 0.808442678243754, 1.2889025816028, 1.16264109995678, 1.18228799147301, 0.679475681649858, 0.853658619686283, 1.68988558988005, 1.24297493464833, 1.55207513886163}, {1.2682782988532, 0.953340718498495, 0.488217214273568, 0, 1.31581050011876, 0.769778474953791, 0.482077627352988, 0.888361752320536, 0.736360849050364, 1.76756333403346, 1.43574761894039, 0.763612910719347, 1.53386612356483, 1.74323672079854, 0.886347403928663, 0.808614044804528, 1.01590147813779, 1.59617804551619, 1.1740494822217, 1.46600946033173}, {0.90471293383305, 1.36011107208122, 1.10567116937273, 1.31581050011876, 0, 1.3836789310481, 1.37553994252576, 1.26740695314856, 1.32361065635259, 1.26087264215993, 1.02417540515351, 1.37259631233791, 1.09416720447891, 0.986982088723923, 1.59321190226694, 0.915638787768407, 0.913042853922533, 1.80744143643002, 1.3294417177004, 0.830022143283238}, {1.05855446876905, 0.631543775840481, 0.814970207038261, 0.769778474953791, 1.3836789310481, 0, 0.506942797642807, 1.17699648087288, 
0.614595446514896, 1.17092829494457, 1.19833088638994, 0.637341078675405, 0.806490842729072, 1.83315144709714, 0.932064479113502, 0.850321696813199, 1.06830084665916, 1.05739353225849, 0.979907428113788, 1.5416250309563}, {1.05232790675508, 0.791014908659279, 0.810176440932339, 0.482077627352988, 1.37553994252576, 0.506942797642807, 0, 1.17007322676118, 0.769786956320484, 1.46659942462342, 1.19128214039009, 0.633592151371708, 1.27269395724349, 1.44641491621774, 0.735428579892476, 0.845319988414402, 1.06201695511881, 1.324395996498, 1.22734387448031, 1.53255698189437}, {0.769574440593014, 1.15694899265629, 0.746487413974582, 0.888361752320536, 1.26740695314856, 1.17699648087288, 1.17007322676118, 0, 1.1259007054424, 1.7025415585924, 1.38293205218175, 1.16756929156758, 1.17264582493965, 1.33271035269688, 1.07564768421292, 0.778868281341681, 1.23287107008366, 0.968539655354582, 1.42479529031801, 1.41208067821187}, {1.27579668305679, 0.761152570032029, 0.61876156253224, 0.736360849050364, 1.32361065635259, 0.614595446514896, 0.769786956320484, 1.1259007054424, 0, 1.4112324673522, 1.14630894167097, 0.967795284542623, 0.771479459384692, 1.10468029976148, 1.12334774065132, 1.02482926701639, 1.28754326478771, 1.27439749294131, 0.468683841672724, 1.47469999960758}, {0.964604099952603, 1.45014917711188, 1.17886558630004, 1.76756333403346, 1.26087264215993, 1.17092829494457, 1.46659942462342, 1.7025415585924, 1.4112324673522, 0, 0.433350517223017, 1.463460928818, 0.462965544381851, 0.66291968000662, 1.07010201755441, 1.23000200130049, 0.973485453109068, 0.963546200571036, 0.708724769805536, 0.351200119909572}, {0.987178199640556, 1.17792001455227, 1.52003670190022, 1.43574761894039, 1.02417540515351, 1.19833088638994, 1.19128214039009, 1.38293205218175, 1.14630894167097, 0.433350517223017, 0, 1.49770950074319, 0.473800072611076, 0.538473125003292, 1.37979627224964, 1.5859723170438, 0.996267398224516, 0.986095542821092, 0.725310666139274, 0.570542199221932}, {1.05007594438157, 
0.394661075648738, 0.808442678243754, 0.763612910719347, 1.37259631233791, 0.637341078675405, 0.633592151371708, 1.16756929156758, 0.967795284542623, 1.463460928818, 1.49770950074319, 0, 1.0079761868248, 1.44331961488922, 0.924599080166146, 1.06275728888356, 1.05974425835993, 1.04892430642749, 0.972058829603409, 1.21378822764856}, {1.05464162250736, 0.998807558909651, 1.2889025816028, 1.53386612356483, 1.09416720447891, 0.806490842729072, 1.27269395724349, 1.17264582493965, 0.771479459384692, 0.462965544381851, 0.473800072611076, 1.0079761868248, 0, 0.72479754849538, 1.1699868662153, 1.34481214251794, 1.06435197383538, 1.05348497728858, 0.774878150710318, 0.609532859331199}, {1.1985987403937, 1.135143404599, 1.16264109995678, 1.74323672079854, 0.986982088723923, 1.83315144709714, 1.44641491621774, 1.33271035269688, 1.10468029976148, 0.66291968000662, 0.538473125003292, 1.44331961488922, 0.72479754849538, 0, 1.32968844979665, 1.21307373491949, 0.960087571600877, 0.475142555482979, 0.349485367759138, 0.692733248746636}, {0.967404475245526, 1.15432562628921, 1.18228799147301, 0.886347403928663, 1.59321190226694, 0.932064479113502, 0.735428579892476, 1.07564768421292, 1.12334774065132, 1.07010201755441, 1.37979627224964, 0.924599080166146, 1.1699868662153, 1.32968844979665, 0, 0.979087429691819, 0.97631161216338, 1.21751652292503, 1.42156458605332, 1.40887880416009}, {0.700490199584332, 1.05309036790541, 0.679475681649858, 0.808614044804528, 0.915638787768407, 0.850321696813199, 0.845319988414402, 0.778868281341681, 1.02482926701639, 1.23000200130049, 1.5859723170438, 1.06275728888356, 1.34481214251794, 1.21307373491949, 0.979087429691819, 0, 0.56109848274013, 1.76318885009194, 1.29689226231656, 1.02015839286433}, {0.880060189098976, 1.05010474413616, 0.853658619686283, 1.01590147813779, 0.913042853922533, 1.06830084665916, 1.06201695511881, 1.23287107008366, 1.28754326478771, 0.973485453109068, 0.996267398224516, 1.05974425835993, 1.06435197383538, 0.960087571600877, 
0.97631161216338, 0.56109848274013, 0, 1.39547634461879, 1.02642577026706, 0.807404666228614}, {1.09748548316685, 1.03938321130789, 1.68988558988005, 1.59617804551619, 1.80744143643002, 1.05739353225849, 1.324395996498, 0.968539655354582, 1.27439749294131, 0.963546200571036, 0.986095542821092, 1.04892430642749, 1.05348497728858, 0.475142555482979, 1.21751652292503, 1.76318885009194, 1.39547634461879, 0, 0.320002937404137, 1.268589159299}, {1.28141710375267, 0.963216908696184, 1.24297493464833, 1.1740494822217, 1.3294417177004, 0.979907428113788, 1.22734387448031, 1.42479529031801, 0.468683841672724, 0.708724769805536, 0.725310666139274, 0.972058829603409, 0.774878150710318, 0.349485367759138, 1.42156458605332, 1.29689226231656, 1.02642577026706, 0.320002937404137, 0, 0.933095433689795}, {0.800038509951648, 1.20274751778601, 1.55207513886163, 1.46600946033173, 0.830022143283238, 1.5416250309563, 1.53255698189437, 1.41208067821187, 1.47469999960758, 0.351200119909572, 0.570542199221932, 1.21378822764856, 0.609532859331199, 0.692733248746636, 1.40887880416009, 1.02015839286433, 0.807404666228614, 1.268589159299, 0.933095433689795, 0} }, /*eigeninv*/ { {-0.216311217101265, -0.215171653035930, -0.217000020881064, -0.232890860601250, -0.25403526530177, -0.211569372858927, -0.218073620637049, -0.240585637190076, -0.214507049619293, -0.228476323330312, -0.223235445346107, -0.216116483840334, -0.206903836810903, -0.223553828183343, -0.236937609127783, -0.217652789023588, -0.211982652566286, -0.245995223308316, -0.206187718714279, -0.227670670439422}, {-0.0843931919568687, -0.0342164464991033, 0.393702284928246, -0.166018266253027, 0.0500896782860136, -0.262731388032538, 0.030139964190519, -0.253997503551094, -0.0932603349591988, -0.32884667697173, 0.199966846276877, -0.117543453869516, 0.196248237055757, -0.456448703853250, 0.139286961076387, 0.241166801918811, -0.0783508285295053, 0.377438091416498, 0.109499076984234, 0.128581669647144}, {-0.0690428674271772, 
0.0133858672878363, -0.208289917312908, 0.161232925220819, 0.0735806288007248, -0.316269599838174, -0.0640708424745702, -0.117078801507436, 0.360805085405857, 0.336899760384943, 0.0332447078185156, 0.132954055834276, 0.00595209121998118, -0.157755611190327, -0.199839273133436, 0.193688928807663, 0.0970290928040946, 0.374683975138541, -0.478110944870958, -0.243290196936098}, {0.117284581850481, 0.310399467781876, -0.143513477698805, 0.088808130300351, 0.105747812943691, -0.373871701179853, 0.189069306295134, 0.133258225034741, -0.213043549687694, 0.301303731259140, -0.182085224761849, -0.161971915020789, 0.229301173581378, -0.293586313243755, -0.0260480060747498, -0.0217953684540699, 0.0202675755458796, -0.160134624443657, 0.431950096999465, -0.329885160320501}, {0.256496969244703, 0.0907408349583135, 0.0135731083898029, 0.477557831930769, -0.0727379669280703, 0.101732675207959, -0.147293025369251, -0.348325291603251, -0.255678082078362, -0.187092643740172, -0.177164064346593, -0.225921480146133, 0.422318841046522, 0.319959853469398, -0.0623652546300045, 0.0824203908606883, -0.102057926881110, 0.120728407576411, -0.156845807891241, -0.123528163091204}, {-0.00906668858975576, -0.0814722888231236, -0.0762715085459023, 0.055819989938286, -0.0540516675257271, -0.0070589302769034, -0.315813159989213, -0.0103527463419808, -0.194634331372293, -0.0185860407566822, 0.50134169352609, 0.384531812730061, -0.0405008616742061, 0.0781033650669525, 0.069334900096687, 0.396455180448549, -0.204065801866462, -0.215272089630713, 0.171046818996465, -0.396393364716348}, {0.201971098571663, 0.489747667606921, 0.00226258734592836, 0.0969514005747054, 0.0853921636903791, 0.0862068740282345, -0.465412154271164, -0.130516676347786, 0.165513616974634, 0.0712238027886633, 0.140746943067963, -0.325919272273406, -0.421213488261598, -0.163508199065965, 0.269695802810568, -0.110296405171437, -0.106834099902202, 0.00509414588152415, 0.00909215239544615, 0.0500401865589727}, {0.515854176692456, 
-0.087468413428258, 0.102796468891449, -0.06046105990993, -0.212014383772414, -0.259853648383794, -0.0997372883043333, -0.109934574535736, 0.284891018406112, -0.250578342940183, 0.142174204994568, 0.210384918947619, 0.118803190788946, -0.0268434355996836, 0.0103721198836548, -0.355555176478458, 0.428042332431476, -0.150610175411631, 0.0464090887952940, -0.140238796382057}, {-0.239392215229762, -0.315483492656425, 0.100205194952396, 0.197830195325302, 0.40178804665223, 0.195809461460298, -0.407817115321684, 0.0226836686147386, -0.169780276210306, 0.0818161585952184, -0.172886230584939, 0.174982644851064, 0.0868786992159535, -0.198450519980824, 0.168581078329968, -0.361514336004068, 0.238668430084722, 0.165494019791904, 0.110437707249228, -0.169592003035203}, {-0.313151735678025, 0.10757884850664, -0.49249098807229, 0.0993472335619114, -0.148695715250836, 0.0573801136941699, -0.190040373500722, 0.254848437434773, 0.134147888304352, -0.352719341442756, 0.0839609323513986, -0.207904182300122, 0.253940523323376, -0.109832138553288, 0.0980084518687944, 0.209026594443723, 0.406236051871548, -0.0521120230935943, 0.0554108014592302, 0.134681046631955}, {-0.102905214421384, 0.235803606800009, 0.213414976431981, -0.253606415825635, 0.00945656859370683, 0.259551282655855, 0.159527348902192, 0.083218761193016, -0.286815935191867, 0.0135069477264877, 0.336758103107357, -0.271707359524149, -0.0400009875851839, 0.0871186292716414, -0.171506310409388, -0.0954276577211755, 0.393467571460712, 0.111732846649458, -0.239886066474217, -0.426474828195231}, {-0.0130795552324104, 0.0758967690968058, -0.165099404017689, -0.46035152559912, 0.409888158016031, -0.0235053940299396, 0.0699393201709723, -0.161320910316996, 0.226111732196825, -0.177811841258496, -0.219073917645916, -0.00703219376737286, 0.162831878334912, 0.271670554900684, 0.451033612762052, 0.0820942662443393, -0.0904983490498446, -0.0587000279313978, -0.0938852980928252, -0.306078621571843}, {0.345092040577428, 
-0.257721588971295, -0.301689123771848, -0.0875212184538126, 0.161012613069275, 0.385104899829821, 0.118355290985046, -0.241723794416731, 0.083201920119646, -0.0809095291508749, -0.0820275390511991, -0.115569770103317, -0.250105681098033, -0.164197583037664, -0.299481453795592, 0.255906951902366, 0.129042051416371, 0.203761730442746, 0.347550071284268, -0.109264854744020}, {0.056345924962239, 0.072536751679082, 0.303127492633681, -0.368877185781648, -0.343024497082421, 0.206879529669083, -0.413012709639426, 0.078538816203612, 0.103382383425097, 0.288319996147499, -0.392663258459423, 0.0319588502083897, 0.220316797792669, -0.0563686494606947, -0.0869286063283735, 0.323677017794391, 0.0984875197088935, -0.0303289828821742, 0.0450197853450979, -0.0261771221270139}, {-0.253701638374729, -0.148922815783583, 0.111794052194159, 0.157313977830326, -0.269846001260543, -0.222989872703583, 0.115441028189268, -0.350456582262355, -0.0409581422905941, 0.174078744248002, -0.130673397086811, -0.123963802708056, -0.351609207081548, 0.281548012920868, 0.340382662112428, 0.180262131025562, 0.3895263830793, 0.0121546812430960, 0.214830943227063, -0.0617782909660214}, {-0.025854479416026, 0.480654788977767, -0.138024550829229, -0.130191670810919, 0.107816875829919, -0.111243997319276, -0.0679814460571245, -0.183167991080677, -0.363355166018786, -0.183934891092050, -0.216097125080962, 0.520240628803255, -0.179616013606479, 0.0664131536100941, -0.178350708111064, 0.0352047611606709, 0.223857228692892, 0.128363679623513, -0.000403433628490731, 0.224972110977704}, {0.159207394033448, -0.0371517305736114, -0.294302634912281, -0.0866954375908417, -0.259998567870054, 0.284966673982689, 0.205356416771391, -0.257613708650298, -0.264820519037270, 0.293359248624603, 0.0997476397434102, 0.151390539497369, 0.165571346773648, -0.347569523551258, 0.43792310820533, -0.0723248163210163, 0.0379214984816955, -0.0542758730251438, -0.258020301801603, 0.128680501102363}, {0.316853842351797, 
-0.153950010941153, -0.13387065213508, -0.0702971390607613, -0.202558481846057, -0.172941438694837, -0.068882524588574, 0.524738203063889, -0.271670479920716, -0.112864756695310, -0.146831636946145, -0.0352336188578041, -0.211108490884767, 0.097857111349555, 0.276459740956662, 0.0231297536754823, -0.0773173324868396, 0.487208384389438, -0.0734191389266824, -0.113198765573319}, {-0.274285525741087, 0.227334266052039, -0.0973746625709059, -0.00965256583655389, -0.402438444750043, 0.198586229519026, 0.0958135064575833, -0.108934376958686, 0.253641732094319, -0.0551918478254021, 0.0243640218331436, 0.181936272247179, 0.090952738347629, 0.0603352483029044, -0.0043821671755761, -0.347720824658591, -0.267879988539971, 0.403804652116592, 0.337654323971186, -0.241509293972297}, {-0.0197089518344238, 0.139681034626696, 0.251980475788267, 0.341846624362846, -0.075141195125153, 0.2184951591319, 0.268870823491343, 0.150392399018138, 0.134592404015057, -0.337050200539163, -0.313109373497998, 0.201993318439135, -0.217140733851970, -0.337622749083808, 0.135253284365068, 0.181729249828045, -0.00627813335422765, -0.197218833324039, -0.194060005031698, -0.303055888528004} }, /*eigenval*/ { 20.29131, 0.5045685, 0.2769945, 0.1551147, 0.03235484, -0.04127639, -0.3516426, -0.469973, -0.5835191, -0.6913107, -0.7207972, -0.7907875, -0.9524307, -1.095310, -1.402153, -1.424179, -1.936704, -2.037965, -3.273561, -5.488734 }, /*eigentot and codeFreq left out, these are initialized elsewhere*/ }; /* The JTT92 matrix, D. T. Jones, W. R. Taylor, & J. M. Thorton, CABIOS 8:275 (1992) Derived from the PhyML source code (models.c) by filling in the other side of the symmetric matrix, scaling the entries by the stationary rate (to give the rate of a->b not b|a), to set the diagonals so the rows sum to 0, to rescale the matrix so that the implied rate of evolution is 1. The resulting matrix is the transpose (I think). 
*/ #if 0 { int i,j; for (i=0; i<20; i++) for (j=0; j<i; j++) daa[j*20+i] = daa[i*20+j]; for (i = 0; i < 20; i++) for (j = 0; j < 20; j++) daa[i*20+j] *= pi[j] / 100.0; double mr = 0; /* mean rate */ for (i = 0; i < 20; i++) { double sum = 0; for (j = 0; j < 20; j++) sum += daa[i*20+j]; daa[i*20+i] = -sum; mr += pi[i] * sum; } for (i = 0; i < 20*20; i++) daa[i] /= mr; } #endif double statJTT92[MAXCODES] = {0.07674789,0.05169087,0.04264509,0.05154407,0.01980301,0.04075195,0.06182989,0.07315199,0.02294399,0.05376110,0.09190390,0.05867583,0.02382594,0.04012589,0.05090097,0.06876503,0.05856501,0.01426057,0.03210196,0.06600504}; double matrixJTT92[MAXCODES][MAXCODES] = { { -1.247831,0.044229,0.041179,0.061769,0.042704,0.043467,0.08007,0.136501,0.02059,0.027453,0.022877,0.02669,0.041179,0.011439,0.14794,0.288253,0.362223,0.006863,0.008388,0.227247 }, { 0.029789,-1.025965,0.023112,0.008218,0.058038,0.159218,0.014895,0.070364,0.168463,0.011299,0.019517,0.33179,0.022599,0.002568,0.038007,0.051874,0.032871,0.064714,0.010272,0.008731 }, { 0.022881,0.019068,-1.280568,0.223727,0.014407,0.03644,0.024576,0.034322,0.165676,0.019915,0.005085,0.11144,0.012712,0.004237,0.006356,0.213134,0.098304,0.00339,0.029661,0.00678 }, { 0.041484,0.008194,0.270413,-1.044903,0.005121,0.025095,0.392816,0.066579,0.05736,0.005634,0.003585,0.013316,0.007682,0.002049,0.007682,0.030217,0.019462,0.002049,0.023559,0.015877 }, { 0.011019,0.022234,0.00669,0.001968,-0.56571,0.001771,0.000984,0.011609,0.013577,0.003345,0.004526,0.001377,0.0061,0.015348,0.002755,0.043878,0.008264,0.022628,0.041124,0.012199 }, { 0.02308,0.125524,0.034823,0.019841,0.003644,-1.04415,0.130788,0.010528,0.241735,0.003644,0.029154,0.118235,0.017411,0.00162,0.066406,0.021461,0.020651,0.007288,0.009718,0.008098 }, { 0.064507,0.017816,0.035632,0.471205,0.003072,0.198435,-0.944343,0.073107,0.015973,0.007372,0.005529,0.111197,0.011058,0.003072,0.011058,0.01843,0.019659,0.006143,0.0043,0.027646 }, { 
0.130105,0.099578,0.058874,0.09449,0.042884,0.018898,0.086495,-0.647831,0.016717,0.004361,0.004361,0.019625,0.010176,0.003634,0.017444,0.146096,0.023986,0.039976,0.005815,0.034162 }, { 0.006155,0.074775,0.089138,0.025533,0.01573,0.1361,0.005927,0.005243,-1.135695,0.003648,0.012767,0.010259,0.007523,0.009119,0.026217,0.016642,0.010487,0.001824,0.130629,0.002508 }, { 0.01923,0.011752,0.025106,0.005876,0.009081,0.004808,0.00641,0.003205,0.008547,-1.273602,0.122326,0.011218,0.25587,0.047542,0.005342,0.021367,0.130873,0.004808,0.017094,0.513342 }, { 0.027395,0.0347,0.010958,0.006392,0.021003,0.065748,0.008219,0.005479,0.051137,0.209115,-0.668139,0.012784,0.354309,0.226465,0.093143,0.053877,0.022829,0.047485,0.021916,0.16437 }, { 0.020405,0.376625,0.153332,0.015158,0.004081,0.170239,0.105525,0.015741,0.026235,0.012243,0.008162,-0.900734,0.037896,0.002332,0.012243,0.027401,0.06005,0.00583,0.004664,0.008162 }, { 0.012784,0.010416,0.007102,0.003551,0.007339,0.01018,0.004261,0.003314,0.007812,0.113397,0.091854,0.015388,-1.182051,0.01018,0.003788,0.006865,0.053503,0.005682,0.004261,0.076466 }, { 0.00598,0.001993,0.003987,0.001595,0.031098,0.001595,0.001993,0.001993,0.015948,0.035484,0.098877,0.001595,0.017144,-0.637182,0.006778,0.03668,0.004784,0.021131,0.213701,0.024719 }, { 0.098117,0.037426,0.007586,0.007586,0.007081,0.082944,0.009104,0.012138,0.058162,0.005058,0.051587,0.010621,0.008092,0.008598,-0.727675,0.144141,0.059679,0.003035,0.005058,0.011632 }, { 0.258271,0.069009,0.343678,0.040312,0.152366,0.036213,0.020498,0.137334,0.049878,0.02733,0.040312,0.032113,0.019814,0.06286,0.194728,-1.447863,0.325913,0.023914,0.043045,0.025964 }, { 0.276406,0.037242,0.135003,0.022112,0.02444,0.029677,0.018621,0.019203,0.026768,0.142567,0.014548,0.059936,0.131511,0.006983,0.068665,0.27757,-1.335389,0.006983,0.01222,0.065174 }, { 
0.001275,0.017854,0.001134,0.000567,0.016295,0.002551,0.001417,0.007793,0.001134,0.001275,0.007368,0.001417,0.003401,0.00751,0.00085,0.004959,0.0017,-0.312785,0.010061,0.003542 }, { 0.003509,0.006379,0.022328,0.014673,0.066664,0.007655,0.002233,0.002552,0.182769,0.010207,0.007655,0.002552,0.005741,0.170967,0.00319,0.020095,0.006698,0.022647,-0.605978,0.005103 }, { 0.195438,0.011149,0.010493,0.020331,0.040662,0.013117,0.029512,0.030824,0.007214,0.630254,0.11805,0.009182,0.211834,0.040662,0.015084,0.024922,0.073453,0.016396,0.010493,-1.241722 } }; double statWAG01[MAXCODES] = {0.0866279,0.043972, 0.0390894,0.0570451,0.0193078,0.0367281,0.0580589,0.0832518,0.0244314,0.048466, 0.086209, 0.0620286,0.0195027,0.0384319,0.0457631,0.0695179,0.0610127,0.0143859,0.0352742,0.0708956}; double matrixWAG01[MAXCODES][MAXCODES] = { {-1.117151, 0.050147, 0.046354, 0.067188, 0.093376, 0.082607, 0.143908, 0.128804, 0.028817, 0.017577, 0.036177, 0.082395, 0.081234, 0.019138, 0.130789, 0.306463, 0.192846, 0.010286, 0.021887, 0.182381}, {0.025455, -0.974318, 0.029321, 0.006798, 0.024376, 0.140086, 0.020267, 0.026982, 0.098628, 0.008629, 0.022967, 0.246964, 0.031527, 0.004740, 0.031358, 0.056495, 0.025586, 0.053714, 0.017607, 0.011623}, {0.020916, 0.026065, -1.452438, 0.222741, 0.010882, 0.063328, 0.038859, 0.046176, 0.162306, 0.022737, 0.005396, 0.123567, 0.008132, 0.003945, 0.008003, 0.163042, 0.083283, 0.002950, 0.044553, 0.008051}, {0.044244, 0.008819, 0.325058, -0.989665, 0.001814, 0.036927, 0.369645, 0.051822, 0.055719, 0.002361, 0.005077, 0.028729, 0.006212, 0.002798, 0.025384, 0.064166, 0.022443, 0.007769, 0.019500, 0.009120}, {0.020812, 0.010703, 0.005375, 0.000614, -0.487357, 0.002002, 0.000433, 0.006214, 0.005045, 0.003448, 0.007787, 0.001500, 0.007913, 0.008065, 0.002217, 0.028525, 0.010395, 0.014531, 0.011020, 0.020307}, {0.035023, 0.117008, 0.059502, 0.023775, 0.003809, -1.379785, 0.210830, 0.012722, 0.165524, 0.004391, 0.033516, 0.150135, 0.059565, 0.003852, 0.035978, 
0.039660, 0.033070, 0.008316, 0.008777, 0.011613}, {0.096449, 0.026759, 0.057716, 0.376214, 0.001301, 0.333275, -1.236894, 0.034593, 0.034734, 0.007763, 0.009400, 0.157479, 0.019202, 0.004944, 0.041578, 0.042955, 0.050134, 0.009540, 0.011961, 0.035874}, {0.123784, 0.051085, 0.098345, 0.075630, 0.026795, 0.028838, 0.049604, -0.497615, 0.021792, 0.002661, 0.005356, 0.032639, 0.015212, 0.004363, 0.021282, 0.117240, 0.019732, 0.029444, 0.009052, 0.016361}, {0.008127, 0.054799, 0.101443, 0.023863, 0.006384, 0.110105, 0.014616, 0.006395, -0.992342, 0.003543, 0.012807, 0.022832, 0.010363, 0.017420, 0.017851, 0.018979, 0.012136, 0.006733, 0.099319, 0.003035}, {0.009834, 0.009511, 0.028192, 0.002006, 0.008654, 0.005794, 0.006480, 0.001549, 0.007029, -1.233162, 0.161294, 0.016472, 0.216559, 0.053891, 0.005083, 0.016249, 0.074170, 0.010808, 0.021372, 0.397837}, {0.036002, 0.045028, 0.011900, 0.007673, 0.034769, 0.078669, 0.013957, 0.005547, 0.045190, 0.286902, -0.726011, 0.023303, 0.439180, 0.191376, 0.037625, 0.031191, 0.029552, 0.060196, 0.036066, 0.162890}, {0.058998, 0.348377, 0.196082, 0.031239, 0.004820, 0.253558, 0.168246, 0.024319, 0.057967, 0.021081, 0.016767, -1.124580, 0.060821, 0.005783, 0.036254, 0.062960, 0.090292, 0.008952, 0.008675, 0.019884}, {0.018288, 0.013983, 0.004057, 0.002124, 0.007993, 0.031629, 0.006450, 0.003564, 0.008272, 0.087143, 0.099354, 0.019123, -1.322098, 0.024370, 0.003507, 0.010109, 0.031033, 0.010556, 0.008769, 0.042133}, {0.008490, 0.004143, 0.003879, 0.001885, 0.016054, 0.004030, 0.003273, 0.002014, 0.027402, 0.042734, 0.085315, 0.003583, 0.048024, -0.713669, 0.006512, 0.022020, 0.006934, 0.061698, 0.260332, 0.026213}, {0.069092, 0.032635, 0.009370, 0.020364, 0.005255, 0.044829, 0.032773, 0.011698, 0.033438, 0.004799, 0.019973, 0.026747, 0.008229, 0.007754, -0.605590, 0.077484, 0.038202, 0.006695, 0.010376, 0.015124}, {0.245933, 0.089317, 0.289960, 0.078196, 0.102703, 0.075066, 0.051432, 0.097899, 0.054003, 0.023306, 0.025152, 0.070562, 
0.036035, 0.039831, 0.117705, -1.392239, 0.319421, 0.038212, 0.057419, 0.016981}, {0.135823, 0.035501, 0.129992, 0.024004, 0.032848, 0.054936, 0.052685, 0.014461, 0.030308, 0.093371, 0.020915, 0.088814, 0.097083, 0.011008, 0.050931, 0.280341, -1.154973, 0.007099, 0.018643, 0.088894}, {0.001708, 0.017573, 0.001086, 0.001959, 0.010826, 0.003257, 0.002364, 0.005088, 0.003964, 0.003208, 0.010045, 0.002076, 0.007786, 0.023095, 0.002105, 0.007908, 0.001674, -0.466694, 0.037525, 0.005516}, {0.008912, 0.014125, 0.040205, 0.012058, 0.020133, 0.008430, 0.007267, 0.003836, 0.143398, 0.015555, 0.014757, 0.004934, 0.015861, 0.238943, 0.007998, 0.029135, 0.010779, 0.092011, -0.726275, 0.011652}, {0.149259, 0.018739, 0.014602, 0.011335, 0.074565, 0.022417, 0.043805, 0.013932, 0.008807, 0.581952, 0.133956, 0.022726, 0.153161, 0.048356, 0.023429, 0.017317, 0.103293, 0.027186, 0.023418, -1.085487}, }; /* Le-Gascuel 2008 model data from Harry Yoo https://github.com/hyoo/FastTree */ double statLG08[MAXCODES] = {0.079066, 0.055941, 0.041977, 0.053052, 0.012937, 0.040767, 0.071586, 0.057337, 0.022355, 0.062157, 0.099081, 0.0646, 0.022951, 0.042302, 0.04404, 0.061197, 0.053287, 0.012066, 0.034155, 0.069147}; double matrixLG08[MAXCODES][MAXCODES] = { {-1.08959879,0.03361031,0.02188683,0.03124237,0.19680136,0.07668542,0.08211337,0.16335306,0.02837339,0.01184642,0.03125763,0.04242021,0.08887270,0.02005907,0.09311189,0.37375830,0.16916131,0.01428853,0.01731216,0.20144931}, {0.02378006,-0.88334349,0.04206069,0.00693409,0.02990323,0.15707674,0.02036079,0.02182767,0.13574610,0.00710398,0.01688563,0.35388551,0.02708281,0.00294931,0.01860218,0.04800569,0.03238902,0.03320688,0.01759004,0.00955956}, {0.01161996,0.03156149,-1.18705869,0.21308090,0.02219603,0.07118238,0.02273938,0.06034785,0.18928374,0.00803870,0.00287235,0.09004368,0.01557359,0.00375798,0.00679131,0.16825837,0.08398226,0.00190474,0.02569090,0.00351296}, 
{0.02096312,0.00657599,0.26929909,-0.86328733,0.00331871,0.02776660,0.27819699,0.04482489,0.04918511,0.00056712,0.00079981,0.01501150,0.00135537,0.00092395,0.02092662,0.06579888,0.02259266,0.00158572,0.00716768,0.00201422}, {0.03220119,0.00691547,0.00684065,0.00080928,-0.86781864,0.00109716,0.00004527,0.00736456,0.00828668,0.00414794,0.00768465,0.00017162,0.01156150,0.01429859,0.00097521,0.03602269,0.01479316,0.00866942,0.01507844,0.02534728}, {0.03953956,0.11446966,0.06913053,0.02133682,0.00345736,-1.24953177,0.16830979,0.01092385,0.19623161,0.00297003,0.02374496,0.13185209,0.06818543,0.00146170,0.02545052,0.04989165,0.04403378,0.00962910,0.01049079,0.00857458}, {0.07434507,0.02605508,0.03877888,0.37538659,0.00025048,0.29554848,-0.84254259,0.02497249,0.03034386,0.00316875,0.00498760,0.12936820,0.01243696,0.00134660,0.03002373,0.04380857,0.04327684,0.00557310,0.00859294,0.01754095}, {0.11846020,0.02237238,0.08243001,0.04844538,0.03263985,0.01536392,0.02000178,-0.50414422,0.01785951,0.00049912,0.00253779,0.01700817,0.00800067,0.00513658,0.01129312,0.09976552,0.00744439,0.01539442,0.00313512,0.00439779}, {0.00802225,0.05424651,0.10080372,0.02072557,0.01431930,0.10760560,0.00947583,0.00696321,-1.09324335,0.00243405,0.00818899,0.01558729,0.00989143,0.01524917,0.01137533,0.02213166,0.01306114,0.01334710,0.11863394,0.00266053}, {0.00931296,0.00789336,0.01190322,0.00066446,0.01992916,0.00452837,0.00275137,0.00054108,0.00676776,-1.41499789,0.25764421,0.00988722,0.26563382,0.06916358,0.00486570,0.00398456,0.06425393,0.00694043,0.01445289,0.66191466}, {0.03917027,0.02990732,0.00677980,0.00149374,0.05885464,0.05771026,0.00690325,0.00438541,0.03629495,0.41069624,-0.79375308,0.01362360,0.62543296,0.25688578,0.02467704,0.01806113,0.03001512,0.06139358,0.02968934,0.16870919}, 
{0.03465896,0.40866276,0.13857164,0.01827910,0.00085698,0.20893479,0.11674330,0.01916263,0.04504313,0.01027583,0.00888247,-0.97644156,0.04241650,0.00154510,0.02521473,0.04836478,0.07344114,0.00322392,0.00852278,0.01196402}, {0.02579765,0.01111131,0.00851489,0.00058635,0.02051079,0.03838702,0.00398738,0.00320253,0.01015515,0.09808327,0.14487451,0.01506968,-1.54195698,0.04128536,0.00229163,0.00796306,0.04636929,0.01597787,0.01104642,0.04357735}, {0.01073203,0.00223024,0.00378708,0.00073673,0.04675419,0.00151673,0.00079574,0.00378966,0.02885576,0.04707045,0.10967574,0.00101178,0.07609486,-0.81061579,0.00399600,0.01530562,0.00697985,0.10394083,0.33011973,0.02769432}, {0.05186360,0.01464471,0.00712508,0.01737179,0.00331981,0.02749383,0.01847072,0.00867414,0.02240973,0.00344749,0.01096857,0.01718973,0.00439734,0.00416018,-0.41664685,0.05893117,0.02516738,0.00418956,0.00394655,0.01305787}, {0.28928853,0.05251612,0.24529879,0.07590089,0.17040121,0.07489439,0.03745080,0.10648187,0.06058559,0.00392302,0.01115539,0.04581702,0.02123285,0.02214217,0.08188943,-1.42842431,0.39608294,0.01522956,0.02451220,0.00601987}, {0.11400727,0.03085239,0.10660988,0.02269274,0.06093244,0.05755704,0.03221430,0.00691855,0.03113348,0.05508469,0.01614250,0.06057985,0.10765893,0.00879238,0.03045173,0.34488735,-1.23444419,0.00750412,0.01310009,0.11660005}, {0.00218053,0.00716244,0.00054751,0.00036065,0.00808574,0.00284997,0.00093936,0.00323960,0.00720403,0.00134729,0.00747646,0.00060216,0.00840002,0.02964754,0.00114785,0.00300276,0.00169919,-0.44275283,0.03802969,0.00228662}, {0.00747852,0.01073967,0.02090366,0.00461457,0.03980863,0.00878929,0.00409985,0.00186756,0.18125441,0.00794180,0.01023445,0.00450612,0.01643896,0.26654152,0.00306072,0.01368064,0.00839668,0.10764993,-0.71435091,0.00851526}, 
{0.17617706,0.01181629,0.00578676,0.00262530,0.13547871,0.01454379,0.01694332,0.00530363,0.00822937,0.73635171,0.11773937,0.01280613,0.13129028,0.04526924,0.02050210,0.00680190,0.15130413,0.01310401,0.01723920,-1.33539639} };
ft.c
/*--------------------------------------------------------------------

  NAS Parallel Benchmarks 3.0 structured OpenMP C versions - FT

  This benchmark is an OpenMP C version of the NPB FT code.

  The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran
  versions in "NPB 2.3-serial" developed by NAS. 3.0 translation is
  performed by the UVSQ.

  Permission to use, copy, distribute and modify this software for any
  purpose with or without fee is hereby granted.
  This software is provided "as is" without express or implied warranty.

  Information on OpenMP activities at RWCP is available at:
           http://pdplab.trc.rwcp.or.jp/pdperf/Omni/

  Information on NAS Parallel Benchmarks 2.3 is available at:
           http://www.nas.nasa.gov/NAS/NPB/

--------------------------------------------------------------------*/

/*--------------------------------------------------------------------

  Authors: D. Bailey
           W. Saphir

  OpenMP C version: S. Satoh
  3.0 structure translation: M. Popov

--------------------------------------------------------------------*/

#include "../common/npb-C.h"

/* global variables */
#include "global.h"

/* function declarations */
/* Time-evolution and setup helpers (3-D arrays indexed [z][y][x]) */
static void evolve(dcomplex u0[NZ][NY][NX], dcomplex u1[NZ][NY][NX], int t, int indexmap[NZ][NY][NX], int d[3]);
static void compute_initial_conditions(dcomplex u0[NZ][NY][NX], int d[3]);
static void ipow46(double a, int exponent, double *result);
static void setup(void);
static void compute_indexmap(int indexmap[NZ][NY][NX], int d[3]);
static void print_timers(void);
/* 3-D FFT driver and its per-dimension passes; y0/y1 are scratch
   work arrays of FFTBLOCKPAD-padded columns */
static void fft(int dir, dcomplex x1[NZ][NY][NX], dcomplex x2[NZ][NY][NX]);
static void cffts1(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]);
static void cffts2(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]);
static void cffts3(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]);
static void fft_init (int n);
static void cfftz (int is, int m, int n, dcomplex x[NX][FFTBLOCKPAD], dcomplex y[NX][FFTBLOCKPAD]);
static void fftz2 (int is, int l, int m, int n, int ny, int ny1, dcomplex u[NX], dcomplex x[NX][FFTBLOCKPAD], dcomplex y[NX][FFTBLOCKPAD]);
static int ilog2(int n);
static void checksum(int i, dcomplex u1[NZ][NY][NX], int d[3]);
static void verify (int d1, int d2, int d3, int nt, boolean *verified, char *class);

/*--------------------------------------------------------------------
c FT benchmark
c-------------------------------------------------------------------*/

int main(int argc, char **argv) {

/*c-------------------------------------------------------------------
c-------------------------------------------------------------------*/

    int i, ierr;	/* NOTE(review): ierr is unused in the visible portion
			   of main -- presumably kept for parity with the
			   Fortran original; verify before removing */

/*------------------------------------------------------------------
c u0, u1, u2 are the main arrays in the problem.
c Depending on the decomposition, these arrays will have different
c dimensions. To accomodate all possibilities, we allocate them as
c one-dimensional arrays and pass them to subroutines for different
c views
c - u0 contains the initial (transformed) initial condition
c - u1 and u2 are working arrays
c - indexmap maps i,j,k of u0 to the correct i^2+j^2+k^2 for the
c   time evolution operator.
c-----------------------------------------------------------------*/

/*--------------------------------------------------------------------
c Large arrays are in common so that they are allocated on the
c heap rather than the stack. This common block is not
c referenced directly anywhere else. Padding is to avoid accidental
c cache problems, since all array sizes are powers of two.
c-------------------------------------------------------------------*/

    static dcomplex u0[NZ][NY][NX];
    static dcomplex pad1[3];	/* padding between the big static arrays (see above) */
    static dcomplex u1[NZ][NY][NX];
    static dcomplex pad2[3];
    static dcomplex u2[NZ][NY][NX];
    static dcomplex pad3[3];
    static int indexmap[NZ][NY][NX];

    int iter;
    int nthreads = 1;
    double total_time, mflops;
    boolean verified;
    char class;		/* problem class letter (S/W/A/B/...), set by verify() */

/*--------------------------------------------------------------------
c Run the entire problem once to make sure all data is touched.
c This reduces variable startup costs, which is important for such a
c short benchmark. The other NPB 2 implementations are similar.
c-------------------------------------------------------------------*/

    for (i = 0; i < T_MAX; i++) {
	timer_clear(i);
    }
    setup();
    compute_indexmap(indexmap, dims[2]);
    compute_initial_conditions(u1, dims[0]);
    fft_init (dims[0][0]);
    fft(1, u1, u0);	/* untimed warm-up forward transform */

/*--------------------------------------------------------------------
c Start over from the beginning. Note that all operations must
c be timed, in contrast to other benchmarks.
c-------------------------------------------------------------------*/ for (i = 0; i < T_MAX; i++) { timer_clear(i); } timer_start(T_TOTAL); if (TIMERS_ENABLED == TRUE) timer_start(T_SETUP); compute_indexmap(indexmap, dims[2]); compute_initial_conditions(u1, dims[0]); fft_init (dims[0][0]); if (TIMERS_ENABLED == TRUE) { timer_stop(T_SETUP); } if (TIMERS_ENABLED == TRUE) { timer_start(T_FFT); } fft(1, u1, u0); if (TIMERS_ENABLED == TRUE) { timer_stop(T_FFT); } for (iter = 1; iter <= niter; iter++) { if (TIMERS_ENABLED == TRUE) { timer_start(T_EVOLVE); } evolve(u0, u1, iter, indexmap, dims[0]); if (TIMERS_ENABLED == TRUE) { timer_stop(T_EVOLVE); } if (TIMERS_ENABLED == TRUE) { timer_start(T_FFT); } fft(-1, u1, u2); if (TIMERS_ENABLED == TRUE) { timer_stop(T_FFT); } if (TIMERS_ENABLED == TRUE) { timer_start(T_CHECKSUM); } checksum(iter, u2, dims[0]); if (TIMERS_ENABLED == TRUE) { timer_stop(T_CHECKSUM); } } verify(NX, NY, NZ, niter, &verified, &class); { //#if defined(_OPENMP) // nthreads = omp_get_num_threads(); //#endif /* _OPENMP */ } /* end parallel */ timer_stop(T_TOTAL); total_time = timer_read(T_TOTAL); if( total_time != 0.0) { mflops = 1.0e-6*(double)(NTOTAL) * (14.8157+7.19641*log((double)(NTOTAL)) + (5.23518+7.21113*log((double)(NTOTAL)))*niter) /total_time; } else { mflops = 0.0; } c_print_results("FT", class, NX, NY, NZ, niter, nthreads, total_time, mflops, " floating point", verified, NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6, CS7); if (TIMERS_ENABLED == TRUE) print_timers(); } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void evolve(dcomplex u0[NZ][NY][NX], dcomplex u1[NZ][NY][NX], int t, int indexmap[NZ][NY][NX], int d[3]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c 
evolve u0 -> u1 (t time steps) in fourier space c-------------------------------------------------------------------*/ int i, j, k; #pragma omp parallel for private(i ,j ,k ) for (k = 0; k < d[2]; k++) { for (j = 0; j < d[1]; j++) { for (i = 0; i < d[0]; i++) { crmul(u1[k][j][i], u0[k][j][i], ex[t*indexmap[k][j][i]]); } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void compute_initial_conditions(dcomplex u0[NZ][NY][NX], int d[3]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Fill in array u0 with initial conditions from c random number generator c-------------------------------------------------------------------*/ int k; double x0, start, an, dummy; static double tmp[NX*2*MAXDIM+1]; int i,j,t; start = SEED; /*-------------------------------------------------------------------- c Jump to the starting element for our first plane. c-------------------------------------------------------------------*/ ipow46(A, (zstart[0]-1)*2*NX*NY + (ystart[0]-1)*2*NX, &an); dummy = randlc(&start, an); ipow46(A, 2*NX*NY, &an); /*-------------------------------------------------------------------- c Go through by z planes filling in one square at a time. 
c-------------------------------------------------------------------*/ for (k = 0; k < dims[0][2]; k++) { x0 = start; vranlc(2*NX*dims[0][1], &x0, A, tmp); t = 1; for (j = 0; j < dims[0][1]; j++) for (i = 0; i < NX; i++) { u0[k][j][i].real = tmp[t++]; u0[k][j][i].imag = tmp[t++]; } if (k != dims[0][2]) dummy = randlc(&start, an); } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void ipow46(double a, int exponent, double *result) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c compute a^exponent mod 2^46 c-------------------------------------------------------------------*/ double dummy, q, r; int n, n2; /*-------------------------------------------------------------------- c Use c a^n = a^(n/2)*a^(n/2) if n even else c a^n = a*a^(n-1) if n odd c-------------------------------------------------------------------*/ *result = 1; if (exponent == 0) return; q = a; r = 1; n = exponent; while (n > 1) { n2 = n/2; if (n2 * 2 == n) { dummy = randlc(&q, q); n = n2; } else { dummy = randlc(&r, q); n = n-1; } } dummy = randlc(&r, q); *result = r; } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void setup(void) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int ierr, i, j, fstatus; printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version" " - FT Benchmark\n\n"); niter = NITER_DEFAULT; printf(" Size : %3dx%3dx%3d\n", NX, NY, NZ); printf(" Iterations : %7d\n", niter); /* 1004 format(' Number of processes : ', i7) 1005 format(' Processor array : ', i3, 'x', i3) 1006 format(' WARNING: compiled for ', i5, ' 
processes. ', > ' Will not verify. ')*/ #pragma omp parallel for for (i = 0;i < 3 ; i++) { dims[i][0] = NX; dims[i][1] = NY; dims[i][2] = NZ; } #pragma omp parallel for for (i = 0; i < 3; i++) { xstart[i] = 1; xend[i] = NX; ystart[i] = 1; yend[i] = NY; zstart[i] = 1; zend[i] = NZ; } /*-------------------------------------------------------------------- c Set up info for blocking of ffts and transposes. This improves c performance on cache-based systems. Blocking involves c working on a chunk of the problem at a time, taking chunks c along the first, second, or third dimension. c c - In cffts1 blocking is on 2nd dimension (with fft on 1st dim) c - In cffts2/3 blocking is on 1st dimension (with fft on 2nd and 3rd dims) c Since 1st dim is always in processor, we'll assume it's long enough c (default blocking factor is 16 so min size for 1st dim is 16) c The only case we have to worry about is cffts1 in a 2d decomposition. c so the blocking factor should not be larger than the 2nd dimension. c-------------------------------------------------------------------*/ fftblock = FFTBLOCK_DEFAULT; fftblockpad = FFTBLOCKPAD_DEFAULT; if (fftblock != FFTBLOCK_DEFAULT) fftblockpad = fftblock+3; } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void compute_indexmap(int indexmap[NZ][NY][NX], int d[3]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c compute function from local (i,j,k) to ibar^2+jbar^2+kbar^2 c for time evolution exponent. 
c-------------------------------------------------------------------*/ int i, j, k, ii, ii2, jj, ij2, kk; double ap; /*-------------------------------------------------------------------- c basically we want to convert the fortran indices c 1 2 3 4 5 6 7 8 c to c 0 1 2 3 -4 -3 -2 -1 c The following magic formula does the trick: c mod(i-1+n/2, n) - n/2 c-------------------------------------------------------------------*/ #pragma omp parallel for private(i ,j ,k ,ii ,ii2 ,jj ,ij2 ,kk ) for (i = 0; i < dims[2][0]; i++) { ii = (i+1+xstart[2]-2+NX/2)%NX - NX/2; ii2 = ii*ii; #pragma omp parallel for private(j) firstprivate(k ,ii ,ii2 ,jj ,ij2 ,kk ,i ) for (j = 0; j < dims[2][1]; j++) { jj = (j+1+ystart[2]-2+NY/2)%NY - NY/2; ij2 = jj*jj+ii2; #pragma omp parallel for private(k) firstprivate(j ,ii ,ii2 ,jj ,ij2 ,kk ,i ) for (k = 0; k < dims[2][2]; k++) { kk = (k+1+zstart[2]-2+NZ/2)%NZ - NZ/2; indexmap[k][j][i] = kk*kk+ij2; } } } /*-------------------------------------------------------------------- c compute array of exponentials for time evolution. 
c-------------------------------------------------------------------*/ ap = - 4.0 * ALPHA * PI * PI; ex[0] = 1.0; ex[1] = exp(ap); for (i = 2; i <= EXPMAX; i++) { ex[i] = ex[i-1]*ex[1]; } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void print_timers(void) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int i; char *tstrings[] = { " total ", " setup ", " fft ", " evolve ", " checksum ", " fftlow ", " fftcopy " }; for (i = 0; i < T_MAX; i++) { if (timer_read(i) != 0.0) { printf("timer %2d(%16s( :%10.6f\n", i, tstrings[i], timer_read(i)); } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void fft(int dir, dcomplex x1[NZ][NY][NX], dcomplex x2[NZ][NY][NX]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ dcomplex y0[NX][FFTBLOCKPAD]; dcomplex y1[NX][FFTBLOCKPAD]; /*-------------------------------------------------------------------- c note: args x1, x2 must be different arrays c note: args for cfftsx are (direction, layout, xin, xout, scratch) c xin/xout may be the same and it can be somewhat faster c if they are c-------------------------------------------------------------------*/ if (dir == 1) { cffts1(1, dims[0], x1, x1, y0, y1); /* x1 -> x1 */ cffts2(1, dims[1], x1, x1, y0, y1); /* x1 -> x1 */ cffts3(1, dims[2], x1, x2, y0, y1); /* x1 -> x2 */ } else { cffts3(-1, dims[2], x1, x1, y0, y1); /* x1 -> x1 */ cffts2(-1, dims[1], x1, x1, y0, y1); /* x1 -> x1 */ cffts1(-1, dims[0], x1, x2, y0, y1); /* x1 -> x2 */ } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void 
cffts1(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int logd[3]; int i, j, k, jj; #pragma omp parallel for for (i = 0; i < 3; i++) { logd[i] = ilog2(d[i]); } { dcomplex y0[NX][FFTBLOCKPAD]; dcomplex y1[NX][FFTBLOCKPAD]; #pragma omp parallel for private(i ,j ,k ,jj ) for (k = 0; k < d[2]; k++) { for (jj = 0; jj <= d[1] - fftblock; jj+=fftblock) { /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ #pragma omp parallel for private(j) firstprivate(i ,jj ,k ) for (j = 0; j < fftblock; j++) { #pragma omp parallel for private(i) firstprivate(jj ,j ,k ) for (i = 0; i < d[0]; i++) { y0[i][j].real = x[k][j+jj][i].real; y0[i][j].imag = x[k][j+jj][i].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */ cfftz (is, logd[0], d[0], y0, y1); /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ #pragma omp parallel for private(j) firstprivate(fftblock ,i ,jj ,x ,k ) for (j = 0; j < fftblock; j++) { for (i = 0; i < d[0]; i++) { xout[k][j+jj][i].real = y0[i][j].real; xout[k][j+jj][i].imag = y0[i][j].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void cffts2(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int logd[3]; int i, j, k, ii; #pragma omp parallel for for (i = 0; i < 3; i++) { logd[i] = ilog2(d[i]); } { dcomplex 
y0[NX][FFTBLOCKPAD]; dcomplex y1[NX][FFTBLOCKPAD]; #pragma omp parallel for private(i ,j ,k ,ii ) for (k = 0; k < d[2]; k++) { for (ii = 0; ii <= d[0] - fftblock; ii+=fftblock) { /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ #pragma omp parallel for private(j) firstprivate(i ,ii ,x ,fftblock ,k ) for (j = 0; j < d[1]; j++) { #pragma omp parallel for private(i) firstprivate(ii ,x ,fftblock ,j ,k ) for (i = 0; i < fftblock; i++) { y0[j][i].real = x[k][j][i+ii].real; y0[j][i].imag = x[k][j][i+ii].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */ cfftz (is, logd[1], d[1], y0, y1); /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ #pragma omp parallel for private(j) firstprivate(i ,ii ,x ,fftblock ,k ) for (j = 0; j < d[1]; j++) { for (i = 0; i < fftblock; i++) { xout[k][j][i+ii].real = y0[j][i].real; xout[k][j][i+ii].imag = y0[j][i].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void cffts3(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int logd[3]; int i, j, k, ii; #pragma omp parallel for for (i = 0;i < 3; i++) { logd[i] = ilog2(d[i]); } { dcomplex y0[NX][FFTBLOCKPAD]; dcomplex y1[NX][FFTBLOCKPAD]; #pragma omp parallel for private(i ,j ,k ,ii ) for (j = 0; j < d[1]; j++) { for (ii = 0; ii <= d[0] - fftblock; ii+=fftblock) { /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ #pragma omp parallel for private(k) firstprivate(i ,ii ,j ) for (k = 0; k < d[2]; k++) { #pragma omp parallel for private(i) firstprivate(ii ,k ,j 
) for (i = 0; i < fftblock; i++) { y0[k][i].real = x[k][j][i+ii].real; y0[k][i].imag = x[k][j][i+ii].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */ cfftz (is, logd[2], d[2], y0, y1); /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ #pragma omp parallel for private(k) firstprivate(i ,ii ,x ,fftblock ,j ) for (k = 0; k < d[2]; k++) { for (i = 0; i < fftblock; i++) { xout[k][j][i+ii].real = y0[k][i].real; xout[k][j][i+ii].imag = y0[k][i].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void fft_init (int n) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c compute the roots-of-unity array that will be used for subsequent FFTs. c-------------------------------------------------------------------*/ int m,nu,ku,i,j,ln; double t, ti; /*-------------------------------------------------------------------- c Initialize the U array with sines and cosines in a manner that permits c stride one access at each FFT iteration. 
c-------------------------------------------------------------------*/ nu = n; m = ilog2(n); u[0].real = (double)m; u[0].imag = 0.0; ku = 1; ln = 1; for (j = 1; j <= m; j++) { t = PI / ln; for (i = 0; i <= ln - 1; i++) { ti = i * t; u[i+ku].real = cos(ti); u[i+ku].imag = sin(ti); } ku = ku + ln; ln = 2 * ln; } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void cfftz (int is, int m, int n, dcomplex x[NX][FFTBLOCKPAD], dcomplex y[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Computes NY N-point complex-to-complex FFTs of X using an algorithm due c to Swarztrauber. X is both the input and the output array, while Y is a c scratch array. It is assumed that N = 2^M. Before calling CFFTZ to c perform FFTs, the array U must be initialized by calling CFFTZ with IS c set to 0 and M set to MX, where MX is the maximum value of M for any c subsequent call. c-------------------------------------------------------------------*/ int i,j,l,mx; /*-------------------------------------------------------------------- c Check if input parameters are invalid. c-------------------------------------------------------------------*/ mx = (int)(u[0].real); if ((is != 1 && is != -1) || m < 1 || m > mx) { printf("CFFTZ: Either U has not been initialized, or else\n" "one of the input parameters is invalid%5d%5d%5d\n", is, m, mx); exit(1); } /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. 
c-------------------------------------------------------------------*/ for (l = 1; l <= m; l+=2) { fftz2 (is, l, m, n, fftblock, fftblockpad, u, x, y); if (l == m) break; fftz2 (is, l + 1, m, n, fftblock, fftblockpad, u, y, x); } /*-------------------------------------------------------------------- c Copy Y to X. c-------------------------------------------------------------------*/ if (m % 2 == 1) { for (j = 0; j < n; j++) { #pragma omp parallel for private(i) firstprivate(fftblock ,j ) for (i = 0; i < fftblock; i++) { x[j][i].real = y[j][i].real; x[j][i].imag = y[j][i].imag; } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void fftz2 (int is, int l, int m, int n, int ny, int ny1, dcomplex u[NX], dcomplex x[NX][FFTBLOCKPAD], dcomplex y[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Performs the L-th iteration of the second variant of the Stockham FFT. c-------------------------------------------------------------------*/ int k,n1,li,lj,lk,ku,i,j,i11,i12,i21,i22; dcomplex u1,x11,x21; /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ n1 = n / 2; if (l-1 == 0) { lk = 1; } else { lk = 2 << ((l - 1)-1); } if (m-l == 0) { li = 1; } else { li = 2 << ((m - l)-1); } lj = 2 * lk; ku = li; for (i = 0; i < li; i++) { i11 = i * lk; i12 = i11 + n1; i21 = i * lj; i22 = i21 + lk; if (is >= 1) { u1.real = u[ku+i].real; u1.imag = u[ku+i].imag; } else { u1.real = u[ku+i].real; u1.imag = -u[ku+i].imag; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
c-------------------------------------------------------------------*/ for (k = 0; k < lk; k++) { for (j = 0; j < ny; j++) { double x11real, x11imag; double x21real, x21imag; x11real = x[i11+k][j].real; x11imag = x[i11+k][j].imag; x21real = x[i12+k][j].real; x21imag = x[i12+k][j].imag; y[i21+k][j].real = x11real + x21real; y[i21+k][j].imag = x11imag + x21imag; y[i22+k][j].real = u1.real * (x11real - x21real) - u1.imag * (x11imag - x21imag); y[i22+k][j].imag = u1.real * (x11imag - x21imag) + u1.imag * (x11real - x21real); } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static int ilog2(int n) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int nn, lg; if (n == 1) { return 0; } lg = 1; nn = 2; while (nn < n) { nn = nn << 1; lg++; } return lg; } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void checksum(int i, dcomplex u1[NZ][NY][NX], int d[3]) { { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int j, q,r,s, ierr; dcomplex chk,allchk; chk.real = 0.0; chk.imag = 0.0; #pragma omp parallel for for (j = 1; j <= 1024; j++) { q = j%NX+1; if (q >= xstart[0] && q <= xend[0]) { r = (3*j)%NY+1; if (r >= ystart[0] && r <= yend[0]) { s = (5*j)%NZ+1; if (s >= zstart[0] && s <= zend[0]) { cadd(chk,chk,u1[s-zstart[0]][r-ystart[0]][q-xstart[0]]); } } } } { sums[i].real += chk.real; sums[i].imag += chk.imag; } { /* complex % real */ sums[i].real = sums[i].real/(double)(NTOTAL); sums[i].imag = sums[i].imag/(double)(NTOTAL); printf("T = %5d Checksum = %22.12e %22.12e\n", i, sums[i].real, sums[i].imag); } } } /*-------------------------------------------------------------------- 
c-------------------------------------------------------------------*/ static void verify (int d1, int d2, int d3, int nt, boolean *verified, char *class) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int ierr, size, i; double err, epsilon; /*-------------------------------------------------------------------- c Sample size reference checksums c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Class S size reference checksums c-------------------------------------------------------------------*/ double vdata_real_s[6+1] = { 0.0, 5.546087004964e+02, 5.546385409189e+02, 5.546148406171e+02, 5.545423607415e+02, 5.544255039624e+02, 5.542683411902e+02 }; double vdata_imag_s[6+1] = { 0.0, 4.845363331978e+02, 4.865304269511e+02, 4.883910722336e+02, 4.901273169046e+02, 4.917475857993e+02, 4.932597244941e+02 }; /*-------------------------------------------------------------------- c Class W size reference checksums c-------------------------------------------------------------------*/ double vdata_real_w[6+1] = { 0.0, 5.673612178944e+02, 5.631436885271e+02, 5.594024089970e+02, 5.560698047020e+02, 5.530898991250e+02, 5.504159734538e+02 }; double vdata_imag_w[6+1] = { 0.0, 5.293246849175e+02, 5.282149986629e+02, 5.270996558037e+02, 5.260027904925e+02, 5.249400845633e+02, 5.239212247086e+02 }; /*-------------------------------------------------------------------- c Class A size reference checksums c-------------------------------------------------------------------*/ double vdata_real_a[6+1] = { 0.0, 5.046735008193e+02, 5.059412319734e+02, 5.069376896287e+02, 5.077892868474e+02, 5.085233095391e+02, 5.091487099959e+02 }; double vdata_imag_a[6+1] = { 0.0, 5.114047905510e+02, 5.098809666433e+02, 5.098144042213e+02, 5.101336130759e+02, 5.104914655194e+02, 5.107917842803e+02 }; 
/*-------------------------------------------------------------------- c Class B size reference checksums c-------------------------------------------------------------------*/ double vdata_real_b[20+1] = { 0.0, 5.177643571579e+02, 5.154521291263e+02, 5.146409228649e+02, 5.142378756213e+02, 5.139626667737e+02, 5.137423460082e+02, 5.135547056878e+02, 5.133910925466e+02, 5.132470705390e+02, 5.131197729984e+02, 5.130070319283e+02, 5.129070537032e+02, 5.128182883502e+02, 5.127393733383e+02, 5.126691062020e+02, 5.126064276004e+02, 5.125504076570e+02, 5.125002331720e+02, 5.124551951846e+02, 5.124146770029e+02 }; double vdata_imag_b[20+1] = { 0.0, 5.077803458597e+02, 5.088249431599e+02, 5.096208912659e+02, 5.101023387619e+02, 5.103976610617e+02, 5.105948019802e+02, 5.107404165783e+02, 5.108576573661e+02, 5.109577278523e+02, 5.110460304483e+02, 5.111252433800e+02, 5.111968077718e+02, 5.112616233064e+02, 5.113203605551e+02, 5.113735928093e+02, 5.114218460548e+02, 5.114656139760e+02, 5.115053595966e+02, 5.115415130407e+02, 5.115744692211e+02 }; /*-------------------------------------------------------------------- c Class C size reference checksums c-------------------------------------------------------------------*/ double vdata_real_c[20+1] = { 0.0, 5.195078707457e+02, 5.155422171134e+02, 5.144678022222e+02, 5.140150594328e+02, 5.137550426810e+02, 5.135811056728e+02, 5.134569343165e+02, 5.133651975661e+02, 5.132955192805e+02, 5.132410471738e+02, 5.131971141679e+02, 5.131605205716e+02, 5.131290734194e+02, 5.131012720314e+02, 5.130760908195e+02, 5.130528295923e+02, 5.130310107773e+02, 5.130103090133e+02, 5.129905029333e+02, 5.129714421109e+02 }; double vdata_imag_c[20+1] = { 0.0, 5.149019699238e+02, 5.127578201997e+02, 5.122251847514e+02, 5.121090289018e+02, 5.121143685824e+02, 5.121496764568e+02, 5.121870921893e+02, 5.122193250322e+02, 5.122454735794e+02, 5.122663649603e+02, 5.122830879827e+02, 5.122965869718e+02, 5.123075927445e+02, 5.123166486553e+02, 5.123241541685e+02, 
5.123304037599e+02, 5.123356167976e+02, 5.123399592211e+02, 5.123435588985e+02, 5.123465164008e+02 }; epsilon = 1.0e-12; *verified = TRUE; *class = 'U'; if (d1 == 64 && d2 == 64 && d3 == 64 && nt == 6) { *class = 'S'; for (i = 1; i <= nt; i++) { err = (get_real(sums[i]) - vdata_real_s[i]) / vdata_real_s[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (get_imag(sums[i]) - vdata_imag_s[i]) / vdata_imag_s[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 128 && d2 == 128 && d3 == 32 && nt == 6) { *class = 'W'; for (i = 1; i <= nt; i++) { err = (get_real(sums[i]) - vdata_real_w[i]) / vdata_real_w[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (get_imag(sums[i]) - vdata_imag_w[i]) / vdata_imag_w[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 256 && d2 == 256 && d3 == 128 && nt == 6) { *class = 'A'; for (i = 1; i <= nt; i++) { err = (get_real(sums[i]) - vdata_real_a[i]) / vdata_real_a[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (get_imag(sums[i]) - vdata_imag_a[i]) / vdata_imag_a[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 512 && d2 == 256 && d3 == 256 && nt == 20) { *class = 'B'; for (i = 1; i <= nt; i++) { err = (get_real(sums[i]) - vdata_real_b[i]) / vdata_real_b[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (get_imag(sums[i]) - vdata_imag_b[i]) / vdata_imag_b[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 512 && d2 == 512 && d3 == 512 && nt == 20) { *class = 'C'; for (i = 1; i <= nt; i++) { err = (get_real(sums[i]) - vdata_real_c[i]) / vdata_real_c[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (get_imag(sums[i]) - vdata_imag_c[i]) / vdata_imag_c[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } if (*class != 'U') { printf("Result verification successful\n"); } else { printf("Result verification failed\n"); } printf("class = %1c\n", *class); 
}
ExplicitGeometry.h
/* * MIT License * * Copyright (c) 2018-2019 Benjamin Köhler * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #pragma once #ifndef BK_EXPLICITGEOMETRY_H #define BK_EXPLICITGEOMETRY_H #include <algorithm> #include <cmath> #include <fstream> #include <limits> #include <memory> #include <utility> #include <vector> #include <bkAlgorithm/mean.h> #include <bk/KDTree> #include <bk/Matrix> #include <bk/StringUtils> #ifdef BK_EMIT_PROGRESS #include <bk/Localization> #include <bk/Progress> #endif namespace bk { template<int TDims = -1, typename TValue = double> class ExplicitGeometry { //==================================================================================================== //===== ASSERTIONS //==================================================================================================== static_assert(TDims == -1 || TDims > 0); //==================================================================================================== //===== DEFINITIONS //==================================================================================================== using self_type = ExplicitGeometry<TDims>; public: using value_type = TValue; using point_type = Vec<TDims, value_type>; using kdtree_type = KDTree<point_type>; using container_type = std::vector<point_type>; /// @{ -------------------------------------------------- IS EXPLICIT GEOMETRY [[nodiscard]] static constexpr bool IsExplicit() noexcept { return true; } /// @} //==================================================================================================== //===== MEMBERS //==================================================================================================== private: std::vector<point_type> _points; std::unique_ptr<kdtree_type> _kdtree; bool _kdtree_is_up2date; //==================================================================================================== //===== CONSTRUCTORS & DESTRUCTOR //==================================================================================================== public: /// @{ -------------------------------------------------- CTOR ExplicitGeometry() : 
_kdtree_is_up2date(false) { /* do nothing */ } ExplicitGeometry(const self_type& other) : _points(other._points), _kdtree_is_up2date(false) { if (other._kdtree_is_up2date) { construct_kd_tree(); } } ExplicitGeometry(self_type&& other) : _points(std::move(other._points)), _kdtree(std::move(other._kdtree)), _kdtree_is_up2date(other._kdtree_is_up2date) { /* do nothing */ } /// @} /// @{ -------------------------------------------------- DTOR ~ExplicitGeometry() = default; /// @} //==================================================================================================== //===== GETTER //==================================================================================================== /// @{ -------------------------------------------------- GET NUM DIMENSIONS [[nodiscard]] unsigned int num_dimensions() const { return has_points() ? _points[0].num_elements() : 0U; } /// @} /// @{ -------------------------------------------------- GET NUM POINTS [[nodiscard]] unsigned int num_points() const { return _points.size(); } [[nodiscard]] bool has_points() const { return num_points() != 0; } /// @} /// @{ -------------------------------------------------- GET POINT [[nodiscard]] point_type& point(unsigned int id) { return _points[std::min(id, num_points() - 1)]; } [[nodiscard]] const point_type& point(unsigned int id) const { return _points[std::min(id, num_points() - 1)]; } /// @} /// @{ -------------------------------------------------- GET ITERATORS [[nodiscard]] typename container_type::iterator begin() { return _points.begin(); } [[nodiscard]] typename container_type::const_iterator begin() const { return _points.begin(); } [[nodiscard]] typename container_type::iterator end() { return _points.end(); } [[nodiscard]] typename container_type::const_iterator end() const { return _points.end(); } [[nodiscard]] typename container_type::reverse_iterator rbegin() { return _points.rbegin(); } [[nodiscard]] typename container_type::const_reverse_iterator rbegin() const 
{ return _points.rbegin(); } [[nodiscard]] typename container_type::reverse_iterator rend() { return _points.rend(); } [[nodiscard]] typename container_type::const_reverse_iterator rend() const { return _points.rend(); } /// @} /// @{ -------------------------------------------------- GET KDTREE [[nodiscard]] const std::unique_ptr<kdtree_type>& kd_tree() const { return _kdtree; } [[nodiscard]] bool has_kdtree() const { return _kdtree_is_up2date && _kdtree.get() != nullptr; } /// @} //==================================================================================================== //===== SETTER //==================================================================================================== /// @{ -------------------------------------------------- OPERATOR = [[maybe_unused]] self_type& operator=(const self_type& other) { _points = other._points; _kdtree_is_up2date = false; if (other._kdtree_is_up2date) { construct_kd_tree(); } return *this; } [[maybe_unused]] self_type& operator=(self_type&& other) noexcept { _points = std::move(other._points); _kdtree = std::move(other._kdtree); _kdtree_is_up2date = other._kdtree_is_up2date; return *this; } /// @} /// @{ -------------------------------------------------- SET NUM POINTS void set_num_points(unsigned int n) { _points.resize(n); this->clear_kd_tree(); } /// @} //==================================================================================================== //===== ADD / REMOVE //==================================================================================================== /// @{ -------------------------------------------------- PUSH BACK template<typename... TPoints> void push_back(TPoints&& ... vecs) { (_points.push_back(std::forward<TPoints>(vecs)), ...); clear_kd_tree(); } /// @} /// @{ -------------------------------------------------- EMPLACE BACK template<typename... TArgs> void emplace_back(TArgs&& ... 
args) { _points.emplace_back(std::forward<TArgs>(args)...); clear_kd_tree(); } /// @} /// @{ -------------------------------------------------- REMOVE [[maybe_unused]] bool remove(unsigned int i) { if (i < num_points()) { _points.erase(_points.begin() + i); clear_kd_tree(); return true; } return false; } /// @} //==================================================================================================== //===== KD TREE //==================================================================================================== /// @{ -------------------------------------------------- KDTREE CLEAR void clear_kd_tree() { _kdtree.reset(); _kdtree_is_up2date = false; } /// @} /// @{ -------------------------------------------------- KDTREE INIT void construct_kd_tree() { clear_kd_tree(); if (num_points() != 0) { _kdtree = std::make_unique<kdtree_type>(); _kdtree->construct(begin(), end(), num_dimensions()); _kdtree_is_up2date = true; } } /// @} /// @{ -------------------------------------------------- KDTREE IS UP TO DATE bool kd_tree_is_up_to_date() const { return _kdtree_is_up2date && _kdtree.get() != nullptr; } /// @} /// @{ -------------------------------------------------- HELPERS: KDTREE QUERIES private: template<typename TVec> [[nodiscard]] static constexpr bool _vector_is_same_size_static(const TVec& pos) { return bk::is_static_vector_of_size_v<TVec, point_type::NumElementsAtCompileTime()> || bk::is_dynamic_matrix_v<TVec>; } public: /// @} /// @{ -------------------------------------------------- CLOSEST POINT template<typename TVec> [[nodiscard]] bk::KDPointInfo<point_type> closest_point(const TVec& pos) const { //static_assert(_vector_is_same_size_static(pos)); if (kd_tree_is_up_to_date()) { return kd_tree()->nearest_neighbor(pos); } else { using kdpoint_info_type = typename kdtree_type::kdpoint_info_type; kdpoint_info_type closestPoint; closestPoint.distance_to_query = std::numeric_limits<double>::max(); #pragma omp parallel for for (unsigned int pointId = 
0; pointId < num_points(); ++pointId) { const double sqDist = point(pointId).distance_squared(pos); #pragma omp critical(closest_point_comparison) { if (sqDist < closestPoint.distance_to_query) { closestPoint.distance_to_query = sqDist; closestPoint.point_id = pointId; } } } closestPoint.distance_to_query = std::sqrt(closestPoint.distance_to_query); closestPoint.point = point(closestPoint.point_id); return closestPoint; } } /// @} /// @{ -------------------------------------------------- POINTS WITHIN RADIUS template<typename TVec> [[nodiscard]] std::vector<bk::KDPointInfo<point_type>> points_within_radius(const TVec& pos, double radius) const { static_assert(_vector_is_same_size_static(pos)); if (kd_tree_is_up_to_date()) { return kd_tree()->neighbors_within_radius(pos, radius); } else { using kdpoint_info_type = typename kdtree_type::kdpoint_info_type; std::vector<kdpoint_info_type> closestPoints; const double sqRadius = radius * radius; #pragma omp parallel for for (unsigned int pointId = 0; pointId < num_points(); ++pointId) { const double sqDist = point(pointId).distance_squared(pos); if (sqDist < sqRadius) { kdpoint_info_type closestPoint; closestPoint.distance_to_query = std::sqrt(sqDist); closestPoint.point_id = pointId; closestPoint.point = point(pointId); #pragma omp critical(points_within_radius_comparison) { closestPoints.emplace_back(std::move(closestPoint)); } } } return closestPoints; } } /// @} /// @{ -------------------------------------------------- CLOSEST N POINTS template<typename TVec> [[nodiscard]] std::vector<bk::KDPointInfo<point_type>> closest_n_points(const TVec& pos, unsigned int n) const { static_assert(_vector_is_same_size_static(pos)); if (kd_tree_is_up_to_date()) { return kd_tree()->k_nearest_neighbors(pos, n); } else { using pdist_pair_type = std::pair<unsigned int /*pointId*/, double /*distance squared*/>; std::vector<pdist_pair_type> points_distances(num_points()); #pragma omp parallel for for (unsigned int pointId = 0; pointId < 
num_points(); ++pointId) { const double sqDist = point(pointId).distance_squared(pos); points_distances[pointId].first = pointId; points_distances[pointId].second = sqDist; } std::sort(points_distances.begin(), points_distances.end(), [](const pdist_pair_type& a, const pdist_pair_type& b) { return a.second < b.second; }); const unsigned int _n = std::min(n, static_cast<unsigned int>(points_distances.size())); using kdpoint_info_type = typename kdtree_type::kdpoint_info_type; std::vector<kdpoint_info_type> closestPoints(_n); #pragma omp parallel for for (unsigned int i = 0; i < _n; ++i) { closestPoints[i].distance_to_query = std::sqrt(points_distances[i].second); closestPoints[i].point_id = points_distances[i].first; closestPoints[i].point = point(closestPoints[i].point_id); } return closestPoints; } } /// @} //==================================================================================================== //===== FUNCTIONS //==================================================================================================== /// @{ -------------------------------------------------- CLEAR void clear() { _points.clear(); clear_kd_tree(); } /// @} /// @{ -------------------------------------------------- RESERVE void reserve(unsigned int n) { _points.reserve(n); } /// @} /// @{ -------------------------------------------------- CALCULATE CENTER [[nodiscard]] auto center() const { return bk::mean(begin(), end()); } /// @} //==================================================================================================== //===== I/O //==================================================================================================== /// @{ -------------------------------------------------- SAVE [[maybe_unused]] bool save(std::string_view filename) const { /* * check filename */ std::string fname(filename); const std::string suffix = ".egeom"; if (fname.empty()) { fname = "explicit-geometry" + suffix; } else if (!bk::string_utils::ends_with(fname, suffix)) { 
fname.append(suffix); } /* * create file */ std::ofstream file(fname, std::ios_base::out | std::ios_base::binary); /* * save */ const bool success = save(file); if (success) { file.close(); } return success; } [[maybe_unused]] bool save(std::ofstream& file) const { if (!file.is_open() || !file.good()) { return false; } std::uint8_t numDimensions = num_dimensions(); file.write(reinterpret_cast<char*>(&numDimensions), sizeof(std::uint8_t)); std::uint32_t numPoints = num_points(); file.write(reinterpret_cast<char*>(&numPoints), sizeof(std::uint32_t)); for (unsigned int pointId = 0; pointId < numPoints; ++pointId) { const point_type& v = point(pointId); for (std::uint8_t dimId = 0; dimId < numDimensions; ++dimId) { double dtemp = static_cast<double>(v[dimId]); file.write(reinterpret_cast<char*>(&dtemp), sizeof(double)); } } return true; } /// @} /// @{ -------------------------------------------------- LOAD [[maybe_unused]] bool load(std::string_view filename) { /* * check file ending */ if (!string_utils::ends_with(filename.data(), ".egeom")) { return false; } /* * open file */ std::ifstream file(filename.data(), std::ios_base::in | std::ios_base::binary); /* * load */ const bool success = load(file); if (success) { file.close(); } return success; } [[maybe_unused]] bool load(std::ifstream& file) { clear(); if (!file.is_open() || !file.good()) { return false; } std::uint8_t numDimensions; file.read(reinterpret_cast<char*>(&numDimensions), sizeof(std::uint8_t)); std::uint32_t numPoints; file.read(reinterpret_cast<char*>(&numPoints), sizeof(std::uint32_t)); set_num_points(numPoints); std::vector<double> pointBuffer(numDimensions * numPoints); file.read(reinterpret_cast<char*>(pointBuffer.data()), numDimensions * numPoints * sizeof(double)); #pragma omp parallel for for (unsigned int pointId = 0; pointId < numPoints; ++pointId) { point_type& v = point(pointId); for (std::uint8_t dimId = 0; dimId < numDimensions; ++dimId) { v[dimId] = pointBuffer[numDimensions * pointId + 
dimId]; } } return num_points() != 0; } /// @} }; // class ExplicitGeometry } // namespace bk #endif //BK_EXPLICITGEOMETRY_H
GB_unop__sinh_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): this translation unit is one instantiation of a generator
// template; only comments were touched here — all code tokens are unchanged.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__sinh_fp32_fp32
// op(A') function:  GB_unop_tran__sinh_fp32_fp32

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = sinhf (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = sinhf (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ; \
    Cx [pC] = sinhf (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SINH || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__sinh_fp32_fp32
(
    float *Cx,          // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast reduces to a plain parallel memcpy
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = sinhf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = sinhf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__sinh_fp32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is textually included; it expands using the
    // GB_* macros defined above for this type/operator combination
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
elemwise_binary_op.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 *  Copyright (c) 2016 by Contributors
 * \file elemwise_binary_op.h
 * \brief Function definition of elementwise binary operators
 */
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_

#include <mxnet/operator_util.h>
#include <mxnet/op_attr_types.h>
#include <vector>
#include <string>
#include <utility>
#include <typeinfo>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../../engine/openmp.h"
#include "elemwise_unary_op.h"
#include "../../common/utils.h"
#include "./init_op.h"

namespace mxnet {
namespace op {

/*! Gather binary operator functions into ElemwiseBinaryOp class */
class ElemwiseBinaryOp : public OpBase {
 public:
  /*! \brief For sparse, assume missing rvalue is 0 */
  template<typename OP, int Req>
  struct MissingRValueOp {
    typedef OP Operation;
    // out[i] = OP(lhs[i], 0) under the write-request policy Req
    template<typename DType>
    MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs) {
      KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0)));
    }
  };

  /*! \brief For sparse, assume missing lvalue is 0 */
  template<typename OP, int Req>
  struct MissingLValueOp {
    typedef OP Operation;
    // out[i] = OP(0, rhs[i]) under the write-request policy Req
    template<typename DType>
    MSHADOW_XINLINE static void Map(int i, DType *out, const DType *rhs) {
      KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i]));
    }
  };

 private:
  /*!
   * \brief CSR operation requires temp space
   */
  enum ResourceRequestType {
    kTempSpace
  };

  /*!
   * \brief Fill contiguous dense output rows with value computed from 0 lhs and 0 rhs input
   *        CPU-Only version
   */
  template<typename DType, typename OP, typename xpu>
  static inline size_t FillDense(mshadow::Stream<xpu> *s,
                                 const size_t idx_l,
                                 const size_t idx_r,
                                 const OpReqType req,
                                 mshadow::Tensor<xpu, 2, DType> *out,
                                 const size_t iter_out) {
    const int index_out_min = static_cast<int>(std::min(idx_l, idx_r));
    if (static_cast<size_t>(index_out_min) > iter_out) {
      // OP(0, 0) is row-invariant, so compute it once and broadcast
      const DType zero_input_val = OP::Map(DType(0), DType(0));
      #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
      for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) {
        Fill<false>(s, (*out)[i], req, zero_input_val);
      }
    }
    return static_cast<size_t>(index_out_min);  // MSVC wants OMP loops to always use 'int'
  }

  // two NDArrays alias the same storage iff they share an engine variable
  static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) {
    return a1.var() == a2.var();
  }

 public:
  /*! \brief Minimum of three */
  static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) {
    return a < b ? (a < c ? a : c) : (b < c ? b : c);
  }

 private:
  // Backward when gradient needs neither forward input: lgrad = LOP(ograd), rgrad = ROP(ograd).
  // When the op is identity and the request is in-place, only verifies aliasing.
  template<typename LOP, typename ROP>
  static void BackwardUseNone_(const nnvm::NodeAttrs &attrs,
                               mshadow::Stream<cpu>* s,
                               const std::vector<TBlob> &inputs,
                               const std::vector<OpReqType> &req,
                               const std::vector<TBlob> &outputs) {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      using namespace mxnet_op;
      // round element count up to a whole number of SIMD lanes
      const int size = static_cast<int>(
        (outputs[0].Size() + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes);
      const DType *ograd_dptr = inputs[0].dptr<DType>();
      if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) {
        CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>());
      } else if (req[0] != kNullOp) {
        DType *lgrad_dptr = outputs[0].dptr<DType>();
        MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
          Kernel<mxnet_op::op_with_req<LOP, Req>, cpu>::Launch(s, size, lgrad_dptr, ograd_dptr);
        });
      }
      if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) {
        CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>());
      } else if (req[1] != kNullOp) {
        DType *rgrad_dptr = outputs[1].dptr<DType>();
        MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
          Kernel<mxnet_op::op_with_req<ROP, Req>, cpu>::Launch(s, size, rgrad_dptr, ograd_dptr);
        });
      }
    });
  }

#if MXNET_USE_CUDA
  template<typename LOP, typename ROP>
  static void BackwardUseNone_(const nnvm::NodeAttrs &attrs,
                               mshadow::Stream<gpu>* s,
                               const std::vector<TBlob> &inputs,
                               const std::vector<OpReqType> &req,
                               const std::vector<TBlob> &outputs);
#endif

  // Backward using both forward inputs: grad = d(OP)/d(input) * ograd, fused via
  // backward_grad_tuned. inputs = {ograd, lhs, rhs}; outputs = {lgrad, rgrad}.
  template<typename LOP, typename ROP>
  static void BackwardUseIn_(const nnvm::NodeAttrs &attrs,
                             mshadow::Stream<cpu>* s,
                             const std::vector<TBlob> &inputs,
                             const std::vector<OpReqType> &req,
                             const std::vector<TBlob> &outputs) {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      DCHECK_EQ(outputs.size(), 2U);
      DCHECK_EQ(inputs.size(), 3U);
      const DType *ograd_dptr = inputs[0].dptr<DType>();
      const DType *lhs_dptr = inputs[1].dptr<DType>();
      const DType *rhs_dptr = inputs[2].dptr<DType>();
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        const int size = static_cast<int>(
          (outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1)
          / mxnet_op::DataType<DType>::kLanes);
        DType * lgrad_dptr = outputs[0].dptr<DType>();
        mxnet_op::Kernel<
          mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<LOP>, Req>, cpu>::Launch(
            s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);
      });
      MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
        const int size = static_cast<int>(
          (outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1)
          / mxnet_op::DataType<DType>::kLanes);
        DType * rgrad_dptr = outputs[1].dptr<DType>();
        mxnet_op::Kernel<
          mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<ROP>, Req>, cpu>::Launch(
            s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);
      });
    });
  }

#if MXNET_USE_CUDA
  template<typename LOP, typename ROP>
  static void BackwardUseIn_(const nnvm::NodeAttrs &attrs,
                             mshadow::Stream<gpu>* s,
                             const std::vector<TBlob> &inputs,
                             const std::vector<OpReqType> &req,
                             const std::vector<TBlob> &outputs);
#endif

  // Row-sparse backward: grad_in = OP(lhs, rhs) scaled in-place by ograd.
  // NOTE(review): backup_compute and the in*_ok_dense flags are accepted but not
  // used in this visible body — presumably consumed by callers/specializations; verify.
  template<
    typename xpu, typename LOP, typename ROP,
    bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false,
    typename BackupCompute>
  static inline void RspRspOpBackward(const nnvm::NodeAttrs &attrs,
                                      const OpContext &ctx,
                                      const std::vector<NDArray> &inputs,
                                      const std::vector<OpReqType> &req,
                                      const std::vector<NDArray> &outputs,
                                      BackupCompute backup_compute) {
    mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
    // lhs grad
    if (req[0] != kNullOp) {
      // RspRspOp can handle dense outputs so long as OP(0, 0) == 0
      RspRspOp<LOP>(
        s, attrs, ctx, inputs[1], inputs[2], req[0], outputs[0],
        false, false, false, false);
      // lhs in-place
      RspRspOp<op::mshadow_op::mul>(
        s, attrs, ctx, outputs[0], inputs[0], req[0], outputs[0],
        false, false, true, false);
    }
    // rhs grad
    if (req[1] != kNullOp) {
      RspRspOp<ROP>(
        s, attrs, ctx, inputs[1], inputs[2], req[1], outputs[1],
        false, false, false, false);
      // rhs in-place
      RspRspOp<op::mshadow_op::mul>(
        s, attrs, ctx, inputs[0], outputs[1], req[1], outputs[1],
        false, false, true, false);
    }
  }

  // Backward of dns * csr: dgrad = ograd * csr-side input, csr grad = ograd * dns-side input.
  // Only multiplication (LOP=right, ROP=left) is supported.
  template<typename xpu, typename LOP, typename ROP>
  static inline void DnsCsrCsrOpBackward(const nnvm::NodeAttrs &attrs,
                                         const OpContext &ctx,
                                         const std::vector<NDArray> &inputs,
                                         const std::vector<OpReqType> &req,
                                         const std::vector<NDArray> &outputs) {
    const bool supported_ops = std::is_same<mshadow_op::right, LOP>::value &&
                               std::is_same<mshadow_op::left, ROP>::value;
    CHECK(supported_ops)
      << "Only backward for mul is supported (LOP should be right, ROP should be left)";
    const NDArray& out_grad = inputs[0];
    const NDArray& lhs_in = inputs[1];
    const NDArray& rhs_in = inputs[2];
    const NDArray& lhs_grad = outputs[0];
    const NDArray& rhs_grad = outputs[1];
    // reverse == true means the lhs was the csr operand in the forward pass
    const bool reverse = (outputs[0].storage_type() == kCSRStorage);
    if (reverse) {
      DnsCsrCsrOp<xpu, mshadow_op::mul>(attrs, ctx, out_grad, rhs_in, req[0], lhs_grad, false);
      Compute<xpu, mshadow_op::mul>(attrs, ctx, {out_grad.data(), lhs_in.data()},
                                    {req[1]}, {rhs_grad.data()});
    } else {
      DnsCsrCsrOp<xpu, mshadow_op::mul>(attrs, ctx, out_grad, lhs_in, req[1], rhs_grad, false);
      Compute<xpu, mshadow_op::mul>(attrs, ctx, {out_grad.data(), rhs_in.data()},
                                    {req[0]}, {lhs_grad.data()});
    }
  }

 public:
  /*! \brief Binary op handling for lhr/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
  template<typename OP>
  static void RspRspOp(mshadow::Stream<cpu> *s,
                       const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const NDArray &lhs,
                       const NDArray &rhs,
                       OpReqType req,
                       const NDArray &output,
                       bool lhs_may_be_dense,
                       bool rhs_may_be_dense,
                       bool allow_inplace,
                       bool scatter);

  /*! \brief Binary op handling for lhr/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
  template<typename OP>
  static void RspRspOp(mshadow::Stream<gpu> *s,
                       const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const NDArray &lhs,
                       const NDArray &rhs,
                       OpReqType req,
                       const NDArray &output,
                       bool lhs_may_be_dense,
                       bool rhs_may_be_dense,
                       bool allow_inplace,
                       bool scatter);

  /*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
  template<typename OP>
  static void CsrCsrOp(mshadow::Stream<cpu> *s,
                       const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const NDArray &lhs,
                       const NDArray &rhs,
                       OpReqType req,
                       const NDArray &output);

  /*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
  template<typename OP>
  static void CsrCsrOp(mshadow::Stream<gpu> *s,
                       const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const NDArray &lhs,
                       const NDArray &rhs,
                       OpReqType req,
                       const NDArray &output);

  /*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
  template<typename OP>
  static void DnsCsrDnsOp(mshadow::Stream<cpu> *s,
                          const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const NDArray &lhs,
                          const NDArray &rhs,
                          OpReqType req,
                          const NDArray &output,
                          const bool reverse);

  /*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
  template<typename OP>
  static void DnsCsrDnsOp(mshadow::Stream<gpu> *s,
                          const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const NDArray &lhs,
                          const NDArray &rhs,
                          OpReqType req,
                          const NDArray &output,
                          const bool reverse);

  /*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
  template<typename xpu, typename OP>
  static void DnsCsrCsrOp(const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const NDArray &lhs,
                          const NDArray &rhs,
                          OpReqType req,
                          const NDArray &output,
                          const bool reverse);

  /*! \brief DNS -op- RSP binary operator for non-canonical NDArray */
  template<typename xpu, typename OP>
  static void DnsRspDnsOp(mshadow::Stream<xpu> *s,
                          const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const NDArray &lhs,
                          const NDArray &rhs,
                          OpReqType req,
                          const NDArray &output,
                          const bool reverse);

 public:
  /*!
   * \brief Rsp-op-Rsp operation which produces a dense result
   * \param attrs Attributes
   * \param dev_mask Device mask
   * \param dispatch_mode Dispatch Mode
   * \param in_attrs Input storage attributes
   * \param out_attrs Output storage attributes
   * \return true if handled
   */
  static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs,
                                          int dev_mask,
                                          DispatchMode* dispatch_mode,
                                          std::vector<int> *in_attrs,
                                          std::vector<int> *out_attrs);

  /*!
   * \brief Allow one of the binary inputs to be dense and still produce a sparse output.
   *        Typically used for sparse * dense = sparse.
   *        Note: for csr, it dispatches to fallback other than csr, csr -> csr
   * \param attrs Attributes
   * \param dev_mask Device mask
   * \param dispatch_mode Dispatch Mode
   * \param in_attrs Input storage attributes
   * \param out_attrs Output storage attributes
   * \return true if handled
   */
  static bool PreferSparseStorageType(const nnvm::NodeAttrs& attrs,
                                      int dev_mask,
                                      DispatchMode* dispatch_mode,
                                      std::vector<int> *in_attrs,
                                      std::vector<int> *out_attrs) {
    using namespace common;
    CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name;
    CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name;
    const auto& lhs_stype = in_attrs->at(0);
    const auto& rhs_stype = in_attrs->at(1);
    auto& out_stype = out_attrs->at(0);
    bool dispatched = false;
    // sparse FComputeEx kernels only exist for CPU; fall back elsewhere
    const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask;
    const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
                             DispatchMode::kFComputeEx;
    if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
      // dns, dns -> dns
      dispatched = storage_type_assign(&out_stype, kDefaultStorage,
                                       dispatch_mode, DispatchMode::kFCompute);
    }
    if (!dispatched && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
      // rsp, rsp -> rsp
      dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
                                       dispatch_mode, dispatch_ex);
    }
    if (!dispatched && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
      // csr, csr -> csr
      dispatched = storage_type_assign(&out_stype, kCSRStorage,
                                       dispatch_mode, dispatch_ex);
    }
    if (!dispatched &&
        ((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
         (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage))) {
      // rsp, dns -> rsp
      // dns, rsp -> rsp
      dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
                                       dispatch_mode, dispatch_ex);
    }
    if (!dispatched &&
        ((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
         (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage))) {
      // csr, dns -> csr
      // dns, csr -> csr
      // NOTE(review): uses kFComputeEx directly (not dispatch_ex), so this pair
      // is dispatched to FComputeEx even on non-CPU devices — confirm intended.
      dispatched = storage_type_assign(&out_stype, kCSRStorage,
                                       dispatch_mode, DispatchMode::kFComputeEx);
    }
    if (!dispatched) {
      dispatched = dispatch_fallback(out_attrs, dispatch_mode);
    }
    return dispatched;
  }

  /*!
   * \brief Allow one of the inputs to be dense and produce a dense output,
   *        for rsp inputs only support when both inputs are rsp type.
   * \param attrs Attributes
   * \param dev_mask Device mask
   * \param dispatch_mode Dispatch Mode
   * \param in_attrs Input storage attributes
   * \param out_attrs Output storage attributes
   * \return true if handled
   */
  template<bool cpu_only, bool rsp, bool csr>
  static bool PreferDenseStorageType(const nnvm::NodeAttrs& attrs,
                                     const int dev_mask,
                                     DispatchMode* dispatch_mode,
                                     std::vector<int> *in_attrs,
                                     std::vector<int> *out_attrs) {
    using namespace common;
    CHECK_EQ(in_attrs->size(), 2);
    CHECK_EQ(out_attrs->size(), 1);
    const auto lhs_stype = (*in_attrs)[0];
    const auto rhs_stype = (*in_attrs)[1];
    bool dispatched = false;
    const bool invalid_ctx = cpu_only && dev_mask != mshadow::cpu::kDevMask;
    const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
                             DispatchMode::kFComputeEx;
    if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
      // dns, dns ... -> dns
      dispatched = storage_type_assign(out_attrs, kDefaultStorage,
                                       dispatch_mode, DispatchMode::kFCompute);
    }
    if (!dispatched && rsp && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
      // rsp, rsp, ... -> rsp
      dispatched = storage_type_assign(out_attrs, kRowSparseStorage,
                                       dispatch_mode, DispatchMode::kFComputeEx);
    }
    if (!dispatched && csr && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
      // csr, csr, ... -> csr
      dispatched = storage_type_assign(out_attrs, kCSRStorage,
                                       dispatch_mode, dispatch_ex);
    }
    if (!dispatched && ((lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage) ||
                        (lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage))) {
      // dense, csr -> dense / csr, dense -> dense
      dispatched = storage_type_assign(out_attrs, kDefaultStorage,
                                       dispatch_mode, DispatchMode::kFComputeEx);
    }
    if (!dispatched && ((lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage) ||
                        (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage))) {
      // dense, rsp -> dense / rsp, dense -> dense
      dispatched = storage_type_assign(out_attrs, kDefaultStorage,
                                       dispatch_mode, DispatchMode::kFComputeEx);
    }
    if (!dispatched) {
      // NOTE(review): unlike PreferSparseStorageType, the result of
      // dispatch_fallback is discarded and the function unconditionally returns
      // true — confirm this asymmetry is intentional.
      dispatch_fallback(out_attrs, dispatch_mode);
    }
    return true;
  }

  /*!
   * \brief Backward pass computing input gradient using forward inputs
   * \param attrs Attributes
   * \param dev_mask Device mask
   * \param dispatch_mode Dispatch Mode
   * \param in_attrs Input storage attributes
   * \param out_attrs Output storage attributes
   * \return true if handled
   */
  static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs,
                                       int dev_mask,
                                       DispatchMode* dispatch_mode,
                                       std::vector<int> *in_attrs,
                                       std::vector<int> *out_attrs);

  // Elementwise forward restricted to integer dtypes (MXNET_INT_TYPE_SWITCH)
  template<typename xpu, typename OP>
  static void ComputeInt(const nnvm::NodeAttrs &attrs,
                         const OpContext &ctx,
                         const std::vector<TBlob> &inputs,
                         const std::vector<OpReqType> &req,
                         const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    if (req[0] == kNullOp) return;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 2U);
    CHECK_EQ(outputs.size(), 1U);
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MXNET_INT_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        // lane-rounded size; minthree guards against broadcast-mismatched blobs
        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
        + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
        if (size != 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
          outputs[0].dptr<DType>(),
          inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
        }
      });
    });
  }

  // CPU elementwise forward over all numeric dtypes (bool explicitly rejected)
  template<typename OP>
  static void Compute_(const nnvm::NodeAttrs &attrs,
                       mshadow::Stream<cpu> *s,
                       const std::vector<TBlob> &inputs,
                       const std::vector<OpReqType> &req,
                       const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    CHECK_EQ(inputs.size(), 2U);
    CHECK_EQ(outputs.size(), 1U);
    if (outputs[0].type_flag_ == mshadow::kBool) {
      LOG(FATAL) << "Operator " << attrs.op->name << " does not support boolean type";
    }
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
        + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
        if (size != 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(s, size,
          outputs[0].dptr<DType>(),
          inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
        }
      });
    });
  }

#if MXNET_USE_CUDA
  template<typename OP>
  static void Compute_(const nnvm::NodeAttrs &attrs,
                       mshadow::Stream<gpu> *s,
                       const std::vector<TBlob> &inputs,
                       const std::vector<OpReqType> &req,
                       const std::vector<TBlob> &outputs);
#endif

  // Device-generic entry point: unwraps the stream and forwards to Compute_
  template<typename xpu, typename OP>
  static void Compute(const nnvm::NodeAttrs &attrs,
                      const OpContext &ctx,
                      const std::vector<TBlob> &inputs,
                      const std::vector<OpReqType> &req,
                      const std::vector<TBlob> &outputs) {
    if (req[0] == kNullOp) return;
    mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
    Compute_<OP>(attrs, s, inputs, req, outputs);
  }

  // Backward for mixed-dtype unary: grad from {ograd, forward input};
  // restricted to real dtypes (integer/bool gradients rejected)
  template<typename xpu, typename OP>
  static void MixedUnaryBackwardUseInCompute(const nnvm::NodeAttrs &attrs,
                                             const OpContext &ctx,
                                             const std::vector<TBlob> &inputs,
                                             const std::vector<OpReqType> &req,
                                             const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    if (req[0] == kNullOp) return;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 2U);
    CHECK_EQ(outputs.size(), 1U);
    if (mxnet::common::is_int(outputs[0].type_flag_) ||
        outputs[0].type_flag_ == mshadow::kBool) {
      LOG(FATAL) << "gradient computation of operator " << attrs.op->name << " for "
                 << mshadow::dtype_string(outputs[0].type_flag_)
                 << " type is not supported";
    }
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
        + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
        if (size != 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
          outputs[0].dptr<DType>(),
          inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
        }
      });
    });
  }

  // Same as above but takes {ograd, forward input, forward output} and reads
  // the forward *output* (inputs[2]) for the gradient computation
  template<typename xpu, typename OP>
  static void MixedUnaryBackwardUseInOutCompute(const nnvm::NodeAttrs &attrs,
                                                const OpContext &ctx,
                                                const std::vector<TBlob> &inputs,
                                                const std::vector<OpReqType> &req,
                                                const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    if (req[0] == kNullOp) return;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 3U);
    CHECK_EQ(outputs.size(), 1U);
    if (mxnet::common::is_int(outputs[0].type_flag_) ||
        outputs[0].type_flag_ == mshadow::kBool) {
      LOG(FATAL) << "gradient computation of operator " << attrs.op->name << " for "
                 << mshadow::dtype_string(outputs[0].type_flag_)
                 << " type is not supported";
    }
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[2].Size())
        + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
        if (size != 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
          outputs[0].dptr<DType>(),
          inputs[0].dptr<DType>(), inputs[2].dptr<DType>());
        }
      });
    });
  }

  // Elementwise forward that also accepts boolean dtypes
  // (definition continues beyond this file chunk)
  template<typename xpu, typename OP>
  static void ComputeWithBool(const nnvm::NodeAttrs &attrs,
                              const OpContext &ctx,
                              const std::vector<TBlob> &inputs,
                              const std::vector<OpReqType> &req,
                              const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    if (req[0] == kNullOp) return;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 2U);
    CHECK_EQ(outputs.size(), 1U);
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH_WITH_BOOL(outputs[0].type_flag_, DType, { const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes; if (size != 0) { Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size, outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>()); } }); }); } template<typename xpu, typename OP> static void ComputeLogic(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; if (req[0] == kNullOp) return; Stream<xpu> *s = ctx.get_stream<xpu>(); CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1U); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, { const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes; if (size != 0) { Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size, outputs[0].dptr<bool>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>()); } }); }); } template<typename xpu, typename OP> static void ComputeEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { using namespace common; CHECK_EQ(inputs.size(), 2); CHECK_EQ(outputs.size(), 1); if (req[0] == kNullOp) return; const auto lhs_stype = inputs[0].storage_type(); const auto rhs_stype = inputs[1].storage_type(); const auto out_stype = outputs[0].storage_type(); mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); if ((ContainsOnlyStorage(inputs, kRowSparseStorage)) && (out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) { // rsp, rsp -> rsp // rsp, rsp -> dns RspRspOp<OP>( s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], false, false, false, false); } else if (ContainsOnlyStorage(inputs, kCSRStorage) 
&& out_stype == kCSRStorage) { // csr, csr -> csr CsrCsrOp<OP>(s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]); } else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) || (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) && out_stype == kDefaultStorage) { const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1]; const NDArray& csr = (lhs_stype == kCSRStorage)? inputs[0] : inputs[1]; const bool reverse = (lhs_stype == kCSRStorage); DnsCsrDnsOp<OP>(s, attrs, ctx, dns, csr, req[0], outputs[0], reverse); } else if (((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) || (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) && out_stype == kDefaultStorage) { const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1]; const bool reverse = (lhs_stype == kRowSparseStorage); const NDArray& rsp = (reverse)? inputs[0] : inputs[1]; DnsRspDnsOp<xpu, OP>(s, attrs, ctx, dns, rsp, req[0], outputs[0], reverse); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } /*! 
\brief ComputeEx allowing dense lvalue and/or rvalue */ template<typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense> static void ComputeDnsLRValueEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { using namespace mshadow; using namespace mshadow::expr; CHECK_EQ(inputs.size(), 2); CHECK_EQ(outputs.size(), 1); if (req[0] == kNullOp) return; const auto lhs_stype = inputs[0].storage_type(); const auto rhs_stype = inputs[1].storage_type(); const auto out_stype = outputs[0].storage_type(); if ((out_stype == kRowSparseStorage || out_stype == kDefaultStorage) && ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) || (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) || (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) && lhs_may_be_dense && rhs_may_be_dense) { // rsp, rsp -> rsp // rsp, rsp -> dns // rsp, dns -> rsp // dns, rsp -> rsp // More than once dense not allowed (this will be checked in RspRspOp): // rsp, dns -> dns <-- NOT ALLOWED // dns, rsp -> dns <-- NOT ALLOWED mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); RspRspOp<OP>( s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], lhs_may_be_dense, rhs_may_be_dense, false, false); } else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) { ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs); } else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) || (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) && out_stype == kCSRStorage) { const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1]; const NDArray& csr = (lhs_stype == kCSRStorage)? 
inputs[0] : inputs[1]; const bool reverse = (lhs_stype == kCSRStorage); DnsCsrCsrOp<xpu, OP>(attrs, ctx, dns, csr, req[0], outputs[0], reverse); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseNone(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); BackwardUseNone_<LOP, ROP>(attrs, s, inputs, req, outputs); } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseNoneEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { CHECK_EQ(inputs.size(), 1U); // output grad CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad const auto in_stype = inputs[0].storage_type(); const auto lhs_stype = outputs[0].storage_type(); const auto rhs_stype = outputs[1].storage_type(); // lhs grad if (req[0] != kNullOp) { if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) { CHECK_EQ(outputs[0].storage_type(), in_stype); // rsp -> rsp, _. op requires 0-input returns 0-output DCHECK_LT(std::fabs(static_cast<float>(LOP::Map(0))), 1e-5f); UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]}); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } // rhs grad if (req[1] != kNullOp) { if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) { CHECK_EQ(outputs[0].storage_type(), in_stype); // rsp -> _, rsp. 
op requires 0-input returns 0-output DCHECK_LT(std::fabs(static_cast<float>(ROP::Map(0))), 1e-5f); UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]}); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseIn(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); BackwardUseIn_<LOP, ROP>(attrs, s, inputs, req, outputs); } template< typename xpu, typename LOP, typename ROP, bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false> static inline void BackwardUseInEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { using namespace common; CHECK_EQ(inputs.size(), 3U); CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad const auto out_grad_stype = inputs[0].storage_type(); const auto lhs_grad_stype = outputs[0].storage_type(); const auto rhs_grad_stype = outputs[1].storage_type(); if (ContainsOnlyStorage(inputs, kRowSparseStorage) && (lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) && (rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) { // rsp, rsp, rsp -> [dns, rsp], [dns, rsp] RspRspOpBackward<xpu, LOP, ROP, in0_ok_dense, in1_ok_dense, in2_ok_dense>( attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>); } if (((lhs_grad_stype == kDefaultStorage && rhs_grad_stype == kCSRStorage) || (lhs_grad_stype == kCSRStorage && rhs_grad_stype == kDefaultStorage)) && out_grad_stype == kDefaultStorage) { // dns, csr, dns -> [csr, dns] / csr, dns, dns -> [dns, csr] DnsCsrCsrOpBackward<xpu, LOP, ROP>(attrs, ctx, inputs, req, outputs); } } }; // class ElemwiseBinaryOp /*! 
\brief Binary launch */ #define MXNET_OPERATOR_REGISTER_BINARY(name) \ NNVM_REGISTER_OP(name) \ .set_num_inputs(2) \ .set_num_outputs(1) \ .set_attr<nnvm::FListInputNames>("FListInputNames", \ [](const NodeAttrs& attrs) { \ return std::vector<std::string>{"lhs", "rhs"}; \ }) \ .set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<2, 1>) \ .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>) \ .set_attr<nnvm::FInplaceOption>("FInplaceOption", \ [](const NodeAttrs& attrs){ \ return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; \ }) \ .add_argument("lhs", "NDArray-or-Symbol", "first input") \ .add_argument("rhs", "NDArray-or-Symbol", "second input") /*! \brief Binary launch, with FComputeEx for csr and rsp available */ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseStorageType<2, 1, true, true, true>) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \ .set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \ [](const NodeAttrs& attrs) { \ return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};}) /*! \brief Binary launch, with FComputeEx for csr and rsp available. when inputs contain both sparse and dense, sparse output is preferred. 
*/ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PS(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseBinaryOp::PreferSparseStorageType) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \ .set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \ [](const NodeAttrs& attrs) { \ return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};}) /*! \brief Binary launch, dense result * FInferStorageType attr is not set using this macro. * By default DefaultStorageType is used. */ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseBinaryOp::SparseSparseWithDenseResult) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) /*! \brief Binary launch, with FComputeEx for prefer dense */ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PD(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseBinaryOp::PreferDenseStorageType<true, true, true>) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \ .set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \ [](const NodeAttrs& attrs) { \ return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};}) } // namespace op } // namespace mxnet #ifdef __CUDACC__ #include "elemwise_binary_op.cuh" #endif // __CUDACC__ #endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
/* ===== file: par_csr_matrix.c ===== */
/****************************************************************************** * Copyright (c) 1998 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Member functions for hypre_ParCSRMatrix class. * *****************************************************************************/ #include "_hypre_parcsr_mv.h" #include "../seq_mv/HYPRE_seq_mv.h" #include "../seq_mv/csr_matrix.h" /* In addition to publically accessible interface in HYPRE_mv.h, the implementation in this file uses accessor macros into the sequential matrix structure, and so includes the .h that defines that structure. Should those accessor functions become proper functions at some later date, this will not be necessary. AJC 4/99 */ HYPRE_Int hypre_FillResponseParToCSRMatrix(void*, HYPRE_Int, HYPRE_Int, void*, MPI_Comm, void**, HYPRE_Int*); /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixCreate *--------------------------------------------------------------------------*/ /* If create is called and row_starts and col_starts are NOT null, then it is assumed that they are of length 2 containing the start row of the calling processor followed by the start row of the next processor - AHB 6/05 */ hypre_ParCSRMatrix* hypre_ParCSRMatrixCreate( MPI_Comm comm, HYPRE_BigInt global_num_rows, HYPRE_BigInt global_num_cols, HYPRE_BigInt *row_starts_in, HYPRE_BigInt *col_starts_in, HYPRE_Int num_cols_offd, HYPRE_Int num_nonzeros_diag, HYPRE_Int num_nonzeros_offd ) { hypre_ParCSRMatrix *matrix; HYPRE_Int num_procs, my_id; HYPRE_Int local_num_rows; HYPRE_Int local_num_cols; HYPRE_BigInt row_starts[2]; HYPRE_BigInt col_starts[2]; HYPRE_BigInt first_row_index, first_col_diag; matrix = 
hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); if (!row_starts_in) { hypre_GenerateLocalPartitioning(global_num_rows, num_procs, my_id, row_starts); } else { row_starts[0] = row_starts_in[0]; row_starts[1] = row_starts_in[1]; } if (!col_starts_in) { hypre_GenerateLocalPartitioning(global_num_cols, num_procs, my_id, col_starts); } else { col_starts[0] = col_starts_in[0]; col_starts[1] = col_starts_in[1]; } /* row_starts[0] is start of local rows. row_starts[1] is start of next processor's rows */ first_row_index = row_starts[0]; local_num_rows = row_starts[1] - first_row_index; first_col_diag = col_starts[0]; local_num_cols = col_starts[1] - first_col_diag; hypre_ParCSRMatrixComm(matrix) = comm; hypre_ParCSRMatrixDiag(matrix) = hypre_CSRMatrixCreate(local_num_rows, local_num_cols, num_nonzeros_diag); hypre_ParCSRMatrixOffd(matrix) = hypre_CSRMatrixCreate(local_num_rows, num_cols_offd, num_nonzeros_offd); hypre_ParCSRMatrixDiagT(matrix) = NULL; hypre_ParCSRMatrixOffdT(matrix) = NULL; // JSP: transposed matrices are optional hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows; hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols; hypre_ParCSRMatrixGlobalNumRownnz(matrix) = global_num_rows; hypre_ParCSRMatrixFirstRowIndex(matrix) = first_row_index; hypre_ParCSRMatrixFirstColDiag(matrix) = first_col_diag; hypre_ParCSRMatrixLastRowIndex(matrix) = first_row_index + local_num_rows - 1; hypre_ParCSRMatrixLastColDiag(matrix) = first_col_diag + local_num_cols - 1; hypre_ParCSRMatrixRowStarts(matrix)[0] = row_starts[0]; hypre_ParCSRMatrixRowStarts(matrix)[1] = row_starts[1]; hypre_ParCSRMatrixColStarts(matrix)[0] = col_starts[0]; hypre_ParCSRMatrixColStarts(matrix)[1] = col_starts[1]; hypre_ParCSRMatrixColMapOffd(matrix) = NULL; hypre_ParCSRMatrixDeviceColMapOffd(matrix) = NULL; hypre_ParCSRMatrixProcOrdering(matrix) = NULL; hypre_ParCSRMatrixAssumedPartition(matrix) = NULL; 
hypre_ParCSRMatrixOwnsAssumedPartition(matrix) = 1; hypre_ParCSRMatrixCommPkg(matrix) = NULL; hypre_ParCSRMatrixCommPkgT(matrix) = NULL; /* set defaults */ hypre_ParCSRMatrixOwnsData(matrix) = 1; hypre_ParCSRMatrixRowindices(matrix) = NULL; hypre_ParCSRMatrixRowvalues(matrix) = NULL; hypre_ParCSRMatrixGetrowactive(matrix) = 0; matrix->bdiaginv = NULL; matrix->bdiaginv_comm_pkg = NULL; matrix->bdiag_size = -1; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_ParCSRMatrixSocDiagJ(matrix) = NULL; hypre_ParCSRMatrixSocOffdJ(matrix) = NULL; #endif return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixDestroy *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixDestroy( hypre_ParCSRMatrix *matrix ) { if (matrix) { HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(matrix); if ( hypre_ParCSRMatrixOwnsData(matrix) ) { hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(matrix)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(matrix)); if ( hypre_ParCSRMatrixDiagT(matrix) ) { hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiagT(matrix)); } if ( hypre_ParCSRMatrixOffdT(matrix) ) { hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffdT(matrix)); } if (hypre_ParCSRMatrixColMapOffd(matrix)) { hypre_TFree(hypre_ParCSRMatrixColMapOffd(matrix), HYPRE_MEMORY_HOST); } if (hypre_ParCSRMatrixDeviceColMapOffd(matrix)) { hypre_TFree(hypre_ParCSRMatrixDeviceColMapOffd(matrix), HYPRE_MEMORY_DEVICE); } if (hypre_ParCSRMatrixCommPkg(matrix)) { hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkg(matrix)); } if (hypre_ParCSRMatrixCommPkgT(matrix)) { hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkgT(matrix)); } } /* RL: this is actually not correct since the memory_location may have been changed after allocation * put them in containers TODO */ hypre_TFree(hypre_ParCSRMatrixRowindices(matrix), memory_location); hypre_TFree(hypre_ParCSRMatrixRowvalues(matrix), 
memory_location); if ( hypre_ParCSRMatrixAssumedPartition(matrix) && hypre_ParCSRMatrixOwnsAssumedPartition(matrix) ) { hypre_AssumedPartitionDestroy(hypre_ParCSRMatrixAssumedPartition(matrix)); } if ( hypre_ParCSRMatrixProcOrdering(matrix) ) { hypre_TFree(hypre_ParCSRMatrixProcOrdering(matrix), HYPRE_MEMORY_HOST); } hypre_TFree(matrix->bdiaginv, HYPRE_MEMORY_HOST); if (matrix->bdiaginv_comm_pkg) { hypre_MatvecCommPkgDestroy(matrix->bdiaginv_comm_pkg); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_TFree(hypre_ParCSRMatrixSocDiagJ(matrix), HYPRE_MEMORY_DEVICE); hypre_TFree(hypre_ParCSRMatrixSocOffdJ(matrix), HYPRE_MEMORY_DEVICE); #endif hypre_TFree(matrix, HYPRE_MEMORY_HOST); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixInitialize *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixInitialize_v2( hypre_ParCSRMatrix *matrix, HYPRE_MemoryLocation memory_location ) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_CSRMatrixInitialize_v2(hypre_ParCSRMatrixDiag(matrix), 0, memory_location); hypre_CSRMatrixInitialize_v2(hypre_ParCSRMatrixOffd(matrix), 0, memory_location); hypre_ParCSRMatrixColMapOffd(matrix) = hypre_CTAlloc(HYPRE_BigInt, hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(matrix)), HYPRE_MEMORY_HOST); return hypre_error_flag; } HYPRE_Int hypre_ParCSRMatrixInitialize( hypre_ParCSRMatrix *matrix ) { return hypre_ParCSRMatrixInitialize_v2(matrix, hypre_ParCSRMatrixMemoryLocation(matrix)); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixClone * Creates and returns a new copy S of the argument A * The following variables are not copied because they will be constructed * later if needed: CommPkg, CommPkgT, rowindices, rowvalues *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix* 
hypre_ParCSRMatrixClone_v2(hypre_ParCSRMatrix *A, HYPRE_Int copy_data, HYPRE_MemoryLocation memory_location) { hypre_ParCSRMatrix *S; S = hypre_ParCSRMatrixCreate( hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixGlobalNumCols(A), hypre_ParCSRMatrixRowStarts(A), hypre_ParCSRMatrixColStarts(A), hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)), hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A)), hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A)) ); hypre_ParCSRMatrixNumNonzeros(S) = hypre_ParCSRMatrixNumNonzeros(A); hypre_ParCSRMatrixDNumNonzeros(S) = hypre_ParCSRMatrixNumNonzeros(A); hypre_ParCSRMatrixInitialize_v2(S, memory_location); hypre_ParCSRMatrixCopy(A, S, copy_data); return S; } hypre_ParCSRMatrix* hypre_ParCSRMatrixClone(hypre_ParCSRMatrix *A, HYPRE_Int copy_data) { return hypre_ParCSRMatrixClone_v2(A, copy_data, hypre_ParCSRMatrixMemoryLocation(A)); } HYPRE_Int hypre_ParCSRMatrixMigrate(hypre_ParCSRMatrix *A, HYPRE_MemoryLocation memory_location) { if (!A) { return hypre_error_flag; } HYPRE_MemoryLocation old_memory_location = hypre_ParCSRMatrixMemoryLocation(A); if ( hypre_GetActualMemLocation(memory_location) != hypre_GetActualMemLocation( old_memory_location) ) { hypre_CSRMatrix *A_diag = hypre_CSRMatrixClone_v2(hypre_ParCSRMatrixDiag(A), 1, memory_location); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(A)); hypre_ParCSRMatrixDiag(A) = A_diag; hypre_CSRMatrix *A_offd = hypre_CSRMatrixClone_v2(hypre_ParCSRMatrixOffd(A), 1, memory_location); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(A)); hypre_ParCSRMatrixOffd(A) = A_offd; hypre_TFree(hypre_ParCSRMatrixRowindices(A), old_memory_location); hypre_TFree(hypre_ParCSRMatrixRowvalues(A), old_memory_location); } else { hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(A)) = memory_location; hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(A)) = memory_location; } return hypre_error_flag; } HYPRE_Int hypre_ParCSRMatrixSetNumNonzeros_core( hypre_ParCSRMatrix 
*matrix, const char* format ) { MPI_Comm comm; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParCSRMatrixComm(matrix); diag = hypre_ParCSRMatrixDiag(matrix); offd = hypre_ParCSRMatrixOffd(matrix); /* TODO in HYPRE_DEBUG ? */ hypre_CSRMatrixCheckSetNumNonzeros(diag); hypre_CSRMatrixCheckSetNumNonzeros(offd); if (format[0] == 'I') { HYPRE_BigInt total_num_nonzeros; HYPRE_BigInt local_num_nonzeros; local_num_nonzeros = (HYPRE_BigInt) ( hypre_CSRMatrixNumNonzeros(diag) + hypre_CSRMatrixNumNonzeros(offd) ); hypre_MPI_Allreduce(&local_num_nonzeros, &total_num_nonzeros, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); hypre_ParCSRMatrixNumNonzeros(matrix) = total_num_nonzeros; } else if (format[0] == 'D') { HYPRE_Real total_num_nonzeros; HYPRE_Real local_num_nonzeros; local_num_nonzeros = (HYPRE_Real) ( hypre_CSRMatrixNumNonzeros(diag) + hypre_CSRMatrixNumNonzeros(offd) ); hypre_MPI_Allreduce(&local_num_nonzeros, &total_num_nonzeros, 1, HYPRE_MPI_REAL, hypre_MPI_SUM, comm); hypre_ParCSRMatrixDNumNonzeros(matrix) = total_num_nonzeros; } else { hypre_error_in_arg(1); return hypre_error_flag; } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetNumNonzeros *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetNumNonzeros( hypre_ParCSRMatrix *matrix ) { return hypre_ParCSRMatrixSetNumNonzeros_core(matrix, "Int"); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetDNumNonzeros *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetDNumNonzeros( hypre_ParCSRMatrix *matrix ) { return hypre_ParCSRMatrixSetNumNonzeros_core(matrix, "Double"); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetNumRownnz 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetNumRownnz( hypre_ParCSRMatrix *matrix ) { MPI_Comm comm = hypre_ParCSRMatrixComm(matrix); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(matrix); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(matrix); HYPRE_Int *rownnz_diag = hypre_CSRMatrixRownnz(diag); HYPRE_Int *rownnz_offd = hypre_CSRMatrixRownnz(offd); HYPRE_Int num_rownnz_diag = hypre_CSRMatrixNumRownnz(diag); HYPRE_Int num_rownnz_offd = hypre_CSRMatrixNumRownnz(offd); HYPRE_BigInt local_num_rownnz; HYPRE_BigInt global_num_rownnz; HYPRE_Int i, j; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } local_num_rownnz = i = j = 0; while (i < num_rownnz_diag && j < num_rownnz_offd) { local_num_rownnz++; if (rownnz_diag[i] < rownnz_offd[j]) { i++; } else { j++; } } local_num_rownnz += (HYPRE_BigInt) ((num_rownnz_diag - i) + (num_rownnz_offd - j)); hypre_MPI_Allreduce(&local_num_rownnz, &global_num_rownnz, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); hypre_ParCSRMatrixGlobalNumRownnz(matrix) = global_num_rownnz; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetDataOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetDataOwner( hypre_ParCSRMatrix *matrix, HYPRE_Int owns_data ) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParCSRMatrixOwnsData(matrix) = owns_data; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixRead *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_ParCSRMatrixRead( MPI_Comm comm, const char *file_name ) { hypre_ParCSRMatrix *matrix; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; HYPRE_Int my_id, num_procs; HYPRE_Int num_cols_offd; HYPRE_Int i, local_num_rows; HYPRE_BigInt row_starts[2]; 
/* NOTE(review): the lines below are the tail of a ParCSR read routine whose
   header lies above this view; it reads the "<name>.INFO.<rank>",
   "<name>.D.<rank>" and "<name>.O.<rank>" files produced by
   hypre_ParCSRMatrixPrint (below) and assembles a hypre_ParCSRMatrix.
   The "%b" conversion is hypre's printf/scanf specifier used throughout this
   file for HYPRE_BigInt values. */
HYPRE_BigInt col_starts[2];
HYPRE_BigInt *col_map_offd;
HYPRE_BigInt row_s, row_e, col_s, col_e;
HYPRE_BigInt global_num_rows, global_num_cols;
FILE *fp;
char new_file_d[80], new_file_o[80], new_file_info[80];
hypre_MPI_Comm_rank(comm, &my_id);
hypre_MPI_Comm_size(comm, &num_procs);
/* per-rank file names: diag part, offd part, and the INFO metadata file */
hypre_sprintf(new_file_d, "%s.D.%d", file_name, my_id);
hypre_sprintf(new_file_o, "%s.O.%d", file_name, my_id);
hypre_sprintf(new_file_info, "%s.INFO.%d", file_name, my_id);
/* NOTE(review): fopen result is not checked here; a missing INFO file would
   crash in hypre_fscanf — confirm whether callers guarantee existence */
fp = fopen(new_file_info, "r");
hypre_fscanf(fp, "%b", &global_num_rows);
hypre_fscanf(fp, "%b", &global_num_cols);
hypre_fscanf(fp, "%d", &num_cols_offd);
/* the bgl input file should only contain the EXACT range for local processor */
hypre_fscanf(fp, "%d %d %d %d", &row_s, &row_e, &col_s, &col_e);
row_starts[0] = row_s;
row_starts[1] = row_e;
col_starts[0] = col_s;
col_starts[1] = col_e;
/* global indices of this rank's off-diagonal columns, as written by Print */
col_map_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
for (i = 0; i < num_cols_offd; i++)
{
   hypre_fscanf(fp, "%b", &col_map_offd[i]);
}
fclose(fp);
diag = hypre_CSRMatrixRead(new_file_d);
local_num_rows = hypre_CSRMatrixNumRows(diag);
if (num_cols_offd)
{
   offd = hypre_CSRMatrixRead(new_file_o);
}
else
{
   /* no off-processor couplings: build an empty local_num_rows x 0 offd */
   offd = hypre_CSRMatrixCreate(local_num_rows, 0, 0);
   hypre_CSRMatrixInitialize_v2(offd, 0, HYPRE_MEMORY_HOST);
}
/* assemble the ParCSR object field by field (no hypre_ParCSRMatrixCreate) */
matrix = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixComm(matrix) = comm;
hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows;
hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols;
hypre_ParCSRMatrixFirstRowIndex(matrix) = row_s;
hypre_ParCSRMatrixFirstColDiag(matrix) = col_s;
/* row_e/col_e are one-past-the-end ("starts" convention), hence the -1 */
hypre_ParCSRMatrixLastRowIndex(matrix) = row_e - 1;
hypre_ParCSRMatrixLastColDiag(matrix) = col_e - 1;
hypre_ParCSRMatrixRowStarts(matrix)[0] = row_starts[0];
hypre_ParCSRMatrixRowStarts(matrix)[1] = row_starts[1];
hypre_ParCSRMatrixColStarts(matrix)[0] = col_starts[0];
hypre_ParCSRMatrixColStarts(matrix)[1] = col_starts[1];
hypre_ParCSRMatrixCommPkg(matrix) = NULL;
/* set defaults */
hypre_ParCSRMatrixOwnsData(matrix) = 1;
hypre_ParCSRMatrixDiag(matrix) = diag;
hypre_ParCSRMatrixOffd(matrix) = offd;
if (num_cols_offd)
{
   hypre_ParCSRMatrixColMapOffd(matrix) = col_map_offd;
}
else
{
   hypre_ParCSRMatrixColMapOffd(matrix) = NULL;
}
return matrix;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixPrint
 *
 * Writes this rank's piece of the matrix to three files:
 *   <file_name>.D.<rank>    - diag CSR block (via hypre_CSRMatrixPrint)
 *   <file_name>.O.<rank>    - offd CSR block (only if num_cols_offd != 0)
 *   <file_name>.INFO.<rank> - global sizes, offd column count, local ranges
 *                             (ends stored +1, i.e. "starts" convention),
 *                             and the col_map_offd entries
 * Returns hypre_error_flag; errors only for a NULL matrix argument.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixPrint( hypre_ParCSRMatrix *matrix,
                         const char         *file_name )
{
   MPI_Comm comm;
   HYPRE_BigInt global_num_rows;
   HYPRE_BigInt global_num_cols;
   HYPRE_BigInt *col_map_offd;
   HYPRE_Int my_id, i, num_procs;
   char new_file_d[80], new_file_o[80], new_file_info[80];
   FILE *fp;
   HYPRE_Int num_cols_offd = 0;
   HYPRE_BigInt row_s, row_e, col_s, col_e;
   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   comm = hypre_ParCSRMatrixComm(matrix);
   global_num_rows = hypre_ParCSRMatrixGlobalNumRows(matrix);
   global_num_cols = hypre_ParCSRMatrixGlobalNumCols(matrix);
   col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix);
   if (hypre_ParCSRMatrixOffd(matrix))
   {
      num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(matrix));
   }
   hypre_MPI_Comm_rank(comm, &my_id);
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_sprintf(new_file_d, "%s.D.%d", file_name, my_id);
   hypre_sprintf(new_file_o, "%s.O.%d", file_name, my_id);
   hypre_sprintf(new_file_info, "%s.INFO.%d", file_name, my_id);
   hypre_CSRMatrixPrint(hypre_ParCSRMatrixDiag(matrix), new_file_d);
   if (num_cols_offd != 0)
   {
      hypre_CSRMatrixPrint(hypre_ParCSRMatrixOffd(matrix), new_file_o);
   }
   fp = fopen(new_file_info, "w");
   hypre_fprintf(fp, "%b\n", global_num_rows);
   hypre_fprintf(fp, "%b\n", global_num_cols);
   hypre_fprintf(fp, "%d\n", num_cols_offd);
   row_s = hypre_ParCSRMatrixFirstRowIndex(matrix);
   row_e = hypre_ParCSRMatrixLastRowIndex(matrix);
   col_s = hypre_ParCSRMatrixFirstColDiag(matrix);
   col_e = hypre_ParCSRMatrixLastColDiag(matrix);
   /* add 1 to the ends because this is a starts partition */
   hypre_fprintf(fp, "%b %b %b %b\n", row_s, row_e + 1, col_s, col_e + 1);
   for (i = 0; i < num_cols_offd; i++)
   {
      hypre_fprintf(fp, "%b\n", col_map_offd[i]);
   }
   fclose(fp);
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixPrintIJ
 *
 * Writes this rank's rows in IJ (coordinate) format to <filename>.<rank>:
 * first line is "ilower iupper jlower jupper" (with base_i/base_j offsets
 * applied), then one "I J value" line per stored entry, diag entries first
 * within each row, then offd entries mapped through col_map_offd.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixPrintIJ( const hypre_ParCSRMatrix *matrix,
                           const HYPRE_Int           base_i,
                           const HYPRE_Int           base_j,
                           const char               *filename )
{
   MPI_Comm comm;
   HYPRE_BigInt first_row_index;
   HYPRE_BigInt first_col_diag;
   hypre_CSRMatrix *diag;
   hypre_CSRMatrix *offd;
   HYPRE_BigInt *col_map_offd;
   HYPRE_Int num_rows;
   const HYPRE_BigInt *row_starts;
   const HYPRE_BigInt *col_starts;
   HYPRE_Complex *diag_data;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *offd_data;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_Int myid, num_procs, i, j;
   HYPRE_BigInt I, J;
   char new_filename[255];
   FILE *file;
   HYPRE_Int num_nonzeros_offd;
   HYPRE_BigInt ilower, iupper, jlower, jupper;
   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   comm = hypre_ParCSRMatrixComm(matrix);
   first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix);
   first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix);
   diag = hypre_ParCSRMatrixDiag(matrix);
   offd = hypre_ParCSRMatrixOffd(matrix);
   col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix);
   num_rows = hypre_ParCSRMatrixNumRows(matrix);
   row_starts = hypre_ParCSRMatrixRowStarts(matrix);
   col_starts = hypre_ParCSRMatrixColStarts(matrix);
   hypre_MPI_Comm_rank(comm, &myid);
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_sprintf(new_filename, "%s.%05d", filename, myid);
   if ((file = fopen(new_filename, "w")) == NULL)
   {
      /* NOTE(review): the "%s" in this message has no corresponding argument;
         hypre_error_w_msg stores the string as-is, so the file name is never
         reported */
      hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Error: can't open output file %s\n");
      return hypre_error_flag;
   }
   diag_data = hypre_CSRMatrixData(diag);
   diag_i = hypre_CSRMatrixI(diag);
   diag_j = hypre_CSRMatrixJ(diag);
   num_nonzeros_offd =
hypre_CSRMatrixNumNonzeros(offd); if (num_nonzeros_offd) { offd_data = hypre_CSRMatrixData(offd); offd_i = hypre_CSRMatrixI(offd); offd_j = hypre_CSRMatrixJ(offd); } ilower = row_starts[0] + (HYPRE_BigInt) base_i; iupper = row_starts[1] + (HYPRE_BigInt) base_i - 1; jlower = col_starts[0] + (HYPRE_BigInt) base_j; jupper = col_starts[1] + (HYPRE_BigInt) base_j - 1; hypre_fprintf(file, "%b %b %b %b\n", ilower, iupper, jlower, jupper); for (i = 0; i < num_rows; i++) { I = first_row_index + (HYPRE_BigInt)(i + base_i); /* print diag columns */ for (j = diag_i[i]; j < diag_i[i + 1]; j++) { J = first_col_diag + (HYPRE_BigInt)(diag_j[j] + base_j); if (diag_data) { #ifdef HYPRE_COMPLEX hypre_fprintf(file, "%b %b %.14e , %.14e\n", I, J, hypre_creal(diag_data[j]), hypre_cimag(diag_data[j])); #else hypre_fprintf(file, "%b %b %.14e\n", I, J, diag_data[j]); #endif } else { hypre_fprintf(file, "%b %b\n", I, J); } } /* print offd columns */ if (num_nonzeros_offd) { for (j = offd_i[i]; j < offd_i[i + 1]; j++) { J = col_map_offd[offd_j[j]] + (HYPRE_BigInt) base_j; if (offd_data) { #ifdef HYPRE_COMPLEX hypre_fprintf(file, "%b %b %.14e , %.14e\n", I, J, hypre_creal(offd_data[j]), hypre_cimag(offd_data[j])); #else hypre_fprintf(file, "%b %b %.14e\n", I, J, offd_data[j]); #endif } else { hypre_fprintf(file, "%b %b\n", I, J); } } } } fclose(file); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixReadIJ *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixReadIJ( MPI_Comm comm, const char *filename, HYPRE_Int *base_i_ptr, HYPRE_Int *base_j_ptr, hypre_ParCSRMatrix **matrix_ptr) { HYPRE_BigInt global_num_rows; HYPRE_BigInt global_num_cols; HYPRE_BigInt first_row_index; HYPRE_BigInt first_col_diag; HYPRE_BigInt last_col_diag; hypre_ParCSRMatrix *matrix; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; HYPRE_BigInt *col_map_offd; HYPRE_BigInt row_starts[2]; HYPRE_BigInt 
col_starts[2]; HYPRE_Int num_rows; HYPRE_BigInt big_base_i, big_base_j; HYPRE_Int base_i, base_j; HYPRE_Complex *diag_data; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *offd_data; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_BigInt *tmp_j; HYPRE_BigInt *aux_offd_j; HYPRE_BigInt I, J; HYPRE_Int myid, num_procs, i, i2, j; char new_filename[255]; FILE *file; HYPRE_Int num_cols_offd, num_nonzeros_diag, num_nonzeros_offd; HYPRE_Int i_col, num_cols; HYPRE_Int diag_cnt, offd_cnt, row_cnt; HYPRE_Complex data; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &myid); hypre_sprintf(new_filename, "%s.%05d", filename, myid); if ((file = fopen(new_filename, "r")) == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Error: can't open output file %s\n"); return hypre_error_flag; } hypre_fscanf(file, "%b %b", &global_num_rows, &global_num_cols); hypre_fscanf(file, "%d %d %d", &num_rows, &num_cols, &num_cols_offd); hypre_fscanf(file, "%d %d", &num_nonzeros_diag, &num_nonzeros_offd); hypre_fscanf(file, "%b %b %b %b", &row_starts[0], &col_starts[0], &row_starts[1], &col_starts[1]); big_base_i = row_starts[0]; big_base_j = col_starts[0]; base_i = (HYPRE_Int) row_starts[0]; base_j = (HYPRE_Int) col_starts[0]; matrix = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols, row_starts, col_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); hypre_ParCSRMatrixInitialize(matrix); diag = hypre_ParCSRMatrixDiag(matrix); offd = hypre_ParCSRMatrixOffd(matrix); diag_data = hypre_CSRMatrixData(diag); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); offd_i = hypre_CSRMatrixI(offd); if (num_nonzeros_offd) { offd_data = hypre_CSRMatrixData(offd); offd_j = hypre_CSRMatrixJ(offd); tmp_j = hypre_CTAlloc(HYPRE_BigInt, num_nonzeros_offd, HYPRE_MEMORY_HOST); } first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix); first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix); last_col_diag = first_col_diag + (HYPRE_BigInt)num_cols - 1; diag_cnt = 0; 
offd_cnt = 0; row_cnt = 0; for (i = 0; i < num_nonzeros_diag + num_nonzeros_offd; i++) { /* read values */ hypre_fscanf(file, "%b %b %le", &I, &J, &data); i2 = (HYPRE_Int)(I - big_base_i - first_row_index); J -= big_base_j; if (i2 > row_cnt) { diag_i[i2] = diag_cnt; offd_i[i2] = offd_cnt; row_cnt++; } if (J < first_col_diag || J > last_col_diag) { tmp_j[offd_cnt] = J; offd_data[offd_cnt++] = data; } else { diag_j[diag_cnt] = (HYPRE_Int)(J - first_col_diag); diag_data[diag_cnt++] = data; } } diag_i[num_rows] = diag_cnt; offd_i[num_rows] = offd_cnt; fclose(file); /* generate col_map_offd */ if (num_nonzeros_offd) { aux_offd_j = hypre_CTAlloc(HYPRE_BigInt, num_nonzeros_offd, HYPRE_MEMORY_HOST); for (i = 0; i < num_nonzeros_offd; i++) { aux_offd_j[i] = (HYPRE_BigInt)offd_j[i]; } hypre_BigQsort0(aux_offd_j, 0, num_nonzeros_offd - 1); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); col_map_offd[0] = aux_offd_j[0]; offd_cnt = 0; for (i = 1; i < num_nonzeros_offd; i++) { if (aux_offd_j[i] > col_map_offd[offd_cnt]) { col_map_offd[++offd_cnt] = aux_offd_j[i]; } } for (i = 0; i < num_nonzeros_offd; i++) { offd_j[i] = hypre_BigBinarySearch(col_map_offd, tmp_j[i], num_cols_offd); } hypre_TFree(aux_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(tmp_j, HYPRE_MEMORY_HOST); } /* move diagonal element in first position in each row */ for (i = 0; i < num_rows; i++) { i_col = diag_i[i]; for (j = i_col; j < diag_i[i + 1]; j++) { if (diag_j[j] == i) { diag_j[j] = diag_j[i_col]; data = diag_data[j]; diag_data[j] = diag_data[i_col]; diag_data[i_col] = data; diag_j[i_col] = i; break; } } } *base_i_ptr = base_i; *base_j_ptr = base_j; *matrix_ptr = matrix; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixGetLocalRange * returns the row numbers of the rows stored on this processor. * "End" is actually the row number of the last row on this processor. 
*--------------------------------------------------------------------------*/
/* Outputs the inclusive local row/column ranges of this rank's block.
   Errors only for a NULL matrix argument. */
HYPRE_Int
hypre_ParCSRMatrixGetLocalRange( hypre_ParCSRMatrix *matrix,
                                 HYPRE_BigInt       *row_start,
                                 HYPRE_BigInt       *row_end,
                                 HYPRE_BigInt       *col_start,
                                 HYPRE_BigInt       *col_end )
{
   HYPRE_Int my_id;
   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   /* NOTE(review): my_id is queried but never used afterwards */
   hypre_MPI_Comm_rank( hypre_ParCSRMatrixComm(matrix), &my_id );
   *row_start = hypre_ParCSRMatrixFirstRowIndex(matrix);
   *row_end = hypre_ParCSRMatrixLastRowIndex(matrix);
   *col_start = hypre_ParCSRMatrixFirstColDiag(matrix);
   *col_end = hypre_ParCSRMatrixLastColDiag(matrix);
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixGetRow
 * Returns global column indices and/or values for a given row in the global
 * matrix. Global row number is used, but the row must be stored locally or
 * an error is returned. This implementation copies from the two matrices that
 * store the local data, storing them in the hypre_ParCSRMatrix structure.
 * Only a single row can be accessed via this function at any one time; the
 * corresponding RestoreRow function must be called, to avoid bleeding memory,
 * and to be able to look at another row.
 * Either one of col_ind and values can be left null, and those values will
 * not be returned.
 * All indices are returned in 0-based indexing, no matter what is used under
 * the hood. EXCEPTION: currently this only works if the local CSR matrices
 * use 0-based indexing.
 * This code, semantics, implementation, etc., are all based on PETSc's hypre_MPI_AIJ
 * matrix code, adjusted for our data and software structures.
 * AJC 4/99.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixGetRowHost( hypre_ParCSRMatrix  *mat,
                              HYPRE_BigInt         row,
                              HYPRE_Int           *size,
                              HYPRE_BigInt       **col_ind,
                              HYPRE_Complex      **values )
{
   HYPRE_Int my_id;
   HYPRE_BigInt row_start, row_end;
   hypre_CSRMatrix *Aa;  /* diag block */
   hypre_CSRMatrix *Ba;  /* offd block */
   if (!mat)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   Aa = (hypre_CSRMatrix *) hypre_ParCSRMatrixDiag(mat);
   Ba = (hypre_CSRMatrix *) hypre_ParCSRMatrixOffd(mat);
   /* only one row may be checked out at a time */
   if (hypre_ParCSRMatrixGetrowactive(mat)) { return (-1); }
   hypre_MPI_Comm_rank( hypre_ParCSRMatrixComm(mat), &my_id );
   hypre_ParCSRMatrixGetrowactive(mat) = 1;
   row_start = hypre_ParCSRMatrixFirstRowIndex(mat);
   row_end = hypre_ParCSRMatrixLastRowIndex(mat) + 1;
   /* NOTE(review): this early return leaves Getrowactive set to 1, so a
      subsequent GetRow fails until RestoreRow is called — confirm intended */
   if (row < row_start || row >= row_end) { return (-1); }
   /* if buffer is not allocated and some information is requested, allocate buffer */
   if (!hypre_ParCSRMatrixRowvalues(mat) && ( col_ind || values ))
   {
      /* allocate enough space to hold information from the longest row. */
      HYPRE_Int max = 1, tmp;
      HYPRE_Int i;
      HYPRE_Int m = row_end - row_start;
      for ( i = 0; i < m; i++ )
      {
         tmp = hypre_CSRMatrixI(Aa)[i + 1] - hypre_CSRMatrixI(Aa)[i] +
               hypre_CSRMatrixI(Ba)[i + 1] - hypre_CSRMatrixI(Ba)[i];
         if (max < tmp) { max = tmp; }
      }
      hypre_ParCSRMatrixRowvalues(mat) =
         (HYPRE_Complex *) hypre_CTAlloc(HYPRE_Complex, max,
                                         hypre_ParCSRMatrixMemoryLocation(mat));
      hypre_ParCSRMatrixRowindices(mat) =
         (HYPRE_BigInt *) hypre_CTAlloc(HYPRE_BigInt, max,
                                        hypre_ParCSRMatrixMemoryLocation(mat));
   }
   /* Copy from dual sequential matrices into buffer */
   {
      HYPRE_Complex *vworkA, *vworkB, *v_p;
      HYPRE_Int i, *cworkA, *cworkB;
      HYPRE_BigInt cstart = hypre_ParCSRMatrixFirstColDiag(mat);
      HYPRE_Int nztot, nzA, nzB, lrow = (HYPRE_Int)(row - row_start);
      HYPRE_BigInt *cmap, *idx_p;
      /* slices of the requested local row in diag (A) and offd (B) */
      nzA = hypre_CSRMatrixI(Aa)[lrow + 1] - hypre_CSRMatrixI(Aa)[lrow];
      cworkA = &( hypre_CSRMatrixJ(Aa)[ hypre_CSRMatrixI(Aa)[lrow] ] );
      vworkA = &( hypre_CSRMatrixData(Aa)[ hypre_CSRMatrixI(Aa)[lrow] ] );
      nzB = hypre_CSRMatrixI(Ba)[lrow + 1] - hypre_CSRMatrixI(Ba)[lrow];
      cworkB = &( hypre_CSRMatrixJ(Ba)[ hypre_CSRMatrixI(Ba)[lrow] ] );
      vworkB = &( hypre_CSRMatrixData(Ba)[ hypre_CSRMatrixI(Ba)[lrow] ] );
      nztot = nzA + nzB;
      cmap = hypre_ParCSRMatrixColMapOffd(mat);
      if (values || col_ind)
      {
         if (nztot)
         {
            /* Sort by increasing column numbers, assuming A and B already sorted */
            /* imark = number of offd entries whose global column precedes the
               diag range; layout is B[0..imark) | A | B[imark..nzB) */
            HYPRE_Int imark = -1;
            if (values)
            {
               *values = v_p = hypre_ParCSRMatrixRowvalues(mat);
               for ( i = 0; i < nzB; i++ )
               {
                  if (cmap[cworkB[i]] < cstart) { v_p[i] = vworkB[i]; }
                  else { break; }
               }
               imark = i;
               for ( i = 0; i < nzA; i++ ) { v_p[imark + i] = vworkA[i]; }
               for ( i = imark; i < nzB; i++ ) { v_p[nzA + i] = vworkB[i]; }
            }
            if (col_ind)
            {
               *col_ind = idx_p = hypre_ParCSRMatrixRowindices(mat);
               if (imark > -1)
               {
                  /* imark already computed by the values pass above */
                  for ( i = 0; i < imark; i++ ) { idx_p[i] = cmap[cworkB[i]]; }
               }
               else
               {
                  for ( i = 0; i < nzB; i++ )
                  {
                     if (cmap[cworkB[i]] < cstart) { idx_p[i] = cmap[cworkB[i]]; }
                     else { break; }
                  }
                  imark = i;
               }
               for ( i = 0; i < nzA; i++ ) { idx_p[imark + i] = cstart + cworkA[i]; }
               for ( i = imark; i < nzB; i++ ) { idx_p[nzA + i] = cmap[cworkB[i]]; }
            }
         }
         else
         {
            /* empty row: hand back null pointers */
            if (col_ind) { *col_ind = 0; }
            if (values) { *values = 0; }
         }
      }
      *size = nztot;
   } /* End of copy */
   return hypre_error_flag;
}

/* Dispatcher: routes to the device implementation when the matrix lives in
   device memory (CUDA/HIP builds), otherwise to the host path above. */
HYPRE_Int
hypre_ParCSRMatrixGetRow( hypre_ParCSRMatrix  *mat,
                          HYPRE_BigInt         row,
                          HYPRE_Int           *size,
                          HYPRE_BigInt       **col_ind,
                          HYPRE_Complex      **values )
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(mat) );
   if (exec == HYPRE_EXEC_DEVICE)
   {
      return hypre_ParCSRMatrixGetRowDevice(mat, row, size, col_ind, values);
   }
   else
#endif
   {
      return hypre_ParCSRMatrixGetRowHost(mat, row, size, col_ind, values);
   }
   return hypre_error_flag;  /* unreachable: both branches above return */
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixRestoreRow
 *
 * Releases the single-row checkout taken by GetRow; errors if no row is
 * currently active. The row/size/col_ind/values arguments are unused.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixRestoreRow( hypre_ParCSRMatrix *matrix,
                              HYPRE_BigInt        row,
                              HYPRE_Int          *size,
                              HYPRE_BigInt      **col_ind,
                              HYPRE_Complex     **values )
{
   if (!hypre_ParCSRMatrixGetrowactive(matrix))
   {
      hypre_error(HYPRE_ERROR_GENERIC);
      return hypre_error_flag;
   }
   hypre_ParCSRMatrixGetrowactive(matrix) = 0;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixToParCSRMatrix:
 *
 * Generates a ParCSRMatrix distributed across the processors in comm
 * from a CSRMatrix on proc 0 .
*
 *--------------------------------------------------------------------------*/
/* Rank 0 holds the full CSR matrix A (other ranks may pass anything for A);
   row/col partitionings may be given via global_row_starts/global_col_starts
   or are generated on rank 0 with hypre_GeneratePartitioning. Rank 0
   broadcasts sizes, scatters each rank's starts and nonzero counts, then
   ships each rank its row block via a derived MPI datatype. */
hypre_ParCSRMatrix *
hypre_CSRMatrixToParCSRMatrix( MPI_Comm         comm,
                               hypre_CSRMatrix *A,
                               HYPRE_BigInt    *global_row_starts,
                               HYPRE_BigInt    *global_col_starts )
{
   hypre_ParCSRMatrix *parcsr_A;
   HYPRE_BigInt *global_data;
   HYPRE_BigInt global_size;
   HYPRE_BigInt global_num_rows;
   HYPRE_BigInt global_num_cols;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int *num_rows_proc;
   HYPRE_Int *num_nonzeros_proc;
   HYPRE_BigInt row_starts[2];
   HYPRE_BigInt col_starts[2];
   hypre_CSRMatrix *local_A;
   HYPRE_Complex *A_data;
   HYPRE_Int *A_i;
   HYPRE_Int *A_j;
   hypre_MPI_Request *requests;
   hypre_MPI_Status *status, status0;
   hypre_MPI_Datatype *csr_matrix_datatypes;
   HYPRE_Int free_global_row_starts = 0;
   HYPRE_Int free_global_col_starts = 0;
   HYPRE_Int total_size;
   HYPRE_BigInt first_col_diag;
   HYPRE_BigInt last_col_diag;
   HYPRE_Int num_rows;
   HYPRE_Int num_nonzeros;
   HYPRE_Int i, ind;
   hypre_MPI_Comm_rank(comm, &my_id);
   hypre_MPI_Comm_size(comm, &num_procs);
   /* global_data layout: [0]=nrows [1]=ncols [2]=payload size [3]=mode tag,
      then up to two (num_procs+1)-long starts arrays (rank 0 only) */
   total_size = 4;
   if (my_id == 0) { total_size += 2 * (num_procs + 1); }
   global_data = hypre_CTAlloc(HYPRE_BigInt, total_size, HYPRE_MEMORY_HOST);
   if (my_id == 0)
   {
      global_size = 3;
      if (global_row_starts)
      {
         if (global_col_starts)
         {
            if (global_col_starts != global_row_starts)
            {
               /* contains code for what to expect,
                  if 0: global_row_starts = global_col_starts, only global_row_starts given
                  if 1: only global_row_starts given, global_col_starts = NULL
                  if 2: both global_row_starts and global_col_starts given
                  if 3: only global_col_starts given, global_row_starts = NULL */
               global_data[3] = 2;
               global_size += (HYPRE_BigInt) (2 * (num_procs + 1) + 1);
               for (i = 0; i < (num_procs + 1); i++)
               {
                  global_data[i + 4] = global_row_starts[i];
               }
               for (i = 0; i < (num_procs + 1); i++)
               {
                  global_data[i + num_procs + 5] = global_col_starts[i];
               }
            }
            else
            {
               global_data[3] = 0;
               global_size += (HYPRE_BigInt) ((num_procs + 1) + 1);
               for (i = 0; i < (num_procs + 1); i++)
               {
                  global_data[i + 4] = global_row_starts[i];
               }
            }
         }
         else
         {
            global_data[3] = 1;
            global_size += (HYPRE_BigInt) ((num_procs + 1) + 1);
            for (i = 0; i < (num_procs + 1); i++)
            {
               global_data[i + 4] = global_row_starts[i];
            }
         }
      }
      else
      {
         if (global_col_starts)
         {
            global_data[3] = 3;
            global_size += (HYPRE_BigInt) ((num_procs + 1) + 1);
            for (i = 0; i < (num_procs + 1); i++)
            {
               global_data[i + 4] = global_col_starts[i];
            }
         }
      }
      global_data[0] = (HYPRE_BigInt) hypre_CSRMatrixNumRows(A);
      global_data[1] = (HYPRE_BigInt) hypre_CSRMatrixNumCols(A);
      global_data[2] = global_size;
      A_data = hypre_CSRMatrixData(A);
      A_i = hypre_CSRMatrixI(A);
      A_j = hypre_CSRMatrixJ(A);
   }
   /* NOTE(review): only the first 3 entries are broadcast, yet all ranks read
      global_data[3] below; on non-root ranks it is the CTAlloc'ed zero, which
      happens to match the mode-0/1 branch shape — confirm protocol */
   hypre_MPI_Bcast(global_data, 3, HYPRE_MPI_BIG_INT, 0, comm);
   global_num_rows = global_data[0];
   global_num_cols = global_data[1];
   global_size = global_data[2];
   if (global_size > 3)
   {
      HYPRE_Int send_start;
      if (global_data[3] == 2)
      {
         /* separate row and col partitions: scatter each rank's [start, end) */
         send_start = 4;
         hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
                           &row_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm);
         send_start = 5;
         hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
                           &row_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm);
         send_start = 4 + (num_procs + 1);
         hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
                           &col_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm);
         send_start = 5 + (num_procs + 1);
         hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
                           &col_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm);
      }
      else if ((global_data[3] == 0) || (global_data[3] == 1))
      {
         send_start = 4;
         hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
                           &row_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm);
         send_start = 5;
         hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
                           &row_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm);
         if (global_data[3] == 0)
         {
            col_starts[0] = row_starts[0];
            col_starts[1] = row_starts[1];
         }
      }
      else
      {
         /* mode 3: only a column partition was supplied */
         send_start = 4;
         hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
                           &col_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm);
         send_start = 5;
         hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
                           &col_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm);
      }
   }
   hypre_TFree(global_data, HYPRE_MEMORY_HOST);
   // Create ParCSR matrix
   parcsr_A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
                                       row_starts, col_starts, 0, 0, 0);
   // Allocate memory for building ParCSR matrix
   num_rows_proc = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST);
   num_nonzeros_proc = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST);
   if (my_id == 0)
   {
      if (!global_row_starts)
      {
         hypre_GeneratePartitioning(global_num_rows, num_procs, &global_row_starts);
         free_global_row_starts = 1;
      }
      if (!global_col_starts)
      {
         /* NOTE(review): partitions columns using global_num_rows — looks
            like it should be global_num_cols; harmless only for square
            matrices. Left as-is (behavior-preserving pass). */
         hypre_GeneratePartitioning(global_num_rows, num_procs, &global_col_starts);
         free_global_col_starts = 1;
      }
      for (i = 0; i < num_procs; i++)
      {
         num_rows_proc[i] = (HYPRE_Int) (global_row_starts[i + 1] - global_row_starts[i]);
         num_nonzeros_proc[i] = A_i[(HYPRE_Int)global_row_starts[i + 1]] -
                                A_i[(HYPRE_Int)global_row_starts[i]];
      }
      //num_nonzeros_proc[num_procs-1] = A_i[(HYPRE_Int)global_num_rows] - A_i[(HYPRE_Int)row_starts[num_procs-1]];
   }
   hypre_MPI_Scatter(num_rows_proc, 1, HYPRE_MPI_INT, &num_rows, 1, HYPRE_MPI_INT, 0, comm);
   hypre_MPI_Scatter(num_nonzeros_proc, 1, HYPRE_MPI_INT, &num_nonzeros, 1, HYPRE_MPI_INT, 0, comm);
   /* RL: this is not correct: (HYPRE_Int) global_num_cols */
   local_A = hypre_CSRMatrixCreate(num_rows, (HYPRE_Int) global_num_cols, num_nonzeros);
   csr_matrix_datatypes = hypre_CTAlloc(hypre_MPI_Datatype, num_procs, HYPRE_MEMORY_HOST);
   if (my_id == 0)
   {
      /* ship each non-root rank its row block via a derived datatype that
         gathers data/i/j directly out of A's arrays (no packing copies) */
      requests = hypre_CTAlloc(hypre_MPI_Request, num_procs - 1, HYPRE_MEMORY_HOST);
      status = hypre_CTAlloc(hypre_MPI_Status, num_procs - 1, HYPRE_MEMORY_HOST);
      for (i = 1; i < num_procs; i++)
      {
         ind = A_i[(HYPRE_Int) global_row_starts[i]];
         hypre_BuildCSRMatrixMPIDataType(num_nonzeros_proc[i], num_rows_proc[i],
                                         &A_data[ind],
                                         &A_i[(HYPRE_Int) global_row_starts[i]],
                                         &A_j[ind],
                                         &csr_matrix_datatypes[i]);
         hypre_MPI_Isend(hypre_MPI_BOTTOM, 1, csr_matrix_datatypes[i], i, 0, comm,
                         &requests[i - 1]);
         hypre_MPI_Type_free(&csr_matrix_datatypes[i]);
      }
      /* root's own block aliases A's arrays; OwnsData=0 prevents double free */
      hypre_CSRMatrixData(local_A) = A_data;
      hypre_CSRMatrixI(local_A) = A_i;
      hypre_CSRMatrixJ(local_A) = A_j;
      hypre_CSRMatrixOwnsData(local_A) = 0;
      hypre_MPI_Waitall(num_procs - 1, requests, status);
      hypre_TFree(requests, HYPRE_MEMORY_HOST);
      hypre_TFree(status, HYPRE_MEMORY_HOST);
      hypre_TFree(num_rows_proc, HYPRE_MEMORY_HOST);
      hypre_TFree(num_nonzeros_proc, HYPRE_MEMORY_HOST);
      if (free_global_row_starts)
      {
         hypre_TFree(global_row_starts, HYPRE_MEMORY_HOST);
      }
      if (free_global_col_starts)
      {
         hypre_TFree(global_col_starts, HYPRE_MEMORY_HOST);
      }
   }
   else
   {
      hypre_CSRMatrixInitialize(local_A);
      hypre_BuildCSRMatrixMPIDataType(num_nonzeros, num_rows,
                                      hypre_CSRMatrixData(local_A),
                                      hypre_CSRMatrixI(local_A),
                                      hypre_CSRMatrixJ(local_A),
                                      &csr_matrix_datatypes[0]);
      hypre_MPI_Recv(hypre_MPI_BOTTOM, 1, csr_matrix_datatypes[0], 0, 0, comm, &status0);
      hypre_MPI_Type_free(csr_matrix_datatypes);
   }
   first_col_diag = hypre_ParCSRMatrixFirstColDiag(parcsr_A);
   last_col_diag = hypre_ParCSRMatrixLastColDiag(parcsr_A);
   GenerateDiagAndOffd(local_A, parcsr_A, first_col_diag, last_col_diag);
   /* set pointers back to NULL before destroying */
   if (my_id == 0)
   {
      hypre_CSRMatrixData(local_A) = NULL;
      hypre_CSRMatrixI(local_A) = NULL;
      hypre_CSRMatrixJ(local_A) = NULL;
   }
   hypre_CSRMatrixDestroy(local_A);
   hypre_TFree(csr_matrix_datatypes, HYPRE_MEMORY_HOST);
   return parcsr_A;
}

/* RL: XXX this is not a scalable routine, see `marker' therein */
/* Splits the local CSR block A into matrix's diag part (columns within
   [first_col_diag, last_col_diag]) and offd part (all other columns,
   compressed through a freshly built col_map_offd). The `marker' array is
   sized by A's full column count, hence the scalability note above. */
HYPRE_Int
GenerateDiagAndOffd(hypre_CSRMatrix    *A,
                    hypre_ParCSRMatrix *matrix,
                    HYPRE_BigInt        first_col_diag,
                    HYPRE_BigInt        last_col_diag)
{
   HYPRE_Int i, j;
   HYPRE_Int jo, jd;
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A);
   HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A);
   HYPRE_Complex *a_data = hypre_CSRMatrixData(A);
   HYPRE_Int *a_i = hypre_CSRMatrixI(A);
   /*RL: XXX FIXME if A spans global column space, the following a_j should be bigJ */
   HYPRE_Int *a_j = hypre_CSRMatrixJ(A);
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(matrix);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(matrix);
   HYPRE_BigInt *col_map_offd;
   HYPRE_Complex *diag_data, *offd_data;
   HYPRE_Int *diag_i, *offd_i;
   HYPRE_Int *diag_j, *offd_j;
   HYPRE_Int *marker;
   HYPRE_Int num_cols_diag, num_cols_offd;
   HYPRE_Int first_elmt = a_i[0];  /* supports a_i that does not start at 0 */
   HYPRE_Int num_nonzeros = a_i[num_rows] - first_elmt;
   HYPRE_Int counter;
   num_cols_diag = (HYPRE_Int)(last_col_diag - first_col_diag + 1);
   num_cols_offd = 0;
   HYPRE_MemoryLocation memory_location = hypre_CSRMatrixMemoryLocation(A);
   if (num_cols - num_cols_diag)
   {
      /* pass 1: count diag/offd entries per row and flag offd columns */
      hypre_CSRMatrixInitialize_v2(diag, 0, memory_location);
      diag_i = hypre_CSRMatrixI(diag);
      hypre_CSRMatrixInitialize_v2(offd, 0, memory_location);
      offd_i = hypre_CSRMatrixI(offd);
      marker = hypre_CTAlloc(HYPRE_Int, num_cols, HYPRE_MEMORY_HOST);
      for (i = 0; i < num_cols; i++)
      {
         marker[i] = 0;
      }
      jo = 0;
      jd = 0;
      for (i = 0; i < num_rows; i++)
      {
         offd_i[i] = jo;
         diag_i[i] = jd;
         for (j = a_i[i] - first_elmt; j < a_i[i + 1] - first_elmt; j++)
         {
            if (a_j[j] < first_col_diag || a_j[j] > last_col_diag)
            {
               if (!marker[a_j[j]])
               {
                  marker[a_j[j]] = 1;
                  num_cols_offd++;
               }
               jo++;
            }
            else
            {
               jd++;
            }
         }
      }
      offd_i[num_rows] = jo;
      diag_i[num_rows] = jd;
      hypre_ParCSRMatrixColMapOffd(matrix) =
         hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
      col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix);
      /* build col_map_offd in ascending order; marker[col] becomes the
         compressed local index for pass 2 */
      counter = 0;
      for (i = 0; i < num_cols; i++)
      {
         if (marker[i])
         {
            col_map_offd[counter] = (HYPRE_BigInt) i;
            marker[i] = counter;
            counter++;
         }
      }
      hypre_CSRMatrixNumNonzeros(diag) = jd;
      hypre_CSRMatrixInitialize(diag);
      diag_data = hypre_CSRMatrixData(diag);
      diag_j = hypre_CSRMatrixJ(diag);
      hypre_CSRMatrixNumNonzeros(offd) = jo;
      hypre_CSRMatrixNumCols(offd) = num_cols_offd;
      hypre_CSRMatrixInitialize(offd);
      offd_data = hypre_CSRMatrixData(offd);
      offd_j = hypre_CSRMatrixJ(offd);
      /* pass 2: scatter values into the now correctly sized blocks */
      jo = 0;
      jd = 0;
      for (i = 0; i < num_rows; i++)
      {
         for (j = a_i[i] - first_elmt; j < a_i[i + 1] - first_elmt; j++)
         {
            if (a_j[j] < (HYPRE_Int)first_col_diag || a_j[j] > (HYPRE_Int)last_col_diag)
            {
               offd_data[jo] = a_data[j];
               offd_j[jo++] = marker[a_j[j]];
            }
            else
            {
               diag_data[jd] = a_data[j];
               diag_j[jd++] = (HYPRE_Int)(a_j[j] - first_col_diag);
            }
         }
      }
      hypre_TFree(marker, HYPRE_MEMORY_HOST);
   }
   else
   {
      /* all columns are local: diag is a straight copy, offd stays empty */
      hypre_CSRMatrixNumNonzeros(diag) = num_nonzeros;
      hypre_CSRMatrixInitialize(diag);
      diag_data = hypre_CSRMatrixData(diag);
      diag_i = hypre_CSRMatrixI(diag);
      diag_j = hypre_CSRMatrixJ(diag);
      for (i = 0; i < num_nonzeros; i++)
      {
         diag_data[i] = a_data[i];
         diag_j[i] = a_j[i];
      }
      offd_i = hypre_CTAlloc(HYPRE_Int, num_rows + 1, HYPRE_MEMORY_HOST);
      for (i = 0; i < num_rows + 1; i++)
      {
         diag_i[i] = a_i[i];
         offd_i[i] = 0;
      }
      hypre_CSRMatrixNumCols(offd) = 0;
      hypre_CSRMatrixI(offd) = offd_i;
   }
   return hypre_error_flag;
}

/* Concatenates a ParCSR matrix's diag and offd blocks into one local CSR
   matrix with global (big) column indices; rows keep diag entries first,
   then offd entries mapped through col_map_offd. */
hypre_CSRMatrix *
hypre_MergeDiagAndOffd(hypre_ParCSRMatrix *par_matrix)
{
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
   hypre_CSRMatrix *matrix;
   HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(par_matrix);
   HYPRE_BigInt first_col_diag = hypre_ParCSRMatrixFirstColDiag(par_matrix);
   HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(diag);
   HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
   HYPRE_Int *diag_j = hypre_CSRMatrixJ(diag);
   HYPRE_Complex *diag_data = hypre_CSRMatrixData(diag);
   HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
   HYPRE_Int *offd_j = hypre_CSRMatrixJ(offd);
   HYPRE_Complex *offd_data = hypre_CSRMatrixData(offd);
   HYPRE_Int *matrix_i;
   HYPRE_BigInt *matrix_j;
   HYPRE_Complex *matrix_data;
   HYPRE_Int num_nonzeros, i, j;
   HYPRE_Int count;
   HYPRE_Int size, rest, num_threads, ii;
   HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(par_matrix);
   num_nonzeros = diag_i[num_rows] + offd_i[num_rows];
   matrix = hypre_CSRMatrixCreate(num_rows, num_cols, num_nonzeros);
   hypre_CSRMatrixMemoryLocation(matrix) = memory_location;
   /* BigInitialize: column indices go into the big_j (HYPRE_BigInt) array */
   hypre_CSRMatrixBigInitialize(matrix);
   matrix_i = hypre_CSRMatrixI(matrix);
   matrix_j = hypre_CSRMatrixBigJ(matrix);
matrix_data = hypre_CSRMatrixData(matrix);
   num_threads = hypre_NumThreads();
   /* split the rows into num_threads nearly equal chunks; the first `rest`
      chunks get one extra row */
   size = num_rows / num_threads;
   rest = num_rows - size * num_threads;
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(ii, i, j, count) HYPRE_SMP_SCHEDULE
#endif
   for (ii = 0; ii < num_threads; ii++)
   {
      HYPRE_Int ns, ne;
      if (ii < rest)
      {
         ns = ii * size + ii;
         ne = (ii + 1) * size + ii + 1;
      }
      else
      {
         ns = ii * size + rest;
         ne = (ii + 1) * size + rest;
      }
      /* each chunk's output offset is fully determined by diag_i/offd_i,
         so threads write disjoint ranges (stray ';' kept verbatim) */
      count = diag_i[ns] + offd_i[ns];;
      for (i = ns; i < ne; i++)
      {
         matrix_i[i] = count;
         for (j = diag_i[i]; j < diag_i[i + 1]; j++)
         {
            matrix_data[count] = diag_data[j];
            matrix_j[count++] = (HYPRE_BigInt)diag_j[j] + first_col_diag;
         }
         for (j = offd_i[i]; j < offd_i[i + 1]; j++)
         {
            matrix_data[count] = offd_data[j];
            matrix_j[count++] = col_map_offd[offd_j[j]];
         }
      }
   } /* end parallel region */
   matrix_i[num_rows] = num_nonzeros;
   return matrix;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixToCSRMatrixAll:
 * generates a CSRMatrix from a ParCSRMatrix on all processors that have
 * parts of the ParCSRMatrix
 * Warning: this only works for a ParCSRMatrix that is smaller than 2^31-1
 *--------------------------------------------------------------------------*/
/* Protocol: ranks with rows contact rank 0 with their last-row index
   (hypre_DataExchangeList); rank 0 assembles the sorted participant list and
   row-start array and sends it to the other participants (tag1); then all
   participants exchange row pointers (tag2) and finally column indices (tag3)
   and values (tag1) all-to-all among themselves. Ranks owning no rows get
   NULL back. */
hypre_CSRMatrix *
hypre_ParCSRMatrixToCSRMatrixAll(hypre_ParCSRMatrix *par_matrix)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(par_matrix);
   hypre_CSRMatrix *matrix;
   hypre_CSRMatrix *local_matrix;
   HYPRE_Int num_rows = (HYPRE_Int)hypre_ParCSRMatrixGlobalNumRows(par_matrix);
   HYPRE_Int num_cols = (HYPRE_Int)hypre_ParCSRMatrixGlobalNumCols(par_matrix);
   HYPRE_Int *matrix_i;
   HYPRE_Int *matrix_j;
   HYPRE_Complex *matrix_data;
   HYPRE_Int *local_matrix_i;
   HYPRE_Int *local_matrix_j;
   HYPRE_Complex *local_matrix_data;
   HYPRE_Int i, j;
   HYPRE_Int local_num_rows;
   HYPRE_Int local_num_nonzeros;
   HYPRE_Int num_nonzeros;
   HYPRE_Int num_data;
   HYPRE_Int num_requests;
   HYPRE_Int vec_len, offset;
   HYPRE_Int start_index;
   HYPRE_Int proc_id;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int num_types;        /* number of ranks that own rows */
   HYPRE_Int *used_procs;      /* their ids, sorted ascending */
   hypre_MPI_Request *requests;
   hypre_MPI_Status *status;
   HYPRE_Int *new_vec_starts;  /* row-start of each participant in the result */
   HYPRE_Int num_contacts;
   HYPRE_Int contact_proc_list[1];
   HYPRE_Int contact_send_buf[1];
   HYPRE_Int contact_send_buf_starts[2];
   HYPRE_Int max_response_size;
   HYPRE_Int *response_recv_buf = NULL;
   HYPRE_Int *response_recv_buf_starts = NULL;
   hypre_DataExchangeResponse response_obj;
   hypre_ProcListElements send_proc_obj;
   HYPRE_Int *send_info = NULL;
   hypre_MPI_Status status1;
   HYPRE_Int count, tag1 = 11112, tag2 = 22223, tag3 = 33334;
   HYPRE_Int start;
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   local_num_rows = (HYPRE_Int)(hypre_ParCSRMatrixLastRowIndex(par_matrix) -
                                hypre_ParCSRMatrixFirstRowIndex(par_matrix) + 1);
   local_matrix = hypre_MergeDiagAndOffd(par_matrix); /* creates matrix */
   hypre_CSRMatrixBigJtoJ(local_matrix); /* copies big_j to j */
   local_matrix_i = hypre_CSRMatrixI(local_matrix);
   local_matrix_j = hypre_CSRMatrixJ(local_matrix);
   local_matrix_data = hypre_CSRMatrixData(local_matrix);
   /* determine procs that have vector data and store their ids in used_procs */
   /* we need to do an exchange data for this. If I own row then I will
      contact processor 0 with the endpoint of my local range */
   if (local_num_rows > 0)
   {
      num_contacts = 1;
      contact_proc_list[0] = 0;
      contact_send_buf[0] = (HYPRE_Int)hypre_ParCSRMatrixLastRowIndex(par_matrix);
      contact_send_buf_starts[0] = 0;
      contact_send_buf_starts[1] = 1;
   }
   else
   {
      num_contacts = 0;
      contact_send_buf_starts[0] = 0;
      contact_send_buf_starts[1] = 0;
   }
   /*build the response object*/
   /*send_proc_obj will be for saving info from contacts */
   send_proc_obj.length = 0;
   send_proc_obj.storage_length = 10;
   send_proc_obj.id =
      hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length, HYPRE_MEMORY_HOST);
   send_proc_obj.vec_starts =
      hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
   send_proc_obj.vec_starts[0] = 0;
   send_proc_obj.element_storage_length = 10;
   send_proc_obj.elements =
      hypre_CTAlloc(HYPRE_BigInt, send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST);
   max_response_size = 0; /* each response is null */
   response_obj.fill_response = hypre_FillResponseParToCSRMatrix;
   response_obj.data1 = NULL;
   response_obj.data2 = &send_proc_obj; /*this is where we keep info from contacts*/
   hypre_DataExchangeList(num_contacts, contact_proc_list, contact_send_buf,
                          contact_send_buf_starts, sizeof(HYPRE_Int), sizeof(HYPRE_Int),
                          &response_obj, max_response_size, 1, comm,
                          (void**) &response_recv_buf, &response_recv_buf_starts);
   /* now processor 0 should have a list of ranges for processors that have
      rows - these are in send_proc_obj - it needs to create the new list of
      processors and also an array of vec starts - and send to those who own row*/
   if (my_id)
   {
      if (local_num_rows)
      {
         /* look for a message from processor 0 */
         hypre_MPI_Probe(0, tag1, comm, &status1);
         hypre_MPI_Get_count(&status1, HYPRE_MPI_INT, &count);
         send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
         hypre_MPI_Recv(send_info, count, HYPRE_MPI_INT, 0, tag1, comm, &status1);
         /* now unpack: [0]=num_types, then ids, then the vec starts */
         num_types = send_info[0];
         used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST);
         new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types + 1, HYPRE_MEMORY_HOST);
         for (i = 1; i <= num_types; i++)
         {
            used_procs[i - 1] = send_info[i];
         }
         for (i = num_types + 1; i < count; i++)
         {
            new_vec_starts[i - num_types - 1] = send_info[i] ;
         }
      }
      else /* clean up and exit */
      {
         hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);
         hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST);
         hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST);
         if (response_recv_buf) { hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST); }
         if (response_recv_buf_starts) { hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST); }
         if (hypre_CSRMatrixOwnsData(local_matrix))
         {
            hypre_CSRMatrixDestroy(local_matrix);
         }
         else
         {
            hypre_TFree(local_matrix, HYPRE_MEMORY_HOST);
         }
         return NULL;
      }
   }
   else /* my_id ==0 */
   {
      num_types = send_proc_obj.length;
      used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST);
      new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types + 1, HYPRE_MEMORY_HOST);
      new_vec_starts[0] = 0;
      for (i = 0; i < num_types; i++)
      {
         used_procs[i] = send_proc_obj.id[i];
         /* contacted endpoint is the last row index; +1 gives the next start */
         new_vec_starts[i + 1] = send_proc_obj.elements[i] + 1;
      }
      hypre_qsort0(used_procs, 0, num_types - 1);
      hypre_qsort0(new_vec_starts, 0, num_types);
      /*now we need to put into an array to send */
      count = 2 * num_types + 2;
      send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
      send_info[0] = num_types;
      for (i = 1; i <= num_types; i++)
      {
         send_info[i] = (HYPRE_BigInt)used_procs[i - 1];
      }
      for (i = num_types + 1; i < count; i++)
      {
         send_info[i] = new_vec_starts[i - num_types - 1];
      }
      requests = hypre_CTAlloc(hypre_MPI_Request, num_types, HYPRE_MEMORY_HOST);
      status = hypre_CTAlloc(hypre_MPI_Status, num_types, HYPRE_MEMORY_HOST);
      /* don't send to myself - these are sorted so my id would be first*/
      start = 0;
      if (num_types && used_procs[0] == 0)
      {
         start = 1;
      }
      for (i = start; i < num_types; i++)
      {
         hypre_MPI_Isend(send_info, count, HYPRE_MPI_INT, used_procs[i], tag1, comm,
                         &requests[i - start]);
      }
      hypre_MPI_Waitall(num_types - start, requests, status);
      hypre_TFree(status, HYPRE_MEMORY_HOST);
      hypre_TFree(requests, HYPRE_MEMORY_HOST);
   }
   /* clean up */
   hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST);
   hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST);
   hypre_TFree(send_info, HYPRE_MEMORY_HOST);
   if (response_recv_buf) { hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST); }
   if (response_recv_buf_starts) { hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST); }
   /* now proc 0 can exit if it has no rows */
   if (!local_num_rows)
   {
      if (hypre_CSRMatrixOwnsData(local_matrix))
      {
         hypre_CSRMatrixDestroy(local_matrix);
      }
      else
      {
         hypre_TFree(local_matrix, HYPRE_MEMORY_HOST);
      }
      hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST);
      hypre_TFree(used_procs, HYPRE_MEMORY_HOST);
      return NULL;
   }
   /* everyone left has rows and knows: new_vec_starts, num_types, and used_procs */
   /* this matrix should be rather small */
   matrix_i = hypre_CTAlloc(HYPRE_Int, num_rows + 1, HYPRE_MEMORY_HOST);
   num_requests = 4 * num_types;
   requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST);
   status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST);
   /* exchange contents of local_matrix_i - here we are sending to ourself also*/
   j = 0;
   for (i = 0; i < num_types; i++)
   {
      proc_id = used_procs[i];
      vec_len = (HYPRE_Int)(new_vec_starts[i + 1] - new_vec_starts[i]);
      hypre_MPI_Irecv(&matrix_i[new_vec_starts[i] + 1], vec_len, HYPRE_MPI_INT,
                      proc_id, tag2, comm, &requests[j++]);
   }
   for (i = 0; i < num_types; i++)
   {
      proc_id = used_procs[i];
      hypre_MPI_Isend(&local_matrix_i[1], local_num_rows, HYPRE_MPI_INT,
                      proc_id, tag2, comm, &requests[j++]);
   }
   hypre_MPI_Waitall(j, requests, status);
   /* generate matrix_i from received data */
   /* global numbering?*/
   /* each block arrived with block-local offsets; shift every block by the
      running nnz total of all preceding blocks */
   offset = matrix_i[new_vec_starts[1]];
   for (i = 1; i < num_types; i++)
   {
      for (j = new_vec_starts[i]; j < new_vec_starts[i + 1]; j++)
      {
         matrix_i[j + 1] += offset;
      }
      offset = matrix_i[new_vec_starts[i + 1]];
   }
   num_nonzeros = matrix_i[num_rows];
   matrix = hypre_CSRMatrixCreate(num_rows, num_cols, num_nonzeros);
   hypre_CSRMatrixMemoryLocation(matrix) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixI(matrix) = matrix_i;
   hypre_CSRMatrixInitialize(matrix);
   matrix_j = hypre_CSRMatrixJ(matrix);
   matrix_data = hypre_CSRMatrixData(matrix);
   /* generate datatypes for further data exchange and exchange remaining
      data, i.e. column info and actual data */
   j = 0;
   for (i = 0; i < num_types; i++)
   {
      proc_id = used_procs[i];
      start_index = matrix_i[(HYPRE_Int)new_vec_starts[i]];
      num_data = matrix_i[(HYPRE_Int)new_vec_starts[i + 1]] - start_index;
      hypre_MPI_Irecv(&matrix_data[start_index], num_data, HYPRE_MPI_COMPLEX,
                      used_procs[i], tag1, comm, &requests[j++]);
      hypre_MPI_Irecv(&matrix_j[start_index], num_data, HYPRE_MPI_INT,
                      used_procs[i], tag3, comm, &requests[j++]);
   }
   local_num_nonzeros = local_matrix_i[local_num_rows];
   for (i = 0; i < num_types; i++)
   {
      hypre_MPI_Isend(local_matrix_data, local_num_nonzeros, HYPRE_MPI_COMPLEX,
                      used_procs[i], tag1, comm, &requests[j++]);
      hypre_MPI_Isend(local_matrix_j, local_num_nonzeros, HYPRE_MPI_INT,
                      used_procs[i], tag3, comm, &requests[j++]);
   }
   hypre_MPI_Waitall(num_requests, requests, status);
   hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST);
   if (hypre_CSRMatrixOwnsData(local_matrix))
   {
      hypre_CSRMatrixDestroy(local_matrix);
   }
   else
   {
      hypre_TFree(local_matrix, HYPRE_MEMORY_HOST);
   }
   if (num_requests)
   {
      hypre_TFree(requests, HYPRE_MEMORY_HOST);
      hypre_TFree(status, HYPRE_MEMORY_HOST);
   }
   return matrix;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixCopy,
 * copies B to A,
 * if copy_data = 0, only the structure of A is copied to B
 * the routine does not check whether the dimensions of A and B are compatible
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixCopy( hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *B, HYPRE_Int copy_data ) { hypre_CSRMatrix *A_diag; hypre_CSRMatrix *A_offd; HYPRE_BigInt *col_map_offd_A; hypre_CSRMatrix *B_diag; hypre_CSRMatrix *B_offd; HYPRE_BigInt *col_map_offd_B; HYPRE_Int num_cols_offd_A; HYPRE_Int num_cols_offd_B; if (!A) { hypre_error_in_arg(1); return hypre_error_flag; } if (!B) { hypre_error_in_arg(1); return hypre_error_flag; } A_diag = hypre_ParCSRMatrixDiag(A); A_offd = hypre_ParCSRMatrixOffd(A); B_diag = hypre_ParCSRMatrixDiag(B); B_offd = hypre_ParCSRMatrixOffd(B); num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); hypre_assert(num_cols_offd_A == num_cols_offd_B); col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); hypre_CSRMatrixCopy(A_diag, B_diag, copy_data); hypre_CSRMatrixCopy(A_offd, B_offd, copy_data); /* should not happen if B has been initialized */ if (num_cols_offd_B && col_map_offd_B == NULL) { col_map_offd_B = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_B, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixColMapOffd(B) = col_map_offd_B; } hypre_TMemcpy(col_map_offd_B, col_map_offd_A, HYPRE_BigInt, num_cols_offd_B, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); return hypre_error_flag; } /*-------------------------------------------------------------------- * hypre_FillResponseParToCSRMatrix * Fill response function for determining the send processors * data exchange *--------------------------------------------------------------------*/ HYPRE_Int hypre_FillResponseParToCSRMatrix( void *p_recv_contact_buf, HYPRE_Int contact_size, HYPRE_Int contact_proc, void *ro, MPI_Comm comm, void **p_send_response_buf, HYPRE_Int *response_message_size ) { HYPRE_Int myid; HYPRE_Int i, index, count, elength; HYPRE_BigInt *recv_contact_buf = (HYPRE_BigInt * ) p_recv_contact_buf; hypre_DataExchangeResponse *response_obj = (hypre_DataExchangeResponse*)ro; hypre_ProcListElements *send_proc_obj = (hypre_ProcListElements*)response_obj->data2; 
hypre_MPI_Comm_rank(comm, &myid ); /*check to see if we need to allocate more space in send_proc_obj for ids*/ if (send_proc_obj->length == send_proc_obj->storage_length) { send_proc_obj->storage_length += 10; /*add space for 10 more processors*/ send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id, HYPRE_Int, send_proc_obj->storage_length, HYPRE_MEMORY_HOST); send_proc_obj->vec_starts = hypre_TReAlloc(send_proc_obj->vec_starts, HYPRE_Int, send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST); } /*initialize*/ count = send_proc_obj->length; index = send_proc_obj->vec_starts[count]; /*this is the number of elements*/ /*send proc*/ send_proc_obj->id[count] = contact_proc; /*do we need more storage for the elements?*/ if (send_proc_obj->element_storage_length < index + contact_size) { elength = hypre_max(contact_size, 10); elength += index; send_proc_obj->elements = hypre_TReAlloc(send_proc_obj->elements, HYPRE_BigInt, elength, HYPRE_MEMORY_HOST); send_proc_obj->element_storage_length = elength; } /*populate send_proc_obj*/ for (i = 0; i < contact_size; i++) { send_proc_obj->elements[index++] = recv_contact_buf[i]; } send_proc_obj->vec_starts[count + 1] = index; send_proc_obj->length++; /*output - no message to return (confirmation) */ *response_message_size = 0; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixUnion * Creates and returns a new matrix whose elements are the union of A and B. * Data is not copied, only structural information is created. 
* A and B must have the same communicator, numbers and distributions of rows * and columns (they can differ in which row-column pairs are nonzero, thus * in which columns are in a offd block) *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_ParCSRMatrixUnion( hypre_ParCSRMatrix * A, hypre_ParCSRMatrix * B ) { hypre_ParCSRMatrix *C; HYPRE_BigInt *col_map_offd_C = NULL; HYPRE_Int my_id, p; MPI_Comm comm = hypre_ParCSRMatrixComm( A ); hypre_MPI_Comm_rank(comm, &my_id); C = hypre_CTAlloc( hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixComm( C ) = hypre_ParCSRMatrixComm( A ); hypre_ParCSRMatrixGlobalNumRows( C ) = hypre_ParCSRMatrixGlobalNumRows( A ); hypre_ParCSRMatrixGlobalNumCols( C ) = hypre_ParCSRMatrixGlobalNumCols( A ); hypre_ParCSRMatrixFirstRowIndex( C ) = hypre_ParCSRMatrixFirstRowIndex( A ); hypre_assert( hypre_ParCSRMatrixFirstRowIndex( B ) == hypre_ParCSRMatrixFirstRowIndex( A ) ); hypre_TMemcpy(hypre_ParCSRMatrixRowStarts(C), hypre_ParCSRMatrixRowStarts(A), HYPRE_BigInt, 2, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); hypre_TMemcpy(hypre_ParCSRMatrixColStarts(C), hypre_ParCSRMatrixColStarts(A), HYPRE_BigInt, 2, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); for (p = 0; p < 2; ++p) hypre_assert( hypre_ParCSRMatrixColStarts(A)[p] == hypre_ParCSRMatrixColStarts(B)[p] ); hypre_ParCSRMatrixFirstColDiag( C ) = hypre_ParCSRMatrixFirstColDiag( A ); hypre_ParCSRMatrixLastRowIndex( C ) = hypre_ParCSRMatrixLastRowIndex( A ); hypre_ParCSRMatrixLastColDiag( C ) = hypre_ParCSRMatrixLastColDiag( A ); hypre_ParCSRMatrixDiag( C ) = hypre_CSRMatrixUnion( hypre_ParCSRMatrixDiag(A), hypre_ParCSRMatrixDiag(B), 0, 0, 0 ); hypre_ParCSRMatrixOffd( C ) = hypre_CSRMatrixUnion( hypre_ParCSRMatrixOffd(A), hypre_ParCSRMatrixOffd(B), hypre_ParCSRMatrixColMapOffd(A), hypre_ParCSRMatrixColMapOffd(B), &col_map_offd_C ); hypre_ParCSRMatrixColMapOffd( C ) = col_map_offd_C; hypre_ParCSRMatrixCommPkg( C ) = NULL; 
hypre_ParCSRMatrixCommPkgT( C ) = NULL; hypre_ParCSRMatrixOwnsData( C ) = 1; /* SetNumNonzeros, SetDNumNonzeros are global, need hypre_MPI_Allreduce. I suspect, but don't know, that other parts of hypre do not assume that the correct values have been set. hypre_ParCSRMatrixSetNumNonzeros( C ); hypre_ParCSRMatrixSetDNumNonzeros( C );*/ hypre_ParCSRMatrixNumNonzeros( C ) = 0; hypre_ParCSRMatrixDNumNonzeros( C ) = 0.0; hypre_ParCSRMatrixRowindices( C ) = NULL; hypre_ParCSRMatrixRowvalues( C ) = NULL; hypre_ParCSRMatrixGetrowactive( C ) = 0; return C; } /* Perform dual truncation of ParCSR matrix. * This code is adapted from original BoomerAMGInterpTruncate() * A: parCSR matrix to be modified * tol: relative tolerance or truncation factor for dropping small terms * max_row_elmts: maximum number of (largest) nonzero elements to keep. * rescale: Boolean on whether or not to scale resulting matrix. Scaling for * each row satisfies: sum(nonzero values before dropping)/ sum(nonzero values after dropping), * this way, the application of the truncated matrix on a constant vector is the same as that of * the original matrix. * nrm_type: type of norm used for dropping with tol. 
* -- 0 = infinity-norm * -- 1 = 1-norm * -- 2 = 2-norm */ HYPRE_Int hypre_ParCSRMatrixTruncate(hypre_ParCSRMatrix *A, HYPRE_Real tol, HYPRE_Int max_row_elmts, HYPRE_Int rescale, HYPRE_Int nrm_type) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] -= hypre_MPI_Wtime(); #endif hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_j_new; HYPRE_Real *A_diag_data_new; hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j_new; HYPRE_Real *A_offd_data_new; HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A_diag); HYPRE_Int i, j, start_j; HYPRE_Int ierr = 0; HYPRE_Int next_open; HYPRE_Int now_checking; HYPRE_Int num_lost; HYPRE_Int num_lost_global = 0; HYPRE_Int next_open_offd; HYPRE_Int now_checking_offd; HYPRE_Int num_lost_offd; HYPRE_Int num_lost_global_offd; HYPRE_Int A_diag_size; HYPRE_Int A_offd_size; HYPRE_Int num_elmts; HYPRE_Int cnt, cnt_diag, cnt_offd; HYPRE_Real row_nrm; HYPRE_Real drop_coeff; HYPRE_Real row_sum; HYPRE_Real scale; HYPRE_MemoryLocation memory_location_diag = hypre_CSRMatrixMemoryLocation(A_diag); HYPRE_MemoryLocation memory_location_offd = hypre_CSRMatrixMemoryLocation(A_offd); /* Threading variables. Entry i of num_lost_(offd_)per_thread holds the * number of dropped entries over thread i's row range. Cum_lost_per_thread * will temporarily store the cumulative number of dropped entries up to * each thread. 
*/ HYPRE_Int my_thread_num, num_threads, start, stop; HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST); HYPRE_Int * cum_lost_per_thread; HYPRE_Int * num_lost_per_thread; HYPRE_Int * num_lost_offd_per_thread; /* Initialize threading variables */ max_num_threads[0] = hypre_NumThreads(); cum_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); num_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); num_lost_offd_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); for (i = 0; i < max_num_threads[0]; i++) { num_lost_per_thread[i] = 0; num_lost_offd_per_thread[i] = 0; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,my_thread_num,num_threads,row_nrm, drop_coeff,j,start_j,row_sum,scale,num_lost,now_checking,next_open,num_lost_offd,now_checking_offd,next_open_offd,start,stop,cnt_diag,cnt_offd,num_elmts,cnt) #endif { my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); /* Compute each thread's range of rows to truncate and compress. Note, * that i, j and data are all compressed as entries are dropped, but * that the compression only occurs locally over each thread's row * range. A_diag_i is only made globally consistent at the end of this * routine. During the dropping phases, A_diag_i[stop] will point to * the start of the next thread's row range. 
*/ /* my row range */ start = (n_fine / num_threads) * my_thread_num; if (my_thread_num == num_threads - 1) { stop = n_fine; } else { stop = (n_fine / num_threads) * (my_thread_num + 1); } /* * Truncate based on truncation tolerance */ if (tol > 0) { num_lost = 0; num_lost_offd = 0; next_open = A_diag_i[start]; now_checking = A_diag_i[start]; next_open_offd = A_offd_i[start];; now_checking_offd = A_offd_i[start];; for (i = start; i < stop; i++) { row_nrm = 0; /* compute norm for dropping small terms */ if (nrm_type == 0) { /* infty-norm */ for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { row_nrm = (row_nrm < fabs(A_diag_data[j])) ? fabs(A_diag_data[j]) : row_nrm; } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { row_nrm = (row_nrm < fabs(A_offd_data[j])) ? fabs(A_offd_data[j]) : row_nrm; } } if (nrm_type == 1) { /* 1-norm */ for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { row_nrm += fabs(A_diag_data[j]); } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { row_nrm += fabs(A_offd_data[j]); } } if (nrm_type == 2) { /* 2-norm */ for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { HYPRE_Complex v = A_diag_data[j]; row_nrm += v * v; } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { HYPRE_Complex v = A_offd_data[j]; row_nrm += v * v; } row_nrm = sqrt(row_nrm); } drop_coeff = tol * row_nrm; start_j = A_diag_i[i]; if (num_lost) { A_diag_i[i] -= num_lost; } row_sum = 0; scale = 0; for (j = start_j; j < A_diag_i[i + 1]; j++) { row_sum += A_diag_data[now_checking]; if (fabs(A_diag_data[now_checking]) < drop_coeff) { num_lost++; now_checking++; } else { scale += A_diag_data[now_checking]; A_diag_data[next_open] = A_diag_data[now_checking]; A_diag_j[next_open] = A_diag_j[now_checking]; now_checking++; next_open++; } } start_j = A_offd_i[i]; if (num_lost_offd) { A_offd_i[i] -= num_lost_offd; } for (j = start_j; j < A_offd_i[i + 1]; j++) { row_sum += A_offd_data[now_checking_offd]; if (fabs(A_offd_data[now_checking_offd]) < drop_coeff) { num_lost_offd++; now_checking_offd++; 
} else { scale += A_offd_data[now_checking_offd]; A_offd_data[next_open_offd] = A_offd_data[now_checking_offd]; A_offd_j[next_open_offd] = A_offd_j[now_checking_offd]; now_checking_offd++; next_open_offd++; } } /* scale row of A */ if (rescale && scale != 0.) { if (scale != row_sum) { scale = row_sum / scale; for (j = A_diag_i[i]; j < (A_diag_i[i + 1] - num_lost); j++) { A_diag_data[j] *= scale; } for (j = A_offd_i[i]; j < (A_offd_i[i + 1] - num_lost_offd); j++) { A_offd_data[j] *= scale; } } } } /* end loop for (i = 0; i < n_fine; i++) */ /* store number of dropped elements and number of threads */ if (my_thread_num == 0) { max_num_threads[0] = num_threads; } num_lost_per_thread[my_thread_num] = num_lost; num_lost_offd_per_thread[my_thread_num] = num_lost_offd; } /* end if (trunc_factor > 0) */ /* * Truncate based on capping the nnz per row * */ if (max_row_elmts > 0) { HYPRE_Int A_mxnum, cnt1, last_index, last_index_offd; HYPRE_Int *A_aux_j; HYPRE_Real *A_aux_data; /* find maximum row length locally over this row range */ A_mxnum = 0; for (i = start; i < stop; i++) { /* Note A_diag_i[stop] is the starting point for the next thread * in j and data, not the stop point for this thread */ last_index = A_diag_i[i + 1]; last_index_offd = A_offd_i[i + 1]; if (i == stop - 1) { last_index -= num_lost_per_thread[my_thread_num]; last_index_offd -= num_lost_offd_per_thread[my_thread_num]; } cnt1 = last_index - A_diag_i[i] + last_index_offd - A_offd_i[i]; if (cnt1 > A_mxnum) { A_mxnum = cnt1; } } /* Some rows exceed max_row_elmts, and require truncation. Essentially, * each thread truncates and compresses its range of rows locally. 
*/ if (A_mxnum > max_row_elmts) { num_lost = 0; num_lost_offd = 0; /* two temporary arrays to hold row i for temporary operations */ A_aux_j = hypre_CTAlloc(HYPRE_Int, A_mxnum, HYPRE_MEMORY_HOST); A_aux_data = hypre_CTAlloc(HYPRE_Real, A_mxnum, HYPRE_MEMORY_HOST); cnt_diag = A_diag_i[start]; cnt_offd = A_offd_i[start]; for (i = start; i < stop; i++) { /* Note A_diag_i[stop] is the starting point for the next thread * in j and data, not the stop point for this thread */ last_index = A_diag_i[i + 1]; last_index_offd = A_offd_i[i + 1]; if (i == stop - 1) { last_index -= num_lost_per_thread[my_thread_num]; last_index_offd -= num_lost_offd_per_thread[my_thread_num]; } row_sum = 0; num_elmts = last_index - A_diag_i[i] + last_index_offd - A_offd_i[i]; if (max_row_elmts < num_elmts) { /* copy both diagonal and off-diag parts of row i to _aux_ arrays */ cnt = 0; for (j = A_diag_i[i]; j < last_index; j++) { A_aux_j[cnt] = A_diag_j[j]; A_aux_data[cnt++] = A_diag_data[j]; row_sum += A_diag_data[j]; } num_lost += cnt; cnt1 = cnt; for (j = A_offd_i[i]; j < last_index_offd; j++) { A_aux_j[cnt] = A_offd_j[j] + num_cols; A_aux_data[cnt++] = A_offd_data[j]; row_sum += A_offd_data[j]; } num_lost_offd += cnt - cnt1; /* sort data */ hypre_qsort2_abs(A_aux_j, A_aux_data, 0, cnt - 1); scale = 0; if (i > start) { A_diag_i[i] = cnt_diag; A_offd_i[i] = cnt_offd; } for (j = 0; j < max_row_elmts; j++) { scale += A_aux_data[j]; if (A_aux_j[j] < num_cols) { A_diag_j[cnt_diag] = A_aux_j[j]; A_diag_data[cnt_diag++] = A_aux_data[j]; } else { A_offd_j[cnt_offd] = A_aux_j[j] - num_cols; A_offd_data[cnt_offd++] = A_aux_data[j]; } } num_lost -= cnt_diag - A_diag_i[i]; num_lost_offd -= cnt_offd - A_offd_i[i]; /* scale row of A */ if (rescale && (scale != 0.)) { if (scale != row_sum) { scale = row_sum / scale; for (j = A_diag_i[i]; j < cnt_diag; j++) { A_diag_data[j] *= scale; } for (j = A_offd_i[i]; j < cnt_offd; j++) { A_offd_data[j] *= scale; } } } } /* end if (max_row_elmts < num_elmts) */ else { /* 
nothing dropped from this row, but still have to shift entries back * by the number dropped so far */ if (A_diag_i[i] != cnt_diag) { start_j = A_diag_i[i]; A_diag_i[i] = cnt_diag; for (j = start_j; j < last_index; j++) { A_diag_j[cnt_diag] = A_diag_j[j]; A_diag_data[cnt_diag++] = A_diag_data[j]; } } else { cnt_diag += last_index - A_diag_i[i]; } if (A_offd_i[i] != cnt_offd) { start_j = A_offd_i[i]; A_offd_i[i] = cnt_offd; for (j = start_j; j < last_index_offd; j++) { A_offd_j[cnt_offd] = A_offd_j[j]; A_offd_data[cnt_offd++] = A_offd_data[j]; } } else { cnt_offd += last_index_offd - A_offd_i[i]; } } } /* end for (i = 0; i < n_fine; i++) */ num_lost_per_thread[my_thread_num] += num_lost; num_lost_offd_per_thread[my_thread_num] += num_lost_offd; hypre_TFree(A_aux_j, HYPRE_MEMORY_HOST); hypre_TFree(A_aux_data, HYPRE_MEMORY_HOST); } /* end if (A_mxnum > max_row_elmts) */ } /* end if (max_row_elmts > 0) */ /* Sum up num_lost_global */ #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { num_lost_global = 0; num_lost_global_offd = 0; for (i = 0; i < max_num_threads[0]; i++) { num_lost_global += num_lost_per_thread[i]; num_lost_global_offd += num_lost_offd_per_thread[i]; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* * Synchronize and create new diag data structures */ if (num_lost_global) { /* Each thread has it's own locally compressed CSR matrix from rows start * to stop. Now, we have to copy each thread's chunk into the new * process-wide CSR data structures * * First, we compute the new process-wide number of nonzeros (i.e., * A_diag_size), and compute cum_lost_per_thread[k] so that this * entry holds the cumulative sum of entries dropped up to and * including thread k. 
*/ if (my_thread_num == 0) { A_diag_size = A_diag_i[n_fine]; for (i = 0; i < max_num_threads[0]; i++) { A_diag_size -= num_lost_per_thread[i]; if (i > 0) { cum_lost_per_thread[i] = num_lost_per_thread[i] + cum_lost_per_thread[i - 1]; } else { cum_lost_per_thread[i] = num_lost_per_thread[i]; } } A_diag_j_new = hypre_CTAlloc(HYPRE_Int, A_diag_size, memory_location_diag); A_diag_data_new = hypre_CTAlloc(HYPRE_Real, A_diag_size, memory_location_diag); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* points to next open spot in new data structures for this thread */ if (my_thread_num == 0) { next_open = 0; } else { /* remember, cum_lost_per_thread[k] stores the num dropped up to and * including thread k */ next_open = A_diag_i[start] - cum_lost_per_thread[my_thread_num - 1]; } /* copy the j and data arrays over */ for (i = A_diag_i[start]; i < A_diag_i[stop] - num_lost_per_thread[my_thread_num]; i++) { A_diag_j_new[next_open] = A_diag_j[i]; A_diag_data_new[next_open] = A_diag_data[i]; next_open += 1; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* update A_diag_i with number of dropped entries by all lower ranked * threads */ if (my_thread_num > 0) { for (i = start; i < stop; i++) { A_diag_i[i] -= cum_lost_per_thread[my_thread_num - 1]; } } if (my_thread_num == 0) { /* Set last entry */ A_diag_i[n_fine] = A_diag_size ; hypre_TFree(A_diag_j, memory_location_diag); hypre_TFree(A_diag_data, memory_location_diag); hypre_CSRMatrixJ(A_diag) = A_diag_j_new; hypre_CSRMatrixData(A_diag) = A_diag_data_new; hypre_CSRMatrixNumNonzeros(A_diag) = A_diag_size; } } /* * Synchronize and create new offd data structures */ #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (num_lost_global_offd) { /* Repeat process for off-diagonal */ if (my_thread_num == 0) { A_offd_size = A_offd_i[n_fine]; for (i = 0; i < max_num_threads[0]; i++) { A_offd_size -= num_lost_offd_per_thread[i]; if (i > 0) { cum_lost_per_thread[i] = num_lost_offd_per_thread[i] + cum_lost_per_thread[i 
- 1]; } else { cum_lost_per_thread[i] = num_lost_offd_per_thread[i]; } } A_offd_j_new = hypre_CTAlloc(HYPRE_Int, A_offd_size, memory_location_offd); A_offd_data_new = hypre_CTAlloc(HYPRE_Real, A_offd_size, memory_location_offd); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* points to next open spot in new data structures for this thread */ if (my_thread_num == 0) { next_open = 0; } else { /* remember, cum_lost_per_thread[k] stores the num dropped up to and * including thread k */ next_open = A_offd_i[start] - cum_lost_per_thread[my_thread_num - 1]; } /* copy the j and data arrays over */ for (i = A_offd_i[start]; i < A_offd_i[stop] - num_lost_offd_per_thread[my_thread_num]; i++) { A_offd_j_new[next_open] = A_offd_j[i]; A_offd_data_new[next_open] = A_offd_data[i]; next_open += 1; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* update A_offd_i with number of dropped entries by all lower ranked * threads */ if (my_thread_num > 0) { for (i = start; i < stop; i++) { A_offd_i[i] -= cum_lost_per_thread[my_thread_num - 1]; } } if (my_thread_num == 0) { /* Set last entry */ A_offd_i[n_fine] = A_offd_size ; hypre_TFree(A_offd_j, memory_location_offd); hypre_TFree(A_offd_data, memory_location_offd); hypre_CSRMatrixJ(A_offd) = A_offd_j_new; hypre_CSRMatrixData(A_offd) = A_offd_data_new; hypre_CSRMatrixNumNonzeros(A_offd) = A_offd_size; } } } /* end parallel region */ hypre_TFree(max_num_threads, HYPRE_MEMORY_HOST); hypre_TFree(cum_lost_per_thread, HYPRE_MEMORY_HOST); hypre_TFree(num_lost_per_thread, HYPRE_MEMORY_HOST); hypre_TFree(num_lost_offd_per_thread, HYPRE_MEMORY_HOST); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] += hypre_MPI_Wtime(); #endif return ierr; } HYPRE_Int hypre_ParCSRMatrixSetConstantValues( hypre_ParCSRMatrix *A, HYPRE_Complex value ) { hypre_CSRMatrixSetConstantValues(hypre_ParCSRMatrixDiag(A), value); hypre_CSRMatrixSetConstantValues(hypre_ParCSRMatrixOffd(A), value); return hypre_error_flag; } void 
hypre_ParCSRMatrixCopyColMapOffdToDevice(hypre_ParCSRMatrix *A) { #if defined(HYPRE_USING_GPU) if (hypre_ParCSRMatrixDeviceColMapOffd(A) == NULL) { const HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)); hypre_ParCSRMatrixDeviceColMapOffd(A) = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(hypre_ParCSRMatrixDeviceColMapOffd(A), hypre_ParCSRMatrixColMapOffd(A), HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); } #endif }
hypre_merge_sort.c
/*BHEADER**********************************************************************
 * Copyright (c) 2017, Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * Written by Jongsoo Park et al. CODE-LLNL-738-322.
 * This file is part of AMG. See files README and COPYRIGHT for details.
 *
 * AMG is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * This software is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
 * GNU General Public License for more details.
 *
 ***********************************************************************EHEADER*/

/*
 * Thread-parallel merge sort of HYPRE_Int arrays.
 *
 * Strategy: each OpenMP thread qsorts its contiguous slice of the input,
 * then sorted slices are merged pairwise in log2(num_threads) rounds.
 * Each round's merge is itself parallel: the output range is split evenly
 * among the participating threads via a k-th element partitioning
 * (kth_element), so every thread writes a disjoint chunk of the output.
 */

#include "_hypre_utilities.h"
#include "hypre_hopscotch_hash.h"
#include "../seq_mv/HYPRE_seq_mv.h"

//#define DBG_MERGE_SORT
#ifdef DBG_MERGE_SORT
#include <assert.h>
#include <algorithm>
#include <unordered_map>
#endif

/* Classic swap-via-temporary; T is the (possibly pointer) type of a and b. */
#define SWAP(T, a, b) do { T tmp = a; a = b; b = tmp; } while (0)

/* Sequential merge of two sorted ranges [first1,last1) and [first2,last2)
 * into out. Stable in the sense that ties are taken from the first range
 * (the second range wins only when *first2 < *first1). The output buffer
 * must not overlap either input. */
static void hypre_merge(HYPRE_Int *first1, HYPRE_Int *last1, HYPRE_Int *first2, HYPRE_Int *last2,
                        HYPRE_Int *out)
{
   for ( ; first1 != last1; ++out)
   {
      if (first2 == last2)
      {
         /* second range exhausted: copy the remainder of the first */
         for ( ; first1 != last1; ++first1, ++out)
         {
            *out = *first1;
         }
         return;
      }
      if (*first2 < *first1)
      {
         *out = *first2;
         ++first2;
      }
      else
      {
         *out = *first1;
         ++first1;
      }
   }
   /* first range exhausted: copy the remainder of the second */
   for ( ; first2 != last2; ++first2, ++out)
   {
      *out = *first2;
   }
}

/* Binary-search worker for kth_element: searches indices [left,right] of a1
 * for the split point i (with matching j = k - i - 1 in a2) such that taking
 * a1[0:i) and a2[0:j+1) yields the k smallest elements of the two sorted
 * arrays combined. Writes the split to *out1/*out2. */
static void kth_element_(
   HYPRE_Int *out1, HYPRE_Int *out2,
   HYPRE_Int *a1, HYPRE_Int *a2, HYPRE_Int left, HYPRE_Int right, HYPRE_Int n1, HYPRE_Int n2,
   HYPRE_Int k)
{
   while (1)
   {
      HYPRE_Int i = (left + right)/2; // right < k -> i < k
      HYPRE_Int j = k - i - 1;

#ifdef DBG_MERGE_SORT
      assert(left <= right && right <= k);
      assert(i < k); // i == k implies left == right == k that can never happen
      assert(j >= 0 && j < n2);
#endif

      /* split is valid when every kept element is <= every excluded one */
      if ((j == -1 || a1[i] >= a2[j]) && (j == n2 - 1 || a1[i] <= a2[j + 1]))
      {
         *out1 = i;
         *out2 = j + 1;
         return;
      }
      else if (j >= 0 && a2[j] >= a1[i] && (i == n1 - 1 || a2[j] <= a1[i + 1]))
      {
         *out1 = i + 1;
         *out2 = j;
         return;
      }
      else if (a1[i] > a2[j] && j != n2 - 1 && a1[i] > a2[j + 1])
      {
         // search in left half of a1
         right = i - 1;
      }
      else
      {
         // search in right half of a1
         left = i + 1;
      }
   }
}

/**
 * Partition the input so that
 * a1[0:*out1) and a2[0:*out2) contain the smallest k elements
 *
 * a1 and a2 must each be sorted ascending. Handles the degenerate and
 * one-sided cases directly, then binary-searches the shorter array
 * (via kth_element_) for the general case.
 */
static void kth_element(
   HYPRE_Int *out1, HYPRE_Int *out2,
   HYPRE_Int *a1, HYPRE_Int *a2, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k)
{
   // either of the inputs is empty
   if (n1 == 0)
   {
      *out1 = 0; *out2 = k;
      return;
   }
   if (n2 == 0)
   {
      *out1 = k; *out2 = 0;
      return;
   }
   if (k >= n1 + n2)
   {
      *out1 = n1; *out2 = n2;
      return;
   }

   // one is greater than the other
   if (k < n1 && a1[k] <= a2[0])
   {
      *out1 = k; *out2 = 0;
      return;
   }
   if (k - n1 >= 0 && a2[k - n1] >= a1[n1 - 1])
   {
      *out1 = n1; *out2 = k - n1;
      return;
   }
   if (k < n2 && a2[k] <= a1[0])
   {
      *out1 = 0; *out2 = k;
      return;
   }
   if (k - n2 >= 0 && a1[k - n2] >= a2[n2 - 1])
   {
      *out1 = k - n2; *out2 = n2;
      return;
   }
   // now k > 0

   // faster to do binary search on the shorter sequence
   if (n1 > n2)
   {
      SWAP(HYPRE_Int, n1, n2);
      SWAP(HYPRE_Int *, a1, a2);
      SWAP(HYPRE_Int *, out1, out2);
   }

   if (k < (n1 + n2)/2)
   {
      kth_element_(out1, out2, a1, a2, 0, hypre_min(n1 - 1, k), n1, n2, k);
   }
   else
   {
      // when k is big, faster to find (n1 + n2 - k)th biggest element
      HYPRE_Int offset1 = hypre_max(k - n2, 0), offset2 = hypre_max(k - n1, 0);
      HYPRE_Int new_k = k - offset1 - offset2;

      HYPRE_Int new_n1 = hypre_min(n1 - offset1, new_k + 1);
      HYPRE_Int new_n2 = hypre_min(n2 - offset2, new_k + 1);
      kth_element_(out1, out2, a1 + offset1, a2 + offset2, 0, new_n1 - 1, new_n1, new_n2, new_k);

      *out1 += offset1;
      *out2 += offset2;
   }
#ifdef DBG_MERGE_SORT
   assert(*out1 + *out2 == k);
#endif
}

/**
 * Cooperative merge of two sorted ranges by num_threads threads; each caller
 * computes (via kth_element) and merges only its own disjoint slice of the
 * output, so all participating threads together produce the full merge.
 *
 * @param num_threads number of threads that participate in this merge
 * @param my_thread_num thread id (zero-based) among the threads that participate in this merge
 */
static void hypre_parallel_merge(
   HYPRE_Int *first1, HYPRE_Int *last1, HYPRE_Int *first2, HYPRE_Int *last2,
   HYPRE_Int *out,
   HYPRE_Int num_threads, HYPRE_Int my_thread_num)
{
   HYPRE_Int n1 = last1 - first1;
   HYPRE_Int n2 = last2 - first2;
   HYPRE_Int n = n1 + n2;
   HYPRE_Int n_per_thread = (n + num_threads - 1)/num_threads;
   HYPRE_Int begin_rank = hypre_min(n_per_thread*my_thread_num, n);
   HYPRE_Int end_rank = hypre_min(begin_rank + n_per_thread, n);

#ifdef DBG_MERGE_SORT
   assert(std::is_sorted(first1, last1));
   assert(std::is_sorted(first2, last2));
#endif

   /* partition both inputs at this thread's output ranks */
   HYPRE_Int begin1, begin2, end1, end2;
   kth_element(&begin1, &begin2, first1, first2, n1, n2, begin_rank);
   kth_element(&end1, &end2, first1, first2, n1, n2, end_rank);

   /* Adjust the split when equal elements straddle the partition point, so
    * that neighboring threads' slices stay consistent with each other.
    * NOTE(review): the exact invariant restored here (which side of a tie
    * each duplicate lands on) is subtle — confirm against kth_element's
    * tie-breaking before modifying. */
   while (begin1 > end1 && begin1 > 0 && begin2 < n2 && first1[begin1 - 1] == first2[begin2])
   {
#ifdef DBG_MERGE_SORT
      printf("%s:%d\n", __FILE__, __LINE__);
#endif
      begin1--;
      begin2++;
   }
   while (begin2 > end2 && end1 > 0 && end2 < n2 && first1[end1 - 1] == first2[end2])
   {
#ifdef DBG_MERGE_SORT
      printf("%s:%d\n", __FILE__, __LINE__);
#endif
      end1--;
      end2++;
   }

#ifdef DBG_MERGE_SORT
   assert(begin1 <= end1);
   assert(begin2 <= end2);
#endif

   /* sequential merge of this thread's slice into its disjoint output chunk */
   hypre_merge(
      first1 + begin1, first1 + end1,
      first2 + begin2, first2 + end2,
      out + begin1 + begin2);

#ifdef DBG_MERGE_SORT
   assert(std::is_sorted(out + begin1 + begin2, out + end1 + end2));
#endif
}

/**
 * Thread-parallel merge sort.
 *
 * @param in    array of len elements to sort; used as one of the two
 *              ping-pong buffers and may be overwritten
 * @param temp  caller-provided scratch buffer of at least len elements
 * @param len   number of elements
 * @param out   on return, points at whichever of in/temp holds the sorted
 *              data (depends on the parity of the merge rounds)
 */
void hypre_merge_sort(HYPRE_Int *in, HYPRE_Int *temp, HYPRE_Int len, HYPRE_Int **out)
{
   if (0 == len) return;
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime();
#endif

#ifdef DBG_MERGE_SORT
   HYPRE_Int *dbg_buf = new HYPRE_Int[len];
   std::copy(in, in + len, dbg_buf);
   std::sort(dbg_buf, dbg_buf + len);
#endif

   // HYPRE_Int thread_private_len[hypre_NumThreads()];
   // HYPRE_Int out_len = 0;

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
   {
      HYPRE_Int num_threads = hypre_NumActiveThreads();
      HYPRE_Int my_thread_num = hypre_GetThreadNum();

      // thread-private sort: each thread qsorts its own contiguous slice
      HYPRE_Int i_per_thread = (len + num_threads - 1)/num_threads;
      HYPRE_Int i_begin = hypre_min(i_per_thread*my_thread_num, len);
      HYPRE_Int i_end = hypre_min(i_begin + i_per_thread, len);

      hypre_qsort0(in, i_begin, i_end - 1);

      // merge sorted sequences: pairs of adjacent thread-groups are merged
      // each round, ping-ponging between the in and temp buffers
      HYPRE_Int in_group_size;
      HYPRE_Int *in_buf = in;
      HYPRE_Int *out_buf = temp;
      for (in_group_size = 1; in_group_size < num_threads; in_group_size *= 2)
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
         // merge 2 in-groups into 1 out-group
         HYPRE_Int out_group_size = in_group_size*2;
         HYPRE_Int group_leader = my_thread_num/out_group_size*out_group_size;
         // HYPRE_Int group_sub_leader = hypre_min(group_leader + in_group_size, num_threads - 1);
         HYPRE_Int id_in_group = my_thread_num%out_group_size;
         HYPRE_Int num_threads_in_group =
            hypre_min(group_leader + out_group_size, num_threads) - group_leader;

         HYPRE_Int in_group1_begin = hypre_min(i_per_thread*group_leader, len);
         HYPRE_Int in_group1_end = hypre_min(in_group1_begin + i_per_thread*in_group_size, len);

         HYPRE_Int in_group2_begin = hypre_min(in_group1_begin + i_per_thread*in_group_size, len);
         HYPRE_Int in_group2_end = hypre_min(in_group2_begin + i_per_thread*in_group_size, len);

         hypre_parallel_merge(
            in_buf + in_group1_begin, in_buf + in_group1_end,
            in_buf + in_group2_begin, in_buf + in_group2_end,
            out_buf + in_group1_begin,
            num_threads_in_group,
            id_in_group);

         /* swap ping-pong buffers for the next round
          * (this local 'temp' intentionally shadows the parameter) */
         HYPRE_Int *temp = in_buf;
         in_buf = out_buf;
         out_buf = temp;
      }

      /* every thread writes the same final buffer pointer */
      *out = in_buf;
   } /* omp parallel */

#ifdef DBG_MERGE_SORT
   assert(std::equal(*out, *out + len, dbg_buf));

   delete[] dbg_buf;
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime();
#endif
}

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
/**
 * Sorts in (destructively, via hypre_merge_sort) and builds a concurrent
 * hash map from value -> position in the sorted array. Exactly one of
 * in/temp survives as *out; the other buffer is freed before returning.
 *
 * @param in          array to sort; freed here unless it becomes *out
 * @param len         number of elements (no-op when 0)
 * @param out         receives the sorted array
 * @param inverse_map receives the value->index map (created here)
 */
void hypre_sort_and_create_inverse_map(
   HYPRE_Int *in, HYPRE_Int len, HYPRE_Int **out, hypre_UnorderedIntMap *inverse_map)
{
   if (len == 0)
   {
      return;
   }
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime();
#endif

   HYPRE_Int *temp = hypre_TAlloc(HYPRE_Int, len);
   hypre_merge_sort(in, temp, len, out);
   /* size 2*len keeps the hopscotch table at <= 50% load */
   hypre_UnorderedIntMapCreate(inverse_map, 2*len, 16*hypre_NumThreads());
   HYPRE_Int i;
#pragma omp parallel for HYPRE_SMP_SCHEDULE
   for (i = 0; i < len; i++)
   {
      HYPRE_Int old = hypre_UnorderedIntMapPutIfAbsent(inverse_map, (*out)[i], i);
      /* NOTE(review): this assert is outside the DBG_MERGE_SORT guard, but
       * <assert.h> is only included under #ifdef DBG_MERGE_SORT above —
       * verify that _hypre_utilities.h provides assert (or switch to
       * hypre_assert); also assumes input values are distinct. */
      assert(old == HYPRE_HOPSCOTCH_HASH_EMPTY);
#ifdef DBG_MERGE_SORT
      if (hypre_UnorderedIntMapGet(inverse_map, (*out)[i]) != i)
      {
         fprintf(stderr, "%d %d\n", i, (*out)[i]);
         assert(false);
      }
#endif
   }

#ifdef DBG_MERGE_SORT
   std::unordered_map<HYPRE_Int, HYPRE_Int> inverse_map2(len);
   for (HYPRE_Int i = 0; i < len; ++i)
   {
      inverse_map2[(*out)[i]] = i;
      if (hypre_UnorderedIntMapGet(inverse_map, (*out)[i]) != i)
      {
         fprintf(stderr, "%d %d\n", i, (*out)[i]);
         assert(false);
      }
   }
   assert(hypre_UnorderedIntMapSize(inverse_map) == len);
#endif

   /* free whichever ping-pong buffer did not end up as the output */
   if (*out == in)
   {
      hypre_TFree(temp);
   }
   else
   {
      hypre_TFree(in);
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime();
#endif
}
#endif

/* vim: set tabstop=8 softtabstop=3 sw=3 expandtab: */
dynamic_enough_threads.c
// RUN: %libomp-compile-and-run | FileCheck %s // REQUIRES: ompt #include "callback.h" int main() { omp_set_dynamic(1); #pragma omp parallel num_threads(4) { print_ids(0); print_ids(1); } print_fuzzy_address(1); // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_thread_begin' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_thread_end' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task' //team-size of 1-4 is expected // CHECK: 0: NULL_POINTER=[[NULL:.*$]] // make sure initial data pointers are null // CHECK-NOT: 0: parallel_data initially not null // CHECK-NOT: 0: task_data initially not null // CHECK-NOT: 0: thread_data initially not null // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, invoker=[[PARALLEL_INVOKER:[0-9]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]], team_size={{[1-4]}} // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]], task_id=[[PARENT_TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], invoker=[[PARALLEL_INVOKER]] // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]] return 0; }
Analyzer.h
#ifndef ANALYZER_H
#define ANALYZER_H

/*************************************************************
 * Copyright: (C) 2012 by Markus Schordan                    *
 * Author   : Markus Schordan                                *
 * License  : see file LICENSE in the CodeThorn distribution *
 *************************************************************/

#include <iostream>
#include <fstream>
#include <set>
#include <string>
#include <sstream>

#include <omp.h>

#include <boost/unordered_set.hpp>

#include "AstTerm.h"
#include "Labeler.h"
#include "CFAnalysis.h"
#include "RoseAst.h"
#include "SgNodeHelper.h"
#include "ExprAnalyzer.h"
#include "StateRepresentations.h"
#include "PropertyValueTable.h"

// we use INT_MIN, INT_MAX
#include "limits.h"

namespace CodeThorn {

// bit flags selecting categories of debug output
#define DEBUGPRINT_STMT 0x1
#define DEBUGPRINT_STATE 0x2
#define DEBUGPRINT_STATEMOD 0x4
#define DEBUGPRINT_INFO 0x8

/*!
 * \author Markus Schordan
 * \date 2012.
 */
// AST attribute recording the label, initial label and final-label set
// computed for an AST node (attached for CFG visualization/debugging).
class AstNodeInfo : public AstAttribute {
 public:
  AstNodeInfo():label(0),initialLabel(0){}
  std::string toString() {
    std::stringstream ss;
    ss<<"\\n lab:"<<label<<" ";
    ss<<"init:"<<initialLabel<<" ";
    ss<<"final:"<<finalLabelsSet.toString();
    return ss.str();
  }
  void setLabel(Label l) { label=l; }
  void setInitialLabel(Label l) { initialLabel=l; }
  void setFinalLabels(LabelSet lset) { finalLabelsSet=lset; }
 private:
  Label label;
  Label initialLabel;
  LabelSet finalLabelsSet;
};

typedef list<const EState*> EStateWorkList;
typedef pair<int, const EState*> FailedAssertion;
enum AnalyzerMode { AM_ALL_STATES, AM_LTL_STATES };

class Analyzer;

// Tracks how many distinct values each variable has taken during state-space
// exploration and classifies variables as "hot" (candidates for topification)
// once they exceed a threshold.
class VariableValueMonitor {
 public:
  enum VariableMode { VARMODE_FORCED_TOP, VARMODE_ADAPTIVE_TOP, VARMODE_PRECISE, VARMODE_FORCED_PRECISE};
  VariableValueMonitor();
  void setThreshold(size_t threshold);
  size_t getThreshold();
  bool isActive();
  // the init function only uses the variableIds of a given estate (not its values) for initialization
  void init(const EState* estate);
  void init(const PState* pstate);
  VariableIdSet getHotVariables(Analyzer* analyzer, const EState* estate);
  VariableIdSet getHotVariables(Analyzer* analyzer, const PState* pstate);
  VariableIdSet getVariables();
  void setVariableMode(VariableMode,VariableId);
  VariableMode getVariableMode(VariableId);
  void update(Analyzer* analyzer, EState* estate);
  bool isHotVariable(Analyzer* analyzer, VariableId varId);
  std::string toString(VariableIdMapping* variableIdMapping);
#if 0
  bool isVariableBeyondTreshold(Analyzer* analyzer, VariableId varId);
#endif
 private:
  std::map<VariableId,std::set<int>* > _variablesMap;
  std::map<VariableId,VariableMode> _variablesModeMap;
  long int _threshold;
};

/*!
 * \author Markus Schordan
 * \date 2012.
 */
// Central state-space exploration engine: builds the transition graph of
// estates from the ICFG, runs one of several solvers, and offers graph
// reduction/folding operations and counterexample extraction.
class Analyzer {
  friend class Visualizer;
  friend class VariableValueMonitor;
 public:
  Analyzer();
  ~Analyzer();
  void initAstNodeInfo(SgNode* node);
  bool isActiveGlobalTopify();
  static std::string nodeToString(SgNode* node);
  void initializeSolver1(std::string functionToStartAt,SgNode* root, bool oneFunctionOnly);
  void initializeTraceSolver(std::string functionToStartAt,SgNode* root);
  void continueAnalysisFrom(EState* newStartEState);
  PState analyzeAssignRhs(PState currentPState,VariableId lhsVar, SgNode* rhs,ConstraintSet& cset);
  EState analyzeVariableDeclaration(SgVariableDeclaration* nextNodeToAnalyze1,EState currentEState, Label targetLabel);
  list<EState> transferFunction(Edge edge, const EState* estate);

  // worklist management
  void addToWorkList(const EState* estate);
  const EState* addToWorkListIfNew(EState estate);
  const EState* takeFromWorkList();
  bool isInWorkList(const EState* estate);
  bool isEmptyWorkList();
  const EState* topWorkList();
  const EState* popWorkList();
  void recordTransition(const EState* sourceEState, Edge e, const EState* targetEState);
  void printStatusMessage(bool);
  bool isLTLRelevantLabel(Label label);
  bool isStdIOLabel(Label label);
  bool isStartLabel(Label label);
  std::set<const EState*> nonLTLRelevantEStates();
  bool isTerminationRelevantLabel(Label label);

  // 6 experimental functions
  // reduces all states different to stdin and stdout.
  void stdIOFoldingOfTransitionGraph();
  void semanticFoldingOfTransitionGraph();
  void semanticEliminationOfTransitions();
  int semanticEliminationOfSelfInInTransitions(); // eliminates only input states
  int semanticEliminationOfDeadStates();
  int semanticFusionOfInInTransitions(); // requires semantically reduced STG
  int semanticExplosionOfInputNodesFromOutputNodeConstraints();

  // consistency checks (debugging aids)
  bool checkEStateSet();
  bool isConsistentEStatePtrSet(std::set<const EState*> estatePtrSet);
  bool checkTransitionGraph();
  // this function requires that no LTL graph is computed
  void deleteNonRelevantEStates();
  // bypasses and removes all states that are not standard I/O states
  void removeNonIOStates();
  // bypasses and removes all states that are not stdIn/stdOut/stdErr/failedAssert states
  void reduceToObservableBehavior();
  // erases transitions that lead directly from one output state to another output state
  void removeOutputOutputTransitions();
  // erases transitions that lead directly from one input state to another input state
  void removeInputInputTransitions();
  // cuts off all paths in the transition graph that lead to leaves
  // (recursively until only paths of infinite length remain)
  void pruneLeavesRec();
  // connects start, input, output and worklist states according to possible paths in the transition graph.
  // removes all states and transitions that are not necessary for the graph that only consists of these new transitions. The two parameters allow to select input and/or output states to remain in the STG.
  void reduceGraphInOutWorklistOnly(bool includeIn=true, bool includeOut=true, bool includeErr=false);
  // extracts input sequences leading to each discovered failing assertion where discovered for the first time.
  // stores results in PropertyValueTable "reachabilityResults".
  // returns length of the longest of these sequences if it can be guaranteed that all processed traces are the
  // shortest ones leading to the individual failing assertion (returns -1 otherwise).
  int extractAssertionTraces();

 private:
  /*! if state exists in stateSet, a pointer to the existing state is returned otherwise
      a new state is entered into stateSet and a pointer to it is returned. */
  const PState* processNew(PState& s);
  const PState* processNewOrExisting(PState& s);
  const EState* processNew(EState& s);
  const EState* processNewOrExisting(EState& s);
  const EState* processCompleteNewOrExisting(const EState* es);
  void topifyVariable(PState& pstate, ConstraintSet& cset, VariableId varId);
  EStateSet::ProcessingResult process(EState& s);
  EStateSet::ProcessingResult process(Label label, PState pstate, ConstraintSet cset, InputOutput io);
  const ConstraintSet* processNewOrExisting(ConstraintSet& cset);
  EState createEState(Label label, PState pstate, ConstraintSet cset);
  EState createEState(Label label, PState pstate, ConstraintSet cset, InputOutput io);

  //returns a list of transitions representing existing paths from "startState" to all possible input/output/error states (no output -> output)
  // collection of transitions to worklist states currently disabled. the returned set has to be deleted by the calling function.
  boost::unordered_set<Transition*>* transitionsToInOutErrAndWorklist(
      const EState* startState, bool includeIn, bool includeOut, bool includeErr);
  boost::unordered_set<Transition*>* transitionsToInOutErrAndWorklist(
      const EState* currentState, const EState* startState,
      boost::unordered_set<Transition*>* results, boost::unordered_set<const EState*>* visited,
      bool includeIn, bool includeOut, bool includeErr);
  // adds a string representation of the shortest input path from start state to assertEState to reachabilityResults. returns the length of the
  // counterexample input sequence.
  int addCounterexample(int assertCode, const EState* assertEState);
  // returns a list of EStates from source to target. Target has to come before source in the STG (reversed trace).
  list<const EState*>reverseInOutSequenceBreadthFirst(const EState* source, const EState* target, bool counterexampleWithOutput = false);
  // returns a list of EStates from source to target (shortest input path).
  // please note: target has to be a predecessor of source (reversed trace)
  list<const EState*> reverseInOutSequenceDijkstra(const EState* source, const EState* target, bool counterexampleWithOutput = false);
  list<const EState*> filterStdInOutOnly(list<const EState*>& states, bool counterexampleWithOutput = false) const;
  std::string reversedInOutRunToString(list<const EState*>& run);
  //returns the shortest possible number of input states on the path leading to "target".
  int inputSequenceLength(const EState* target);

 public:
  SgNode* getCond(SgNode* node);
  void generateAstNodeInfo(SgNode* node);
  std::string generateSpotSTG();
 private:
  void generateSpotTransition(std::stringstream& ss, const Transition& t);
  //less than comparisons on two states according to (#input transitions * #output transitions)
  bool indegreeTimesOutdegreeLessThan(const EState* a, const EState* b);
 public:
  //stores a backup of the created transitionGraph
  void storeStgBackup();
  //load previous backup of the transitionGraph, storing the current version as a backup instead
  void swapStgWithBackup();
  //solver 8 becomes the active solver used by the analyzer. Deletion of previous data iff "resetAnalyzerData" is set to true.
  void setAnalyzerToSolver8(EState* startEState, bool resetAnalyzerData);
  //! requires init
  void runSolver1();
  void runSolver2();
  void runSolver3();
  void runSolver4();
  void runSolver5();
  void runSolver6();
  void runSolver7();
  void runSolver8();
  void runSolver();
  //! The analyzer requires a CFAnalysis to obtain the ICFG.
  void setCFAnalyzer(CFAnalysis* cf) { cfanalyzer=cf; }
  CFAnalysis* getCFAnalyzer() const { return cfanalyzer; }
  //void initializeVariableIdMapping(SgProject* project) { variableIdMapping.computeVariableSymbolMapping(project); }
  // access functions for computed information
  VariableIdMapping* getVariableIdMapping() { return &variableIdMapping; }
  SPRAY::IOLabeler* getLabeler() const {
    SPRAY::IOLabeler* ioLabeler=dynamic_cast<SPRAY::IOLabeler*>(cfanalyzer->getLabeler());
    ROSE_ASSERT(ioLabeler);
    return ioLabeler;
  }
  Flow* getFlow() { return &flow; }
  PStateSet* getPStateSet() { return &pstateSet; }
  EStateSet* getEStateSet() { return &estateSet; }
  TransitionGraph* getTransitionGraph() { return &transitionGraph; }
  ConstraintSetMaintainer* getConstraintSetMaintainer() { return &constraintSetMaintainer; }

  //private: TODO
  Flow flow;
  SgNode* startFunRoot;
  CFAnalysis* cfanalyzer;
  VariableValueMonitor variableValueMonitor;
  void setVariableValueThreshold(int threshold) { variableValueMonitor.setThreshold(threshold); }

 public:
  //! compute the VariableIds of variable declarations
  VariableIdMapping::VariableIdSet determineVariableIdsOfVariableDeclarations(set<SgVariableDeclaration*> decls);
  //! compute the VariableIds of SgInitializedNamePtrList
  VariableIdMapping::VariableIdSet determineVariableIdsOfSgInitializedNames(SgInitializedNamePtrList& namePtrList);
  std::set<std::string> variableIdsToVariableNames(VariableIdMapping::VariableIdSet);
  typedef list<SgVariableDeclaration*> VariableDeclarationList;
  VariableDeclarationList computeUnusedGlobalVariableDeclarationList(SgProject* root);
  VariableDeclarationList computeUsedGlobalVariableDeclarationList(SgProject* root);

  //bool isAssertExpr(SgNode* node);
  bool isFailedAssertEState(const EState* estate);
  //! adds a specific code to the io-info of an estate which is checked by isFailedAsserEState and determines a failed-assert estate. Note that the actual assert (and its label) is associated with the previous estate (this information can therefore be obtained from a transition-edge in the transition graph).
  EState createFailedAssertEState(const EState estate, Label target);
  //! list of all asserts in a program
  list<SgNode*> listOfAssertNodes(SgProject *root);
  //! rers-specific error_x: assert(0) version
  list<pair<SgLabelStatement*,SgNode*> > listOfLabeledAssertNodes(SgProject *root);
  void initLabeledAssertNodes(SgProject* root) { _assertNodes=listOfLabeledAssertNodes(root); }
  size_t getNumberOfErrorLabels();
  std::string labelNameOfAssertLabel(Label lab) {
    std::string labelName;
    for(list<pair<SgLabelStatement*,SgNode*> >::iterator i=_assertNodes.begin();i!=_assertNodes.end();++i)
      if(lab==getLabeler()->getLabel((*i).second))
        labelName=SgNodeHelper::getLabelName((*i).first);
    //assert(labelName.size()>0);
    return labelName;
  }
  bool isCppLabeledAssertLabel(Label lab) { return labelNameOfAssertLabel(lab).size()>0; }
  InputOutput::OpType ioOp(const EState* estate) const;

  // configuration setters/getters
  void setDisplayDiff(int diff) { _displayDiff=diff; }
  void setSolver(int solver) { _solver=solver; }
  int getSolver() { return _solver;}
  void setSemanticFoldThreshold(int t) { _semanticFoldThreshold=t; }
  void setLTLVerifier(int v) { _ltlVerifier=v; }
  int getLTLVerifier() { return _ltlVerifier; }
  void setNumberOfThreadsToUse(int n) { _numberOfThreadsToUse=n; }
  int getNumberOfThreadsToUse() { return _numberOfThreadsToUse; }
  void insertInputVarValue(int i) { _inputVarValues.insert(i); }
  void addInputSequenceValue(int i) { _inputSequence.push_back(i); }
  void resetToEmptyInputSequence() { _inputSequence.clear(); }
  void resetInputSequenceIterator() { _inputSequenceIterator=_inputSequence.begin(); }
  const EState* getEstateBeforeMissingInput() {return _estateBeforeMissingInput;}
  const EState* getLatestErrorEState() {return _latestErrorEState;}
  void setTreatStdErrLikeFailedAssert(bool x) { _treatStdErrLikeFailedAssert=x; }
  int numberOfInputVarValues() { return _inputVarValues.size(); }
  std::set<int> getInputVarValues() { return _inputVarValues; }
  list<pair<SgLabelStatement*,SgNode*> > _assertNodes;
  void setCsvAssertLiveFileName(std::string filename) { _csv_assert_live_file=filename; }
  VariableId globalVarIdByName(std::string varName) { return globalVarName2VarIdMapping[varName]; }
  void setStgTraceFileName(std::string filename) {
    _stg_trace_filename=filename;
    std::ofstream fout;
    fout.open(_stg_trace_filename.c_str());    // create new file/overwrite existing file
    fout<<"START"<<endl;
    fout.close();    // close. Will be used with append.
  }
  std::string _csv_assert_live_file; // to become private
 private:
  std::string _stg_trace_filename;
 public:
  // only used temporarily for binary-binding prototype
  std::map<std::string,VariableId> globalVarName2VarIdMapping;
  std::vector<bool> binaryBindingAssert;

  void setAnalyzerMode(AnalyzerMode am) { _analyzerMode=am; }
  void setMaxTransitions(size_t maxTransitions) { _maxTransitions=maxTransitions; }
  void setMaxIterations(size_t maxIterations) { _maxIterations=maxIterations; }
  void setMaxTransitionsForcedTop(size_t maxTransitions) { _maxTransitionsForcedTop=maxTransitions; }
  void setMaxIterationsForcedTop(size_t maxIterations) { _maxIterationsForcedTop=maxIterations; }
  void eventGlobalTopifyTurnedOn();
  void setMinimizeStates(bool minimizeStates) { _minimizeStates=minimizeStates; }
  bool isIncompleteSTGReady();
  bool isPrecise();
  PropertyValueTable reachabilityResults;
  int reachabilityAssertCode(const EState* currentEStatePtr);
  enum ExplorationMode { EXPL_DEPTH_FIRST, EXPL_BREADTH_FIRST, EXPL_LOOP_AWARE };
  void setExplorationMode(ExplorationMode em) { _explorationMode=em; }
  ExplorationMode getExplorationMode() { return _explorationMode; }
  void setSkipSelectedFunctionCalls(bool defer) {
    _skipSelectedFunctionCalls=true;
    exprAnalyzer.setSkipSelectedFunctionCalls(true);
  }
  void setSkipArrayAccesses(bool skip) { exprAnalyzer.setSkipArrayAccesses(skip); }
  bool getSkipArrayAccesses() { return exprAnalyzer.getSkipArrayAccesses(); }
  ExprAnalyzer* getExprAnalyzer();
  list<FailedAssertion> getFirstAssertionOccurences(){return _firstAssertionOccurences;}
  // atomically bumps the precise or approximated iteration counter,
  // depending on whether the analyzer is still in precise mode
  void incIterations() {
    if(isPrecise()) {
#pragma omp atomic
      _iterations+=1;
    } else {
#pragma omp atomic
      _approximated_iterations+=1;
    }
  }
  bool isLoopCondLabel(Label lab);
  int getApproximatedIterations() { return _approximated_iterations; }
  int getIterations() { return _iterations; }

 private:
  set<int> _inputVarValues;
  list<int> _inputSequence;
  list<int>::iterator _inputSequenceIterator;
  ExprAnalyzer exprAnalyzer;
  VariableIdMapping variableIdMapping;
  EStateWorkList estateWorkList;
  EStateSet estateSet;
  PStateSet pstateSet;
  ConstraintSetMaintainer constraintSetMaintainer;
  TransitionGraph transitionGraph;
  TransitionGraph backupTransitionGraph;
  set<const EState*> transitionSourceEStateSetOfLabel(Label lab);
  int _displayDiff;
  int _numberOfThreadsToUse;
  int _ltlVerifier;
  int _semanticFoldThreshold;
  VariableIdMapping::VariableIdSet _variablesToIgnore;
  int _solver;
  AnalyzerMode _analyzerMode;
  set<const EState*> _newNodesToFold;
  long int _maxTransitions;
  long int _maxIterations;
  long int _maxTransitionsForcedTop;
  long int _maxIterationsForcedTop;
  bool _treatStdErrLikeFailedAssert;
  bool _skipSelectedFunctionCalls;
  ExplorationMode _explorationMode;
  list<FailedAssertion> _firstAssertionOccurences;
  const EState* _estateBeforeMissingInput;
  const EState* _latestOutputEState;
  const EState* _latestErrorEState;
  bool _minimizeStates;
  bool _topifyModeActive;
  int _iterations;
  int _approximated_iterations;
  int _curr_iteration_cnt;
  int _next_iteration_cnt;
}; // end of class Analyzer

} // end of namespace CodeThorn

#define RERS_SPECIALIZATION
#ifdef RERS_SPECIALIZATION
// RERS-binary-binding-specific declarations
#define STR_VALUE(arg) #arg
#define COPY_PSTATEVAR_TO_GLOBALVAR(VARNAME) VARNAME[thread_id] = pstate[analyzer->globalVarIdByName(STR_VALUE(VARNAME))].getValue().getIntValue();
//cout<<"PSTATEVAR:"<<pstate[analyzer->globalVarIdByName(STR_VALUE(VARNAME))].toString()<<"="<<pstate[analyzer->globalVarIdByName(STR_VALUE(VARNAME))].getValue().toString()<<endl;
#define COPY_GLOBALVAR_TO_PSTATEVAR(VARNAME) pstate[analyzer->globalVarIdByName(STR_VALUE(VARNAME))]=CodeThorn::AType::CppCapsuleConstIntLattice(VARNAME[thread_id]);
// macro used to generate the initialization of global variables in the hybrid analyzer (linked binary with threads)
#define INIT_GLOBALVAR(VARNAME) VARNAME = new int[numberOfThreads];
namespace RERS_Problem {
  void rersGlobalVarsCallInit(CodeThorn::Analyzer* analyzer, CodeThorn::PState& pstate, int thread_id);
  void rersGlobalVarsCallReturnInit(CodeThorn::Analyzer* analyzer, CodeThorn::PState& pstate, int thread_id);
  void rersGlobalVarsArrayInit(int numberOfThreads);
#if 0
  // input variable passed as a parameter (obsolete since transformation of "input" into a global variable)
  void calculate_output(int);
#endif
  void calculate_output(int numberOfThreads);
  extern int* output;
}
// END OF RERS-binary-binding-specific declarations
#endif

#endif
image.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % IIIII M M AAA GGGG EEEEE % % I MM MM A A G E % % I M M M AAAAA G GG EEE % % I M M A A G G E % % IIIII M M A A GGGG EEEEE % % % % % % MagickCore Image Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/animate.h" #include "magick/artifact.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/compress.h" #include "magick/constitute.h" #include "magick/delegate.h" #include "magick/deprecate.h" #include "magick/display.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/histogram.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/magic.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/memory-private.h" #include "magick/module.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/semaphore.h" #include "magick/signature-private.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/timer.h" #include "magick/timer-private.h" #include "magick/token.h" #include "magick/token-private.h" #include "magick/utility.h" #include "magick/version.h" #include "magick/xwindow-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e % % % % % % 
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireImage() returns a pointer to an image structure initialized to
%  default values.
%
%  The format of the AcquireImage method is:
%
%      Image *AcquireImage(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: Many of the image default values are set from this
%      structure.  For example, filename, compression, depth, background color,
%      and others.
%
*/
MagickExport Image *AcquireImage(const ImageInfo *image_info)
{
  const char
    *option;

  Image
    *image;

  MagickStatusType
    flags;

  /*
    Allocate image structure (AcquireCriticalMemory aborts on failure,
    so image is never NULL past this point).
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  image=(Image *) AcquireCriticalMemory(sizeof(*image));
  (void) memset(image,0,sizeof(*image));
  /*
    Initialize Image structure.
  */
  (void) CopyMagickString(image->magick,"MIFF",MaxTextExtent);
  image->storage_class=DirectClass;
  image->depth=MAGICKCORE_QUANTUM_DEPTH;
  image->colorspace=sRGBColorspace;
  image->rendering_intent=PerceptualIntent;
  image->gamma=1.000f/2.200f;  /* sRGB default gamma */
  /* sRGB primaries and white point (x,y,z chromaticity coordinates). */
  image->chromaticity.red_primary.x=0.6400f;
  image->chromaticity.red_primary.y=0.3300f;
  image->chromaticity.red_primary.z=0.0300f;
  image->chromaticity.green_primary.x=0.3000f;
  image->chromaticity.green_primary.y=0.6000f;
  image->chromaticity.green_primary.z=0.1000f;
  image->chromaticity.blue_primary.x=0.1500f;
  image->chromaticity.blue_primary.y=0.0600f;
  image->chromaticity.blue_primary.z=0.7900f;
  image->chromaticity.white_point.x=0.3127f;
  image->chromaticity.white_point.y=0.3290f;
  image->chromaticity.white_point.z=0.3583f;
  image->interlace=NoInterlace;
  image->ticks_per_second=UndefinedTicksPerSecond;
  image->compose=OverCompositeOp;
  image->blur=1.0;
  InitializeExceptionInfo(&image->exception);
  (void) QueryColorDatabase(BackgroundColor,&image->background_color,
    &image->exception);
  (void) QueryColorDatabase(BorderColor,&image->border_color,&image->exception);
  (void) QueryColorDatabase(MatteColor,&image->matte_color,&image->exception);
  (void) QueryColorDatabase(TransparentColor,&image->transparent_color,
    &image->exception);
  GetTimerInfo(&image->timer);
  image->ping=MagickFalse;
  image->cache=AcquirePixelCache(0);
  image->blob=CloneBlobInfo((BlobInfo *) NULL);
  image->timestamp=GetMagickTime();
  image->debug=IsEventLogging();
  image->reference_count=1;
  image->semaphore=AllocateSemaphoreInfo();
  image->signature=MagickCoreSignature;
  if (image_info == (ImageInfo *) NULL)
    return(image);
  /*
    Transfer image info.
  */
  SetBlobExempt(image,image_info->file != (FILE *) NULL ? MagickTrue :
    MagickFalse);
  (void) CopyMagickString(image->filename,image_info->filename,MaxTextExtent);
  (void) CopyMagickString(image->magick_filename,image_info->filename,
    MaxTextExtent);
  (void) CopyMagickString(image->magick,image_info->magick,MaxTextExtent);
  if (image_info->size != (char *) NULL)
    {
      (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info);
      image->columns=image->extract_info.width;
      image->rows=image->extract_info.height;
      image->offset=image->extract_info.x;
      /* x/y of the size geometry are consumed as an offset, not an origin. */
      image->extract_info.x=0;
      image->extract_info.y=0;
    }
  if (image_info->extract != (char *) NULL)
    {
      RectangleInfo
        geometry;

      (void) memset(&geometry,0,sizeof(geometry));
      flags=ParseAbsoluteGeometry(image_info->extract,&geometry);
      if (((flags & XValue) != 0) || ((flags & YValue) != 0))
        {
          image->extract_info=geometry;
          Swap(image->columns,image->extract_info.width);
          Swap(image->rows,image->extract_info.height);
        }
    }
  image->compression=image_info->compression;
  image->quality=image_info->quality;
  image->endian=image_info->endian;
  image->interlace=image_info->interlace;
  image->units=image_info->units;
  if (image_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      /* "XxY" density: rho is X; sigma, when given, overrides Y. */
      flags=ParseGeometry(image_info->density,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->x_resolution=geometry_info.rho;
      image->y_resolution=image->x_resolution;
      if ((flags & SigmaValue) != 0)
        image->y_resolution=geometry_info.sigma;
    }
  if (image_info->page != (char *) NULL)
    {
      char
        *geometry;

      image->page=image->extract_info;
      geometry=GetPageGeometry(image_info->page);
      (void) ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  if (image_info->depth != 0)
    image->depth=image_info->depth;
  image->dither=image_info->dither;
  image->background_color=image_info->background_color;
  image->border_color=image_info->border_color;
  image->matte_color=image_info->matte_color;
  image->transparent_color=image_info->transparent_color;
  image->ping=image_info->ping;
  image->progress_monitor=image_info->progress_monitor;
  image->client_data=image_info->client_data;
  if (image_info->cache != (void *) NULL)
    ClonePixelCacheMethods(image->cache,image_info->cache);
  (void) SyncImageSettings(image_info,image);
  option=GetImageOption(image_info,"delay");
  if (option != (const char *) NULL)
    {
      GeometryInfo
        geometry_info;

      flags=ParseGeometry(option,&geometry_info);
      if ((flags & GreaterValue) != 0)
        {
          /* ">delay": clamp an existing larger delay down to rho. */
          if ((double) image->delay > floor(geometry_info.rho+0.5))
            image->delay=(size_t) CastDoubleToLong(floor(
              geometry_info.rho+0.5));
        }
      else
        if ((flags & LessValue) != 0)
          {
            /* "<delay": NOTE(review) this branch updates ticks_per_second
               from sigma rather than the delay from rho; it mirrors
               long-standing upstream behavior -- confirm before changing. */
            if ((double) image->delay < floor(geometry_info.rho+0.5))
              image->ticks_per_second=CastDoubleToLong(floor(
                geometry_info.sigma+0.5));
          }
        else
          image->delay=(size_t) CastDoubleToLong(floor(
            geometry_info.rho+0.5));
      if ((flags & SigmaValue) != 0)
        image->ticks_per_second=CastDoubleToLong(floor(
          geometry_info.sigma+0.5));
    }
  option=GetImageOption(image_info,"dispose");
  if (option != (const char *) NULL)
    image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions,
      MagickFalse,option);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e I m a g e I n f o                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireImageInfo() allocates the ImageInfo structure.
%
%  The format of the AcquireImageInfo method is:
%
%      ImageInfo *AcquireImageInfo(void)
%
*/
MagickExport ImageInfo *AcquireImageInfo(void)
{
  ImageInfo
    *image_info;

  /* Allocation failure is fatal: callers always receive a valid pointer. */
  image_info=(ImageInfo *) AcquireMagickMemory(sizeof(*image_info));
  if (image_info == (ImageInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetImageInfo(image_info);
  return(image_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e N e x t I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireNextImage() initializes the next image in a sequence to
%  default values.  The next member of image points to the newly allocated
%  image.  If there is a memory shortage, next is assigned NULL.
%
%  The format of the AcquireNextImage method is:
%
%      void AcquireNextImage(const ImageInfo *image_info,Image *image)
%
%  A description of each parameter follows:
%
%    o image_info: Many of the image default values are set from this
%      structure.  For example, filename, compression, depth, background color,
%      and others.
%
%    o image: the image.
%
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image)
{
  /*
    Allocate image structure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->next=AcquireImage(image_info);
  if (GetNextImageInList(image) == (Image *) NULL)
    return;
  /* Default the next frame's filename to the parent's; an explicit
     image_info filename (when supplied) takes precedence. */
  (void) CopyMagickString(GetNextImageInList(image)->filename,image->filename,
    MaxTextExtent);
  if (image_info != (ImageInfo *) NULL)
    (void) CopyMagickString(GetNextImageInList(image)->filename,
      image_info->filename,MaxTextExtent);
  /* Share the parent's blob, bump the scene number, and link the list. */
  DestroyBlob(GetNextImageInList(image));
  image->next->blob=ReferenceBlob(image->blob);
  image->next->endian=image->endian;
  image->next->scene=image->scene+1;
  image->next->previous=image;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A p p e n d I m a g e s                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AppendImages() takes all images from the current image pointer to the end
%  of the image list and appends them to each other top-to-bottom if the
%  stack parameter is true, otherwise left-to-right.
%
%  The current gravity setting now affects how the image is justified in the
%  final image.
%
%  The format of the AppendImages method is:
%
%      Image *AppendImages(const Image *images,const MagickBooleanType stack,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o stack: A value other than 0 stacks the images top-to-bottom.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AppendImages(const Image *images,
  const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag  "Append/Image"

  CacheView
    *append_view;

  Image
    *append_image;

  MagickBooleanType
    homogeneous_colorspace,
    matte,
    status;

  MagickOffsetType
    n;

  RectangleInfo
    geometry;

  register const Image
    *next;

  size_t
    depth,
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y,
    y_offset;

  /*
    Compute maximum area of appended area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  matte=images->matte;
  number_images=1;
  width=images->columns;
  height=images->rows;
  depth=images->depth;
  homogeneous_colorspace=MagickTrue;
  next=GetNextImageInList(images);
  /*
    One pass over the list: accumulate the canvas size (sum rows for a
    vertical stack, otherwise sum columns), note the deepest depth, whether
    any frame has a matte channel, and whether all colorspaces agree.
  */
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->depth > depth)
      depth=next->depth;
    if (next->colorspace != images->colorspace)
      homogeneous_colorspace=MagickFalse;
    if (next->matte != MagickFalse)
      matte=MagickTrue;
    number_images++;
    if (stack != MagickFalse)
      {
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        continue;
      }
    width+=next->columns;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Append images.
  */
  append_image=CloneImage(images,width,height,MagickTrue,exception);
  if (append_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(append_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&append_image->exception);
      append_image=DestroyImage(append_image);
      return((Image *) NULL);
    }
  if (homogeneous_colorspace == MagickFalse)
    (void) SetImageColorspace(append_image,sRGBColorspace);
  append_image->depth=depth;
  append_image->matte=matte;
  append_image->page=images->page;
  (void) SetImageBackgroundColor(append_image);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  next=images;
  append_view=AcquireAuthenticCacheView(append_image,exception);
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    CacheView
      *image_view;

    MagickBooleanType
      proceed;

    /*
      Justify this frame within its slot per the frame's gravity setting.
    */
    SetGeometry(append_image,&geometry);
    GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry);
    if (stack != MagickFalse)
      x_offset-=geometry.x;
    else
      y_offset-=geometry.y;
    image_view=AcquireVirtualCacheView(next,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(status) \
      magick_number_threads(next,next,next->rows,1)
#endif
    for (y=0; y < (ssize_t) next->rows; y++)
    {
      MagickBooleanType
        sync;

      register const IndexPacket
        *magick_restrict indexes;

      register const PixelPacket
        *magick_restrict p;

      register IndexPacket
        *magick_restrict append_indexes;

      register PixelPacket
        *magick_restrict q;

      register ssize_t
        x;

      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
      q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
        next->columns,1,exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      append_indexes=GetCacheViewAuthenticIndexQueue(append_view);
      for (x=0; x < (ssize_t) next->columns; x++)
      {
        SetPixelRed(q,GetPixelRed(p));
        SetPixelGreen(q,GetPixelGreen(p));
        SetPixelBlue(q,GetPixelBlue(p));
        /* Opaque by default; copy the source opacity only when it exists. */
        SetPixelOpacity(q,OpaqueOpacity);
        if (next->matte != MagickFalse)
          SetPixelOpacity(q,GetPixelOpacity(p));
        if ((next->colorspace == CMYKColorspace) &&
            (append_image->colorspace == CMYKColorspace))
          SetPixelIndex(append_indexes+x,GetPixelIndex(indexes+x));
        p++;
        q++;
      }
      sync=SyncCacheViewAuthenticPixels(append_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    image_view=DestroyCacheView(image_view);
    /*
      Advance the insertion point: rightward for a horizontal append,
      downward for a stack.
    */
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) next->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) next->rows;
      }
    proceed=SetImageProgress(append_image,AppendImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    next=GetNextImageInList(next);
  }
  append_view=DestroyCacheView(append_view);
  if (status == MagickFalse)
    append_image=DestroyImage(append_image);
  return(append_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C a t c h I m a g e E x c e p t i o n                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CatchImageException() returns if no exceptions are found in the image
%  sequence, otherwise it determines the most severe exception and reports
%  it as a warning or error depending on the severity.
%
%  The format of the CatchImageException method is:
%
%      ExceptionType CatchImageException(Image *image)
%
%  A description of each parameter follows:
%
%    o image: An image sequence.
% */ MagickExport ExceptionType CatchImageException(Image *image) { ExceptionInfo *exception; ExceptionType severity; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=AcquireExceptionInfo(); GetImageException(image,exception); CatchException(exception); severity=exception->severity; exception=DestroyExceptionInfo(exception); return(severity); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l i p I m a g e P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClipImagePath() sets the image clip mask based any clipping path information % if it exists. % % The format of the ClipImagePath method is: % % MagickBooleanType ClipImagePath(Image *image,const char *pathname, % const MagickBooleanType inside) % % A description of each parameter follows: % % o image: the image. % % o pathname: name of clipping path resource. If name is preceded by #, use % clipping path numbered by name. % % o inside: if non-zero, later operations take effect inside clipping path. % Otherwise later operations take effect outside clipping path. 
% */ MagickExport MagickBooleanType ClipImage(Image *image) { return(ClipImagePath(image,"#1",MagickTrue)); } MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname, const MagickBooleanType inside) { #define ClipImagePathTag "ClipPath/Image" char *property; const char *value; Image *clip_mask; ImageInfo *image_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(pathname != NULL); property=AcquireString(pathname); (void) FormatLocaleString(property,MaxTextExtent,"8BIM:1999,2998:%s", pathname); value=GetImageProperty(image,property); property=DestroyString(property); if (value == (const char *) NULL) { ThrowFileException(&image->exception,OptionError,"NoClipPathDefined", image->filename); return(MagickFalse); } image_info=AcquireImageInfo(); (void) CopyMagickString(image_info->filename,image->filename,MaxTextExtent); (void) ConcatenateMagickString(image_info->filename,pathname,MaxTextExtent); clip_mask=BlobToImage(image_info,value,strlen(value),&image->exception); image_info=DestroyImageInfo(image_info); if (clip_mask == (Image *) NULL) return(MagickFalse); if (clip_mask->storage_class == PseudoClass) { (void) SyncImage(clip_mask); if (SetImageStorageClass(clip_mask,DirectClass) == MagickFalse) return(MagickFalse); } if (inside == MagickFalse) (void) NegateImage(clip_mask,MagickFalse); (void) FormatLocaleString(clip_mask->magick_filename,MaxTextExtent, "8BIM:1999,2998:%s\nPS",pathname); (void) SetImageClipMask(image,clip_mask); clip_mask=DestroyImage(clip_mask); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImage() copies an image and returns the copy as a new image object. 
%
%  If the specified columns and rows is 0, an exact copy of the image is
%  returned, otherwise the pixel data is undefined and must be initialized
%  with the QueueAuthenticPixels() and SyncAuthenticPixels() methods.  On
%  failure, a NULL image is returned and exception describes the reason for
%  the failure.
%
%  The format of the CloneImage method is:
%
%      Image *CloneImage(const Image *image,const size_t columns,
%        const size_t rows,const MagickBooleanType orphan,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the cloned image.
%
%    o rows: the number of rows in the cloned image.
%
%    o detach:  With a value other than 0, the cloned image is detached from
%      its parent I/O stream.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CloneImage(const Image *image,const size_t columns,
  const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception)
{
  double
    scale;

  Image
    *clone_image;

  size_t
    length;

  /*
    Clone the image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((image->columns == 0) || (image->rows == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        CorruptImageError,"NegativeOrZeroImageSize","`%s'",image->filename);
      return((Image *) NULL);
    }
  clone_image=(Image *) AcquireCriticalMemory(sizeof(*clone_image));
  (void) memset(clone_image,0,sizeof(*clone_image));
  clone_image->signature=MagickCoreSignature;
  clone_image->storage_class=image->storage_class;
  clone_image->channels=image->channels;
  clone_image->colorspace=image->colorspace;
  clone_image->matte=image->matte;
  clone_image->columns=image->columns;
  clone_image->rows=image->rows;
  clone_image->dither=image->dither;
  (void) CloneImageProfiles(clone_image,image);
  (void) CloneImageProperties(clone_image,image);
  (void) CloneImageArtifacts(clone_image,image);
  GetTimerInfo(&clone_image->timer);
  InitializeExceptionInfo(&clone_image->exception);
  InheritException(&clone_image->exception,&image->exception);
  if (image->ascii85 != (void *) NULL)
    Ascii85Initialize(clone_image);
  clone_image->extent=image->extent;
  clone_image->magick_columns=image->magick_columns;
  clone_image->magick_rows=image->magick_rows;
  clone_image->type=image->type;
  (void) CopyMagickString(clone_image->magick_filename,image->magick_filename,
    MaxTextExtent);
  (void) CopyMagickString(clone_image->magick,image->magick,MaxTextExtent);
  (void) CopyMagickString(clone_image->filename,image->filename,MaxTextExtent);
  clone_image->progress_monitor=image->progress_monitor;
  clone_image->client_data=image->client_data;
  clone_image->reference_count=1;
  clone_image->next=image->next;
  clone_image->previous=image->previous;
  clone_image->list=NewImageList();
  clone_image->clip_mask=NewImageList();
  clone_image->mask=NewImageList();
  /*
    Detached clones get a fresh blob and no list links; otherwise share the
    parent's I/O blob by reference.
  */
  if (detach == MagickFalse)
    clone_image->blob=ReferenceBlob(image->blob);
  else
    {
      clone_image->next=NewImageList();
      clone_image->previous=NewImageList();
      clone_image->blob=CloneBlobInfo((BlobInfo *) NULL);
    }
  clone_image->ping=image->ping;
  clone_image->debug=IsEventLogging();
  clone_image->semaphore=AllocateSemaphoreInfo();
  if (image->colormap != (PixelPacket *) NULL)
    {
      /*
        Allocate and copy the image colormap.
      */
      clone_image->colors=image->colors;
      length=(size_t) image->colors;
      clone_image->colormap=(PixelPacket *) AcquireQuantumMemory(length+1,
        sizeof(*clone_image->colormap));
      if (clone_image->colormap == (PixelPacket *) NULL)
        {
          clone_image=DestroyImage(clone_image);
          ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
        }
      (void) memcpy(clone_image->colormap,image->colormap,length*
        sizeof(*clone_image->colormap));
    }
  if ((columns == 0) || (rows == 0))
    {
      /*
        Exact copy: share the pixel cache by reference and clone montage,
        directory, and mask metadata.
      */
      if (image->montage != (char *) NULL)
        (void) CloneString(&clone_image->montage,image->montage);
      if (image->directory != (char *) NULL)
        (void) CloneString(&clone_image->directory,image->directory);
      if (image->clip_mask != (Image *) NULL)
        clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue,
          exception);
      if (image->mask != (Image *) NULL)
        clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception);
      clone_image->cache=ReferencePixelCache(image->cache);
      return(clone_image);
    }
  if ((columns == image->columns) && (rows == image->rows))
    {
      if (image->clip_mask != (Image *) NULL)
        clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue,
          exception);
      if (image->mask != (Image *) NULL)
        clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception);
    }
  /*
    Resized clone: scale the page geometry and tile offset proportionally.
  */
  scale=1.0;
  if (image->columns != 0)
    scale=(double) columns/(double) image->columns;
  clone_image->page.width=(size_t) CastDoubleToLong(floor(scale*
    image->page.width+0.5));
  clone_image->page.x=CastDoubleToLong(ceil(scale*image->page.x-0.5));
  clone_image->tile_offset.x=CastDoubleToLong(ceil(scale*
    image->tile_offset.x-0.5));
  scale=1.0;
  if (image->rows != 0)
    scale=(double) rows/(double) image->rows;
  clone_image->page.height=(size_t) CastDoubleToLong(floor(scale*
    image->page.height+0.5));
  clone_image->page.y=CastDoubleToLong(ceil(scale*image->page.y-0.5));
  clone_image->tile_offset.y=CastDoubleToLong(ceil(scale*
    image->tile_offset.y-0.5));
  clone_image->cache=ClonePixelCache(image->cache);
  if (SetImageExtent(clone_image,columns,rows) == MagickFalse)
    {
      InheritException(exception,&clone_image->exception);
      clone_image=DestroyImage(clone_image);
    }
  return(clone_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e I m a g e I n f o                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneImageInfo() makes a copy of the given image info structure.  If
%  NULL is specified, a new image info structure is created initialized to
%  default values.
%
%  The format of the CloneImageInfo method is:
%
%      ImageInfo *CloneImageInfo(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info)
{
  ImageInfo
    *clone_info;

  /* NULL source: hand back a default-initialized structure. */
  clone_info=AcquireImageInfo();
  if (image_info == (ImageInfo *) NULL)
    return(clone_info);
  clone_info->compression=image_info->compression;
  clone_info->temporary=image_info->temporary;
  clone_info->adjoin=image_info->adjoin;
  clone_info->antialias=image_info->antialias;
  clone_info->scene=image_info->scene;
  clone_info->number_scenes=image_info->number_scenes;
  clone_info->depth=image_info->depth;
  /* String members are deep-copied only when present. */
  if (image_info->size != (char *) NULL)
    (void) CloneString(&clone_info->size,image_info->size);
  if (image_info->extract != (char *) NULL)
    (void) CloneString(&clone_info->extract,image_info->extract);
  if (image_info->scenes != (char *) NULL)
    (void) CloneString(&clone_info->scenes,image_info->scenes);
  if (image_info->page != (char *) NULL)
    (void) CloneString(&clone_info->page,image_info->page);
  clone_info->interlace=image_info->interlace;
  clone_info->endian=image_info->endian;
  clone_info->units=image_info->units;
  clone_info->quality=image_info->quality;
  if (image_info->sampling_factor != (char *) NULL)
    (void) CloneString(&clone_info->sampling_factor,
      image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,image_info->server_name);
  if (image_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,image_info->font);
  if (image_info->texture != (char *) NULL)
    (void) CloneString(&clone_info->texture,image_info->texture);
  if (image_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,image_info->density);
  clone_info->pointsize=image_info->pointsize;
  clone_info->fuzz=image_info->fuzz;
  clone_info->pen=image_info->pen;
  clone_info->background_color=image_info->background_color;
  clone_info->border_color=image_info->border_color;
  clone_info->matte_color=image_info->matte_color;
  clone_info->transparent_color=image_info->transparent_color;
  clone_info->dither=image_info->dither;
  clone_info->monochrome=image_info->monochrome;
  clone_info->colors=image_info->colors;
  clone_info->colorspace=image_info->colorspace;
  clone_info->type=image_info->type;
  clone_info->orientation=image_info->orientation;
  clone_info->preview_type=image_info->preview_type;
  clone_info->group=image_info->group;
  clone_info->ping=image_info->ping;
  clone_info->verbose=image_info->verbose;
  if (image_info->view != (char *) NULL)
    (void) CloneString(&clone_info->view,image_info->view);
  if (image_info->authenticate != (char *) NULL)
    (void) CloneString(&clone_info->authenticate,image_info->authenticate);
  (void) CloneImageOptions(clone_info,image_info);
  clone_info->progress_monitor=image_info->progress_monitor;
  clone_info->client_data=image_info->client_data;
  /* The pixel cache is shared by reference, not copied. */
  clone_info->cache=image_info->cache;
  if (image_info->cache != (void *) NULL)
    clone_info->cache=ReferencePixelCache(image_info->cache);
  if (image_info->profile != (void *) NULL)
    clone_info->profile=(void *) CloneStringInfo((StringInfo *)
      image_info->profile);
  SetImageInfoFile(clone_info,image_info->file);
  SetImageInfoBlob(clone_info,image_info->blob,image_info->length);
  clone_info->stream=image_info->stream;
  clone_info->virtual_pixel_method=image_info->virtual_pixel_method;
  (void) CopyMagickString(clone_info->magick,image_info->magick,MaxTextExtent);
  (void) CopyMagickString(clone_info->unique,image_info->unique,MaxTextExtent);
  (void) CopyMagickString(clone_info->zero,image_info->zero,MaxTextExtent);
  (void) CopyMagickString(clone_info->filename,image_info->filename,
    MaxTextExtent);
  clone_info->subimage=image_info->scene;  /* deprecated */
  clone_info->subrange=image_info->number_scenes;  /* deprecated */
  clone_info->channel=image_info->channel;
  clone_info->debug=IsEventLogging();
  clone_info->signature=image_info->signature;
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o p y I m a g e P i x e l s                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CopyImagePixels() copies pixels from the source image as defined by the
%  geometry the destination image at the specified offset.
%
%  The format of the CopyImagePixels method is:
%
%      MagickBooleanType CopyImagePixels(Image *image,
%        const Image *source_image,const RectangleInfo *geometry,
%        const OffsetInfo *offset,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the destination image.
%
%    o source_image: the source image.
%
%    o geometry: define the dimensions of the source pixel rectangle.
%
%    o offset: define the offset in the destination image.
%
%    o exception: return the highest severity exception.
%
*/
MagickExport MagickBooleanType CopyImagePixels(Image *image,
  const Image *source_image,const RectangleInfo *geometry,
  const OffsetInfo *offset,ExceptionInfo *exception)
{
#define CopyImageTag  "Copy/Image"

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(source_image != (Image *) NULL);
  assert(geometry != (RectangleInfo *) NULL);
  assert(offset != (OffsetInfo *) NULL);
  /*
    The requested rectangle must lie entirely within the destination.
  */
  if ((offset->x < 0) || (offset->y < 0) ||
      ((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) ||
      ((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows))
    ThrowBinaryException(OptionError,"GeometryDoesNotContainImage",
      image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Copy image pixels.
  */
  status=MagickTrue;
  progress=0;
  source_view=AcquireVirtualCacheView(source_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,image,geometry->height,1)
#endif
  for (y=0; y < (ssize_t) geometry->height; y++)
  {
    register const IndexPacket
      *magick_restrict source_indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y,
      geometry->width,1,exception);
    q=GetCacheViewAuthenticPixels(image_view,offset->x,y+offset->y,
      geometry->width,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    source_indexes=GetCacheViewVirtualIndexQueue(source_view);
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) geometry->width; x++)
    {
      /* Whole-packet copy; the index channel is carried only for CMYK. */
      *q=(*p);
      if (image->colorspace == CMYKColorspace)
        indexes[x]=source_indexes[x];
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CopyImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImage() dereferences an image, deallocating memory associated with
%  the image if the reference count becomes zero.
% % The format of the DestroyImage method is: % % Image *DestroyImage(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Image *DestroyImage(Image *image) { MagickBooleanType destroy; /* Dereference image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); destroy=MagickFalse; LockSemaphoreInfo(image->semaphore); image->reference_count--; if (image->reference_count == 0) destroy=MagickTrue; UnlockSemaphoreInfo(image->semaphore); if (destroy == MagickFalse) return((Image *) NULL); /* Destroy image. */ DestroyImagePixels(image); if (image->clip_mask != (Image *) NULL) image->clip_mask=DestroyImage(image->clip_mask); if (image->mask != (Image *) NULL) image->mask=DestroyImage(image->mask); if (image->montage != (char *) NULL) image->montage=DestroyString(image->montage); if (image->directory != (char *) NULL) image->directory=DestroyString(image->directory); if (image->colormap != (PixelPacket *) NULL) image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap); if (image->geometry != (char *) NULL) image->geometry=DestroyString(image->geometry); DestroyImageProfiles(image); DestroyImageProperties(image); DestroyImageArtifacts(image); if (image->ascii85 != (Ascii85Info*) NULL) image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85); DestroyBlob(image); (void) ClearExceptionInfo(&image->exception,MagickTrue); if (image->semaphore != (SemaphoreInfo *) NULL) DestroySemaphoreInfo(&image->semaphore); image->signature=(~MagickCoreSignature); image=(Image *) RelinquishMagickMemory(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImageInfo() deallocates 
memory associated with an ImageInfo
%  structure.
%
%  The format of the DestroyImageInfo method is:
%
%      ImageInfo *DestroyImageInfo(ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /*
    Release every owned string member before freeing the structure itself.
  */
  if (image_info->size != (char *) NULL)
    image_info->size=DestroyString(image_info->size);
  if (image_info->extract != (char *) NULL)
    image_info->extract=DestroyString(image_info->extract);
  if (image_info->scenes != (char *) NULL)
    image_info->scenes=DestroyString(image_info->scenes);
  if (image_info->page != (char *) NULL)
    image_info->page=DestroyString(image_info->page);
  if (image_info->sampling_factor != (char *) NULL)
    image_info->sampling_factor=DestroyString(
      image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    image_info->server_name=DestroyString(
      image_info->server_name);
  if (image_info->font != (char *) NULL)
    image_info->font=DestroyString(image_info->font);
  if (image_info->texture != (char *) NULL)
    image_info->texture=DestroyString(image_info->texture);
  if (image_info->density != (char *) NULL)
    image_info->density=DestroyString(image_info->density);
  if (image_info->view != (char *) NULL)
    image_info->view=DestroyString(image_info->view);
  if (image_info->authenticate != (char *) NULL)
    image_info->authenticate=DestroyString(
      image_info->authenticate);
  DestroyImageOptions(image_info);
  if (image_info->cache != (void *) NULL)
    image_info->cache=DestroyPixelCache(image_info->cache);
  if (image_info->profile != (StringInfo *) NULL)
    image_info->profile=(void *) DestroyStringInfo((StringInfo *)
      image_info->profile);
  /* Invalidate the signature so use-after-free trips the asserts. */
  image_info->signature=(~MagickCoreSignature);
  image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
  return(image_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D i s a s s o c i a t e I m a g e S t r e a m                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DisassociateImageStream() disassociates the image stream.  It checks if
%  the blob of the specified image is referenced by other images.  If the
%  reference count is higher then 1 a new blob is assigned to the specified
%  image.
%
%  The format of the DisassociateImageStream method is:
%
%      void DisassociateImageStream(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  DisassociateBlob(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e C l i p M a s k                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageClipMask() returns the clip path associated with the image.
%
%  The format of the GetImageClipMask method is:
%
%      Image *GetImageClipMask(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ MagickExport Image *GetImageClipMask(const Image *image, ExceptionInfo *exception) { assert(image != (const Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (image->clip_mask == (Image *) NULL) return((Image *) NULL); return(CloneImage(image->clip_mask,0,0,MagickTrue,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageException() traverses an image sequence and returns any % error more severe than noted by the exception parameter. % % The format of the GetImageException method is: % % void GetImageException(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: Specifies a pointer to a list of one or more images. % % o exception: return the highest severity exception. % */ MagickExport void GetImageException(Image *image,ExceptionInfo *exception) { register Image *next; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); for (next=image; next != (Image *) NULL; next=GetNextImageInList(next)) { if (next->exception.severity == UndefinedException) continue; if (next->exception.severity > exception->severity) InheritException(exception,&next->exception); next->exception.severity=UndefinedException; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageInfo() initializes image_info to default values. 
% % The format of the GetImageInfo method is: % % void GetImageInfo(ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport void GetImageInfo(ImageInfo *image_info) { char *synchronize; ExceptionInfo *exception; /* File and image dimension members. */ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image_info != (ImageInfo *) NULL); (void) memset(image_info,0,sizeof(*image_info)); image_info->adjoin=MagickTrue; image_info->interlace=NoInterlace; image_info->channel=DefaultChannels; image_info->quality=UndefinedCompressionQuality; image_info->antialias=MagickTrue; image_info->dither=MagickTrue; synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE"); if (synchronize != (const char *) NULL) { image_info->synchronize=IsStringTrue(synchronize); synchronize=DestroyString(synchronize); } exception=AcquireExceptionInfo(); (void) QueryColorDatabase(BackgroundColor,&image_info->background_color, exception); (void) QueryColorDatabase(BorderColor,&image_info->border_color,exception); (void) QueryColorDatabase(MatteColor,&image_info->matte_color,exception); (void) QueryColorDatabase(TransparentColor,&image_info->transparent_color, exception); exception=DestroyExceptionInfo(exception); image_info->debug=IsEventLogging(); image_info->signature=MagickCoreSignature; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e I n f o F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageInfoFile() returns the image info file member. % % The format of the GetImageInfoFile method is: % % FILE *GetImageInfoFile(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. 
% */ MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info) { return(image_info->file); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageMask() returns the mask associated with the image. % % The format of the GetImageMask method is: % % Image *GetImageMask(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Image *GetImageMask(const Image *image,ExceptionInfo *exception) { assert(image != (const Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (image->mask == (Image *) NULL) return((Image *) NULL); return(CloneImage(image->mask,0,0,MagickTrue,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannels() returns the number of pixel channels associated with the % specified image. % % The format of the GetChannels method is: % % size_t GetImageChannels(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport size_t GetImageChannels(Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); return(image->channels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e R e f e r e n c e C o u n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageReferenceCount() returns the image reference count. 
%
%  The format of the GetImageReferenceCount method is:
%
%      ssize_t GetImageReferenceCount(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
  ssize_t
    reference_count;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Read the count under the image semaphore so the snapshot is consistent.
  */
  LockSemaphoreInfo(image->semaphore);
  reference_count=image->reference_count;
  UnlockSemaphoreInfo(image->semaphore);
  return(reference_count);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e V i r t u a l P i x e l M e t h o d                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
%  image.  A virtual pixel is any pixel access that is outside the boundaries
%  of the image cache.
%
%  The format of the GetImageVirtualPixelMethod() method is:
%
%      VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    The method is a property of the pixel cache, not the Image struct itself.
  */
  return(GetPixelCacheVirtualMethod(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I n t e r p r e t I m a g e F i l e n a m e                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InterpretImageFilename() interprets embedded characters in an image
%  filename.  The filename length is returned.
%
%  The format of the InterpretImageFilename method is:
%
%      size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
%        const char *format,int value,char *filename)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image: the image.
%
%    o format: A filename describing the format to use to write the numeric
%      argument.  Only the first numeric format identifier is replaced.
%
%    o value: Numeric value to substitute into format filename.
%
%    o filename: return the formatted filename in this character buffer.
%
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
  Image *image,const char *format,int value,char *filename)
{
  char
    *q;

  int
    c;

  MagickBooleanType
    canonical;

  register const char
    *p;

  ssize_t
    field_width,
    offset;  /* running difference between format and expanded lengths */

  canonical=MagickFalse;
  offset=0;
  (void) CopyMagickString(filename,format,MaxTextExtent);
  /*
    Scan the format for '%' escapes; "%%" is handled later as a literal '%'.
  */
  for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
  {
    q=(char *) p+1;
    if (*q == '%')
      {
        p=q+1;
        continue;
      }
    field_width=0;
    if (*q == '0')
      field_width=(ssize_t) strtol(q,&q,10);  /* e.g. %03d zero padding */
    switch (*q)
    {
      case 'd':
      case 'o':
      case 'x':
      {
        /*
          Numeric scene substitution: temporarily NUL-terminate just past the
          conversion specifier, format `value' in place, then restore and
          re-append the remainder of the format string.
        */
        q++;
        c=(*q);
        *q='\0';
        (void) FormatLocaleString(filename+(p-format-offset),(size_t)
          (MaxTextExtent-(p-format-offset)),p,value);
        offset+=(4-field_width);
        *q=c;
        (void) ConcatenateMagickString(filename,q,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      case '[':
      {
        char
          pattern[MaxTextExtent];

        const char
          *value;

        register char
          *r;

        register ssize_t
          i;

        ssize_t
          depth;

        /*
          Image option: extract the bracket-balanced "%[filename:...]" pattern
          and replace it with the matching property/artifact/option value.
        */
        if (strchr(p,']') == (char *) NULL)
          break;
        depth=1;
        r=q+1;
        for (i=0; (i < (MaxTextExtent-1L)) && (*r != '\0'); i++)
        {
          if (*r == '[')
            depth++;
          if (*r == ']')
            depth--;
          if (depth <= 0)
            break;
          pattern[i]=(*r++);
        }
        pattern[i]='\0';
        if (LocaleNCompare(pattern,"filename:",9) != 0)
          break;  /* only the filename: namespace is honored here */
        value=(const char *) NULL;
        if (image != (Image *) NULL)
          value=GetImageProperty(image,pattern);
        if ((value == (const char *) NULL) && (image != (Image *) NULL))
          value=GetImageArtifact(image,pattern);
        if ((value == (const char *) NULL) &&
            (image_info != (ImageInfo *) NULL))
          value=GetImageOption(image_info,pattern);
        if (value == (const char *) NULL)
          break;
        q--;
        c=(*q);
        *q='\0';
        (void) CopyMagickString(filename+(p-format-offset),value,(size_t)
          (MaxTextExtent-(p-format-offset)));
        offset+=strlen(pattern)-strlen(value)+3;
        *q=c;
        (void) ConcatenateMagickString(filename,r+1,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      default:
        break;
    }
  }
  /*
    No substitution performed: fall back to the raw format.  Otherwise
    collapse any literal "%%" pairs to a single '%'.
  */
  if (canonical == MagickFalse)
    (void) CopyMagickString(filename,format,MaxTextExtent);
  else
    for (q=filename; *q != '\0'; q++)
      if ((*q == '%') && (*(q+1) == '%'))
        (void) CopyMagickString(q,q+1,(size_t) (MaxTextExtent-(q-filename)));
  return(strlen(filename));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I s H i g h D y n a m i c R a n g e I m a g e                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
%  non-integer or exceeds the bounds of the quantum depth (e.g. for Q16
%  0..65535.
%
%  The format of the IsHighDynamicRangeImage method is:
%
%      MagickBooleanType IsHighDynamicRangeImage(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
  ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  /*
    Non-HDRI builds store integral quantum values, so no pixel can be HDR.
  */
  (void) image;
  (void) exception;
  return(MagickFalse);
#else
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    status stays MagickTrue only while every component is an integral value
    within [0,QuantumRange]; a row that finds an out-of-range component
    clears it.
  */
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already decided; skip remaining work */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      /*
        The (QuantumAny) comparison detects fractional component values.
      */
      if ((pixel.red < 0.0) || (pixel.red > QuantumRange) ||
          (pixel.red != (QuantumAny) pixel.red))
        break;
      if ((pixel.green < 0.0) || (pixel.green > QuantumRange) ||
          (pixel.green != (QuantumAny) pixel.green))
        break;
      if ((pixel.blue < 0.0) || (pixel.blue > QuantumRange) ||
          (pixel.blue != (QuantumAny) pixel.blue))
        break;
      if (pixel.matte != MagickFalse)
        {
          if ((pixel.opacity < 0.0) || (pixel.opacity > QuantumRange) ||
              (pixel.opacity != (QuantumAny) pixel.opacity))
            break;
        }
      if (pixel.colorspace == CMYKColorspace)
        {
          if ((pixel.index < 0.0) || (pixel.index > QuantumRange) ||
              (pixel.index != (QuantumAny) pixel.index))
            break;
        }
      p++;
    }
    if (x < (ssize_t) image->columns)
      status=MagickFalse;  /* early break above => out-of-range component */
  }
  image_view=DestroyCacheView(image_view);
  /*
    Invert: all-in-range (status true) means the image is NOT HDR.
  */
  return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I s I m a g e O b j e c t                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageObject() returns MagickTrue if the image sequence contains a valid
%  set of image objects.
%
%  The format of the IsImageObject method is:
%
%      MagickBooleanType IsImageObject(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
  register const Image
    *p;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /*
    Every image in the list must carry the MagickCore signature.
  */
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
    if (p->signature != MagickCoreSignature)
      return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I s T a i n t I m a g e                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsTaintImage() returns MagickTrue if any pixel in the image has been
%  altered since it was first constituted.
%
%  The format of the IsTaintImage method is:
%
%      MagickBooleanType IsTaintImage(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
  char
    magick[MaxTextExtent],
    filename[MaxTextExtent];

  register const Image
    *p;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    An image is "tainted" if its taint flag is set, or if any image in the
    list has a magick/filename differing from the head (renamed or reformatted
    since constitution).
  */
  (void) CopyMagickString(magick,image->magick,MaxTextExtent);
  (void) CopyMagickString(filename,image->filename,MaxTextExtent);
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    if (p->taint != MagickFalse)
      return(MagickTrue);
    if (LocaleCompare(p->magick,magick) != 0)
      return(MagickTrue);
    if (LocaleCompare(p->filename,filename) != 0)
      return(MagickTrue);
  }
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M o d i f y I m a g e                                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ModifyImage() ensures that there is only a single reference to the image
%  to be modified, updating the provided image pointer to point to a clone of
%  the original image if necessary.
%
%  The format of the ModifyImage method is:
%
%      MagickBooleanType ModifyImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ModifyImage(Image **image,
  ExceptionInfo *exception)
{
  Image
    *clone_image;

  assert(image != (Image **) NULL);
  assert(*image != (Image *) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  /*
    Sole reference: the caller may modify the image in place.
  */
  if (GetImageReferenceCount(*image) <= 1)
    return(MagickTrue);
  /*
    Copy-on-write: clone first and verify it succeeded BEFORE touching the
    reference count.  The original code decremented the count and stored the
    clone unconditionally, so a failed CloneImage() left *image NULL while
    still reporting MagickTrue and dropping a reference.
  */
  clone_image=CloneImage(*image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return(MagickFalse);  /* *image is left intact; details in `exception' */
  LockSemaphoreInfo((*image)->semaphore);
  (*image)->reference_count--;
  UnlockSemaphoreInfo((*image)->semaphore);
  *image=clone_image;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   N e w M a g i c k I m a g e                                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewMagickImage() creates a blank image canvas of the specified size and
%  background color.
%
%  The format of the NewMagickImage method is:
%
%      Image *NewMagickImage(const ImageInfo *image_info,const size_t width,
%        const size_t height,const MagickPixelPacket *background)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the image width.
%
%    o height: the image height.
%
%    o background: the image color.
%
*/
MagickExport Image *NewMagickImage(const ImageInfo *image_info,
  const size_t width,const size_t height,const MagickPixelPacket *background)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  Image
    *image;

  ssize_t
    y;

  MagickBooleanType
    status;

  assert(image_info != (const ImageInfo *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info->signature == MagickCoreSignature);
  assert(background != (const MagickPixelPacket *) NULL);
  /*
    Adopt the canvas geometry and the background's pixel traits.
  */
  image=AcquireImage(image_info);
  image->columns=width;
  image->rows=height;
  image->colorspace=background->colorspace;
  image->matte=background->matte;
  image->fuzz=background->fuzz;
  image->depth=background->depth;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelPacket(image,background,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /*
    On any row failure the canvas is destroyed and NULL is returned;
    callers must check the result.
  */
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   R e f e r e n c e I m a g e                                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReferenceImage() increments the reference count associated with an image
%  returning a pointer to the image.
%
%  The format of the ReferenceImage method is:
%
%      Image *ReferenceImage(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport Image *ReferenceImage(Image *image)
{
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    Increment under the image semaphore; pairs with the decrement in
    DestroyImage()/ModifyImage().
  */
  LockSemaphoreInfo(image->semaphore);
  image->reference_count++;
  UnlockSemaphoreInfo(image->semaphore);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   R e s e t I m a g e P a g e                                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetImagePage() resets the image page canvas and position.
%
%  The format of the ResetImagePage method is:
%
%      MagickBooleanType ResetImagePage(Image *image,const char *page)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o page: the relative page specification.
%
*/
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  flags=ParseAbsoluteGeometry(page,&geometry);
  if ((flags & WidthValue) != 0)
    {
      if ((flags & HeightValue) == 0)
        geometry.height=geometry.width;  /* square canvas when only W given */
      image->page.width=geometry.width;
      image->page.height=geometry.height;
    }
  if ((flags & AspectValue) != 0)
    {
      /*
        '!' flag: offsets are relative adjustments to the current page.
      */
      if ((flags & XValue) != 0)
        image->page.x+=geometry.x;
      if ((flags & YValue) != 0)
        image->page.y+=geometry.y;
    }
  else
    {
      /*
        Absolute offsets; grow an unset canvas to contain the offset image.
      */
      if ((flags & XValue) != 0)
        {
          image->page.x=geometry.x;
          if ((image->page.width == 0) && (geometry.x > 0))
            image->page.width=image->columns+geometry.x;
        }
      if ((flags & YValue) != 0)
        {
          image->page.y=geometry.y;
          if ((image->page.height == 0) && (geometry.y > 0))
            image->page.height=image->rows+geometry.y;
        }
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   R e s e t I m a g e P i x e l s                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetImagePixels() resets the image pixels, that is, all the pixel
%  components are zeroed.
%
%  The format of the ResetImagePixels method is:
%
%      MagickBooleanType ResetImagePixels(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ResetImagePixels(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const void
    *pixels;

  MagickBooleanType
    status;

  MagickSizeType
    length;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  pixels=AcquirePixelCachePixels(image,&length,exception);
  if (pixels != (void *) NULL)
    {
      /*
        Reset in-core image pixels: the cache is memory-resident, so one
        memset covers the whole pixel region.  NOTE(review): the cast
        discards the const qualifier on `pixels'; presumably the authentic
        cache is writable here — confirm against AcquirePixelCachePixels().
      */
      (void) memset((void *) pixels,0,(size_t) length);
      return(MagickTrue);
    }
  /*
    Reset image pixels row-by-row through the cache view (disk cache, etc.).
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      (void) memset(q,0,sizeof(PixelPacket));
      if ((image->storage_class == PseudoClass) ||
          (image->colorspace == CMYKColorspace))
        indexes[x]=0;  /* colormap index / black channel also zeroed */
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e t I m a g e B a c k g r o u n d C o l o r                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageBackgroundColor() initializes the image pixels to the image
%  background color.  The background color is defined by the background_color
%  member of the image structure.
%
%  The format of the SetImageBackgroundColor method is:
%
%      MagickBooleanType SetImageBackgroundColor(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  IndexPacket
    index;

  MagickBooleanType
    status;

  MagickPixelPacket
    background;

  PixelPacket
    pixel;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    A non-gray background cannot be represented in a gray colorspace;
    promote the image to RGB first.
  */
  if ((IsPixelGray(&image->background_color) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) TransformImageColorspace(image,RGBColorspace);
  /*
    A translucent background requires an alpha channel.
  */
  if ((image->background_color.opacity != OpaqueOpacity) &&
      (image->matte == MagickFalse))
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Build the packed pixel (and CMYK index) once, outside the fill loop.
  */
  GetMagickPixelPacket(image,&background);
  SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
    NULL,&background);
  if (image->colorspace == CMYKColorspace)
    ConvertRGBToCMYK(&background);
  index=0;
  pixel.opacity=OpaqueOpacity;
  SetPixelPacket(image,&background,&pixel,&index);
  /*
    Set image background color.
  */
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
      *q++=pixel;
    if (image->colorspace == CMYKColorspace)
      {
        register IndexPacket
          *magick_restrict indexes;

        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
          SetPixelIndex(indexes+x,index);  /* black channel */
      }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e t I m a g e C h a n n e l s                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageChannels() sets the number of pixels channels associated with the
%  image.
%
%  The format of the SetImageChannels method is:
%
%      MagickBooleanType SetImageChannels(Image *image,const size_t channels)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channels: The number of pixel channels.
%
*/
MagickExport MagickBooleanType SetImageChannels(Image *image,
  const size_t channels)
{
  image->channels=channels;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e t I m a g e C o l o r                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageColor() set the entire image canvas to the specified color.
%
%  The format of the SetImageColor method is:
%
%      MagickBooleanType SetImageColor(Image *image,
%        const MagickPixelPacket *color)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o background: the image color.
%
*/
MagickExport MagickBooleanType SetImageColor(Image *image,
  const MagickPixelPacket *color)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  assert(color != (const MagickPixelPacket *) NULL);
  /*
    The image adopts the fill color's pixel traits before the fill.
  */
  image->colorspace=color->colorspace;
  image->matte=color->matte;
  image->fuzz=color->fuzz;
  image->depth=color->depth;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelPacket(image,color,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e t I m a g e S t o r a g e C l a s s                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageStorageClass() sets the image class: DirectClass for true color
%  images or PseudoClass for colormapped images.
%
%  The format of the SetImageStorageClass method is:
%
%      MagickBooleanType SetImageStorageClass(Image *image,
%        const ClassType storage_class)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o storage_class: The image class.
%
*/
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
  const ClassType storage_class)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->storage_class=storage_class;
  /*
    The pixel cache must be re-synchronized when the class changes (e.g. a
    colormap/index channel is added or dropped).
  */
  return(SyncImagePixelCache(image,&image->exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e t I m a g e C l i p M a s k                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageClipMask() associates a clip path with the image.  The clip path
%  must be the same dimensions as the image.  Set any pixel component of
%  the clip path to TransparentOpacity to prevent that corresponding image
%  pixel component from being updated when SyncAuthenticPixels() is applied.
%
%  The format of the SetImageClipMask method is:
%
%      MagickBooleanType SetImageClipMask(Image *image,const Image *clip_mask)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o clip_mask: the image clip path.
%
*/
MagickExport MagickBooleanType SetImageClipMask(Image *image,
  const Image *clip_mask)
{
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    The clip path must match the image geometry exactly.
  */
  if (clip_mask != (const Image *) NULL)
    if ((clip_mask->columns != image->columns) ||
        (clip_mask->rows != image->rows))
      ThrowBinaryImageException(ImageError,"ImageSizeDiffers",image->filename);
  /*
    Release any previous mask; a NULL clip_mask simply clears it.
  */
  if (image->clip_mask != (Image *) NULL)
    image->clip_mask=DestroyImage(image->clip_mask);
  image->clip_mask=NewImageList();
  if (clip_mask == (Image *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    The image keeps a private deep copy of the caller's mask.
  */
  image->clip_mask=CloneImage(clip_mask,0,0,MagickTrue,&image->exception);
  if (image->clip_mask == (Image *) NULL)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e t I m a g e E x t e n t                                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageExtent() sets the image size (i.e. columns & rows).
%
%  The format of the SetImageExtent method is:
%
%      MagickBooleanType SetImageExtent(Image *image,const size_t columns,
%        const size_t rows)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: The image width in pixels.
%
%    o rows: The image height in pixels.
%
*/
MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns,
  const size_t rows)
{
  if ((columns == 0) || (rows == 0))
    ThrowBinaryImageException(ImageError,"NegativeOrZeroImageSize",
      image->filename);
  image->columns=columns;
  image->rows=rows;
  /*
    Clamp the depth into the supported range, warning in the exception when
    the requested depth cannot be honored.
  */
  if (image->depth == 0)
    {
      image->depth=8;
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ImageError,"ImageDepthNotSupported","`%s'",image->filename);
    }
  if (image->depth > (8*sizeof(MagickSizeType)))
    {
      image->depth=8*sizeof(MagickSizeType);
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ImageError,"ImageDepthNotSupported","`%s'",image->filename);
    }
  /*
    Resize the pixel cache to match the new geometry.
  */
  return(SyncImagePixelCache(image,&image->exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e t I m a g e I n f o                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageInfo() initializes the `magick' field of the ImageInfo structure.
%  It is set to a type of image format based on the prefix or suffix of the
%  filename.  For example, `ps:image' returns PS indicating a Postscript image.
%  JPEG is returned for this filename: `image.jpg'.  The filename prefix has
%  precendence over the suffix.  Use an optional index enclosed in brackets
%  after a file name to specify a desired scene of a multi-resolution image
%  format like Photo CD (e.g. img0001.pcd[4]).  A True (non-zero) return value
%  indicates success.
%
%  The format of the SetImageInfo method is:
%
%      MagickBooleanType SetImageInfo(ImageInfo *image_info,
%        const unsigned int frames,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o frames: the number of images you intend to write.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info, const unsigned int frames,ExceptionInfo *exception) { char extension[MaxTextExtent], filename[MaxTextExtent], magic[MaxTextExtent], *q, subimage[MaxTextExtent]; const MagicInfo *magic_info; const MagickInfo *magick_info; ExceptionInfo *sans_exception; Image *image; MagickBooleanType status; register const char *p; ssize_t count; unsigned char magick[2*MaxTextExtent]; /* Look for 'image.format' in filename. */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); *subimage='\0'; GetPathComponent(image_info->filename,SubimagePath,subimage); if (*subimage != '\0') { /* Look for scene specification (e.g. img0001.pcd[4]). */ if (IsSceneGeometry(subimage,MagickFalse) == MagickFalse) { if (IsGeometry(subimage) != MagickFalse) (void) CloneString(&image_info->extract,subimage); } else { size_t first, last; (void) CloneString(&image_info->scenes,subimage); image_info->scene=StringToUnsignedLong(image_info->scenes); image_info->number_scenes=image_info->scene; p=image_info->scenes; for (q=(char *) image_info->scenes; *q != '\0'; p++) { while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) p++; first=(size_t) strtol(p,&q,10); last=first; while (isspace((int) ((unsigned char) *q)) != 0) q++; if (*q == '-') last=(size_t) strtol(q+1,&q,10); if (first > last) Swap(first,last); if (first < image_info->scene) image_info->scene=first; if (last > image_info->number_scenes) image_info->number_scenes=last; p=q; } image_info->number_scenes-=image_info->scene-1; image_info->subimage=image_info->scene; image_info->subrange=image_info->number_scenes; } } *extension='\0'; if (*image_info->magick == '\0') GetPathComponent(image_info->filename,ExtensionPath,extension); #if defined(MAGICKCORE_ZLIB_DELEGATE) if (*extension != '\0') if 
((LocaleCompare(extension,"gz") == 0) || (LocaleCompare(extension,"Z") == 0) || (LocaleCompare(extension,"svgz") == 0) || (LocaleCompare(extension,"wmz") == 0)) { char path[MaxTextExtent]; (void) CopyMagickString(path,image_info->filename,MaxTextExtent); path[strlen(path)-strlen(extension)-1]='\0'; GetPathComponent(path,ExtensionPath,extension); } #endif #if defined(MAGICKCORE_BZLIB_DELEGATE) if (*extension != '\0') if (LocaleCompare(extension,"bz2") == 0) { char path[MaxTextExtent]; (void) CopyMagickString(path,image_info->filename,MaxTextExtent); path[strlen(path)-strlen(extension)-1]='\0'; GetPathComponent(path,ExtensionPath,extension); } #endif image_info->affirm=MagickFalse; sans_exception=AcquireExceptionInfo(); if ((*extension != '\0') && (IsGlob(extension) == MagickFalse)) { MagickFormatType format_type; register ssize_t i; static const char *format_type_formats[] = { "AUTOTRACE", "BROWSE", "DCRAW", "EDIT", "LAUNCH", "MPEG:DECODE", "MPEG:ENCODE", "PRINT", "PS:ALPHA", "PS:CMYK", "PS:COLOR", "PS:GRAY", "PS:MONO", "SCAN", "SHOW", "WIN", (char *) NULL }; /* User specified image format. */ (void) CopyMagickString(magic,extension,MaxTextExtent); LocaleUpper(magic); /* Look for explicit image formats. 
*/ format_type=UndefinedFormatType; i=0; while ((format_type == UndefinedFormatType) && (format_type_formats[i] != (char *) NULL)) { if ((*magic == *format_type_formats[i]) && (LocaleCompare(magic,format_type_formats[i]) == 0)) format_type=ExplicitFormatType; i++; } magick_info=GetMagickInfo(magic,sans_exception); if ((magick_info != (const MagickInfo *) NULL) && (magick_info->format_type != UndefinedFormatType)) format_type=magick_info->format_type; if (format_type == UndefinedFormatType) (void) CopyMagickString(image_info->magick,magic,MaxTextExtent); else if (format_type == ExplicitFormatType) { image_info->affirm=MagickTrue; (void) CopyMagickString(image_info->magick,magic,MaxTextExtent); } if (LocaleCompare(magic,"RGB") == 0) image_info->affirm=MagickFalse; /* maybe SGI disguised as RGB */ } /* Look for explicit 'format:image' in filename. */ *magic='\0'; GetPathComponent(image_info->filename,MagickPath,magic); if (*magic == '\0') { (void) CopyMagickString(magic,image_info->magick,MaxTextExtent); magick_info=GetMagickInfo(magic,sans_exception); if (frames == 0) GetPathComponent(image_info->filename,CanonicalPath,filename); else GetPathComponent(image_info->filename,SubcanonicalPath,filename); (void) CopyMagickString(image_info->filename,filename,MaxTextExtent); } else { const DelegateInfo *delegate_info; /* User specified image format. 
*/ LocaleUpper(magic); magick_info=GetMagickInfo(magic,sans_exception); delegate_info=GetDelegateInfo(magic,"*",sans_exception); if (delegate_info == (const DelegateInfo *) NULL) delegate_info=GetDelegateInfo("*",magic,sans_exception); if (((magick_info != (const MagickInfo *) NULL) || (delegate_info != (const DelegateInfo *) NULL)) && (IsMagickConflict(magic) == MagickFalse)) { image_info->affirm=MagickTrue; (void) CopyMagickString(image_info->magick,magic,MaxTextExtent); GetPathComponent(image_info->filename,CanonicalPath,filename); (void) CopyMagickString(image_info->filename,filename,MaxTextExtent); } } sans_exception=DestroyExceptionInfo(sans_exception); if ((magick_info == (const MagickInfo *) NULL) || (GetMagickEndianSupport(magick_info) == MagickFalse)) image_info->endian=UndefinedEndian; if ((image_info->adjoin != MagickFalse) && (frames > 1)) { /* Test for multiple image support (e.g. image%02d.png). */ (void) InterpretImageFilename(image_info,(Image *) NULL, image_info->filename,(int) image_info->scene,filename); if ((LocaleCompare(filename,image_info->filename) != 0) && (strchr(filename,'%') == (char *) NULL)) image_info->adjoin=MagickFalse; } if ((image_info->adjoin != MagickFalse) && (frames > 0)) { /* Some image formats do not support multiple frames per file. */ magick_info=GetMagickInfo(magic,exception); if (magick_info != (const MagickInfo *) NULL) if (GetMagickAdjoin(magick_info) == MagickFalse) image_info->adjoin=MagickFalse; } if (image_info->affirm != MagickFalse) return(MagickTrue); if (frames == 0) { /* Determine the image format from the first few bytes of the file. 
*/ image=AcquireImage(image_info); (void) CopyMagickString(image->filename,image_info->filename, MaxTextExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImage(image); return(MagickFalse); } if ((IsBlobSeekable(image) == MagickFalse) || (IsBlobExempt(image) != MagickFalse)) { /* Copy image to a seekable temporary file. */ *filename='\0'; status=ImageToFile(image,filename,exception); (void) CloseBlob(image); if (status == MagickFalse) { (void) RelinquishUniqueFileResource(filename); image=DestroyImage(image); return(MagickFalse); } SetImageInfoFile(image_info,(FILE *) NULL); (void) CopyMagickString(image->filename,filename,MaxTextExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { (void) RelinquishUniqueFileResource(filename); image=DestroyImage(image); return(MagickFalse); } (void) CopyMagickString(image_info->filename,filename,MaxTextExtent); image_info->temporary=MagickTrue; } (void) memset(magick,0,sizeof(magick)); count=ReadBlob(image,2*MaxTextExtent,magick); (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR); (void) CloseBlob(image); image=DestroyImage(image); /* Check magic.xml configuration file. 
*/ sans_exception=AcquireExceptionInfo(); magic_info=GetMagicInfo(magick,(size_t) count,sans_exception); if ((magic_info != (const MagicInfo *) NULL) && (GetMagicName(magic_info) != (char *) NULL)) { (void) CopyMagickString(image_info->magick,GetMagicName(magic_info), MaxTextExtent); magick_info=GetMagickInfo(image_info->magick,sans_exception); if ((magick_info == (const MagickInfo *) NULL) || (GetMagickEndianSupport(magick_info) == MagickFalse)) image_info->endian=UndefinedEndian; sans_exception=DestroyExceptionInfo(sans_exception); return(MagickTrue); } magick_info=GetMagickInfo(image_info->magick,sans_exception); if ((magick_info == (const MagickInfo *) NULL) || (GetMagickEndianSupport(magick_info) == MagickFalse)) image_info->endian=UndefinedEndian; sans_exception=DestroyExceptionInfo(sans_exception); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e I n f o B l o b % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfoBlob() sets the image info blob member. % % The format of the SetImageInfoBlob method is: % % void SetImageInfoBlob(ImageInfo *image_info,const void *blob, % const size_t length) % % A description of each parameter follows: % % o image_info: the image info. % % o blob: the blob. % % o length: the blob length. 
% */ MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob, const size_t length) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->blob=(void *) blob; image_info->length=length; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e I n f o F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfoFile() sets the image info file member. % % The format of the SetImageInfoFile method is: % % void SetImageInfoFile(ImageInfo *image_info,FILE *file) % % A description of each parameter follows: % % o image_info: the image info. % % o file: the file. % */ MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->file=file; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageMask() associates a mask with the image. The mask must be the same % dimensions as the image. % % The format of the SetImageMask method is: % % MagickBooleanType SetImageMask(Image *image,const Image *mask) % % A description of each parameter follows: % % o image: the image. % % o mask: the image mask. 
% */
MagickExport MagickBooleanType SetImageMask(Image *image,const Image *mask)
{
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* A mask must match the image geometry exactly.  */
  if (mask != (const Image *) NULL)
    if ((mask->columns != image->columns) || (mask->rows != image->rows))
      ThrowBinaryImageException(ImageError,"ImageSizeDiffers",image->filename);
  /* Release any previously installed mask before attaching the new one.  */
  if (image->mask != (Image *) NULL)
    image->mask=DestroyImage(image->mask);
  image->mask=NewImageList();
  /* A null mask simply clears the association.  */
  if (mask == (Image *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* The mask is deep-copied, so the caller keeps ownership of its image.  */
  image->mask=CloneImage(mask,0,0,MagickTrue,&image->exception);
  if (image->mask == (Image *) NULL)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e O p a c i t y                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageOpacity() sets the opacity levels of the image.
%
%  The format of the SetImageOpacity method is:
%
%      MagickBooleanType SetImageOpacity(Image *image,const Quantum opacity)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o opacity: the level of transparency: 0 is fully opaque and QuantumRange is
%      fully transparent.
% */
MagickExport MagickBooleanType SetImageOpacity(Image *image,
  const Quantum opacity)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* The image now carries an alpha channel.  */
  image->matte=MagickTrue;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  /* Assign the same opacity to every pixel, one row per iteration.  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    /* Another thread failed; skip remaining rows without early exit
       (OpenMP loops cannot break).  */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelOpacity(q,opacity);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e V i r t u a l P i x e l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
%  image and returns the previous setting.  A virtual pixel is any pixel access
%  that is outside the boundaries of the image cache.
%
%  The format of the SetImageVirtualPixelMethod() method is:
%
%      VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: choose the type of virtual pixel.
% */
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image,
  const VirtualPixelMethod virtual_pixel_method)
{
  /*
    Validate the image, then delegate to the pixel cache, which installs
    the new method and hands back the previous one.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(SetPixelCacheVirtualMethod(image,virtual_pixel_method));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S m u s h I m a g e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SmushImages() takes all images from the current image pointer to the end
%  of the image list and smushes them to each other top-to-bottom if the
%  stack parameter is true, otherwise left-to-right.
%
%  The current gravity setting now affects how the image is justified in the
%  final image.
%
%  The format of the SmushImages method is:
%
%      Image *SmushImages(const Image *images,const MagickBooleanType stack,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o stack: A value other than 0 stacks the images top-to-bottom.
%
%    o offset: minimum distance in pixels between images.
%
%    o exception: return any errors or warnings in this structure.
% */

/* Measure how far the right image can slide left toward its predecessor:
   for every row, count trailing transparent pixels of the left image plus
   leading transparent pixels of the right image, and keep the minimum.
   Returns the slack minus the requested minimum OFFSET (or OFFSET itself
   when the scan did not cover every row).  */
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *left_view,
    *right_view;

  const Image
    *left_image,
    *right_image;

  RectangleInfo
    left_geometry,
    right_geometry;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    y;

  size_t
    gap;

  ssize_t
    x;

  /* The first image in the list has no left neighbor: nothing to close.  */
  if (images->previous == (Image *) NULL)
    return(0);
  right_image=images;
  SetGeometry(smush_image,&right_geometry);
  GravityAdjustGeometry(right_image->columns,right_image->rows,
    right_image->gravity,&right_geometry);
  left_image=images->previous;
  SetGeometry(smush_image,&left_geometry);
  GravityAdjustGeometry(left_image->columns,left_image->rows,
    left_image->gravity,&left_geometry);
  gap=right_image->columns;
  left_view=AcquireVirtualCacheView(left_image,exception);
  right_view=AcquireVirtualCacheView(right_image,exception);
  for (y=0; y < (ssize_t) smush_image->rows; y++)
  {
    /* Scan the left image right-to-left for its transparent margin.  */
    for (x=(ssize_t) left_image->columns-1; x > 0; x--)
    {
      p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((left_image->columns-x-1) >= gap))
        break;
    }
    i=(ssize_t) left_image->columns-x-1;
    /* Scan the right image left-to-right for its transparent margin.  */
    for (x=0; x < (ssize_t) right_image->columns; x++)
    {
      p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
        exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((x+i) >= (ssize_t) gap))
        break;
    }
    /* The usable gap is the smallest combined margin over all rows.  */
    if ((x+i) < (ssize_t) gap)
      gap=(size_t) (x+i);
  }
  right_view=DestroyCacheView(right_view);
  left_view=DestroyCacheView(left_view);
  if (y < (ssize_t) smush_image->rows)
    return(offset);
  return((ssize_t) gap-offset);
}

/* Vertical analogue of SmushXGap: measure the transparent slack between
   the bottom of the previous image and the top of this one, column by
   column, and keep the minimum.  */
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *bottom_view,
    *top_view;

  const Image
    *bottom_image,
    *top_image;

  RectangleInfo
    bottom_geometry,
    top_geometry;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  size_t
    gap;

  ssize_t
    y;

  /* The first image in the list has no neighbor above: nothing to close.  */
  if (images->previous == (Image *) NULL)
    return(0);
  bottom_image=images;
  SetGeometry(smush_image,&bottom_geometry);
  GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
    bottom_image->gravity,&bottom_geometry);
  top_image=images->previous;
  SetGeometry(smush_image,&top_geometry);
  GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
    &top_geometry);
  gap=bottom_image->rows;
  top_view=AcquireVirtualCacheView(top_image,exception);
  bottom_view=AcquireVirtualCacheView(bottom_image,exception);
  for (x=0; x < (ssize_t) smush_image->columns; x++)
  {
    /* Scan the top image bottom-to-top for its transparent margin.  */
    for (y=(ssize_t) top_image->rows-1; y > 0; y--)
    {
      p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((top_image->rows-y-1) >= gap))
        break;
    }
    i=(ssize_t) top_image->rows-y-1;
    /* Scan the bottom image top-to-bottom for its transparent margin.  */
    for (y=0; y < (ssize_t) bottom_image->rows; y++)
    {
      p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
        exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((y+i) >= (ssize_t) gap))
        break;
    }
    if ((y+i) < (ssize_t) gap)
      gap=(size_t) (y+i);
  }
  bottom_view=DestroyCacheView(bottom_view);
  top_view=DestroyCacheView(top_view);
  if (x < (ssize_t) smush_image->columns)
    return(offset);
  return((ssize_t) gap-offset);
}

MagickExport Image *SmushImages(const Image *images,
  const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag  "Smush/Image"

  CacheView
    *smush_view;

  const Image
    *image;

  Image
    *smush_image;

  MagickBooleanType
    matte,
    proceed,
    status;

  MagickOffsetType
    n;

  RectangleInfo
    geometry;

  register const Image
    *next;

  size_t
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y_offset;

  /*
    Compute maximum area of smushed area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  matte=image->matte;
  number_images=1;
  width=image->columns;
  height=image->rows;
  next=GetNextImageInList(image);
  /* Size the canvas: sum extents along the smush axis (plus OFFSET between
     neighbors), take the maximum along the other axis.  */
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->matte != MagickFalse)
      matte=MagickTrue;
    number_images++;
    if (stack != MagickFalse)
      {
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        if (next->previous != (Image *) NULL)
          height+=offset;
        continue;
      }
    width+=next->columns;
    if (next->previous != (Image *) NULL)
      width+=offset;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Smush images.
  */
  smush_image=CloneImage(image,width,height,MagickTrue,exception);
  if (smush_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(smush_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&smush_image->exception);
      smush_image=DestroyImage(smush_image);
      return((Image *) NULL);
    }
  smush_image->matte=matte;
  (void) SetImageBackgroundColor(smush_image);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  smush_view=AcquireVirtualCacheView(smush_image,exception);
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    SetGeometry(smush_image,&geometry);
    GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
    /* Pull this image toward its predecessor by the measured transparent
       gap (Smush[XY]Gap) before compositing it onto the canvas.  */
    if (stack != MagickFalse)
      {
        x_offset-=geometry.x;
        y_offset-=SmushYGap(smush_image,image,offset,exception);
      }
    else
      {
        x_offset-=SmushXGap(smush_image,image,offset,exception);
        y_offset-=geometry.y;
      }
    status=CompositeImage(smush_image,OverCompositeOp,image,x_offset,y_offset);
    proceed=SetImageProgress(image,SmushImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) image->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) image->rows;
      }
    image=GetNextImageInList(image);
  }
  /* Trim the canvas to the extent actually used after gap removal.  */
  if (stack == MagickFalse)
    smush_image->columns=(size_t) x_offset;
  else
    smush_image->rows=(size_t) y_offset;
  smush_view=DestroyCacheView(smush_view);
  if (status == MagickFalse)
    smush_image=DestroyImage(smush_image);
  return(smush_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S t r i p I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  StripImage() strips an image of all profiles and comments.
%
%  The format of the StripImage method is:
%
%      MagickBooleanType StripImage(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType StripImage(Image *image)
{
  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  DestroyImageProfiles(image);
  (void) DeleteImageProperty(image,"comment");
  (void) DeleteImageProperty(image,"date:create");
  (void) DeleteImageProperty(image,"date:modify");
  /* Tell the PNG encoder to omit ancillary metadata chunks as well.  */
  status=SetImageArtifact(image,"png:exclude-chunk",
    "bKGD,caNv,cHRM,eXIf,gAMA,iCCP,iTXt,pHYs,sRGB,tEXt,zCCP,zTXt,date");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImage() initializes the red, green, and blue intensities of each pixel
%  as defined by the colormap index.
%
%  The format of the SyncImage method is:
%
%      MagickBooleanType SyncImage(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */

/* Validate a colormap index: out-of-range indices clamp to 0 and latch
   *range_exception so the caller can warn once after the loop.  */
static inline IndexPacket PushColormapIndex(Image *image,
  const size_t index,MagickBooleanType *range_exception)
{
  if (index < image->colors)
    return((IndexPacket) index);
  *range_exception=MagickTrue;
  return((IndexPacket) 0);
}

MagickExport MagickBooleanType SyncImage(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    range_exception,
    status,
    taint;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->ping != MagickFalse)
    return(MagickTrue);
  /* Only PseudoClass (colormapped) images have indexes to expand.  */
  if (image->storage_class != PseudoClass)
    return(MagickFalse);
  assert(image->colormap != (PixelPacket *) NULL);
  range_exception=MagickFalse;
  status=MagickTrue;
  /* Preserve the taint flag: syncing derived pixel data is not an edit.  */
  taint=image->taint;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(range_exception,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      index;

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Copy the colormap entry into the pixel; include opacity only when
         the image has an alpha channel.  */
      index=PushColormapIndex(image,(size_t) GetPixelIndex(indexes+x),
        &range_exception);
      if (image->matte == MagickFalse)
        SetPixelRgb(q,image->colormap+(ssize_t) index)
      else
        SetPixelRGBO(q,image->colormap+(ssize_t) index);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  image->taint=taint;
  if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
    (void) ThrowMagickException(&image->exception,GetMagickModule(),
      CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c I m a g e S e t t i n g s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImageSettings() syncs image_info options into per-image attributes.
%
%  The format of the SyncImageSettings method is:
%
%      MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
%        Image *image)
%      MagickBooleanType SyncImagesSettings(const ImageInfo *image_info,
%        Image *image)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
  Image *images)
{
  Image
    *image;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  image=images;
  /* Apply the options to every image in the list, then drop the one-shot
     "page" option so it does not leak into later operations.  */
  for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
    (void) SyncImageSettings(image_info,image);
  (void) DeleteImageOption(image_info,"page");
  return(MagickTrue);
}

MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
  Image *image)
{
  char
    property[MaxTextExtent];

  const char
    *option,
    *value;

  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  ResolutionType
    units;

  /*
    Sync image options.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* For each recognized option, parse its string value and copy it into
     the corresponding image attribute.  */
  option=GetImageOption(image_info,"background");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->background_color,
      &image->exception);
  option=GetImageOption(image_info,"bias");
  if (option != (const char *) NULL)
    image->bias=StringToDoubleInterval(option,(double) QuantumRange+1.0);
  option=GetImageOption(image_info,"black-point-compensation");
  if (option != (const char *) NULL)
    image->black_point_compensation=(MagickBooleanType) ParseCommandOption(
      MagickBooleanOptions,MagickFalse,option);
  option=GetImageOption(image_info,"blue-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.blue_primary.x=geometry_info.rho;
      image->chromaticity.blue_primary.y=geometry_info.sigma;
      /* A single value applies to both coordinates.  */
      if ((flags & SigmaValue) == 0)
        image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
    }
  option=GetImageOption(image_info,"bordercolor");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->border_color,&image->exception);
  option=GetImageOption(image_info,"colors");
  if (option != (const char *) NULL)
    image->colors=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"compose");
  if (option != (const char *) NULL)
    image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"compress");
  if (option != (const char *) NULL)
    image->compression=(CompressionType) ParseCommandOption(
      MagickCompressOptions,MagickFalse,option);
  option=GetImageOption(image_info,"debug");
  if (option != (const char *) NULL)
    image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"density");
  if (option != (const char *) NULL)
    {
      GeometryInfo
        geometry_info;

      /*
        Set image density.
      */
      flags=ParseGeometry(option,&geometry_info);
      image->x_resolution=geometry_info.rho;
      image->y_resolution=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->y_resolution=image->x_resolution;
    }
  option=GetImageOption(image_info,"depth");
  if (option != (const char *) NULL)
    image->depth=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"endian");
  if (option != (const char *) NULL)
    image->endian=(EndianType) ParseCommandOption(MagickEndianOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"filter");
  if (option != (const char *) NULL)
    image->filter=(FilterTypes) ParseCommandOption(MagickFilterOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"fuzz");
  if (option != (const char *) NULL)
    image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0);
  option=GetImageOption(image_info,"gravity");
  if (option != (const char *) NULL)
    image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"green-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.green_primary.x=geometry_info.rho;
      image->chromaticity.green_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
    }
  option=GetImageOption(image_info,"intensity");
  if (option != (const char *) NULL)
    image->intensity=(PixelIntensityMethod) ParseCommandOption(
      MagickPixelIntensityOptions,MagickFalse,option);
  option=GetImageOption(image_info,"intent");
  if (option != (const char *) NULL)
    image->rendering_intent=(RenderingIntent) ParseCommandOption(
      MagickIntentOptions,MagickFalse,option);
  option=GetImageOption(image_info,"interlace");
  if (option != (const char *) NULL)
    image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"interpolate");
  if (option != (const char *) NULL)
    image->interpolate=(InterpolatePixelMethod) ParseCommandOption(
      MagickInterpolateOptions,MagickFalse,option);
  option=GetImageOption(image_info,"loop");
  if (option != (const char *) NULL)
    image->iterations=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"mattecolor");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->matte_color,&image->exception);
  option=GetImageOption(image_info,"orient");
  if (option != (const char *) NULL)
    image->orientation=(OrientationType) ParseCommandOption(
      MagickOrientationOptions,MagickFalse,option);
  option=GetImageOption(image_info,"page");
  if (option != (const char *) NULL)
    {
      char
        *geometry;

      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"quality");
  if (option != (const char *) NULL)
    image->quality=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"red-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.red_primary.x=geometry_info.rho;
      image->chromaticity.red_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
    }
  /* An explicit image_info quality overrides the "quality" option.  */
  if (image_info->quality != UndefinedCompressionQuality)
    image->quality=image_info->quality;
  option=GetImageOption(image_info,"scene");
  if (option != (const char *) NULL)
    image->scene=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"taint");
  if (option != (const char *) NULL)
    image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"tile-offset");
  if (option != (const char *) NULL)
    {
      char
        *geometry;

      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"transparent-color");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->transparent_color,
      &image->exception);
  option=GetImageOption(image_info,"type");
  if (option != (const char *) NULL)
    image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
      option);
  option=GetImageOption(image_info,"units");
  units=image_info->units;
  if (option != (const char *) NULL)
    units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
      MagickFalse,option);
  if (units != UndefinedResolution)
    {
      /* Convert the stored resolution when the unit system changes.  */
      if (image->units != units)
        switch (image->units)
        {
          case PixelsPerInchResolution:
          {
            if (units == PixelsPerCentimeterResolution)
              {
                image->x_resolution/=2.54;
                image->y_resolution/=2.54;
              }
            break;
          }
          case PixelsPerCentimeterResolution:
          {
            if (units == PixelsPerInchResolution)
              {
                /* Round to two decimal places.  */
                image->x_resolution=(double) ((size_t) (100.0*2.54*
                  image->x_resolution+0.5))/100.0;
                image->y_resolution=(double) ((size_t) (100.0*2.54*
                  image->y_resolution+0.5))/100.0;
              }
            break;
          }
          default:
            break;
        }
      image->units=units;
      /* An explicit density, if given, wins over the converted value.  */
      option=GetImageOption(image_info,"density");
      if (option != (const char *) NULL)
        {
          flags=ParseGeometry(option,&geometry_info);
          image->x_resolution=geometry_info.rho;
          image->y_resolution=geometry_info.sigma;
          if ((flags & SigmaValue) == 0)
            image->y_resolution=image->x_resolution;
        }
    }
  option=GetImageOption(image_info,"white-point");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.white_point.x=geometry_info.rho;
      image->chromaticity.white_point.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.white_point.y=image->chromaticity.white_point.x;
    }
  /* Mirror every remaining option into the image's artifact table so
     coders can consult it later.  */
  ResetImageOptionIterator(image_info);
  for (option=GetNextImageOption(image_info); option != (const char *) NULL; )
  {
    value=GetImageOption(image_info,option);
    if (value != (const char *) NULL)
      {
        (void) FormatLocaleString(property,MaxTextExtent,"%s",option);
        (void) SetImageArtifact(image,property,value);
      }
    option=GetNextImageOption(image_info);
  }
  return(MagickTrue);
}
c-omp.c
/* This file contains routines to construct OpenACC and OpenMP constructs, called from parsing in the C and C++ front ends. Copyright (C) 2005-2020 Free Software Foundation, Inc. Contributed by Richard Henderson <rth@redhat.com>, Diego Novillo <dnovillo@redhat.com>. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "options.h" #include "c-common.h" #include "gimple-expr.h" #include "c-pragma.h" #include "stringpool.h" #include "omp-general.h" #include "gomp-constants.h" #include "memmodel.h" #include "attribs.h" #include "gimplify.h" #include "langhooks.h" /* Complete a #pragma oacc wait construct. LOC is the location of the #pragma. 
*/

tree
c_finish_oacc_wait (location_t loc, tree parms, tree clauses)
{
  const int nparms = list_length (parms);
  tree stmt, t;
  vec<tree, va_gc> *args;

  /* Build a call to GOACC_wait (async, num_waits, wait1, wait2, ...).  */
  vec_alloc (args, nparms + 2);
  stmt = builtin_decl_explicit (BUILT_IN_GOACC_WAIT);

  /* First argument: the async queue, or GOMP_ASYNC_SYNC when no async
     clause was given.  */
  if (omp_find_clause (clauses, OMP_CLAUSE_ASYNC))
    t = OMP_CLAUSE_ASYNC_EXPR (clauses);
  else
    t = build_int_cst (integer_type_node, GOMP_ASYNC_SYNC);

  args->quick_push (t);
  args->quick_push (build_int_cst (integer_type_node, nparms));

  /* Remaining arguments: one wait expression per parameter; fold
     compile-time constants to plain integer_type_node constants.  */
  for (t = parms; t; t = TREE_CHAIN (t))
    {
      if (TREE_CODE (OMP_CLAUSE_WAIT_EXPR (t)) == INTEGER_CST)
	args->quick_push (build_int_cst (integer_type_node,
			TREE_INT_CST_LOW (OMP_CLAUSE_WAIT_EXPR (t))));
      else
	args->quick_push (OMP_CLAUSE_WAIT_EXPR (t));
    }

  stmt = build_call_expr_loc_vec (loc, stmt, args);

  vec_free (args);

  return stmt;
}

/* Complete a #pragma omp master construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_master (location_t loc, tree stmt)
{
  tree t = add_stmt (build1 (OMP_MASTER, void_type_node, stmt));
  SET_EXPR_LOCATION (t, loc);
  return t;
}

/* Complete a #pragma omp taskgroup construct.  BODY is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_taskgroup (location_t loc, tree body, tree clauses)
{
  tree stmt = make_node (OMP_TASKGROUP);
  TREE_TYPE (stmt) = void_type_node;
  OMP_TASKGROUP_BODY (stmt) = body;
  OMP_TASKGROUP_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, loc);
  return add_stmt (stmt);
}

/* Complete a #pragma omp critical construct.  BODY is the structured-block
   that follows the pragma, NAME is the identifier in the pragma, or null
   if it was omitted.  LOC is the location of the #pragma.
*/

tree
c_finish_omp_critical (location_t loc, tree body, tree name, tree clauses)
{
  gcc_assert (!clauses || OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_HINT);
  /* A non-zero hint is only permitted on a named critical region.  */
  if (name == NULL_TREE
      && clauses != NULL_TREE
      && integer_nonzerop (OMP_CLAUSE_HINT_EXPR (clauses)))
    {
      error_at (OMP_CLAUSE_LOCATION (clauses),
		"%<#pragma omp critical%> with %<hint%> clause requires "
		"a name, except when %<omp_sync_hint_none%> is used");
      return error_mark_node;
    }

  tree stmt = make_node (OMP_CRITICAL);
  TREE_TYPE (stmt) = void_type_node;
  OMP_CRITICAL_BODY (stmt) = body;
  OMP_CRITICAL_NAME (stmt) = name;
  OMP_CRITICAL_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, loc);
  return add_stmt (stmt);
}

/* Complete a #pragma omp ordered construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_ordered (location_t loc, tree clauses, tree stmt)
{
  tree t = make_node (OMP_ORDERED);
  TREE_TYPE (t) = void_type_node;
  OMP_ORDERED_BODY (t) = stmt;
  /* With -fopenmp-simd (but not -fopenmp), only the simd aspect of
     ordered is honored; force a lone simd clause in that case.  */
  if (!flag_openmp /* flag_openmp_simd */
      && (OMP_CLAUSE_CODE (clauses) != OMP_CLAUSE_SIMD
	  || OMP_CLAUSE_CHAIN (clauses)))
    clauses = build_omp_clause (loc, OMP_CLAUSE_SIMD);
  OMP_ORDERED_CLAUSES (t) = clauses;
  SET_EXPR_LOCATION (t, loc);
  return add_stmt (t);
}

/* Complete a #pragma omp barrier construct.  LOC is the location of
   the #pragma.  */

void
c_finish_omp_barrier (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}

/* Complete a #pragma omp taskwait construct.  LOC is the location of the
   pragma.  */

void
c_finish_omp_taskwait (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}

/* Complete a #pragma omp taskyield construct.  LOC is the location of the
   pragma.
*/

void
c_finish_omp_taskyield (location_t loc)
{
  tree x;

  x = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}

/* Complete a #pragma omp atomic construct.  For CODE OMP_ATOMIC
   the expression to be implemented atomically is LHS opcode= RHS.
   For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS
   opcode= RHS with the new or old content of LHS returned.
   LOC is the location of the atomic statement.  The value returned
   is either error_mark_node (if the construct was erroneous) or an
   OMP_ATOMIC* node which should be added to the current statement
   tree with add_stmt.  If TEST is set, avoid calling save_expr
   or create_tmp_var*.  */

tree
c_finish_omp_atomic (location_t loc, enum tree_code code,
		     enum tree_code opcode, tree lhs, tree rhs,
		     tree v, tree lhs1, tree rhs1, bool swapped,
		     enum omp_memory_order memory_order, bool test)
{
  tree x, type, addr, pre = NULL_TREE;
  HOST_WIDE_INT bitpos = 0, bitsize = 0;

  if (lhs == error_mark_node || rhs == error_mark_node
      || v == error_mark_node || lhs1 == error_mark_node
      || rhs1 == error_mark_node)
    return error_mark_node;

  /* ??? According to one reading of the OpenMP spec, complex type are
     supported, but there are no atomic stores for any architecture.
     But at least icc 9.0 doesn't support complex types here either.
     And lets not even talk about vector types...  */
  type = TREE_TYPE (lhs);
  if (!INTEGRAL_TYPE_P (type)
      && !POINTER_TYPE_P (type)
      && !SCALAR_FLOAT_TYPE_P (type))
    {
      error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
      return error_mark_node;
    }
  if (TYPE_ATOMIC (type))
    {
      error_at (loc, "%<_Atomic%> expression in %<#pragma omp atomic%>");
      return error_mark_node;
    }

  if (opcode == RDIV_EXPR)
    opcode = TRUNC_DIV_EXPR;

  /* ??? Validate that rhs does not overlap lhs.  */
  tree blhs = NULL;
  /* For a bit-field target, operate atomically on the underlying
     representative field and remember the original reference in BLHS so
     we can extract/insert the bits around the atomic operation.  */
  if (TREE_CODE (lhs) == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (lhs, 1)) == FIELD_DECL
      && DECL_C_BIT_FIELD (TREE_OPERAND (lhs, 1))
      && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (lhs, 1)))
    {
      tree field = TREE_OPERAND (lhs, 1);
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
      if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))
	  && tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr)))
	bitpos = (tree_to_uhwi (DECL_FIELD_OFFSET (field))
		  - tree_to_uhwi (DECL_FIELD_OFFSET (repr))) * BITS_PER_UNIT;
      else
	bitpos = 0;
      bitpos += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
		 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
      gcc_assert (tree_fits_shwi_p (DECL_SIZE (field)));
      bitsize = tree_to_shwi (DECL_SIZE (field));
      blhs = lhs;
      type = TREE_TYPE (repr);
      lhs = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (lhs, 0),
		    repr, TREE_OPERAND (lhs, 2));
    }

  /* Take and save the address of the lhs.  From then on we'll reference it
     via indirection.  */
  addr = build_unary_op (loc, ADDR_EXPR, lhs, false);
  if (addr == error_mark_node)
    return error_mark_node;
  if (!test)
    addr = save_expr (addr);
  if (!test
      && TREE_CODE (addr) != SAVE_EXPR
      && (TREE_CODE (addr) != ADDR_EXPR
	  || !VAR_P (TREE_OPERAND (addr, 0))))
    {
      /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
	 it even after unsharing function body.  */
      tree var = create_tmp_var_raw (TREE_TYPE (addr));
      DECL_CONTEXT (var) = current_function_decl;
      addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
    }
  tree orig_lhs = lhs;
  lhs = build_indirect_ref (loc, addr, RO_NULL);
  tree new_lhs = lhs;

  if (code == OMP_ATOMIC_READ)
    {
      x = build1 (OMP_ATOMIC_READ, type, addr);
      SET_EXPR_LOCATION (x, loc);
      OMP_ATOMIC_MEMORY_ORDER (x) = memory_order;
      /* For bit-fields, extract just the referenced bits from the value
	 read atomically.  */
      if (blhs)
	x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x,
			bitsize_int (bitsize), bitsize_int (bitpos));
      return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
				loc, x, NULL_TREE);
    }

  /* There are lots of warnings, errors, and conversions that need to happen
     in the course of interpreting a statement.  Use the normal mechanisms
     to do this, and then take it apart again.  */
  if (blhs)
    {
      lhs = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), lhs,
			bitsize_int (bitsize), bitsize_int (bitpos));
      if (swapped)
	rhs = build_binary_op (loc, opcode, rhs, lhs, true);
      else if (opcode != NOP_EXPR)
	rhs = build_binary_op (loc, opcode, lhs, rhs, true);
      opcode = NOP_EXPR;
    }
  else if (swapped)
    {
      rhs = build_binary_op (loc, opcode, rhs, lhs, true);
      opcode = NOP_EXPR;
    }
  bool save = in_late_binary_op;
  in_late_binary_op = true;
  x = build_modify_expr (loc, blhs ? blhs : lhs, NULL_TREE, opcode,
			 loc, rhs, NULL_TREE);
  in_late_binary_op = save;
  if (x == error_mark_node)
    return error_mark_node;
  if (TREE_CODE (x) == COMPOUND_EXPR)
    {
      /* build_modify_expr may pre-evaluate part of the RHS; keep that part
	 aside in PRE and re-attach it at the end.  */
      pre = TREE_OPERAND (x, 0);
      gcc_assert (TREE_CODE (pre) == SAVE_EXPR || tree_invariant_p (pre));
      x = TREE_OPERAND (x, 1);
    }
  gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
  rhs = TREE_OPERAND (x, 1);

  if (blhs)
    rhs = build3_loc (loc, BIT_INSERT_EXPR, type, new_lhs,
		      rhs, bitsize_int (bitpos));

  /* Punt the actual generation of atomic operations to common code.  */
  if (code == OMP_ATOMIC)
    type = void_type_node;
  x = build2 (code, type, addr, rhs);
  SET_EXPR_LOCATION (x, loc);
  OMP_ATOMIC_MEMORY_ORDER (x) = memory_order;

  /* Generally it is hard to prove lhs1 and lhs are the same memory
     location, just diagnose different variables.  */
  if (rhs1
      && VAR_P (rhs1)
      && VAR_P (orig_lhs)
      && rhs1 != orig_lhs
      && !test)
    {
      if (code == OMP_ATOMIC)
	error_at (loc, "%<#pragma omp atomic update%> uses two different "
		       "variables for memory");
      else
	error_at (loc, "%<#pragma omp atomic capture%> uses two different "
		       "variables for memory");
      return error_mark_node;
    }

  /* Re-target bit-field references in LHS1/RHS1 at the representative
     field, mirroring what was done for LHS above.  */
  if (lhs1
      && lhs1 != orig_lhs
      && TREE_CODE (lhs1) == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (lhs1, 1)) == FIELD_DECL
      && DECL_C_BIT_FIELD (TREE_OPERAND (lhs1, 1))
      && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (lhs1, 1)))
    {
      tree field = TREE_OPERAND (lhs1, 1);
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
      lhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (lhs1, 0),
		     repr, TREE_OPERAND (lhs1, 2));
    }
  if (rhs1
      && rhs1 != orig_lhs
      && TREE_CODE (rhs1) == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (rhs1, 1)) == FIELD_DECL
      && DECL_C_BIT_FIELD (TREE_OPERAND (rhs1, 1))
      && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (rhs1, 1)))
    {
      tree field = TREE_OPERAND (rhs1, 1);
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
      rhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (rhs1, 0),
		     repr, TREE_OPERAND (rhs1, 2));
    }

  if (code != OMP_ATOMIC)
    {
      /* Generally it is hard to prove lhs1 and lhs are the same memory
	 location, just diagnose different variables.  */
      if (lhs1 && VAR_P (lhs1) && VAR_P (orig_lhs))
	{
	  if (lhs1 != orig_lhs && !test)
	    {
	      error_at (loc, "%<#pragma omp atomic capture%> uses two "
			     "different variables for memory");
	      return error_mark_node;
	    }
	}
      if (blhs)
	{
	  x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x,
			  bitsize_int (bitsize), bitsize_int (bitpos));
	  type = TREE_TYPE (blhs);
	}
      x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
			     loc, x, NULL_TREE);
      if (rhs1 && rhs1 != orig_lhs)
	{
	  tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
	  if (rhs1addr == error_mark_node)
	    return error_mark_node;
	  x = omit_one_operand_loc (loc, type, x, rhs1addr);
	}
      if (lhs1 && lhs1 != orig_lhs)
	{
	  tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, false);
	  if (lhs1addr == error_mark_node)
	    return error_mark_node;
	  if (code == OMP_ATOMIC_CAPTURE_OLD)
	    x = omit_one_operand_loc (loc, type, x, lhs1addr);
	  else
	    {
	      if (!test)
		x = save_expr (x);
	      x = omit_two_operands_loc (loc, type, x, x, lhs1addr);
	    }
	}
    }
  else if (rhs1 && rhs1 != orig_lhs)
    {
      tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
      if (rhs1addr == error_mark_node)
	return error_mark_node;
      x = omit_one_operand_loc (loc, type, x, rhs1addr);
    }

  if (pre)
    x = omit_one_operand_loc (loc, type, x, pre);
  return x;
}

/* Return true if TYPE is the implementation's omp_depend_t.  */

bool
c_omp_depend_t_p (tree type)
{
  type = TYPE_MAIN_VARIANT (type);
  /* omp_depend_t is a complete struct named "omp_depend_t", declared at
     file scope, the size of two pointers.  */
  return (TREE_CODE (type) == RECORD_TYPE
	  && TYPE_NAME (type)
	  && ((TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
	       ? DECL_NAME (TYPE_NAME (type)) : TYPE_NAME (type))
	      == get_identifier ("omp_depend_t"))
	  && (!TYPE_CONTEXT (type)
	      || TREE_CODE (TYPE_CONTEXT (type)) == TRANSLATION_UNIT_DECL)
	  && COMPLETE_TYPE_P (type)
	  && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
	  && !compare_tree_int (TYPE_SIZE (type),
				2 * tree_to_uhwi (TYPE_SIZE (ptr_type_node))));
}

/* Complete a #pragma omp depobj construct.  LOC is the location of the
   #pragma.
*/

void
c_finish_omp_depobj (location_t loc, tree depobj,
		     enum omp_clause_depend_kind kind, tree clause)
{
  tree t = NULL_TREE;
  /* Validate the depobj operand: must be a non-const omp_depend_t.  */
  if (!error_operand_p (depobj))
    {
      if (!c_omp_depend_t_p (TREE_TYPE (depobj)))
	{
	  error_at (EXPR_LOC_OR_LOC (depobj, loc),
		    "type of %<depobj%> expression is not %<omp_depend_t%>");
	  depobj = error_mark_node;
	}
      else if (TYPE_READONLY (TREE_TYPE (depobj)))
	{
	  error_at (EXPR_LOC_OR_LOC (depobj, loc),
		    "%<const%> qualified %<depobj%> expression");
	  depobj = error_mark_node;
	}
    }
  else
    depobj = error_mark_node;

  if (clause == error_mark_node)
    return;

  if (clause)
    {
      gcc_assert (TREE_CODE (clause) == OMP_CLAUSE
		  && OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_DEPEND);
      if (OMP_CLAUSE_CHAIN (clause))
	error_at (OMP_CLAUSE_LOCATION (clause),
		  "more than one locator in %<depend%> clause on %<depobj%> "
		  "construct");
      switch (OMP_CLAUSE_DEPEND_KIND (clause))
	{
	case OMP_CLAUSE_DEPEND_DEPOBJ:
	  error_at (OMP_CLAUSE_LOCATION (clause),
		    "%<depobj%> dependence type specified in %<depend%> "
		    "clause on %<depobj%> construct");
	  return;
	case OMP_CLAUSE_DEPEND_SOURCE:
	case OMP_CLAUSE_DEPEND_SINK:
	  error_at (OMP_CLAUSE_LOCATION (clause),
		    "%<depend(%s)%> is only allowed in %<omp ordered%>",
		    OMP_CLAUSE_DEPEND_KIND (clause) == OMP_CLAUSE_DEPEND_SOURCE
		    ? "source" : "sink");
	  return;
	case OMP_CLAUSE_DEPEND_IN:
	case OMP_CLAUSE_DEPEND_OUT:
	case OMP_CLAUSE_DEPEND_INOUT:
	case OMP_CLAUSE_DEPEND_MUTEXINOUTSET:
	  kind = OMP_CLAUSE_DEPEND_KIND (clause);
	  t = OMP_CLAUSE_DECL (clause);
	  gcc_assert (t);
	  if (TREE_CODE (t) == TREE_LIST
	      && TREE_PURPOSE (t)
	      && TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC)
	    {
	      error_at (OMP_CLAUSE_LOCATION (clause),
			"%<iterator%> modifier may not be specified on "
			"%<depobj%> construct");
	      return;
	    }
	  /* The address of the dependence locator is stored in the first
	     pointer-sized slot of the depobj; preserve any side effects
	     from a COMPOUND_EXPR locator.  */
	  if (TREE_CODE (t) == COMPOUND_EXPR)
	    {
	      tree t1 = build_fold_addr_expr (TREE_OPERAND (t, 1));
	      t = build2 (COMPOUND_EXPR, TREE_TYPE (t1), TREE_OPERAND (t, 0),
			  t1);
	    }
	  else
	    t = build_fold_addr_expr (t);
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  else
    gcc_assert (kind != OMP_CLAUSE_DEPEND_SOURCE);

  if (depobj == error_mark_node)
    return;

  depobj = build_fold_addr_expr_loc (EXPR_LOC_OR_LOC (depobj, loc), depobj);
  tree dtype
    = build_pointer_type_for_mode (ptr_type_node, TYPE_MODE (ptr_type_node),
				   true);
  depobj = fold_convert (dtype, depobj);
  tree r;
  if (clause)
    {
      depobj = save_expr (depobj);
      r = build_indirect_ref (loc, depobj, RO_UNARY_STAR);
      add_stmt (build2 (MODIFY_EXPR, void_type_node, r, t));
    }
  /* The dependence kind is encoded as an integer in the second
     pointer-sized slot of the depobj.  */
  int k;
  switch (kind)
    {
    case OMP_CLAUSE_DEPEND_IN:
      k = GOMP_DEPEND_IN;
      break;
    case OMP_CLAUSE_DEPEND_OUT:
      k = GOMP_DEPEND_OUT;
      break;
    case OMP_CLAUSE_DEPEND_INOUT:
      k = GOMP_DEPEND_INOUT;
      break;
    case OMP_CLAUSE_DEPEND_MUTEXINOUTSET:
      k = GOMP_DEPEND_MUTEXINOUTSET;
      break;
    case OMP_CLAUSE_DEPEND_LAST:
      k = -1;
      break;
    default:
      gcc_unreachable ();
    }
  t = build_int_cst (ptr_type_node, k);
  depobj = build2_loc (loc, POINTER_PLUS_EXPR, TREE_TYPE (depobj), depobj,
		       TYPE_SIZE_UNIT (ptr_type_node));
  r = build_indirect_ref (loc, depobj, RO_UNARY_STAR);
  add_stmt (build2 (MODIFY_EXPR, void_type_node, r, t));
}

/* Complete a #pragma omp flush construct.  We don't do anything with the
   variable list that the syntax allows.  LOC is the location of the
   #pragma.
*/

void
c_finish_omp_flush (location_t loc, int mo)
{
  tree x;

  /* MEMMODEL_LAST means no explicit memory order: emit a full barrier.
     Otherwise emit an atomic thread fence with the requested order.  */
  if (mo == MEMMODEL_LAST)
    {
      x = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE);
      x = build_call_expr_loc (loc, x, 0);
    }
  else
    {
      x = builtin_decl_explicit (BUILT_IN_ATOMIC_THREAD_FENCE);
      x = build_call_expr_loc (loc, x, 1,
			       build_int_cst (integer_type_node, mo));
    }
  add_stmt (x);
}

/* Check and canonicalize OMP_FOR increment expression.
   Helper function for c_finish_omp_for.  */

static tree
check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
{
  tree t;

  /* The increment must be computed in (at least) the iterator's
     precision; anything narrower cannot be canonicalized.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
      || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
    return error_mark_node;

  if (exp == decl)
    return build_int_cst (TREE_TYPE (exp), 0);

  switch (TREE_CODE (exp))
    {
    CASE_CONVERT:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_convert_loc (loc, TREE_TYPE (exp), t);
      break;
    case MINUS_EXPR:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, MINUS_EXPR,
				TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      break;
    case PLUS_EXPR:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, PLUS_EXPR,
				TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, PLUS_EXPR,
				TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
      break;
    case COMPOUND_EXPR:
      {
	/* cp_build_modify_expr forces preevaluation of the RHS to make
	   sure that it is evaluated before the lvalue-rvalue conversion
	   is applied to the LHS.  Reconstruct the original expression.  */
	tree op0 = TREE_OPERAND (exp, 0);
	if (TREE_CODE (op0) == TARGET_EXPR
	    && !VOID_TYPE_P (TREE_TYPE (op0)))
	  {
	    tree op1 = TREE_OPERAND (exp, 1);
	    tree temp = TARGET_EXPR_SLOT (op0);
	    if (BINARY_CLASS_P (op1)
		&& TREE_OPERAND (op1, 1) == temp)
	      {
		op1 = copy_node (op1);
		TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0);
		return check_omp_for_incr_expr (loc, op1, decl);
	      }
	  }
	break;
      }
    default:
      break;
    }

  return error_mark_node;
}

/* If the OMP_FOR increment expression in INCR is of pointer type,
   canonicalize it into an expression handled by gimplify_omp_for()
   and return it.  DECL is the iteration variable.  */

static tree
c_omp_for_incr_canonicalize_ptr (location_t loc, tree decl, tree incr)
{
  if (POINTER_TYPE_P (TREE_TYPE (decl))
      && TREE_OPERAND (incr, 1))
    {
      tree t = fold_convert_loc (loc,
				 sizetype, TREE_OPERAND (incr, 1));

      /* Decrements become additions of a negated offset.  */
      if (TREE_CODE (incr) == POSTDECREMENT_EXPR
	  || TREE_CODE (incr) == PREDECREMENT_EXPR)
	t = fold_build1_loc (loc, NEGATE_EXPR, sizetype, t);
      t = fold_build_pointer_plus (decl, t);
      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
    }
  return incr;
}

/* Validate and generate OMP_FOR.
   DECLV is a vector of iteration variables, for each collapsed loop.

   ORIG_DECLV, if non-NULL, is a vector with the original iteration
   variables (prior to any transformations, by say, C++ iterators).

   INITV, CONDV and INCRV are vectors containing initialization
   expressions, controlling predicates and increment expressions.
   BODY is the body of the loop and PRE_BODY statements that go before
   the loop.
*/

tree
c_finish_omp_for (location_t locus, enum tree_code code, tree declv,
		  tree orig_declv, tree initv, tree condv, tree incrv,
		  tree body, tree pre_body, bool final_p)
{
  location_t elocus;
  bool fail = false;
  int i;

  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
  for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
    {
      tree decl = TREE_VEC_ELT (declv, i);
      tree init = TREE_VEC_ELT (initv, i);
      tree cond = TREE_VEC_ELT (condv, i);
      tree incr = TREE_VEC_ELT (incrv, i);

      elocus = locus;
      if (EXPR_HAS_LOCATION (init))
	elocus = EXPR_LOCATION (init);

      /* Validate the iteration variable.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
	  && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
	{
	  error_at (elocus, "invalid type for iteration variable %qE", decl);
	  fail = true;
	}
      else if (TYPE_ATOMIC (TREE_TYPE (decl)))
	{
	  error_at (elocus, "%<_Atomic%> iteration variable %qE", decl);
	  fail = true;
	  /* _Atomic iterator confuses stuff too much, so we risk ICE
	     trying to diagnose it further.  */
	  continue;
	}

      /* In the case of "for (int i = 0...)", init will be a decl.  It should
	 have a DECL_INITIAL that we can turn into an assignment.  */
      if (init == decl)
	{
	  elocus = DECL_SOURCE_LOCATION (decl);

	  init = DECL_INITIAL (decl);
	  if (init == NULL)
	    {
	      error_at (elocus, "%qE is not initialized", decl);
	      init = integer_zero_node;
	      fail = true;
	    }
	  DECL_INITIAL (decl) = NULL_TREE;

	  init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
				    /* FIXME diagnostics: This should
				       be the location of the INIT.  */
				    elocus,
				    init,
				    NULL_TREE);
	}
      if (init != error_mark_node)
	{
	  gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
	  gcc_assert (TREE_OPERAND (init, 0) == decl);
	}

      if (cond == NULL_TREE)
	{
	  error_at (elocus, "missing controlling predicate");
	  fail = true;
	}
      else
	{
	  bool cond_ok = false;

	  /* E.g. C sizeof (vla) could add COMPOUND_EXPRs with
	     evaluation of the vla VAR_DECL.  We need to readd
	     them to the non-decl operand.  See PR45784.  */
	  while (TREE_CODE (cond) == COMPOUND_EXPR)
	    cond = TREE_OPERAND (cond, 1);

	  if (EXPR_HAS_LOCATION (cond))
	    elocus = EXPR_LOCATION (cond);

	  if (TREE_CODE (cond) == LT_EXPR
	      || TREE_CODE (cond) == LE_EXPR
	      || TREE_CODE (cond) == GT_EXPR
	      || TREE_CODE (cond) == GE_EXPR
	      || TREE_CODE (cond) == NE_EXPR
	      || TREE_CODE (cond) == EQ_EXPR)
	    {
	      tree op0 = TREE_OPERAND (cond, 0);
	      tree op1 = TREE_OPERAND (cond, 1);

	      /* 2.5.1.  The comparison in the condition is computed in
		 the type of DECL, otherwise the behavior is undefined.

		 For example:
		 long n; int i;
		 i < n;

		 according to ISO will be evaluated as:
		 (long)i < n;

		 We want to force:
		 i < (int)n;  */
	      if (TREE_CODE (op0) == NOP_EXPR
		  && decl == TREE_OPERAND (op0, 0))
		{
		  TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
		  TREE_OPERAND (cond, 1)
		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
				       TREE_OPERAND (cond, 1));
		}
	      else if (TREE_CODE (op1) == NOP_EXPR
		       && decl == TREE_OPERAND (op1, 0))
		{
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
		  TREE_OPERAND (cond, 0)
		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
				       TREE_OPERAND (cond, 0));
		}

	      if (decl == TREE_OPERAND (cond, 0))
		cond_ok = true;
	      else if (decl == TREE_OPERAND (cond, 1))
		{
		  /* Normalize the condition so DECL is always on the
		     left-hand side.  */
		  TREE_SET_CODE (cond,
				 swap_tree_comparison (TREE_CODE (cond)));
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
		  TREE_OPERAND (cond, 0) = decl;
		  cond_ok = true;
		}

	      if (TREE_CODE (cond) == NE_EXPR
		  || TREE_CODE (cond) == EQ_EXPR)
		{
		  /* != / == are only valid for integral iterators, and
		     must be convertible to < / > / <= / >= against the
		     type's extreme values.  */
		  if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
		    {
		      if (code == OACC_LOOP || TREE_CODE (cond) == EQ_EXPR)
			cond_ok = false;
		    }
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MIN_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? GT_EXPR : LE_EXPR);
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MAX_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? LT_EXPR : GE_EXPR);
		  else if (code == OACC_LOOP || TREE_CODE (cond) == EQ_EXPR)
		    cond_ok = false;
		}

	      if (cond_ok && TREE_VEC_ELT (condv, i) != cond)
		{
		  /* Re-attach COMPOUND_EXPRs stripped above onto the
		     non-decl operand of the condition.  */
		  tree ce = NULL_TREE, *pce = &ce;
		  tree type = TREE_TYPE (TREE_OPERAND (cond, 1));
		  for (tree c = TREE_VEC_ELT (condv, i); c != cond;
		       c = TREE_OPERAND (c, 1))
		    {
		      *pce = build2 (COMPOUND_EXPR, type, TREE_OPERAND (c, 0),
				     TREE_OPERAND (cond, 1));
		      pce = &TREE_OPERAND (*pce, 1);
		    }
		  TREE_OPERAND (cond, 1) = ce;
		  TREE_VEC_ELT (condv, i) = cond;
		}
	    }

	  if (!cond_ok)
	    {
	      error_at (elocus, "invalid controlling predicate");
	      fail = true;
	    }
	}

      if (incr == NULL_TREE)
	{
	  error_at (elocus, "missing increment expression");
	  fail = true;
	}
      else
	{
	  bool incr_ok = false;

	  if (EXPR_HAS_LOCATION (incr))
	    elocus = EXPR_LOCATION (incr);

	  /* Check all the valid increment expressions: v++, v--, ++v, --v,
	     v = v + incr, v = incr + v and v = v - incr.  */
	  switch (TREE_CODE (incr))
	    {
	    case POSTINCREMENT_EXPR:
	    case PREINCREMENT_EXPR:
	    case POSTDECREMENT_EXPR:
	    case PREDECREMENT_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;

	      incr_ok = true;
	      if (!fail
		  && TREE_CODE (cond) == NE_EXPR
		  && TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE
		  && TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)))
		  && (TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl))))
		      != INTEGER_CST))
		{
		  /* For pointer to VLA, transform != into < or >
		     depending on whether incr is increment or decrement.  */
		  if (TREE_CODE (incr) == PREINCREMENT_EXPR
		      || TREE_CODE (incr) == POSTINCREMENT_EXPR)
		    TREE_SET_CODE (cond, LT_EXPR);
		  else
		    TREE_SET_CODE (cond, GT_EXPR);
		}
	      incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr);
	      break;

	    case COMPOUND_EXPR:
	      if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR
		  || TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR)
		break;
	      incr = TREE_OPERAND (incr, 1);
	      /* FALLTHRU */
	    case MODIFY_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;
	      if (TREE_OPERAND (incr, 1) == decl)
		break;
	      if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
		  && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
		      || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
		incr_ok = true;
	      else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
			|| (TREE_CODE (TREE_OPERAND (incr, 1))
			    == POINTER_PLUS_EXPR))
		       && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
		incr_ok = true;
	      else
		{
		  tree t = check_omp_for_incr_expr (elocus,
						    TREE_OPERAND (incr, 1),
						    decl);
		  if (t != error_mark_node)
		    {
		      incr_ok = true;
		      t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
		      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
		    }
		}
	      if (!fail && incr_ok && TREE_CODE (cond) == NE_EXPR)
		{
		  /* With a != condition the step must be exactly 1 or -1
		     (for pointers: exactly one element in either
		     direction) for the loop to be well-defined.  */
		  tree i = TREE_OPERAND (incr, 1);
		  i = TREE_OPERAND (i, TREE_OPERAND (i, 0) == decl);
		  i = c_fully_fold (i, false, NULL);
		  if (!final_p
		      && TREE_CODE (i) != INTEGER_CST)
		    ;
		  else if (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE)
		    {
		      tree unit
			= TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)));
		      if (unit)
			{
			  enum tree_code ccode = GT_EXPR;
			  unit = c_fully_fold (unit, false, NULL);
			  i = fold_convert (TREE_TYPE (unit), i);
			  if (operand_equal_p (unit, i, 0))
			    ccode = LT_EXPR;
			  if (ccode == GT_EXPR)
			    {
			      i = fold_unary (NEGATE_EXPR, TREE_TYPE (i), i);
			      if (i == NULL_TREE
				  || !operand_equal_p (unit, i, 0))
				{
				  error_at (elocus,
					    "increment is not constant 1 or "
					    "-1 for %<!=%> condition");
				  fail = true;
				}
			    }
			  if (TREE_CODE (unit) != INTEGER_CST)
			    /* For pointer to VLA, transform != into < or >
			       depending on whether the pointer is
			       incremented or decremented in each
			       iteration.  */
			    TREE_SET_CODE (cond, ccode);
			}
		    }
		  else
		    {
		      if (!integer_onep (i) && !integer_minus_onep (i))
			{
			  error_at (elocus,
				    "increment is not constant 1 or -1 for"
				    " %<!=%> condition");
			  fail = true;
			}
		    }
		}
	      break;

	    default:
	      break;
	    }
	  if (!incr_ok)
	    {
	      error_at (elocus, "invalid increment expression");
	      fail = true;
	    }
	}

      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (incrv, i) = incr;
    }

  if (fail)
    return NULL;
  else
    {
      tree t = make_node (code);

      TREE_TYPE (t) = void_type_node;
      OMP_FOR_INIT (t) = initv;
      OMP_FOR_COND (t) = condv;
      OMP_FOR_INCR (t) = incrv;
      OMP_FOR_BODY (t) = body;
      OMP_FOR_PRE_BODY (t) = pre_body;
      OMP_FOR_ORIG_DECLS (t) = orig_declv;

      SET_EXPR_LOCATION (t, locus);
      return t;
    }
}

/* Type for passing data in between c_omp_check_loop_iv and
   c_omp_check_loop_iv_r.  */

struct c_omp_check_loop_iv_data
{
  tree declv;		/* Vector of loop iteration variables.  */
  bool fail;		/* Set when a diagnostic has been emitted.  */
  bool maybe_nonrect;	/* Set when a non-rectangular reference is seen.  */
  location_t stmt_loc;	/* Location of the loop construct.  */
  location_t expr_loc;	/* Location of the expression being walked.  */
  int kind;		/* 0 init, 1 cond, 2 incr; bit 2 = allow nonrect.  */
  int idx;		/* Index of the current loop in the nest.  */
  walk_tree_lh lh;
  hash_set<tree> *ppset;
};

/* Return -1 if DECL is not a loop iterator in loop nest D, otherwise
   return the index of the loop in which it is an iterator.
   Return TREE_VEC_LENGTH (d->declv) if it is a C++ range for iterator.  */

static int
c_omp_is_loop_iterator (tree decl, struct c_omp_check_loop_iv_data *d)
{
  for (int i = 0; i < TREE_VEC_LENGTH (d->declv); i++)
    if (decl == TREE_VEC_ELT (d->declv, i)
	|| (TREE_CODE (TREE_VEC_ELT (d->declv, i)) == TREE_LIST
	    && decl == TREE_PURPOSE (TREE_VEC_ELT (d->declv, i))))
      return i;
    else if (TREE_CODE (TREE_VEC_ELT (d->declv, i)) == TREE_LIST
	     && TREE_CHAIN (TREE_VEC_ELT (d->declv, i))
	     && (TREE_CODE (TREE_CHAIN (TREE_VEC_ELT (d->declv, i)))
		 == TREE_VEC)
	     && decl == TREE_VEC_ELT (TREE_CHAIN (TREE_VEC_ELT (d->declv,
								i)), 2))
      return TREE_VEC_LENGTH (d->declv);
  return -1;
}

/* Helper function called via walk_tree, to diagnose uses of associated
   loop IVs inside of lb, b and incr expressions of OpenMP loops.
*/

static tree
c_omp_check_loop_iv_r (tree *tp, int *walk_subtrees, void *data)
{
  struct c_omp_check_loop_iv_data *d
    = (struct c_omp_check_loop_iv_data *) data;
  if (DECL_P (*tp))
    {
      int idx = c_omp_is_loop_iterator (*tp, d);
      if (idx == -1)
	return NULL_TREE;

      /* If non-rectangular references are allowed (bit 2 of kind) and
	 this refers to an outer loop's iterator, just note it.  */
      if ((d->kind & 4) && idx < d->idx)
	{
	  d->maybe_nonrect = true;
	  return NULL_TREE;
	}

      /* Diagnose each offending decl only once.  */
      if (d->ppset->add (*tp))
	return NULL_TREE;

      location_t loc = d->expr_loc;
      if (loc == UNKNOWN_LOCATION)
	loc = d->stmt_loc;

      switch (d->kind & 3)
	{
	case 0:
	  error_at (loc, "initializer expression refers to "
			 "iteration variable %qD", *tp);
	  break;
	case 1:
	  error_at (loc, "condition expression refers to "
			 "iteration variable %qD", *tp);
	  break;
	case 2:
	  error_at (loc, "increment expression refers to "
			 "iteration variable %qD", *tp);
	  break;
	}
      d->fail = true;
    }
  else if (d->ppset->add (*tp))
    *walk_subtrees = 0;
  /* Don't walk dtors added by C++ wrap_cleanups_r.  */
  else if (TREE_CODE (*tp) == TRY_CATCH_EXPR
	   && TRY_CATCH_IS_CLEANUP (*tp))
    {
      *walk_subtrees = 0;
      return walk_tree_1 (&TREE_OPERAND (*tp, 0), c_omp_check_loop_iv_r, data,
			  NULL, d->lh);
    }

  return NULL_TREE;
}

/* Check the allowed expressions for non-rectangular loop nest lb and b
   expressions.  Return the outer var decl referenced in the expression.  */

static tree
c_omp_check_nonrect_loop_iv (tree *tp, struct c_omp_check_loop_iv_data *d,
			     walk_tree_lh lh)
{
  d->maybe_nonrect = false;
  if (d->fail)
    return NULL_TREE;

  hash_set<tree> pset;
  hash_set<tree> *ppset = d->ppset;
  d->ppset = &pset;

  tree t = *tp;
  if (TREE_CODE (t) == TREE_VEC
      && TREE_VEC_LENGTH (t) == 3
      && DECL_P (TREE_VEC_ELT (t, 0))
      && c_omp_is_loop_iterator (TREE_VEC_ELT (t, 0), d) >= 0)
    {
      d->kind &= 3;
      /* Walk both the multiplier (ELT 1) and the addend (ELT 2).
	 The previous code walked ELT 1 twice and never checked ELT 2,
	 so invalid IV references in the addend went undiagnosed.  */
      walk_tree_1 (&TREE_VEC_ELT (t, 1), c_omp_check_loop_iv_r, d, NULL, lh);
      walk_tree_1 (&TREE_VEC_ELT (t, 2), c_omp_check_loop_iv_r, d, NULL, lh);
      d->ppset = ppset;
      return d->fail ? NULL_TREE : TREE_VEC_ELT (t, 0);
    }

  while (CONVERT_EXPR_P (t))
    t = TREE_OPERAND (t, 0);

  /* Decompose T into outer-var * a1 + a2 form, tracking the signs of the
     multiplier and addend.  */
  tree a1 = t, a2 = integer_zero_node;
  bool neg_a1 = false, neg_a2 = false;
  switch (TREE_CODE (t))
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
      a1 = TREE_OPERAND (t, 0);
      a2 = TREE_OPERAND (t, 1);
      while (CONVERT_EXPR_P (a1))
	a1 = TREE_OPERAND (a1, 0);
      while (CONVERT_EXPR_P (a2))
	a2 = TREE_OPERAND (a2, 0);
      if (DECL_P (a1) && c_omp_is_loop_iterator (a1, d) >= 0)
	{
	  a2 = TREE_OPERAND (t, 1);
	  if (TREE_CODE (t) == MINUS_EXPR)
	    neg_a2 = true;
	  t = a1;
	  break;
	}
      if (DECL_P (a2) && c_omp_is_loop_iterator (a2, d) >= 0)
	{
	  a1 = TREE_OPERAND (t, 0);
	  if (TREE_CODE (t) == MINUS_EXPR)
	    neg_a1 = true;
	  t = a2;
	  a2 = a1;
	  break;
	}
      if (TREE_CODE (a1) == MULT_EXPR && TREE_CODE (a2) == MULT_EXPR)
	{
	  tree o1 = TREE_OPERAND (a1, 0);
	  tree o2 = TREE_OPERAND (a1, 1);
	  while (CONVERT_EXPR_P (o1))
	    o1 = TREE_OPERAND (o1, 0);
	  while (CONVERT_EXPR_P (o2))
	    o2 = TREE_OPERAND (o2, 0);
	  if ((DECL_P (o1) && c_omp_is_loop_iterator (o1, d) >= 0)
	      || (DECL_P (o2) && c_omp_is_loop_iterator (o2, d) >= 0))
	    {
	      a2 = TREE_OPERAND (t, 1);
	      if (TREE_CODE (t) == MINUS_EXPR)
		neg_a2 = true;
	      t = a1;
	      break;
	    }
	}
      if (TREE_CODE (a2) == MULT_EXPR)
	{
	  a1 = TREE_OPERAND (t, 0);
	  if (TREE_CODE (t) == MINUS_EXPR)
	    neg_a1 = true;
	  t = a2;
	  a2 = a1;
	  break;
	}
      if (TREE_CODE (a1) == MULT_EXPR)
	{
	  a2 = TREE_OPERAND (t, 1);
	  if (TREE_CODE (t) == MINUS_EXPR)
	    neg_a2 = true;
	  t = a1;
	  break;
	}
      a2 = integer_zero_node;
      break;
    default:
      break;
    }

  a1 = integer_one_node;
  if (TREE_CODE (t) == MULT_EXPR)
    {
      tree o1 = TREE_OPERAND (t, 0);
      tree o2 = TREE_OPERAND (t, 1);
      while (CONVERT_EXPR_P (o1))
	o1 = TREE_OPERAND (o1, 0);
      while (CONVERT_EXPR_P (o2))
	o2 = TREE_OPERAND (o2, 0);
      if (DECL_P (o1) && c_omp_is_loop_iterator (o1, d) >= 0)
	{
	  a1 = TREE_OPERAND (t, 1);
	  t = o1;
	}
      else if (DECL_P (o2) && c_omp_is_loop_iterator (o2, d) >= 0)
	{
	  a1 = TREE_OPERAND (t, 0);
	  t = o2;
	}
    }

  d->kind &= 3;
  tree ret = NULL_TREE;
  if (DECL_P (t) && c_omp_is_loop_iterator (t, d) >= 0)
    {
      location_t loc = d->expr_loc;
      if (loc == UNKNOWN_LOCATION)
	loc = d->stmt_loc;
      /* The outer IV must have the expression's type, and multiplier and
	 addend must be integral and must not themselves reference IVs.  */
      if (!lang_hooks.types_compatible_p (TREE_TYPE (*tp), TREE_TYPE (t)))
	{
	  if (d->kind == 0)
	    error_at (loc, "outer iteration variable %qD used in initializer"
			   " expression has type other than %qT",
		      t, TREE_TYPE (*tp));
	  else
	    error_at (loc, "outer iteration variable %qD used in condition"
			   " expression has type other than %qT",
		      t, TREE_TYPE (*tp));
	  d->fail = true;
	}
      else if (!INTEGRAL_TYPE_P (TREE_TYPE (a1)))
	{
	  error_at (loc, "outer iteration variable %qD multiplier expression"
			 " %qE is not integral", t, a1);
	  d->fail = true;
	}
      else if (!INTEGRAL_TYPE_P (TREE_TYPE (a2)))
	{
	  error_at (loc, "outer iteration variable %qD addend expression"
			 " %qE is not integral", t, a2);
	  d->fail = true;
	}
      else
	{
	  walk_tree_1 (&a1, c_omp_check_loop_iv_r, d, NULL, lh);
	  walk_tree_1 (&a2, c_omp_check_loop_iv_r, d, NULL, lh);
	}
      if (!d->fail)
	{
	  /* Canonicalize into a TREE_VEC (outer-var, multiplier, addend)
	     for gimplification.  */
	  a1 = fold_convert (TREE_TYPE (*tp), a1);
	  a2 = fold_convert (TREE_TYPE (*tp), a2);
	  if (neg_a1)
	    a1 = fold_build1 (NEGATE_EXPR, TREE_TYPE (a1), a1);
	  if (neg_a2)
	    a2 = fold_build1 (NEGATE_EXPR, TREE_TYPE (a2), a2);
	  ret = t;
	  *tp = make_tree_vec (3);
	  TREE_VEC_ELT (*tp, 0) = t;
	  TREE_VEC_ELT (*tp, 1) = a1;
	  TREE_VEC_ELT (*tp, 2) = a2;
	}
    }
  else
    walk_tree_1 (&t, c_omp_check_loop_iv_r, d, NULL, lh);

  d->ppset = ppset;
  return ret;
}

/* Diagnose invalid references to loop iterators in lb, b and incr
   expressions.
*/

bool
c_omp_check_loop_iv (tree stmt, tree declv, walk_tree_lh lh)
{
  hash_set<tree> pset;
  struct c_omp_check_loop_iv_data data;
  int i;

  data.declv = declv;
  data.fail = false;
  data.maybe_nonrect = false;
  data.stmt_loc = EXPR_LOCATION (stmt);
  data.lh = lh;
  data.ppset = &pset;
  for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (stmt)); i++)
    {
      tree init = TREE_VEC_ELT (OMP_FOR_INIT (stmt), i);
      gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
      tree decl = TREE_OPERAND (init, 0);
      tree cond = TREE_VEC_ELT (OMP_FOR_COND (stmt), i);
      gcc_assert (COMPARISON_CLASS_P (cond));
      gcc_assert (TREE_OPERAND (cond, 0) == decl);
      tree incr = TREE_VEC_ELT (OMP_FOR_INCR (stmt), i);
      data.expr_loc = EXPR_LOCATION (TREE_OPERAND (init, 1));
      tree vec_outer1 = NULL_TREE, vec_outer2 = NULL_TREE;
      int kind = 0;
      if (i > 0
	  && (unsigned) c_omp_is_loop_iterator (decl, &data) < (unsigned) i)
	{
	  location_t loc = data.expr_loc;
	  if (loc == UNKNOWN_LOCATION)
	    loc = data.stmt_loc;
	  error_at (loc, "the same loop iteration variables %qD used in "
			 "multiple associated loops", decl);
	  data.fail = true;
	}
      /* Handle non-rectangular loop nests.  */
      if (TREE_CODE (stmt) != OACC_LOOP
	  && (TREE_CODE (TREE_OPERAND (init, 1)) == TREE_VEC
	      || INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (init, 1))))
	  && i > 0)
	kind = 4;
      data.kind = kind;
      data.idx = i;
      walk_tree_1 (&TREE_OPERAND (init, 1),
		   c_omp_check_loop_iv_r, &data, NULL, lh);
      if (data.maybe_nonrect)
	vec_outer1 = c_omp_check_nonrect_loop_iv (&TREE_OPERAND (init, 1),
						  &data, lh);
      /* Don't warn for C++ random access iterators here, the expression
	 then involves the subtraction and always refers to the original
	 value.  The C++ FE needs to warn on those earlier.  */
      if (decl == TREE_VEC_ELT (declv, i)
	  || (TREE_CODE (TREE_VEC_ELT (declv, i)) == TREE_LIST
	      && decl == TREE_PURPOSE (TREE_VEC_ELT (declv, i))))
	{
	  data.expr_loc = EXPR_LOCATION (cond);
	  data.kind = kind | 1;
	  walk_tree_1 (&TREE_OPERAND (cond, 1),
		       c_omp_check_loop_iv_r, &data, NULL, lh);
	  if (data.maybe_nonrect)
	    vec_outer2 = c_omp_check_nonrect_loop_iv (&TREE_OPERAND (cond, 1),
						      &data, lh);
	}
      /* A non-rectangular loop may only depend on a single outer IV.  */
      if (vec_outer1 && vec_outer2 && vec_outer1 != vec_outer2)
	{
	  location_t loc = data.expr_loc;
	  if (loc == UNKNOWN_LOCATION)
	    loc = data.stmt_loc;
	  error_at (loc, "two different outer iteration variables %qD and %qD"
			 " used in a single loop", vec_outer1, vec_outer2);
	  data.fail = true;
	}
      if (vec_outer1 || vec_outer2)
	OMP_FOR_NON_RECTANGULAR (stmt) = 1;
      if (TREE_CODE (incr) == MODIFY_EXPR)
	{
	  gcc_assert (TREE_OPERAND (incr, 0) == decl);
	  incr = TREE_OPERAND (incr, 1);
	  data.kind = 2;
	  /* Walk only the step operand, not the iterator itself.  */
	  if (TREE_CODE (incr) == PLUS_EXPR
	      && TREE_OPERAND (incr, 1) == decl)
	    {
	      data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 0));
	      walk_tree_1 (&TREE_OPERAND (incr, 0),
			   c_omp_check_loop_iv_r, &data, NULL, lh);
	    }
	  else
	    {
	      data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 1));
	      walk_tree_1 (&TREE_OPERAND (incr, 1),
			   c_omp_check_loop_iv_r, &data, NULL, lh);
	    }
	}
    }
  return !data.fail;
}

/* Similar, but allows to check the init or cond expressions individually.
*/

/* STMT_LOC is the location of the construct, DECLV the declared iteration
   variables, I the index of this associated loop, DECL its iterator, and
   INIT and/or COND the expressions to check (either may be NULL_TREE).
   LH is the language hook forwarded to walk_tree_1.  Returns true if no
   error was diagnosed.  */

bool
c_omp_check_loop_iv_exprs (location_t stmt_loc, tree declv, int i, tree decl,
			   tree init, tree cond, walk_tree_lh lh)
{
  hash_set<tree> pset;
  struct c_omp_check_loop_iv_data data;

  data.declv = declv;
  data.fail = false;
  data.maybe_nonrect = false;
  data.stmt_loc = stmt_loc;
  data.lh = lh;
  data.ppset = &pset;
  data.idx = i;
  if (i > 0
      && (unsigned) c_omp_is_loop_iterator (decl, &data) < (unsigned) i)
    {
      error_at (stmt_loc, "the same loop iteration variables %qD used in "
			  "multiple associated loops", decl);
      data.fail = true;
    }
  if (init)
    {
      data.expr_loc = EXPR_LOCATION (init);
      data.kind = 0;
      walk_tree_1 (&init,
		   c_omp_check_loop_iv_r, &data, NULL, lh);
    }
  if (cond)
    {
      gcc_assert (COMPARISON_CLASS_P (cond));
      /* Use the condition's own location here; INIT may be NULL_TREE when
	 only COND is being checked, and c_omp_check_loop_iv likewise uses
	 the cond location when walking the condition bound.  */
      data.expr_loc = EXPR_LOCATION (cond);
      data.kind = 1;
      /* Walk the bound expression, i.e. whichever operand is not the
	 iteration variable itself.  */
      if (TREE_OPERAND (cond, 0) == decl)
	walk_tree_1 (&TREE_OPERAND (cond, 1),
		     c_omp_check_loop_iv_r, &data, NULL, lh);
      else
	walk_tree_1 (&TREE_OPERAND (cond, 0),
		     c_omp_check_loop_iv_r, &data, NULL, lh);
    }
  return !data.fail;
}

/* This function splits clauses for OpenACC combined loop
   constructs.  OpenACC combined loop constructs are:
   #pragma acc kernels loop
   #pragma acc parallel loop
   CLAUSES is the clause chain of the combined construct; loop clauses are
   returned and the remaining clauses are stored in *NOT_LOOP_CLAUSES.
   IS_PARALLEL is true for parallel loop (reductions are then duplicated
   on both constructs).  */

tree
c_oacc_split_loop_clauses (tree clauses, tree *not_loop_clauses,
			   bool is_parallel)
{
  tree next, loop_clauses, nc;

  loop_clauses = *not_loop_clauses = NULL_TREE;
  for (; clauses ; clauses = next)
    {
      next = OMP_CLAUSE_CHAIN (clauses);

      switch (OMP_CLAUSE_CODE (clauses))
	{
	  /* Loop clauses.  */
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_TILE:
	case OMP_CLAUSE_GANG:
	case OMP_CLAUSE_WORKER:
	case OMP_CLAUSE_VECTOR:
	case OMP_CLAUSE_AUTO:
	case OMP_CLAUSE_SEQ:
	case OMP_CLAUSE_INDEPENDENT:
	case OMP_CLAUSE_PRIVATE:
	  OMP_CLAUSE_CHAIN (clauses) = loop_clauses;
	  loop_clauses = clauses;
	  break;

	  /* Reductions must be duplicated on both constructs.  */
	case OMP_CLAUSE_REDUCTION:
	  if (is_parallel)
	    {
	      nc = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				     OMP_CLAUSE_REDUCTION);
	      OMP_CLAUSE_DECL (nc) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_REDUCTION_CODE (nc)
		= OMP_CLAUSE_REDUCTION_CODE (clauses);
	      OMP_CLAUSE_CHAIN (nc) = *not_loop_clauses;
	      *not_loop_clauses = nc;
	    }

	  OMP_CLAUSE_CHAIN (clauses) = loop_clauses;
	  loop_clauses = clauses;
	  break;

	  /* Parallel/kernels clauses.  */
	default:
	  OMP_CLAUSE_CHAIN (clauses) = *not_loop_clauses;
	  *not_loop_clauses = clauses;
	  break;
	}
    }

  return loop_clauses;
}

/* This function attempts to split or duplicate clauses for OpenMP
   combined/composite constructs.  Right now there are 30 different
   constructs.  CODE is the innermost construct in the combined construct,
   and MASK allows to determine which constructs are combined together,
   as every construct has at least one clause that no other construct
   has (except for OMP_SECTIONS, but that can be only combined with parallel,
   and OMP_MASTER, which doesn't have any clauses at all).
   OpenMP combined/composite constructs are:
   #pragma omp distribute parallel for
   #pragma omp distribute parallel for simd
   #pragma omp distribute simd
   #pragma omp for simd
   #pragma omp master taskloop
   #pragma omp master taskloop simd
   #pragma omp parallel for
   #pragma omp parallel for simd
   #pragma omp parallel loop
   #pragma omp parallel master
   #pragma omp parallel master taskloop
   #pragma omp parallel master taskloop simd
   #pragma omp parallel sections
   #pragma omp target parallel
   #pragma omp target parallel for
   #pragma omp target parallel for simd
   #pragma omp target parallel loop
   #pragma omp target teams
   #pragma omp target teams distribute
   #pragma omp target teams distribute parallel for
   #pragma omp target teams distribute parallel for simd
   #pragma omp target teams distribute simd
   #pragma omp target teams loop
   #pragma omp target simd
   #pragma omp taskloop simd
   #pragma omp teams distribute
   #pragma omp teams distribute parallel for
   #pragma omp teams distribute parallel for simd
   #pragma omp teams distribute simd
   #pragma omp teams loop

   LOC is the location of the construct, CLAUSES the parsed clause chain,
   and CCLAUSES an array of C_OMP_CLAUSE_SPLIT_COUNT chains which this
   function fills in, one per leaf construct.  */

void
c_omp_split_clauses (location_t loc, enum tree_code code,
		     omp_clause_mask mask, tree clauses, tree *cclauses)
{
  tree next, c;
  enum c_omp_clause_split s;
  int i;

  for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
    cclauses[i] = NULL;
  /* Add implicit nowait clause on
     #pragma omp parallel {for,for simd,sections}.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
    switch (code)
      {
      case OMP_FOR:
      case OMP_SIMD:
	if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	  cclauses[C_OMP_CLAUSE_SPLIT_FOR]
	    = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
	break;
      case OMP_SECTIONS:
	cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]
	  = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
	break;
      default:
	break;
      }

  /* Destructively move each clause onto the chain of the leaf construct
     chosen in S, duplicating clauses that belong on several leaves.  */
  for (; clauses ; clauses = next)
    {
      next = OMP_CLAUSE_CHAIN (clauses);

      switch (OMP_CLAUSE_CODE (clauses))
	{
	/* First the clauses that are unique to some constructs.  */
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_MAP:
	case OMP_CLAUSE_IS_DEVICE_PTR:
	case OMP_CLAUSE_DEFAULTMAP:
	case OMP_CLAUSE_DEPEND:
	  s = C_OMP_CLAUSE_SPLIT_TARGET;
	  break;
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	  s = C_OMP_CLAUSE_SPLIT_TEAMS;
	  break;
	case OMP_CLAUSE_DIST_SCHEDULE:
	  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	  break;
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_PROC_BIND:
	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  break;
	case OMP_CLAUSE_ORDERED:
	  s = C_OMP_CLAUSE_SPLIT_FOR;
	  break;
	case OMP_CLAUSE_SCHEDULE:
	  s = C_OMP_CLAUSE_SPLIT_FOR;
	  if (code != OMP_SIMD)
	    OMP_CLAUSE_SCHEDULE_SIMD (clauses) = 0;
	  break;
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_SIMDLEN:
	case OMP_CLAUSE_ALIGNED:
	case OMP_CLAUSE_NONTEMPORAL:
	  s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	case OMP_CLAUSE_GRAINSIZE:
	case OMP_CLAUSE_NUM_TASKS:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_NOGROUP:
	case OMP_CLAUSE_PRIORITY:
	  s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	  break;
	case OMP_CLAUSE_BIND:
	  s = C_OMP_CLAUSE_SPLIT_LOOP;
	  break;
	/* Duplicate this to all of taskloop, distribute, for, simd and
	   loop.  */
	case OMP_CLAUSE_COLLAPSE:
	  if (code == OMP_SIMD)
	    {
	      if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_NOGROUP))) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_COLLAPSE);
		  OMP_CLAUSE_COLLAPSE_EXPR (c)
		    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
		  cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
		}
	      else
		{
		  /* This must be #pragma omp target simd  */
		  s = C_OMP_CLAUSE_SPLIT_SIMD;
		  break;
		}
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1
			   << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_COLLAPSE);
		  OMP_CLAUSE_COLLAPSE_EXPR (c)
		    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
		  cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
		  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
		}
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
		   != 0)
	    s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	  else if (code == OMP_LOOP)
	    s = C_OMP_CLAUSE_SPLIT_LOOP;
	  else
	    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	  break;
	/* Private clause is supported on all constructs but master,
	   it is enough to put it on the innermost one other than master.  For
	   #pragma omp {for,sections} put it on parallel though,
	   as that's what we did for OpenMP 3.1.  */
	case OMP_CLAUSE_PRIVATE:
	  switch (code)
	    {
	    case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break;
	    case OMP_FOR: case OMP_SECTIONS:
	    case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
	    case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break;
	    case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break;
	    case OMP_MASTER: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
	    case OMP_TASKLOOP: s = C_OMP_CLAUSE_SPLIT_TASKLOOP; break;
	    case OMP_LOOP: s = C_OMP_CLAUSE_SPLIT_LOOP; break;
	    default: gcc_unreachable ();
	    }
	  break;
	/* Firstprivate clause is supported on all constructs but
	   simd, master and loop.  Put it on the outermost of those and
	   duplicate on teams and parallel.  */
	case OMP_CLAUSE_FIRSTPRIVATE:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
	      != 0)
	    {
	      if (code == OMP_SIMD
		  && (mask & ((OMP_CLAUSE_MASK_1
			       << PRAGMA_OMP_CLAUSE_NUM_THREADS)
			      | (OMP_CLAUSE_MASK_1
				 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))) == 0)
		{
		  /* This must be #pragma omp target simd.  */
		  s = C_OMP_CLAUSE_SPLIT_TARGET;
		  break;
		}
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_FIRSTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
	      cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
	      != 0)
	    {
	      if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_FIRSTPRIVATE);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
		  if ((mask & (OMP_CLAUSE_MASK_1
			       << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
		    s = C_OMP_CLAUSE_SPLIT_TEAMS;
		  else
		    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
		}
	      else if ((mask & (OMP_CLAUSE_MASK_1
				<< PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
		/* This must be
		   #pragma omp parallel master taskloop{, simd}.  */
		s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	      else
		/* This must be
		   #pragma omp parallel{, for{, simd}, sections,loop}
		   or
		   #pragma omp target parallel.  */
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
		   != 0)
	    {
	      /* This must be one of
		 #pragma omp {,target }teams {distribute,loop}
		 #pragma omp target teams
		 #pragma omp {,target }teams distribute simd.  */
	      gcc_assert (code == OMP_DISTRIBUTE || code == OMP_LOOP
			  || code == OMP_TEAMS || code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
	    {
	      /* This must be #pragma omp distribute simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
		   != 0)
	    {
	      /* This must be
		 #pragma omp {,{,parallel }master }taskloop simd
		 or
		 #pragma omp {,parallel }master taskloop.  */
	      gcc_assert (code == OMP_SIMD || code == OMP_TASKLOOP);
	      s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	    }
	  else
	    {
	      /* This must be #pragma omp for simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  break;
	/* Lastprivate is allowed on distribute, for, sections, taskloop, loop
	   and simd.  In parallel {for{, simd},sections} we actually want to
	   put it on parallel rather than for or sections.  */
	case OMP_CLAUSE_LASTPRIVATE:
	  if (code == OMP_DISTRIBUTE)
	    {
	      s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	      break;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1
		       << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_LASTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE];
	      OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)
		= OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clauses);
	      cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] = c;
	    }
	  if (code == OMP_FOR || code == OMP_SECTIONS)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	      break;
	    }
	  if (code == OMP_TASKLOOP)
	    {
	      s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	      break;
	    }
	  if (code == OMP_LOOP)
	    {
	      s = C_OMP_CLAUSE_SPLIT_LOOP;
	      break;
	    }
	  gcc_assert (code == OMP_SIMD);
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_LASTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)
		= OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clauses);
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	      OMP_CLAUSE_CHAIN (c) = cclauses[s];
	      cclauses[s] = c;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_LASTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)
		= OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP];
	      cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP] = c;
	    }
	  s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	/* Shared and default clauses are allowed on parallel, teams and
	   taskloop.  */
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_DEFAULT:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
	      != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_CODE (clauses));
		  if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
		    OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  else
		    OMP_CLAUSE_DEFAULT_KIND (c)
		      = OMP_CLAUSE_DEFAULT_KIND (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
		}
	      s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	      break;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1
			   << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0)
		{
		  s = C_OMP_CLAUSE_SPLIT_TEAMS;
		  break;
		}
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_CODE (clauses));
	      if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
		OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      else
		OMP_CLAUSE_DEFAULT_KIND (c)
		  = OMP_CLAUSE_DEFAULT_KIND (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
	      cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
	    }
	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  break;
	/* order clauses are allowed on for, simd and loop.  */
	case OMP_CLAUSE_ORDER:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if (code == OMP_SIMD)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_ORDER);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
		  cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
		  s = C_OMP_CLAUSE_SPLIT_SIMD;
		}
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else if (code == OMP_LOOP)
	    s = C_OMP_CLAUSE_SPLIT_LOOP;
	  else
	    s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	/* Reduction is allowed on simd, for, parallel, sections, taskloop,
	   teams and loop.  Duplicate it on all of them, but omit on for or
	   sections if parallel is present (unless inscan, in that case
	   omit on parallel).  If taskloop or loop is combined with
	   parallel, omit it on parallel.  */
	case OMP_CLAUSE_REDUCTION:
	  if (OMP_CLAUSE_REDUCTION_TASK (clauses))
	    {
	      if (code == OMP_SIMD || code == OMP_LOOP)
		{
		  error_at (OMP_CLAUSE_LOCATION (clauses),
			    "invalid %<task%> reduction modifier on construct "
			    "combined with %<simd%> or %<loop%>");
		  OMP_CLAUSE_REDUCTION_TASK (clauses) = 0;
		}
	      else if (code != OMP_SECTIONS
		       && (mask & (OMP_CLAUSE_MASK_1
				   << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0
		       && (mask & (OMP_CLAUSE_MASK_1
				   << PRAGMA_OMP_CLAUSE_SCHEDULE)) == 0)
		{
		  error_at (OMP_CLAUSE_LOCATION (clauses),
			    "invalid %<task%> reduction modifier on construct "
			    "not combined with %<parallel%>, %<for%> or "
			    "%<sections%>");
		  OMP_CLAUSE_REDUCTION_TASK (clauses) = 0;
		}
	    }
	  if (OMP_CLAUSE_REDUCTION_INSCAN (clauses)
	      && ((mask & ((OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)
			   | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)))
		  != 0))
	    {
	      error_at (OMP_CLAUSE_LOCATION (clauses),
			"%<inscan%> %<reduction%> clause on construct other "
			"than %<for%>, %<simd%>, %<for simd%>, "
			"%<parallel for%>, %<parallel for simd%>");
	      OMP_CLAUSE_REDUCTION_INSCAN (clauses) = 0;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if (code == OMP_SIMD)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_REDUCTION);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_REDUCTION_CODE (c)
		    = OMP_CLAUSE_REDUCTION_CODE (clauses);
		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
		  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
		  OMP_CLAUSE_REDUCTION_INSCAN (c)
		    = OMP_CLAUSE_REDUCTION_INSCAN (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
		  cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
		}
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
		  != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_REDUCTION);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_REDUCTION_CODE (c)
		    = OMP_CLAUSE_REDUCTION_CODE (clauses);
		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
		  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
		  OMP_CLAUSE_REDUCTION_INSCAN (c)
		    = OMP_CLAUSE_REDUCTION_INSCAN (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
		  cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
		  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
		}
	      else if ((mask & (OMP_CLAUSE_MASK_1
				<< PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0
		       && !OMP_CLAUSE_REDUCTION_INSCAN (clauses))
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else if (code == OMP_SECTIONS || code == OMP_PARALLEL
		   || code == OMP_MASTER)
	    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  else if (code == OMP_TASKLOOP)
	    s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	  else if (code == OMP_LOOP)
	    s = C_OMP_CLAUSE_SPLIT_LOOP;
	  else if (code == OMP_SIMD)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
		  != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_REDUCTION);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_REDUCTION_CODE (c)
		    = OMP_CLAUSE_REDUCTION_CODE (clauses);
		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
		  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
		  OMP_CLAUSE_REDUCTION_INSCAN (c)
		    = OMP_CLAUSE_REDUCTION_INSCAN (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP];
		  cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP] = c;
		}
	      s = C_OMP_CLAUSE_SPLIT_SIMD;
	    }
	  else
	    s = C_OMP_CLAUSE_SPLIT_TEAMS;
	  break;
	case OMP_CLAUSE_IN_REDUCTION:
	  /* in_reduction on taskloop simd becomes reduction on the simd
	     and keeps being in_reduction on taskloop.  */
	  if (code == OMP_SIMD)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_REDUCTION);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_REDUCTION_CODE (c)
		= OMP_CLAUSE_REDUCTION_CODE (clauses);
	      OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		= OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
	      OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
		= OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
	      cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
	    }
	  s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	  break;
	case OMP_CLAUSE_IF:
	  /* If the clause carries a directive-name-modifier, it goes only
	     on the named leaf construct (if that leaf is present).  */
	  if (OMP_CLAUSE_IF_MODIFIER (clauses) != ERROR_MARK)
	    {
	      s = C_OMP_CLAUSE_SPLIT_COUNT;
	      switch (OMP_CLAUSE_IF_MODIFIER (clauses))
		{
		case OMP_PARALLEL:
		  if ((mask & (OMP_CLAUSE_MASK_1
			       << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
		    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
		  break;
		case OMP_SIMD:
		  if (code == OMP_SIMD)
		    s = C_OMP_CLAUSE_SPLIT_SIMD;
		  break;
		case OMP_TASKLOOP:
		  if ((mask & (OMP_CLAUSE_MASK_1
			       << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
		    s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
		  break;
		case OMP_TARGET:
		  if ((mask & (OMP_CLAUSE_MASK_1
			       << PRAGMA_OMP_CLAUSE_MAP)) != 0)
		    s = C_OMP_CLAUSE_SPLIT_TARGET;
		  break;
		default:
		  break;
		}
	      if (s != C_OMP_CLAUSE_SPLIT_COUNT)
		break;
	      /* Error-recovery here, invalid if-modifier specified, add the
		 clause to just one construct.  */
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0)
		s = C_OMP_CLAUSE_SPLIT_TARGET;
	      else if ((mask & (OMP_CLAUSE_MASK_1
				<< PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else if ((mask & (OMP_CLAUSE_MASK_1
				<< PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
		s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	      else if (code == OMP_SIMD)
		s = C_OMP_CLAUSE_SPLIT_SIMD;
	      else
		gcc_unreachable ();
	      break;
	    }
	  /* Otherwise, duplicate if clause to all constructs.  */
	  if (code == OMP_SIMD)
	    {
	      if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_NUM_THREADS)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_NOGROUP))) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_IF);
		  OMP_CLAUSE_IF_MODIFIER (c)
		    = OMP_CLAUSE_IF_MODIFIER (clauses);
		  OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
		  cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
		}
	      else
		{
		  s = C_OMP_CLAUSE_SPLIT_SIMD;
		  break;
		}
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
	      != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1
			   << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_IF);
		  OMP_CLAUSE_IF_MODIFIER (c)
		    = OMP_CLAUSE_IF_MODIFIER (clauses);
		  OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP];
		  cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP] = c;
		  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
		}
	      else
		s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_IF);
		  OMP_CLAUSE_IF_MODIFIER (c)
		    = OMP_CLAUSE_IF_MODIFIER (clauses);
		  OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
		  cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
		  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
		}
	      else
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	    }
	  else
	    s = C_OMP_CLAUSE_SPLIT_TARGET;
	  break;
	case OMP_CLAUSE_LINEAR:
	  /* Linear clause is allowed on simd and for.  Put it on the
	     innermost construct.  */
	  if (code == OMP_SIMD)
	    s = C_OMP_CLAUSE_SPLIT_SIMD;
	  else
	    s = C_OMP_CLAUSE_SPLIT_FOR;
	  break;
	case OMP_CLAUSE_NOWAIT:
	  /* Nowait clause is allowed on target, for and sections, but
	     is not allowed on parallel for or parallel sections.
	     Therefore, put it on target construct if present, because
	     that can only be combined with parallel for{, simd} and not
	     with for{, simd}, otherwise to the worksharing construct.  */
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
	      != 0)
	    s = C_OMP_CLAUSE_SPLIT_TARGET;
	  else
	    s = C_OMP_CLAUSE_SPLIT_FOR;
	  break;
	default:
	  gcc_unreachable ();
	}
      /* Move the clause itself onto the chosen leaf's chain.  */
      OMP_CLAUSE_CHAIN (clauses) = cclauses[s];
      cclauses[s] = clauses;
    }

  if (!flag_checking)
    return;

  /* Sanity-check: no leaf chain may be populated for a construct that
     is not part of the combined directive.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TARGET] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] == NULL_TREE);
  if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
	       | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) == 0
      && code != OMP_SECTIONS
      && code != OMP_LOOP)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_FOR] == NULL_TREE);
  if (code != OMP_SIMD)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_SIMD] == NULL_TREE);
}

/* qsort callback to compare #pragma omp declare simd clauses.
*/

/* Sorts by descending clause code, and for argument-bearing clauses by
   descending argument index (the chain is later rebuilt in vector order,
   so the resulting chain is ascending).  Clauses must already have their
   OMP_CLAUSE_DECL converted to argument indexes.  */

static int
c_omp_declare_simd_clause_cmp (const void *p, const void *q)
{
  tree a = *(const tree *) p;
  tree b = *(const tree *) q;
  if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_CODE (b))
    {
      if (OMP_CLAUSE_CODE (a) > OMP_CLAUSE_CODE (b))
	return -1;
      return 1;
    }
  /* simdlen, inbranch and notinbranch carry no argument reference.  */
  if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_SIMDLEN
      && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_INBRANCH
      && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_NOTINBRANCH)
    {
      int c = tree_to_shwi (OMP_CLAUSE_DECL (a));
      int d = tree_to_shwi (OMP_CLAUSE_DECL (b));
      if (c < d)
	return 1;
      if (c > d)
	return -1;
    }
  return 0;
}

/* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd
   CLAUSES on FNDECL into argument indexes and sort them.
   PARMS is the parameter chain; returns the sorted clause chain, or
   NULL_TREE if there were no clauses.  Clauses naming something that is
   not a parameter are diagnosed and dropped.  */

tree
c_omp_declare_simd_clauses_to_numbers (tree parms, tree clauses)
{
  tree c;
  vec<tree> clvec = vNULL;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
	  && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
	  && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
	{
	  tree decl = OMP_CLAUSE_DECL (c);
	  tree arg;
	  int idx;
	  /* Find the parameter's position in the argument list.  */
	  for (arg = parms, idx = 0; arg;
	       arg = TREE_CHAIN (arg), idx++)
	    if (arg == decl)
	      break;
	  if (arg == NULL_TREE)
	    {
	      error_at (OMP_CLAUSE_LOCATION (c),
			"%qD is not a function argument", decl);
	      continue;
	    }
	  OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, idx);
	  /* A variable linear step is itself a parameter reference and
	     must be converted the same way.  */
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	      && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c))
	    {
	      decl = OMP_CLAUSE_LINEAR_STEP (c);
	      for (arg = parms, idx = 0; arg;
		   arg = TREE_CHAIN (arg), idx++)
		if (arg == decl)
		  break;
	      if (arg == NULL_TREE)
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "%qD is not a function argument", decl);
		  continue;
		}
	      OMP_CLAUSE_LINEAR_STEP (c)
		= build_int_cst (integer_type_node, idx);
	    }
	}
      clvec.safe_push (c);
    }
  if (!clvec.is_empty ())
    {
      unsigned int len = clvec.length (), i;
      clvec.qsort (c_omp_declare_simd_clause_cmp);
      /* Rebuild the clause chain in sorted order.  */
      clauses = clvec[0];
      for (i = 0; i < len; i++)
	OMP_CLAUSE_CHAIN (clvec[i]) = (i < len - 1) ? clvec[i + 1] : NULL_TREE;
    }
  else
    clauses = NULL_TREE;
  clvec.release ();
  return clauses;
}

/* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs.  */

void
c_omp_declare_simd_clauses_to_decls (tree fndecl, tree clauses)
{
  tree c;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
	&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
	&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
      {
	int idx = tree_to_shwi (OMP_CLAUSE_DECL (c)), i;
	tree arg;
	/* Walk to the IDX-th parameter of FNDECL.  */
	for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
	     arg = TREE_CHAIN (arg), i++)
	  if (i == idx)
	    break;
	gcc_assert (arg);
	OMP_CLAUSE_DECL (c) = arg;
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	    && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c))
	  {
	    idx = tree_to_shwi (OMP_CLAUSE_LINEAR_STEP (c));
	    for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
		 arg = TREE_CHAIN (arg), i++)
	      if (i == idx)
		break;
	    gcc_assert (arg);
	    OMP_CLAUSE_LINEAR_STEP (c) = arg;
	  }
      }
}

/* Return true for __func__ and similar function-local predefined
   variables (which are in OpenMP predetermined shared, allowed in
   shared/firstprivate clauses).  */

bool
c_omp_predefined_variable (tree decl)
{
  if (VAR_P (decl)
      && DECL_ARTIFICIAL (decl)
      && TREE_READONLY (decl)
      && TREE_STATIC (decl)
      && DECL_NAME (decl)
      && (DECL_NAME (decl) == ridpointers[RID_C99_FUNCTION_NAME]
	  || DECL_NAME (decl) == ridpointers[RID_FUNCTION_NAME]
	  || DECL_NAME (decl) == ridpointers[RID_PRETTY_FUNCTION_NAME]))
    return true;
  return false;
}

/* OMP_CLAUSE_DEFAULT_UNSPECIFIED unless OpenMP sharing attribute
   of DECL is predetermined.  */

enum omp_clause_default_kind
c_omp_predetermined_sharing (tree decl)
{
  /* Predetermine artificial variables holding integral values, those
     are usually result of gimplify_one_sizepos or SAVE_EXPR
     gimplification.
  */
  if (VAR_P (decl)
      && DECL_ARTIFICIAL (decl)
      && INTEGRAL_TYPE_P (TREE_TYPE (decl)))
    return OMP_CLAUSE_DEFAULT_SHARED;

  /* __func__ and friends are predetermined shared as well.  */
  if (c_omp_predefined_variable (decl))
    return OMP_CLAUSE_DEFAULT_SHARED;

  return OMP_CLAUSE_DEFAULT_UNSPECIFIED;
}

/* OMP_CLAUSE_DEFAULTMAP_CATEGORY_UNSPECIFIED unless OpenMP mapping attribute
   of DECL is predetermined.  */

enum omp_clause_defaultmap_kind
c_omp_predetermined_mapping (tree decl)
{
  /* Predetermine artificial variables holding integral values, those
     are usually result of gimplify_one_sizepos or SAVE_EXPR
     gimplification.  */
  if (VAR_P (decl)
      && DECL_ARTIFICIAL (decl)
      && INTEGRAL_TYPE_P (TREE_TYPE (decl)))
    return OMP_CLAUSE_DEFAULTMAP_FIRSTPRIVATE;

  if (c_omp_predefined_variable (decl))
    return OMP_CLAUSE_DEFAULTMAP_TO;

  return OMP_CLAUSE_DEFAULTMAP_CATEGORY_UNSPECIFIED;
}

/* Diagnose errors in an OpenMP context selector, return CTX if it is correct
   or error_mark_node otherwise.  LOC is the location used for all
   diagnostics.  CTX is a TREE_LIST of selector sets, each of whose
   TREE_VALUE is a TREE_LIST of selectors with their property lists.  */

tree
c_omp_check_context_selector (location_t loc, tree ctx)
{
  /* Each trait-set-selector-name can only be specified once.
     There are just 4 set names.  */
  for (tree t1 = ctx; t1; t1 = TREE_CHAIN (t1))
    for (tree t2 = TREE_CHAIN (t1); t2; t2 = TREE_CHAIN (t2))
      if (TREE_PURPOSE (t1) == TREE_PURPOSE (t2))
	{
	  error_at (loc, "selector set %qs specified more than once",
		    IDENTIFIER_POINTER (TREE_PURPOSE (t1)));
	  return error_mark_node;
	}
  for (tree t = ctx; t; t = TREE_CHAIN (t))
    {
      /* Each trait-selector-name can only be specified once.
	 Use the quadratic scan for short lists and a hash_set for
	 longer ones.  */
      if (list_length (TREE_VALUE (t)) < 5)
	{
	  for (tree t1 = TREE_VALUE (t); t1; t1 = TREE_CHAIN (t1))
	    for (tree t2 = TREE_CHAIN (t1); t2; t2 = TREE_CHAIN (t2))
	      if (TREE_PURPOSE (t1) == TREE_PURPOSE (t2))
		{
		  error_at (loc,
			    "selector %qs specified more than once in set %qs",
			    IDENTIFIER_POINTER (TREE_PURPOSE (t1)),
			    IDENTIFIER_POINTER (TREE_PURPOSE (t)));
		  return error_mark_node;
		}
	}
      else
	{
	  hash_set<tree> pset;
	  for (tree t1 = TREE_VALUE (t); t1; t1 = TREE_CHAIN (t1))
	    if (pset.add (TREE_PURPOSE (t1)))
	      {
		error_at (loc,
			  "selector %qs specified more than once in set %qs",
			  IDENTIFIER_POINTER (TREE_PURPOSE (t1)),
			  IDENTIFIER_POINTER (TREE_PURPOSE (t)));
		return error_mark_node;
	      }
	}

      /* Tables of the property names known for particular selectors;
	 each is terminated by a NULL sentinel.  */
      static const char *const kind[] = {
	"host", "nohost", "cpu", "gpu", "fpga", "any", NULL };
      static const char *const vendor[] = {
	"amd", "arm", "bsc", "cray", "fujitsu", "gnu", "ibm", "intel",
	"llvm", "nvidia", "pgi", "ti", "unknown", NULL };
      static const char *const extension[] = { NULL };
      static const char *const atomic_default_mem_order[] = {
	"seq_cst", "relaxed", "acq_rel", NULL };
      struct known_properties { const char *set; const char *selector;
				const char *const *props; };
      known_properties props[] = {
	{ "device", "kind", kind },
	{ "implementation", "vendor", vendor },
	{ "implementation", "extension", extension },
	{ "implementation", "atomic_default_mem_order",
	  atomic_default_mem_order } };
      for (tree t1 = TREE_VALUE (t); t1; t1 = TREE_CHAIN (t1))
	for (unsigned i = 0; i < ARRAY_SIZE (props); i++)
	  if (!strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t1)),
					   props[i].selector)
	      && !strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t)),
			  props[i].set))
	    for (tree t2 = TREE_VALUE (t1); t2; t2 = TREE_CHAIN (t2))
	      for (unsigned j = 0; ; j++)
		{
		  if (props[i].props[j] == NULL)
		    {
		      /* " score" marks the optional score clause; it is
			 never an unknown property.  */
		      if (TREE_PURPOSE (t2)
			  && !strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
				      " score"))
			break;
		      /* atomic_default_mem_order properties form a closed
			 set, so an unknown one is a hard error; for the
			 other selectors just warn.  */
		      if (props[i].props == atomic_default_mem_order)
			{
			  error_at (loc,
				    "incorrect property %qs of %qs selector",
				    IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
				    "atomic_default_mem_order");
			  return error_mark_node;
			}
		      else if (TREE_PURPOSE (t2))
			warning_at (loc, 0,
				    "unknown property %qs of %qs selector",
				    IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
				    props[i].selector);
		      else
			warning_at (loc, 0,
				    "unknown property %qE of %qs selector",
				    TREE_VALUE (t2), props[i].selector);
		      break;
		    }
		  else if (TREE_PURPOSE (t2) == NULL_TREE)
		    {
		      /* String property: compare contents and length.  */
		      const char *str = TREE_STRING_POINTER (TREE_VALUE (t2));
		      if (!strcmp (str, props[i].props[j])
			  && ((size_t) TREE_STRING_LENGTH (TREE_VALUE (t2))
			      == strlen (str) + 1))
			break;
		    }
		  else if (!strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
				    props[i].props[j]))
		    break;
		}
    }
  return ctx;
}

/* Register VARIANT as variant of some base function marked with
   #pragma omp declare variant.  CONSTRUCT is corresponding construct
   selector set.  */

void
c_omp_mark_declare_variant (location_t loc, tree variant, tree construct)
{
  tree attr = lookup_attribute ("omp declare variant variant",
				DECL_ATTRIBUTES (variant));
  if (attr == NULL_TREE)
    {
      attr = tree_cons (get_identifier ("omp declare variant variant"),
			unshare_expr (construct),
			DECL_ATTRIBUTES (variant));
      DECL_ATTRIBUTES (variant) = attr;
      return;
    }
  /* If VARIANT is already registered, its construct selector set must
     agree with CONSTRUCT.  */
  if ((TREE_VALUE (attr) != NULL_TREE) != (construct != NULL_TREE)
      || (construct != NULL_TREE
	  && omp_context_selector_set_compare ("construct", TREE_VALUE (attr),
					       construct)))
    error_at (loc, "%qD used as a variant with incompatible %<construct%> "
		   "selector sets", variant);
}

/* For OpenACC, the OMP_CLAUSE_MAP_KIND of an OMP_CLAUSE_MAP is used internally
   to distinguish clauses as seen by the user.  Return the "friendly" clause
   name for error messages etc., where possible.  See also
   c/c-parser.c:c_parser_oacc_data_clause and
   cp/parser.c:cp_parser_oacc_data_clause.
*/ const char * c_omp_map_clause_name (tree clause, bool oacc) { if (oacc && OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_MAP) switch (OMP_CLAUSE_MAP_KIND (clause)) { case GOMP_MAP_FORCE_ALLOC: case GOMP_MAP_ALLOC: return "create"; case GOMP_MAP_FORCE_TO: case GOMP_MAP_TO: return "copyin"; case GOMP_MAP_FORCE_FROM: case GOMP_MAP_FROM: return "copyout"; case GOMP_MAP_FORCE_TOFROM: case GOMP_MAP_TOFROM: return "copy"; case GOMP_MAP_RELEASE: return "delete"; case GOMP_MAP_FORCE_PRESENT: return "present"; case GOMP_MAP_ATTACH: return "attach"; case GOMP_MAP_FORCE_DETACH: case GOMP_MAP_DETACH: return "detach"; case GOMP_MAP_DEVICE_RESIDENT: return "device_resident"; case GOMP_MAP_LINK: return "link"; case GOMP_MAP_FORCE_DEVICEPTR: return "deviceptr"; default: break; } return omp_clause_code_name[OMP_CLAUSE_CODE (clause)]; }
TaskQueueRange.h
// -*- C++ -*-

/*!
  \file TaskQueueRange.h
  \brief A task queue over a static range of jobs, popped under an OpenMP
  critical section.
*/

#if !defined(__concurrent_partition_TaskQueueRange_h__)
#define __concurrent_partition_TaskQueueRange_h__

namespace concurrent {

//! A task queue for a static range of jobs.
/*!
  I tested this class using GCC 4.2 on a 1.66 GHz Intel Core Duo.  I examined
  tasks with a range of costs.  The task cost is measured in evaluations of
  the sine function.  The execution times below are measured in milliseconds
  per task.
  <table>
  <tr> <th> Task Cost <th> 0 <th> 1 <th> 10 <th> 100
  <tr> <th> 2 Threads <td> 11.3 <td> 10.8 <td> 1.25 <td> 7.90
  <tr> <th> 1 Thread <td> 0.07 <td> 0.16 <td> 0.85 <td> 7.63
  <tr> <th> Serial <td> 0 <td> 0.08 <td> 0.77 <td> 7.56
  </table>
  When the tasks are very inexpensive (0 or 1 evaluations of the sine
  function) the contention for the tasks exacts a heavy penalty (about 11
  milliseconds per task).  For larger tasks (100 sine evaluations) queueing
  the tasks incurs a negligible overhead.  For medium-sized tasks
  (10 sine evaluations) queueing the tasks incurs a significant overhead
  (about half the cost of the task).
*/
template < typename _ForwardIterator = int >
class TaskQueueRange {
   //
   // Public types.
   //

   // The iterator type for the job range (an integer by default).
   typedef _ForwardIterator Iterator;

   //
   // Member variables.
   //

private:

   Iterator _iterator;  // Next job to hand out.
   Iterator _begin;     // First job in the range.
   Iterator _end;       // One past the last job.

   //--------------------------------------------------------------------------
   //! \name Constructors etc.
   //@{
public:

   //! Default constructor.  Empty task queue.
   TaskQueueRange() :
      _iterator(),
      _begin(),
      _end() {}

   //! Construct from the iterator range.
   TaskQueueRange(const Iterator begin, const Iterator end) :
      _iterator(begin),
      _begin(begin),
      _end(end) {}

   //! Destructor.
   ~TaskQueueRange() {}

   //@}
   //--------------------------------------------------------------------------
   //! \name Accessors.
   //@{
public:

   //! Return the beginning of the index range.
   Iterator
   getBeginning() const {
      return _begin;
   }

   //! Return the end of the index range.
   Iterator
   getEnd() const {
      return _end;
   }

   //@}
   //--------------------------------------------------------------------------
   //! \name Manipulators.
   //@{
public:

   //! Pop a task of the queue.
   /*!
     This function is thread-safe.  Returns getEnd() when the queue is
     exhausted.
   */
   Iterator
   pop() {
      Iterator result;
      // The critical section binds to the whole if/else statement below,
      // so the read-and-advance of _iterator is atomic across threads.
      // The return itself is outside the critical section, which is safe
      // because result is thread-local.
      #pragma omp critical
      if (_iterator != _end) {
         result = _iterator;
         ++_iterator;
      }
      else {
         result = _end;
      }
      return result;
   }

   //! Reset the index to the beginning of the range.
   /*!
     \note This function is not thread-safe.
   */
   void
   reset() {
      _iterator = _begin;
   }

   //@}
};

} // namespace concurrent

#endif
binary_tree.c
// The Computer Language Benchmarks Game // https://salsa.debian.org/benchmarksgame-team/benchmarksgame/ // // Contributed by Jeremy Zerfas // Based on the C++ program from Jon Harrop, Alex Mizrahi, and Bruno Coutinho. // *reset* // This controls the width of lines that are output by this program. #define MAXIMUM_LINE_WIDTH 60 #include <stdint.h> #include <stdlib.h> #include <stdio.h> typedef off_t off64_t; // This is needed to keep APR happy on 32 bit systems. #include <apr-1/apr_pools.h> // intptr_t should be the native integer type on most sane systems. typedef intptr_t intnative_t; typedef struct tree_node{ struct tree_node * left_Node, * right_Node; } tree_node; // Create a binary tree of depth tree_Depth in memory_Pool, set the root node's // value to root_Node_Value, and finally return a pointer to the created binary // tree. static inline tree_node * create_Tree(const intnative_t tree_Depth, apr_pool_t * const memory_Pool){ tree_node * const root_Node=apr_palloc(memory_Pool, sizeof(tree_node)); // If tree_Depth is one or more then recursively call create_Tree() in order // to create the left and right subtrees using 2*root_Node_Value-1 and // 2*root_Node_Value respectively as the root values for those subtrees. if(tree_Depth>0){ root_Node->left_Node=create_Tree(tree_Depth-1, memory_Pool); root_Node->right_Node=create_Tree(tree_Depth-1, memory_Pool); }else root_Node->left_Node=root_Node->right_Node=NULL; return root_Node; } // Compute and return the checksum for the binary tree that has root_Node as the // root node. static inline intnative_t compute_Tree_Checksum( const tree_node * const root_Node){ // If there are subtrees then recursively call compute_Tree_Checksum() on // them and factor their values into the checksum, otherwise just return // the value of root_Node. 
if(root_Node->left_Node) return compute_Tree_Checksum(root_Node->left_Node)+ compute_Tree_Checksum(root_Node->right_Node)+1; else return 1; } int main(int argc, char ** argv){ // Set minimum_Tree_Depth to 4 and maximum_Tree_Depth to the maximum of what // was specified as the argument to the program and minimum_Tree_Depth+2. const intnative_t minimum_Tree_Depth=4; intnative_t maximum_Tree_Depth=atoi(argv[1]); if(maximum_Tree_Depth < minimum_Tree_Depth+2) maximum_Tree_Depth=minimum_Tree_Depth+2; apr_initialize(); apr_pool_t * memory_Pool; // Create a memory pool, create a binary tree of depth maximum_Tree_Depth+1, // compute the checksum of the binary tree, print the statistics, and then // delete the memory pool. apr_pool_create_unmanaged(&memory_Pool); tree_node * stretch_Tree=create_Tree(maximum_Tree_Depth+1, memory_Pool); printf("stretch tree of depth %jd\t check: %jd\n", (intmax_t)maximum_Tree_Depth+1, (intmax_t)compute_Tree_Checksum(stretch_Tree)); apr_pool_destroy(memory_Pool); // Create a memory pool and then create a long-lived binary tree of depth // maximum_Tree_Depth which will be left alone for a while while // more binary trees get allocated and deallocaited as required by the // rules. We'll finish working with this later. apr_pool_create_unmanaged(&memory_Pool); tree_node * long_Lived_Tree=create_Tree(maximum_Tree_Depth, memory_Pool); // Create a lot of binary trees in parallel of depths ranging from // minimum_Tree_Depth to maximum_Tree_Depth, compute and tally up all their // checksums, destroy the trees, and then record the statistics to // output_Buffer[] so they can be displayed in order later. 
char output_Buffer[maximum_Tree_Depth+1][MAXIMUM_LINE_WIDTH+1]; intnative_t current_Tree_Depth; #pragma omp parallel for for(current_Tree_Depth=minimum_Tree_Depth; current_Tree_Depth<=maximum_Tree_Depth; current_Tree_Depth+=2){ intnative_t iterations=1<<(maximum_Tree_Depth-current_Tree_Depth+ minimum_Tree_Depth); // Create a memory pool for this thread to use. apr_pool_t * thread_Memory_Pool; apr_pool_create_unmanaged(&thread_Memory_Pool); intnative_t i=1, total_Trees_Checksum=0; for(; i<=iterations; ++i){ // Create a binary tree of depth current_Tree_Depth tree_node * const tree_1=create_Tree(current_Tree_Depth, thread_Memory_Pool); total_Trees_Checksum+=compute_Tree_Checksum(tree_1); apr_pool_clear(thread_Memory_Pool); } apr_pool_destroy(thread_Memory_Pool); // Record the statistics for the trees of depth current_Tree_Depth. sprintf(output_Buffer[current_Tree_Depth], "%jd\t trees of depth %jd\t check: %jd\n", (intmax_t)iterations, (intmax_t)current_Tree_Depth, (intmax_t)total_Trees_Checksum); } // Print the statistics for all of the various tree depths. for(current_Tree_Depth=minimum_Tree_Depth; current_Tree_Depth<=maximum_Tree_Depth; current_Tree_Depth+=2) printf("%s", output_Buffer[current_Tree_Depth]); // Compute the checksum of the long-lived binary tree that we created // earlier, print the statistics, and then delete the memory pool. printf("long lived tree of depth %jd\t check: %jd\n", (intmax_t)maximum_Tree_Depth, (intmax_t)compute_Tree_Checksum(long_Lived_Tree)); apr_pool_destroy(memory_Pool); apr_terminate(); return 0; }
parallel_for.h
/*
    Copyright (c) 2016, Taiga Nomi
    All rights reserved.

    Use of this source code is governed by a BSD-style license that can be
    found in the LICENSE file.
*/
#pragma once

#include <cassert>
#include <cstdio>
#include <limits>
#include <string>
#include <type_traits>
#include <vector>

#include "aligned_allocator.h"
#include "nn_error.h"
#include "tiny_dnn/config.h"

#ifdef CNN_USE_TBB
#ifndef NOMINMAX
#define NOMINMAX  // tbb includes windows.h in tbb/machine/windows_api.h
#endif
#include <tbb/task_group.h>
#include <tbb/tbb.h>
#endif

#if !defined(CNN_USE_OMP) && !defined(CNN_SINGLE_THREAD)
#include <future>
#include <thread>
#endif

#if defined(CNN_USE_GCD) && !defined(CNN_SINGLE_THREAD)
#include <dispatch/dispatch.h>
#endif

namespace tiny_dnn {

#ifdef CNN_USE_TBB

// Global TBB scheduler, initialized once per process.
static tbb::task_scheduler_init tbbScheduler(
  tbb::task_scheduler_init::automatic);  // tbb::task_scheduler_init::deferred);

typedef tbb::blocked_range<int> blocked_range;

// Run f over [begin, end) in parallel using TBB, splitting into chunks of at
// most `grainsize` (or 1 when the range is smaller than the grainsize).
template <typename Func>
void parallel_for(int begin, int end, const Func &f, int grainsize) {
  tbb::parallel_for(
    blocked_range(begin, end, end - begin > grainsize ? grainsize : 1), f);
}

// Serial fallback: invoke f once on the whole range.
template <typename Func>
void xparallel_for(int begin, int end, const Func &f) {
  f(blocked_range(begin, end, 100));
}

#else

// Minimal stand-in for tbb::blocked_range when TBB is unavailable.
struct blocked_range {
  typedef int const_iterator;

  blocked_range(int begin, int end) : begin_(begin), end_(end) {}
  blocked_range(size_t begin, size_t end)
    : begin_(static_cast<int>(begin)), end_(static_cast<int>(end)) {}

  const_iterator begin() const { return begin_; }
  const_iterator end() const { return end_; }

 private:
  int begin_;
  int end_;
};

// Serial execution: invoke f once on the whole range.
template <typename Func>
void xparallel_for(size_t begin, size_t end, const Func &f) {
  blocked_range r(begin, end);
  f(r);
}

#if defined(CNN_USE_OMP)

// OpenMP backend: each index becomes a single-element range passed to f.
template <typename Func>
void parallel_for(int begin, int end, const Func &f, int /*grainsize*/) {
#pragma omp parallel for
  for (int i = begin; i < end; ++i) f(blocked_range(i, i + 1));
}

#elif defined(CNN_USE_GCD)

// Grand Central Dispatch backend: split the range into grainsize-sized
// blocks and dispatch them onto the global queue.
template <typename Func>
void parallel_for(int begin, int end, const Func &f, int grainsize) {
  int count     = end - begin;
  int blockSize = grainsize;
  if (count < blockSize || blockSize == 0) {
    blockSize = 1;
  }
  int blockCount = (count + blockSize - 1) / blockSize;
  assert(blockCount > 0);

  dispatch_apply(blockCount, dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0),
                 ^(size_t block) {
                   int blockStart = static_cast<int>(block * blockSize);
                   int blockEnd   = blockStart + blockSize;
                   if (blockEnd > end) {
                     blockEnd = end;
                   }
                   assert(blockStart < blockEnd);

                   f(blocked_range(blockStart, blockEnd));
                 });
}

#elif defined(CNN_SINGLE_THREAD)

// Single-threaded build: delegate to the serial implementation.
template <typename Func>
void parallel_for(int begin, int end, const Func &f, int /*grainsize*/) {
  xparallel_for(static_cast<size_t>(begin), static_cast<size_t>(end), f);
}

#else

// std::thread backend: split the range into one block per hardware thread
// and run each block in a std::async task.
// NOTE(review): std::thread::hardware_concurrency() may legally return 0, in
// which case the blockSize division below divides by zero — confirm whether
// any supported platform hits this and guard if so.
template <typename Func>
void parallel_for(int start, int end, const Func &f, int /*grainsize*/) {
  int nthreads  = std::thread::hardware_concurrency();
  int blockSize = (end - start) / nthreads;
  if (blockSize * nthreads < end - start) blockSize++;

  std::vector<std::future<void> > futures;

  int blockStart = start;
  int blockEnd   = blockStart + blockSize;
  if (blockEnd > end) blockEnd = end;

  for (int i = 0; i < nthreads; i++) {
    futures.push_back(
      std::move(std::async(std::launch::async, [blockStart, blockEnd, &f] {
        f(blocked_range(blockStart, blockEnd));
      })));

    blockStart += blockSize;
    blockEnd = blockStart + blockSize;
    if (blockStart >= end) break;
    if (blockEnd > end) blockEnd = end;
  }

  for (auto &future : futures) future.wait();
}

#endif

#endif  // CNN_USE_TBB

// True when `value` survives a round-trip through type T unchanged, i.e. it
// is exactly representable as a T.
template <typename T, typename U>
bool value_representation(U const &value) {
  return static_cast<U>(static_cast<T>(value)) == value;
}

// Unsigned-`end` overload: only parallelize when `end` fits in an int,
// since blocked_range is int-based.
template <typename T, typename Func>
inline void for_(std::true_type, bool parallelize, int begin, T end, Func f,
                 int grainsize = 100) {
  parallelize = parallelize && value_representation<int>(end);
  parallelize ? parallel_for(begin, static_cast<int>(end), f, grainsize)
              : xparallel_for(begin, static_cast<int>(end), f);
}

// Signed-`end` overload.
template <typename T, typename Func>
inline void for_(std::false_type, bool parallelize, int begin, T end, Func f,
                 int grainsize = 100) {
  parallelize ? parallel_for(begin, static_cast<int>(end), f, grainsize)
              : xparallel_for(begin, end, f);
}

// Dispatch on the signedness of T to pick the overload above.
template <typename T, typename Func>
inline void for_(
  bool parallelize, int begin, T end, Func f, int grainsize = 100) {
  static_assert(std::is_integral<T>::value, "end must be integral type");
  for_(typename std::is_unsigned<T>::type(), parallelize, begin, end, f,
       grainsize);
}

// Apply f(i) for i in [0, size), optionally in parallel.
// NOTE(review): under CNN_USE_OMP the inner pragma here nests inside the
// omp-parallel parallel_for above — presumably relying on nested regions
// being serialized by default; verify the intended OMP behavior.
template <typename T, typename Func>
void for_i(bool parallelize, T size, Func f, int grainsize = 100) {
#ifdef CNN_SINGLE_THREAD
  parallelize = false;
#endif
  for_(parallelize, 0, size,
       [&](const blocked_range &r) {
#ifdef CNN_USE_OMP
#pragma omp parallel for
#endif
         for (int i = r.begin(); i < r.end(); i++) f(i);
       },
       grainsize);
}

// Convenience overload: parallelize by default.
template <typename T, typename Func>
void for_i(T size, Func f, int grainsize = 100) {
  for_i(true, size, f, grainsize);
}

}  // namespace tiny_dnn
Matriplex.h
#ifndef Matriplex_H
#define Matriplex_H

#include "MatriplexCommon.h"

namespace Matriplex {

//------------------------------------------------------------------------------

// Structure-of-arrays container holding N matrices of size D1 x D2.
// Element (i,j) of all N matrices is stored contiguously, so per-lane
// operations across the N matrices vectorize naturally.
template<typename T, idx_t D1, idx_t D2, idx_t N>
class Matriplex
{
public:

   typedef T value_type;

   enum
   {
      /// return no. of matrix rows
      kRows = D1,
      /// return no. of matrix columns
      kCols = D2,
      /// return no of elements: rows*columns
      kSize = D1 * D2,
      /// size of the whole matriplex
      kTotSize = N * kSize
   };

   // Backing storage; 64-byte aligned for AVX-512 loads/stores.
   T fArray[kTotSize] __attribute__((aligned(64)));

   Matriplex()    {}
   Matriplex(T v) { SetVal(v); }

   //! Number of matrices held in the plex.
   idx_t PlexSize() const { return N; }

   //! Set every element of every matrix to v.
   void SetVal(T v)
   {
      for (idx_t i = 0; i < kTotSize; ++i)
      {
         fArray[i] = v;
      }
   }

   //! Element-wise addition of another matriplex.
   void Add(const Matriplex& v)
   {
      for (idx_t i = 0; i < kTotSize; ++i)
      {
         fArray[i] += v.fArray[i];
      }
   }

   //! Multiply every element by scale.
   void Scale(T scale)
   {
      for (idx_t i = 0; i < kTotSize; ++i)
      {
         fArray[i] *= scale;
      }
   }

   // Raw (flattened) element access.
   T  operator[](idx_t xx) const { return fArray[xx]; }
   T& operator[](idx_t xx)       { return fArray[xx]; }

   // Access element (i,j) of matrix n.
   const T& ConstAt(idx_t n, idx_t i, idx_t j) const { return fArray[(i * D2 + j) * N + n]; }

   T& At(idx_t n, idx_t i, idx_t j) { return fArray[(i * D2 + j) * N + n]; }

   T& operator()(idx_t n, idx_t i, idx_t j) { return fArray[(i * D2 + j) * N + n]; }
   const T& operator()(idx_t n, idx_t i, idx_t j) const { return fArray[(i * D2 + j) * N + n]; }

   Matriplex& operator=(const Matriplex& m)
   {
      memcpy(fArray, m.fArray, sizeof(T) * kTotSize);
      return *this;
   }

   //! Copy matrix slot n of m into slot n of this plex.
   void CopySlot(idx_t n, const Matriplex& m)
   {
      for (idx_t i = n; i < kTotSize; i += N)
      {
         fArray[i] = m.fArray[i];
      }
   }

   //! Scatter a dense D1*D2 array into slot n of this plex.
   void CopyIn(idx_t n, const T *arr)
   {
      for (idx_t i = n; i < kTotSize; i += N)
      {
         fArray[i] = *(arr++);
      }
   }

   //! Copy slot `in` of m into slot n of this plex.
   void CopyIn(idx_t n, const Matriplex& m, idx_t in)
   {
      for (idx_t i = n; i < kTotSize; i += N, in += N)
      {
         fArray[i] = m[in];
      }
   }

   //! Copy slot `in` of this plex into slot n.
   void Copy(idx_t n, idx_t in)
   {
      for (idx_t i = n; i < kTotSize; i += N, in += N)
      {
         fArray[i] = fArray[in];
      }
   }

#if defined(AVX512_INTRINSICS)

   // Gather N_proc matrices from scattered memory locations (offsets in vi,
   // scaled by sizeof(U)) into the plex, using masked AVX-512 gathers.
   template<typename U>
   void SlurpIn(const T *arr, __m512i& vi, const U&, const int N_proc = N)
   {
      //_mm512_prefetch_i32gather_ps(vi, arr, 1, _MM_HINT_T0);

      const __m512 src = { 0 };
      // Mask off the lanes beyond N_proc.
      const __mmask16 k = N_proc == N ? -1 : (1 << N_proc) - 1;

      for (int i = 0; i < kSize; ++i, ++arr)
      {
         //_mm512_prefetch_i32gather_ps(vi, arr+2, 1, _MM_HINT_NTA);

         __m512 reg = _mm512_mask_i32gather_ps(src, k, vi, arr, sizeof(U));
         _mm512_mask_store_ps(&fArray[i*N], k, reg);
      }
   }

/*
   // Experimental methods, SlurpIn() seems to be at least as fast.
   // See comments in mkFit/MkFitter.cc MkFitter::AddBestHit().

   void ChewIn(const char *arr, int off, int vi[N], const char *tmp, __m512i& ui)
   {
      // This is a hack ... we know sizeof(Hit) = 64 = cache line = vector width.

      for (int i = 0; i < N; ++i)
      {
         __m512 reg = _mm512_load_ps(arr + vi[i]);
        _mm512_store_ps((void*) (tmp + 64*i), reg);
      }

      for (int i = 0; i < kSize; ++i)
      {
         __m512 reg = _mm512_i32gather_ps(ui, tmp + off + i*sizeof(T), 1);
        _mm512_store_ps(&fArray[i*N], reg);
      }
   }

   void Contaginate(const char *arr, int vi[N], const char *tmp)
   {
      // This is a hack ... we know sizeof(Hit) = 64 = cache line = vector width.

      for (int i = 0; i < N; ++i)
      {
         __m512 reg = _mm512_load_ps(arr + vi[i]);
        _mm512_store_ps((void*) (tmp + 64*i), reg);
      }
   }

   void Plexify(const char *tmp, __m512i& ui)
   {
      for (int i = 0; i < kSize; ++i)
      {
         __m512 reg = _mm512_i32gather_ps(ui, tmp + i*sizeof(T), 1);
        _mm512_store_ps(&fArray[i*N], reg);
      }
   }
*/

#elif defined(AVX2_INTRINSICS)

   // AVX2 variant of the masked gather above (8 lanes per register).
   template<typename U>
   void SlurpIn(const T *arr, __m256i& vi, const U&, const int N_proc = N)
   {
      // Casts to float* needed to "support" also T=HitOnTrack.
      // Not needed for AVX_512 (?).
      // This (and AVX_512 version) will be a mess if one uses T with sizeof(T) != 4.
      // But really, we do not need to use Matriplexes for HitOnTrack vectors
      // [as well as for some other things in MkFitter/Finder but at least those
      // do not use SlurpIn].
      // Do we need specializations for float / double / HitOnTrack?

      const __m256 src = { 0 };

      __m256i k        = _mm256_setr_epi32( 0, 1, 2, 3, 4, 5, 6, 7 );
      __m256i k_sel    = _mm256_set1_epi32(N_proc);
      __m256i k_master = _mm256_cmpgt_epi32(k_sel, k);

      k = k_master;
      for (int i = 0; i < kSize; ++i, ++arr)
      {
         __m256 reg = _mm256_mask_i32gather_ps(src, (float*) arr, vi, (__m256) k, sizeof(U));
         // Restore mask (docs say gather clears it but it doesn't seem to).
         k = k_master;
         _mm256_maskstore_ps((float*) &fArray[i*N], k, reg);
      }
   }

#else

   // Scalar fallback: gather matrix j from arr + vi[j] for each of the
   // first N_proc lanes.
   void SlurpIn(const T *arr, int vi[N], const int N_proc = N)
   {
      // Separate N_proc == N case (gains about 7% in fit test).
      if (N_proc == N)
      {
         for (int i = 0; i < kSize; ++i)
         {
            // Next loop vectorizes with "#pragma ivdep", but it runs slower
            // #pragma ivdep
            for (int j = 0; j < N; ++j)
            {
               fArray[i*N + j] = * (arr + i + vi[j]);
            }
         }
      }
      else
      {
         for (int i = 0; i < kSize; ++i)
         {
            for (int j = 0; j < N_proc; ++j)
            {
               fArray[i*N + j] = * (arr + i + vi[j]);
            }
         }
      }
   }

#endif

   //! Copy slot n of this plex out into a dense D1*D2 array.
   void CopyOut(idx_t n, T *arr) const
   {
      for (idx_t i = n; i < kTotSize; i += N)
      {
         *(arr++) = fArray[i];
      }
   }
};


template<typename T, idx_t D1, idx_t D2, idx_t N>
using MPlex = Matriplex<T, D1, D2, N>;


//==============================================================================
// Multiplications
//==============================================================================

// Generic (unspecialized) plex matrix multiply: C = A * B, lane by lane.
template<typename T, idx_t D1, idx_t D2, idx_t D3, idx_t N>
void MultiplyGeneral(const MPlex<T, D1, D2, N>& A,
                     const MPlex<T, D2, D3, N>& B,
                     MPlex<T, D1, D3, N>& C)
{
   for (idx_t i = 0; i < D1; ++i)
   {
      for (idx_t j = 0; j < D3; ++j)
      {
         const idx_t ijo = N * (i * D3 + j);

         for (idx_t n = 0; n < N; ++n)
         {
            C.fArray[ijo + n] = 0;
         }

         //#pragma omp simd collapse(2)
         for (idx_t k = 0; k < D2; ++k)
         {
            const idx_t iko = N * (i * D2 + k);
            const idx_t kjo = N * (k * D3 + j);

#pragma omp simd
            for (idx_t n = 0; n < N; ++n)
            {
               // C.fArray[i, j, n] += A.fArray[i, k, n] * B.fArray[k, j, n];
               C.fArray[ijo + n] += A.fArray[iko + n] * B.fArray[kjo + n];
            }
         }
      }
   }
}
//------------------------------------------------------------------------------

// Dispatcher for square-matrix multiply; only D = 3 and D = 6 are
// specialized with fully unrolled kernels below.
template<typename T, idx_t D, idx_t N>
struct MultiplyCls
{
   static void Multiply(const MPlex<T, D, D, N>& A,
                        const MPlex<T, D, D, N>& B,
                        MPlex<T, D, D, N>& C)
   {
      throw std::runtime_error("general multiplication not supported, well, call MultiplyGeneral()");
   }
};

// Fully unrolled 3x3 multiply; the innermost dimension vectorizes over the
// N lanes of the plex.
template<typename T, idx_t N>
struct MultiplyCls<T, 3, N>
{
   static void Multiply(const MPlex<T, 3, 3, N>& A,
                        const MPlex<T, 3, 3, N>& B,
                        MPlex<T, 3, 3, N>& C)
   {
      const T *a = A.fArray; ASSUME_ALIGNED(a, 64);
      const T *b = B.fArray; ASSUME_ALIGNED(b, 64);
            T *c = C.fArray; ASSUME_ALIGNED(c, 64);

#pragma omp simd
      for (idx_t n = 0; n < N; ++n)
      {
         c[ 0*N+n] = a[ 0*N+n]*b[ 0*N+n] + a[ 1*N+n]*b[ 3*N+n] + a[ 2*N+n]*b[ 6*N+n];
         c[ 1*N+n] = a[ 0*N+n]*b[ 1*N+n] + a[ 1*N+n]*b[ 4*N+n] + a[ 2*N+n]*b[ 7*N+n];
         c[ 2*N+n] = a[ 0*N+n]*b[ 2*N+n] + a[ 1*N+n]*b[ 5*N+n] + a[ 2*N+n]*b[ 8*N+n];
         c[ 3*N+n] = a[ 3*N+n]*b[ 0*N+n] + a[ 4*N+n]*b[ 3*N+n] + a[ 5*N+n]*b[ 6*N+n];
         c[ 4*N+n] = a[ 3*N+n]*b[ 1*N+n] + a[ 4*N+n]*b[ 4*N+n] + a[ 5*N+n]*b[ 7*N+n];
         c[ 5*N+n] = a[ 3*N+n]*b[ 2*N+n] + a[ 4*N+n]*b[ 5*N+n] + a[ 5*N+n]*b[ 8*N+n];
         c[ 6*N+n] = a[ 6*N+n]*b[ 0*N+n] + a[ 7*N+n]*b[ 3*N+n] + a[ 8*N+n]*b[ 6*N+n];
         c[ 7*N+n] = a[ 6*N+n]*b[ 1*N+n] + a[ 7*N+n]*b[ 4*N+n] + a[ 8*N+n]*b[ 7*N+n];
         c[ 8*N+n] = a[ 6*N+n]*b[ 2*N+n] + a[ 7*N+n]*b[ 5*N+n] + a[ 8*N+n]*b[ 8*N+n];
      }
   }
};

// Fully unrolled 6x6 multiply (the common track-fit covariance size).
template<typename T, idx_t N>
struct MultiplyCls<T, 6, N>
{
   static void Multiply(const MPlex<T, 6, 6, N>& A,
                        const MPlex<T, 6, 6, N>& B,
                        MPlex<T, 6, 6, N>& C)
   {
      const T *a = A.fArray; ASSUME_ALIGNED(a, 64);
      const T *b = B.fArray; ASSUME_ALIGNED(b, 64);
            T *c = C.fArray; ASSUME_ALIGNED(c, 64);

#pragma omp simd
      for (idx_t n = 0; n < N; ++n)
      {
         c[ 0*N+n] = a[ 0*N+n]*b[ 0*N+n] + a[ 1*N+n]*b[ 6*N+n] + a[ 2*N+n]*b[12*N+n] + a[ 3*N+n]*b[18*N+n] + a[ 4*N+n]*b[24*N+n] + a[ 5*N+n]*b[30*N+n];
         c[ 1*N+n] = a[ 0*N+n]*b[ 1*N+n] + a[ 1*N+n]*b[ 7*N+n] + a[ 2*N+n]*b[13*N+n] + a[ 3*N+n]*b[19*N+n] + a[ 4*N+n]*b[25*N+n] + a[ 5*N+n]*b[31*N+n];
         c[ 2*N+n] = a[ 0*N+n]*b[ 2*N+n] + a[ 1*N+n]*b[ 8*N+n] + a[ 2*N+n]*b[14*N+n] + a[ 3*N+n]*b[20*N+n] + a[ 4*N+n]*b[26*N+n] + a[ 5*N+n]*b[32*N+n];
         c[ 3*N+n] = a[ 0*N+n]*b[ 3*N+n] + a[ 1*N+n]*b[ 9*N+n] + a[ 2*N+n]*b[15*N+n] + a[ 3*N+n]*b[21*N+n] + a[ 4*N+n]*b[27*N+n] + a[ 5*N+n]*b[33*N+n];
         c[ 4*N+n] = a[ 0*N+n]*b[ 4*N+n] + a[ 1*N+n]*b[10*N+n] + a[ 2*N+n]*b[16*N+n] + a[ 3*N+n]*b[22*N+n] + a[ 4*N+n]*b[28*N+n] + a[ 5*N+n]*b[34*N+n];
         c[ 5*N+n] = a[ 0*N+n]*b[ 5*N+n] + a[ 1*N+n]*b[11*N+n] + a[ 2*N+n]*b[17*N+n] + a[ 3*N+n]*b[23*N+n] + a[ 4*N+n]*b[29*N+n] + a[ 5*N+n]*b[35*N+n];
         c[ 6*N+n] = a[ 6*N+n]*b[ 0*N+n] + a[ 7*N+n]*b[ 6*N+n] + a[ 8*N+n]*b[12*N+n] + a[ 9*N+n]*b[18*N+n] + a[10*N+n]*b[24*N+n] + a[11*N+n]*b[30*N+n];
         c[ 7*N+n] = a[ 6*N+n]*b[ 1*N+n] + a[ 7*N+n]*b[ 7*N+n] + a[ 8*N+n]*b[13*N+n] + a[ 9*N+n]*b[19*N+n] + a[10*N+n]*b[25*N+n] + a[11*N+n]*b[31*N+n];
         c[ 8*N+n] = a[ 6*N+n]*b[ 2*N+n] + a[ 7*N+n]*b[ 8*N+n] + a[ 8*N+n]*b[14*N+n] + a[ 9*N+n]*b[20*N+n] + a[10*N+n]*b[26*N+n] + a[11*N+n]*b[32*N+n];
         c[ 9*N+n] = a[ 6*N+n]*b[ 3*N+n] + a[ 7*N+n]*b[ 9*N+n] + a[ 8*N+n]*b[15*N+n] + a[ 9*N+n]*b[21*N+n] + a[10*N+n]*b[27*N+n] + a[11*N+n]*b[33*N+n];
         c[10*N+n] = a[ 6*N+n]*b[ 4*N+n] + a[ 7*N+n]*b[10*N+n] + a[ 8*N+n]*b[16*N+n] + a[ 9*N+n]*b[22*N+n] + a[10*N+n]*b[28*N+n] + a[11*N+n]*b[34*N+n];
         c[11*N+n] = a[ 6*N+n]*b[ 5*N+n] + a[ 7*N+n]*b[11*N+n] + a[ 8*N+n]*b[17*N+n] + a[ 9*N+n]*b[23*N+n] + a[10*N+n]*b[29*N+n] + a[11*N+n]*b[35*N+n];
         c[12*N+n] = a[12*N+n]*b[ 0*N+n] + a[13*N+n]*b[ 6*N+n] + a[14*N+n]*b[12*N+n] + a[15*N+n]*b[18*N+n] + a[16*N+n]*b[24*N+n] + a[17*N+n]*b[30*N+n];
         c[13*N+n] = a[12*N+n]*b[ 1*N+n] + a[13*N+n]*b[ 7*N+n] + a[14*N+n]*b[13*N+n] + a[15*N+n]*b[19*N+n] + a[16*N+n]*b[25*N+n] + a[17*N+n]*b[31*N+n];
         c[14*N+n] = a[12*N+n]*b[ 2*N+n] + a[13*N+n]*b[ 8*N+n] + a[14*N+n]*b[14*N+n] + a[15*N+n]*b[20*N+n] + a[16*N+n]*b[26*N+n] + a[17*N+n]*b[32*N+n];
         c[15*N+n] = a[12*N+n]*b[ 3*N+n] + a[13*N+n]*b[ 9*N+n] + a[14*N+n]*b[15*N+n] + a[15*N+n]*b[21*N+n] + a[16*N+n]*b[27*N+n] + a[17*N+n]*b[33*N+n];
         c[16*N+n] = a[12*N+n]*b[ 4*N+n] + a[13*N+n]*b[10*N+n] + a[14*N+n]*b[16*N+n] + a[15*N+n]*b[22*N+n] + a[16*N+n]*b[28*N+n] + a[17*N+n]*b[34*N+n];
         c[17*N+n] = a[12*N+n]*b[ 5*N+n] + a[13*N+n]*b[11*N+n] + a[14*N+n]*b[17*N+n] + a[15*N+n]*b[23*N+n] + a[16*N+n]*b[29*N+n] + a[17*N+n]*b[35*N+n];
         c[18*N+n] = a[18*N+n]*b[ 0*N+n] + a[19*N+n]*b[ 6*N+n] + a[20*N+n]*b[12*N+n] + a[21*N+n]*b[18*N+n] + a[22*N+n]*b[24*N+n] + a[23*N+n]*b[30*N+n];
         c[19*N+n] = a[18*N+n]*b[ 1*N+n] + a[19*N+n]*b[ 7*N+n] + a[20*N+n]*b[13*N+n] + a[21*N+n]*b[19*N+n] + a[22*N+n]*b[25*N+n] + a[23*N+n]*b[31*N+n];
         c[20*N+n] = a[18*N+n]*b[ 2*N+n] + a[19*N+n]*b[ 8*N+n] + a[20*N+n]*b[14*N+n] + a[21*N+n]*b[20*N+n] + a[22*N+n]*b[26*N+n] + a[23*N+n]*b[32*N+n];
         c[21*N+n] = a[18*N+n]*b[ 3*N+n] + a[19*N+n]*b[ 9*N+n] + a[20*N+n]*b[15*N+n] + a[21*N+n]*b[21*N+n] + a[22*N+n]*b[27*N+n] + a[23*N+n]*b[33*N+n];
         c[22*N+n] = a[18*N+n]*b[ 4*N+n] + a[19*N+n]*b[10*N+n] + a[20*N+n]*b[16*N+n] + a[21*N+n]*b[22*N+n] + a[22*N+n]*b[28*N+n] + a[23*N+n]*b[34*N+n];
         c[23*N+n] = a[18*N+n]*b[ 5*N+n] + a[19*N+n]*b[11*N+n] + a[20*N+n]*b[17*N+n] + a[21*N+n]*b[23*N+n] + a[22*N+n]*b[29*N+n] + a[23*N+n]*b[35*N+n];
         c[24*N+n] = a[24*N+n]*b[ 0*N+n] + a[25*N+n]*b[ 6*N+n] + a[26*N+n]*b[12*N+n] + a[27*N+n]*b[18*N+n] + a[28*N+n]*b[24*N+n] + a[29*N+n]*b[30*N+n];
         c[25*N+n] = a[24*N+n]*b[ 1*N+n] + a[25*N+n]*b[ 7*N+n] + a[26*N+n]*b[13*N+n] + a[27*N+n]*b[19*N+n] + a[28*N+n]*b[25*N+n] + a[29*N+n]*b[31*N+n];
         c[26*N+n] = a[24*N+n]*b[ 2*N+n] + a[25*N+n]*b[ 8*N+n] + a[26*N+n]*b[14*N+n] + a[27*N+n]*b[20*N+n] + a[28*N+n]*b[26*N+n] + a[29*N+n]*b[32*N+n];
         c[27*N+n] = a[24*N+n]*b[ 3*N+n] + a[25*N+n]*b[ 9*N+n] + a[26*N+n]*b[15*N+n] + a[27*N+n]*b[21*N+n] + a[28*N+n]*b[27*N+n] + a[29*N+n]*b[33*N+n];
         c[28*N+n] = a[24*N+n]*b[ 4*N+n] + a[25*N+n]*b[10*N+n] + a[26*N+n]*b[16*N+n] + a[27*N+n]*b[22*N+n] + a[28*N+n]*b[28*N+n] + a[29*N+n]*b[34*N+n];
         c[29*N+n] = a[24*N+n]*b[ 5*N+n] + a[25*N+n]*b[11*N+n] + a[26*N+n]*b[17*N+n] + a[27*N+n]*b[23*N+n] + a[28*N+n]*b[29*N+n] + a[29*N+n]*b[35*N+n];
         c[30*N+n] = a[30*N+n]*b[ 0*N+n] + a[31*N+n]*b[ 6*N+n] + a[32*N+n]*b[12*N+n] + a[33*N+n]*b[18*N+n] + a[34*N+n]*b[24*N+n] + a[35*N+n]*b[30*N+n];
         c[31*N+n] = a[30*N+n]*b[ 1*N+n] + a[31*N+n]*b[ 7*N+n] + a[32*N+n]*b[13*N+n] + a[33*N+n]*b[19*N+n] + a[34*N+n]*b[25*N+n] + a[35*N+n]*b[31*N+n];
         c[32*N+n] = a[30*N+n]*b[ 2*N+n] + a[31*N+n]*b[ 8*N+n] + a[32*N+n]*b[14*N+n] + a[33*N+n]*b[20*N+n] + a[34*N+n]*b[26*N+n] + a[35*N+n]*b[32*N+n];
         c[33*N+n] = a[30*N+n]*b[ 3*N+n] + a[31*N+n]*b[ 9*N+n] + a[32*N+n]*b[15*N+n] + a[33*N+n]*b[21*N+n] + a[34*N+n]*b[27*N+n] + a[35*N+n]*b[33*N+n];
         c[34*N+n] = a[30*N+n]*b[ 4*N+n] + a[31*N+n]*b[10*N+n] + a[32*N+n]*b[16*N+n] + a[33*N+n]*b[22*N+n] + a[34*N+n]*b[28*N+n] + a[35*N+n]*b[34*N+n];
         c[35*N+n] = a[30*N+n]*b[ 5*N+n] + a[31*N+n]*b[11*N+n] + a[32*N+n]*b[17*N+n] + a[33*N+n]*b[23*N+n] + a[34*N+n]*b[29*N+n] + a[35*N+n]*b[35*N+n];
      }
   }
};

//! Square-matrix multiply C = A * B; dispatches to the unrolled kernels.
template<typename T, idx_t D, idx_t N>
void Multiply(const MPlex<T, D, D, N>& A,
              const MPlex<T, D, D, N>& B,
              MPlex<T, D, D, N>& C)
{
   // printf("Multipl %d %d\n", D, N);

   MultiplyCls<T, D, N>::Multiply(A, B, C);
}


//==============================================================================
// Cramer inversion
//==============================================================================

// In-place inversion via Cramer's rule; only D = 2 and D = 3 are supported.
template<typename T, idx_t D, idx_t N>
struct CramerInverter
{
   static void Invert(MPlex<T, D, D, N>& A, double *determ=0)
   {
      throw std::runtime_error("general cramer inversion not supported");
   }
};

template<typename T, idx_t N>
struct CramerInverter<T, 2, N>
{
   static void Invert(MPlex<T, 2, 2, N>& A, double *determ=0)
   {
      typedef T TT;

      T *a = A.fArray; ASSUME_ALIGNED(a, 64);

#pragma omp simd
      for (idx_t n = 0; n < N; ++n)
      {
         // Determinant accumulated in double for accuracy even when T=float.
         //const TT det = a[0*N+n] * a[3*N+n] - a[2*N+n] * a[1*N+n];
         const double det = (double)a[0*N+n] * a[3*N+n] - (double)a[2*N+n] * a[1*N+n];
         //if (determ)
         //determ[n] = det;

         const TT s   = TT(1) / det;
         const TT tmp = s * a[3*N + n];
         a[1*N+n] *= -s;
         a[2*N+n] *= -s;
         a[3*N+n]  = s * a[0*N+n];
         a[0*N+n]  = tmp;
      }
   }
};

template<typename T, idx_t N>
struct CramerInverter<T, 3, N>
{
   static void Invert(MPlex<T, 3, 3, N>& A, double *determ=0)
   {
      typedef T TT;

      T *a = A.fArray; ASSUME_ALIGNED(a, 64);

#pragma omp simd
      for (idx_t n = 0; n < N; ++n)
      {
         // Cofactors of the 3x3 matrix.
         const TT c00 = a[4*N+n] * a[8*N+n] - a[5*N+n] * a[7*N+n];
         const TT c01 = a[5*N+n] * a[6*N+n] - a[3*N+n] * a[8*N+n];
         const TT c02 = a[3*N+n] * a[7*N+n] - a[4*N+n] * a[6*N+n];
         const TT c10 = a[7*N+n] * a[2*N+n] - a[8*N+n] * a[1*N+n];
         const TT c11 = a[8*N+n] * a[0*N+n] - a[6*N+n] * a[2*N+n];
         const TT c12 = a[6*N+n] * a[1*N+n] - a[7*N+n] * a[0*N+n];
         const TT c20 = a[1*N+n] * a[5*N+n] - a[2*N+n] * a[4*N+n];
         const TT c21 = a[2*N+n] * a[3*N+n] - a[0*N+n] * a[5*N+n];
         const TT c22 = a[0*N+n] * a[4*N+n] - a[1*N+n] * a[3*N+n];

         const TT det = a[0*N+n] * c00 + a[1*N+n] * c01 + a[2*N+n] * c02;
         //if (determ)
         // *determ[n] = det;

         const TT s = TT(1) / det;

         a[0*N+n] = s*c00;
         a[1*N+n] = s*c10;
         a[2*N+n] = s*c20;
         a[3*N+n] = s*c01;
         a[4*N+n] = s*c11;
         a[5*N+n] = s*c21;
         a[6*N+n] = s*c02;
         a[7*N+n] = s*c12;
         a[8*N+n] = s*c22;
      }
   }
};

//! In-place Cramer inversion; optional determ output is currently unused.
template<typename T, idx_t D, idx_t N>
void InvertCramer(MPlex<T, D, D, N>& A, double *determ=0)
{
   // We don't do general Inverts.

   CramerInverter<T, D, N>::Invert(A, determ);
}


//==============================================================================
// Cholesky inversion
//==============================================================================

// In-place inversion via Cholesky decomposition; only D = 3 is supported.
template<typename T, idx_t D, idx_t N>
struct CholeskyInverter
{
   static void Invert(MPlex<T, D, D, N>& A)
   {
      throw std::runtime_error("general cholesky inversion not supported");
   }
};

template<typename T, idx_t N>
struct CholeskyInverter<T, 3, N>
{
   // Remember, this only works on symmetric matrices!
   // Optimized version for positive definite matrices, no checks.
   // Also, use as little locals as possible.
   // This gives: host  x 5.8 (instead of 4.7x)
   //             mic   x17.7 (instead of 8.5x))
   static void Invert(MPlex<T, 3, 3, N>& A)
   {
      typedef T TT;

      T *a = A.fArray; ASSUME_ALIGNED(a, 64);

#pragma omp simd
      for (idx_t n = 0; n < N; ++n)
      {
         // Lower-triangular Cholesky factor, computed with reciprocal
         // square roots so the back-substitution is multiply-only.
         TT l0 = std::sqrt(T(1) / a[0*N+n]);
         TT l1 = a[3*N+n] * l0;
         TT l2 = a[4*N+n] - l1 * l1;
            l2 = std::sqrt(T(1) / l2);
         TT l3 = a[6*N+n] * l0;
         TT l4 = (a[7*N+n] - l1 * l3) * l2;
         TT l5 = a[8*N+n] - (l3 * l3 + l4 * l4);
            l5 = std::sqrt(T(1) / l5);

         // decomposition done

         l3 = (l1 * l4 * l2 - l3) * l0 * l5;
         l1 = -l1 * l0 * l2;
         l4 = -l4 * l2 * l5;

         a[0*N+n] = l3*l3 + l1*l1 + l0*l0;
         a[1*N+n] = a[3*N+n] = l3*l4 + l1*l2;
         a[4*N+n] = l4*l4 + l2*l2;
         a[2*N+n] = a[6*N+n] = l3*l5;
         a[5*N+n] = a[7*N+n] = l4*l5;
         a[8*N+n] = l5*l5;

         // m(2,x) are all zero if anything went wrong at l5.
         // all zero, if anything went wrong already for l0 or l2.
      }
   }
};

//! In-place Cholesky inversion of a symmetric positive-definite plex.
template<typename T, idx_t D, idx_t N>
void InvertCholesky(MPlex<T, D, D, N>& A)
{
   CholeskyInverter<T, D, N>::Invert(A);
}

}

#endif
clip_ref.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2020, OPEN AI LAB
 * Author: qtang@openailab.com
 */

#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include "clip_param.h"

#include <math.h>

/* Clamp every element of an FP32 NCHW tensor into [min, max], writing the
 * result to output_tensor.  Channels are processed in parallel.  Returns 0. */
int ref_clip_fp32(struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, float max, float min,
                  int num_thread)
{
    /* Clip is element-wise, so input and output shapes match; read all dims
     * from the input tensor for consistency. */
    int w = input_tensor->dims[3];
    int h = input_tensor->dims[2];
    int channels = input_tensor->dims[1];
    int size = h * w;
    int c_step = h * w;

    float* input_data = input_tensor->data;
    float* out_data = output_tensor->data;

#pragma omp parallel for num_threads(num_thread)
    for (int q = 0; q < channels; q++)
    {
        float* src = input_data + c_step * q;
        float* dst = out_data + c_step * q;

        for (int i = 0; i < size; i++)
        {
            dst[i] = src[i];

            if (dst[i] > max)
                dst[i] = max;
            if (dst[i] < min)
                dst[i] = min;
        }
    }

    return 0;
}

/* Quantized (asymmetric uint8) clip: dequantize the input, clamp into
 * [min, max], then requantize with the output scale/zero-point.  The
 * requantized value is saturated into [0, 255]; the original code only
 * capped at 255, so negative results wrapped around on the uint8 store.
 * Returns 0 on success, -1 on allocation failure. */
int ref_clip_uint8(struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, float max, float min,
                   int num_thread)
{
    int w = input_tensor->dims[3];
    int h = input_tensor->dims[2];
    int channels = input_tensor->dims[1];
    int size = h * w;
    int c_step = h * w;

    uint8_t* input_uint8 = ( uint8_t* )input_tensor->data;
    uint8_t* output_uint8 = ( uint8_t* )output_tensor->data;
    float input_scale = input_tensor->scale;
    float output_scale = output_tensor->scale;
    int input_zero = input_tensor->zero_point;
    int output_zero = output_tensor->zero_point;

    /* Scratch buffers for the dequantized data. */
    float* input_fp32 = ( float* )sys_malloc(input_tensor->elem_num * sizeof(float));
    float* output_fp32 = ( float* )sys_malloc(output_tensor->elem_num * sizeof(float));
    if (input_fp32 == NULL || output_fp32 == NULL)
    {
        if (input_fp32)
            sys_free(input_fp32);
        if (output_fp32)
            sys_free(output_fp32);
        return -1;
    }

    /* input dequant */
    for (int i = 0; i < input_tensor->elem_num; i++)
        input_fp32[i] = (input_uint8[i] - input_zero) * input_scale;

#pragma omp parallel for num_threads(num_thread)
    for (int q = 0; q < channels; q++)
    {
        float* src = input_fp32 + c_step * q;
        float* dst = output_fp32 + c_step * q;

        for (int i = 0; i < size; i++)
        {
            dst[i] = src[i];

            if (dst[i] > max)
                dst[i] = max;
            if (dst[i] < min)
                dst[i] = min;
        }
    }

    /* output quant, saturating into the uint8 range on BOTH ends */
    for (int i = 0; i < output_tensor->elem_num; i++)
    {
        int output_data = round(output_fp32[i] / output_scale) + output_zero;
        if (output_data > 255)
            output_data = 255;
        if (output_data < 0)
            output_data = 0;
        output_uint8[i] = ( uint8_t )output_data;
    }

    sys_free(input_fp32);
    sys_free(output_fp32);

    return 0;
}

static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Dispatch the clip kernel according to the input tensor's data type. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor;
    struct ir_tensor* output_tensor;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    struct clip_param* clip_param = ( struct clip_param* )ir_node->op.param_mem;
    float max = clip_param->max;
    float min = clip_param->min;

    int ret = -1;
    if (input_tensor->data_type == TENGINE_DT_FP32)
        ret = ref_clip_fp32(input_tensor, output_tensor, max, min, exec_graph->num_thread);
    else
        ret = ref_clip_uint8(input_tensor, output_tensor, max, min, exec_graph->num_thread);

    return ret;
}

static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
    return OPS_SCORE_CANDO;
}

static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

static int reg_clip_hcl_ops(void* arg)
{
    return register_builtin_node_ops(OP_CLIP, &hcl_node_ops);
}

static int unreg_clip_hcl_ops(void* arg)
{
    return unregister_builtin_node_ops(OP_CLIP, &hcl_node_ops);
}

AUTO_REGISTER_OPS(reg_clip_hcl_ops);
AUTO_UNREGISTER_OPS(unreg_clip_hcl_ops);
cross_correlate_2d.c
// MIT License
//
// Copyright (c) 2021 Florian
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.

#include <stdio.h>
#include <stdlib.h>

/*
 * Valid (no-padding) 2-D cross-correlation of a row-major input with a
 * row-major kernel.
 *
 *   output[i][j] = sum_{k,l} input[i+k][j+l] * kernel[k][l]
 *
 * s_i: {rows, cols} of input
 * s_k: {rows, cols} of kernel
 * s_o: {rows, cols} of output; the caller is responsible for providing
 *      s_o[d] == s_i[d] - s_k[d] + 1 so every input read stays in bounds.
 *
 * Changes vs. the previous version:
 *  - loop indices are size_t, matching the size_t dimension arrays (the old
 *    `int i < s_o[0]` comparison was signed/unsigned mismatched);
 *  - each output cell is accumulated in a local double and stored once,
 *    instead of read-modify-writing through the output pointer in the
 *    innermost loop;
 *  - the split `#pragma omp parallel` / `#pragma omp for` pair is merged
 *    into the equivalent combined construct.
 */
void cross_correlate_2d(
    const size_t *s_i, const double *input,
    const size_t *s_k, const double *kernel,
    const size_t *s_o, double *output)
{
#pragma omp parallel for
    for (size_t i = 0; i < s_o[0]; ++i)
    {
        for (size_t j = 0; j < s_o[1]; ++j)
        {
            double acc = 0.0;
            for (size_t k = 0; k < s_k[0]; ++k)
            {
                /* contiguous row segments: hoist the row bases out of the
                   innermost loop */
                const double *in_row = input + (i + k) * s_i[1] + j;
                const double *ker_row = kernel + k * s_k[1];
                for (size_t l = 0; l < s_k[1]; ++l)
                {
                    acc += in_row[l] * ker_row[l];
                }
            }
            output[i * s_o[1] + j] = acc;
        }
    }
}
conv_kernel.c
/* * Copyright (C) 2015-2020 ETH Zurich and University of Bologna * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include <stdint.h> #include "pulp.h" /* Inputs, weights and outputs are represented in fixed-point Q1.7 unsigned format: this means that each integer in [0-255] represents a real value in the range [0.0-1.0) The relationship between the integer I and real R representations is given by R = I * 2^-FRACTIONARY_BITS */ #define FRACTIONARY_BITS 7 #define ROUNDBIT (1 << (FRACTIONARY_BITS -1)) #define SATURATION 255 // K = 3 void __attribute__ ((noinline)) ConvKxK_Naive (uint8_t * In_Img, uint8_t * Out_Img, int R, int lb, int ub, int C, uint8_t * Kernel, int K) { int r, c, k, i, j, w, t; uint8_t coeff; uint8_t data; int S; //image board is black #pragma omp parallel for for (r=lb; r < ub; r++) { for (c=3/2; c < C-3/2; c++) { S = 0; t = r*R + c; //move in the window /* Coordinate window (-1;-1) (-1;0) (-1;+1) ( 0;-1) ( 0;0) ( 0;+1) (+1;-1) (+1;0) (+1;+1) */ for (i = -K/2; i <= K/2; i++) { for (j = -K/2; j <= K/2; j++) { k = (r+i)*R + (c+j); //coeff for one dimension matrix data = In_Img[k]; w = (i+1)*K + (j+1); coeff = Kernel[w]; S = S + (int)(coeff*data); } } // Normalization: Data are Q2.2*(FRACTIONARY_BITS-1), now Q2.FRACTIONARY_BITS-1 S = S >> FRACTIONARY_BITS; // Saturation S = S > SATURATION ? SATURATION : S; S = S < 0 ? 0 : S; Out_Img[t] = (uint8_t)(S); } } }
GB_binop__land_bool.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__land_bool) // A.*B function (eWiseMult): GB (_AemultB_08__land_bool) // A.*B function (eWiseMult): GB (_AemultB_02__land_bool) // A.*B function (eWiseMult): GB (_AemultB_04__land_bool) // A.*B function (eWiseMult): GB (_AemultB_bitmap__land_bool) // A*D function (colscale): GB (_AxD__land_bool) // D*A function (rowscale): GB (_DxB__land_bool) // C+=B function (dense accum): GB (_Cdense_accumB__land_bool) // C+=b function (dense accum): GB (_Cdense_accumb__land_bool) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_bool) // C=scalar+B GB (_bind1st__land_bool) // C=scalar+B' GB (_bind1st_tran__land_bool) // C=A+scalar GB (_bind2nd__land_bool) // C=A'+scalar GB (_bind2nd_tran__land_bool) // C type: bool // A type: bool // A pattern? 0 // B type: bool // B pattern? 
0 // BinaryOp: cij = (aij && bij) #define GB_ATYPE \ bool #define GB_BTYPE \ bool #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ bool aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ bool bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x && y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LAND || GxB_NO_BOOL || GxB_NO_LAND_BOOL) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__land_bool) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__land_bool) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__land_bool) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type bool bool bwork = (*((bool *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__land_bool) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) 
C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__land_bool) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__land_bool) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; bool alpha_scalar ; bool beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((bool *) alpha_scalar_in)) ; beta_scalar = (*((bool *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__land_bool) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool 
Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__land_bool) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__land_bool) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__land_bool) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__land_bool) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; bool x = (*((bool *) x_input)) ; bool *Bx = (bool *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < 
bnz ; p++) { if (!GBB (Bb, p)) continue ; bool bij = GBX (Bx, p, false) ; Cx [p] = (x && bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__land_bool) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; bool *Ax = (bool *) Ax_input ; bool y = (*((bool *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; bool aij = GBX (Ax, p, false) ; Cx [p] = (aij && y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x && aij) ; \ } GrB_Info GB (_bind1st_tran__land_bool) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ bool #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool x = (*((const bool *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ bool } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij && y) ; \ } GrB_Info GB (_bind2nd_tran__land_bool) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool y = (*((const bool *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
FourOP.h
#ifndef FOUROP_H_ #define FOUROP_H_ /* * FourOP.h: * a simple feed forward neural operation, four variable input. * * Created on: June 11, 2017 * Author: mszhang */ #include "Param.h" #include "MyLib.h" #include "Node.h" #include "Graph.h" class FourParams { public: Param W1; Param W2; Param W3; Param W4; Param b; bool bUseB; public: FourParams() { bUseB = true; } inline void exportAdaParams(ModelUpdate& ada) { ada.addParam(&W1); ada.addParam(&W2); ada.addParam(&W3); ada.addParam(&W4); if (bUseB) { ada.addParam(&b); } } inline void initial(int nOSize, int nISize1, int nISize2, int nISize3, int nISize4, bool useB = true) { W1.initial(nOSize, nISize1); W2.initial(nOSize, nISize2); W3.initial(nOSize, nISize3); W4.initial(nOSize, nISize4); bUseB = useB; if (bUseB) { b.initial(nOSize, 1); } } inline void save(std::ofstream &os) const { os << bUseB << std::endl; W1.save(os); W2.save(os); W3.save(os); W4.save(os); if (bUseB) { b.save(os); } } inline void load(std::ifstream &is) { is >> bUseB; W1.load(is); W2.load(is); W3.load(is); W4.load(is); if (bUseB) { b.load(is); } } }; // non-linear feed-forward node // input nodes should be specified by forward function // for input variables, we exploit column vector, // which means a concrete input vector x_i is represented by x(0, i), x(1, i), ..., x(n, i) class FourNode : public Node { public: PNode in1, in2, in3, in4; FourParams* param; dtype(*activate)(const dtype&); // activation function dtype(*derivate)(const dtype&, const dtype&); // derivation function of activation function Tensor1D ty, lty; public: FourNode() : Node() { in1 = in2 = in3 = in4 = NULL; activate = ftanh; derivate = dtanh; param = NULL; node_type = "four"; } ~FourNode() { in1 = in2 = in3 = in4 = NULL; } inline void init(int ndim, dtype dropout) { Node::init(ndim, dropout); ty.init(ndim); lty.init(ndim); } inline void setParam(FourParams* paramInit) { param = paramInit; } inline void clearValue() { Node::clearValue(); in1 = in2 = in3 = in4 = NULL; ty = 0; 
lty = 0; } // define the activate function and its derivation form inline void setFunctions(dtype(*f)(const dtype&), dtype(*f_deri)(const dtype&, const dtype&)) { activate = f; derivate = f_deri; } public: void forward(Graph *cg, PNode x1, PNode x2, PNode x3, PNode x4) { in1 = x1; in2 = x2; in3 = x3; in4 = x4; degree = 0; in1->addParent(this); in2->addParent(this); in3->addParent(this); in4->addParent(this); cg->addNode(this); } public: inline void compute() { ty.mat() = param->W1.val.mat() * in1->val.mat() + param->W2.val.mat() * in2->val.mat() + param->W3.val.mat() * in3->val.mat() + param->W4.val.mat() * in4->val.mat(); if (param->bUseB) { ty.vec() += param->b.val.vec(); } val.vec() = ty.vec().unaryExpr(ptr_fun(activate)); } inline void backward() { lty.vec() = loss.vec() * ty.vec().binaryExpr(val.vec(), ptr_fun(derivate)); param->W1.grad.mat() += lty.mat() * in1->val.tmat(); param->W2.grad.mat() += lty.mat() * in2->val.tmat(); param->W3.grad.mat() += lty.mat() * in3->val.tmat(); param->W4.grad.mat() += lty.mat() * in4->val.tmat(); if (param->bUseB) { param->b.grad.vec() += lty.vec(); } in1->loss.mat() += param->W1.val.mat().transpose() * lty.mat(); in2->loss.mat() += param->W2.val.mat().transpose() * lty.mat(); in3->loss.mat() += param->W3.val.mat().transpose() * lty.mat(); in4->loss.mat() += param->W4.val.mat().transpose() * lty.mat(); } public: inline PExecute generate(bool bTrain, dtype cur_drop_factor); // better to rewrite for deep understanding inline bool typeEqual(PNode other) { bool result = Node::typeEqual(other); if (!result) return false; FourNode* conv_other = (FourNode*)other; if (param != conv_other->param) { return false; } if (activate != conv_other->activate || derivate != conv_other->derivate) { return false; } return true; } }; // non-linear feed-forward node // input nodes should be specified by forward function // for input variables, we exploit column vector, // which means a concrete input vector x_i is represented by x(0, i), x(1, i), 
..., x(n, i) class LinearFourNode : public Node { public: PNode in1, in2, in3, in4; FourParams* param; public: LinearFourNode() : Node() { in1 = in2 = in3 = in4 = NULL; param = NULL; node_type = "linear_four"; } inline void setParam(FourParams* paramInit) { param = paramInit; } inline void clearValue() { Node::clearValue(); in1 = in2 = in3 = in4 = NULL; } public: void forward(Graph *cg, PNode x1, PNode x2, PNode x3, PNode x4) { in1 = x1; in2 = x2; in3 = x3; in4 = x4; degree = 0; in1->addParent(this); in2->addParent(this); in3->addParent(this); in4->addParent(this); cg->addNode(this); } public: inline void compute() { val.mat() = param->W1.val.mat() * in1->val.mat() + param->W2.val.mat() * in2->val.mat() + param->W3.val.mat() * in3->val.mat() + param->W4.val.mat() * in4->val.mat(); if (param->bUseB) { val.vec() += param->b.val.vec(); } } inline void backward() { param->W1.grad.mat() += loss.mat() * in1->val.tmat(); param->W2.grad.mat() += loss.mat() * in2->val.tmat(); param->W3.grad.mat() += loss.mat() * in3->val.tmat(); param->W4.grad.mat() += loss.mat() * in4->val.tmat(); if (param->bUseB) { param->b.grad.vec() += loss.vec(); } in1->loss.mat() += param->W1.val.mat().transpose() * loss.mat(); in2->loss.mat() += param->W2.val.mat().transpose() * loss.mat(); in3->loss.mat() += param->W3.val.mat().transpose() * loss.mat(); in4->loss.mat() += param->W4.val.mat().transpose() * loss.mat(); } public: inline PExecute generate(bool bTrain, dtype cur_drop_factor); // better to rewrite for deep understanding inline bool typeEqual(PNode other) { bool result = Node::typeEqual(other); if (!result) return false; LinearFourNode* conv_other = (LinearFourNode*)other; if (param != conv_other->param) { return false; } return true; } }; class FourExecute :public Execute { public: bool bTrain; public: inline void forward() { int count = batch.size(); //#pragma omp parallel for for (int idx = 0; idx < count; idx++) { batch[idx]->compute(); batch[idx]->forward_drop(bTrain, drop_factor); } 
} inline void backward() { int count = batch.size(); //#pragma omp parallel for for (int idx = 0; idx < count; idx++) { batch[idx]->backward_drop(); batch[idx]->backward(); } } }; inline PExecute FourNode::generate(bool bTrain, dtype cur_drop_factor) { FourExecute* exec = new FourExecute(); exec->batch.push_back(this); exec->bTrain = bTrain; exec->drop_factor = cur_drop_factor; return exec; }; class LinearFourExecute :public Execute { public: bool bTrain; public: inline void forward() { int count = batch.size(); //#pragma omp parallel for for (int idx = 0; idx < count; idx++) { batch[idx]->compute(); batch[idx]->forward_drop(bTrain, drop_factor); } } inline void backward() { int count = batch.size(); //#pragma omp parallel for for (int idx = 0; idx < count; idx++) { batch[idx]->backward_drop(); batch[idx]->backward(); } } }; inline PExecute LinearFourNode::generate(bool bTrain, dtype cur_drop_factor) { LinearFourExecute* exec = new LinearFourExecute(); exec->batch.push_back(this); exec->bTrain = bTrain; exec->drop_factor = cur_drop_factor; return exec; }; #endif /* FOUROP_H_ */
owl_matrix_swap_impl_omp.h
/*
 * OWL - OCaml Scientific and Engineering Computing
 * Copyright (c) 2016-2020 Liang Wang <liang.wang@cl.cam.ac.uk>
 */

/*
 * Template body: included once per element type by a wrapper translation
 * unit that defines TYPE (element type), FUNCTION (name mangler), CONJ_FUN
 * (complex conjugate, identity for real types) and the OCaml bigarray glue.
 * Matrices are row-major; x(m,n) means m rows, n columns, row stride n.
 */

#ifdef OWL_ENABLE_TEMPLATE


// swap row i and row j in x(m,n)
// Element-wise exchange of two rows; parallelized only when the row length
// reaches the OpenMP threshold (per-element work is tiny).
void FUNCTION (c, swap_rows) (TYPE *x, int m, int n, int i, int j) {
  if (i != j) {  // identical indices: nothing to do
    TYPE * src = x + n * i;
    TYPE * dst = x + n * j;
    if (n >= OWL_OMP_THRESHOLD_DEFAULT) {
      #pragma omp parallel for schedule(static)
      for (int k = 0; k < n; k++) {
        TYPE t = *(src + k);
        *(src + k) = *(dst + k);
        *(dst + k) = t;
      }
    }
    else {
      // serial fallback: avoids OpenMP startup overhead on short rows
      for (int k = 0; k < n; k++) {
        TYPE t = *(src + k);
        *(src + k) = *(dst + k);
        *(dst + k) = t;
      }
    }
  }
}


// stub function of swap_rows
// OCaml entry point: unboxes the bigarray and integer arguments, then
// delegates to the C implementation above. Mutates vX in place.
CAMLprim value FUNCTION (stub, swap_rows) (value vX, value vM, value vN, value vI, value vJ) {
  struct caml_ba_array *X = Caml_ba_array_val(vX);
  TYPE *X_data = (TYPE *) X->data;
  int m = Long_val(vM);
  int n = Long_val(vN);
  int i = Long_val(vI);
  int j = Long_val(vJ);
  FUNCTION (c, swap_rows) (X_data, m, n, i, j);
  return Val_unit;
}


// swap column i and column j in x(m,n)
// Strided exchange (stride n between consecutive elements of a column).
void FUNCTION (c, swap_cols) (TYPE *x, int m, int n, int i, int j) {
  if (i != j) {  // identical indices: nothing to do
    TYPE * src = x + i;
    TYPE * dst = x + j;
    if (m >= OWL_OMP_THRESHOLD_DEFAULT) {
      #pragma omp parallel for schedule(static)
      for (int k = 0; k < m; k++) {
        int base = k * n;  // recomputed per iteration so each thread is independent
        TYPE t = *(src + base);
        *(src + base) = *(dst + base);
        *(dst + base) = t;
      }
    }
    else {
      // serial fallback keeps a running offset instead of multiplying
      int base = 0;
      for (int k = 0; k < m; k++) {
        TYPE t = *(src + base);
        *(src + base) = *(dst + base);
        *(dst + base) = t;
        base += n;
      }
    }
  }
}


// stub function of swap_cols
// OCaml entry point mirroring stub swap_rows; mutates vX in place.
CAMLprim value FUNCTION (stub, swap_cols) (value vX, value vM, value vN, value vI, value vJ) {
  struct caml_ba_array *X = Caml_ba_array_val(vX);
  TYPE *X_data = (TYPE *) X->data;
  int m = Long_val(vM);
  int n = Long_val(vN);
  int i = Long_val(vI);
  int j = Long_val(vJ);
  FUNCTION (c, swap_cols) (X_data, m, n, i, j);
  return Val_unit;
}


// transpose x(m,n) and save to y(n,m)
// x and y must not alias (out-of-place transpose). The parallel threshold is
// divided by 100 because each row already does n elements of work.
void FUNCTION (c, transpose) (TYPE *x, TYPE *y, int m, int n) {
  int ofsx = 0;
  int ofsy = 0;
  if (m >= OWL_OMP_THRESHOLD_DEFAULT / 100) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < m; i++) {
      for (int j = 0; j < n; j++) {
        // y[i + j*m] = x[j + i*n], computed from absolute indices so each
        // thread is independent of any running offset
        *(y + i + j * m) = *(x + j + i * n);
      }
    }
  }
  else {
    // serial path: incremental offsets (ofsx walks x linearly, ofsy strides by m)
    for (int i = 0; i < m; i++) {
      ofsy = i;
      for (int j = 0; j < n; j++) {
        *(y + ofsy) = *(x + ofsx);
        ofsy += m;
        ofsx += 1;
      }
    }
  }
}


// stub function of transpose
// OCaml entry point: dimensions are taken from the source bigarray itself.
CAMLprim value FUNCTION (stub, transpose) (value vX, value vY) {
  struct caml_ba_array *X = Caml_ba_array_val(vX);
  TYPE *X_data = (TYPE *) X->data;
  struct caml_ba_array *Y = Caml_ba_array_val(vY);
  TYPE *Y_data = (TYPE *) Y->data;
  FUNCTION (c, transpose) (X_data, Y_data, X->dim[0], X->dim[1]);
  return Val_unit;
}


// conjugate transpose x(m,n) and save to y(n,m)
// Identical traversal to transpose, but each element goes through CONJ_FUN
// (complex conjugation; identity for real TYPEs).
void FUNCTION (c, ctranspose) (TYPE *x, TYPE *y, int m, int n) {
  int ofsx = 0;
  int ofsy = 0;
  if (m >= OWL_OMP_THRESHOLD_DEFAULT / 100) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < m; i++) {
      for (int j = 0; j < n; j++) {
        *(y + i + j * m) = CONJ_FUN(*(x + j + i * n));
      }
    }
  }
  else {
    for (int i = 0; i < m; i++) {
      ofsy = i;
      for (int j = 0; j < n; j++) {
        *(y + ofsy) = CONJ_FUN(*(x + ofsx));
        ofsy += m;
        ofsx += 1;
      }
    }
  }
}


// stub function of ctranspose
// OCaml entry point mirroring stub transpose.
CAMLprim value FUNCTION (stub, ctranspose) (value vX, value vY) {
  struct caml_ba_array *X = Caml_ba_array_val(vX);
  TYPE *X_data = (TYPE *) X->data;
  struct caml_ba_array *Y = Caml_ba_array_val(vY);
  TYPE *Y_data = (TYPE *) Y->data;
  FUNCTION (c, ctranspose) (X_data, Y_data, X->dim[0], X->dim[1]);
  return Val_unit;
}


#endif /* OWL_ENABLE_TEMPLATE */
fx.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF X X % % F X X % % FFF X % % F X X % % F X X % % % % % % MagickCore Image Special Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/fx.h" #include "MagickCore/fx-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* Define declarations. 
*/ #define LeftShiftOperator 0xf5U #define RightShiftOperator 0xf6U #define LessThanEqualOperator 0xf7U #define GreaterThanEqualOperator 0xf8U #define EqualOperator 0xf9U #define NotEqualOperator 0xfaU #define LogicalAndOperator 0xfbU #define LogicalOrOperator 0xfcU #define ExponentialNotation 0xfdU struct _FxInfo { const Image *images; char *expression; FILE *file; SplayTreeInfo *colors, *symbols; CacheView **view; RandomInfo *random_info; ExceptionInfo *exception; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e F x I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireFxInfo() allocates the FxInfo structure. % % The format of the AcquireFxInfo method is: % % FxInfo *AcquireFxInfo(Image *images,const char *expression, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o expression: the expression. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickPrivate FxInfo *AcquireFxInfo(const Image *images,const char *expression,
  ExceptionInfo *exception)
{
  /*
    Two-character operators and the single-byte opcodes that replace them.
    Entries are applied in this exact order.
  */
  static const struct
  {
    const char
      *compound;

    unsigned char
      opcode;
  } fx_operators[] =
  {
    { "<<", LeftShiftOperator },
    { ">>", RightShiftOperator },
    { "<=", LessThanEqualOperator },
    { ">=", GreaterThanEqualOperator },
    { "==", EqualOperator },
    { "!=", NotEqualOperator },
    { "&&", LogicalAndOperator },
    { "||", LogicalOrOperator },
    { "**", ExponentialNotation }
  };

  char
    token[2];

  const Image
    *current;

  FxInfo
    *fx_info;

  size_t
    j,
    n;

  /*
    Allocate and zero the evaluation context; allocation failure is fatal.
  */
  fx_info=(FxInfo *) AcquireCriticalMemory(sizeof(*fx_info));
  (void) memset(fx_info,0,sizeof(*fx_info));
  fx_info->exception=AcquireExceptionInfo();
  fx_info->images=images;
  fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
    fx_info->images),sizeof(*fx_info->view));
  if (fx_info->view == (CacheView **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    One virtual cache view per image in the sequence.
  */
  n=0;
  for (current=GetFirstImageInList(fx_info->images);
       current != (Image *) NULL; current=current->next)
    fx_info->view[n++]=AcquireVirtualCacheView(current,exception);
  fx_info->random_info=AcquireRandomInfo();
  fx_info->expression=ConstantString(expression);
  fx_info->file=stderr;
  /*
    Strip whitespace, then rewrite unary minus as "-1.0*" so negation binds
    right-to-left; the E-/e-/^- substitutions undo the rewrite where the
    minus was part of scientific notation or an exponent.
  */
  (void) SubstituteString(&fx_info->expression," ","");
  (void) SubstituteString(&fx_info->expression,"-","-1.0*");
  (void) SubstituteString(&fx_info->expression,"^-1.0*","^-");
  (void) SubstituteString(&fx_info->expression,"E-1.0*","E-");
  (void) SubstituteString(&fx_info->expression,"e-1.0*","e-");
  /*
    Collapse each compound operator into its single-byte opcode.
  */
  token[1]='\0';
  for (j=0; j < sizeof(fx_operators)/sizeof(fx_operators[0]); j++)
  {
    token[0]=(char) fx_operators[j].opcode;
    (void) SubstituteString(&fx_info->expression,fx_operators[j].compound,
      token);
  }
  return(fx_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A d d N o i s e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AddNoiseImage() adds random noise to the image.
%
%  The format of the AddNoiseImage method is:
%
%      Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
%        const double attenuate,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o noise_type: The type of noise: Uniform, Gaussian, Multiplicative,
%      Impulse, Laplacian, or Poisson.
%
%    o attenuate: attenuate the random distribution.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
  const double attenuate,ExceptionInfo *exception)
{
#define AddNoiseImageTag "AddNoise/Image"

  CacheView
    *image_view,
    *noise_view;

  Image
    *noise_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Try the OpenCL-accelerated path first; fall through on NULL. */
  noise_image=AccelerateAddNoiseImage(image,noise_type,attenuate,exception);
  if (noise_image != (Image *) NULL)
    return(noise_image);
#endif
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
    {
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  /*
    Add noise in each row.
  */
  status=MagickTrue;
  progress=0;
  /* One RandomInfo per thread so parallel rows do not share RNG state. */
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireVirtualCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,noise_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait noise_traits=GetPixelChannelTraits(noise_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (noise_traits == UndefinedPixelTrait))
          continue;
        /* Channels marked copy-only are passed through unperturbed. */
        if ((noise_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(noise_image,channel,p[i],q);
            continue;
          }
        SetPixelChannel(noise_image,channel,ClampToQuantum(
          GenerateDifferentialNoise(random_info[id],p[i],noise_type,attenuate)),
          q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(noise_image);
    }
    sync=SyncCacheViewAuthenticPixels(noise_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress updates across threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AddNoiseImage)
#endif
        proceed=SetImageProgress(image,AddNoiseImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l u e S h i f t I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlueShiftImage() mutes the colors of the image to simulate a scene at
%  nighttime in the moonlight.
%
%  The format of the BlueShiftImage method is:
%
%      Image *BlueShiftImage(const Image *image,const double factor,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o factor: the shift factor.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlueShiftImage(const Image *image,const double factor,
  ExceptionInfo *exception)
{
#define BlueShiftImageTag "BlueShift/Image"

  CacheView
    *image_view,
    *shift_view;

  Image
    *shift_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate blue shift image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  shift_image=CloneImage(image,0,0,MagickTrue,exception);
  if (shift_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(shift_image,DirectClass,exception) == MagickFalse)
    {
      shift_image=DestroyImage(shift_image);
      return((Image *) NULL);
    }
  /*
    Blue-shift DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  shift_view=AcquireAuthenticCacheView(shift_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,shift_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    PixelInfo
      pixel;

    Quantum
      quantum;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* First pass: blend each channel toward the minimum of R,G,B. */
      quantum=GetPixelRed(image,p);
      if (GetPixelGreen(image,p) < quantum)
        quantum=GetPixelGreen(image,p);
      if (GetPixelBlue(image,p) < quantum)
        quantum=GetPixelBlue(image,p);
      pixel.red=0.5*(GetPixelRed(image,p)+factor*quantum);
      pixel.green=0.5*(GetPixelGreen(image,p)+factor*quantum);
      pixel.blue=0.5*(GetPixelBlue(image,p)+factor*quantum);
      /* Second pass: blend the result toward the maximum of R,G,B. */
      quantum=GetPixelRed(image,p);
      if (GetPixelGreen(image,p) > quantum)
        quantum=GetPixelGreen(image,p);
      if (GetPixelBlue(image,p) > quantum)
        quantum=GetPixelBlue(image,p);
      pixel.red=0.5*(pixel.red+factor*quantum);
      pixel.green=0.5*(pixel.green+factor*quantum);
      pixel.blue=0.5*(pixel.blue+factor*quantum);
      SetPixelRed(shift_image,ClampToQuantum(pixel.red),q);
      SetPixelGreen(shift_image,ClampToQuantum(pixel.green),q);
      SetPixelBlue(shift_image,ClampToQuantum(pixel.blue),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(shift_image);
    }
    sync=SyncCacheViewAuthenticPixels(shift_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress updates across threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BlueShiftImage)
#endif
        proceed=SetImageProgress(image,BlueShiftImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  shift_view=DestroyCacheView(shift_view);
  if (status == MagickFalse)
    shift_image=DestroyImage(shift_image);
  return(shift_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C h a r c o a l I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CharcoalImage() creates a new image that is a copy of an existing one with
%  the edge highlighted.  It allocates the memory necessary for the new Image
%  structure and returns a pointer to the new image.
%
%  The format of the CharcoalImage method is:
%
%      Image *CharcoalImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CharcoalImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *duplicate_image,
    *outline_image,
    *sketch_image;

  /*
    Sanity-check the arguments.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Edge-detect a copy of the source image.
  */
  duplicate_image=CloneImage(image,0,0,MagickTrue,exception);
  if (duplicate_image == (Image *) NULL)
    return((Image *) NULL);
  outline_image=EdgeImage(duplicate_image,radius,exception);
  duplicate_image=DestroyImage(duplicate_image);
  if (outline_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Soften the edges, then normalize, invert, and desaturate to get the
    hand-drawn charcoal look.
  */
  sketch_image=BlurImage(outline_image,radius,sigma,exception);
  outline_image=DestroyImage(outline_image);
  if (sketch_image == (Image *) NULL)
    return((Image *) NULL);
  (void) NormalizeImage(sketch_image,exception);
  (void) NegateImage(sketch_image,MagickFalse,exception);
  (void) GrayscaleImage(sketch_image,image->intensity,exception);
  return(sketch_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o l o r i z e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ColorizeImage() blends the fill color with each pixel in the image.
%  A percentage blend is specified with opacity.  Control the application
%  of different color components by specifying a different percentage for
%  each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue).
%
%  The format of the ColorizeImage method is:
%
%      Image *ColorizeImage(const Image *image,const char *blend,
%        const PixelInfo *colorize,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o blend: A character string indicating the level of blending as a
%      percentage.
%
%    o colorize: A color value.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorizeImage(const Image *image,const char *blend,
  const PixelInfo *colorize,ExceptionInfo *exception)
{
#define ColorizeImageTag "Colorize/Image"
/* Per-channel blend: 0% keeps the source pixel, 100% takes the fill color. */
#define Colorize(pixel,blend_percentage,colorize) \
  (((pixel)*(100.0-(blend_percentage))+(colorize)*(blend_percentage))/100.0)

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  Image
    *colorize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickStatusType
    flags;

  PixelInfo
    blend_percentage;

  ssize_t
    y;

  /*
    Allocate colorized image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  colorize_image=CloneImage(image,0,0,MagickTrue,exception);
  if (colorize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(colorize_image,DirectClass,exception) == MagickFalse)
    {
      colorize_image=DestroyImage(colorize_image);
      return((Image *) NULL);
    }
  if ((IsGrayColorspace(colorize_image->colorspace) != MagickFalse) ||
      (IsPixelInfoGray(colorize) != MagickFalse))
    (void) SetImageColorspace(colorize_image,sRGBColorspace,exception);
  if ((colorize_image->alpha_trait == UndefinedPixelTrait) &&
      (colorize->alpha_trait != UndefinedPixelTrait))
    (void) SetImageAlpha(colorize_image,OpaqueAlpha,exception);
  /* A NULL blend string means return the (unblended) clone as-is. */
  if (blend == (const char *) NULL)
    return(colorize_image);
  GetPixelInfo(colorize_image,&blend_percentage);
  /*
    Parse rho/sigma/xi/psi[/chi] from the blend geometry; rho alone applies
    to every channel, later values override individual channels.
  */
  flags=ParseGeometry(blend,&geometry_info);
  blend_percentage.red=geometry_info.rho;
  blend_percentage.green=geometry_info.rho;
  blend_percentage.blue=geometry_info.rho;
  blend_percentage.black=geometry_info.rho;
  blend_percentage.alpha=(MagickRealType) TransparentAlpha;
  if ((flags & SigmaValue) != 0)
    blend_percentage.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    blend_percentage.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    blend_percentage.alpha=geometry_info.psi;
  if (blend_percentage.colorspace == CMYKColorspace)
    {
      /* In CMYK the fourth value is black and alpha shifts to the fifth. */
      if ((flags & PsiValue) != 0)
        blend_percentage.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        blend_percentage.alpha=geometry_info.chi;
    }
  /*
    Colorize DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(colorize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(colorize_image,colorize_image,colorize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) colorize_image->rows; y++)
  {
    MagickBooleanType
      sync;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,colorize_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) colorize_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(colorize_image); i++)
      {
        PixelTrait traits = GetPixelChannelTraits(colorize_image,
          (PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        SetPixelChannel(colorize_image,(PixelChannel) i,ClampToQuantum(
          Colorize(q[i],GetPixelInfoChannel(&blend_percentage,(PixelChannel) i),
          GetPixelInfoChannel(colorize,(PixelChannel) i))),q);
      }
      q+=GetPixelChannels(colorize_image);
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress updates across threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ColorizeImage)
#endif
        proceed=SetImageProgress(image,ColorizeImageTag,progress++,
          colorize_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    colorize_image=DestroyImage(colorize_image);
  return(colorize_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o l o r M a t r i x I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ColorMatrixImage() applies color transformation to an image. This method
%  permits saturation changes, hue rotation, luminance to alpha, and various
%  other effects.  Although variable-sized transformation matrices can be used,
%  typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
%  (or RGBA with offsets).  The matrix is similar to those used by Adobe Flash
%  except offsets are in column 6 rather than 5 (in support of CMYKA images)
%  and offsets are normalized (divide Flash offset by 255).
%
%  The format of the ColorMatrixImage method is:
%
%      Image *ColorMatrixImage(const Image *image,
%        const KernelInfo *color_matrix,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o color_matrix: the color matrix.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  FUTURE: modify to make use of a MagickMatrix Multiply function
  That should be provided in "matrix.c"
  (ASIDE: actually distorts should do this too but currently doesn't)
*/
MagickExport Image *ColorMatrixImage(const Image *image,
  const KernelInfo *color_matrix,ExceptionInfo *exception)
{
#define ColorMatrixImageTag "ColorMatrix/Image"

  CacheView
    *color_view,
    *image_view;

  /* Start from the identity; the caller's kernel overwrites the top-left
     height x width sub-matrix below. */
  double
    ColorMatrix[6][6] =
    {
      { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }
    };

  Image
    *color_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    u,
    v,
    y;

  /*
    Map given color_matrix, into a 6x6 matrix RGBKA and a constant
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  i=0;
  for (v=0; v < (ssize_t) color_matrix->height; v++)
    for (u=0; u < (ssize_t) color_matrix->width; u++)
    {
      /* Kernel entries beyond 6x6 are read (to keep i in step) but ignored. */
      if ((v < 6) && (u < 6))
        ColorMatrix[v][u]=color_matrix->values[i];
      i++;
    }
  /*
    Initialize color image.
  */
  color_image=CloneImage(image,0,0,MagickTrue,exception);
  if (color_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(color_image,DirectClass,exception) == MagickFalse)
    {
      color_image=DestroyImage(color_image);
      return((Image *) NULL);
    }
  if (image->debug != MagickFalse)
    {
      /* Log the effective 6x6 matrix, one row per line. */
      char
        format[MagickPathExtent],
        *message;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " ColorMatrix image with color matrix:");
      message=AcquireString("");
      for (v=0; v < 6; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < 6; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",
            ColorMatrix[v][u]);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  /*
    Apply the ColorMatrix to image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  color_view=AcquireAuthenticCacheView(color_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,color_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        v;

      size_t
        height;

      GetPixelInfoPixel(image,p,&pixel);
      height=color_matrix->height > 6 ? 6UL : color_matrix->height;
      for (v=0; v < (ssize_t) height; v++)
      {
        /* Row v of the matrix weights R,G,B,[K],[A] plus a constant offset. */
        double
          sum;

        sum=ColorMatrix[v][0]*GetPixelRed(image,p)+ColorMatrix[v][1]*
          GetPixelGreen(image,p)+ColorMatrix[v][2]*GetPixelBlue(image,p);
        if (image->colorspace == CMYKColorspace)
          sum+=ColorMatrix[v][3]*GetPixelBlack(image,p);
        if (image->alpha_trait != UndefinedPixelTrait)
          sum+=ColorMatrix[v][4]*GetPixelAlpha(image,p);
        sum+=QuantumRange*ColorMatrix[v][5];
        switch (v)
        {
          case 0: pixel.red=sum; break;
          case 1: pixel.green=sum; break;
          case 2: pixel.blue=sum; break;
          case 3: pixel.black=sum; break;
          case 4: pixel.alpha=sum; break;
          default: break;
        }
      }
      SetPixelViaPixelInfo(color_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(color_image);
    }
    if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress updates across threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ColorMatrixImage)
#endif
        proceed=SetImageProgress(image,ColorMatrixImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  color_view=DestroyCacheView(color_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    color_image=DestroyImage(color_image);
  return(color_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y F x I n f o                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
%  The format of the DestroyFxInfo method is:
%
%      FxInfo *DestroyFxInfo(FxInfo *fx_info)
%
%  A description of each parameter follows:
%
%    o fx_info: the fx info.
%
*/
MagickPrivate FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
  ssize_t
    remaining;

  /*
    Release every member acquired by AcquireFxInfo, then the context itself;
    cache views are destroyed last-to-first.  Always returns NULL so callers
    can clear their pointer in one statement.
  */
  fx_info->exception=DestroyExceptionInfo(fx_info->exception);
  fx_info->expression=DestroyString(fx_info->expression);
  fx_info->symbols=DestroySplayTree(fx_info->symbols);
  fx_info->colors=DestroySplayTree(fx_info->colors);
  remaining=(ssize_t) GetImageListLength(fx_info->images);
  while (remaining-- > 0)
    fx_info->view[remaining]=DestroyCacheView(fx_info->view[remaining]);
  fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
  fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
  fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
  return(fx_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   F x E v a l u a t e C h a n n e l E x p r e s s i o n                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FxEvaluateChannelExpression() evaluates an expression and returns the
%  results.
%
%  The format of the FxEvaluateExpression method is:
%
%      double FxEvaluateChannelExpression(FxInfo *fx_info,
%        const PixelChannel channel,const ssize_t x,const ssize_t y,
%        double *alpha,ExceptionInfo *exception)
%      double FxEvaluateExpression(FxInfo *fx_info,
%        double *alpha,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o fx_info: the fx info.
%
%    o channel: the channel.
%
%    o x,y: the pixel position.
%
%    o alpha: the result.
%
%    o exception: return any errors or warnings in this structure.
% */ static double FxChannelStatistics(FxInfo *fx_info,Image *image, PixelChannel channel,const char *symbol,ExceptionInfo *exception) { ChannelType channel_mask; char key[MagickPathExtent], statistic[MagickPathExtent]; const char *value; register const char *p; channel_mask=UndefinedChannel; for (p=symbol; (*p != '.') && (*p != '\0'); p++) ; if (*p == '.') { ssize_t option; option=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,p+1); if (option >= 0) { channel=(PixelChannel) option; channel_mask=SetPixelChannelMask(image,(ChannelType) (1UL << channel)); } } (void) FormatLocaleString(key,MagickPathExtent,"%p.%.20g.%s",(void *) image, (double) channel,symbol); value=(const char *) GetValueFromSplayTree(fx_info->symbols,key); if (value != (const char *) NULL) { if (channel_mask != UndefinedChannel) (void) SetPixelChannelMask(image,channel_mask); return(QuantumScale*StringToDouble(value,(char **) NULL)); } (void) DeleteNodeFromSplayTree(fx_info->symbols,key); if (LocaleNCompare(symbol,"depth",5) == 0) { size_t depth; depth=GetImageDepth(image,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",(double) depth); } if (LocaleNCompare(symbol,"kurtosis",8) == 0) { double kurtosis, skewness; (void) GetImageKurtosis(image,&kurtosis,&skewness,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",kurtosis); } if (LocaleNCompare(symbol,"maxima",6) == 0) { double maxima, minima; (void) GetImageRange(image,&minima,&maxima,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",maxima); } if (LocaleNCompare(symbol,"mean",4) == 0) { double mean, standard_deviation; (void) GetImageMean(image,&mean,&standard_deviation,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",mean); } if (LocaleNCompare(symbol,"minima",6) == 0) { double maxima, minima; (void) GetImageRange(image,&minima,&maxima,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",minima); } if 
(LocaleNCompare(symbol,"skewness",8) == 0) { double kurtosis, skewness; (void) GetImageKurtosis(image,&kurtosis,&skewness,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",skewness); } if (LocaleNCompare(symbol,"standard_deviation",18) == 0) { double mean, standard_deviation; (void) GetImageMean(image,&mean,&standard_deviation,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g", standard_deviation); } if (channel_mask != UndefinedChannel) (void) SetPixelChannelMask(image,channel_mask); (void) AddValueToSplayTree(fx_info->symbols,ConstantString(key), ConstantString(statistic)); return(QuantumScale*StringToDouble(statistic,(char **) NULL)); } static double FxEvaluateSubexpression(FxInfo *,const PixelChannel,const ssize_t, const ssize_t,const char *,const size_t,double *,ExceptionInfo *); static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta) { if (beta != 0) return(FxGCD(beta,alpha % beta)); return(alpha); } static inline const char *FxSubexpression(const char *expression, ExceptionInfo *exception) { const char *subexpression; register ssize_t level; level=0; subexpression=expression; while ((*subexpression != '\0') && ((level != 1) || (strchr(")",(int) *subexpression) == (char *) NULL))) { if (strchr("(",(int) *subexpression) != (char *) NULL) level++; else if (strchr(")",(int) *subexpression) != (char *) NULL) level--; subexpression++; } if (*subexpression == '\0') (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnbalancedParenthesis","`%s'",expression); return(subexpression); } static double FxGetSymbol(FxInfo *fx_info,const PixelChannel channel, const ssize_t x,const ssize_t y,const char *expression,const size_t depth, ExceptionInfo *exception) { char *q, symbol[MagickPathExtent]; const char *p, *value; Image *image; MagickBooleanType status; PixelInfo pixel; double alpha, beta; PointInfo point; register ssize_t i; size_t level; p=expression; 
i=GetImageIndexInList(fx_info->images); level=0; point.x=(double) x; point.y=(double) y; if (isalpha((int) ((unsigned char) *(p+1))) == 0) { char *subexpression; subexpression=AcquireString(expression); if (strchr("suv",(int) *p) != (char *) NULL) { switch (*p) { case 's': default: { i=GetImageIndexInList(fx_info->images); break; } case 'u': i=0; break; case 'v': i=1; break; } p++; if (*p == '[') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, depth,&beta,exception); i=(ssize_t) alpha; if (*p != '\0') p++; } if (*p == '.') p++; } if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0)) { p++; if (*p == '{') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '{') level++; else if (*p == '}') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, depth,&beta,exception); point.x=alpha; point.y=beta; if (*p != '\0') p++; } else if (*p == '[') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, depth,&beta,exception); point.x+=alpha; point.y+=beta; if (*p != '\0') p++; } if (*p == '.') p++; } subexpression=DestroyString(subexpression); } image=GetImageFromList(fx_info->images,i); if (image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "NoSuchImage","`%s'",expression); return(0.0); } i=GetImageIndexInList(image); GetPixelInfo(image,&pixel); status=InterpolatePixelInfo(image,fx_info->view[i],image->interpolate, point.x,point.y,&pixel,exception); (void) status; if ((strlen(p) > 2) && (LocaleCompare(p,"intensity") != 0) && (LocaleCompare(p,"luma") != 0) && (LocaleCompare(p,"luminance") != 0) && 
(LocaleCompare(p,"hue") != 0) && (LocaleCompare(p,"saturation") != 0) && (LocaleCompare(p,"lightness") != 0)) { char name[MagickPathExtent]; (void) CopyMagickString(name,p,MagickPathExtent); for (q=name+(strlen(name)-1); q > name; q--) { if (*q == ')') break; if (*q == '.') { *q='\0'; break; } } if ((strlen(name) > 2) && (GetValueFromSplayTree(fx_info->symbols,name) == (const char *) NULL)) { PixelInfo *color; color=(PixelInfo *) GetValueFromSplayTree(fx_info->colors,name); if (color != (PixelInfo *) NULL) { pixel=(*color); p+=strlen(name); } else { MagickBooleanType status; status=QueryColorCompliance(name,AllCompliance,&pixel, fx_info->exception); if (status != MagickFalse) { (void) AddValueToSplayTree(fx_info->colors,ConstantString( name),ClonePixelInfo(&pixel)); p+=strlen(name); } } } } (void) CopyMagickString(symbol,p,MagickPathExtent); StripString(symbol); if (*symbol == '\0') { switch (channel) { case RedPixelChannel: return(QuantumScale*pixel.red); case GreenPixelChannel: return(QuantumScale*pixel.green); case BluePixelChannel: return(QuantumScale*pixel.blue); case BlackPixelChannel: { if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(), ImageError,"ColorSeparatedImageRequired","`%s'", image->filename); return(0.0); } return(QuantumScale*pixel.black); } case AlphaPixelChannel: { if (pixel.alpha_trait == UndefinedPixelTrait) return(1.0); alpha=(double) (QuantumScale*pixel.alpha); return(alpha); } case IndexPixelChannel: return(0.0); case IntensityPixelChannel: { Quantum quantum_pixel[MaxPixelChannels]; SetPixelViaPixelInfo(image,&pixel,quantum_pixel); return(QuantumScale*GetPixelIntensity(image,quantum_pixel)); } default: break; } (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnableToParseExpression","`%s'",p); return(0.0); } switch (*symbol) { case 'A': case 'a': { if (LocaleCompare(symbol,"a") == 0) return((QuantumScale*pixel.alpha)); break; } case 'B': case 'b': { if 
(LocaleCompare(symbol,"b") == 0) return(QuantumScale*pixel.blue); break; } case 'C': case 'c': { if (LocaleNCompare(symbol,"channel",7) == 0) { GeometryInfo channel_info; MagickStatusType flags; flags=ParseGeometry(symbol+7,&channel_info); if (image->colorspace == CMYKColorspace) switch (channel) { case CyanPixelChannel: { if ((flags & RhoValue) == 0) return(0.0); return(channel_info.rho); } case MagentaPixelChannel: { if ((flags & SigmaValue) == 0) return(0.0); return(channel_info.sigma); } case YellowPixelChannel: { if ((flags & XiValue) == 0) return(0.0); return(channel_info.xi); } case BlackPixelChannel: { if ((flags & PsiValue) == 0) return(0.0); return(channel_info.psi); } case AlphaPixelChannel: { if ((flags & ChiValue) == 0) return(0.0); return(channel_info.chi); } default: return(0.0); } switch (channel) { case RedPixelChannel: { if ((flags & RhoValue) == 0) return(0.0); return(channel_info.rho); } case GreenPixelChannel: { if ((flags & SigmaValue) == 0) return(0.0); return(channel_info.sigma); } case BluePixelChannel: { if ((flags & XiValue) == 0) return(0.0); return(channel_info.xi); } case BlackPixelChannel: { if ((flags & ChiValue) == 0) return(0.0); return(channel_info.chi); } case AlphaPixelChannel: { if ((flags & PsiValue) == 0) return(0.0); return(channel_info.psi); } default: return(0.0); } } if (LocaleCompare(symbol,"c") == 0) return(QuantumScale*pixel.red); break; } case 'D': case 'd': { if (LocaleNCompare(symbol,"depth",5) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); break; } case 'E': case 'e': { if (LocaleCompare(symbol,"extent") == 0) { if (image->extent != 0) return((double) image->extent); return((double) GetBlobSize(image)); } break; } case 'G': case 'g': { if (LocaleCompare(symbol,"g") == 0) return(QuantumScale*pixel.green); break; } case 'K': case 'k': { if (LocaleNCompare(symbol,"kurtosis",8) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleCompare(symbol,"k") == 0) { if 
(image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ColorSeparatedImageRequired","`%s'", image->filename); return(0.0); } return(QuantumScale*pixel.black); } break; } case 'H': case 'h': { if (LocaleCompare(symbol,"h") == 0) return((double) image->rows); if (LocaleCompare(symbol,"hue") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation, &lightness); return(hue); } break; } case 'I': case 'i': { if ((LocaleCompare(symbol,"image.depth") == 0) || (LocaleCompare(symbol,"image.minima") == 0) || (LocaleCompare(symbol,"image.maxima") == 0) || (LocaleCompare(symbol,"image.mean") == 0) || (LocaleCompare(symbol,"image.kurtosis") == 0) || (LocaleCompare(symbol,"image.skewness") == 0) || (LocaleCompare(symbol,"image.standard_deviation") == 0)) return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception)); if (LocaleCompare(symbol,"image.resolution.x") == 0) return(image->resolution.x); if (LocaleCompare(symbol,"image.resolution.y") == 0) return(image->resolution.y); if (LocaleCompare(symbol,"intensity") == 0) { Quantum quantum_pixel[MaxPixelChannels]; SetPixelViaPixelInfo(image,&pixel,quantum_pixel); return(QuantumScale*GetPixelIntensity(image,quantum_pixel)); } if (LocaleCompare(symbol,"i") == 0) return((double) x); break; } case 'J': case 'j': { if (LocaleCompare(symbol,"j") == 0) return((double) y); break; } case 'L': case 'l': { if (LocaleCompare(symbol,"lightness") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation, &lightness); return(lightness); } if (LocaleCompare(symbol,"luma") == 0) { double luma; luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue; return(QuantumScale*luma); } if (LocaleCompare(symbol,"luminance") == 0) { double luminence; luminence=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue; return(QuantumScale*luminence); } break; } case 'M': case 'm': { 
if (LocaleNCompare(symbol,"maxima",6) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"mean",4) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"minima",6) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleCompare(symbol,"m") == 0) return(QuantumScale*pixel.green); break; } case 'N': case 'n': { if (LocaleCompare(symbol,"n") == 0) return((double) GetImageListLength(fx_info->images)); break; } case 'O': case 'o': { if (LocaleCompare(symbol,"o") == 0) return(QuantumScale*pixel.alpha); break; } case 'P': case 'p': { if (LocaleCompare(symbol,"page.height") == 0) return((double) image->page.height); if (LocaleCompare(symbol,"page.width") == 0) return((double) image->page.width); if (LocaleCompare(symbol,"page.x") == 0) return((double) image->page.x); if (LocaleCompare(symbol,"page.y") == 0) return((double) image->page.y); if (LocaleCompare(symbol,"printsize.x") == 0) return(PerceptibleReciprocal(image->resolution.x)*image->columns); if (LocaleCompare(symbol,"printsize.y") == 0) return(PerceptibleReciprocal(image->resolution.y)*image->rows); break; } case 'Q': case 'q': { if (LocaleCompare(symbol,"quality") == 0) return((double) image->quality); break; } case 'R': case 'r': { if (LocaleCompare(symbol,"resolution.x") == 0) return(image->resolution.x); if (LocaleCompare(symbol,"resolution.y") == 0) return(image->resolution.y); if (LocaleCompare(symbol,"r") == 0) return(QuantumScale*pixel.red); break; } case 'S': case 's': { if (LocaleCompare(symbol,"saturation") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation, &lightness); return(saturation); } if (LocaleNCompare(symbol,"skewness",8) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"standard_deviation",18) == 0) 
return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); break; } case 'T': case 't': { if (LocaleCompare(symbol,"t") == 0) return((double) GetImageIndexInList(fx_info->images)); break; } case 'W': case 'w': { if (LocaleCompare(symbol,"w") == 0) return((double) image->columns); break; } case 'Y': case 'y': { if (LocaleCompare(symbol,"y") == 0) return(QuantumScale*pixel.blue); break; } case 'Z': case 'z': { if (LocaleCompare(symbol,"z") == 0) return((double) GetImageDepth(image,fx_info->exception)); break; } default: break; } value=(const char *) GetValueFromSplayTree(fx_info->symbols,symbol); if (value != (const char *) NULL) return(StringToDouble(value,(char **) NULL)); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnableToParseExpression","`%s'",symbol); return(0.0); } static const char *FxOperatorPrecedence(const char *expression, ExceptionInfo *exception) { typedef enum { UndefinedPrecedence, NullPrecedence, BitwiseComplementPrecedence, ExponentPrecedence, ExponentialNotationPrecedence, MultiplyPrecedence, AdditionPrecedence, ShiftPrecedence, RelationalPrecedence, EquivalencyPrecedence, BitwiseAndPrecedence, BitwiseOrPrecedence, LogicalAndPrecedence, LogicalOrPrecedence, TernaryPrecedence, AssignmentPrecedence, CommaPrecedence, SeparatorPrecedence } FxPrecedence; FxPrecedence precedence, target; register const char *subexpression; register int c; size_t level; c=(-1); level=0; subexpression=(const char *) NULL; target=NullPrecedence; while ((c != '\0') && (*expression != '\0')) { precedence=UndefinedPrecedence; if ((isspace((int) ((unsigned char) *expression)) != 0) || (c == (int) '@')) { expression++; continue; } switch (*expression) { case 'A': case 'a': { #if defined(MAGICKCORE_HAVE_ACOSH) if (LocaleNCompare(expression,"acosh",5) == 0) { expression+=5; break; } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (LocaleNCompare(expression,"asinh",5) == 0) { expression+=5; break; } #endif #if defined(MAGICKCORE_HAVE_ATANH) if 
(LocaleNCompare(expression,"atanh",5) == 0) { expression+=5; break; } #endif if (LocaleNCompare(expression,"atan2",5) == 0) { expression+=5; break; } break; } case 'E': case 'e': { if ((isdigit(c) != 0) && ((LocaleNCompare(expression,"E+",2) == 0) || (LocaleNCompare(expression,"E-",2) == 0))) { expression+=2; /* scientific notation */ break; } } case 'J': case 'j': { if ((LocaleNCompare(expression,"j0",2) == 0) || (LocaleNCompare(expression,"j1",2) == 0)) { expression+=2; break; } break; } case '#': { while (isxdigit((int) ((unsigned char) *(expression+1))) != 0) expression++; break; } default: break; } if ((c == (int) '{') || (c == (int) '[')) level++; else if ((c == (int) '}') || (c == (int) ']')) level--; if (level == 0) switch ((unsigned char) *expression) { case '~': case '!': { precedence=BitwiseComplementPrecedence; break; } case '^': case '@': { precedence=ExponentPrecedence; break; } default: { if (((c != 0) && ((isdigit(c) != 0) || (strchr(")",c) != (char *) NULL))) && (((islower((int) ((unsigned char) *expression)) != 0) || (strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) || ((isdigit(c) == 0) && (isdigit((int) ((unsigned char) *expression)) != 0))) && (strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL)) precedence=MultiplyPrecedence; break; } case '*': case '/': case '%': { precedence=MultiplyPrecedence; break; } case '+': case '-': { if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) || (isalpha(c) != 0)) precedence=AdditionPrecedence; break; } case LeftShiftOperator: case RightShiftOperator: { precedence=ShiftPrecedence; break; } case '<': case LessThanEqualOperator: case GreaterThanEqualOperator: case '>': { precedence=RelationalPrecedence; break; } case EqualOperator: case NotEqualOperator: { precedence=EquivalencyPrecedence; break; } case '&': { precedence=BitwiseAndPrecedence; break; } case '|': { precedence=BitwiseOrPrecedence; break; } case LogicalAndOperator: { precedence=LogicalAndPrecedence; break; } case 
LogicalOrOperator: { precedence=LogicalOrPrecedence; break; } case ExponentialNotation: { precedence=ExponentialNotationPrecedence; break; } case ':': case '?': { precedence=TernaryPrecedence; break; } case '=': { precedence=AssignmentPrecedence; break; } case ',': { precedence=CommaPrecedence; break; } case ';': { precedence=SeparatorPrecedence; break; } } if ((precedence == BitwiseComplementPrecedence) || (precedence == TernaryPrecedence) || (precedence == AssignmentPrecedence)) { if (precedence > target) { /* Right-to-left associativity. */ target=precedence; subexpression=expression; } } else if (precedence >= target) { /* Left-to-right associativity. */ target=precedence; subexpression=expression; } if (strchr("(",(int) *expression) != (char *) NULL) expression=FxSubexpression(expression,exception); c=(int) (*expression++); } return(subexpression); } static double FxEvaluateSubexpression(FxInfo *fx_info, const PixelChannel channel,const ssize_t x,const ssize_t y, const char *expression,const size_t depth,double *beta, ExceptionInfo *exception) { #define FxMaxParenthesisDepth 58 #define FxMaxSubexpressionDepth 200 #define FxReturn(value) \ { \ subexpression=DestroyString(subexpression); \ return(value); \ } char *q, *subexpression; double alpha, gamma; register const char *p; *beta=0.0; subexpression=AcquireString(expression); *subexpression='\0'; if (depth > FxMaxSubexpressionDepth) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnableToParseExpression","`%s'",expression); FxReturn(0.0); } if (exception->severity >= ErrorException) FxReturn(0.0); while (isspace((int) ((unsigned char) *expression)) != 0) expression++; if (*expression == '\0') FxReturn(0.0); p=FxOperatorPrecedence(expression,exception); if (p != (const char *) NULL) { (void) CopyMagickString(subexpression,expression,(size_t) (p-expression+1)); alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1, beta,exception); switch ((unsigned char) *p) { case '~': 
{ *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) (~(size_t) *beta); FxReturn(*beta); } case '!': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(*beta == 0.0 ? 1.0 : 0.0); } case '^': { *beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p, depth+1,beta,exception)); FxReturn(*beta); } case '*': case ExponentialNotation: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha*(*beta)); } case '/': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if (*beta == 0.0) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"DivideByZero","`%s'",expression); FxReturn(0.0); } FxReturn(alpha/(*beta)); } case '%': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=fabs(floor((*beta)+0.5)); if (*beta == 0.0) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"DivideByZero","`%s'",expression); FxReturn(0.0); } FxReturn(fmod(alpha,*beta)); } case '+': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha+(*beta)); } case '-': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha-(*beta)); } case LeftShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (gamma+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } *beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5)); FxReturn(*beta); } case RightShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (gamma+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } *beta=(double) 
((size_t) (alpha+0.5) >> (size_t) (gamma+0.5)); FxReturn(*beta); } case '<': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha < *beta ? 1.0 : 0.0); } case LessThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha <= *beta ? 1.0 : 0.0); } case '>': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha > *beta ? 1.0 : 0.0); } case GreaterThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha >= *beta ? 1.0 : 0.0); } case EqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0); } case NotEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0); } case '&': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5)); FxReturn(*beta); } case '|': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5)); FxReturn(*beta); } case LogicalAndOperator: { p++; if (alpha <= 0.0) { *beta=0.0; FxReturn(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); *beta=(gamma > 0.0) ? 1.0 : 0.0; FxReturn(*beta); } case LogicalOrOperator: { p++; if (alpha > 0.0) { *beta=1.0; FxReturn(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); *beta=(gamma > 0.0) ? 
1.0 : 0.0; FxReturn(*beta); } case '?': { (void) CopyMagickString(subexpression,++p,MagickPathExtent); q=subexpression; p=StringToken(":",&q); if (q == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } if (fabs(alpha) >= MagickEpsilon) gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); else gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q,depth+1,beta, exception); FxReturn(gamma); } case '=': { char numeric[MagickPathExtent]; q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); (void) FormatLocaleString(numeric,MagickPathExtent,"%.20g",*beta); (void) DeleteNodeFromSplayTree(fx_info->symbols,subexpression); (void) AddValueToSplayTree(fx_info->symbols,ConstantString( subexpression),ConstantString(numeric)); FxReturn(*beta); } case ',': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha); } case ';': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(*beta); } default: { gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1, beta,exception); FxReturn(gamma); } } } if (strchr("(",(int) *expression) != (char *) NULL) { if (depth >= FxMaxParenthesisDepth) (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "ParenthesisNestedTooDeeply","`%s'",expression); (void) CopyMagickString(subexpression,expression+1,MagickPathExtent); if (strlen(subexpression) != 0) subexpression[strlen(subexpression)-1]='\0'; gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1, beta,exception); FxReturn(gamma); } switch (*expression) { case '+': { 
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn(1.0*gamma); } case '-': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn(-1.0*gamma); } case '~': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn((double) (~(size_t) (gamma+0.5))); } case 'A': case 'a': { if (LocaleNCompare(expression,"abs",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(fabs(alpha)); } #if defined(MAGICKCORE_HAVE_ACOSH) if (LocaleNCompare(expression,"acosh",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(acosh(alpha)); } #endif if (LocaleNCompare(expression,"acos",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(acos(alpha)); } #if defined(MAGICKCORE_HAVE_J1) if (LocaleNCompare(expression,"airy",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0.0) FxReturn(1.0); gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha); FxReturn(gamma*gamma); } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (LocaleNCompare(expression,"asinh",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(asinh(alpha)); } #endif if (LocaleNCompare(expression,"asin",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(asin(alpha)); } if (LocaleNCompare(expression,"alt",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(((ssize_t) alpha) & 0x01 ? 
-1.0 : 1.0); } if (LocaleNCompare(expression,"atan2",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(atan2(alpha,*beta)); } #if defined(MAGICKCORE_HAVE_ATANH) if (LocaleNCompare(expression,"atanh",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(atanh(alpha)); } #endif if (LocaleNCompare(expression,"atan",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(atan(alpha)); } if (LocaleCompare(expression,"a") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'B': case 'b': { if (LocaleCompare(expression,"b") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'C': case 'c': { if (LocaleNCompare(expression,"ceil",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(ceil(alpha)); } if (LocaleNCompare(expression,"clamp",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (alpha < 0.0) FxReturn(0.0); if (alpha > 1.0) FxReturn(1.0); FxReturn(alpha); } if (LocaleNCompare(expression,"cosh",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(cosh(alpha)); } if (LocaleNCompare(expression,"cos",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(cos(alpha)); } if (LocaleCompare(expression,"c") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'D': case 'd': { if (LocaleNCompare(expression,"debug",5) == 0) { const char *type; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (fx_info->images->colorspace == CMYKColorspace) switch (channel) { case CyanPixelChannel: type="cyan"; break; case MagentaPixelChannel: 
type="magenta"; break; case YellowPixelChannel: type="yellow"; break; case AlphaPixelChannel: type="opacity"; break; case BlackPixelChannel: type="black"; break; default: type="unknown"; break; } else switch (channel) { case RedPixelChannel: type="red"; break; case GreenPixelChannel: type="green"; break; case BluePixelChannel: type="blue"; break; case AlphaPixelChannel: type="opacity"; break; default: type="unknown"; break; } *subexpression='\0'; if (strlen(expression) > 6) (void) CopyMagickString(subexpression,expression+6, MagickPathExtent); if (strlen(subexpression) > 1) subexpression[strlen(subexpression)-1]='\0'; if (fx_info->file != (FILE *) NULL) (void) FormatLocaleFile(fx_info->file,"%s[%.20g,%.20g].%s: " "%s=%.*g\n",fx_info->images->filename,(double) x,(double) y,type, subexpression,GetMagickPrecision(),alpha); FxReturn(0.0); } if (LocaleNCompare(expression,"drc",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn((alpha/(*beta*(alpha-1.0)+1.0))); } break; } case 'E': case 'e': { if (LocaleCompare(expression,"epsilon") == 0) FxReturn(MagickEpsilon); #if defined(MAGICKCORE_HAVE_ERF) if (LocaleNCompare(expression,"erf",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(erf(alpha)); } #endif if (LocaleNCompare(expression,"exp",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(exp(alpha)); } if (LocaleCompare(expression,"e") == 0) FxReturn(2.7182818284590452354); break; } case 'F': case 'f': { if (LocaleNCompare(expression,"floor",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(floor(alpha)); } break; } case 'G': case 'g': { if (LocaleNCompare(expression,"gauss",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); gamma=exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI); FxReturn(gamma); } 
if (LocaleNCompare(expression,"gcd",3) == 0) { MagickOffsetType gcd; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType) (*beta+ 0.5)); FxReturn((double) gcd); } if (LocaleCompare(expression,"g") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'H': case 'h': { if (LocaleCompare(expression,"h") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (LocaleCompare(expression,"hue") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (LocaleNCompare(expression,"hypot",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(hypot(alpha,*beta)); } break; } case 'K': case 'k': { if (LocaleCompare(expression,"k") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'I': case 'i': { if (LocaleCompare(expression,"intensity") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (LocaleNCompare(expression,"int",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(floor(alpha)); } if (LocaleNCompare(expression,"isnan",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn((double) !!IsNaN(alpha)); } if (LocaleCompare(expression,"i") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'J': case 'j': { if (LocaleCompare(expression,"j") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); #if defined(MAGICKCORE_HAVE_J0) if (LocaleNCompare(expression,"j0",2) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(j0(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (LocaleNCompare(expression,"j1",2) == 0) { 
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(j1(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (LocaleNCompare(expression,"jinc",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0.0) FxReturn(1.0); gamma=(2.0*j1((MagickPI*alpha))/(MagickPI*alpha)); FxReturn(gamma); } #endif break; } case 'L': case 'l': { if (LocaleNCompare(expression,"ln",2) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(log(alpha)); } if (LocaleNCompare(expression,"logtwo",6) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6, depth+1,beta,exception); FxReturn(log10(alpha)/log10(2.0)); } if (LocaleNCompare(expression,"log",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(log10(alpha)); } if (LocaleCompare(expression,"lightness") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'M': case 'm': { if (LocaleCompare(expression,"MaxRGB") == 0) FxReturn(QuantumRange); if (LocaleNCompare(expression,"maxima",6) == 0) break; if (LocaleNCompare(expression,"max",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha > *beta ? alpha : *beta); } if (LocaleNCompare(expression,"minima",6) == 0) break; if (LocaleNCompare(expression,"min",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha < *beta ? 
alpha : *beta); } if (LocaleNCompare(expression,"mod",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); gamma=alpha-floor((alpha*PerceptibleReciprocal(*beta)))*(*beta); FxReturn(gamma); } if (LocaleCompare(expression,"m") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'N': case 'n': { if (LocaleNCompare(expression,"not",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn((double) (alpha < MagickEpsilon)); } if (LocaleCompare(expression,"n") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'O': case 'o': { if (LocaleCompare(expression,"Opaque") == 0) FxReturn(1.0); if (LocaleCompare(expression,"o") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'P': case 'p': { if (LocaleCompare(expression,"phi") == 0) FxReturn(MagickPHI); if (LocaleCompare(expression,"pi") == 0) FxReturn(MagickPI); if (LocaleNCompare(expression,"pow",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(pow(alpha,*beta)); } if (LocaleCompare(expression,"p") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Q': case 'q': { if (LocaleCompare(expression,"QuantumRange") == 0) FxReturn(QuantumRange); if (LocaleCompare(expression,"QuantumScale") == 0) FxReturn(QuantumScale); break; } case 'R': case 'r': { if (LocaleNCompare(expression,"rand",4) == 0) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FxEvaluateSubexpression) #endif alpha=GetPseudoRandomValue(fx_info->random_info); FxReturn(alpha); } if (LocaleNCompare(expression,"round",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(floor(alpha+0.5)); } if (LocaleCompare(expression,"r") == 0) 
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'S': case 's': { if (LocaleCompare(expression,"saturation") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (LocaleNCompare(expression,"sign",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(alpha < 0.0 ? -1.0 : 1.0); } if (LocaleNCompare(expression,"sinc",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0) FxReturn(1.0); gamma=sin((MagickPI*alpha))/(MagickPI*alpha); FxReturn(gamma); } if (LocaleNCompare(expression,"sinh",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(sinh(alpha)); } if (LocaleNCompare(expression,"sin",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(sin(alpha)); } if (LocaleNCompare(expression,"sqrt",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(sqrt(alpha)); } if (LocaleNCompare(expression,"squish",6) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6, depth+1,beta,exception); FxReturn((1.0/(1.0+exp(-alpha)))); } if (LocaleCompare(expression,"s") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'T': case 't': { if (LocaleNCompare(expression,"tanh",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(tanh(alpha)); } if (LocaleNCompare(expression,"tan",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(tan(alpha)); } if (LocaleCompare(expression,"Transparent") == 0) FxReturn(0.0); if (LocaleNCompare(expression,"trunc",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (alpha >= 0.0) 
FxReturn(floor(alpha));
          /* trunc(x) for negative x rounds toward zero, i.e. ceil(). */
          FxReturn(ceil(alpha));
        }
      if (LocaleCompare(expression,"t") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'U':
    case 'u':
    {
      if (LocaleCompare(expression,"u") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'V':
    case 'v':
    {
      if (LocaleCompare(expression,"v") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleNCompare(expression,"while",5) == 0)
        {
          /*
            Re-evaluate the subexpression until its value drops below
            MagickEpsilon; the loop's side effects land in *beta, which is
            the result of while().
          */
          do
          {
            alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
              depth+1,beta,exception);
          } while (fabs(alpha) >= MagickEpsilon);
          FxReturn(*beta);
        }
      if (LocaleCompare(expression,"w") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'Y':
    case 'y':
    {
      if (LocaleCompare(expression,"y") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(expression,"z") == 0)
        FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
      break;
    }
    default:
      break;
  }
  /*
    Not a recognized function or constant: try to parse it as a number with
    an optional SI prefix; if nothing was consumed, fall back to a symbol
    lookup.  FxReturn() releases subexpression before returning (destroying
    an already-NULL string is a no-op here).
  */
  subexpression=DestroyString(subexpression);
  q=(char *) expression;
  alpha=InterpretSiPrefixValue(expression,&q);
  if (q == expression)
    FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
  FxReturn(alpha);
}

/*
  Evaluate the expression once for the gray channel at (0,0); used when the
  expression does not depend on pixel position.
*/
MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception);
  return(status);
}

/*
  Dry-run evaluation to surface parse errors early: temporarily detaches the
  debug output file so debug() clauses stay silent, then restores it.
*/
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  FILE
    *file;

  MagickBooleanType
    status;

  file=fx_info->file;
  fx_info->file=(FILE *) NULL;
  status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception);
  fx_info->file=file;
  return(status);
}

MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
  const PixelChannel
channel,const ssize_t x,const ssize_t y, double *alpha,ExceptionInfo *exception) { double beta; beta=0.0; *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,0, &beta,exception); return(exception->severity == OptionError ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F x I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FxImage() applies a mathematical expression to the specified image. % % The format of the FxImage method is: % % Image *FxImage(const Image *image,const char *expression, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o expression: A mathematical expression. % % o exception: return any errors or warnings in this structure. % */ static FxInfo **DestroyFxThreadSet(FxInfo **fx_info) { register ssize_t i; assert(fx_info != (FxInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (fx_info[i] != (FxInfo *) NULL) fx_info[i]=DestroyFxInfo(fx_info[i]); fx_info=(FxInfo **) RelinquishMagickMemory(fx_info); return(fx_info); } static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression, ExceptionInfo *exception) { char *fx_expression; FxInfo **fx_info; double alpha; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info)); if (fx_info == (FxInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return((FxInfo **) NULL); } (void) memset(fx_info,0,number_threads*sizeof(*fx_info)); if (*expression != '@') fx_expression=ConstantString(expression); else fx_expression=FileToString(expression+1,~0UL,exception); for (i=0; i < (ssize_t) number_threads; i++) { MagickBooleanType status; 
fx_info[i]=AcquireFxInfo(image,fx_expression,exception); if (fx_info[i] == (FxInfo *) NULL) break; status=FxPreprocessExpression(fx_info[i],&alpha,exception); if (status == MagickFalse) break; } fx_expression=DestroyString(fx_expression); if (i < (ssize_t) number_threads) fx_info=DestroyFxThreadSet(fx_info); return(fx_info); } MagickExport Image *FxImage(const Image *image,const char *expression, ExceptionInfo *exception) { #define FxImageTag "Fx/Image" CacheView *fx_view, *image_view; FxInfo **magick_restrict fx_info; Image *fx_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (expression == (const char *) NULL) return(CloneImage(image,0,0,MagickTrue,exception)); fx_info=AcquireFxThreadSet(image,expression,exception); if (fx_info == (FxInfo **) NULL) return((Image *) NULL); fx_image=CloneImage(image,0,0,MagickTrue,exception); if (fx_image == (Image *) NULL) { fx_info=DestroyFxThreadSet(fx_info); return((Image *) NULL); } if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse) { fx_info=DestroyFxThreadSet(fx_info); fx_image=DestroyImage(fx_image); return((Image *) NULL); } /* Fx image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); fx_view=AcquireAuthenticCacheView(fx_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,fx_image,fx_image->rows,1) #endif for (y=0; y < (ssize_t) fx_image->rows; y++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) fx_image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait fx_traits=GetPixelChannelTraits(fx_image,channel); if ((traits == UndefinedPixelTrait) || (fx_traits == UndefinedPixelTrait)) continue; if ((fx_traits & CopyPixelTrait) != 0) { SetPixelChannel(fx_image,channel,p[i],q); continue; } alpha=0.0; (void) FxEvaluateChannelExpression(fx_info[id],channel,x,y,&alpha, exception); q[i]=ClampToQuantum(QuantumRange*alpha); } p+=GetPixelChannels(image); q+=GetPixelChannels(fx_image); } if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FxImage) #endif proceed=SetImageProgress(image,FxImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } fx_view=DestroyCacheView(fx_view); image_view=DestroyCacheView(image_view); fx_info=DestroyFxThreadSet(fx_info); if (status == MagickFalse) 
    fx_image=DestroyImage(fx_image);
  return(fx_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     I m p l o d e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ImplodeImage() creates a new image that is a copy of an existing
%  one with the image pixels "implode" by the specified percentage.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  The format of the ImplodeImage method is:
%
%      Image *ImplodeImage(const Image *image,const double amount,
%        const PixelInterpolateMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o implode_image: Method ImplodeImage returns a pointer to the image
%      after it is implode.  A null image is returned if there is a memory
%      shortage.
%
%    o image: the image.
%
%    o amount:  Define the extent of the implosion.
%
%    o method: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ImplodeImage(const Image *image,const double amount,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ImplodeImageTag  "Implode/Image"

  CacheView
    *canvas_view,
    *implode_view,
    *interpolate_view;

  double
    radius;

  Image
    *canvas_image,
    *implode_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize implode image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
      (canvas_image->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlphaChannel(canvas_image,OpaqueAlphaChannel,exception);
  implode_image=CloneImage(canvas_image,0,0,MagickTrue,exception);
  if (implode_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(implode_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      implode_image=DestroyImage(implode_image);
      return((Image *) NULL);
    }
  /*
    Compute scaling factor: a non-square image is scaled so the implosion
    acts within an ellipse inscribed in the image; radius is the smaller
    half-dimension.
  */
  scale.x=1.0;
  scale.y=1.0;
  center.x=0.5*canvas_image->columns;
  center.y=0.5*canvas_image->rows;
  radius=center.x;
  if (canvas_image->columns > canvas_image->rows)
    scale.y=(double) canvas_image->columns/(double) canvas_image->rows;
  else
    if (canvas_image->columns < canvas_image->rows)
      {
        scale.x=(double) canvas_image->rows/(double) canvas_image->columns;
        radius=center.y;
      }
  /*
    Implode image: pixels outside the ellipse are copied through; pixels
    inside are resampled from a radially distorted source coordinate.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas_image,exception);
  interpolate_view=AcquireVirtualCacheView(canvas_image,exception);
  implode_view=AcquireAuthenticCacheView(implode_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas_image,implode_image,canvas_image->rows,1)
#endif
  for (y=0; y < (ssize_t) canvas_image->rows; y++)
  {
    double
      distance;

    PointInfo
      delta;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) canvas_image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        for (i=0; i < (ssize_t) GetPixelChannels(canvas_image); i++)
        {
          PixelChannel channel = GetPixelChannelChannel(canvas_image,i);
          PixelTrait traits = GetPixelChannelTraits(canvas_image,channel);
          PixelTrait implode_traits = GetPixelChannelTraits(implode_image,
            channel);
          if ((traits == UndefinedPixelTrait) ||
              (implode_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(implode_image,channel,p[i],q);
        }
      else
        {
          double
            factor;

          /*
            Implode the pixel: pull the sample point toward (or away from)
            the center by a sine-shaped radial factor.
          */
          factor=1.0;
          if (distance > 0.0)
            factor=pow(sin(MagickPI*sqrt((double) distance)/radius/2),-amount);
          status=InterpolatePixelChannels(canvas_image,interpolate_view,
            implode_image,method,(double) (factor*delta.x/scale.x+center.x),
            (double) (factor*delta.y/scale.y+center.y),q,exception);
          if (status == MagickFalse)
            break;
        }
      p+=GetPixelChannels(canvas_image);
      q+=GetPixelChannels(implode_image);
    }
    if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
      status=MagickFalse;
    if (canvas_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ImplodeImage)
#endif
        proceed=SetImageProgress(canvas_image,ImplodeImageTag,progress++,
          canvas_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  implode_view=DestroyCacheView(implode_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas_image=DestroyImage(canvas_image);
  if (status == MagickFalse)
    implode_image=DestroyImage(implode_image);
  return(implode_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o r p h I m a g e s                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  The MorphImages() method requires a minimum of two images.  The first
%  image is transformed into the second by a number of intervening images
%  as specified by frames.
%
%  The format of the MorphImage method is:
%
%      Image *MorphImages(const Image *image,const size_t number_frames,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o number_frames:  Define the number of in-between image to generate.
%      The more in-between frames, the smoother the morph.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *MorphImages(const Image *image,const size_t number_frames, ExceptionInfo *exception) { #define MorphImageTag "Morph/Image" double alpha, beta; Image *morph_image, *morph_images; MagickBooleanType status; MagickOffsetType scene; register const Image *next; register ssize_t n; ssize_t y; /* Clone first frame in sequence. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); morph_images=CloneImage(image,0,0,MagickTrue,exception); if (morph_images == (Image *) NULL) return((Image *) NULL); if (GetNextImageInList(image) == (Image *) NULL) { /* Morph single image. */ for (n=1; n < (ssize_t) number_frames; n++) { morph_image=CloneImage(image,0,0,MagickTrue,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) n, number_frames); if (proceed == MagickFalse) status=MagickFalse; } } return(GetFirstImageInList(morph_images)); } /* Morph image sequence. 
*/ status=MagickTrue; scene=0; next=image; for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next)) { for (n=0; n < (ssize_t) number_frames; n++) { CacheView *image_view, *morph_view; beta=(double) (n+1.0)/(double) (number_frames+1.0); alpha=1.0-beta; morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta* GetNextImageInList(next)->columns+0.5),(size_t) (alpha*next->rows+beta* GetNextImageInList(next)->rows+0.5),next->filter,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } status=SetImageStorageClass(morph_image,DirectClass,exception); if (status == MagickFalse) { morph_image=DestroyImage(morph_image); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); morph_images=GetLastImageInList(morph_images); morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns, morph_images->rows,GetNextImageInList(next)->filter,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } image_view=AcquireVirtualCacheView(morph_image,exception); morph_view=AcquireAuthenticCacheView(morph_images,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(morph_image,morph_image,morph_image->rows,1) #endif for (y=0; y < (ssize_t) morph_images->rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1, exception); q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) morph_images->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(morph_image); i++) { PixelChannel channel = 
GetPixelChannelChannel(morph_image,i); PixelTrait traits = GetPixelChannelTraits(morph_image,channel); PixelTrait morph_traits=GetPixelChannelTraits(morph_images,channel); if ((traits == UndefinedPixelTrait) || (morph_traits == UndefinedPixelTrait)) continue; if ((morph_traits & CopyPixelTrait) != 0) { SetPixelChannel(morph_image,channel,p[i],q); continue; } SetPixelChannel(morph_image,channel,ClampToQuantum(alpha* GetPixelChannel(morph_images,channel,q)+beta*p[i]),q); } p+=GetPixelChannels(morph_image); q+=GetPixelChannels(morph_images); } sync=SyncCacheViewAuthenticPixels(morph_view,exception); if (sync == MagickFalse) status=MagickFalse; } morph_view=DestroyCacheView(morph_view); image_view=DestroyCacheView(image_view); morph_image=DestroyImage(morph_image); } if (n < (ssize_t) number_frames) break; /* Clone last frame in sequence. */ morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); morph_images=GetLastImageInList(morph_images); if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_MorphImages) #endif proceed=SetImageProgress(image,MorphImageTag,scene, GetImageListLength(image)); if (proceed == MagickFalse) status=MagickFalse; } scene++; } if (GetNextImageInList(next) != (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } return(GetFirstImageInList(morph_images)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P l a s m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PlasmaImage() initializes an image with plasma fractal values. 
The image % must be initialized with a base color and the random number generator % seeded before this method is called. % % The format of the PlasmaImage method is: % % MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment, % size_t attenuate,size_t depth,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o segment: Define the region to apply plasma fractals values. % % o attenuate: Define the plasma attenuation factor. % % o depth: Limit the plasma recursion depth. % % o exception: return any errors or warnings in this structure. % */ static inline Quantum PlasmaPixel(RandomInfo *random_info, const double pixel,const double noise) { Quantum plasma; plasma=ClampToQuantum(pixel+noise*GetPseudoRandomValue(random_info)- noise/2.0); if (plasma <= 0) return((Quantum) 0); if (plasma >= QuantumRange) return(QuantumRange); return(plasma); } static MagickBooleanType PlasmaImageProxy(Image *image,CacheView *image_view, CacheView *u_view,CacheView *v_view,RandomInfo *random_info, const SegmentInfo *segment,size_t attenuate,size_t depth, ExceptionInfo *exception) { double plasma; register const Quantum *magick_restrict u, *magick_restrict v; register Quantum *magick_restrict q; register ssize_t i; ssize_t x, x_mid, y, y_mid; if ((fabs(segment->x2-segment->x1) <= MagickEpsilon) && (fabs(segment->y2-segment->y1) <= MagickEpsilon)) return(MagickTrue); if (depth != 0) { MagickBooleanType status; SegmentInfo local_info; /* Divide the area into quadrants and recurse. 
*/ depth--; attenuate++; x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5); y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5); local_info=(*segment); local_info.x2=(double) x_mid; local_info.y2=(double) y_mid; (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info, &local_info,attenuate,depth,exception); local_info=(*segment); local_info.y1=(double) y_mid; local_info.x2=(double) x_mid; (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info, &local_info,attenuate,depth,exception); local_info=(*segment); local_info.x1=(double) x_mid; local_info.y2=(double) y_mid; (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info, &local_info,attenuate,depth,exception); local_info=(*segment); local_info.x1=(double) x_mid; local_info.y1=(double) y_mid; status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info, &local_info,attenuate,depth,exception); return(status); } x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5); y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5); if ((fabs(segment->x1-x_mid) < MagickEpsilon) && (fabs(segment->x2-x_mid) < MagickEpsilon) && (fabs(segment->y1-y_mid) < MagickEpsilon) && (fabs(segment->y2-y_mid) < MagickEpsilon)) return(MagickFalse); /* Average pixels and apply plasma. */ plasma=(double) QuantumRange/(2.0*attenuate); if ((fabs(segment->x1-x_mid) > MagickEpsilon) || (fabs(segment->x2-x_mid) > MagickEpsilon)) { /* Left pixel. 
*/ x=(ssize_t) ceil(segment->x1-0.5); u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),1,1, exception); v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),1,1, exception); q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception); if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) || (q == (Quantum *) NULL)) return(MagickTrue); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma); } (void) SyncCacheViewAuthenticPixels(image_view,exception); if (fabs(segment->x1-segment->x2) > MagickEpsilon) { /* Right pixel. */ x=(ssize_t) ceil(segment->x2-0.5); u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5), 1,1,exception); v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5), 1,1,exception); q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception); if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) || (q == (Quantum *) NULL)) return(MagickTrue); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma); } (void) SyncCacheViewAuthenticPixels(image_view,exception); } } if ((fabs(segment->y1-y_mid) > MagickEpsilon) || (fabs(segment->y2-y_mid) > MagickEpsilon)) { if ((fabs(segment->x1-x_mid) > MagickEpsilon) || (fabs(segment->y2-y_mid) > MagickEpsilon)) { /* Bottom pixel. 
*/ y=(ssize_t) ceil(segment->y2-0.5); u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y, 1,1,exception); v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y, 1,1,exception); q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception); if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) || (q == (Quantum *) NULL)) return(MagickTrue); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma); } (void) SyncCacheViewAuthenticPixels(image_view,exception); } if (fabs(segment->y1-segment->y2) > MagickEpsilon) { /* Top pixel. */ y=(ssize_t) ceil(segment->y1-0.5); u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y, 1,1,exception); v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y, 1,1,exception); q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception); if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) || (q == (Quantum *) NULL)) return(MagickTrue); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma); } (void) SyncCacheViewAuthenticPixels(image_view,exception); } } if ((fabs(segment->x1-segment->x2) > MagickEpsilon) || (fabs(segment->y1-segment->y2) > MagickEpsilon)) { /* Middle pixel. 
*/ x=(ssize_t) ceil(segment->x1-0.5); y=(ssize_t) ceil(segment->y1-0.5); u=GetCacheViewVirtualPixels(u_view,x,y,1,1,exception); x=(ssize_t) ceil(segment->x2-0.5); y=(ssize_t) ceil(segment->y2-0.5); v=GetCacheViewVirtualPixels(v_view,x,y,1,1,exception); q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception); if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) || (q == (Quantum *) NULL)) return(MagickTrue); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma); } (void) SyncCacheViewAuthenticPixels(image_view,exception); } if ((fabs(segment->x2-segment->x1) < 3.0) && (fabs(segment->y2-segment->y1) < 3.0)) return(MagickTrue); return(MagickFalse); } MagickExport MagickBooleanType PlasmaImage(Image *image, const SegmentInfo *segment,size_t attenuate,size_t depth, ExceptionInfo *exception) { CacheView *image_view, *u_view, *v_view; MagickBooleanType status; RandomInfo *random_info; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); u_view=AcquireVirtualCacheView(image,exception); v_view=AcquireVirtualCacheView(image,exception); random_info=AcquireRandomInfo(); status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,segment, attenuate,depth,exception); random_info=DestroyRandomInfo(random_info); v_view=DestroyCacheView(v_view); u_view=DestroyCacheView(u_view); image_view=DestroyCacheView(image_view); return(status); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P o l a r o i d I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PolaroidImage() simulates a Polaroid picture.
%
%  The format of the PolaroidImage method is:
%
%      Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
%        const char *caption,const double angle,
%        const PixelInterpolateMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o caption: the Polaroid caption.
%
%    o angle: Apply the effect along this angle.
%
%    o method: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
  const char *caption,const double angle,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
  Image
    *bend_image,
    *caption_image,
    *flop_image,
    *picture_image,
    *polaroid_image,
    *rotate_image,
    *trim_image;

  size_t
    height;

  ssize_t
    quantum;

  /*
    Simulate a Polaroid picture.  The border width (quantum) scales with the
    image's larger dimension, with a floor of 10 pixels.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
    image->rows)/25.0,10.0);
  height=image->rows+2*quantum;
  caption_image=(Image *) NULL;
  if (caption != (const char *) NULL)
    {
      char
        *text;

      /*
        Generate caption image; it is rendered below the picture and its
        height is added to the overall canvas.
      */
      caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
      if (caption_image == (Image *) NULL)
        return((Image *) NULL);
      text=InterpretImageProperties((ImageInfo *) NULL,(Image *) image,caption,
        exception);
      if (text != (char *) NULL)
        {
          char
            geometry[MagickPathExtent];

          DrawInfo
            *annotate_info;

          MagickBooleanType
            status;

          ssize_t
            count;

          TypeMetric
            metrics;

          annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
          (void) CloneString(&annotate_info->text,text);
          /* Word-wrap the caption; count is the number of wrapped lines. */
          count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,
            &metrics,&text,exception);
          status=SetImageExtent(caption_image,image->columns,(size_t)
            ((count+1)*(metrics.ascent-metrics.descent)+0.5),exception);
          if (status == MagickFalse)
            caption_image=DestroyImage(caption_image);
          else
            {
              caption_image->background_color=image->border_color;
              (void) SetImageBackgroundColor(caption_image,exception);
              (void) CloneString(&annotate_info->text,text);
              (void) FormatLocaleString(geometry,MagickPathExtent,"+0+%.20g",
                metrics.ascent);
              if (annotate_info->gravity == UndefinedGravity)
                (void) CloneString(&annotate_info->geometry,AcquireString(
                  geometry));
              (void) AnnotateImage(caption_image,annotate_info,exception);
              height+=caption_image->rows;
            }
          annotate_info=DestroyDrawInfo(annotate_info);
          text=DestroyString(text);
        }
    }
  /*
    Compose picture: image centered in a border-colored frame, caption below.
  */
  picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
    exception);
  if (picture_image == (Image *) NULL)
    {
      if (caption_image != (Image *) NULL)
        caption_image=DestroyImage(caption_image);
      return((Image *) NULL);
    }
  picture_image->background_color=image->border_color;
  (void) SetImageBackgroundColor(picture_image,exception);
  (void) CompositeImage(picture_image,image,OverCompositeOp,MagickTrue,quantum,
    quantum,exception);
  if (caption_image != (Image *) NULL)
    {
      (void) CompositeImage(picture_image,caption_image,OverCompositeOp,
        MagickTrue,quantum,(ssize_t) (image->rows+3*quantum/2),exception);
      caption_image=DestroyImage(caption_image);
    }
  /*
    Bend the picture: rotate 90, wave, rotate back -90; then add a flopped
    drop shadow, rotate by the requested angle and trim.  Each intermediate
    image is destroyed as soon as its successor exists.
  */
  (void) QueryColorCompliance("none",AllCompliance,
    &picture_image->background_color,exception);
  (void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel,exception);
  rotate_image=RotateImage(picture_image,90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
    picture_image->columns,method,exception);
  picture_image=DestroyImage(picture_image);
  if (bend_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=bend_image;
  rotate_image=RotateImage(picture_image,-90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  picture_image->background_color=image->background_color;
  polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
    exception);
  if (polaroid_image == (Image *) NULL)
    {
      /* DestroyImage() returns NULL, so this returns NULL to the caller. */
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  flop_image=FlopImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (flop_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  polaroid_image=flop_image;
  (void) CompositeImage(polaroid_image,picture_image,OverCompositeOp,
    MagickTrue,(ssize_t) (-0.01*picture_image->columns/2.0),0L,exception);
  picture_image=DestroyImage(picture_image);
  (void) QueryColorCompliance("none",AllCompliance,
    &polaroid_image->background_color,exception);
  rotate_image=RotateImage(polaroid_image,angle,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=rotate_image;
  trim_image=TrimImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (trim_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=trim_image;
  return(polaroid_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e p i a T o n e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickSepiaToneImage() applies a special effect to the image, similar to the
%  effect achieved in a photo darkroom by sepia toning.  Threshold ranges from
%  0 to QuantumRange and is a measure of the extent of the sepia toning.  A
%  threshold of 80% is a good starting point for a reasonable tone.
%
%  The format of the SepiaToneImage method is:
%
%      Image *SepiaToneImage(const Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: the tone threshold.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define SepiaToneImageTag  "SepiaTone/Image"

  CacheView
    *image_view,
    *sepia_view;

  Image
    *sepia_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize sepia-toned image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sepia_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sepia_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sepia_image,DirectClass,exception) == MagickFalse)
    {
      sepia_image=DestroyImage(sepia_image);
      return((Image *) NULL);
    }
  /*
    Tone each row of the image: red/green/blue each get a shifted tone curve
    derived from the pixel intensity and the threshold.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sepia_view=AcquireAuthenticCacheView(sepia_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sepia_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity,
        tone;

      intensity=GetPixelIntensity(image,p);
      tone=intensity > threshold ? (double) QuantumRange : intensity+
        (double) QuantumRange-threshold;
      SetPixelRed(sepia_image,ClampToQuantum(tone),q);
      tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange :
        intensity+(double) QuantumRange-7.0*threshold/6.0;
      SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0;
      SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
      /*
        Clamp very dark green/blue up to a floor of threshold/7.
        NOTE(review): these reads pass `image` (source) with `q` (sepia
        pixels); the channel maps match because sepia_image is a clone of
        image, but `sepia_image` would be the clearer argument — confirm.
      */
      tone=threshold/7.0;
      if ((double) GetPixelGreen(image,q) < tone)
        SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      if ((double) GetPixelBlue(image,q) < tone)
        SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
      SetPixelAlpha(sepia_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(sepia_image);
    }
    if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SepiaToneImage)
#endif
        proceed=SetImageProgress(image,SepiaToneImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sepia_view=DestroyCacheView(sepia_view);
  image_view=DestroyCacheView(image_view);
  /* Stretch contrast for the final look. */
  (void) NormalizeImage(sepia_image,exception);
  (void) ContrastImage(sepia_image,MagickTrue,exception);
  if (status == MagickFalse)
    sepia_image=DestroyImage(sepia_image);
  return(sepia_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a d o w I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShadowImage() simulates a shadow from the specified image and returns it.
%
%  The format of the ShadowImage method is:
%
%      Image *ShadowImage(const Image *image,const double alpha,
%        const double sigma,const ssize_t x_offset,const ssize_t y_offset,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o alpha: percentage transparency.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o x_offset: the shadow x-offset.
%
%    o y_offset: the shadow y-offset.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadowImage(const Image *image,const double alpha,
  const double sigma,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define ShadowImageTag  "Shadow/Image"

  CacheView
    *image_view;

  ChannelType
    channel_mask;

  Image
    *border_image,
    *clone_image,
    *shadow_image;

  MagickBooleanType
    status;

  PixelInfo
    background_color;

  RectangleInfo
    border_info;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(clone_image,sRGBColorspace,exception);
  (void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod,
    exception);
  /*
    Pad the canvas on all sides so the blur has room to spread; the border
    width tracks the Gaussian support (~2*sigma).
  */
  border_info.width=(size_t) floor(2.0*sigma+0.5);
  border_info.height=(size_t) floor(2.0*sigma+0.5);
  border_info.x=0;
  border_info.y=0;
  (void) QueryColorCompliance("none",AllCompliance,&clone_image->border_color,
    exception);
  clone_image->alpha_trait=BlendPixelTrait;
  border_image=BorderImage(clone_image,&border_info,OverCompositeOp,exception);
  clone_image=DestroyImage(clone_image);
  if (border_image == (Image *) NULL)
    return((Image *) NULL);
  if (border_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel,exception);
  /*
    Shadow image: replace every pixel with the background color, scaling the
    existing alpha by the requested shadow transparency.
  */
  status=MagickTrue;
  background_color=border_image->background_color;
  background_color.alpha_trait=BlendPixelTrait;
  image_view=AcquireAuthenticCacheView(border_image,exception);
  for (y=0; y < (ssize_t) border_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fix: the loop below READS the existing alpha channel via GetPixelAlpha()
      before overwriting the pixel, so the authentic pixels must be fetched
      with GetCacheViewAuthenticPixels().  QueueCacheViewAuthenticPixels()
      may hand back uninitialized pixel data.
    */
    q=GetCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) border_image->columns; x++)
    {
      if (border_image->alpha_trait != UndefinedPixelTrait)
        background_color.alpha=GetPixelAlpha(border_image,q)*alpha/100.0;
      SetPixelViaPixelInfo(border_image,&background_color,q);
      q+=GetPixelChannels(border_image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    {
      border_image=DestroyImage(border_image);
      return((Image *) NULL);
    }
  /*
    Blur only the alpha channel to soften the shadow's edge.
  */
  channel_mask=SetImageChannelMask(border_image,AlphaChannel);
  shadow_image=BlurImage(border_image,0.0,sigma,exception);
  border_image=DestroyImage(border_image);
  if (shadow_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetPixelChannelMask(shadow_image,channel_mask);
  if (shadow_image->page.width == 0)
    shadow_image->page.width=shadow_image->columns;
  if (shadow_image->page.height == 0)
    shadow_image->page.height=shadow_image->rows;
  /*
    Shift the page geometry so the shadow lands at the requested offset
    relative to the original image.
  */
  shadow_image->page.width+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.height+=y_offset-(ssize_t) border_info.height;
  shadow_image->page.x+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.y+=y_offset-(ssize_t) border_info.height;
  return(shadow_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S k e t c h I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SketchImage() simulates a pencil sketch.
We convolve the image with a % Gaussian operator of the given radius and standard deviation (sigma). For % reasonable results, radius should be larger than sigma. Use a radius of 0 % and SketchImage() selects a suitable radius for you. Angle gives the angle % of the sketch. % % The format of the SketchImage method is: % % Image *SketchImage(const Image *image,const double radius, % const double sigma,const double angle,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the % center pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o angle: apply the effect along this angle. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SketchImage(const Image *image,const double radius, const double sigma,const double angle,ExceptionInfo *exception) { CacheView *random_view; Image *blend_image, *blur_image, *dodge_image, *random_image, *sketch_image; MagickBooleanType status; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif /* Sketch image. 
*/ random_image=CloneImage(image,image->columns << 1,image->rows << 1, MagickTrue,exception); if (random_image == (Image *) NULL) return((Image *) NULL); status=MagickTrue; random_info=AcquireRandomInfoThreadSet(); random_view=AcquireAuthenticCacheView(random_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(random_image,random_image,random_image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) random_image->rows; y++) { const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) random_image->columns; x++) { double value; register ssize_t i; value=GetPseudoRandomValue(random_info[id]); for (i=0; i < (ssize_t) GetPixelChannels(random_image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=ClampToQuantum(QuantumRange*value); } q+=GetPixelChannels(random_image); } if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse) status=MagickFalse; } random_view=DestroyCacheView(random_view); random_info=DestroyRandomInfoThreadSet(random_info); if (status == MagickFalse) { random_image=DestroyImage(random_image); return(random_image); } blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception); random_image=DestroyImage(random_image); if (blur_image == (Image *) NULL) return((Image *) NULL); dodge_image=EdgeImage(blur_image,radius,exception); blur_image=DestroyImage(blur_image); if (dodge_image == (Image *) NULL) return((Image *) NULL); (void) NormalizeImage(dodge_image,exception); (void) NegateImage(dodge_image,MagickFalse,exception); (void) 
TransformImage(&dodge_image,(char *) NULL,"50%",exception); sketch_image=CloneImage(image,0,0,MagickTrue,exception); if (sketch_image == (Image *) NULL) { dodge_image=DestroyImage(dodge_image); return((Image *) NULL); } (void) CompositeImage(sketch_image,dodge_image,ColorDodgeCompositeOp, MagickTrue,0,0,exception); dodge_image=DestroyImage(dodge_image); blend_image=CloneImage(image,0,0,MagickTrue,exception); if (blend_image == (Image *) NULL) { sketch_image=DestroyImage(sketch_image); return((Image *) NULL); } if (blend_image->alpha_trait != BlendPixelTrait) (void) SetImageAlpha(blend_image,TransparentAlpha,exception); (void) SetImageArtifact(blend_image,"compose:args","20x80"); (void) CompositeImage(sketch_image,blend_image,BlendCompositeOp,MagickTrue, 0,0,exception); blend_image=DestroyImage(blend_image); return(sketch_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S o l a r i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SolarizeImage() applies a special effect to the image, similar to the effect % achieved in a photo darkroom by selectively exposing areas of photo % sensitive paper to light. Threshold ranges from 0 to QuantumRange and is a % measure of the extent of the solarization. % % The format of the SolarizeImage method is: % % MagickBooleanType SolarizeImage(Image *image,const double threshold, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: Define the extent of the solarization. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
  const double threshold,ExceptionInfo *exception)
{
#define SolarizeImageTag  "Solarize/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /*
        Solarize colormap: invert any colormap entry whose component exceeds
        the threshold (PseudoClass images are handled via the colormap, the
        pixel loop below still runs to update the raster).
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((double) image->colormap[i].red > threshold)
          image->colormap[i].red=QuantumRange-image->colormap[i].red;
        if ((double) image->colormap[i].green > threshold)
          image->colormap[i].green=QuantumRange-image->colormap[i].green;
        if ((double) image->colormap[i].blue > threshold)
          image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
      }
    }
  /*
    Solarize image: invert each updatable channel value above the threshold,
    in place.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        /* only channels flagged for update are solarized */
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if ((double) q[i] > threshold)
          q[i]=QuantumRange-q[i];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SolarizeImage)
#endif
        proceed=SetImageProgress(image,SolarizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S t e g a n o I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SteganoImage() hides a digital watermark within the image.  Recover
%  the hidden watermark later to prove that the authenticity of an image.
%  Offset defines the start position within the image to hide the watermark.
%
%  The format of the SteganoImage method is:
%
%      Image *SteganoImage(const Image *image,Image *watermark,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
% % o watermark: the watermark image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SteganoImage(const Image *image,const Image *watermark, ExceptionInfo *exception) { #define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0) #define SetBit(alpha,i,set) (Quantum) ((set) != 0 ? (size_t) (alpha) \ | (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i))) #define SteganoImageTag "Stegano/Image" CacheView *stegano_view, *watermark_view; Image *stegano_image; int c; MagickBooleanType status; PixelInfo pixel; register Quantum *q; register ssize_t x; size_t depth, one; ssize_t i, j, k, y; /* Initialize steganographic image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(watermark != (const Image *) NULL); assert(watermark->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); one=1UL; stegano_image=CloneImage(image,0,0,MagickTrue,exception); if (stegano_image == (Image *) NULL) return((Image *) NULL); stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH; if (SetImageStorageClass(stegano_image,DirectClass,exception) == MagickFalse) { stegano_image=DestroyImage(stegano_image); return((Image *) NULL); } /* Hide watermark in low-order bits of image. 
*/ c=0; i=0; j=0; depth=stegano_image->depth; k=stegano_image->offset; status=MagickTrue; watermark_view=AcquireVirtualCacheView(watermark,exception); stegano_view=AcquireAuthenticCacheView(stegano_image,exception); for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--) { for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++) { for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++) { ssize_t offset; (void) GetOneCacheViewVirtualPixelInfo(watermark_view,x,y,&pixel, exception); offset=k/(ssize_t) stegano_image->columns; if (offset >= (ssize_t) stegano_image->rows) break; q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t) stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1, exception); if (q == (Quantum *) NULL) break; switch (c) { case 0: { SetPixelRed(stegano_image,SetBit(GetPixelRed(stegano_image,q),j, GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q); break; } case 1: { SetPixelGreen(stegano_image,SetBit(GetPixelGreen(stegano_image,q),j, GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q); break; } case 2: { SetPixelBlue(stegano_image,SetBit(GetPixelBlue(stegano_image,q),j, GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q); break; } } if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse) break; c++; if (c == 3) c=0; k++; if (k == (ssize_t) (stegano_image->columns*stegano_image->columns)) k=0; if (k == stegano_image->offset) j++; } } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType) (depth-i),depth); if (proceed == MagickFalse) status=MagickFalse; } } stegano_view=DestroyCacheView(stegano_view); watermark_view=DestroyCacheView(watermark_view); if (status == MagickFalse) stegano_image=DestroyImage(stegano_image); return(stegano_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S t e r e o A n a g l y 
p h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % StereoAnaglyphImage() combines two images and produces a single image that % is the composite of a left and right image of a stereo pair. Special % red-green stereo glasses are required to view this effect. % % The format of the StereoAnaglyphImage method is: % % Image *StereoImage(const Image *left_image,const Image *right_image, % ExceptionInfo *exception) % Image *StereoAnaglyphImage(const Image *left_image, % const Image *right_image,const ssize_t x_offset,const ssize_t y_offset, % ExceptionInfo *exception) % % A description of each parameter follows: % % o left_image: the left image. % % o right_image: the right image. % % o exception: return any errors or warnings in this structure. % % o x_offset: amount, in pixels, by which the left image is offset to the % right of the right image. % % o y_offset: amount, in pixels, by which the left image is offset to the % bottom of the right image. 
%
%
*/
MagickExport Image *StereoImage(const Image *left_image,
  const Image *right_image,ExceptionInfo *exception)
{
  /* Convenience wrapper: a stereo anaglyph with no offset. */
  return(StereoAnaglyphImage(left_image,right_image,0,0,exception));
}

MagickExport Image *StereoAnaglyphImage(const Image *left_image,
  const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define StereoImageTag  "Stereo/Image"

  const Image
    *image;

  Image
    *stereo_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(left_image != (const Image *) NULL);
  assert(left_image->signature == MagickCoreSignature);
  if (left_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      left_image->filename);
  assert(right_image != (const Image *) NULL);
  assert(right_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=left_image;
  if ((left_image->columns != right_image->columns) ||
      (left_image->rows != right_image->rows))
    ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer");
  /*
    Initialize stereo image attributes.
  */
  stereo_image=CloneImage(left_image,left_image->columns,left_image->rows,
    MagickTrue,exception);
  if (stereo_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stereo_image,DirectClass,exception) == MagickFalse)
    {
      stereo_image=DestroyImage(stereo_image);
      return((Image *) NULL);
    }
  (void) SetImageColorspace(stereo_image,sRGBColorspace,exception);
  /*
    Copy left image to red channel and right image to blue channel.
    The left image is sampled with the (negated) offsets so virtual pixels
    cover the shifted region.
  */
  status=MagickTrue;
  for (y=0; y < (ssize_t) stereo_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    register Quantum
      *magick_restrict r;

    p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1,
      exception);
    q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception);
    r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL) ||
        (r == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) stereo_image->columns; x++)
    {
      SetPixelRed(stereo_image,GetPixelRed(left_image,p),r);
      SetPixelGreen(stereo_image,GetPixelGreen(right_image,q),r);
      SetPixelBlue(stereo_image,GetPixelBlue(right_image,q),r);
      /* average the two alphas when the output carries an alpha channel */
      if ((GetPixelAlphaTraits(stereo_image) & CopyPixelTrait) != 0)
        SetPixelAlpha(stereo_image,(GetPixelAlpha(left_image,p)+
          GetPixelAlpha(right_image,q))/2,r);
      p+=GetPixelChannels(left_image);
      q+=GetPixelChannels(right_image);
      r+=GetPixelChannels(stereo_image);
    }
    if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse)
      break;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y,
          stereo_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  if (status == MagickFalse)
    stereo_image=DestroyImage(stereo_image);
  return(stereo_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S w i r l I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SwirlImage() swirls the pixels about the center of the image, where
%  degrees indicates the sweep of the arc through which each pixel is moved.
%  You get a more dramatic effect as the degrees move from 1 to 360.
%
%  The format of the SwirlImage method is:
%
%      Image *SwirlImage(const Image *image,double degrees,
%        const PixelInterpolateMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o degrees: Define the tightness of the swirling effect.
%
%    o method: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SwirlImage(const Image *image,double degrees,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define SwirlImageTag  "Swirl/Image"

  CacheView
    *canvas_view,
    *interpolate_view,
    *swirl_view;

  double
    radius;

  Image
    *canvas_image,
    *swirl_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize swirl image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
      (canvas_image->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlphaChannel(canvas_image,OpaqueAlphaChannel,exception);
  swirl_image=CloneImage(canvas_image,0,0,MagickTrue,exception);
  if (swirl_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(swirl_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      swirl_image=DestroyImage(swirl_image);
      return((Image *) NULL);
    }
  /*
    Compute scaling factor: map the image rectangle onto a circle of the
    larger half-dimension so the swirl region is an ellipse inscribed in the
    image.
  */
  center.x=(double) canvas_image->columns/2.0;
  center.y=(double) canvas_image->rows/2.0;
  radius=MagickMax(center.x,center.y);
  scale.x=1.0;
  scale.y=1.0;
  if (canvas_image->columns > canvas_image->rows)
    scale.y=(double) canvas_image->columns/(double) canvas_image->rows;
  else
    if (canvas_image->columns < canvas_image->rows)
      scale.x=(double) canvas_image->rows/(double) canvas_image->columns;
  degrees=(double) DegreesToRadians(degrees);
  /*
    Swirl image.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas_image,exception);
  interpolate_view=AcquireVirtualCacheView(image,exception);
  swirl_view=AcquireAuthenticCacheView(swirl_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas_image,swirl_image,canvas_image->rows,1)
#endif
  for (y=0; y < (ssize_t) canvas_image->rows; y++)
  {
    double
      distance;

    PointInfo
      delta;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) canvas_image->columns; x++)
    {
      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        {
          register ssize_t
            i;

          /* outside the swirl ellipse: copy the source pixel unchanged */
          for (i=0; i < (ssize_t) GetPixelChannels(canvas_image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(canvas_image,i);
            PixelTrait traits = GetPixelChannelTraits(canvas_image,channel);
            PixelTrait swirl_traits = GetPixelChannelTraits(swirl_image,
              channel);
            if ((traits == UndefinedPixelTrait) ||
                (swirl_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(swirl_image,channel,p[i],q);
          }
        }
      else
        {
          double
            cosine,
            factor,
            sine;

          /*
            Swirl the pixel: the rotation angle falls off quadratically with
            distance from the center, then the source location is rotated
            and interpolated.
          */
          factor=1.0-sqrt((double) distance)/radius;
          sine=sin((double) (degrees*factor*factor));
          cosine=cos((double) (degrees*factor*factor));
          status=InterpolatePixelChannels(canvas_image,interpolate_view,
            swirl_image,method,((cosine*delta.x-sine*delta.y)/scale.x+center.x),
            (double) ((sine*delta.x+cosine*delta.y)/scale.y+center.y),q,
            exception);
          if (status == MagickFalse)
            break;
        }
      p+=GetPixelChannels(canvas_image);
      q+=GetPixelChannels(swirl_image);
    }
    if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
      status=MagickFalse;
    if (canvas_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SwirlImage)
#endif
        proceed=SetImageProgress(canvas_image,SwirlImageTag,progress++,
          canvas_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  swirl_view=DestroyCacheView(swirl_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas_image=DestroyImage(canvas_image);
  if (status == MagickFalse)
    swirl_image=DestroyImage(swirl_image);
  return(swirl_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T i n t I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TintImage() applies a color vector to each pixel in the
image.  The length
%  of the vector is 0 for black and white and at its maximum for the midtones.
%  The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5))))
%
%  The format of the TintImage method is:
%
%      Image *TintImage(const Image *image,const char *blend,
%        const PixelInfo *tint,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o blend: A color value used for tinting.
%
%    o tint: A color value used for tinting.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TintImage(const Image *image,const char *blend,
  const PixelInfo *tint,ExceptionInfo *exception)
{
#define TintImageTag  "Tint/Image"

  CacheView
    *image_view,
    *tint_view;

  double
    intensity;

  GeometryInfo
    geometry_info;

  Image
    *tint_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    color_vector;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Allocate tint image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  tint_image=CloneImage(image,0,0,MagickTrue,exception);
  if (tint_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(tint_image,DirectClass,exception) == MagickFalse)
    {
      tint_image=DestroyImage(tint_image);
      return((Image *) NULL);
    }
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsPixelInfoGray(tint) == MagickFalse))
    (void) SetImageColorspace(tint_image,sRGBColorspace,exception);
  /* no blend geometry means an unmodified clone is returned */
  if (blend == (const char *) NULL)
    return(tint_image);
  /*
    Determine RGB values of the color.  The blend string is a geometry
    (rho[xsigma[xxi[xpsi]]]) giving per-channel blend percentages.
  */
  GetPixelInfo(image,&color_vector);
  flags=ParseGeometry(blend,&geometry_info);
  color_vector.red=geometry_info.rho;
  color_vector.green=geometry_info.rho;
  color_vector.blue=geometry_info.rho;
  color_vector.alpha=(MagickRealType) OpaqueAlpha;
  if ((flags & SigmaValue) != 0)
    color_vector.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    color_vector.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    color_vector.alpha=geometry_info.psi;
  if (image->colorspace == CMYKColorspace)
    {
      color_vector.black=geometry_info.rho;
      if ((flags & PsiValue) != 0)
        color_vector.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        color_vector.alpha=geometry_info.chi;
    }
  intensity=(double) GetPixelInfoIntensity((const Image *) NULL,tint);
  color_vector.red=(double) (color_vector.red*tint->red/100.0-intensity);
  color_vector.green=(double) (color_vector.green*tint->green/100.0-intensity);
  color_vector.blue=(double) (color_vector.blue*tint->blue/100.0-intensity);
  color_vector.black=(double) (color_vector.black*tint->black/100.0-intensity);
  color_vector.alpha=(double) (color_vector.alpha*tint->alpha/100.0-intensity);
  /*
    Tint image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  tint_view=AcquireAuthenticCacheView(tint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,tint_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelInfo
        pixel;

      double
        weight;

      /*
        The midtone weighting f(x)=1-4*(x-0.5)^2 peaks at mid-intensity and
        vanishes at black and white, so the tint affects midtones most.
      */
      GetPixelInfo(image,&pixel);
      weight=QuantumScale*GetPixelRed(image,p)-0.5;
      pixel.red=(MagickRealType) GetPixelRed(image,p)+color_vector.red*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelGreen(image,p)-0.5;
      pixel.green=(MagickRealType) GetPixelGreen(image,p)+color_vector.green*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelBlue(image,p)-0.5;
      pixel.blue=(MagickRealType) GetPixelBlue(image,p)+color_vector.blue*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelBlack(image,p)-0.5;
      pixel.black=(MagickRealType) GetPixelBlack(image,p)+color_vector.black*
        (1.0-(4.0*(weight*weight)));
      pixel.alpha=(MagickRealType) GetPixelAlpha(image,p);
      SetPixelViaPixelInfo(tint_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(tint_image);
    }
    if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TintImage)
#endif
        proceed=SetImageProgress(image,TintImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  tint_view=DestroyCacheView(tint_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    tint_image=DestroyImage(tint_image);
  return(tint_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     V i g n e t t e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  VignetteImage() softens the edges of the image in vignette style.
%
%  The format of the VignetteImage method is:
%
%      Image *VignetteImage(const Image *image,const double radius,
%        const double sigma,const ssize_t x,const ssize_t y,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o x, y: Define the x and y ellipse offset.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *VignetteImage(const Image *image,const double radius,
  const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
  char
    ellipse[MagickPathExtent];

  DrawInfo
    *draw_info;

  Image
    *canvas,
    *blur_image,
    *oval_image,
    *vignette_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(canvas,DirectClass,exception) == MagickFalse)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  canvas->alpha_trait=BlendPixelTrait;
  /*
    Draw a white ellipse on a black canvas to serve as the vignette mask.
  */
  oval_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue,
    exception);
  if (oval_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  (void) QueryColorCompliance("#000000",AllCompliance,
    &oval_image->background_color,exception);
  (void) SetImageBackgroundColor(oval_image,exception);
  draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->stroke,
    exception);
  (void) FormatLocaleString(ellipse,MagickPathExtent,"ellipse %g,%g,%g,%g,"
    "0.0,360.0",image->columns/2.0,image->rows/2.0,image->columns/2.0-x,
    image->rows/2.0-y);
  draw_info->primitive=AcquireString(ellipse);
  (void) DrawImage(oval_image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  /*
    Blur the mask and use its intensity as the canvas alpha channel, then
    flatten against the background for the soft-edged result.
  */
  blur_image=BlurImage(oval_image,radius,sigma,exception);
  oval_image=DestroyImage(oval_image);
  if (blur_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  blur_image->alpha_trait=UndefinedPixelTrait;
  (void) CompositeImage(canvas,blur_image,IntensityCompositeOp,MagickTrue,
    0,0,exception);
  blur_image=DestroyImage(blur_image);
  vignette_image=MergeImageLayers(canvas,FlattenLayer,exception);
  canvas=DestroyImage(canvas);
  if (vignette_image != (Image *) NULL)
    (void) TransformImageColorspace(vignette_image,image->colorspace,exception);
  return(vignette_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     W a v e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WaveImage() creates a "ripple" effect in the image by shifting the pixels
%  vertically along a sine wave whose amplitude and wavelength is specified
%  by the given parameters.
%
%  The format of the WaveImage method is:
%
%      Image *WaveImage(const Image *image,const double amplitude,
%        const double wave_length,const PixelInterpolateMethod method,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o amplitude, wave_length: Define the amplitude and wave length of the
%      sine wave.
%
%    o interpolate: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *WaveImage(const Image *image,const double amplitude,
  const double wave_length,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
#define WaveImageTag  "Wave/Image"

  CacheView
    *canvas_image_view,
    *wave_view;

  Image
    *canvas_image,
    *wave_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    *sine_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize wave image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
      (canvas_image->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlpha(canvas_image,OpaqueAlpha,exception);
  /* Output is taller than the input by twice the amplitude so the displaced
     rows stay inside the canvas. */
  wave_image=CloneImage(canvas_image,canvas_image->columns,(size_t)
    (canvas_image->rows+2.0*fabs(amplitude)),MagickTrue,exception);
  if (wave_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(wave_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      wave_image=DestroyImage(wave_image);
      return((Image *) NULL);
    }
  /*
    Allocate sine map: per-column vertical displacement, precomputed once.
  */
  sine_map=(double *) AcquireQuantumMemory((size_t) wave_image->columns,
    sizeof(*sine_map));
  if (sine_map == (double *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      wave_image=DestroyImage(wave_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (i=0; i < (ssize_t) wave_image->columns; i++)
    sine_map[i]=fabs(amplitude)+amplitude*sin((double) ((2.0*MagickPI*i)/
      wave_length));
  /*
    Wave image: each output row samples the source at (x, y-sine_map[x]).
  */
  status=MagickTrue;
  progress=0;
  canvas_image_view=AcquireVirtualCacheView(canvas_image,exception);
  wave_view=AcquireAuthenticCacheView(wave_image,exception);
  (void) SetCacheViewVirtualPixelMethod(canvas_image_view,
    BackgroundVirtualPixelMethod);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas_image,wave_image,wave_image->rows,1)
#endif
  for (y=0; y < (ssize_t) wave_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_image_view,0,y,canvas_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) wave_image->columns; x++)
    {
      /* Interpolate at a fractional source row shifted by the sine map. */
      status=InterpolatePixelChannels(canvas_image,canvas_image_view,
        wave_image,method,(double) x,(double) (y-sine_map[x]),q,exception);
      if (status == MagickFalse)
        break;
      p+=GetPixelChannels(canvas_image);
      q+=GetPixelChannels(wave_image);
    }
    if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_WaveImage)
#endif
        proceed=SetImageProgress(canvas_image,WaveImageTag,progress++,canvas_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  wave_view=DestroyCacheView(wave_view);
  canvas_image_view=DestroyCacheView(canvas_image_view);
  canvas_image=DestroyImage(canvas_image);
  sine_map=(double *) RelinquishMagickMemory(sine_map);
  if (status == MagickFalse)
    wave_image=DestroyImage(wave_image);
  return(wave_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     W a v e l e t   D e n o i s e   I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WaveletDenoiseImage() removes noise from the image using a wavelet
%  transform.  The wavelet transform is a fast hierarchical scheme for
%  processing an image using a set of consecutive lowpass and high_pass filters,
%  followed by a decimation.  This results in a decomposition into different
%  scales which can be regarded as different “frequency bands”, determined by
%  the mother wavelet.  Adapted from dcraw.c by David Coffin.
%
%  The format of the WaveletDenoiseImage method is:
%
%      Image *WaveletDenoiseImage(const Image *image,const double threshold,
%        const double softness,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: set the threshold for smoothing.
%
%    o softness: attenuate the smoothing threshold.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* One pass of the à-trous "hat" smoothing filter along a 1-D slice of
   `pixels` (row when stride==1, column when stride==image width).  `extent`
   is the slice length, `scale` the filter dilation (2^level); the smoothed
   slice is written to `kernel`.  The first and last loops mirror the signal
   at the boundaries. */
static inline void HatTransform(const float *magick_restrict pixels,
  const size_t stride,const size_t extent,const size_t scale,float *kernel)
{
  const float
    *magick_restrict p,
    *magick_restrict q,
    *magick_restrict r;

  register ssize_t
    i;

  /* Leading boundary: q walks backwards (mirror), r forwards. */
  p=pixels;
  q=pixels+scale*stride;
  r=pixels+scale*stride;
  for (i=0; i < (ssize_t) scale; i++)
  {
    kernel[i]=0.25f*(*p+(*p)+(*q)+(*r));
    p+=stride;
    q-=stride;
    r+=stride;
  }
  /* Interior: plain 3-tap (1,2,1)/4 filter at dilation `scale`. */
  for ( ; i < (ssize_t) (extent-scale); i++)
  {
    kernel[i]=0.25f*(2.0f*(*p)+*(p-scale*stride)+*(p+scale*stride));
    p+=stride;
  }
  /* Trailing boundary: r walks backwards from the end (mirror). */
  q=p-scale*stride;
  r=pixels+stride*(extent-2);
  for ( ; i < (ssize_t) extent; i++)
  {
    kernel[i]=0.25f*(*p+(*p)+(*q)+(*r));
    p+=stride;
    q+=stride;
    r-=stride;
  }
}

MagickExport Image *WaveletDenoiseImage(const Image *image,
  const double threshold,const double softness,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *noise_view;

  float
    *kernel,
    *pixels;

  Image
    *noise_image;

  MagickBooleanType
    status;

  MagickSizeType
    number_pixels;

  MemoryInfo
    *pixels_info;

  ssize_t
    channel;

  /* Per-level noise attenuation factors (from dcraw). */
  static const float
    noise_levels[] = { 0.8002f, 0.2735f, 0.1202f, 0.0585f, 0.0291f, 0.0152f,
      0.0080f, 0.0044f };

  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path when it is available and succeeds. */
  noise_image=AccelerateWaveletDenoiseImage(image,threshold,exception);
  if (noise_image != (Image *) NULL)
    return(noise_image);
#endif
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
    {
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  if (AcquireMagickResource(WidthResource,4*image->columns) == MagickFalse)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /* Workspace holds three channel-sized planes: the working plane plus the
     two alternating low-pass planes selected via (level & 0x01)+1 below. */
  pixels_info=AcquireVirtualMemory(3*image->columns,image->rows*
    sizeof(*pixels));
  /* One scratch line per OpenMP thread, sized for the longer image axis. */
  kernel=(float *) AcquireQuantumMemory(MagickMax(image->rows,image->columns)+1,
    GetOpenMPMaximumThreads()*sizeof(*kernel));
  if ((pixels_info == (MemoryInfo *) NULL) || (kernel == (float *) NULL))
    {
      if (kernel != (float *) NULL)
        kernel=(float *) RelinquishMagickMemory(kernel);
      if (pixels_info != (MemoryInfo *) NULL)
        pixels_info=RelinquishVirtualMemory(pixels_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(float *) GetVirtualMemoryBlob(pixels_info);
  status=MagickTrue;
  number_pixels=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
  for (channel=0; channel < (ssize_t) GetPixelChannels(image); channel++)
  {
    register ssize_t
      i;

    size_t
      high_pass,
      low_pass;

    ssize_t
      level,
      y;

    PixelChannel
      pixel_channel;

    PixelTrait
      traits;

    if (status == MagickFalse)
      continue;
    traits=GetPixelChannelTraits(image,(PixelChannel) channel);
    if (traits == UndefinedPixelTrait)
      continue;
    /* Only the color channels are denoised. */
    pixel_channel=GetPixelChannelChannel(image,channel);
    if ((pixel_channel != RedPixelChannel) &&
        (pixel_channel != GreenPixelChannel) &&
        (pixel_channel != BluePixelChannel))
      continue;
    /*
      Copy channel from image to wavelet pixel array.
    */
    i=0;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const Quantum
        *magick_restrict p;

      ssize_t
        x;

      p=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[i++]=(float) p[channel];
        p+=GetPixelChannels(image);
      }
    }
    /*
      Low pass filter outputs are called approximation kernel & high pass
      filters are referred to as detail kernel. The detail kernel
      have high values in the noisy parts of the signal.
    */
    high_pass=0;
    for (level=0; level < 5; level++)
    {
      double
        magnitude;

      ssize_t
        x,
        y;

      /* Alternate between the second and third workspace plane. */
      low_pass=(size_t) (number_pixels*((level & 0x01)+1));
      /* Smooth each row at the current dilation into the low-pass plane. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,1) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        register float
          *magick_restrict p,
          *magick_restrict q;

        register ssize_t
          x;

        p=kernel+id*image->columns;
        q=pixels+y*image->columns;
        HatTransform(q+high_pass,1,image->columns,(size_t) (1UL << level),p);
        q+=low_pass;
        for (x=0; x < (ssize_t) image->columns; x++)
          *q++=(*p++);
      }
      /* Then smooth each column of the low-pass plane in place. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,1) \
        magick_number_threads(image,image,image->columns,1)
#endif
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        const int
          id = GetOpenMPThreadId();

        register float
          *magick_restrict p,
          *magick_restrict q;

        register ssize_t
          y;

        p=kernel+id*image->rows;
        q=pixels+x+low_pass;
        HatTransform(q,image->columns,image->rows,(size_t) (1UL << level),p);
        for (y=0; y < (ssize_t) image->rows; y++)
        {
          *q=(*p++);
          q+=image->columns;
        }
      }
      /*
        To threshold, each coefficient is compared to a threshold value and
        attenuated / shrunk by some factor.
      */
      magnitude=threshold*noise_levels[level];
      for (i=0; i < (ssize_t) number_pixels; ++i)
      {
        /* Detail (high-pass) = current band minus its smoothed version. */
        pixels[high_pass+i]-=pixels[low_pass+i];
        if (pixels[high_pass+i] < -magnitude)
          pixels[high_pass+i]+=magnitude-softness*magnitude;
        else
          if (pixels[high_pass+i] > magnitude)
            pixels[high_pass+i]-=magnitude-softness*magnitude;
          else
            pixels[high_pass+i]*=softness;
        /* Accumulate the shrunk detail back into the output plane. */
        if (high_pass != 0)
          pixels[i]+=pixels[high_pass+i];
      }
      high_pass=low_pass;
    }
    /*
      Reconstruct image from the thresholded wavelet kernel.
    */
    i=0;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register Quantum
        *magick_restrict q;

      register ssize_t
        x;

      ssize_t
        offset;

      q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
        exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      offset=GetPixelChannelOffset(noise_image,pixel_channel);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        MagickRealType
          pixel;

        /* Output = accumulated details plus the residual low-pass band. */
        pixel=(MagickRealType) pixels[i]+pixels[low_pass+i];
        q[offset]=ClampToQuantum(pixel);
        i++;
        q+=GetPixelChannels(noise_image);
      }
      sync=SyncCacheViewAuthenticPixels(noise_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    /* NOTE(review): progress reporting reuses AddNoiseImageTag rather than a
       wavelet-specific tag — presumably intentional; confirm upstream. */
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,AddNoiseImageTag,(MagickOffsetType)
          channel,GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  kernel=(float *) RelinquishMagickMemory(kernel);
  pixels_info=RelinquishVirtualMemory(pixels_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}
9862.c
/* * Compile using the command: * `cc 27Stencil.c -o oa -fopenmp -lm` */ #include <math.h> #include <omp.h> #include <stdint.h> #include <string.h> #include <stdio.h> #include <stdlib.h> #ifdef _OPENACC #include <openacc.h> #endif #define DEFAULT_DATASIZE 1048576 /* Default datasize. */ #define DEFAULT_REPS 10 /* Default repetitions. */ #define CONF95 1.96 #define ITERATIONS 10 #define FAC (1./26) #define TOLERANCE 1.0e-15 extern int reps; /* Repetitions. */ extern double *times; /* Array to store results in. */ extern int flag; /* Flag to set CPU or GPU invocation. */ extern unsigned int datasize; /* Datasize passed to benchmark functions. */ unsigned int datasize = -1; /* Datasize for tests in bytes. */ int reps = -1; /* Repetitions. */ double *times; /* Array of doubles storing the benchmark times in microseconds. */ double testtime; /* The average test time in microseconds for reps runs. */ double testsd; /* The standard deviation in the test time in microseconds for reps runs. */ int flag = 0; /* 0 indicates CPU. */ /* * Function prototypes for common functions. */ void init(int argc, char **argv); void finalisetest(char *); void finalise(void); void benchmark(char *, double (*test)(void)); void print_results(char *, double, double); /* Forward Declarations of utility functions*/ double max_diff(double *, double *, int); void wul(); void usage(char *argv[]) { printf("Usage: %s \n" "\t--reps <repetitions> (default %d)\n" "\t--datasize <datasize> (default %d bytes)\n", argv[0], DEFAULT_REPS, DEFAULT_DATASIZE); } /* * This function parses the parameters from the command line. 
*/ void parse_args(int argc, char *argv[]) { int arg; for (arg = 1; arg < argc; arg++) { if (strcmp(argv[arg], "--reps") == 0) { reps = atoi(argv[++arg]); if (reps == 0) { printf("Invalid integer:--reps: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } else if (strcmp(argv[arg], "--datasize") == 0) { datasize = atoi(argv[++arg]); if (datasize == 0) { printf("Invalid integer:--datasize: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } else if (strcmp(argv[arg], "-h") == 0) { usage(argv); exit(EXIT_SUCCESS); } else { printf("Invalid parameters: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } } void stats(double *mtp, double *sdp) { double meantime, totaltime, sumsq, mintime, maxtime, sd; int i, good_reps; mintime = 1.0e10; maxtime = 0.; totaltime = 0.; good_reps = 0; for (i = 0; i < reps; i++) { /* Skip entries where times is 0, this indicates an error occured */ if (times[i] != 0){ mintime = (mintime < times[i]) ? mintime : times[i]; maxtime = (maxtime > times[i]) ? maxtime : times[i]; totaltime += times[i]; good_reps++; } } meantime = totaltime / good_reps; sumsq = 0; for (i = 0; i < reps; i++) { if (times[i] != 0){ sumsq += (times[i] - meantime) * (times[i] - meantime); } } sd = sqrt(sumsq / good_reps); *mtp = meantime; *sdp = sd; } /* * This function prints the results of the tests. * If you use a compiler which sets a different preprocessor flag * you may wish to add it here. */ void print_results(char *name, double testtime, double testsd) { char compiler[20]; /* Set default compiler idetifier. */ sprintf(compiler, "COMPILER"); /* Set compiler identifier based on known preprocessor flags. */ #ifdef __PGI sprintf(compiler, "PGI"); #endif #ifdef __HMPP sprintf(compiler, "CAPS"); #endif //printf("%s %s %d %f %f\n", compiler, name, datasize, testtime*1e6, CONF95*testsd*1e6); printf("%f\n", testtime*1e6); } /* * This function initialises the storage for the test results and set the defaults. 
*/ void init(int argc, char **argv) { parse_args(argc, argv); if (reps == -1) { reps = DEFAULT_REPS; } if (datasize == (unsigned int)-1) { datasize = DEFAULT_DATASIZE; } times = (double *)malloc((reps) * sizeof(double)); /* #ifdef __PGI acc_init(acc_device_nvidia); // printf("PGI INIT\n"); #endif #ifdef __HMPP int a[5] = {1,2,3,4,5}; #pragma acc data copyin(a[0:5]) {} #endif #ifdef _CRAYC int a[5] = {1,2,3,4,5}; #pragma acc data copyin(a[0:5]) {} #endif */ } void finalise(void) { free(times); } /* * This function runs the benchmark specified. */ void benchmark(char *name, double (*test)(void)) { int i = 0; double tmp = 0; for (i=0; i<reps; i++) { tmp = test(); if (tmp == -10000){ printf("Memory allocation failure in %s\n", name); times[i] = 0; } else if (tmp == -11000){ printf("CPU/GPU mismatch in %s\n", name); times[i] = 0; } else{ times[i] = tmp; } } stats(&testtime, &testsd); //printf("in benchmark\n"); print_results(name, testtime, testsd); //printf("printed result\n"); } double stencil() { extern unsigned int datasize; int sz = cbrt((datasize/sizeof(double))/2); int i, j, k, iter; int n = sz-2; double fac = FAC; double t1, t2; double md; //printf("size = %d\n", sz); /* Work buffers, with halos */ double *a0 = (double*)malloc(sizeof(double)*sz*sz*sz); double *device_result = (double*)malloc(sizeof(double)*sz*sz*sz); double *a1 = (double*)malloc(sizeof(double)*sz*sz*sz); double *host_result = (double*)malloc(sizeof(double)*sz*sz*sz); double *a0_init = (double*)malloc(sizeof(double)*sz*sz*sz); if(a0==NULL||device_result==NULL||a1==NULL||host_result==NULL||a0_init==NULL){ /* Something went wrong in the memory allocation here, fail gracefully */ return(-10000); } /* initialize input array a0 */ /* zero all of array (including halos) */ //printf("size = %d\n", sz); for (i = 0; i < sz; i++) { for (j = 0; j < sz; j++) { for (k = 0; k < sz; k++) { a0[i*sz*sz+j*sz+k] = 0.0; //printf("%d\t", (i*sz*sz+j*sz+k)); } } } //printf("\n"); //int size_of_a0 = sizeof(a0) / 
sizeof(*a0); //printf("size of a0 = %d\n", size_of_a0); /* use random numbers to fill interior */ for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { a0[i*sz*sz+j*sz+k] = (double) rand()/ (double)(1.0 + RAND_MAX); } } } /* memcpy(&a0_init[0], &a0[0], sizeof(double)*sz*sz*sz); */ /* save initial input array for later GPU run */ for (i = 0; i < sz; i++) { for (j = 0; j < sz; j++) { for (k = 0; k < sz; k++) { a0_init[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k]; } } } //printf("Host computation\n"); /* run main computation on host */ for (iter = 0; iter < ITERATIONS; iter++) { for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { a1[i*sz*sz+j*sz+k] = ( a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] + a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] + a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] + a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] + a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] + a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] + a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] + a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] + a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] + a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] + a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)] ) * fac; } } } for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k]; } } } } /* end iteration loop */ /* save result */ /* memcpy(&host_result[0], &a0[0], sizeof(double)*sz*sz*sz); */ for (i = 0; i < sz; i++) { for (j = 0; j < sz; j++) { for (k = 0; k < sz; k++) { host_result[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k]; // printf("%lf\t", a0[i*sz*sz+j*sz+k]); } } } //int size = sizeof(host_result)/sizeof(host_result[0]); //for(i = 0; i < size; i++) { // 
printf("%lf\t", host_result[i]); //} //printf("\n"); /* copy initial array back to a0 */ /* memcpy(&a0[0], &a0_init[0], sizeof(double)*sz*sz*sz); */ for (i = 0; i < sz; i++) { for (j = 0; j < sz; j++) { for (k = 0; k < sz; k++) { a0[i*sz*sz+j*sz+k] = a0_init[i*sz*sz+j*sz+k]; } } } //printf("Starting acc pragma code\n"); t1 = omp_get_wtime(); #pragma acc data copy(a0[0:sz*sz*sz]), create(a1[0:sz*sz*sz], i,j,k,iter), copyin(sz,fac,n) { for (iter = 0; iter < ITERATIONS; iter++) { #pragma omp target teams distribute for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { a1[i*sz*sz+j*sz+k] = ( a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] + a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] + a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] + a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] + a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] + a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] + a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] + a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] + a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] + a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] + a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)] ) * fac; } } } #pragma acc parallel loop for (i = 1; i < n+1; i++) { #pragma acc loop for (j = 1; j < n+1; j++) { #pragma acc loop for (k = 1; k < n+1; k++) { a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k]; } } } } /* end iteration loop */ } /* end data region */ #pragma acc wait t2 = omp_get_wtime(); memcpy(&device_result[0], &a0[0], sizeof(double)*sz*sz*sz); md = max_diff(&host_result[0],&device_result[0], sz); /* Free malloc'd memory to prevent leaks */ free(a0); free(a0_init); free(a1); free(host_result); free(device_result); //printf("md: %lf \t tolerance: %lf", md, TOLERANCE); if (md < TOLERANCE ){ //printf 
("GPU matches host to within tolerance of %1.1e\n\n", TOLERANCE); return(t2 - t1); } else{ // printf ("WARNING: GPU does not match to within tolerance of %1.1e\nIt is %lf\n", TOLERANCE, md); return(-11000); } } /* Utility Functions */ double max_diff(double *array1,double *array2, int sz) { double tmpdiff, diff; int i,j,k; int n = sz-2; diff=0.0; for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { tmpdiff = fabs(array1[i*sz*sz+j*sz+k] - array2[i*sz*sz+j*sz+k]); //printf("diff: %lf", tmpdiff); if (tmpdiff > diff) diff = tmpdiff; } } } return diff; } /* * This function ensures the device is awake. * It is more portable than acc_init(). */ void wul(){ int data = 8192; double *arr_a = (double *)malloc(sizeof(double) * data); double *arr_b = (double *)malloc(sizeof(double) * data); int i = 0; if (arr_a==NULL||arr_b==NULL) { printf("Unable to allocate memory in wul.\n"); } for (i=0;i<data;i++){ arr_a[i] = (double) (rand()/(1.0+RAND_MAX)); } #pragma acc data copy(arr_b[0:data]), copyin(arr_a[0:data]) { #pragma acc parallel loop for (i=0;i<data;i++){ arr_b[i] = arr_a[i] * 2; } } if (arr_a[0] < 0){ printf("Error in WUL\n"); /* * This should never be called as rands should be in the range (0,1]. * This stops clever optimizers. */ } free(arr_a); free(arr_b); } int main(int argc, char **argv) { char testName[32]; //printf("compiler name datasize testtime*1e6 CONF95*testsd*1e6\n"); /* Initialise storage for test results & parse input arguements. */ init(argc, argv); /* Ensure device is awake. */ wul(); sprintf(testName, "27S"); benchmark(testName, &stencil); /* Print results & free results storage */ finalise(); return EXIT_SUCCESS; }
AVX-512Fsearch.c
#include "AVX-512Fsearch.h" // Host search using AVX-512 instrucions and Score Profile technique void search_avx512f_ap (char * query_sequences, unsigned short int * query_sequences_lengths, unsigned long int query_sequences_count, unsigned int * query_disp, char profile, unsigned short int query_length_threshold, char * vect_db_sequences, unsigned short int * vect_db_sequences_lengths, unsigned short int * vect_db_nbbs, unsigned long int vect_db_sequences_count, unsigned long int * vect_db_sequences_disp, __m512i * submat, int open_gap, int extend_gap, int n_threads, int block_width, __m512i * scores, double * workTime){ long int i, j, k, qp_count, sp_count; double tick; char *a; unsigned int * a_disp, queryProfiles_length; unsigned long int * b_disp = NULL; unsigned short int * m, *n, *nbbs, query_sequences_max_length; char *b; __m512i * queryProfiles; a = query_sequences; m = query_sequences_lengths; a_disp = query_disp; query_sequences_max_length = query_sequences_lengths[query_sequences_count-1]; b = vect_db_sequences; n = vect_db_sequences_lengths; nbbs = vect_db_nbbs; b_disp = vect_db_sequences_disp; if (profile == QUERY_PROFILE) query_length_threshold = query_sequences_max_length+1; else if (profile == SCORE_PROFILE) query_length_threshold = 0; // calculate number of query sequences that are processed with query and score profile i = 0; while ((i < query_sequences_count) && (query_sequences_lengths[i] < query_length_threshold)) i++; qp_count = i; sp_count = query_sequences_count-qp_count; // allocate memory for query profiles (if correspond) if (qp_count > 0) queryProfiles = (__m512i *)_mm_malloc((a_disp[qp_count])*2*sizeof(__m512i),MEMALIGN); tick = dwalltime(); #pragma omp parallel default(none) shared(block_width, a, b, n, nbbs, m, a_disp, b_disp, submat, scores, query_sequences_count, \ vect_db_sequences_count, open_gap, extend_gap, query_sequences_max_length, qp_count, sp_count, \ queryProfiles, query_length_threshold) num_threads(n_threads) { char * 
ptr_a; __m512i *row1, *row2, *maxCol, *maxRow, *lastCol, *tmp, *ptr_scores, *bIndexes, *queryProfile, * scoreProfile, *ptr_scoreProfile1, *ptr_scoreProfile2; __declspec(align(16)) __m128i* ptr_b, *ptr_b_block; __declspec(align(MEMALIGN)) __m512i vzero = _mm512_setzero_epi32(), score, previous, current1, current2, aux1, auxLastCol; __declspec(align(MEMALIGN)) __m512i vextend_gap = _mm512_set1_epi32(extend_gap), vopen_extend_gap = _mm512_set1_epi32(open_gap+extend_gap); __declspec(align(MEMALIGN)) __m512i v16 = _mm512_set1_epi32(16), submat_hi1, submat_lo1, submat_hi2, submat_lo2, bValues, maxRow1, maxRow2; __mmask16 * masks, mask; unsigned int i, j, ii, jj, k, disp, dim1, dim2, nbb; unsigned long int t, s, q; // allocate memory for auxiliary buffers row1 = (__m512i *) _mm_malloc((block_width+1)*sizeof(__m512i),MEMALIGN); row2 = (__m512i *) _mm_malloc((block_width+1)*sizeof(__m512i),MEMALIGN); maxCol = (__m512i *) _mm_malloc((block_width+1)*sizeof(__m512i),MEMALIGN); maxRow = (__m512i *) _mm_malloc((query_sequences_max_length)*sizeof(__m512i),MEMALIGN); lastCol = (__m512i *) _mm_malloc((query_sequences_max_length)*sizeof(__m512i),MEMALIGN); // allocate memory for SP (if correspond) if (query_sequences_max_length >= query_length_threshold) scoreProfile = (__m512i *) _mm_malloc(SUBMAT_ROWS*block_width*sizeof(__m512i), MEMALIGN); // build query profiles (if correspond) if (qp_count > 0) { // alloc memory for indexes bIndexes = (__m512i *) _mm_malloc((block_width)*sizeof(__m512i),MEMALIGN); masks = (__mmask16 *) _mm_malloc((block_width)*sizeof(__mmask16),MEMALIGN); #pragma omp for schedule(dynamic) for (i=0; i< a_disp[qp_count] ; i++) { queryProfiles[i*2] = submat[a[i]*2]; queryProfiles[i*2+1] = submat[a[i]*2+1]; } } // calculate chunk alignments using query profile technique #pragma omp for schedule(dynamic) nowait for (t=0; t< qp_count*vect_db_sequences_count; t++) { q = (qp_count-1) - (t % qp_count); s = (vect_db_sequences_count-1) - (t / qp_count); queryProfile = 
queryProfiles + a_disp[q]*2; ptr_b = (__m128i*)(b + b_disp[s]); ptr_scores = scores + (q*vect_db_sequences_count+s); // init buffers #pragma unroll(UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm512_setzero_epi32(); // index 0 is not used #pragma unroll(UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm512_setzero_epi32(); // set score to 0 score = _mm512_setzero_epi32(); // calculate number of blocks nbb = nbbs[s]; for (k=0; k < nbb; k++){ // calculate dim1 disp = k*block_width; dim1 = (block_width < n[s]-disp ? block_width : n[s]-disp); // calculate dim2 dim2 = dim1 / DB_SEQ_LEN_MULT; // init buffers #pragma unroll(UNROLL_COUNT) for (i=1; i<dim1+1 ; i++ ) maxCol[i] = _mm512_setzero_epi32(); //index 0 is not used #pragma unroll(UNROLL_COUNT) for (i=0; i<dim1 ; i++ ) row1[i] = _mm512_setzero_epi32(); auxLastCol = _mm512_setzero_epi32(); // get bIndexes ptr_b_block = ptr_b + disp; #pragma unroll(UNROLL_COUNT) for (i=0; i<dim1 ; i++ ) { bIndexes[i] = _mm512_cvtepi8_epi32(ptr_b_block[i]); masks[i] = _mm512_cmpge_epi32_mask(bIndexes[i],v16); } for( i = 0; i < m[q]; i+=QUERY_SEQ_LEN_MULT){ // update row[0] with lastCol[i-1] row1[0] = lastCol[i]; previous = lastCol[i+1]; // load submat values corresponding to current a residue submat_lo1 = (queryProfile[i*2]); submat_hi1 = (queryProfile[i*2+1]); submat_lo2 = (queryProfile[(i+1)*2]); submat_hi2 = (queryProfile[(i+1)*2+1]); // store maxRow in auxiliars maxRow1 = maxRow[i]; maxRow2 = maxRow[i+1]; for (ii=0; ii<dim2 ; ii++) { #pragma unroll(DB_SEQ_LEN_MULT) for( j=ii*DB_SEQ_LEN_MULT+1, jj=0; jj < DB_SEQ_LEN_MULT; jj++, j++) { //calcuate the diagonal value aux1 = _mm512_permutevar_epi32(bIndexes[j-1], submat_lo1); aux1 = _mm512_mask_permutevar_epi32(aux1, masks[j-1], bIndexes[j-1], submat_hi1); current1 = _mm512_add_epi32(row1[j-1], aux1); // calculate current1 max value current1 = _mm512_max_epi32(current1, maxRow1); current1 = _mm512_max_epi32(current1, maxCol[j]); current1 = _mm512_max_epi32(current1, vzero); 
// update maxRow and maxCol maxRow1 = _mm512_sub_epi32(maxRow1, vextend_gap); maxCol[j] = _mm512_sub_epi32(maxCol[j], vextend_gap); aux1 = _mm512_sub_epi32(current1, vopen_extend_gap); maxRow1 = _mm512_max_epi32(maxRow1, aux1); maxCol[j] = _mm512_max_epi32(maxCol[j], aux1); // update max score score = _mm512_max_epi32(score,current1); //calcuate the diagonal value aux1 = _mm512_permutevar_epi32(bIndexes[j-1], submat_lo2); aux1 = _mm512_mask_permutevar_epi32(aux1, masks[j-1], bIndexes[j-1], submat_hi2); current2 = _mm512_add_epi32(previous, aux1); // update previous previous = current1; // calculate current2 max value current2 = _mm512_max_epi32(current2, maxRow2); current2 = _mm512_max_epi32(current2, maxCol[j]); current2 = _mm512_max_epi32(current2, vzero); // update maxRow and maxCol maxRow2 = _mm512_sub_epi32(maxRow2, vextend_gap); maxCol[j] = _mm512_sub_epi32(maxCol[j], vextend_gap); aux1 = _mm512_sub_epi32(current2, vopen_extend_gap); maxRow2 = _mm512_max_epi32(maxRow2, aux1); maxCol[j] = _mm512_max_epi32(maxCol[j], aux1); // update row buffer row2[j] = current2; // update max score score = _mm512_max_epi32(score,current2); } } if (k != nbb-1) { // update maxRow maxRow[i] = maxRow1; maxRow[i+1] = maxRow2; // update lastCol lastCol[i] = auxLastCol; lastCol[i+1] = current1; auxLastCol = current2; } // swap buffers tmp = row1; row1 = row2; row2 = tmp; } } // store max value _mm512_store_epi32(ptr_scores, score); } // calculate chunk alignments using score profile technique #pragma omp for schedule(dynamic) nowait for (t=0; t< sp_count*vect_db_sequences_count; t++) { q = qp_count + (sp_count-1) - (t % sp_count); s = (vect_db_sequences_count-1) - (t / sp_count); ptr_a = a + a_disp[q]; ptr_b = (__m128i*)(b + b_disp[s]); ptr_scores = scores + (q*vect_db_sequences_count+s); // init buffers #pragma unroll(UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm512_setzero_epi32(); // index 0 is not used #pragma unroll(UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) lastCol[i] = 
_mm512_setzero_epi32(); // set score to 0 score = _mm512_setzero_epi32(); // calculate number of blocks nbb = nbbs[s]; for (k=0; k < nbb; k++){ // calculate dim1 disp = k*block_width; dim1 = (block_width < n[s]-disp ? block_width : n[s]-disp); // calculate dim2 dim2 = dim1 / DB_SEQ_LEN_MULT; // init buffers #pragma unroll(UNROLL_COUNT) for (i=1; i<dim1+1 ; i++ ) maxCol[i] = _mm512_setzero_epi32(); //index 0 is not used #pragma unroll(UNROLL_COUNT) for (i=0; i<dim1 ; i++ ) row1[i] = _mm512_setzero_epi32(); auxLastCol = _mm512_setzero_epi32(); // build score profile ptr_b_block = ptr_b + disp; for (i=0; i< dim1 ;i++ ) { bValues = _mm512_cvtepi8_epi32(ptr_b_block[i]); mask = _mm512_cmpge_epi32_mask(bValues,v16); ptr_scoreProfile1 = scoreProfile + i; #pragma unroll for (j=0; j< SUBMAT_ROWS; j++) { aux1 = _mm512_permutevar_epi32(bValues, (submat[j*2])); ptr_scoreProfile1[j*dim1] = _mm512_mask_permutevar_epi32(aux1, mask, bValues, (submat[j*2+1])); } } for( i = 0; i < m[q]; i+=QUERY_SEQ_LEN_MULT){ // update row[0] with lastCol[i-1] row1[0] = lastCol[i]; previous = lastCol[i+1]; // calculate score profile displacement ptr_scoreProfile1 = scoreProfile+ptr_a[i]*dim1; ptr_scoreProfile2 = scoreProfile+ptr_a[i+1]*dim1; // store maxRow in auxiliars maxRow1 = maxRow[i]; maxRow2 = maxRow[i+1]; for (ii=0; ii<dim2 ; ii++) { #pragma unroll(DB_SEQ_LEN_MULT) for( j=ii*DB_SEQ_LEN_MULT+1, jj=0; jj < DB_SEQ_LEN_MULT; jj++, j++) { //calcuate the diagonal value current1 = _mm512_add_epi32(row1[j-1], (ptr_scoreProfile1[j-1])); // calculate current1 max value current1 = _mm512_max_epi32(current1, maxRow1); current1 = _mm512_max_epi32(current1, maxCol[j]); current1 = _mm512_max_epi32(current1, vzero); // update maxRow and maxCol maxRow1 = _mm512_sub_epi32(maxRow1, vextend_gap); maxCol[j] = _mm512_sub_epi32(maxCol[j], vextend_gap); aux1 = _mm512_sub_epi32(current1, vopen_extend_gap); maxRow1 = _mm512_max_epi32(maxRow1, aux1); maxCol[j] = _mm512_max_epi32(maxCol[j], aux1); // update max score 
score = _mm512_max_epi32(score,current1); //calcuate the diagonal value current2 = _mm512_add_epi32(previous, (ptr_scoreProfile2[j-1])); // update previous previous = current1; // calculate current2 max value current2 = _mm512_max_epi32(current2, maxRow2); current2 = _mm512_max_epi32(current2, maxCol[j]); current2 = _mm512_max_epi32(current2, vzero); // update maxRow and maxCol maxRow2 = _mm512_sub_epi32(maxRow2, vextend_gap); maxCol[j] = _mm512_sub_epi32(maxCol[j], vextend_gap); aux1 = _mm512_sub_epi32(current2, vopen_extend_gap); maxRow2 = _mm512_max_epi32(maxRow2, aux1); maxCol[j] = _mm512_max_epi32(maxCol[j], aux1); // update row buffer row2[j] = current2; // update max score score = _mm512_max_epi32(score,current2); } } if (k != nbb-1) { // update maxRow maxRow[i] = maxRow1; maxRow[i+1] = maxRow2; // update lastCol lastCol[i] = auxLastCol; lastCol[i+1] = current1; auxLastCol = current2; } // swap buffers tmp = row1; row1 = row2; row2 = tmp; } } // store max value _mm512_store_epi32(ptr_scores, score); } _mm_free(row1);_mm_free(row2); _mm_free(maxCol); _mm_free(maxRow); _mm_free(lastCol); if (qp_count > 0) { _mm_free(bIndexes); _mm_free(masks); } if (sp_count > 0) _mm_free(scoreProfile); } *workTime = dwalltime()-tick; if (qp_count > 0) _mm_free(queryProfiles); }
flow_map.c
// // I compile with >> gcc-8 -fopenmp ftle.c -o ftle // since I'm running on Mac with gcc-8 being my gcc which is // installed by brew. Need OpenMP to get parallel code to work. // // run with >> ./ftle x0 xend y0 yend t0 tend sizex sizey // // where the spacial variables define a bounding box [x0, x1]x[y0, y1] // and [t0, t1] define the time interval to compute the flow map over. // // If using over an interval outside of [-200, 200]x[-200, 200], you will // need to change the #define variables to extend the trajectory cutoff // window as necessary. // // Meant to be used with Matlab as // // % compute flow map with flow executable // [status, result] = system(sprintf('./flow %f %f %f %f %f %f %d %d',... // x0, x1, y0, y1, t0, t1, numx, numy)); // // % evaluate expression for stacked flow maps // flows = eval(result); // flow_mapx = flows(1:numx, :)'; // flow_mapy = flows(1+numx:end, :)'; // // ftle.c // flow_map // // Created by Evan Burton on 12/4/18. // Copyright © 2018 Evan Burton. All rights reserved. 
// #include <math.h> #include <stdio.h> #include <stdlib.h> #include <sys/stat.h> #define xubound 200 #define xlbound -200 #define yubound 200 #define ylbound -200 double A = 0.1; double ep = 0.1; double w = M_PI/5; // The x velocity component x' = f1(t, x, y) double f1(double t, double x, double y){ double g = x*(ep*sin(w*t)*x + (1 - 2*ep*sin(w*t))); return -A*M_PI*sin(M_PI*g)*cos(M_PI*y); //return -A*M_PI*sin(M_PI*x)*cos(M_PI*y); //return y; } // The y velocity component y' = f2(t, x, y) double f2(double t, double x, double y){ double g = x*(ep*sin(w*t)*x + (1 - 2*ep*sin(w*t))); double dg = 1 + (2*x - ep)*ep*sin(w*t); return A*M_PI*cos(M_PI*g)*sin(M_PI*y)*dg; //return A*M_PI*cos(M_PI*x)*sin(M_PI*y); //return sin(x*t); } int offset(int, int, int, int, int); void rk4(double a, double b, double h, double x0, double y0, double* fval); int main(int argc, const char * argv[]) { double x0 = atof(argv[1]); double xend = atof(argv[2]); double y0 = atof(argv[3]); double yend = atof(argv[4]); double t0 = atof(argv[5]); double tend = atof(argv[6]); unsigned int numx = atof(argv[7]); unsigned int numy = atof(argv[8]); //double flow_map[numx][numy][2]; //double exps[numx][numy]; double* flow_map; flow_map = malloc(numx*numy*2 * sizeof(double)); double xs[numx]; double ys[numy]; // time step for rk4 double dt = 0.01; if (tend < t0){ dt = -dt; } double dx = (xend-x0)/((double)numx-1); double dy = (yend-y0)/((double)numy-1); #pragma omp parallel { #pragma omp for for(int i = 0; i < numx; i++) xs[i] = x0 + i*dx; #pragma omp for for(int j = 0; j < numy; j++) ys[j] = y0 + j*dy; } #pragma omp parallel for schedule(guided) shared(xs, ys, t0, tend, dt, flow_map) for(int i = 0; i < numx; i++){ for(int j = 0; j < numy; j++){ double fval[2]; rk4(t0, tend, dt, xs[i], ys[j], fval); flow_map[offset(i, j, 0, numx, numy)] = fval[0]; flow_map[offset(i, j, 1, numx, numy)] = fval[1]; } //printf("%d / %d\n", i, numx); } printf("["); for (int k = 0; k < 2; k++){ for (int i=0; i < numx; i++) { for 
(int j=0; j < numy-1; j++) { printf("%f,", flow_map[offset(i, j, k, numx, numy)]); } printf("%f\n", flow_map[offset(i, numy-1, k, numx, numy)]); } } printf("]"); free(flow_map); return 0; } int offset(int x, int y, int z, int numx, int numy) { return (z * numx * numy) + (y * numx) + x; } void rk4(double a, double b, double h, double x0, double y0, double* fval){ // Get number of points int n = fabs((b-a)/h) + 1; double xi = x0; double yi = y0; double t = a; double k1, k2, k3, k4, l1, l2, l3, l4; for(int i = 0; i < n; i++){ // RK4 Scheme k1 = h*f1(t, xi, yi); l1 = h*f2(t, xi, yi); k2 = h*f1(t + h/2.0, xi + k1/2.0, yi + l1/2); l2 = h*f2(t + h/2.0, xi + k1/2.0, yi + l1/2); k3 = h*f1(t + h/2.0, xi + k2/2.0, yi + l2/2); l3 = h*f2(t + h/2.0, xi + k2/2.0, yi + l2/2); k4 = h*f1(t+h, xi+k3, yi + l3); l4 = h*f2(t+h, xi+k3, yi + l3); xi = xi + (k1+2*(k2+k3)+k4)/6.0; yi = yi + (l1+2*(l2+l3)+l4)/6.0; // Ensure spacial variables do not leave bounding boxs if (xi > xubound || xi < xlbound || yi > yubound || yi < xlbound){ fval[0] = xi; fval[1] = yi; return; } t += h; } fval[0] = xi; fval[1] = yi; }
openmp_kernels.c
#include "pcg_basic.h"
#include "openmp_kernels.h"

#include <stdio.h>

/* Vector-safety length hint for the OpenMP simd pragmas below. */
#define SIMD 8

/* Full scatter-gather: target[ti[i]] = source[si[i]] for i in [0, n). */
void sg_omp(
            sgData_t* restrict target,
            sgIdx_t*  restrict ti,
            sgData_t* restrict source,
            sgIdx_t*  restrict si,
            size_t n)
{
#pragma omp parallel for simd safelen(SIMD)
#pragma prefervector
    for(long i = 0; i < n; i++){
        target[ti[i]] = source[si[i]];
    }
}

/* Scatter: target[ti[i]] = source[i] for i in [0, n). */
void scatter_omp(
            sgData_t* restrict target,
            sgIdx_t*  restrict ti,
            sgData_t* restrict source,
            sgIdx_t*  restrict si,
            size_t n)
{
#pragma omp parallel for simd safelen(SIMD)
#pragma prefervector
    for(long i = 0; i < n; i++){
        target[ti[i]] = source[i];
    }
}

/* Gather: target[i] = source[si[i]] for i in [0, n). */
void gather_omp(
            sgData_t* restrict target,
            sgIdx_t*  restrict ti,
            sgData_t* restrict source,
            sgIdx_t*  restrict si,
            size_t n)
{
    //Users may want to set a specific safelen value like 32
#pragma omp parallel for simd safelen(SIMD)
#pragma prefervector
    for(long i = 0; i < n; i++){
        target[i] = source[si[i]];
    }
}

int get_ind(void)
{
    return 1;
}

/* Patterned gather: iteration i reads (source + i*delta)[pat[j]] for each j.
 * BUGFIX: the original advanced the shared `source` pointer with
 * `source += delta` inside the `parallel for` loop — a data race under
 * OpenMP.  Each iteration now derives its own base pointer, which matches
 * the original sequential semantics exactly. */
void gather(
            sgData_t* restrict target,
            sgData_t* restrict source,
            sgIdx_t* const restrict pat,
            size_t pat_len,
            size_t delta,
            size_t n)
{
#pragma omp parallel for simd safelen(SIMD)
#pragma prefervector
    for (size_t i = 0; i < n; i++) {
        sgData_t* restrict src = source + i * delta;
#pragma loop_info est_trips(8)
#pragma loop_info prefetch
        for (size_t j = 0; j < pat_len; j++) {
            target[i*pat_len+j] = src[pat[j]];
        }
    }
}

/* Per-thread small-buffer gather: thread t writes into its own buffer
 * target[t], wrapping every target_len pattern applications. */
void gather_smallbuf(
            sgData_t** restrict target,
            sgData_t* const restrict source,
            sgIdx_t* const restrict pat,
            size_t pat_len,
            size_t delta,
            size_t n,
            size_t target_len)
{
#ifdef __GNUC__
#pragma omp parallel
#else
#pragma omp parallel shared(pat)
#endif
    {
        int t = omp_get_thread_num();
#ifdef __CRAYC__
#pragma concurrent
#endif
#ifdef __INTEL_COMPILER
#pragma ivdep
#endif
#pragma omp for
        for (size_t i = 0; i < n; i++) {
            sgData_t *sl = source + delta * i;
            sgData_t *tl = target[t] + pat_len*(i%target_len);
#ifdef __CRAYC__
#pragma concurrent
#endif
#if defined __CRAYC__ || defined __INTEL_COMPILER
#pragma vector always,unaligned
#endif
            for (size_t j = 0; j < pat_len; j++) {
                tl[j] = sl[pat[j]];
            }
        }
    }
}

/* Small-buffer gather with a random source offset per iteration.
 * NOTE(review): rand() is not required to be thread-safe and is called
 * from inside the parallel region; results are non-deterministic across
 * threads — confirm this is intentional (see the pcg-based variants for
 * a reproducible alternative). */
void gather_smallbuf_rdm(
            sgData_t** restrict target,
            sgData_t* const restrict source,
            sgIdx_t* const restrict pat,
            size_t pat_len,
            size_t delta,
            size_t n,
            size_t target_len)
{
#ifdef __GNUC__
#pragma omp parallel
#else
#pragma omp parallel shared(pat)
#endif
    {
        int t = omp_get_thread_num();
#ifdef __CRAYC__
#pragma concurrent
#endif
#ifdef __INTEL_COMPILER
#pragma ivdep
#endif
#pragma omp for
        for (size_t i = 0; i < n; i++) {
            sgData_t *sl = source + rand()%((n-1)*delta);
            sgData_t *tl = target[t] + pat_len*(i%target_len);
#ifdef __CRAYC__
#pragma concurrent
#endif
#if defined __CRAYC__ || defined __INTEL_COMPILER
#pragma vector always,unaligned
#endif
            for (size_t j = 0; j < pat_len; j++) {
                tl[j] = sl[pat[j]];
            }
        }
    }
}

/* Per-thread small-buffer scatter: thread t reads from its own buffer
 * source[t] and scatters through the pattern into target. */
void scatter_smallbuf(
            sgData_t* restrict target,
            sgData_t** const restrict source,
            sgIdx_t* const restrict pat,
            size_t pat_len,
            size_t delta,
            size_t n,
            size_t source_len)
{
#ifdef __GNUC__
#pragma omp parallel
#else
#pragma omp parallel shared(pat)
#endif
    {
        int t = omp_get_thread_num();
#ifdef __CRAYC__
#pragma concurrent
#endif
#ifdef __INTEL_COMPILER
#pragma ivdep
#endif
#pragma omp for
        for (size_t i = 0; i < n; i++) {
            sgData_t *tl = target + delta * i;
            sgData_t *sl = source[t] + pat_len*(i%source_len);
#ifdef __CRAYC__
#pragma concurrent
#endif
#if defined __CRAYC__ || defined __INTEL_COMPILER
#pragma vector always,unaligned
#endif
            for (size_t j = 0; j < pat_len; j++) {
                tl[pat[j]] = sl[j];
            }
        }
    }
}

/* Small-buffer gather with a reproducible per-thread pcg32 RNG choosing
 * the source block each iteration. */
void gather_smallbuf_random(
            sgData_t** restrict target,
            sgData_t* const restrict source,
            sgIdx_t* const restrict pat,
            size_t pat_len,
            size_t delta,
            size_t n,
            size_t target_len,
            long initstate)
{
    /* Consistency fix: the scatter counterpart already guards against n
     * exceeding the 32-bit range of the bounded RNG; do the same here. */
    if (n > 1ll<<32) {printf("n too big for rng, exiting.\n"); exit(1);}
#ifdef __GNUC__
#pragma omp parallel
#else
#pragma omp parallel shared(pat)
#endif
    {
        int t = omp_get_thread_num();
        pcg32_random_t rng;
        pcg32_srandom_r(&rng, initstate, t);   /* stream id = thread id */
#ifdef __CRAYC__
#pragma concurrent
#endif
#pragma omp for
        for (size_t i = 0; i < n; i++) {
            uint32_t r = pcg32_boundedrand_r(&rng, (uint32_t)n);
            sgData_t *sl = source + delta * r;
            sgData_t *tl = target[t] + pat_len*(i%target_len);
#ifdef __CRAYC__
#pragma concurrent
#pragma vector always,unaligned
#endif
            for (size_t j = 0; j < pat_len; j++) {
                tl[j] = sl[pat[j]];
            }
        }
    }
}

/* Small-buffer scatter with a reproducible per-thread pcg32 RNG choosing
 * the target block each iteration. */
void scatter_smallbuf_random(
            sgData_t* restrict target,
            sgData_t** const restrict source,
            sgIdx_t* const restrict pat,
            size_t pat_len,
            size_t delta,
            size_t n,
            size_t source_len,
            long initstate)
{
    if (n > 1ll<<32) {printf("n too big for rng, exiting.\n"); exit(1);}
#ifdef __GNUC__
#pragma omp parallel
#else
#pragma omp parallel shared(pat)
#endif
    {
        int t = omp_get_thread_num();
        pcg32_random_t rng;
        pcg32_srandom_r(&rng, initstate, t);
#ifdef __CRAYC__
#pragma concurrent
#endif
#pragma omp for
        for (size_t i = 0; i < n; i++) {
            uint32_t r = pcg32_boundedrand_r(&rng, (uint32_t)n);
            sgData_t *tl = target + delta * r;
            sgData_t *sl = source[t] + pat_len*(i%source_len);
#ifdef __CRAYC__
#pragma concurrent
#pragma vector always,unaligned
#endif
            for (size_t j = 0; j < pat_len; j++) {
                tl[pat[j]] = sl[j];
            }
        }
    }
}

/* Small-buffer gather where the stride between pattern applications
 * cycles through the delta[] array (delta_len entries). */
void gather_smallbuf_multidelta(
            sgData_t** restrict target,
            sgData_t* restrict source,
            sgIdx_t* const restrict pat,
            size_t pat_len,
            size_t *delta,
            size_t n,
            size_t target_len,
            size_t delta_len)
{
#ifdef __GNUC__
#pragma omp parallel
#else
#pragma omp parallel shared(pat)
#endif
    {
        int t = omp_get_thread_num();
#ifdef __CRAYC__
#pragma concurrent
#endif
        //target_len is in multiples of pat_len
#pragma omp for
        for (size_t i = 0; i < n; i++) {
            sgData_t *sl = source + (i/delta_len)*delta[delta_len-1] + delta[i%delta_len] - delta[0];
            sgData_t *tl = target[t] + pat_len*(i%target_len);
#ifdef __CRAYC__
#pragma concurrent
#pragma vector always,unaligned
#endif
            for (size_t j = 0; j < pat_len; j++) {
                tl[j] = sl[pat[j]];
            }
        }
    }
}

/* Patterned scatter: target[pat[j]] = (source + i*delta)[i*pat_len + j].
 * BUGFIX: same shared-pointer data race as gather() — the `source += delta`
 * increment inside the parallel loop is replaced by a per-iteration base
 * pointer with identical sequential semantics.
 * NOTE(review): unlike gather(), the source index here also depends on
 * i*pat_len while target does not advance; presumably target was meant to
 * advance by delta instead — confirm against the gather() counterpart. */
void scatter(
            sgData_t* restrict target,
            sgData_t* restrict source,
            sgIdx_t* const restrict pat,
            size_t pat_len,
            size_t delta,
            size_t n)
{
#pragma omp parallel for simd safelen(SIMD)
#pragma prefervector
    for (size_t i = 0; i < n; i++) {
        sgData_t* restrict src = source + i * delta;
#pragma loop_info est_trips(8)
#pragma loop_info prefetch
        for (size_t j = 0; j < pat_len; j++) {
            target[pat[j]] = src[i*pat_len+j];
        }
    }
}

/* Serial gather with the output index wrapping every target_wrap rows. */
void gather_stride_os(
            sgData_t* restrict target,
            sgData_t* restrict source,
            sgIdx_t* restrict pat,
            size_t pat_len,
            size_t delta,
            size_t n,
            size_t target_wrap)
{
    for (size_t i = 0; i < n; i++) {
        for (size_t j = 0; j < pat_len; j++) {
            target[(i%target_wrap)*pat_len+j] = source[pat[j]];
        }
        source += delta;
    }
}

/* Serial gather with a hard-coded pattern length of 8. */
void gather_stride8(
            sgData_t* restrict target,
            sgData_t* restrict source,
            sgIdx_t* restrict pat,
            size_t delta,
            size_t n)
{
    for (size_t i = 0; i < n; i++) {
        for (size_t j = 0; j < 8; j++) {
            target[i*8+j] = source[pat[j]];
        }
        source += delta;
    }
}

/* Serial gather with a hard-coded pattern length of 16. */
void gather_stride16(
            sgData_t* restrict target,
            sgData_t* restrict source,
            sgIdx_t* restrict pat,
            size_t stride,
            size_t delta,
            size_t n)
{
    for (size_t i = 0; i < n; i++) {
        for (size_t j = 0; j < 16; j++) {
            target[i*16+j] = source[pat[j]];
        }
        source += delta;
    }
}

/* Accumulating scatter-gather: target[ti[i]] += source[si[i]]. */
void sg_accum_omp(
            sgData_t* restrict target,
            sgIdx_t*  restrict ti,
            sgData_t* restrict source,
            sgIdx_t*  restrict si,
            size_t n)
{
#pragma omp parallel for schedule(runtime)
    for(long i = 0; i < n; i++){
        target[ti[i]] += source[si[i]];
    }
}

/* Accumulating scatter: target[ti[i]] += source[i]. */
void scatter_accum_omp(
            sgData_t* restrict target,
            sgIdx_t*  restrict ti,
            sgData_t* restrict source,
            sgIdx_t*  restrict si,
            size_t n)
{
#pragma omp parallel for schedule(runtime)
    for(long i = 0; i < n; i++){
        target[ti[i]] += source[i];
    }
}

/* Accumulating gather: target[i] += source[si[i]]. */
void gather_accum_omp(
            sgData_t* restrict target,
            sgIdx_t*  restrict ti,
            sgData_t* restrict source,
            sgIdx_t*  restrict si,
            size_t n)
{
#pragma omp parallel for schedule(runtime)
    for(long i = 0; i < n; i++){
        target[i] += source[si[i]];
    }
}
SumaVectoresSections.c
/* SumaVectoresSection.c
   Sum of two vectors using the OpenMP sections directive: v3 = v1 + v2
   Build (-lrt: real time library):
     gcc -O2 SumaVectores.c -o SumaVectores -lrt
     gcc -O2 -S SumaVectores.c -lrt   // to generate the assembly listing
   Run:
     SumaVectoresC length
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <omp.h>

//#define PRINTF_ALL
#define VECTOR_GLOBAL
#define MAX 67108864 //=2^26
double v1[MAX], v2[MAX], v3[MAX];

int main(int argc,char** argv){
    double t_inicial, t_final; // wall-clock timing of the sum phase

    // Read the input argument (number of vector components)
    if (argc<2){
        printf("Faltan nº componentes del vector \n");
        exit(-1);
    }

    unsigned int N = atoi(argv[1]);
    // Maximum N = 2^32 - 1 = 4294967295 (sizeof(unsigned int) = 4 B)
    if (N>MAX) N=MAX;

    // Initialize the vectors: each section fills one quarter of the range.
    // BUGFIX: the loop index `i` and the upper bound `tope` were shared by
    // all four sections, which run concurrently — a data race that could
    // corrupt the iteration ranges.  Each section now uses its own private
    // loop variable with explicit quarter bounds.
    #pragma omp parallel sections
    {
        #pragma omp section
        {
            for (unsigned int k=0; k<N/4; k++) {
                v1[k] = N*0.1 + k*0.1;
                v2[k] = N*0.1 - k*0.1;
            }
        }
        #pragma omp section
        {
            for (unsigned int k=N/4; k<N/2; k++) {
                v1[k] = N*0.1 + k*0.1;
                v2[k] = N*0.1 - k*0.1;
            }
        }
        #pragma omp section
        {
            for (unsigned int k=N/2; k<3*N/4; k++) {
                v1[k] = N*0.1 + k*0.1;
                v2[k] = N*0.1 - k*0.1;
            }
        }
        #pragma omp section
        {
            for (unsigned int k=3*N/4; k<N; k++) {
                v1[k] = N*0.1 + k*0.1;
                v2[k] = N*0.1 - k*0.1;
            }
        }
    }

    t_inicial = omp_get_wtime();

    // Compute the vector sum, one quarter per section (same privatization
    // fix as the initialization above).
    #pragma omp parallel sections
    {
        #pragma omp section
        {
            for (unsigned int k=0; k<N/4; k++) {
                v3[k] = v1[k] + v2[k];
            }
        }
        #pragma omp section
        {
            for (unsigned int k=N/4; k<N/2; k++) {
                v3[k] = v1[k] + v2[k];
            }
        }
        #pragma omp section
        {
            for (unsigned int k=N/2; k<3*N/4; k++) {
                v3[k] = v1[k] + v2[k];
            }
        }
        #pragma omp section
        {
            for (unsigned int k=3*N/4; k<N; k++) {
                v3[k] = v1[k] + v2[k];
            }
        }
    }

    t_final = omp_get_wtime() - t_inicial;

    // Print the result of the sum and the execution time
    #ifdef PRINTF_ALL
    printf("Tiempo(seg.):%11.9f \t/ Tamaño Vectores:%u \t/", t_final, N);
    for (int i=0; i<N; i++)
        printf("v3[%d] = %11.9f\n", i, v3[i]);
    #else
    printf("Tiempo(seg.):%11.9f \t/ Tamaño Vectores:%u \t/"
           "V1[0]+V2[0]=V3[0](%8.6f+%8.6f=%8.6f) / /"
           "V1[%d]+V2[%d]=V3[%d](%8.6f+%8.6f=%8.6f) / \n",
           t_final, N, v1[0], v2[0], v3[0],
           N-1, N-1, N-1, v1[N-1], v2[N-1], v3[N-1]);
    #endif

    return 0;
}
test_nvector_openmpdev.c
/* -----------------------------------------------------------------
 * Programmer(s): David J. Gardner @ LLNL
 * -----------------------------------------------------------------
 * SUNDIALS Copyright Start
 * Copyright (c) 2002-2021, Lawrence Livermore National Security
 * and Southern Methodist University.
 * All rights reserved.
 *
 * See the top-level LICENSE and NOTICE files for details.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 * SUNDIALS Copyright End
 * -----------------------------------------------------------------
 * This is the testing routine to check the OpenMP 4.5 NVECTOR
 * module implementation.
 * -----------------------------------------------------------------*/

#include <stdio.h>
#include <stdlib.h>

#include <sundials/sundials_types.h>
#include <nvector/nvector_openmpdev.h>
#include <sundials/sundials_math.h>
#include "test_nvector.h"

#include <omp.h>

/* OpenMPDEV vector specific tests */
int Test_N_VMake_OpenMPDEV(N_Vector X, sunindextype length, int myid);

/* ----------------------------------------------------------------------
 * Main NVector Testing Routine
 *
 * Usage: <exe> <vector length> <print timing (0/1)>
 * Returns -1 on bad input, 1 if a vector could not be created, otherwise
 * the number of failed tests (0 on full success).
 * --------------------------------------------------------------------*/
int main(int argc, char *argv[])
{
  int fails = 0;                   /* counter for test failures */
  int retval;                      /* function return value     */
  sunindextype length;             /* vector length             */
  N_Vector U, V, W, X, Y, Z;       /* test vectors              */
  int print_timing;                /* turn timing on/off        */

  /* check input and set vector length */
  if (argc < 3){
    printf("ERROR: TWO (2) Inputs required: vector length and print timing \n");
    return(-1);
  }

  length = (sunindextype) atol(argv[1]);
  if (length <= 0) {
    printf("ERROR: length of vector must be a positive integer \n");
    return(-1);
  }

  print_timing = atoi(argv[2]);
  SetTiming(print_timing, 0);

  printf("Testing the OpenMP DEV N_Vector \n");
  printf("Vector length %ld \n", (long int) length);

  /* report the OpenMP device configuration visible to this build */
  printf("\n omp_get_default_device = %d \n", omp_get_default_device());
  printf("\n omp_get_num_devices = %d \n", omp_get_num_devices());
  printf("\n omp_get_initial_device = %d \n", omp_get_initial_device());
  printf("\n omp_is_initial_device = %d \n", omp_is_initial_device());

  /* Create new vectors: W is an empty shell, X is fully allocated */
  W = N_VNewEmpty_OpenMPDEV(length);
  if (W == NULL) {
    printf("FAIL: Unable to create a new empty vector \n\n");
    return(1);
  }

  X = N_VNew_OpenMPDEV(length);
  if (X == NULL) {
    N_VDestroy(W);
    printf("FAIL: Unable to create a new vector \n\n");
    return(1);
  }

  /* Check vector ID */
  fails += Test_N_VGetVectorID(X, SUNDIALS_NVEC_OPENMPDEV, 0);

  /* Check vector length */
  fails += Test_N_VGetLength(X, 0);

  /* Check vector communicator */
  fails += Test_N_VGetCommunicator(X, NULL, 0);

  /* Test clone functions */
  fails += Test_N_VCloneEmpty(X, 0);
  fails += Test_N_VClone(X, length, 0);
  fails += Test_N_VCloneEmptyVectorArray(5, X, 0);
  fails += Test_N_VCloneVectorArray(5, X, length, 0);

  /* Clone additional vectors for testing */
  Y = N_VClone(X);
  if (Y == NULL) {
    N_VDestroy(W);
    N_VDestroy(X);
    printf("FAIL: Unable to create a new vector \n\n");
    return(1);
  }

  Z = N_VClone(X);
  if (Z == NULL) {
    N_VDestroy(W);
    N_VDestroy(X);
    N_VDestroy(Y);
    printf("FAIL: Unable to create a new vector \n\n");
    return(1);
  }

  /* Standard vector operation tests */
  printf("\nTesting standard vector operations:\n\n");

  fails += Test_N_VConst(X, length, 0);
  fails += Test_N_VLinearSum(X, Y, Z, length, 0);
  fails += Test_N_VProd(X, Y, Z, length, 0);
  fails += Test_N_VDiv(X, Y, Z, length, 0);
  fails += Test_N_VScale(X, Z, length, 0);
  fails += Test_N_VAbs(X, Z, length, 0);
  fails += Test_N_VInv(X, Z, length, 0);
  fails += Test_N_VAddConst(X, Z, length, 0);
  fails += Test_N_VDotProd(X, Y, length, 0);
  fails += Test_N_VMaxNorm(X, length, 0);
  fails += Test_N_VWrmsNorm(X, Y, length, 0);
  fails += Test_N_VWrmsNormMask(X, Y, Z, length, 0);
  fails += Test_N_VMin(X, length, 0);
  fails += Test_N_VWL2Norm(X, Y, length, 0);
  fails += Test_N_VL1Norm(X, length, 0);
  fails += Test_N_VCompare(X, Z, length, 0);
  fails += Test_N_VInvTest(X, Z, length, 0);
  fails += Test_N_VConstrMask(X, Y, Z, length, 0);
  fails += Test_N_VMinQuotient(X, Y, length, 0);

  /* Fused and vector array operations tests (disabled) */
  printf("\nTesting fused and vector array operations (disabled):\n\n");

  /* create vector and disable all fused and vector array operations */
  /* NOTE(review): U is passed to N_VEnableFusedOps_OpenMPDEV before the
     NULL check below — presumably that routine tolerates NULL; confirm. */
  U = N_VNew_OpenMPDEV(length);
  retval = N_VEnableFusedOps_OpenMPDEV(U, SUNFALSE);
  if (U == NULL || retval != 0) {
    N_VDestroy(W);
    N_VDestroy(X);
    N_VDestroy(Y);
    N_VDestroy(Z);
    printf("FAIL: Unable to create a new vector \n\n");
    return(1);
  }

  /* fused operations */
  fails += Test_N_VLinearCombination(U, length, 0);
  fails += Test_N_VScaleAddMulti(U, length, 0);
  fails += Test_N_VDotProdMulti(U, length, 0);

  /* vector array operations */
  fails += Test_N_VLinearSumVectorArray(U, length, 0);
  fails += Test_N_VScaleVectorArray(U, length, 0);
  fails += Test_N_VConstVectorArray(U, length, 0);
  fails += Test_N_VWrmsNormVectorArray(U, length, 0);
  fails += Test_N_VWrmsNormMaskVectorArray(U, length, 0);
  fails += Test_N_VScaleAddMultiVectorArray(U, length, 0);
  fails += Test_N_VLinearCombinationVectorArray(U, length, 0);

  /* Fused and vector array operations tests (enabled) */
  printf("\nTesting fused and vector array operations (enabled):\n\n");

  /* create vector and enable all fused and vector array operations */
  V = N_VNew_OpenMPDEV(length);
  retval = N_VEnableFusedOps_OpenMPDEV(V, SUNTRUE);
  if (V == NULL || retval != 0) {
    N_VDestroy(W);
    N_VDestroy(X);
    N_VDestroy(Y);
    N_VDestroy(Z);
    N_VDestroy(U);
    printf("FAIL: Unable to create a new vector \n\n");
    return(1);
  }

  /* fused operations */
  fails += Test_N_VLinearCombination(V, length, 0);
  fails += Test_N_VScaleAddMulti(V, length, 0);
  fails += Test_N_VDotProdMulti(V, length, 0);

  /* vector array operations */
  fails += Test_N_VLinearSumVectorArray(V, length, 0);
  fails += Test_N_VScaleVectorArray(V, length, 0);
  fails += Test_N_VConstVectorArray(V, length, 0);
  fails += Test_N_VWrmsNormVectorArray(V, length, 0);
  fails += Test_N_VWrmsNormMaskVectorArray(V, length, 0);
  fails += Test_N_VScaleAddMultiVectorArray(V, length, 0);
  fails += Test_N_VLinearCombinationVectorArray(V, length, 0);

  /* local reduction operations */
  printf("\nTesting local reduction operations:\n\n");

  fails += Test_N_VDotProdLocal(X, Y, length, 0);
  fails += Test_N_VMaxNormLocal(X, length, 0);
  fails += Test_N_VMinLocal(X, length, 0);
  fails += Test_N_VL1NormLocal(X, length, 0);
  fails += Test_N_VWSqrSumLocal(X, Y, length, 0);
  fails += Test_N_VWSqrSumMaskLocal(X, Y, Z, length, 0);
  fails += Test_N_VInvTestLocal(X, Z, length, 0);
  fails += Test_N_VConstrMaskLocal(X, Y, Z, length, 0);
  fails += Test_N_VMinQuotientLocal(X, Y, length, 0);

  /* Free vectors */
  N_VDestroy(U);
  N_VDestroy(V);
  N_VDestroy(W);
  N_VDestroy(X);
  N_VDestroy(Y);
  N_VDestroy(Z);

  /* Print result */
  if (fails) {
    printf("FAIL: NVector module failed %i tests \n\n", fails);
  } else {
    printf("SUCCESS: NVector module passed all tests \n\n");
  }

  return(fails);
}

/* ----------------------------------------------------------------------
 * OpenMPDEV specific tests
 * --------------------------------------------------------------------*/

/* --------------------------------------------------------------------
 * Test for the OpenMPDEV N_Vector N_VMake_OpenMPDEV function. Requires
 * N_VConst to check data.
*/ int Test_N_VMake_OpenMPDEV(N_Vector X, sunindextype length, int myid) { int failure = 0; realtype *h_data, *d_data; N_Vector Y; N_VConst(NEG_HALF, X); N_VCopyFromDevice_OpenMPDEV(X); h_data = N_VGetHostArrayPointer_OpenMPDEV(X); d_data = N_VGetDeviceArrayPointer_OpenMPDEV(X); /* Case 1: h_data and d_data are not null */ Y = N_VMake_OpenMPDEV(length, h_data, d_data); if (Y == NULL) { printf(">>> FAILED test -- N_VMake_OpenMPDEV, Proc %d \n", myid); printf(" Vector is NULL \n \n"); return(1); } if (N_VGetHostArrayPointer_OpenMPDEV(Y) == NULL) { printf(">>> FAILED test -- N_VMake_OpenMPDEV, Proc %d \n", myid); printf(" Vector host data == NULL \n \n"); N_VDestroy(Y); return(1); } if (N_VGetDeviceArrayPointer_OpenMPDEV(Y) == NULL) { printf(">>> FAILED test -- N_VMake_OpenMPDEV, Proc %d \n", myid); printf(" Vector device data -= NULL \n \n"); N_VDestroy(Y); return(1); } failure += check_ans(NEG_HALF, Y, length); if (failure) { printf(">>> FAILED test -- N_VMake_OpenMPDEV Case 1, Proc %d \n", myid); printf(" Failed N_VConst check \n \n"); N_VDestroy(Y); return(1); } if (myid == 0) { printf("PASSED test -- N_VMake_OpenMPDEV Case 1 \n"); } N_VDestroy(Y); /* Case 2: data is null */ Y = N_VMake_OpenMPDEV(length, NULL, NULL); if (Y != NULL) { printf(">>> FAILED test -- N_VMake_OpenMPDEV Case 2, Proc %d \n", myid); printf(" Vector is not NULL \n \n"); return(1); } if (myid == 0) { printf("PASSED test -- N_VMake_OpenMPDEV Case 2 \n"); } N_VDestroy(Y); return(failure); } /* ---------------------------------------------------------------------- * Implementation specific utility functions for vector tests * --------------------------------------------------------------------*/ int check_ans(realtype ans, N_Vector X, sunindextype local_length) { int failure = 0; sunindextype i; realtype *Xdata; N_VCopyFromDevice_OpenMPDEV(X); Xdata = N_VGetHostArrayPointer_OpenMPDEV(X); /* check vector data */ for (i = 0; i < local_length; i++) { failure += SUNRCompare(Xdata[i], ans); } return 
(failure > ZERO) ? (1) : (0); } booleantype has_data(N_Vector X) { realtype *Xdata = N_VGetHostArrayPointer_OpenMPDEV(X); if (Xdata == NULL) return SUNFALSE; else return SUNTRUE; } void set_element(N_Vector X, sunindextype i, realtype val) { set_element_range(X, i, i, val); } void set_element_range(N_Vector X, sunindextype is, sunindextype ie, realtype val) { realtype *xdev; int dev; sunindextype i; xdev = N_VGetDeviceArrayPointer_OpenMPDEV(X); dev = omp_get_default_device(); /* set elements [is,ie] of the data array */ #pragma omp target map(to:is,ie,val) is_device_ptr(xdev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) { for(i = is; i <= ie; i++) xdev[i] = val; } } realtype get_element(N_Vector X, sunindextype i) { realtype *data; N_VCopyFromDevice_OpenMPDEV(X); data = N_VGetHostArrayPointer_OpenMPDEV(X); return data[i]; } double max_time(N_Vector X, double time) { /* not running in parallel, just return input time */ return(time); } void sync_device(N_Vector x) { /* not running on DEV, just return */ return; }
GB_unaryop__abs_uint64_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__abs_uint64_uint16
// op(A') function: GB_tran__abs_uint64_uint16

// C type: uint64_t
// A type: uint16_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
// ABS of an unsigned value is the identity, so the op is a plain copy.
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, aij) \
    uint64_t z = (uint64_t) aij ;

// cij = op (cast (aij))
// One full scalar operation: load, typecast uint16 -> uint64, apply op.
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT64 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise pass over anz entries; Cx and Ax may alias because each
// iteration reads and writes only position p.
GrB_Info GB_unop__abs_uint64_uint16
(
    uint64_t *Cx, // Cx and Ax may be aliased
    uint16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose loop lives in the shared template
// GB_unaryop_transpose.c, which consumes the GB_* macros defined above
// (Rowcounts/Iter/A_slice/naslice are used by that included template).
GrB_Info GB_tran__abs_uint64_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
transpose.c
/*----------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>
/*----------------------------------------------------------------*/
#include "transpose.h"
/*----------------------------------------------------------------*/
/* Local transpose (RICO) */
/*----------------------------------------------------------------*/

/* In-place transpose of the leading end x end submatrix of the n x n
 * row-major complex matrix X, using nt OpenMP threads.
 * Each unordered pair {i,j} is swapped exactly once (the inner loop
 * starts at j = i), so parallel iterations touch disjoint elements
 * and there is no data race; the i == j case swaps an element with
 * itself, which is harmless.
 * NOTE(review): the 'start' and 'block_size' parameters are unused
 * here -- confirm whether that is intentional. */
void hcl_transpose_block_hom (
    fftw_complex* X,
    const int start,
    const int end,
    const int n,
    const unsigned int nt,
    const int block_size,
    const unsigned int verbosity)
{
    int i, j;
#pragma omp parallel for shared(X) private(i, j) num_threads(nt)
    for (i = 0; i < end; i++) {
        for (j = i; j < end; j++) {
            if (verbosity) {
                printf(
                    "[%d]: i = %d j = %d\n",
                    omp_get_thread_num(), i, j);
            }
            /* element-wise swap of X[i][j] and X[j][i] (re and im parts) */
            double tmpr = X[i*n + j][0];
            double tmpi = X[i*n + j][1];
            X[i*n + j][0] = X[j*n + i][0];
            X[i*n + j][1] = X[j*n + i][1];
            X[j*n + i][0] = tmpr;
            X[j*n + i][1] = tmpi;
        }
    }
}

/* Serial driver: transposes successive end-sized chunks of X by calling
 * hcl_transpose_block_hom at flat offsets k = 0, end, 2*end, ...
 * NOTE(review): the offset &X[k] advances by k elements, not to the
 * diagonal block at k*n + k -- confirm the intended submatrix layout.
 * The parallel pragma is deliberately commented out. */
void hcl_transpose_homogeneous (
    fftw_complex* X,
    const int start,
    const int end,
    const int n,
    const unsigned int nt,
    const int block_size,
    const int *rowd,
    const unsigned int verbosity)
{
    int k;
    //#pragma omp parallel for shared(X) private(k) num_threads(nt)
    for (k = 0; k < n; k += end) {
        if (verbosity) {
            printf(
                "[%d]: k = %d\n",
                omp_get_thread_num(), k);
        }
        hcl_transpose_block_hom (&X[k], start, end, n, nt, block_size, verbosity);
    }
}

/*----------------------------------------------------------------*/

/* Swap one block_size x block_size tile rooted at X1 with the mirrored
 * tile rooted at X2, transposing element positions ((p,q) <-> (q,p)).
 * min(n-i, block_size) / min(n-j, block_size) clip partial tiles at the
 * matrix edge.
 * NOTE(review): when X1 == X2 (a diagonal tile) every off-diagonal pair
 * inside the tile is swapped twice, i.e. the tile is left unchanged --
 * confirm whether diagonal tiles need special handling. */
void hcl_local_transpose_scalar_block(
    fftw_complex* X1, fftw_complex* X2,
    const int i, const int j,
    const int n, const int block_size,
    const unsigned int verbosity)
{
    int p, q;
    for (p = 0; p < min(n-i,block_size); p++) {
        for (q = 0; q < min(n-j,block_size); q++) {
            if (verbosity)
                printf(
                    "%d: i %d, j %d, p %d, q %d, index1 %d index2 %d\n",
                    omp_get_thread_num(), i, j, p, q,
                    i*n+j + p*n+q, j*n+i + q*n+p);
            double tmpr = X1[p*n+q][0];
            double tmpi = X1[p*n+q][1];
            X1[p*n+q][0] = X2[q*n+p][0];
            X1[p*n+q][1] = X2[q*n+p][1];
            X2[q*n+p][0] = tmpr;
            X2[q*n+p][1] = tmpi;
        }
    }
}

/* Tiled in-place transpose of the leading end x end region of X.
 * NOTE(review): BOTH tile loops run over the full range [0, end), so the
 * tile pair (I,J) is swapped by iteration (I,J) AND swapped back by
 * iteration (J,I) -- a net no-op -- and with the outer loop parallelized
 * two threads write the same elements concurrently (data race). Compare
 * hcl_transpose_block_hom, where the inner loop starts at j = i; the
 * inner loop here likely should start at j = i (with diagonal tiles
 * special-cased). Confirm intended semantics before relying on this. */
void hcl_transpose_block(
    fftw_complex* X,
    const int start,
    const int end,
    const int n,
    const unsigned int nt,
    const int block_size,
    const unsigned int verbosity)
{
    int i, j;
#pragma omp parallel for shared(X) private(i, j) num_threads(nt)
    for (i = 0; i < end; i += block_size) {
        for (j = 0; j < end; j += block_size) {
            if (verbosity)
                printf(
                    "%d: i %d, j %d\n",
                    omp_get_thread_num(), i, j);
            hcl_local_transpose_scalar_block(
                &X[start + i*n + j],
                &X[start + j*n + i],
                i, j, n, block_size, verbosity);
        }
    }
}
/*----------------------------------------------------------------*/
ellipticBlockPartialAxCoeffHex3D_N3.c
/* Block (3-field) partial Ax kernel for hexahedral spectral elements:
 * for each listed element and each of the 3 solution fields f it computes
 *   Aq_f = lambda0_f * (weak Laplacian of q_f via geometric factors G..)
 *        + lambda1_f * GwJ * q_f
 * where D and S are p_Nq x p_Nq 1-D operator matrices applied along the
 * r/s/t tensor directions.
 * NOTE(review): S is presumably the (transposed) differentiation matrix
 * used for the first derivative stage and D for the weak-form stage --
 * confirm against the kernel generator.
 * offset strides between the 3 fields in q/Aq/lambda; loffset strides
 * between the lambda components per field. */
extern "C" void FUNC(ellipticBlockPartialAxCoeffHex3D_N3)(const dlong & Nelements,
                                                          const dlong & offset,
                                                          const dlong & loffset,
                                                          const dlong* __restrict__ elementList,
                                                          const dfloat* __restrict__ ggeo,
                                                          const dfloat* __restrict__ D,
                                                          const dfloat* __restrict__ S,
                                                          const dfloat* __restrict__ lambda,
                                                          const dfloat* __restrict__ q,
                                                          dfloat* __restrict__ Aq )
{
  /* per-element scratch: field values and the three "G * grad q" products */
  dfloat s_q[3][p_Nq][p_Nq][p_Nq];
  dfloat s_Gqr[3][p_Nq][p_Nq][p_Nq];
  dfloat s_Gqs[3][p_Nq][p_Nq][p_Nq];
  dfloat s_Gqt[3][p_Nq][p_Nq][p_Nq];

  dfloat s_D[p_Nq][p_Nq];
  dfloat s_S[p_Nq][p_Nq];

  /* local copies of the 1-D operator matrices */
  for(int j = 0; j < p_Nq; ++j)
    for(int i = 0; i < p_Nq; ++i) {
      s_D[j][i] = D[j * p_Nq + i];
      s_S[j][i] = S[j * p_Nq + i];
    }

#ifdef __NEKRS__OMP__
  #pragma omp parallel for private(s_q, s_Gqr, s_Gqs, s_Gqt)
#endif
  for(dlong e = 0; e < Nelements; ++e) {
    const dlong element = elementList[e];

    /* gather the 3 fields of this element into s_q */
    for(int k = 0; k < p_Nq; k++)
      for(int j = 0; j < p_Nq; ++j)
        for(int i = 0; i < p_Nq; ++i) {
          const dlong base = i + j * p_Nq + k * p_Nq * p_Nq + element * p_Np;
          s_q[0][k][j][i] = q[base + 0 * offset];
          s_q[1][k][j][i] = q[base + 1 * offset];
          s_q[2][k][j][i] = q[base + 2 * offset];
        }

    /* first pass: derivatives along r/s/t, contracted with the metric
       terms G00..G22 and scaled by the per-field lambda0 coefficient */
    for(int k = 0; k < p_Nq; ++k)
      for(int j = 0; j < p_Nq; ++j)
        for(int i = 0; i < p_Nq; ++i) {
          const dlong gbase = element * p_Nggeo * p_Np + k * p_Nq * p_Nq + j * p_Nq + i;
          const dfloat r_G00 = ggeo[gbase + p_G00ID * p_Np];
          const dfloat r_G01 = ggeo[gbase + p_G01ID * p_Np];
          const dfloat r_G11 = ggeo[gbase + p_G11ID * p_Np];
          const dfloat r_G12 = ggeo[gbase + p_G12ID * p_Np];
          const dfloat r_G02 = ggeo[gbase + p_G02ID * p_Np];
          const dfloat r_G22 = ggeo[gbase + p_G22ID * p_Np];
          const dlong id = element * p_Np + k * p_Nq * p_Nq + j * p_Nq + i;
          const dfloat r_lam00 = lambda[id + 0 * offset + 0 * loffset];
          const dfloat r_lam10 = lambda[id + 0 * offset + 1 * loffset];
          const dfloat r_lam20 = lambda[id + 0 * offset + 2 * loffset];
          dfloat qr0 = 0.f, qr1 = 0.f, qr2 = 0.f;
          dfloat qs0 = 0.f, qs1 = 0.f, qs2 = 0.f;
          dfloat qt0 = 0.f, qt1 = 0.f, qt2 = 0.f;
          /* 1-D contractions with s_S along each tensor direction */
          for(int m = 0; m < p_Nq; m++) {
            qr0 += s_S[m][i] * s_q[0][k][j][m];
            qs0 += s_S[m][j] * s_q[0][k][m][i];
            qt0 += s_S[m][k] * s_q[0][m][j][i];
            //
            qr1 += s_S[m][i] * s_q[1][k][j][m];
            qs1 += s_S[m][j] * s_q[1][k][m][i];
            qt1 += s_S[m][k] * s_q[1][m][j][i];
            qr2 += s_S[m][i] * s_q[2][k][j][m];
            qs2 += s_S[m][j] * s_q[2][k][m][i];
            qt2 += s_S[m][k] * s_q[2][m][j][i];
          }

          /* symmetric metric contraction: (Gqr,Gqs,Gqt) = G * (qr,qs,qt) */
          dfloat Gqr0 = r_G00 * qr0 + r_G01 * qs0 + r_G02 * qt0;
          dfloat Gqs0 = r_G01 * qr0 + r_G11 * qs0 + r_G12 * qt0;
          dfloat Gqt0 = r_G02 * qr0 + r_G12 * qs0 + r_G22 * qt0;

          dfloat Gqr1 = r_G00 * qr1 + r_G01 * qs1 + r_G02 * qt1;
          dfloat Gqs1 = r_G01 * qr1 + r_G11 * qs1 + r_G12 * qt1;
          dfloat Gqt1 = r_G02 * qr1 + r_G12 * qs1 + r_G22 * qt1;

          dfloat Gqr2 = r_G00 * qr2 + r_G01 * qs2 + r_G02 * qt2;
          dfloat Gqs2 = r_G01 * qr2 + r_G11 * qs2 + r_G12 * qt2;
          dfloat Gqt2 = r_G02 * qr2 + r_G12 * qs2 + r_G22 * qt2;

          s_Gqr[0][k][j][i] = r_lam00 * Gqr0;
          s_Gqs[0][k][j][i] = r_lam00 * Gqs0;
          s_Gqt[0][k][j][i] = r_lam00 * Gqt0;

          s_Gqr[1][k][j][i] = r_lam10 * Gqr1;
          s_Gqs[1][k][j][i] = r_lam10 * Gqs1;
          s_Gqt[1][k][j][i] = r_lam10 * Gqt1;

          s_Gqr[2][k][j][i] = r_lam20 * Gqr2;
          s_Gqs[2][k][j][i] = r_lam20 * Gqs2;
          s_Gqt[2][k][j][i] = r_lam20 * Gqt2;
        }

    /* second pass: weak divergence (contract with s_D) plus the
       lambda1-weighted mass term GwJ * q, written straight to Aq */
    for(int k = 0; k < p_Nq; k++)
      for(int j = 0; j < p_Nq; ++j)
        for(int i = 0; i < p_Nq; ++i) {
          const dlong gbase = element * p_Nggeo * p_Np + k * p_Nq * p_Nq + j * p_Nq + i;
          const dfloat r_GwJ = ggeo[gbase + p_GWJID * p_Np];
          const dlong id = element * p_Np + k * p_Nq * p_Nq + j * p_Nq + i;
          const dfloat r_lam01 = lambda[id + 1 * offset + 0 * loffset];
          const dfloat r_lam11 = lambda[id + 1 * offset + 1 * loffset];
          const dfloat r_lam21 = lambda[id + 1 * offset + 2 * loffset];

          dfloat r_Aq0 = r_GwJ * r_lam01 * s_q[0][k][j][i];
          dfloat r_Aq1 = r_GwJ * r_lam11 * s_q[1][k][j][i];
          dfloat r_Aq2 = r_GwJ * r_lam21 * s_q[2][k][j][i];

          dfloat r_Aqr0 = 0.f, r_Aqs0 = 0.f, r_Aqt0 = 0.f;
          dfloat r_Aqr1 = 0.f, r_Aqs1 = 0.f, r_Aqt1 = 0.f;
          dfloat r_Aqr2 = 0.f, r_Aqs2 = 0.f, r_Aqt2 = 0.f;

          for(int m = 0; m < p_Nq; m++) {
            r_Aqr0 += s_D[m][i] * s_Gqr[0][k][j][m];
            r_Aqr1 += s_D[m][i] * s_Gqr[1][k][j][m];
            r_Aqr2 += s_D[m][i] * s_Gqr[2][k][j][m];
          }

          for(int m = 0; m < p_Nq; m++) {
            r_Aqs0 += s_D[m][j] * s_Gqs[0][k][m][i];
            r_Aqs1 += s_D[m][j] * s_Gqs[1][k][m][i];
            r_Aqs2 += s_D[m][j] * s_Gqs[2][k][m][i];
          }

          for(int m = 0; m < p_Nq; m++) {
            r_Aqt0 += s_D[m][k] * s_Gqt[0][m][j][i];
            r_Aqt1 += s_D[m][k] * s_Gqt[1][m][j][i];
            r_Aqt2 += s_D[m][k] * s_Gqt[2][m][j][i];
          }

          Aq[id + 0 * offset] = r_Aqr0 + r_Aqs0 + r_Aqt0 + r_Aq0;
          Aq[id + 1 * offset] = r_Aqr1 + r_Aqs1 + r_Aqt1 + r_Aq1;
          Aq[id + 2 * offset] = r_Aqr2 + r_Aqs2 + r_Aqt2 + r_Aq2;
        }
  }
}
move_particle_utility_pfem2.h
/* ============================================================================== KratosIncompressibleFluidApplication A library based on: Kratos A General Purpose Software for Multi-Physics Finite Element Analysis Version 1.0 (Released on march 05, 2007). Copyright 2007 Pooyan Dadvand, Riccardo Rossi pooyan@cimne.upc.edu rrossi@cimne.upc.edu - CIMNE (International Center for Numerical Methods in Engineering), Gran Capita' s/n, 08034 Barcelona, Spain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following condition: Distribution of this code for any commercial purpose is permissible ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS. The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
============================================================================== */ // // Project Name: Kratos // Last Modified by: $Author: pbecker $ // Date: $Date: 2011-09-21 12:30:32 $ // Revision: $Revision: 1.0 $ // // #if !defined(KRATOS_MOVE_PARTICLE_UTILITY_PFEM2_INCLUDED) #define KRATOS_MOVE_PARTICLE_UTILITY_FLUID_PFEM2_INCLUDED // System includes #include <string> #include <iostream> #include <algorithm> // External includes // Project includes #include "includes/define.h" #include "includes/node.h" /// #include "includes/dof.h" #include "includes/variables.h" #include "includes/cfd_variables.h" #include "includes/deprecated_variables.h" #include "containers/array_1d.h" #include "containers/data_value_container.h" #include "includes/mesh.h" #include "utilities/math_utils.h" #include "processes/node_erase_process.h" /// #include "utilities/geometry_utilities.h" #include "includes/model_part.h" #include "spatial_containers/spatial_containers.h" #include "spatial_containers/bounding_box.h" #include "spatial_containers/cell.h" #include "spatial_containers/bins_dynamic_objects.h" #include "utilities/spatial_containers_configure.h" #include "geometries/line_2d_2.h" #include "geometries/triangle_2d_3.h" #include "geometries/triangle_3d_3.h" #include "geometries/point.h" #include "pfem_2_application.h" #include "pfem_particle_fluidonly.h" //#include "utilities/enrich_2d_2dofs.h" #include "utilities/enrichment_utilities.h" #include "utilities/openmp_utils.h" #include "time.h" //#include "processes/process.h" namespace Kratos { //this class is to be modified by the user to customize the interpolation process template< unsigned int TDim> class MoveParticleUtilityPFEM2 { public: typedef SpatialContainersConfigure<TDim> Configure; typedef typename Configure::PointType PointType; //typedef PointType::CoordinatesArrayType CoordinatesArrayType; typedef typename Configure::ContainerType ContainerType; //typedef Configure::PointerType PointerType; typedef typename 
Configure::IteratorType              IteratorType;
    typedef typename Configure::ResultContainerType  ResultContainerType;
    //typedef Configure::ResultPointerType           ResultPointerType;
    typedef typename Configure::ResultIteratorType   ResultIteratorType;
    // double-sized per-element list of particle pointers (first half: initial
    // positions, second half: final positions after convection)
    typedef PointerVector< PFEM_Particle_Fluid, PFEM_Particle_Fluid*, std::vector<PFEM_Particle_Fluid*> > ParticlePointerVector;
    //typedef Configure::ContactPairType            ContactPairType;
    //typedef Configure::ContainerContactType       ContainerContactType;
    //typedef Configure::IteratorContactType        IteratorContactType;
    //typedef Configure::PointerContactType         PointerContactType;
    //typedef Configure::PointerTypeIterator        PointerTypeIterator;

    KRATOS_CLASS_POINTER_DEFINITION(MoveParticleUtilityPFEM2);

    /// Constructor: renumbers elements sequentially, computes nodal/elemental
    /// MEAN_SIZE, allocates the global particle pool and seeds the initial
    /// particles element by element (interpolating VELOCITY and DISTANCE
    /// from the mesh nodes).
    //template<unsigned int TDim>
    MoveParticleUtilityPFEM2(ModelPart& model_part, int maximum_number_of_particles)
        : mr_model_part(model_part) , mmaximum_number_of_particles(maximum_number_of_particles)
    {
        std::cout << "initializing moveparticle utility" << std::endl;

        Check();

        //tools to move the domain, in case we are using a moving domain approach.
        mintialized_transfer_tool=false;
        mcalculation_domain_complete_displacement=ZeroVector(3);
        mcalculation_domain_added_displacement=ZeroVector(3);

        //storing water and air density and their inverses, just in case it is needed for the streamline integration
        ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
        mDENSITY_AIR = CurrentProcessInfo[DENSITY_AIR];
        mDENSITY_WATER = CurrentProcessInfo[DENSITY_WATER];

        //mmaximum_number_of_particles = maximum_number_of_particles;

        //loop in elements to change their ID to their position in the array. Easier to get information later.
        //DO NOT PARALELIZE THIS! IT MUST BE SERIAL!!!!!!!!!!!!!!!!!!!!!!
        ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
        for(unsigned int ii=0; ii<mr_model_part.Elements().size(); ii++)
        {
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
            ielem->SetId(ii+1);
        }
        mlast_elem_id= (mr_model_part.ElementsEnd()-1)->Id();
        int node_id=0;
        // we look for the smallest edge. could be used as a weighting function when going lagrangian->eulerian instead of traditional shape functions(method currently used)
        ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
        vector<unsigned int> node_partition;
        #ifdef _OPENMP
            int number_of_threads = omp_get_max_threads();
        #else
            int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);

        // per-node: average distance to the neighbouring nodes -> MEAN_SIZE
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator pnode = inodebegin+ii;
                array_1d<double,3> position_node;
                double distance=0.0;
                position_node = pnode->Coordinates();
                WeakPointerVector< Node<3> >& rneigh = pnode->GetValue(NEIGHBOUR_NODES);
                //we loop all the nodes to check all the edges
                const double number_of_neighbours = double(rneigh.size());
                for( WeakPointerVector<Node<3> >::iterator inode = rneigh.begin(); inode!=rneigh.end(); inode++)
                {
                    array_1d<double,3> position_difference;
                    position_difference = inode->Coordinates() - position_node;
                    double current_distance= sqrt(pow(position_difference[0],2)+pow(position_difference[1],2)+pow(position_difference[2],2));
                    //if (current_distance>distance)
                    //    distance=current_distance;
                    distance += current_distance / number_of_neighbours;
                }
                //and we save the largest edge.
                // NOTE(review): despite the comment above, this stores the MEAN
                // neighbour distance, not the largest edge (the max variant is
                // commented out) -- confirm intended semantics.
                pnode->FastGetSolutionStepValue(MEAN_SIZE)=distance;

                node_id=pnode->GetId();
            }
        }
        mlast_node_id=node_id;

        //we also calculate the element mean size in the same way, for the courant number
        //also we set the right size to the LHS column for the pressure enrichments, in order to recover correctly the enrichment pressure
        vector<unsigned int> element_partition;
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

        //before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element.
        // per-element: MEAN_SIZE = length of the SHORTEST edge of the element
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;

                double elem_size;
                array_1d<double,3> Edge(3,0.0);
                Edge = ielem->GetGeometry()[1].Coordinates() - ielem->GetGeometry()[0].Coordinates();
                elem_size = Edge[0]*Edge[0];
                for (unsigned int d = 1; d < TDim; d++)
                    elem_size += Edge[d]*Edge[d];

                for (unsigned int i = 2; i < (TDim+1); i++)
                    for(unsigned int j = 0; j < i; j++)
                    {
                        Edge = ielem->GetGeometry()[i].Coordinates() - ielem->GetGeometry()[j].Coordinates();
                        double Length = Edge[0]*Edge[0];
                        for (unsigned int d = 1; d < TDim; d++)
                            Length += Edge[d]*Edge[d];
                        if (Length < elem_size) elem_size = Length;
                    }
                elem_size = sqrt(elem_size);
                ielem->SetValue(MEAN_SIZE, elem_size);

                //and the matrix column for the enrichments in the pressure.
                if (TDim==3)
                    ielem->SetValue(ENRICH_LHS_ROW_3D, ZeroVector(4));
                //  {
                //  Vector & lhs_enrich = ielem->GetValue(ENRICH_LHS_ROW_3D);
                //  lhs_enrich.resize(4);
                //  lhs_enrich=ZeroVector(4);
                //  }
                else
                    ielem->SetValue(ENRICH_LHS_ROW, ZeroVector(3));
                //KRATOS_WATCH(mElemSize)
            }
        }

        //matrix containing the position of the 4/15/45 particles that we will seed at the beggining
        BoundedMatrix<double, 5*(1+TDim), 3 > pos;
        BoundedMatrix<double, 5*(1+TDim), (1+TDim) > N;

        int particle_id=0;
        mnelems = mr_model_part.Elements().size();

        std::cout << "about to resize vectors" << std::endl;

        //setting the right size to the vector containing the particles assigned to each element
        //particles vector. this vector contains ALL the particles in the simulation.
        mparticles_vector.resize(mnelems*mmaximum_number_of_particles);

        //and this vector contains the current number of particles that are in each element (currently zero)
        mnumber_of_particles_in_elems.resize(mnelems);
        mnumber_of_particles_in_elems=ZeroVector(mnelems);

        //when moving the particles, an auxiliary vector is necessary (to store the previous number)
        mnumber_of_particles_in_elems_aux.resize(mnelems);

        //each element will have a list of pointers to all the particles that are inside.
        //this vector contains the pointers to the vector of (particle) pointers of each element.
        mpointers_to_particle_pointers_vectors.resize(mnelems);
        //int artz;
        //std::cin >> artz;
        int i_int=0; //careful! it's not the id, but the position inside the array!
        std::cout << "about to create particles" << std::endl;
        //now we seed: LOOP IN ELEMENTS
        //using loop index, DO NOT paralelize this! change lines :
        //mparticles_in_elems_pointers((ii*mmaximum_number_of_particles)+mparticles_in_elems_integers(ii)) = pparticle;
        //and the next one
        for(unsigned int ii=0; ii<mr_model_part.Elements().size(); ii++)
        {
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;

            (ielem->GetValue(FLUID_PARTICLE_POINTERS)) = ParticlePointerVector( mmaximum_number_of_particles*2);//, &firstparticle );
            ParticlePointerVector&  particle_pointers = (ielem->GetValue(FLUID_PARTICLE_POINTERS));
            //now we link the mpointers_to_particle_pointers_vectors to the corresponding element
            mpointers_to_particle_pointers_vectors(ii) = &particle_pointers;
            //now we resize the vector of particle pointers. it is double sized because we move the particles from an initial position (first half) to a final position (second half).
            //for(int j=0; j<(mmaximum_number_of_particles*2); j++)
            //    particle_pointers.push_back(&firstparticle);
            int & number_of_particles = ielem->GetValue(NUMBER_OF_FLUID_PARTICLES);
            number_of_particles=0;
            //int & number_of_water_particles = ielem->GetValue(NUMBER_OF_WATER_PARTICLES);

            Geometry< Node<3> >& geom = ielem->GetGeometry();
            //unsigned int elem_id = ielem->Id();
            //mareas_vector[i_int]=CalculateArea(geom); UNUSED SO COMMENTED
            ComputeGaussPointPositions_initial(geom, pos, N); //we also have the standard (4), and 45
            //now we seed the particles in the current element
            for (unsigned int j = 0; j < pos.size1(); j++)
            {
                ++particle_id;

                PFEM_Particle_Fluid& pparticle =mparticles_vector[particle_id-1];
                pparticle.X()=pos(j,0);
                pparticle.Y()=pos(j,1);
                pparticle.Z()=pos(j,2);

                pparticle.GetEraseFlag()=false;

                array_1d<float, 3 > & vel = pparticle.GetVelocity();
                float & distance= pparticle.GetDistance();
                noalias(vel) = ZeroVector(3);
                distance=0.0;

                // interpolate nodal VELOCITY/DISTANCE at the seed position
                for (unsigned int k = 0; k < (TDim+1); k++)
                {
                    noalias(vel) += (N(j, k) * geom[k].FastGetSolutionStepValue(VELOCITY));
                    distance +=  N(j, k) * geom[k].FastGetSolutionStepValue(DISTANCE);
                }

                if( ii % 100000 == 0)
                    KRATOS_WATCH(particle_id);

                // collapse the interpolated level-set value to +/-1
                if (distance<=0.0)
                {
                    distance=-1.0;
                }
                //else if(distance<2.0)
                //{
                //    distance=1.0;
                //}
                else
                {
                    distance=1.0;
                }

                particle_pointers(j) = &pparticle;
                number_of_particles++ ;
            }

            ++i_int;
        }

        bool nonzero_mesh_velocity = false;
        //seeing if we have to use the mesh_velocity or not
        for(ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
            inode!=mr_model_part.NodesEnd(); inode++)
        {
            const array_1d<double, 3 > velocity = inode->FastGetSolutionStepValue(MESH_VELOCITY);
            for(unsigned int i = 0; i!=3; i++)
            {
                if (fabs(velocity[i])>1.0e-9)
                    nonzero_mesh_velocity=true;
            }
            if( nonzero_mesh_velocity==true)
                break;
        }

        if ( nonzero_mesh_velocity==true)
            muse_mesh_velocity_to_convect = true; // if there is mesh velocity, then we have to take it into account when moving the particles
        else
            muse_mesh_velocity_to_convect = false; //otherwise, we can avoid reading the values since we know it is zero everywhere (to save time!)

        m_nparticles=particle_id; //we save the last particle created as the total number of particles we have. For the moment this is true.
        KRATOS_WATCH(m_nparticles);
        //KRATOS_WATCH(mlast_elem_id);
        mparticle_printing_tool_initialized=false;
        //std::cin >> artz;
    }

    ~MoveParticleUtilityPFEM2()
    {}

    /// Builds the dynamic bins search structure over the calculation-domain
    /// elements (used later to locate which element contains a particle).
    void MountBin()
    {
        KRATOS_TRY

        //copy the elements to a new container, as the list will
        //be shuffled duringthe construction of the tree
        ContainerType& rElements = mr_model_part.ElementsArray();
        IteratorType it_begin = rElements.begin();
        IteratorType it_end = rElements.end();
        //const int number_of_elem = rElements.size();
        typename BinsObjectDynamic<Configure>::Pointer paux = typename BinsObjectDynamic<Configure>::Pointer(new BinsObjectDynamic<Configure>(it_begin, it_end ) );
        paux.swap(mpBinsObjectDynamic);
        //BinsObjectDynamic<Configure>  mpBinsObjectDynamic(it_begin, it_end );

        std::cout << "finished mounting Bins" << std::endl;

        KRATOS_CATCH("")
    }

    //TOOL TO TRANSFER INFORMATION INITIALLY FROM ONE DOMAIN TO OTHER.
    /// Initializes the topographic-domain transfer tool: stores the offset
    /// between the calculation and topographic domains, builds a bins search
    /// structure over the topographic elements, and (optionally) overwrites
    /// every active particle's data with values interpolated from the
    /// topographic domain.
    /// NOTE(review): method name ("Intialize") and parameter ("ovewrite")
    /// carry typos that are part of the public interface -- left unchanged.
    void IntializeTransferTool(ModelPart* topographic_model_part, array_1d<double, 3 > initial_domains_offset, bool ovewrite_particle_data)
    //mtopographic_model_part(topographic_model_part)
    {
        KRATOS_TRY

        mintialized_transfer_tool=true;
        const unsigned int max_results = 1000;
        std::cout << "initializing transfer utility" << std::endl;
        ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
        mcalculation_domain_complete_displacement=initial_domains_offset;

        mtopographic_model_part_pointer =  topographic_model_part; //copying the pointer.

        //CONSTRUCTING BIN STRUCTURE
        ContainerType& rElements_topo = mtopographic_model_part_pointer->ElementsArray();
        IteratorType it_begin_topo = rElements_topo.begin();
        IteratorType it_end_topo = rElements_topo.end();
        typename BinsObjectDynamic<Configure>::Pointer paux = typename BinsObjectDynamic<Configure>::Pointer(new BinsObjectDynamic<Configure>(it_begin_topo, it_end_topo ) );
        paux.swap(mpTopographicBinsObjectDynamic);

        std::cout << "Gathering Information From Topographic Domain for the first time" << std::endl;

        if(ovewrite_particle_data==false)
        {
            std::cout << "Not overwriting particle data (assuming correct initial conditions in calculation domain)" << std::endl;
        }
        else
        {
            std::cout << "Replacing particle information using the Topographic domain" << std::endl;
            const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET]; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
            //KRATOS_WATCH(offset)                                                 //(flag managed only by MoveParticles
            ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();

            vector<unsigned int> element_partition;
            #ifdef _OPENMP
                int number_of_threads = omp_get_max_threads();
            #else
                int number_of_threads = 1;
            #endif
            OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

            #pragma omp parallel for
            for(int kkk=0; kkk<number_of_threads; kkk++)
            {
                // per-thread scratch for the bins search
                ResultContainerType results(max_results);
                ResultIteratorType result_begin = results.begin();
                for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
                {
                    if (results.size()!=max_results)
                        results.resize(max_results);
                    //const int & elem_id = ielem->Id();
                    ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
                    Element::Pointer pelement(*it_begin_topo); //we have no idea in which element it might be from the topographic domain, so we just set it in the first element.
                    //Geometry<Node<3> >& geom = ielem->GetGeometry();
                    //array_1d<double,TDim+1> N;

                    ParticlePointerVector& element_particle_pointers = (ielem->GetValue(FLUID_PARTICLE_POINTERS));
                    int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_FLUID_PARTICLES);
                    //std::cout << "elem " << ii << " with " << (unsigned int)number_of_particles_in_elem << " particles" << std::endl;
                    for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
                    {
                        //KRATOS_WATCH(iii)
                        if (iii>mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
                            break;

                        PFEM_Particle_Fluid & pparticle = element_particle_pointers[offset+iii];

                        bool erase_flag= pparticle.GetEraseFlag();
                        if (erase_flag==false)
                        {
                            OverwriteParticleDataUsingTopographicDomain(pparticle,pelement,mcalculation_domain_complete_displacement,result_begin, max_results);
                        }
                    }
                }
            }
        }

        KRATOS_CATCH("")
    }

    //TOOL TO TRANSFER INFORMATION FROM ONE DOMAIN TO OTHER when necessary.
to be don void PreReseedUsingTopographicDomain(const int minimum_number_of_particles, array_1d<double, 3 > domains_added_displacement) //mtopographic_model_part(topographic_model_part) { KRATOS_TRY if(mintialized_transfer_tool==false) KRATOS_THROW_ERROR(std::logic_error, "TRANSFER TOOL NOT INITIALIZED!", ""); const unsigned int max_results = 1000; std::cout << "executing transfer tool" << std::endl; ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); mcalculation_domain_added_displacement = domains_added_displacement; mcalculation_domain_complete_displacement += domains_added_displacement; ContainerType& rElements_topo = mtopographic_model_part_pointer->ElementsArray(); IteratorType it_begin_topo = rElements_topo.begin(); const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET]; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones. //KRATOS_WATCH(offset) //(flag managed only by MoveParticles ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin(); vector<unsigned int> element_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { ResultContainerType results(max_results); ResultIteratorType result_begin = results.begin(); Element::Pointer pelement(*it_begin_topo); //we have no idea in which element it might be from the topographic domain, so we just set it in the first element. 
BoundedMatrix<double, (TDim+1), 3 > pos; BoundedMatrix<double, (TDim+1) , (TDim+1) > N; unsigned int freeparticle=0; //we start with the first position in the particles array for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++) { if (results.size()!=max_results) results.resize(max_results); //const int & elem_id = ielem->Id(); ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; ParticlePointerVector& element_particle_pointers = (ielem->GetValue(FLUID_PARTICLE_POINTERS)); int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_FLUID_PARTICLES); if (number_of_particles_in_elem<(minimum_number_of_particles))// && (ielem->GetGeometry())[0].Y()<0.10 ) { //KRATOS_WATCH("elem with little particles") Geometry< Node<3> >& geom = ielem->GetGeometry(); ComputeGaussPointPositionsForPreReseed(geom, pos, N); //double conductivity = ielem->GetProperties()[CONDUCTIVITY]; //KRATOS_WATCH(conductivity); for (unsigned int j = 0; j < (pos.size1()); j++) //i am dropping the last one, the one in the middle of the element { bool keep_looking = true; while(keep_looking) { if (mparticles_vector[freeparticle].GetEraseFlag()==true) { #pragma omp critical { if (mparticles_vector[freeparticle].GetEraseFlag()==true) { mparticles_vector[freeparticle].GetEraseFlag()=false; keep_looking=false; } } if (keep_looking==false) break; /* else if (freeparticle<(it_end_particle_model_part-1)) freeparticle++; */ else freeparticle++; //break; } else { //if (freeparticle<(it_end_particle_model_part-1)) freeparticle++; //else //break; //we finished the list and we couldnt find a free space } } PFEM_Particle_Fluid pparticle(pos(j,0),pos(j,1),pos(j,2)); /* PFEM_Particle_Fluid & pparticle = mparticles_vector[freeparticle]; pparticle.X() = pos(j,0); pparticle.Y() = pos(j,1); pparticle.Z() = pos(j,2); */ array_1d<double,TDim+1>aux2_N; bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux2_N); if (is_found==false) { KRATOS_WATCH(aux2_N); } 
pparticle.GetEraseFlag()=false;
// Fill the freshly-seeded particle with data taken from the topographic domain
// (NOTE(review): data comes from the topographic domain, not interpolated from
// the current fluid mesh — confirm this is the intended reseeding source).
OverwriteParticleDataUsingTopographicDomain(pparticle,pelement,mcalculation_domain_complete_displacement,result_begin, max_results);
//and we copy it to the array:
mparticles_vector[freeparticle] = pparticle;
element_particle_pointers(offset+number_of_particles_in_elem) = &mparticles_vector[freeparticle];
number_of_particles_in_elem++;
//KRATOS_WATCH(number_of_particles_in_elem);
//KRATOS_WATCH(mparticles_vector[freeparticle])
//KRATOS_WATCH(geom)
}
}
}
}
KRATOS_CATCH("")
}

// Stores in VELOCITY_OVER_ELEM_SIZE, for every element, the modulus of the mean
// nodal velocity divided by the element's MEAN_SIZE. When the mesh moves
// (muse_mesh_velocity_to_convect==true) MESH_VELOCITY is subtracted node by node
// first, so the ratio is based on the convective (relative) velocity.
void CalculateVelOverElemSize()
{
	KRATOS_TRY

	//ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();

	// nodal averaging factor: 1 / (number of nodes per element) = 1/(TDim+1)
	const double nodal_weight = 1.0/ (1.0 + double (TDim) );

	ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();

	vector<unsigned int> element_partition;
	#ifdef _OPENMP
		int number_of_threads = omp_get_max_threads();
	#else
		int number_of_threads = 1;
	#endif
	OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

	if (muse_mesh_velocity_to_convect==false)
	{
		#pragma omp parallel for
		for(int kkk=0; kkk<number_of_threads; kkk++)
		{
			for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
			{
				ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
				Geometry<Node<3> >& geom = ielem->GetGeometry();

				array_1d<double, 3 >vector_mean_velocity=ZeroVector(3);

				for (unsigned int i=0; i != (TDim+1) ; i++)
					vector_mean_velocity += geom[i].FastGetSolutionStepValue(VELOCITY);
				vector_mean_velocity *= nodal_weight;

				const double mean_velocity = sqrt ( pow(vector_mean_velocity[0],2) + pow(vector_mean_velocity[1],2) + pow(vector_mean_velocity[2],2) );
				ielem->SetValue(VELOCITY_OVER_ELEM_SIZE, mean_velocity / ( ielem->GetValue(MEAN_SIZE) ) );
			}
		}
	}
	else
	{
		#pragma omp parallel for
		for(int kkk=0; kkk<number_of_threads; kkk++)
		{
			for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
			{
				ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
				Geometry<Node<3> >& geom = ielem->GetGeometry();

				array_1d<double, 3 >vector_mean_velocity=ZeroVector(3);

				// convective velocity = material velocity - mesh velocity
				for (unsigned int i=0; i != (TDim+1) ; i++)
					vector_mean_velocity += geom[i].FastGetSolutionStepValue(VELOCITY)-geom[i].FastGetSolutionStepValue(MESH_VELOCITY);
				vector_mean_velocity *= nodal_weight;

				const double mean_velocity = sqrt ( pow(vector_mean_velocity[0],2) + pow(vector_mean_velocity[1],2) + pow(vector_mean_velocity[2],2) );
				ielem->SetValue(VELOCITY_OVER_ELEM_SIZE, mean_velocity / ( ielem->GetValue(MEAN_SIZE) ) );
			}
		}
	}
	KRATOS_CATCH("")
}

//name self explained
// fully_reset_nodes==true: every fixed DOF (velocity components, pressure)
// recovers its previous-time-step value, and the previous-step pressure is then
// synchronized with the current one. The 'else' branch (fractional step) follows.
void ResetBoundaryConditions(bool fully_reset_nodes)
{
	KRATOS_TRY

	if (fully_reset_nodes)
	{
		ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
		vector<unsigned int> node_partition;
		#ifdef _OPENMP
			int number_of_threads = omp_get_max_threads();
		#else
			int number_of_threads = 1;
		#endif
		OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);

		#pragma omp parallel for
		for(int kkk=0; kkk<number_of_threads; kkk++)
		{
			for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
			{
				ModelPart::NodesContainerType::iterator inode = inodebegin+ii;

				if (inode->IsFixed(VELOCITY_X))
				{
					inode->FastGetSolutionStepValue(VELOCITY_X)=inode->GetSolutionStepValue(VELOCITY_X,1);
				}
				if (inode->IsFixed(VELOCITY_Y))
				{
					inode->FastGetSolutionStepValue(VELOCITY_Y)=inode->GetSolutionStepValue(VELOCITY_Y,1);
				}
				if (TDim==3)
					if (inode->IsFixed(VELOCITY_Z))
					{
						inode->FastGetSolutionStepValue(VELOCITY_Z)=inode->GetSolutionStepValue(VELOCITY_Z,1);
					}
				if (inode->IsFixed(PRESSURE))
					inode->FastGetSolutionStepValue(PRESSURE)=inode->GetSolutionStepValue(PRESSURE,1);
				// keep the previous-step pressure consistent with the (possibly restored) current one
				inode->GetSolutionStepValue(PRESSURE,1)=inode->FastGetSolutionStepValue(PRESSURE);
			}
		}
	}
	else //for fractional step only!
{
	// Fractional-step variant: instead of restoring old values, remove the
	// boundary-normal component of the velocity on nodes with fixed velocity DOFs.
	ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
	vector<unsigned int> node_partition;
	#ifdef _OPENMP
		int number_of_threads = omp_get_max_threads();
	#else
		int number_of_threads = 1;
	#endif
	OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);

	#pragma omp parallel for
	for(int kkk=0; kkk<number_of_threads; kkk++)
	{
		for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
		{
			ModelPart::NodesContainerType::iterator inode = inodebegin+ii;

			const array_1d<double, 3 > original_velocity = inode->FastGetSolutionStepValue(VELOCITY);

			if (inode->IsFixed(VELOCITY_X) || inode->IsFixed(VELOCITY_Y) || inode->IsFixed(VELOCITY_Z) )
			{
				const array_1d<double, 3 > & normal = inode->FastGetSolutionStepValue(NORMAL);
				const double normal_scalar_sq = normal[0]*normal[0]+normal[1]*normal[1]+normal[2]*normal[2];
				const array_1d<double, 3 > normal_adimensionalized = normal / sqrt(normal_scalar_sq);
				array_1d<double, 3 > & velocity = inode->FastGetSolutionStepValue(VELOCITY);
				array_1d<double, 3 > normal_velocity;

				// NOTE(review): uses |n_j|*v_j component-wise rather than the full
				// projection (v·n)n — TODO confirm this is intentional
				for (unsigned int j=0; j!=3; j++)
					normal_velocity[j] = fabs(normal_adimensionalized[j])*original_velocity[j];

				if (inode->IsFixed(VELOCITY_X))
				{
					velocity[0] = original_velocity[0] - normal_velocity[0];
				}
				if (inode->IsFixed(VELOCITY_Y))
				{
					velocity[1] = original_velocity[1] - normal_velocity[1];
				}
				if (TDim==3)
					if (inode->IsFixed(VELOCITY_Z))
					{
						velocity[2] = original_velocity[2] - normal_velocity[2];
					}
			}

			if (inode->IsFixed(PRESSURE))
				inode->FastGetSolutionStepValue(PRESSURE)=inode->GetSolutionStepValue(PRESSURE,1);
		}
	}
}
KRATOS_CATCH("")
}

//setting the normal component of the velocity to zero
// On nodes flagged IS_STRUCTURE the velocity component along the nodal NORMAL is
// removed (slip condition); nodes with both VELOCITY_X and VELOCITY_Y fixed
// simply recover the previous-time-step velocity instead.
void ResetBoundaryConditionsSlip()
{
	KRATOS_TRY

	{
		ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
		vector<unsigned int> node_partition;
		#ifdef _OPENMP
			int number_of_threads = omp_get_max_threads();
		#else
			int number_of_threads = 1;
		#endif
		OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);

		#pragma omp parallel for
		for(int kkk=0; kkk<number_of_threads; kkk++)
		{
			for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
			{
				ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
				if(inode->FastGetSolutionStepValue(IS_STRUCTURE)!=0.0)
				{
					array_1d<double, 3 >& velocity = inode->FastGetSolutionStepValue(VELOCITY);
					const array_1d<double, 3 > & normal = inode->FastGetSolutionStepValue(NORMAL);
					const double normal_scalar_sq = normal[0]*normal[0]+normal[1]*normal[1]+normal[2]*normal[2];
					const array_1d<double, 3 > normal_adimensionalized = normal / sqrt(normal_scalar_sq);

					//calculating the normal component of the velocity
					// NOTE(review): component-wise product n_j*v_j, not the scalar
					// projection (v·n)n — TODO confirm intended
					array_1d<double, 3 > normal_velocity;
					for (unsigned int j=0; j!=3; j++)
						normal_velocity[j] = normal_adimensionalized[j]*velocity[j];

					const double dot_prod = normal_velocity[0]*velocity[0] + normal_velocity[1]*velocity[1] + normal_velocity[2]*velocity[2];

					//if the dot product of velocity * normal velocity is lower than zero, then they have opposite signs and we must invert the direction:
					if (dot_prod<0.0)
						normal_velocity*= -1.0;

					velocity -= normal_velocity; //substracting the normal component
				}
				else if (inode->IsFixed(VELOCITY_X) && inode->IsFixed(VELOCITY_Y) )
				{
					inode->FastGetSolutionStepValue(VELOCITY) = inode->GetSolutionStepValue(VELOCITY,1);
				}
			}
		}
	}
	KRATOS_CATCH("")
}

// Nodewise, in parallel: DELTA_VELOCITY = VELOCITY - PROJECTED_VELOCITY.
void CalculateDeltaVelocity()
{
	KRATOS_TRY

	ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
	vector<unsigned int> node_partition;
	#ifdef _OPENMP
		int number_of_threads = omp_get_max_threads();
	#else
		int number_of_threads = 1;
	#endif
	OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);

	#pragma omp parallel for
	for(int kkk=0; kkk<number_of_threads; kkk++)
	{
		for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
		{
			ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
			inode->FastGetSolutionStepValue(DELTA_VELOCITY) =
inode->FastGetSolutionStepValue(VELOCITY) - inode->FastGetSolutionStepValue(PROJECTED_VELOCITY) ; } } KRATOS_CATCH("") } void CopyVectorVarToPreviousTimeStep(const Variable< array_1d<double, 3 > >& OriginVariable, ModelPart::NodesContainerType& rNodes) { KRATOS_TRY ModelPart::NodesContainerType::iterator inodebegin = rNodes.begin(); vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, rNodes.size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; noalias(inode->GetSolutionStepValue(OriginVariable,1)) = inode->FastGetSolutionStepValue(OriginVariable); } } KRATOS_CATCH("") } void CopyScalarVarToPreviousTimeStep(const Variable<double>& OriginVariable, ModelPart::NodesContainerType& rNodes) { KRATOS_TRY ModelPart::NodesContainerType::iterator inodebegin = rNodes.begin(); vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, rNodes.size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; inode->GetSolutionStepValue(OriginVariable,1) = inode->FastGetSolutionStepValue(OriginVariable); } } KRATOS_CATCH("") } //to move all the particles across the streamlines. heavy task! 
// Convects every particle across the fixed mesh (the "heavy task"): each
// particle is advanced with MoveParticle(), reassigned to the element where it
// lands, and the per-element particle-pointer arrays are rebuilt. The pointer
// arrays are double-buffered via offset/post_offset (odd/even time steps).
void MoveParticles(const bool discriminate_streamlines) //,const bool pressure_gradient_integrate)
{
	KRATOS_TRY

	ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();

	const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET];
	//the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
	//moveparticlesdiff reads from the pointers of one part (ie odd) and saves into the other part (ie even part)
	//since it is the only function in the whole procedure that does this, it must use alternatively one part and the other.
	//KRATOS_WATCH(offset)
	bool even_timestep;
	if (offset!=0) even_timestep=false;
	else even_timestep=true;

	const int post_offset = mmaximum_number_of_particles*int(even_timestep);
	//and we also save the offset to know the location in which we will save the pointers after we've moved the particles
	//KRATOS_WATCH(post_offset)

	double delta_t = CurrentProcessInfo[DELTA_TIME];

	const array_1d<double,3> gravity= CurrentProcessInfo[GRAVITY];

	array_1d<double,TDim+1> N;
	const unsigned int max_results = 10000;

	//double integration_distance= 2.0;

	max_nsubsteps = 10;
	max_substep_dt=delta_t/double(max_nsubsteps);

	vector<unsigned int> element_partition;
	#ifdef _OPENMP
		int number_of_threads = omp_get_max_threads();
	#else
		int number_of_threads = 1;
	#endif
	OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

	ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();

	//before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element.
	#pragma omp parallel for
	for(int kkk=0; kkk<number_of_threads; kkk++)
	{
		for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
		{
			ModelPart::ElementsContainerType::iterator old_element = ielembegin+ii;

			int & number_of_particles = old_element->GetValue(NUMBER_OF_FLUID_PARTICLES);
			mnumber_of_particles_in_elems_aux(ii)=number_of_particles;
			mnumber_of_particles_in_elems(ii)=0;
			//we reset the local vectors for a faster access;
		}
	}

	bool nonzero_mesh_velocity = false;
	//seeing if we have to use the mesh_velocity or not
	for(ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); inode!=mr_model_part.NodesEnd(); inode++)
	{
		const array_1d<double, 3 > velocity = inode->FastGetSolutionStepValue(MESH_VELOCITY);
		for(unsigned int i = 0; i!=3; i++)
		{
			if (fabs(velocity[i])>1.0e-9)
				nonzero_mesh_velocity=true;
		}
		if( nonzero_mesh_velocity==true)
			break;
	}

	if ( nonzero_mesh_velocity==true)
		muse_mesh_velocity_to_convect = true; // if there is mesh velocity, then we have to take it into account when moving the particles
	else
		muse_mesh_velocity_to_convect = false; //otherwise, we can avoid reading the values since we know it is zero everywhere (to save time!)

	std::cout << "convecting particles" << std::endl;
	//We move the particles across the fixed mesh and saving change data into them (using the function MoveParticle)
	const bool local_use_mesh_velocity_to_convect = muse_mesh_velocity_to_convect;

	#pragma omp parallel for
	for(int kkk=0; kkk<number_of_threads; kkk++)
	{
		const array_1d<double,3> mesh_displacement = mcalculation_domain_added_displacement; //if it is a standard problem, displacements are zero and therefore nothing is added.

		ResultContainerType results(max_results);

		WeakPointerVector< Element > elements_in_trajectory;
		elements_in_trajectory.resize(20);

		for(unsigned int ielem=element_partition[kkk]; ielem<element_partition[kkk+1]; ielem++)
		{
		//for(unsigned int ielem=0; ielem<mr_model_part.Elements().size(); ielem++)
		//{
			ModelPart::ElementsContainerType::iterator old_element = ielembegin+ielem;
			const int old_element_id = old_element->Id();

			ParticlePointerVector& old_element_particle_pointers = *mpointers_to_particle_pointers_vectors(old_element_id-1);

			if ( (results.size()) !=max_results)
				results.resize(max_results);

			unsigned int number_of_elements_in_trajectory=0; //excluding the origin one (current one, ielem)

			for(int ii=0; ii<(mnumber_of_particles_in_elems_aux(ielem)); ii++)
			{
				PFEM_Particle_Fluid & pparticle = old_element_particle_pointers[offset+ii];

				Element::Pointer pcurrent_element( *old_element.base() );
				ResultIteratorType result_begin = results.begin();
				bool & erase_flag=pparticle.GetEraseFlag();
				if (erase_flag==false){
					// N removed from the arguments: it is not needed since the particle
					// ALWAYS starts at a node and where it ends does not matter
					MoveParticle(pparticle,pcurrent_element,elements_in_trajectory,number_of_elements_in_trajectory,result_begin,max_results, mesh_displacement, discriminate_streamlines, local_use_mesh_velocity_to_convect);

					const int current_element_id = pcurrent_element->Id();

					int & number_of_particles_in_current_elem = mnumber_of_particles_in_elems(current_element_id-1);
					//int & number_of_water_particles_in_current_elem = mnumber_of_water_particles_in_elems(current_element_id-1);

					if (number_of_particles_in_current_elem<mmaximum_number_of_particles && erase_flag==false)
					{
						{
							ParticlePointerVector& current_element_particle_pointers = *mpointers_to_particle_pointers_vectors(current_element_id-1);

							// re-check inside the critical section: another thread may have
							// filled the destination element in the meantime
							#pragma omp critical
							{
								if (number_of_particles_in_current_elem<mmaximum_number_of_particles) // we cant go over this node, there's no room. otherwise we would be in the position of the first particle of the next element!!
								{
									current_element_particle_pointers(post_offset+number_of_particles_in_current_elem) = &pparticle;
									number_of_particles_in_current_elem++ ;
									if (number_of_particles_in_current_elem>mmaximum_number_of_particles)
										KRATOS_WATCH("MAL");
								}
								else
									pparticle.GetEraseFlag()=true; //so we just delete it!
							}
						}
					}
					else
						pparticle.GetEraseFlag()=true; //so we just delete it!
				}
			}
		}
	}

	//now we pass info from the local vector to the elements:
	#pragma omp parallel for
	for(int kkk=0; kkk<number_of_threads; kkk++)
	{
		for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
		{
			ModelPart::ElementsContainerType::iterator old_element = ielembegin+ii;
			old_element->GetValue(NUMBER_OF_FLUID_PARTICLES) = mnumber_of_particles_in_elems(ii);
			//old_element->GetValue(NUMBER_OF_WATER_PARTICLES) = mnumber_of_water_particles_in_elems(ii);
		}
	}

	//after having changed everything we change the status of the modd_timestep flag:
	CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET] = post_offset;; //

	KRATOS_CATCH("")
}

// Explicit projection: transfers particle data (velocity and signed distance)
// onto the Eulerian mesh nodes as a shape-function-weighted average.
void TransferLagrangianToEulerian() //explicit
{
	KRATOS_TRY

	ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
	//const double delta_t =CurrentProcessInfo[DELTA_TIME];
	const double threshold= 0.0/(double(TDim)+1.0);

	std::cout << "projecting info to mesh" << std::endl;

	const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET];
	//the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
//KRATOS_WATCH(offset)
//(flag managed only by MoveParticles
//we must project data from the particles (lagrangian) into the eulerian mesh
//ValuesVectorType eulerian_nodes_old_temperature;
//int nnodes = mr_model_part.Nodes().size();
//array_1d<double,(n_nodes)> eulerian_nodes_sumweights;

//we save data from previous time step of the eulerian mesh in case we must reuse it later cos no particle was found around the nodes
//though we could've use a bigger buffer, to be changed later!
//after having saved data, we reset them to zero, this way it's easier to add the contribution of the surrounding particles.
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
	int number_of_threads = omp_get_max_threads();
#else
	int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);

// reset the nodal accumulators (distance, projected velocity, weight sum YP)
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
	for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
	{
		ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
		inode->FastGetSolutionStepValue(DISTANCE)=0.0;
		inode->FastGetSolutionStepValue(PROJECTED_VELOCITY)=ZeroVector(3);
		inode->FastGetSolutionStepValue(YP)=0.0;
	}
}

//adding contribution, loop on elements, since each element has stored the particles found inside of it
vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
	for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
	{
		ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;

		array_1d<double,3*(TDim+1)> nodes_positions;
		array_1d<double,3*(TDim+1)> nodes_addedvel = ZeroVector(3*(TDim+1));
		array_1d<double,(TDim+1)> nodes_added_distance = ZeroVector((TDim+1));
		array_1d<double,(TDim+1)> nodes_addedweights = ZeroVector((TDim+1));
		//array_1d<double,(TDim+1)> weighting_inverse_divisor;

		Geometry<Node<3> >& geom = ielem->GetGeometry();
		for (int i=0 ; i!=(TDim+1) ; ++i)
		{
			nodes_positions[i*3+0]=geom[i].X();
			nodes_positions[i*3+1]=geom[i].Y();
			nodes_positions[i*3+2]=geom[i].Z();
			//weighting_inverse_divisor[i]=1.0/((geom[i].FastGetSolutionStepValue(MEAN_SIZE))*1.01);
		}
		///KRATOS_WATCH(ielem->Id())
		///KRATOS_WATCH(ielem->GetValue(NEIGHBOUR_NODES).size());

		int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_FLUID_PARTICLES);
		ParticlePointerVector& element_particle_pointers = (ielem->GetValue(FLUID_PARTICLE_POINTERS));

		for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
		{
			if (iii==mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
				break;

			PFEM_Particle_Fluid & pparticle = element_particle_pointers[offset+iii];

			if (pparticle.GetEraseFlag()==false)
			{
				array_1d<double,3> & position = pparticle.Coordinates();

				const array_1d<float,3>& velocity = pparticle.GetVelocity();

				const float& particle_distance = pparticle.GetDistance(); // -1 if water, +1 if air

				array_1d<double,TDim+1> N;
				bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N);
				if (is_found==false) //something went wrong. if it was close enough to the edge we simply send it inside the element.
				{
					KRATOS_WATCH(N);
					for (int j=0 ; j!=(TDim+1); j++)
						if (N[j]<0.0 && N[j]> -1e-5)
							N[j]=1e-10;
				}

				for (int j=0 ; j!=(TDim+1); j++) //going through the 3/4 nodes of the element
				{
					//double sq_dist = 0;
					//these lines for a weighting function based on the distance (or square distance) from the node insteadof the shape functions
					//for (int k=0 ; k!=(TDim); k++) sq_dist += ((position[k] - nodes_positions[j*3+k])*(position[k] - nodes_positions[j*3+k]));
					//double weight = (1.0 - (sqrt(sq_dist)*weighting_inverse_divisor[j] ) );

					double weight=N(j);
					//weight=N(j)*N(j)*N(j);
					if (weight<threshold) weight=1e-10;
					if (weight<0.0) {KRATOS_WATCH(weight)}//;weight=0.0;KRATOS_WATCH(velocity);KRATOS_WATCH(N);KRATOS_WATCH(number_of_particles_in_elem);}//{KRATOS_WATCH(weight); KRATOS_WATCH(geom[j].Id()); KRATOS_WATCH(position);}
					else
					{
						nodes_addedweights[j]+= weight;
						//nodes_addedtemp[j] += weight * particle_temp;

						nodes_added_distance[j] += weight*particle_distance;

						//nodes_added_oxygen[j] += weight*particle_oxygen;

						for (int k=0 ; k!=(TDim); k++) //x,y,(z)
						{
							nodes_addedvel[j*3+k] += weight * double(velocity[k]);
						}
					}//
				}
			}
		}

		// scatter the elemental accumulators to the shared nodes under per-node locks
		for (int i=0 ; i!=(TDim+1) ; ++i)
		{
			geom[i].SetLock();
			geom[i].FastGetSolutionStepValue(DISTANCE) +=nodes_added_distance[i];
			geom[i].FastGetSolutionStepValue(PROJECTED_VELOCITY_X) +=nodes_addedvel[3*i+0];
			geom[i].FastGetSolutionStepValue(PROJECTED_VELOCITY_Y) +=nodes_addedvel[3*i+1];
			geom[i].FastGetSolutionStepValue(PROJECTED_VELOCITY_Z) +=nodes_addedvel[3*i+2]; //we are updating info to the previous time step!!

			geom[i].FastGetSolutionStepValue(YP) +=nodes_addedweights[i];
			geom[i].UnSetLock();
		}
	}
}

// normalize the nodal sums by the accumulated weights
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
	for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
	{
		ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
		double sum_weights = inode->FastGetSolutionStepValue(YP);
		if (sum_weights>0.00001)
		{
			//inode->FastGetSolutionStepValue(TEMPERATURE_OLD_IT)=(inode->FastGetSolutionStepValue(TEMPERATURE_OLD_IT))/sum_weights; //resetting the temperature
			double & dist = inode->FastGetSolutionStepValue(DISTANCE);
			dist /=sum_weights; //resetting the density
			inode->FastGetSolutionStepValue(PROJECTED_VELOCITY)=(inode->FastGetSolutionStepValue(PROJECTED_VELOCITY))/sum_weights; //resetting the velocity
		}

		else //this should never happen because other ways to recover the information have been executed before, but leaving it just in case..
		{
			inode->FastGetSolutionStepValue(DISTANCE)=3.0; //resetting the temperature
			//inode->FastGetSolutionStepValue(DISTANCE)=inode->GetSolutionStepValue(DISTANCE,1); //resetting the temperature
			inode->FastGetSolutionStepValue(PROJECTED_VELOCITY)=inode->GetSolutionStepValue(VELOCITY,1);
		}
		///finally, if there was an inlet that had a fixed position for the distance function, that has to remain unchanged:
		if (inode->IsFixed(DISTANCE))
			inode->FastGetSolutionStepValue(DISTANCE)=inode->GetSolutionStepValue(DISTANCE,1);
	}
}

KRATOS_CATCH("")
}

// Semi-implicit projection of particle data onto the mesh: per element it
// assembles and inverts a consistent mass matrix (plus a small lumped-mass
// contribution) instead of the simple weighted average used by the explicit
// TransferLagrangianToEulerian().
void TransferLagrangianToEulerianImp() //semi implicit
{
	KRATOS_TRY

	ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();

	std::cout << "projecting info to mesh (semi implicit)" << std::endl;

	const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET];
	//the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
//KRATOS_WATCH(offset)
//(flag managed only by MoveParticles
//we must project data from the particles (lagrangian) into the eulerian mesh
//ValuesVectorType eulerian_nodes_old_temperature;
//int nnodes = mr_model_part.Nodes().size();
//array_1d<double,(n_nodes)> eulerian_nodes_sumweights;

//we save data from previous time step of the eulerian mesh in case we must reuse it later cos no particle was found around the nodes
//though we could've use a bigger buffer, to be changed later!
//after having saved data, we reset them to zero, this way it's easier to add the contribution of the surrounding particles.
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
	int number_of_threads = omp_get_max_threads();
#else
	int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);

// reset nodal accumulators before assembling the projection
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
	for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
	{
		ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
		inode->FastGetSolutionStepValue(DISTANCE)=0.0;
		inode->FastGetSolutionStepValue(PROJECTED_VELOCITY)=ZeroVector(3);
		inode->FastGetSolutionStepValue(YP)=0.0;
	}
}

//adding contribution, loop on elements, since each element has stored the particles found inside of it
vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
	//creating a matrix for each of the problems.
	BoundedMatrix<double, TDim+1 , TDim+1 > mass_matrix; // WE ONLY NEED ONE! they are the same for all the variables! //_x,mass_matrix_y,mass_matrix_z,mass_matrix_d; //mass matrices for the projected vel (x,y,z) and the distance
	array_1d<double,(TDim+1)> rhs_x,rhs_y,rhs_z,rhs_d;

	array_1d<double,3*(TDim+1)> nodes_positions;
	array_1d<double,3*(TDim+1)> nodes_addedvel = ZeroVector(3*(TDim+1));

	array_1d<double,(TDim+1)> nodes_added_distance = ZeroVector((TDim+1));
	array_1d<double,(TDim+1)> nodes_addedweights = ZeroVector((TDim+1));

	for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
	{
		ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;

		nodes_addedvel = ZeroVector(3*(TDim+1)); //resetting vectors
		nodes_added_distance = ZeroVector((TDim+1)); //resetting vectors
		nodes_addedweights = ZeroVector((TDim+1)); //resetting vectors
		mass_matrix = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices. WE ONLY NEED ONE! they are the same for all the variable. only the rhs changes.
		//mass_matrix_y = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices
		//mass_matrix_z = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices
		//mass_matrix_d = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices
		rhs_x = ZeroVector((TDim+1)); //resetting vectors
		rhs_y = ZeroVector((TDim+1)); //resetting vectors
		rhs_z = ZeroVector((TDim+1)); //resetting vectors
		rhs_d = ZeroVector((TDim+1)); //resetting vectors

		Geometry<Node<3> >& geom = ielem->GetGeometry();
		const double elem_volume = geom.Area();

		for (int i=0 ; i!=(TDim+1) ; ++i) //saving the nodal positions for faster access
		{
			nodes_positions[i*3+0]=geom[i].X();
			nodes_positions[i*3+1]=geom[i].Y();
			nodes_positions[i*3+2]=geom[i].Z();
		}
		///KRATOS_WATCH(ielem->Id())
		///KRATOS_WATCH(ielem->GetValue(NEIGHBOUR_NODES).size());

		int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_FLUID_PARTICLES);
		ParticlePointerVector& element_particle_pointers = (ielem->GetValue(FLUID_PARTICLE_POINTERS));

		for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
		{
			if (iii==mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
				break;

			PFEM_Particle_Fluid & pparticle = element_particle_pointers[offset+iii];

			if (pparticle.GetEraseFlag()==false)
			{
				array_1d<double,3> & position = pparticle.Coordinates();

				const array_1d<float,3>& velocity = pparticle.GetVelocity();

				const float& particle_distance = pparticle.GetDistance(); // -1 if water, +1 if air

				array_1d<double,TDim+1> N;
				bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N);
				if (is_found==false) //something went wrong. if it was close enough to the edge we simply send it inside the element.
				{
					KRATOS_WATCH(N);
					for (int j=0 ; j!=(TDim+1); j++)
						if (N[j]<0.0 && N[j]> -1e-5)
							N[j]=1e-10;
				}

				for (int j=0 ; j!=(TDim+1); j++) //going through the 3/4 nodes of the element
				{
					double weight=N(j);
					for (int k=0 ; k!=(TDim+1); k++) //building the mass matrix
						mass_matrix(j,k) += weight*N(k);

					rhs_x[j] += weight * double(velocity[0]);
					rhs_y[j] += weight * double(velocity[1]);
					rhs_z[j] += weight * double(velocity[2]);
					rhs_d[j] += weight * double(particle_distance);

					//adding also a part with the lumped mass matrix to reduce overshoots and undershoots
					if(true)
					{
						double this_particle_weight = weight*elem_volume/(double(number_of_particles_in_elem))*0.1; //can be increased or reduced to change the lumped mass contrubtion
						nodes_addedweights[j]+= this_particle_weight;
						nodes_added_distance[j] += this_particle_weight*particle_distance;
						for (int k=0 ; k!=(TDim); k++) //x,y,(z)
						{
							nodes_addedvel[j*3+k] += this_particle_weight * double(velocity[k]);
						}
					}
				}
			}
		}

		//now we invert the matrix
		BoundedMatrix<double, TDim+1 , TDim+1 > inverse_mass_matrix=ZeroMatrix(TDim+1 , TDim+1);
		if(TDim==3)
			InvertMatrix( mass_matrix, inverse_mass_matrix);
		else
			InvertMatrix3x3( mass_matrix, inverse_mass_matrix);
		//and now compute the elemental contribution to the gobal system:

		if(number_of_particles_in_elem>(TDim*3)) //otherwise it's impossible to define a correctly the gradients, therefore the results inside the element are useless.
		{
			for (int i=0 ; i!=(TDim+1); i++)
			{
				for (int j=0 ; j!=(TDim+1); j++)
				{
					nodes_addedvel[3*i+0] += inverse_mass_matrix(i,j)*rhs_x[j]*elem_volume*(1.0/(double(1+TDim)));
					nodes_addedvel[3*i+1] += inverse_mass_matrix(i,j)*rhs_y[j]*elem_volume*(1.0/(double(1+TDim)));
					nodes_addedvel[3*i+2] += inverse_mass_matrix(i,j)*rhs_z[j]*elem_volume*(1.0/(double(1+TDim)));
					nodes_added_distance[i] += inverse_mass_matrix(i,j)*rhs_d[j]*elem_volume*(1.0/(double(1+TDim)));
				}
			}
			//and also to the mass matrix. LUMPED (but for the contribution of the grandient at elemental level.
			for (int i=0 ; i!=(TDim+1); i++)
				nodes_addedweights[i] += elem_volume*(1.0/(double(1+TDim)));
		}

		// scatter elemental contributions to the shared nodes under per-node locks
		for (int i=0 ; i!=(TDim+1) ; ++i)
		{
			geom[i].SetLock();
			geom[i].FastGetSolutionStepValue(DISTANCE) +=nodes_added_distance[i];
			geom[i].FastGetSolutionStepValue(PROJECTED_VELOCITY_X) +=nodes_addedvel[3*i+0];
			geom[i].FastGetSolutionStepValue(PROJECTED_VELOCITY_Y) +=nodes_addedvel[3*i+1];
			geom[i].FastGetSolutionStepValue(PROJECTED_VELOCITY_Z) +=nodes_addedvel[3*i+2]; //we are updating info to the previous time step!!

			geom[i].FastGetSolutionStepValue(YP) +=nodes_addedweights[i];
			geom[i].UnSetLock();
		}
	}
}

// normalize the nodal sums by the accumulated weights
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
	for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
	{
		ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
		double sum_weights = inode->FastGetSolutionStepValue(YP);
		if (sum_weights>0.00001)
		{
			//inode->FastGetSolutionStepValue(TEMPERATURE_OLD_IT)=(inode->FastGetSolutionStepValue(TEMPERATURE_OLD_IT))/sum_weights; //resetting the temperature
			double & dist = inode->FastGetSolutionStepValue(DISTANCE);
			dist /=sum_weights; //resetting the density
			inode->FastGetSolutionStepValue(PROJECTED_VELOCITY)=(inode->FastGetSolutionStepValue(PROJECTED_VELOCITY))/sum_weights; //resetting the velocity
		}

		else //this should never happen because other ways to recover the information have been executed before, but leaving it just in case..
{
	inode->FastGetSolutionStepValue(DISTANCE)=3.0; //resetting the temperature
	//inode->FastGetSolutionStepValue(DISTANCE)=inode->GetSolutionStepValue(DISTANCE,1); //resetting the temperature
	inode->FastGetSolutionStepValue(PROJECTED_VELOCITY)=inode->GetSolutionStepValue(VELOCITY,1);
}
///finally, if there was an inlet that had a fixed position for the distance function, that has to remain unchanged:
if (inode->IsFixed(DISTANCE))
	inode->FastGetSolutionStepValue(DISTANCE)=inode->GetSolutionStepValue(DISTANCE,1);
}
}

KRATOS_CATCH("")
}

// Updates every (non-erased) particle's velocity in place — particles are NOT
// moved — by delegating to AccelerateParticleUsingDeltaVelocity for each one.
void AccelerateParticlesWithoutMovingUsingDeltaVelocity()
{
	KRATOS_TRY
	//std::cout << "updating particles" << std::endl;
	ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();

	const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET];
	//the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
	//(flag managed only by MoveParticles
	//KRATOS_WATCH(offset)

	ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();

	vector<unsigned int> element_partition;
	#ifdef _OPENMP
		int number_of_threads = omp_get_max_threads();
	#else
		int number_of_threads = 1;
	#endif
	OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

	#pragma omp parallel for
	for(int kkk=0; kkk<number_of_threads; kkk++)
	{
		for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
		{
			//const int & elem_id = ielem->Id();
			ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
			Element::Pointer pelement(*ielem.base());
			Geometry<Node<3> >& geom = ielem->GetGeometry();

			ParticlePointerVector& element_particle_pointers = (ielem->GetValue(FLUID_PARTICLE_POINTERS));
			int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_FLUID_PARTICLES);
			//std::cout << "elem " << ii << " with " << (unsigned int)number_of_particles_in_elem << " particles" << std::endl;

			for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
			{
				//KRATOS_WATCH(iii)
				if (iii>mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
					break;

				PFEM_Particle_Fluid & pparticle = element_particle_pointers[offset+iii];

				bool erase_flag= pparticle.GetEraseFlag();
				if (erase_flag==false)
				{
					AccelerateParticleUsingDeltaVelocity(pparticle,pelement,geom); //'lite' version, we pass by reference the geometry, so much cheaper
				}
			}
		}
	}
	KRATOS_CATCH("")
}

//**************************************************************************************************************
//**************************************************************************************************************

// Appends 'candidate' to v only if no element with the same Id is already
// present (linear search by Id).
template< class TDataType > void AddUniqueWeakPointer
	(WeakPointerVector< TDataType >& v, const typename TDataType::WeakPointer candidate)
{
	typename WeakPointerVector< TDataType >::iterator i = v.begin();
	typename WeakPointerVector< TDataType >::iterator endit = v.end();
	while ( i != endit && (i)->Id() != (candidate.lock())->Id())
	{
		i++;
	}
	if( i == endit )
	{
		v.push_back(candidate);
	}
}

//**************************************************************************************************************
//**************************************************************************************************************

// Seeds new particles (at Gauss-point positions) into elements holding fewer
// than 'minimum_number_of_particles', reusing erased slots of mparticles_vector;
// each new particle gets its data by integrating backwards along the flow
// (MoveParticle_inverse_way).
void PreReseed(int minimum_number_of_particles)
{
	KRATOS_TRY

	ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
	const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET];
	const int max_results = 1000;

	//tools for the paralelization
	unsigned int number_of_threads = OpenMPUtils::GetNumThreads();
	vector<unsigned int> elem_partition;
	int number_of_rows=mr_model_part.Elements().size();
	elem_partition.resize(number_of_threads + 1);
	int elem_partition_size = number_of_rows / number_of_threads;
	elem_partition[0] = 0;
	elem_partition[number_of_threads] = number_of_rows;
	//KRATOS_WATCH(elem_partition_size);
	for (unsigned int i = 1; i < number_of_threads; i++)
        elem_partition[i] = elem_partition[i - 1] + elem_partition_size;
    const bool local_use_mesh_velocity_to_convect = muse_mesh_velocity_to_convect;

    #pragma omp parallel firstprivate(elem_partition)
    {
        ResultContainerType results(max_results);
        int k = OpenMPUtils::ThisThread();
        ModelPart::ElementsContainerType::iterator it_begin = mr_model_part.ElementsBegin() + elem_partition[k];
        ModelPart::ElementsContainerType::iterator it_end = mr_model_part.ElementsBegin() + elem_partition[k+1] ;
        //ModelPart::NodesContainerType local_list=aux[k];
        //PointerVectorSet<PFEM_Particle_Fluid, IndexedObject> & list=aux[k];
        //KRATOS_WATCH(k);
        BoundedMatrix<double, (TDim+1), 3 > pos;
        BoundedMatrix<double, (TDim+1) , (TDim+1) > N;
        unsigned int freeparticle=0; //we start with the first position in the particles array

        //int local_id=1;
        for (ModelPart::ElementsContainerType::iterator ielem = it_begin; ielem != it_end; ielem++)
        {
            results.resize(max_results);
            //const int & elem_id = ielem->Id();
            ParticlePointerVector& element_particle_pointers = (ielem->GetValue(FLUID_PARTICLE_POINTERS));
            int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_FLUID_PARTICLES);
            if (number_of_particles_in_elem<(minimum_number_of_particles))// && (ielem->GetGeometry())[0].Y()<0.10 )
            {
                //KRATOS_WATCH("elem with little particles")
                Geometry< Node<3> >& geom = ielem->GetGeometry();
                ComputeGaussPointPositionsForPreReseed(geom, pos, N);
                //double conductivity = ielem->GetProperties()[CONDUCTIVITY];
                //KRATOS_WATCH(conductivity);
                for (unsigned int j = 0; j < (pos.size1()); j++) //i am dropping the last one, the one in the middle of the element
                {
                    //find a free (erased) slot in the global particle vector; the double-checked
                    //test under 'omp critical' is needed because several threads scan concurrently.
                    //NOTE(review): no upper-bound check on freeparticle — assumes enough erased
                    //slots always exist; confirm mparticles_vector sizing.
                    bool keep_looking = true;
                    while(keep_looking)
                    {
                        if (mparticles_vector[freeparticle].GetEraseFlag()==true)
                        {
                            #pragma omp critical
                            {
                                if (mparticles_vector[freeparticle].GetEraseFlag()==true)
                                {
                                    mparticles_vector[freeparticle].GetEraseFlag()=false;
                                    keep_looking=false;
                                }
                            }
                            if (keep_looking==false)
                                break;
                            else
                                freeparticle++;
                        }
                        else
                        {
                            freeparticle++;
                        }
                    }

                    PFEM_Particle_Fluid pparticle(pos(j,0),pos(j,1),pos(j,2));

                    array_1d<double,TDim+1>aux2_N;
                    bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux2_N);
                    if (is_found==false)
                    {
                        KRATOS_WATCH(aux2_N);
                    }

                    pparticle.GetEraseFlag()=false;

                    ResultIteratorType result_begin = results.begin();
                    Element::Pointer pelement( *ielem.base() );
                    //convect the new particle backwards along the velocity field to fetch its data
                    MoveParticle_inverse_way(pparticle, pelement, result_begin, max_results, local_use_mesh_velocity_to_convect);

                    //and we copy it to the array:
                    mparticles_vector[freeparticle] =  pparticle;
                    element_particle_pointers(offset+number_of_particles_in_elem) = &mparticles_vector[freeparticle];
                    pparticle.GetEraseFlag()=false;
                    number_of_particles_in_elem++;
                }
            }
        }
    }
    KRATOS_CATCH("")
}

//**************************************************************************************************************
//**************************************************************************************************************

///Reseeds under-populated elements AFTER convection ("pooyan's way"): new particles are
///placed at Gauss points and classified as water/air from the nodal DISTANCE field,
///biased by 'mass_correction_factor' (clamped to [-0.5, 0.5]) to compensate mass drift.
void PostReseed(int minimum_number_of_particles, double mass_correction_factor )    //pooyan's way
{
    KRATOS_TRY

    ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
    const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET];
    if (mass_correction_factor>0.5) mass_correction_factor=0.5;
    if (mass_correction_factor<-0.5) mass_correction_factor=-0.5;
    //mass_correction_factor=0.0;

    //ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
    //const double delta_t = CurrentProcessInfo[DELTA_TIME];
    //array_1d<double,3> & gravity= CurrentProcessInfo[GRAVITY];
    //const int max_results = 1000;
    const double threshold = mass_correction_factor*0.5;

    //TOOLS FOR THE PARALELIZATION
    //int last_id= (mr_linea_model_part.NodesEnd()-1)->Id();
    unsigned int number_of_threads = OpenMPUtils::GetNumThreads();
    //KRATOS_WATCH(number_of_threads);
    vector<unsigned int> elem_partition;
    int number_of_rows=mr_model_part.Elements().size();
    //KRATOS_WATCH(number_of_threads);
    //KRATOS_THROW_ERROR(std::logic_error, "Add  ----NODAL_H---- variable!!!!!! ERROR", "");
    //ERROR", "");   (tail of the commented-out NODAL_H check)
    elem_partition.resize(number_of_threads + 1);
    int elem_partition_size = number_of_rows / number_of_threads;
    elem_partition[0] = 0;
    elem_partition[number_of_threads] = number_of_rows;
    //KRATOS_WATCH(elem_partition_size);
    for (unsigned int i = 1; i < number_of_threads; i++)
        elem_partition[i] = elem_partition[i - 1] + elem_partition_size;

    //typedef Node < 3 > PointType;
    //std::vector<ModelPart::NodesContainerType> aux;// aux;
    //aux.resize(number_of_threads);

    //ModelPart::NodesContainerType::iterator it_begin_particle_model_part = mr_linea_model_part.NodesBegin();
    //ModelPart::NodesContainerType::iterator it_end_particle_model_part = mr_linea_model_part.NodesEnd();

    #pragma omp parallel firstprivate(elem_partition) // firstprivate(results)//we will add the nodes in different parts of aux and later assemple everything toghether, remaming particles ids to get consecutive ids
    {
        unsigned int reused_particles=0;
        unsigned int freeparticle = 0; //we start by the first position;
        int k = OpenMPUtils::ThisThread();
        ModelPart::ElementsContainerType::iterator it_begin = mr_model_part.ElementsBegin() +  elem_partition[k];
        ModelPart::ElementsContainerType::iterator it_end = mr_model_part.ElementsBegin() + elem_partition[k+1] ;

        BoundedMatrix<double, (3+2*TDim), 3 > pos; //7 particles (2D) or 9 particles (3D)
        BoundedMatrix<double, (3+2*TDim), (TDim+1) > N;

        array_1d<double, 3 > vel_complete, vel_without_air_nodes;
        double sum_Ns_without_air_nodes;
        double mesh_distance;

        array_1d<double, (3+2*TDim) > distances;
        array_1d<int, (3+2*TDim) > positions;
        array_1d<bool, (3+2*TDim) > is_water_particle; //for both

        unsigned int number_of_reseeded_particles;
        //unsigned int number_of_water_reseeded_particles;

        //array_1d<double, 3 > nodes_distances;

        //int local_id=1;
        for (ModelPart::ElementsContainerType::iterator ielem = it_begin; ielem != it_end; ielem++)
        {
            //results.resize(max_results);

            int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_FLUID_PARTICLES);
            ParticlePointerVector& element_particle_pointers =  (ielem->GetValue(FLUID_PARTICLE_POINTERS));
            Geometry< Node<3> >& geom = ielem->GetGeometry();
            if ( (number_of_particles_in_elem<(minimum_number_of_particles)))// && (geom[0].Y()<0.10) ) || (number_of_water_particles_in_elem>2 && number_of_particles_in_elem<(minimum_number_of_particles) ) )
            {
                //bool reseed_more=false;
                number_of_reseeded_particles=0;

                //reseed_more=true;
                number_of_reseeded_particles= 3+2*TDim;
                ComputeGaussPointPositionsForPostReseed(geom, pos, N);

                distances = ZeroVector(3+2*TDim);

                //classify the element: mixed (water+air), pure air or pure water
                bool has_water_node=false;
                bool has_air_node=false;
                double mean_element_distance = 0.0;

                for (unsigned int j = 0; j < (TDim+1); j++)
                {
                    mean_element_distance += (1.0/double(TDim+1))*(geom[j].FastGetSolutionStepValue(DISTANCE));
                    if ((geom[j].FastGetSolutionStepValue(DISTANCE))<0.0)
                        has_water_node=true;
                    else
                        has_air_node=true;
                }

                //first we check the particle distance according to the nodal values
                for (unsigned int j = 0; j < number_of_reseeded_particles; j++) //first we order particles
                {
                    positions[j]=j+1; //just creating a vector from 1 to 7 or whathever our lenght is (7 for 2d, 9 for 3d)
                    for (unsigned int l = 0; l < (TDim+1); l++)
                    {
                        distances[j] +=  N(j, l) * geom[l].FastGetSolutionStepValue(DISTANCE);
                    }
                }

                if ( (has_air_node && has_water_node) ) //for slit elements we use the distance function
                {
                    for (unsigned int j = 0; j < number_of_reseeded_particles ; j++) //first we order particles
                    {
                        if (distances[j]>threshold)
                            is_water_particle[j]=false;
                        else
                            is_water_particle[j]=true;
                    }
                }
                else if (has_air_node) //pure air element: seed a water fraction based on the mean distance
                {
                    double water_fraction = 0.5 - 0.5*(mean_element_distance);
                    if (water_fraction>0.9 && mass_correction_factor<0.0) //to avoid seeding air particles when we are in a pure water element
                        mass_correction_factor = 0.0;
                    unsigned int number_of_water_reseeded_particles = double(number_of_reseeded_particles)*(1.01+mass_correction_factor*1.0)*water_fraction;

                    //ok. now we have the particles ordered from the "watermost" to "airmost".
                    //therefore we will fill the water particles and later the air ones using that order
                    BubbleSort(distances, positions, number_of_reseeded_particles);

                    for (unsigned int j = 0; j < number_of_reseeded_particles ; j++) //first we order particles
                    {
                        int array_position = positions[j]-1;
                        if (array_position>3 && number_of_reseeded_particles==4)
                        {
                            KRATOS_WATCH("error in reseeding")
                        }

                        if ( (j+1) <= number_of_water_reseeded_particles ) //means it is a water particle
                            is_water_particle[array_position]=true;
                        else
                            is_water_particle[array_position]=false;
                    }
                }
                else //only water particles
                {
                    for (unsigned int j = 0; j < number_of_reseeded_particles ; j++) //first we order particles
                        is_water_particle[j]=true;
                }

                bool fix_distance = false;
                unsigned int node_with_fixed_distance = 0;
                for (unsigned int j = 0; j < (TDim+1) ; j++) //we go over the 3/4 nodes:
                {
                    if ((geom[j].IsFixed(DISTANCE)))
                    {
                        fix_distance = true;
                        node_with_fixed_distance = j;
                    }
                }

                // so now if the 3 were fixed, we assign the sign of the first node to all the particles:
                if (fix_distance)
                {
                    bool is_water_for_all_particles=true;
                    if ((geom[node_with_fixed_distance].FastGetSolutionStepValue(DISTANCE))>0.0)
                        is_water_for_all_particles=false;
                    for (unsigned int j = 0; j < number_of_reseeded_particles ; j++) //first we order particles
                        is_water_particle[j]=is_water_for_all_particles;
                }

                for (unsigned int j = 0; j < number_of_reseeded_particles; j++)
                {
                    //now we have to find an empty space ( a particle that was about to be deleted)
                    //in the particles model part. once found, there will be our renewed particle:
                    bool keep_looking = true;
                    while(keep_looking)
                    {
                        if (mparticles_vector[freeparticle].GetEraseFlag()==true)
                        {
                            #pragma omp critical
                            {
                                if (mparticles_vector[freeparticle].GetEraseFlag()==true)
                                {
                                    mparticles_vector[freeparticle].GetEraseFlag()=false;
                                    keep_looking=false;
                                }
                            }
                            if (keep_looking==false)
                                break;
                            else
                                freeparticle++;
                        }
                        else
                        {
                            freeparticle++;
                        }
                    }

                    PFEM_Particle_Fluid pparticle(pos(j,0),pos(j,1),pos(j,2));

                    array_1d<float, 3 > & vel = pparticle.GetVelocity();
                    float& distance= pparticle.GetDistance();

                    array_1d<double,TDim+1>aux_N;
                    bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux_N);
                    if (is_found==false)
                    {
                        KRATOS_WATCH(aux_N);
                        KRATOS_WATCH(j)
                        KRATOS_WATCH(ielem->Id())
                    }

                    noalias(vel_complete)=ZeroVector(3);
                    noalias(vel_without_air_nodes)=ZeroVector(3);
                    sum_Ns_without_air_nodes=0.0;
                    noalias(vel) = ZeroVector(3);
                    distance=0.0;
                    mesh_distance = 0.0;
                    //oxygen = 0.0;

                    //interpolate velocity at the particle; also keep a water-only average
                    for (unsigned int l = 0; l < (TDim+1); l++)
                    {
                        noalias(vel_complete) += N(j, l) * geom[l].FastGetSolutionStepValue(VELOCITY);
                        mesh_distance += N(j,l) * geom[l].FastGetSolutionStepValue(DISTANCE);
                        if ((geom[l].FastGetSolutionStepValue(DISTANCE))<0.0)
                        {
                            sum_Ns_without_air_nodes+=N(j, l);
                            noalias(vel_without_air_nodes) += N(j, l) * geom[l].FastGetSolutionStepValue(VELOCITY);
                        }
                    }

                    ///COMMENT TO GET A CONTINOUS DISTANCE FUNCTION FIELD
                    if (is_water_particle[j])
                    {
                        distance=-1.0;
                    }
                    else
                    {
                        //if (mesh_distance<2.0)
                        distance=1.0;
                        //else
                        //	distance=3.0;
                    }

                    //water particles near air nodes use the water-only velocity average
                    if (distance<0.0 && sum_Ns_without_air_nodes>0.01)
                        vel = vel_without_air_nodes / sum_Ns_without_air_nodes ;
                    else
                        vel = vel_complete;

                    pparticle.GetEraseFlag()=false;

                    mparticles_vector[freeparticle]=pparticle;
                    element_particle_pointers(offset+number_of_particles_in_elem) = &mparticles_vector[freeparticle];
                    number_of_particles_in_elem++;

                    //NOTE(review): keep_looking is always false here (the search loop only exits
                    //after clearing it), so this guard looks unreachable — confirm intent.
                    if (keep_looking)
                    {
                        KRATOS_THROW_ERROR(std::logic_error, "FINISHED THE LIST AND COULDNT FIND A FREE CELL FOR THE NEW PARTICLE!", "");
                    }
                    else
                    {
                        reused_particles++;
                    }
                }
            }
        }
    }
    KRATOS_CATCH("")
}

///Mirrors a subsample (1 of every mfilter_factor) of the active particles into the
///node positions of 'lagrangian_model_part' so they can be written by the output tools.
///On first call the (empty) model part is filled with placeholder nodes.
void ExecuteParticlesPritingTool( ModelPart& lagrangian_model_part, int input_filter_factor )
{
    KRATOS_TRY
    //mfilter_factor; //we will only print one out of every "filter_factor" particles of the total particle list

    if(mparticle_printing_tool_initialized==false)
    {
        mfilter_factor=input_filter_factor;

        if(lagrangian_model_part.NodesBegin()-lagrangian_model_part.NodesEnd()>0)
            KRATOS_THROW_ERROR(std::logic_error, "AN EMPTY MODEL PART IS REQUIRED FOR THE PRINTING OF PARTICLES", "");

        lagrangian_model_part.AddNodalSolutionStepVariable(VELOCITY);
        lagrangian_model_part.AddNodalSolutionStepVariable(DISPLACEMENT);
        lagrangian_model_part.AddNodalSolutionStepVariable(DISTANCE);

        for (unsigned int i=0; i!=((mmaximum_number_of_particles*mnelems)/mfilter_factor)+mfilter_factor; i++)
        {
            Node < 3 > ::Pointer pnode = lagrangian_model_part.CreateNewNode( i+mlast_node_id+1 , 0.0, 0.0, 0.0);  //remember: this is the new (lagrangian) model part!!
            //pnode->SetBufferSize(mr_model_part.NodesBegin()->GetBufferSize());
            pnode->SetBufferSize(1);
        }
        mparticle_printing_tool_initialized=true;
    }

    //resetting data of the unused particles: park them far away from the domain
    const double inactive_particle_position= -10.0;
    array_1d<double,3>inactive_particle_position_vector;
    inactive_particle_position_vector(0)=inactive_particle_position;
    inactive_particle_position_vector(1)=inactive_particle_position;
    inactive_particle_position_vector(2)=inactive_particle_position;
    ModelPart::NodesContainerType::iterator inodebegin = lagrangian_model_part.NodesBegin();
    for(unsigned int ii=0; ii<lagrangian_model_part.Nodes().size(); ii++)
    {
        ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
        inode->FastGetSolutionStepValue(DISTANCE) = 0.0;
        inode->FastGetSolutionStepValue(VELOCITY) = ZeroVector(3);
        inode->FastGetSolutionStepValue(DISPLACEMENT) = inactive_particle_position_vector;
    }

    int counter=0;
    //ModelPart::NodesContainerType::iterator it_begin = lagrangian_model_part.NodesBegin();
    for (int i=0; i!=mmaximum_number_of_particles*mnelems; i++)
    {
        PFEM_Particle_Fluid& pparticle =mparticles_vector[i];
        if(pparticle.GetEraseFlag()==false && i%mfilter_factor==0)
        {
            ModelPart::NodesContainerType::iterator inode = inodebegin+counter;
            //copying info from the particle to the (printing) node.
            inode->FastGetSolutionStepValue(DISTANCE) = pparticle.GetDistance();
            inode->FastGetSolutionStepValue(VELOCITY) = pparticle.GetVelocity();
            inode->FastGetSolutionStepValue(DISPLACEMENT) = pparticle.Coordinates();
            counter++;
        }
    }
    KRATOS_CATCH("")
}

///Same as ExecuteParticlesPritingTool, but only prints water particles (distance<0)
///that live inside pure-air elements — i.e. detached droplets.
void ExecuteParticlesPritingToolForDroppletsOnly( ModelPart& lagrangian_model_part, int input_filter_factor )
{
    KRATOS_TRY
    //mfilter_factor; //we will only print one out of every "filter_factor" particles of the total particle list
    const int first_particle_id=1000000;
    if(mparticle_printing_tool_initialized==false)
    {
        mfilter_factor=input_filter_factor;

        if(lagrangian_model_part.NodesBegin()-lagrangian_model_part.NodesEnd()>0)
            KRATOS_THROW_ERROR(std::logic_error, "AN EMPTY MODEL PART IS REQUIRED FOR THE PRINTING OF PARTICLES", "");

        lagrangian_model_part.AddNodalSolutionStepVariable(VELOCITY);
        lagrangian_model_part.AddNodalSolutionStepVariable(DISPLACEMENT);
        lagrangian_model_part.AddNodalSolutionStepVariable(DISTANCE);

        for (unsigned int i=0; i!=((mmaximum_number_of_particles*mnelems)/mfilter_factor)+mfilter_factor; i++)
        {
            Node < 3 > ::Pointer pnode = lagrangian_model_part.CreateNewNode( i+first_particle_id+1 , 0.0, 0.0, 0.0);  //remember: this is the new (lagrangian) model part!!
//pnode->SetBufferSize(mr_model_part.NodesBegin()->GetBufferSize()); pnode->SetBufferSize(1); } mparticle_printing_tool_initialized=true; } //resetting data of the unused particles const double inactive_particle_position= -10.0; array_1d<double,3>inactive_particle_position_vector; inactive_particle_position_vector(0)=inactive_particle_position; inactive_particle_position_vector(1)=inactive_particle_position; inactive_particle_position_vector(2)=inactive_particle_position; ModelPart::NodesContainerType::iterator inodebegin = lagrangian_model_part.NodesBegin(); for(unsigned int ii=0; ii<lagrangian_model_part.Nodes().size(); ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; inode->FastGetSolutionStepValue(DISTANCE) = 0.0; inode->FastGetSolutionStepValue(VELOCITY) = ZeroVector(3); inode->FastGetSolutionStepValue(DISPLACEMENT) = inactive_particle_position_vector; } const int max_number_of_printed_particles=lagrangian_model_part.Nodes().size(); ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET]; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones. 
//(flag managed only by MoveParticles //KRATOS_WATCH(offset) ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin(); int counter=0; for(unsigned int ii=0; ii<mr_model_part.Elements().size(); ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; Element::Pointer pelement(*ielem.base()); Geometry<Node<3> >& geom = ielem->GetGeometry(); //double mean_elem_dist=0.0; bool pure_air_elem=true; for(unsigned int j=0; j<(TDim+1); j++) { if (geom[j].FastGetSolutionStepValue(DISTANCE)<0.0) pure_air_elem=false; //mean_elem_dist += geom[j].FastGetSolutionStepValue(DISTANCE); } //if (mean_elem_dist>0.0) //only air elements if (pure_air_elem==true) { ParticlePointerVector& element_particle_pointers = (ielem->GetValue(FLUID_PARTICLE_POINTERS)); int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_FLUID_PARTICLES); //std::cout << "elem " << ii << " with " << (unsigned int)number_of_particles_in_elem << " particles" << std::endl; for (int iii=0; iii<number_of_particles_in_elem ; iii++ ) { //KRATOS_WATCH(iii) if (iii>mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop! break; PFEM_Particle_Fluid & pparticle = element_particle_pointers[offset+iii]; bool erase_flag= pparticle.GetEraseFlag(); if (erase_flag==false && pparticle.GetDistance()<0.0) { ModelPart::NodesContainerType::iterator inode = inodebegin+counter; //copying info from the particle to the (printing) node. inode->FastGetSolutionStepValue(DISTANCE) = pparticle.GetDistance(); inode->FastGetSolutionStepValue(VELOCITY) = pparticle.GetVelocity(); inode->FastGetSolutionStepValue(DISPLACEMENT) = pparticle.Coordinates(); counter++; } } } if (counter>(max_number_of_printed_particles-30)) //we are approaching the end of the model part. so we stop before it's too late break; } KRATOS_CATCH("") } void AssignNodalVelocityUsingInletConditions(const double inlet_vel) { KRATOS_TRY //first we are going to delete all the velocities! 
    ModelPart::ConditionsContainerType::iterator iconditionbegin = mr_model_part.ConditionsBegin();
    vector<unsigned int> condition_partition;
#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif
    OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Conditions().size(), condition_partition);
    #pragma omp parallel for
    for(int kkk=0; kkk<number_of_threads; kkk++)
    {
        for(unsigned int ii=condition_partition[kkk]; ii<condition_partition[kkk+1]; ii++)
        {
            ModelPart::ConditionsContainerType::iterator icondition = iconditionbegin+ii;
            if ( icondition->GetValue(IS_INLET) > 0.5 )
            {
                Geometry<Node<3> >& geom = icondition->GetGeometry();
                array_1d<double,3> normal = ZeroVector(3);
                this->CalculateNormal(geom,normal);
                //velocity points against the (outward) normal, scaled to |inlet_vel|
                const double normal_lenght = sqrt(normal[0]*normal[0] + normal[1]*normal[1] + normal[2]*normal[2]);
                const array_1d<double,3> velocity = - inlet_vel/normal_lenght * normal;
                for (unsigned int l = 0; l < (TDim); l++)
                {
                    geom[l].SetLock(); //nodes can be shared by several conditions
                    geom[l].FastGetSolutionStepValue(VELOCITY) = velocity;
                    geom[l].UnSetLock();
                }
            }
        }
    }
    KRATOS_CATCH("")
}

///Rotates all particle velocities and all (non-fixed) nodal velocities by
///rotations[2] radians around the Z axis; rotations around X/Y are not implemented.
void RotateParticlesAndDomainVelocities(array_1d<double, 3 > rotations)
{
    KRATOS_TRY

    if(fabs(rotations[0])>0.000000001 || fabs(rotations[1])>0.000000001)
        KRATOS_THROW_ERROR(std::invalid_argument,"ROTATIONS ONLY IMPLEMENTED AROUND Z AXIS! (xy plane) ","");

    const double cosinus_theta = cos(rotations[2]);
    const double sinus_theta = sin(rotations[2]);

    //std::cout << "updating particles" << std::endl;
    ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
    //the array of pointers for each element has twice the required size so that we use
    //a part in odd timesteps and the other in even ones.
    const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET];
    //(flag managed only by MoveParticles
    //KRATOS_WATCH(offset)
    ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();

    vector<unsigned int> element_partition;
#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif
    OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

    #pragma omp parallel for
    for(int kkk=0; kkk<number_of_threads; kkk++)
    {
        for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
        {
            //const int & elem_id = ielem->Id();
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
            Element::Pointer pelement(*ielem.base());
            ParticlePointerVector& element_particle_pointers = (ielem->GetValue(FLUID_PARTICLE_POINTERS));
            int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_FLUID_PARTICLES);
            //std::cout << "elem " << ii << " with " << (unsigned int)number_of_particles_in_elem << " particles" << std::endl;
            for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
            {
                //KRATOS_WATCH(iii)
                //NOTE(review): '>' vs '>=' — same possible off-by-one as in the other particle loops; confirm.
                if (iii>mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
                    break;
                PFEM_Particle_Fluid & pparticle = element_particle_pointers[offset+iii];

                bool erase_flag= pparticle.GetEraseFlag();
                if (erase_flag==false)
                {
                    //in-plane rotation: v' = R(-theta as written) * v
                    array_1d<float, 3 > & vel = pparticle.GetVelocity();
                    const float vel_x = vel[0];
                    const float vel_y = vel[1];
                    vel[0] = cosinus_theta*vel_x + sinus_theta*vel_y;
                    vel[1] = cosinus_theta*vel_y - sinus_theta*vel_x;
                }
            }
        }
    }

    ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
    vector<unsigned int> node_partition;
    OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);
    #pragma omp parallel for
    for(int kkk=0; kkk<number_of_threads; kkk++)
    {
        for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
        {
            ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
            if (inode->IsFixed(VELOCITY_X)==false) //fixed (boundary) velocities are left untouched
            {
                array_1d<double, 3 > & vel = inode->FastGetSolutionStepValue(VELOCITY);
                const double vel_x = vel[0];
                const double vel_y = vel[1];
                vel[0] = cosinus_theta*vel_x + sinus_theta*vel_y;
                vel[1] = cosinus_theta*vel_y - sinus_theta*vel_x;
            }
        }
    }
    KRATOS_CATCH("")
}

protected:

private:

///Verifies that all nodal solution-step variables required by this utility are
///registered on the model part; throws std::invalid_argument otherwise.
void Check()
{
    if(mr_model_part.NodesBegin()->SolutionStepsDataHas(DISTANCE) == false)
        KRATOS_THROW_ERROR(std::invalid_argument,"missing DISTANCE variable on solution step data","");
    if(mr_model_part.NodesBegin()->SolutionStepsDataHas(PRESS_PROJ) == false)
        KRATOS_THROW_ERROR(std::invalid_argument,"missing PRESS_PROJ variable on solution step data","");
    if(mr_model_part.NodesBegin()->SolutionStepsDataHas(VELOCITY) == false)
        KRATOS_THROW_ERROR(std::invalid_argument,"missing VELOCITY variable on solution step data","");
    if(mr_model_part.NodesBegin()->SolutionStepsDataHas(PRESSURE) == false)
        KRATOS_THROW_ERROR(std::invalid_argument,"missing PRESSURE variable on solution step data","");
    if(mr_model_part.NodesBegin()->SolutionStepsDataHas(PROJECTED_VELOCITY) == false)
        KRATOS_THROW_ERROR(std::invalid_argument,"missing PROJECTED_VELOCITY variable on solution step data","");
    if(mr_model_part.NodesBegin()->SolutionStepsDataHas(DELTA_VELOCITY) == false)
        KRATOS_THROW_ERROR(std::invalid_argument,"missing DELTA_VELOCITY variable on solution step data","");
    if(mr_model_part.NodesBegin()->SolutionStepsDataHas(MESH_VELOCITY) == false)
        KRATOS_THROW_ERROR(std::invalid_argument,"missing MESH_VELOCITY variable on solution step data","");
    if(mr_model_part.NodesBegin()->SolutionStepsDataHas(YP) == false)
        KRATOS_THROW_ERROR(std::invalid_argument,"missing YP variable on solution step data","");
    if(mr_model_part.NodesBegin()->SolutionStepsDataHas(NORMAL) == false)
        KRATOS_THROW_ERROR(std::invalid_argument,"missing NORMAL variable on solution step data","");
    if(mr_model_part.NodesBegin()->SolutionStepsDataHas(NODAL_AREA) == false)
        KRATOS_THROW_ERROR(std::invalid_argument,"missing NODAL_AREA variable on solution step data","");
}

///this function moves a particle along the interpolated mesh velocity field
///(the original doc mentioned "rVariable", which does not appear here).
///The movement is performed in nsubsteps, during a total time of Dt (DELTA_TIME).
///If 'discriminate_streamlines' is true, water particles (distance<0) only use the
///velocity of water nodes; elements crossed along the way are cached in
///'elements_in_trajectory'. On failure to locate the particle, its erase flag is set.
void MoveParticle( PFEM_Particle_Fluid & pparticle,
                   Element::Pointer & pelement,
                   WeakPointerVector< Element >& elements_in_trajectory,
                   unsigned int & number_of_elements_in_trajectory,
                   ResultIteratorType result_begin,
                   const unsigned int MaxNumberOfResults,
                   const array_1d<double,3> mesh_displacement,
                   const bool discriminate_streamlines,
                   const bool use_mesh_velocity_to_convect)
{
    ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
    double delta_t = CurrentProcessInfo[DELTA_TIME];
    array_1d<double,3> & gravity= CurrentProcessInfo[GRAVITY];
    unsigned int nsubsteps;
    double substep_dt;

    bool KEEP_INTEGRATING=false;
    bool is_found;
    //bool have_air_node;
    //bool have_water_node;

    array_1d<double,3> vel;
    array_1d<double,3> vel_without_other_phase_nodes=ZeroVector(3);
    array_1d<double,3> position;
    array_1d<double,3> mid_position;
    array_1d<double,TDim+1> N;
    //we start with the first position, then it will enter the loop.
    position = pparticle.Coordinates(); //initial coordinates

    const float particle_distance = pparticle.GetDistance();
    array_1d<float,3> particle_velocity = pparticle.GetVelocity();
    //double distance=0.0;
    array_1d<double,3> last_useful_vel;

    double sum_Ns_without_other_phase_nodes;
    //double pressure=0.0;
    ///*****
    //bool flying_water_particle=true;
    //if a water particle does not find a water element in its whole path, then we add the gravity*dt

    double only_integral  = 0.0 ;

    is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //good, now we know where this point is:
    if(is_found == true)
    {
        KEEP_INTEGRATING=true;
        Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
        vel=ZeroVector(3);
        vel_without_other_phase_nodes = ZeroVector(3);
        sum_Ns_without_other_phase_nodes=0.0;
        //distance=0.0;

        if (particle_distance<0.0 && discriminate_streamlines==true)
        {
            //water particle: prefer the velocity interpolated from water nodes only
            for(unsigned int j=0; j<(TDim+1); j++)
            {
                if ((geom[j].FastGetSolutionStepValue(DISTANCE))<0.0) //ok. useful info!
                {
                    sum_Ns_without_other_phase_nodes += N[j];
                    noalias(vel_without_other_phase_nodes) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
                    if (use_mesh_velocity_to_convect)
                        noalias(vel_without_other_phase_nodes) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j];
                }
                noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
                if (use_mesh_velocity_to_convect)
                    noalias(vel) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j];
            }

            if (sum_Ns_without_other_phase_nodes>0.01)
            {
                vel = vel_without_other_phase_nodes / sum_Ns_without_other_phase_nodes;
                //flying_water_particle=false;
            }
            else
            {
                //no water nodes around: keep the particle's own (ballistic) velocity
                vel = particle_velocity;
                if (use_mesh_velocity_to_convect)
                {
                    for(unsigned int j=0; j<(TDim+1); j++)
                        noalias(vel) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j];
                }
            }
        }
        else // air particle or we are not following streamlines
        {
            for(unsigned int j=0; j<(TDim+1); j++)
            {
                noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
                if (use_mesh_velocity_to_convect)
                    noalias(vel) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j];
            }
            //flying_water_particle=false;
        }

        //calculating substep to get +- courant(substep) = 0.1
        nsubsteps = 10.0 * (delta_t * pelement->GetValue(VELOCITY_OVER_ELEM_SIZE));
        if (nsubsteps<1)
            nsubsteps=1;
        substep_dt = delta_t / double(nsubsteps);

        only_integral = 1.0;// weight;//*double(nsubsteps);
        position += vel*substep_dt;//weight;

        ///*****
        last_useful_vel=vel;
        ///*****

        //DONE THE FIRST LOCATION OF THE PARTICLE, NOW WE PROCEED TO STREAMLINE INTEGRATION USING THE MESH VELOCITY
        //////////////////////////////////////////////////////////////////////////////////////////////////////
        unsigned int check_from_element_number=0;

        for(unsigned int i=0; i<(nsubsteps-1); i++)// this is for the substeps n+1. in the first one we already knew the position of the particle.
        {
            if (KEEP_INTEGRATING==true)
            {
                is_found = FindNodeOnMesh(position, N ,pelement,elements_in_trajectory,number_of_elements_in_trajectory,check_from_element_number,result_begin,MaxNumberOfResults); //good, now we know where this point is:
                if(is_found == true)
                {
                    Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in

                    sum_Ns_without_other_phase_nodes=0.0;

                    if (particle_distance<0.0 && discriminate_streamlines==true)
                    {
                        vel_without_other_phase_nodes = ZeroVector(3);
                        //NOTE(review): 'vel' is not reset here before the += below; the accumulated
                        //value is overwritten in both branches afterwards, so it appears harmless — confirm.
                        for(unsigned int j=0; j<TDim+1; j++)
                        {
                            if ((geom[j].FastGetSolutionStepValue(DISTANCE))<0.0) //ok. useful info!
                            {
                                sum_Ns_without_other_phase_nodes += N[j];
                                noalias(vel_without_other_phase_nodes) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
                                if (use_mesh_velocity_to_convect)
                                    noalias(vel_without_other_phase_nodes) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j];
                            }
                            noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
                            if (use_mesh_velocity_to_convect)
                                noalias(vel) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j];
                        }

                        //if (have_water_node)
                        //if (distance<0.0)
                        if (sum_Ns_without_other_phase_nodes>0.01)
                        {
                            vel = vel_without_other_phase_nodes / sum_Ns_without_other_phase_nodes;
                            //flying_water_particle=false;
                        }
                        else
                        {
                            //free-flying water particle: ballistic motion under gravity
                            particle_velocity += substep_dt * gravity;
                            vel = particle_velocity;
                            if (use_mesh_velocity_to_convect)
                            {
                                for(unsigned int j=0; j<(TDim+1); j++)
                                    noalias(vel) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j];
                            }
                        }
                    }
                    else //air particle or we are not discriminating streamlines
                    {
                        vel_without_other_phase_nodes = ZeroVector(3);
                        vel = ZeroVector(3);
                        for(unsigned int j=0; j<(TDim+1); j++)
                        {
                            noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
                            if (use_mesh_velocity_to_convect)
                                noalias(vel) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j];
                        }
                        //flying_water_particle=false;
                    }

                    only_integral += 1.0; //values saved for the current time step
                    position+=vel*substep_dt;//weight;
                }
                else
                {
                    KEEP_INTEGRATING=false;
                    break;
                }
            }
            else
                break;
        }
    }
    //if there's a mesh velocity, we add it at the end in a single step (continues below):
a mesh velocity, we add it at the end in a single step: position-=mesh_displacement; if (KEEP_INTEGRATING==false) (pparticle.GetEraseFlag()=true); else is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //we must save the pointer of the last element that we're in (inside the pointervector pelement) if (is_found==false) ( pparticle.GetEraseFlag()=true); pparticle.Coordinates() = position; } void AccelerateParticleUsingDeltaVelocity( PFEM_Particle_Fluid & pparticle, Element::Pointer & pelement, Geometry< Node<3> >& geom) { array_1d<double,TDim+1> N; ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); const double delta_t = CurrentProcessInfo[DELTA_TIME]; array_1d<double,3> gravity = CurrentProcessInfo[GRAVITY]; //we start with the first position, then it will enter the loop. array_1d<double,3> coords = pparticle.Coordinates(); float & particle_distance = pparticle.GetDistance(); //double distance=0.0; array_1d<double,3> delta_velocity = ZeroVector(3); array_1d<double,3> delta_velocity_without_air = ZeroVector(3); array_1d<double,3> delta_velocity_without_water = ZeroVector(3); double sum_Ns_without_water_nodes = 0.0; double sum_Ns_without_air_nodes = 0.0; bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N); if(is_found == false) { KRATOS_WATCH(N) for (int j=0 ; j!=(TDim+1); j++) if (N[j]<0.0 ) N[j]=1e-10; } if (particle_distance>0.0) //no problem. air { for(unsigned int j=0; j<(TDim+1); j++) { //just for air if ((geom[j].FastGetSolutionStepValue(DISTANCE))>0.0) { noalias(delta_velocity_without_water) += geom[j].FastGetSolutionStepValue(DELTA_VELOCITY)*N[j]; sum_Ns_without_air_nodes += N[j]; } //both air and water noalias(delta_velocity) += geom[j].FastGetSolutionStepValue(DELTA_VELOCITY)*N[j]; } if (sum_Ns_without_water_nodes>0.01) { //delta_velocity = delta_velocity_without_water/sum_Ns_without_water_nodes ; //commented = using all the velocities always! 
}
//else we use the complete field
}
else //water particle
{
    for(unsigned int j=0; j<(TDim+1); j++)
    {
        //water nodes only: accumulate the water-phase projection and its weight
        if ((geom[j].FastGetSolutionStepValue(DISTANCE))<0.0)
        {
            noalias(delta_velocity_without_air) += geom[j].FastGetSolutionStepValue(DELTA_VELOCITY)*N[j];
            sum_Ns_without_air_nodes += N[j];
        }
        //both phases together (fallback field)
        noalias(delta_velocity) += geom[j].FastGetSolutionStepValue(DELTA_VELOCITY)*N[j];
    }
    if (sum_Ns_without_air_nodes>0.01)
    {
        delta_velocity = delta_velocity_without_air/sum_Ns_without_air_nodes ;
    }
    else
    {
        //no usable water node around: buoyancy-style estimate from gravity
        if (mDENSITY_WATER>(10.0*mDENSITY_AIR))
        {
            delta_velocity=gravity*(1.0-mDENSITY_AIR/mDENSITY_WATER)*delta_t;
        }
    }
}
pparticle.GetVelocity() = pparticle.GetVelocity() + delta_velocity;
}

//Integrates a particle BACKWARDS along the velocity field (with optional mesh
//velocity subtracted) using Courant-limited substeps, then interpolates the
//velocity and the (sign-sharpened) distance at the origin position found.
void MoveParticle_inverse_way(
    PFEM_Particle_Fluid & pparticle,
    Element::Pointer & pelement, //NOT A REFERENCE!! WE SHALL NOT OVERWRITE THE ELEMENT IT BELONGS TO!
    ResultIteratorType result_begin,
    const unsigned int MaxNumberOfResults,
    const bool use_mesh_velocity_to_convect)
{
    ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
    double delta_t = CurrentProcessInfo[DELTA_TIME];
    unsigned int nsubsteps;
    double substep_dt;

    bool KEEP_INTEGRATING=false;
    bool is_found;

    array_1d<double,3> vel;
    array_1d<double,3> particle_vel;
    array_1d<double,3> position;
    array_1d<double,3> mid_position;
    array_1d<double,TDim+1> N;

    //we start with the first position, then it will enter the loop.
    position = pparticle.Coordinates(); // + (pparticle)->FastGetSolutionStepValue(DISPLACEMENT); //initial coordinates

    float & distance = pparticle.GetDistance();
    double only_integral = 0.0 ;

    is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults);
    //good, now we know where this point is:
    if(is_found == true)
    {
        KEEP_INTEGRATING=true;
        Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
        vel=ZeroVector(3);
        particle_vel=ZeroVector(3);
        distance=0.0;

        //interpolate distance, particle velocity and convection velocity at the start point
        for(unsigned int j=0; j<(TDim+1); j++)
        {
            distance += geom[j].FastGetSolutionStepValue(DISTANCE)*N(j);
            noalias(particle_vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
            noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
            if (use_mesh_velocity_to_convect)
                noalias(vel) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j];
        }

        //calculating substep to get +- courant(substep) = 1/4
        nsubsteps = 10.0 * (delta_t * pelement->GetValue(VELOCITY_OVER_ELEM_SIZE));
        if (nsubsteps<1)
            nsubsteps=1;
        substep_dt = delta_t / double(nsubsteps);

        only_integral = 1.0;// weight;//*double(nsubsteps);

        //backwards convection: subtract instead of add
        position -= vel*substep_dt;//weight;

        for(unsigned int i=0; i<(nsubsteps-1); i++)// this is for the substeps n+1. in the first one we already knew the position of the particle.
        {
            if (KEEP_INTEGRATING==true)
            {
                is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults);
                //good, now we know where this point is:
                if(is_found == true)
                {
                    Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
                    vel=ZeroVector(3);
                    particle_vel=ZeroVector(3);
                    distance=0.0;

                    for(unsigned int j=0; j<(TDim+1); j++)
                    {
                        noalias(particle_vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j] ;
                        noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j] ;
                        distance += geom[j].FastGetSolutionStepValue(DISTANCE)*N(j);
                        if (use_mesh_velocity_to_convect)
                            noalias(vel) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j];
                    }
                    only_integral += 1.0;//weight ;

                    //values saved for the current time step
                    position-=vel*substep_dt;//weight;
                }
                else
                    //left the domain: stop integrating this particle
                    KEEP_INTEGRATING=false;
            }
        }

        ///COMMENT TO GET A A CONTINOUS DISTANCE FUNCTION FIELD!!!!!
        //sharpen the interpolated distance to a pure phase flag (+1 air / -1 water)
        if(distance>0.0)
        {
            //if(distance<2.0)
            distance=1.0;
            //else
            //	distance=3.0;
        }
        else
            distance=-1.0;

        pparticle.GetVelocity()=particle_vel;
    }
    //else {KRATOS_WATCH(position); }
}

//Overwrites the particle phase using an auxiliary "topographic" model part:
//particles whose (offset) position falls inside that domain are flagged as
//solid/water (distance -1), everything else as air (+1); velocity is zeroed.
void OverwriteParticleDataUsingTopographicDomain(
    PFEM_Particle_Fluid & pparticle,
    Element::Pointer & pelement,
    array_1d<double,3> domains_offset,
    ResultIteratorType result_begin,
    const unsigned int MaxNumberOfResults)
{
    array_1d<double,TDim+1> N;
    //we start with the first position, then it will enter the loop.
array_1d<double,3> coords = pparticle.Coordinates()+domains_offset;
float & particle_distance = pparticle.GetDistance();

bool is_found = FindNodeOnTopographicMesh(coords, N ,pelement,result_begin,MaxNumberOfResults);
//good, now we know where this point is:
if (is_found) //it is part of the solid topographic domain
{
    particle_distance= -1.0;
}
else //it is outside the topographic domain, therefore it is air or whatever it means
{
    particle_distance= 1.0;
}
pparticle.GetVelocity() = ZeroVector(3);
}

///this function should find the element into which a given node is located
///and return a pointer to the element and the vector containing the
///shape functions that define the position within the element
///if "false" is returned the element is not found
///Search order: (1) the element the particle was last in, (2) its neighbours,
///(3) the spatial bins container.
bool FindNodeOnMesh( array_1d<double,3>& position,
                     array_1d<double,TDim+1>& N,
                     Element::Pointer & pelement,
                     ResultIteratorType result_begin,
                     const unsigned int MaxNumberOfResults)
{
    typedef std::size_t SizeType;

    const array_1d<double,3>& coords = position;
    array_1d<double,TDim+1> aux_N;
    //before using the bin to search for possible elements we check first the last element in which the particle was.
    Geometry<Node<3> >& geom_default = pelement->GetGeometry(); //(*(i))->GetGeometry();
    bool is_found_1 = CalculatePosition(geom_default,coords[0],coords[1],coords[2],N);
    if(is_found_1 == true) //that was easy!
    {
        return true;
    }

    //to begin with we check the neighbour elements; it is a bit more expensive
    WeakPointerVector< Element >& neighb_elems = pelement->GetValue(NEIGHBOUR_ELEMENTS);
    //the first we check is the one that has negative shape function, because it means it went outside in this direction:
    //commented, it is not faster than simply checking all the neighbours (branching)
    /*
    unsigned int checked_element=0;
    for (unsigned int i=0;i!=(TDim+1);i++)
    {
        if (N[i]<0.0)
        {
            checked_element=i;
            Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
            bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N);
            if (is_found_2)
            {
                pelement=Element::Pointer(((neighb_elems(i))));
                N=aux_N;
                return true;
            }
            break;
        }
    }
    */

    //we check all the neighbour elements
    for (unsigned int i=0;i!=(neighb_elems.size());i++)
    {
        Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
        bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
        if (is_found_2)
        {
            pelement=Element::Pointer(((neighb_elems(i))));
            return true;
        }
    }

    //if checking all the neighbour elements did not work, we have to use the bins
    //ask to the container for the list of candidate elements
    SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{coords}, result_begin, MaxNumberOfResults );

    if(results_found>0)
    {
        //loop over the candidate elements and check if the particle falls within
        for(SizeType i = 0; i< results_found; i++)
        {
            Geometry<Node<3> >& geom = (*(result_begin+i))->GetGeometry();

            //find local position
            bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N);

            if(is_found == true)
            {
                pelement=Element::Pointer((*(result_begin+i)));
                return true;
            }
        }
    }

    //if nothing worked, then:
    //not found case
    return false;
}

// VERSION INCLUDING PREDEFINED ELEMENTS FOLLOWING A TRAJECTORY
//(same search as above, but first walks the list of elements already crossed by
//previous particles launched from the same element, and appends new hits to it)
bool FindNodeOnMesh( array_1d<double,3>& position,
                     array_1d<double,TDim+1>& N,
                     Element::Pointer & pelement,
                     WeakPointerVector< Element >& elements_in_trajectory,
                     unsigned int &
number_of_elements_in_trajectory,
                     unsigned int & check_from_element_number,
                     ResultIteratorType result_begin,
                     const unsigned int MaxNumberOfResults)
{
    typedef std::size_t SizeType;

    const array_1d<double,3>& coords = position;
    array_1d<double,TDim+1> aux_N;
    //before using the bin to search for possible elements we check first the last element in which the particle was.
    Geometry<Node<3> >& geom_default = pelement->GetGeometry(); //(*(i))->GetGeometry();
    bool is_found_1 = CalculatePosition(geom_default,coords[0],coords[1],coords[2],N);
    if(is_found_1 == true)
    {
        return true; //that was easy!
    }

    //if it was not found in the first element, we can proceed to check in the following elements (in the trajectory defined by previous particles that started from the same element.
    for (unsigned int i=(check_from_element_number);i!=number_of_elements_in_trajectory;i++)
    {
        Geometry<Node<3> >& geom = elements_in_trajectory[i].GetGeometry();
        bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N);
        if (is_found_2)
        {
            pelement=Element::Pointer(((elements_in_trajectory(i))));
            N=aux_N;
            check_from_element_number = i+1 ; //now i element matches pelement, so to avoid checking twice the same element we send the counter to the following element.
            return true;
        }
    }

    //now we check the neighbour elements:
    WeakPointerVector< Element >& neighb_elems = pelement->GetValue(NEIGHBOUR_ELEMENTS);
    //the first we check is the one that has negative shape function, because it means it went outside in this direction:
    //commented, it is not faster than simply checking all the neighbours (branching)
    /*
    unsigned int checked_element=0;
    for (unsigned int i=0;i!=(TDim+1);i++)
    {
        if (N[i]<0.0)
        {
            checked_element=i;
            Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
            bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N);
            if (is_found_2)
            {
                pelement=Element::Pointer(((neighb_elems(i))));
                N=aux_N;
                return true;
            }
            break;
        }
    }
    */

    //we check all the neighbour elements
    for (unsigned int i=0;i!=(neighb_elems.size());i++)
    {
        Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
        bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
        if (is_found_2)
        {
            pelement=Element::Pointer(((neighb_elems(i))));
            //record the newly crossed element in the shared trajectory (capped at 20)
            if (number_of_elements_in_trajectory<20)
            {
                elements_in_trajectory(number_of_elements_in_trajectory)=pelement;
                number_of_elements_in_trajectory++;
                check_from_element_number = number_of_elements_in_trajectory;  //we do it after doing the ++ to the counter, so we wouldn't enter the loop that searches in the elements_in_trajectory list. we are the particle that is adding elements to the list
            }
            return true;
        }
    }

    //if checking all the neighbour elements did not work, we have to use the bins
    //ask to the container for the list of candidate elements
    SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{coords}, result_begin, MaxNumberOfResults );

    if(results_found>0)
    {
        //loop over the candidate elements and check if the particle falls within
        for(SizeType i = 0; i< results_found; i++)
        {
            Geometry<Node<3> >& geom = (*(result_begin+i))->GetGeometry();

            //find local position
            bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N);

            if(is_found == true)
            {
                pelement=Element::Pointer((*(result_begin+i)));
                //record the newly crossed element in the shared trajectory (capped at 20)
                if (number_of_elements_in_trajectory<20)
                {
                    elements_in_trajectory(number_of_elements_in_trajectory)=pelement;
                    number_of_elements_in_trajectory++;
                    check_from_element_number = number_of_elements_in_trajectory;  //we do it after doing the ++ to the counter, so we wouldn't enter the loop that searches in the elements_in_trajectory list. we are the particle that is adding elements to the list
                }
                return true;
            }
        }
    }

    //not found case
    return false;
}

///this function should find the element into which a given node is located
///and return a pointer to the element and the vector containing the
///shape functions that define the position within the element
///if "false" is returned the element is not found
///(same search strategy as FindNodeOnMesh, but on the topographic bins)
bool FindNodeOnTopographicMesh( array_1d<double,3>& position,
                                array_1d<double,TDim+1>& N,
                                Element::Pointer & pelement,
                                ResultIteratorType result_begin,
                                const unsigned int MaxNumberOfResults)
{
    typedef std::size_t SizeType;

    const array_1d<double,3>& coords = position;
    array_1d<double,TDim+1> aux_N;
    //before using the bin to search for possible elements we check first the last element in which the particle was.
//ModelPart::ElementsContainerType::iterator i = mr_model_part.ElementsBegin()+last_element;
Geometry<Node<3> >& geom_default = pelement->GetGeometry(); //(*(i))->GetGeometry();
bool is_found_1 = CalculatePosition(geom_default,coords[0],coords[1],coords[2],N);
if(is_found_1 == true)
{
    //pelement = (*(i));
    return true;
}

//to begin with we check the neighbour elements:
WeakPointerVector< Element >& neighb_elems = pelement->GetValue(NEIGHBOUR_ELEMENTS);
for (unsigned int i=0;i!=(neighb_elems.size());i++)
{
    Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
    bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
    if (is_found_2)
    {
        pelement=Element::Pointer(((neighb_elems(i))));
        return true;
    }
}

//ask to the container for the list of candidate elements
SizeType results_found = mpTopographicBinsObjectDynamic->SearchObjectsInCell(Point{coords}, result_begin, MaxNumberOfResults );
//KRATOS_WATCH(results_found)

if(results_found>0)
{
    //loop over the candidate elements and check if the particle falls within
    for(SizeType i = 0; i< results_found; i++)
    {
        Geometry<Node<3> >& geom = (*(result_begin+i))->GetGeometry();

        //find local position
        bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N);

        if(is_found == true)
        {
            pelement=Element::Pointer((*(result_begin+i)));
            return true;
        }
    }
}

//not found case
return false;
}

//***************************************
//***************************************

//2D: barycentric (area) coordinates of point (xc,yc) in triangle "geom";
//returns true when the point lies inside (all N in [0,1]). Throws on a
//degenerate (zero-area) element.
inline bool CalculatePosition(Geometry<Node < 3 > >&geom,
        const double xc, const double yc, const double zc,
        array_1d<double, 3 > & N
        )
{
    double x0 = geom[0].X();
    double y0 = geom[0].Y();
    double x1 = geom[1].X();
    double y1 = geom[1].Y();
    double x2 = geom[2].X();
    double y2 = geom[2].Y();

    double area = CalculateVol(x0, y0, x1, y1, x2, y2);
    double inv_area = 0.0;
    if (area == 0.0)
    {
        KRATOS_THROW_ERROR(std::logic_error, "element with zero area found", "");
    }
    else
    {
        inv_area = 1.0 / area;
    }

    N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
    N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
    N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;
    //KRATOS_WATCH(N);

    if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true
        return true;

    return false;
}

////////////
//using the pre loaded nodal coordinates
//(same 2D test, but taking the flattened node coordinate array instead of the geometry)
inline bool CalculatePosition(const array_1d<double,3*(TDim+1)>& nodes_positions,
        const double xc, const double yc, const double zc,
        array_1d<double, 3 > & N
        )
{
    const double& x0 = nodes_positions[0];
    const double& y0 = nodes_positions[1];
    const double& x1 = nodes_positions[3];
    const double& y1 = nodes_positions[4];
    const double& x2 = nodes_positions[6];
    const double& y2 = nodes_positions[7];

    double area = CalculateVol(x0, y0, x1, y1, x2, y2);
    double inv_area = 0.0;
    if (area == 0.0)
    {
        KRATOS_THROW_ERROR(std::logic_error, "element with zero area found", "");
    }
    else
    {
        inv_area = 1.0 / area;
    }

    N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
    N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
    N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;
    //KRATOS_WATCH(N);

    if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true
        return true;

    return false;
}

//***************************************
//***************************************

//3D: barycentric (volume) coordinates of point (xc,yc,zc) in tetrahedron "geom";
//returns true when the point lies inside. Throws on a (near) zero-volume element.
inline bool CalculatePosition(Geometry<Node < 3 > >&geom,
        const double xc, const double yc, const double zc,
        array_1d<double, 4 > & N
        )
{
    double x0 = geom[0].X();
    double y0 = geom[0].Y();
    double z0 = geom[0].Z();
    double x1 = geom[1].X();
    double y1 = geom[1].Y();
    double z1 = geom[1].Z();
    double x2 = geom[2].X();
    double y2 = geom[2].Y();
    double z2 = geom[2].Z();
    double x3 = geom[3].X();
    double y3 = geom[3].Y();
    double z3 = geom[3].Z();

    double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
    double inv_vol = 0.0;
    if (vol < 0.000000000000000000000000000001)
    {
        KRATOS_THROW_ERROR(std::logic_error, "element with zero vol found", "");
    }
    else
    {
        inv_vol = 1.0 / vol;
    }

    N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol;
    N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol;
    N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol;
    N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol;

    if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0) //if the xc yc zc is inside the tetrahedron return true
        return true;

    return false;
}

///////////////////
//using the pre loaded nodal coordinates
//(same 3D test, but taking the flattened node coordinate array instead of the geometry)
inline bool CalculatePosition(const array_1d<double,3*(TDim+1)>& nodes_positions,
        const double xc, const double yc, const double zc,
        array_1d<double, 4 > & N
        )
{
    const double& x0 = nodes_positions[0];
    const double& y0 = nodes_positions[1];
    const double& z0 = nodes_positions[2];
    const double& x1 = nodes_positions[3];
    const double& y1 = nodes_positions[4];
    const double& z1 = nodes_positions[5];
    const double& x2 = nodes_positions[6];
    const double& y2 = nodes_positions[7];
    const double& z2 = nodes_positions[8];
    const double& x3 = nodes_positions[9];
    const double& y3 = nodes_positions[10];
    const double& z3 = nodes_positions[11];

    double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
    double inv_vol = 0.0;
    if (vol < 0.000000000000000000000000000001)
    {
        KRATOS_THROW_ERROR(std::logic_error, "element with zero vol found", "");
    }
    else
    {
        inv_vol = 1.0 / vol;
    }

    N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol;
    N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol;
    N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol;
    N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol;

    if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0) //if the xc yc zc is inside the tetrahedron return true
        return true;

    return false;
}

//signed double-area formula: 0.5 * cross product of the two edge vectors
inline double CalculateVol(const double x0, const double y0,
        const double x1, const double y1,
        const double x2, const double y2
        )
{
    return 0.5 * ((x1 - x0)*(y2 - y0)- (y1 - y0)*(x2 - x0));
}

//***************************************
//***************************************

//signed tetrahedron volume: det(J)/6 from the three edge vectors at node 0
inline double CalculateVol(const double x0, const double y0, const double z0,
        const double x1, const double y1, const double z1,
        const double x2, const double y2, const double z2,
        const double x3, const double y3, const double z3
        )
{
    double x10 = x1 - x0;
    double y10 = y1 - y0;
    double z10 = z1 - z0;

    double x20 = x2 - x0;
    double y20 = y2 - y0;
    double z20 = z2 - z0;

    double x30 = x3 - x0;
    double y30 = y3 - y0;
    double z30 = z3 - z0;

    double detJ = x10 * y20 * z30 - x10 * y30 * z20 + y10 * z20 * x30 - y10 * x20 * z30 + z10 * x20 * y30 - z10 * y20 * x30;
    return detJ * 0.1666666666666666666667;
}

//4 sample points inside a triangle: one biased towards each node plus the centroid.
//NOTE(review): despite the name, the N/pos matrices are sized 7 but only rows 0-3 are filled.
void ComputeGaussPointPositions_4(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 7, 3 > & pos,BoundedMatrix<double, 7, 3 > & N)
{
    double one_third = 1.0 / 3.0;
    double one_sixt = 0.15; //1.0 / 6.0;
    double two_third = 0.7; //2.0 * one_third;

    N(0, 0) = one_sixt;
    N(0, 1) = one_sixt;
    N(0, 2) = two_third;

    N(1, 0) = two_third;
    N(1, 1) = one_sixt;
    N(1, 2) = one_sixt;

    N(2, 0) = one_sixt;
    N(2, 1) = two_third;
    N(2, 2) = one_sixt;

    N(3, 0) = one_third;
    N(3, 1) = one_third;
    N(3, 2) = one_third;

    //first
    pos(0, 0) = one_sixt * geom[0].X() + one_sixt * geom[1].X() + two_third * geom[2].X();
    pos(0, 1) = one_sixt * geom[0].Y() + one_sixt * geom[1].Y() + two_third * geom[2].Y();
    pos(0, 2) = one_sixt * geom[0].Z() + one_sixt * geom[1].Z() + two_third * geom[2].Z();

    //second
    pos(1, 0) = two_third * geom[0].X() + one_sixt * geom[1].X() + one_sixt * geom[2].X();
    pos(1, 1) = two_third * geom[0].Y() + one_sixt * geom[1].Y() + one_sixt * geom[2].Y();
    pos(1, 2) = two_third * geom[0].Z() + one_sixt * geom[1].Z() + one_sixt * geom[2].Z();
//third
pos(2, 0) = one_sixt * geom[0].X() + two_third * geom[1].X() + one_sixt * geom[2].X();
pos(2, 1) = one_sixt * geom[0].Y() + two_third * geom[1].Y() + one_sixt * geom[2].Y();
pos(2, 2) = one_sixt * geom[0].Z() + two_third * geom[1].Z() + one_sixt * geom[2].Z();

//fourth
pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X();
pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y();
pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z();
}

//7 sample points in a triangle used when reseeding after the solution step (2D):
//three near the vertices, the centroid, and three near the edge midpoints.
void ComputeGaussPointPositionsForPostReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 7, 3 > & pos,BoundedMatrix<double, 7, 3 > & N) //2d
{
    double one_third = 1.0 / 3.0;
    double one_eight = 0.12; //1.0 / 6.0;
    double three_quarters = 0.76; //2.0 * one_third;

    N(0, 0) = one_eight;
    N(0, 1) = one_eight;
    N(0, 2) = three_quarters;

    N(1, 0) = three_quarters;
    N(1, 1) = one_eight;
    N(1, 2) = one_eight;

    N(2, 0) = one_eight;
    N(2, 1) = three_quarters;
    N(2, 2) = one_eight;

    N(3, 0) = one_third;
    N(3, 1) = one_third;
    N(3, 2) = one_third;

    N(4, 0) = one_eight;
    N(4, 1) = 0.44;
    N(4, 2) = 0.44;

    N(5, 0) = 0.44;
    N(5, 1) = one_eight;
    N(5, 2) = 0.44;

    N(6, 0) = 0.44;
    N(6, 1) = 0.44;
    N(6, 2) = one_eight;

    //first
    pos(0, 0) = one_eight * geom[0].X() + one_eight * geom[1].X() + three_quarters * geom[2].X();
    pos(0, 1) = one_eight * geom[0].Y() + one_eight * geom[1].Y() + three_quarters * geom[2].Y();
    pos(0, 2) = one_eight * geom[0].Z() + one_eight * geom[1].Z() + three_quarters * geom[2].Z();

    //second
    pos(1, 0) = three_quarters * geom[0].X() + one_eight * geom[1].X() + one_eight * geom[2].X();
    pos(1, 1) = three_quarters * geom[0].Y() + one_eight * geom[1].Y() + one_eight * geom[2].Y();
    pos(1, 2) = three_quarters * geom[0].Z() + one_eight * geom[1].Z() + one_eight * geom[2].Z();

    //third
    pos(2, 0) = one_eight * geom[0].X() + three_quarters * geom[1].X() + one_eight * geom[2].X();
    pos(2, 1) = one_eight * geom[0].Y() + three_quarters * geom[1].Y() + one_eight * geom[2].Y();
    pos(2, 2) = one_eight * geom[0].Z() + three_quarters * geom[1].Z() + one_eight * geom[2].Z();

    //fourth
    pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X();
    pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y();
    pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z();

    //fifth
    pos(4, 0) = one_eight * geom[0].X() + 0.44 * geom[1].X() + 0.44 * geom[2].X();
    pos(4, 1) = one_eight * geom[0].Y() + 0.44 * geom[1].Y() + 0.44 * geom[2].Y();
    pos(4, 2) = one_eight * geom[0].Z() + 0.44 * geom[1].Z() + 0.44 * geom[2].Z();

    //sixth
    pos(5, 0) = 0.44 * geom[0].X() + one_eight * geom[1].X() + 0.44 * geom[2].X();
    pos(5, 1) = 0.44 * geom[0].Y() + one_eight * geom[1].Y() + 0.44 * geom[2].Y();
    pos(5, 2) = 0.44 * geom[0].Z() + one_eight * geom[1].Z() + 0.44 * geom[2].Z();

    //seventh
    pos(6, 0) = 0.44 * geom[0].X() + 0.44 * geom[1].X() + one_eight * geom[2].X();
    pos(6, 1) = 0.44 * geom[0].Y() + 0.44 * geom[1].Y() + one_eight * geom[2].Y();
    pos(6, 2) = 0.44 * geom[0].Z() + 0.44 * geom[1].Z() + one_eight * geom[2].Z();
}

//9 sample points in a tetrahedron used when reseeding after the solution step (3D):
//four near the vertices, the centroid, and four on the opposite-face side of each vertex.
void ComputeGaussPointPositionsForPostReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 9, 3 > & pos,BoundedMatrix<double, 9, 4 > & N) //3D
{
    double one_quarter = 0.25;
    double small_fraction = 0.1; //1.0 / 6.0;
    double big_fraction = 0.7; //2.0 * one_third;
    double mid_fraction = 0.3; //2.0 * one_third;

    N(0, 0) = big_fraction;
    N(0, 1) = small_fraction;
    N(0, 2) = small_fraction;
    N(0, 3) = small_fraction;

    N(1, 0) = small_fraction;
    N(1, 1) = big_fraction;
    N(1, 2) = small_fraction;
    N(1, 3) = small_fraction;

    N(2, 0) = small_fraction;
    N(2, 1) = small_fraction;
    N(2, 2) = big_fraction;
    N(2, 3) = small_fraction;

    N(3, 0) = small_fraction;
    N(3, 1) = small_fraction;
    N(3, 2) = small_fraction;
    N(3, 3) = big_fraction;

    N(4, 0) = one_quarter;
    N(4, 1) = one_quarter;
    N(4, 2) = one_quarter;
    N(4, 3) = one_quarter;

    N(5, 0) = small_fraction;
    N(5, 1) = mid_fraction;
    N(5, 2) = mid_fraction;
    N(5, 3) = mid_fraction;

    N(6, 0) = mid_fraction;
    N(6, 1) = small_fraction;
    N(6, 2) = mid_fraction;
    N(6, 3) = mid_fraction;

    N(7, 0) = mid_fraction;
    N(7, 1) = mid_fraction;
    N(7, 2) = small_fraction;
    N(7, 3) = mid_fraction;

    N(8, 0) = mid_fraction;
    N(8, 1) = mid_fraction;
    N(8, 2) = mid_fraction;
    N(8, 3) = small_fraction;

    //positions = shape functions applied to the nodal coordinates
    pos=ZeroMatrix(9,3);
    for (unsigned int i=0; i!=4; i++) //going through the 4 nodes
    {
        array_1d<double, 3 > & coordinates = geom[i].Coordinates();
        for (unsigned int j=0; j!=9; j++) //going through the 9 particles
        {
            for (unsigned int k=0; k!=3; k++) //x,y,z
                pos(j,k) += N(j,i) * coordinates[k];
        }
    }
}

//3 sample points in a triangle used before reseeding (2D), each biased towards one node.
void ComputeGaussPointPositionsForPreReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 3, 3 > & pos,BoundedMatrix<double, 3, 3 > & N) //2D
{
    N(0, 0) = 0.5;
    N(0, 1) = 0.25;
    N(0, 2) = 0.25;

    N(1, 0) = 0.25;
    N(1, 1) = 0.5;
    N(1, 2) = 0.25;

    N(2, 0) = 0.25;
    N(2, 1) = 0.25;
    N(2, 2) = 0.5;

    //first
    pos(0, 0) = 0.5 * geom[0].X() + 0.25 * geom[1].X() + 0.25 * geom[2].X();
    pos(0, 1) = 0.5 * geom[0].Y() + 0.25 * geom[1].Y() + 0.25 * geom[2].Y();
    pos(0, 2) = 0.5 * geom[0].Z() + 0.25 * geom[1].Z() + 0.25 * geom[2].Z();

    //second
    pos(1, 0) = 0.25 * geom[0].X() + 0.5 * geom[1].X() + 0.25 * geom[2].X();
    pos(1, 1) = 0.25 * geom[0].Y() + 0.5 * geom[1].Y() + 0.25 * geom[2].Y();
    pos(1, 2) = 0.25 * geom[0].Z() + 0.5 * geom[1].Z() + 0.25 * geom[2].Z();

    //third
    pos(2, 0) = 0.25 * geom[0].X() + 0.25 * geom[1].X() + 0.5 * geom[2].X();
    pos(2, 1) = 0.25 * geom[0].Y() + 0.25 * geom[1].Y() + 0.5 * geom[2].Y();
    pos(2, 2) = 0.25 * geom[0].Z() + 0.25 * geom[1].Z() + 0.5 * geom[2].Z();
}

//4 sample points in a tetrahedron used before reseeding (3D).
void ComputeGaussPointPositionsForPreReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 4, 3 > & pos,BoundedMatrix<double, 4, 4 > & N) //3D
{
    //creating 4 particles, each will be closer to a node and equidistant to the other nodes
    N(0, 0) = 0.4;
    N(0, 1) = 0.2;
    N(0, 2) = 0.2;
    N(0, 3) = 0.2;

    N(1, 0) = 0.2;
    N(1, 1) = 0.4;
    N(1, 2) = 0.2;
    N(1, 3) = 0.2;

    N(2, 0) = 0.2;
    N(2, 1) = 0.2;
    N(2, 2) = 0.4;
    N(2, 3) = 0.2;

    N(3, 0) = 0.2;
    N(3, 1) = 0.2;
    N(3, 2) = 0.2;
    N(3, 3) = 0.4;

    //positions = shape functions applied to the nodal coordinates
    pos=ZeroMatrix(4,3);
    for (unsigned int i=0; i!=4; i++) //going through the 4 nodes
    {
        array_1d<double, 3 > & coordinates = geom[i].Coordinates();
        for (unsigned int j=0; j!=4; j++) //going through the 4 particles
        {
            for (unsigned int k=0; k!=3; k++) //x,y,z
                pos(j,k) += N(j,i) * coordinates[k];
        }
    }
}

//45 sample points laid out on a regular barycentric lattice of the triangle (step 0.1).
void ComputeGaussPointPositions_45(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 45, 3 > & pos,BoundedMatrix<double, 45, 3 > & N)
{
    //std::cout << "NEW ELEMENT" << std::endl;
    unsigned int counter=0;
    for (unsigned int i=0; i!=9;i++)
    {
        for (unsigned int j=0; j!=(9-i);j++)
        {
            N(counter,0)=0.05+double(i)*0.1;
            N(counter,1)=0.05+double(j)*0.1;
            N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ;
            pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X();
            pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y();
            pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z();
            //std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl;
            counter++;
        }
    }
}

//15 sample points on a coarser barycentric lattice (step 0.2) used at initial seeding (2D).
void ComputeGaussPointPositions_initial(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 15, 3 > & pos,BoundedMatrix<double, 15, 3 > & N) //2D
{
    //std::cout << "NEW ELEMENT" << std::endl;
    unsigned int counter=0;
    for (unsigned int i=0; i!=5;i++)
    {
        for (unsigned int j=0; j!=(5-i);j++)
        {
            N(counter,0)=0.05+double(i)*0.2;
            N(counter,1)=0.05+double(j)*0.2;
            N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ;
            pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X();
            pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y();
            pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z();
            //std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl;
            counter++;
        }
    }
}

//20 sample points filling the tetrahedron layer by layer, used at initial seeding (3D).
void ComputeGaussPointPositions_initial(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 20, 3 > & pos,BoundedMatrix<double, 20, 4 > & N) //3D
{
    //std::cout << "NEW ELEMENT" << std::endl;
    //double total;
    double fraction_increment;
    unsigned int counter=0;
    for (unsigned int i=0; i!=4;i++) //going to build a particle "pyramid"(tetrahedra) by layers. the first layer will be made by a triangle of 4 base X 4 height. since it is a triangle, it means it will have 10 particles
    {
        //std::cout << "inside i" << i << std::endl;
        for (unsigned int j=0; j!=(4-i);j++)
        {
            //std::cout << "inside j" << j << std::endl;
            for (unsigned int k=0; k!=(4-i-j);k++)
            {
                //std::cout << "inside k" << k << std::endl;
                N(counter,0)= 0.27 * ( 0.175 + double(i) ) ; //this is our "surface" in which we will build each layer, so we must construct a triangle using what's left of the shape functions total (a total of 1)
                //total = 1.0 - N(counter,0);
                fraction_increment = 0.27; //
                N(counter,1)=fraction_increment * (0.175 + double(j));
                N(counter,2)=fraction_increment * (0.175 + double(k));
                N(counter,3)=1.0 - ( N(counter,0)+ N(counter,1) + N(counter,2) ) ;
                pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X() + N(counter,3) * geom[3].X();
                pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y() + N(counter,3) * geom[3].Y();
                pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z() + N(counter,3) * geom[3].Z();
                //std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl;
                counter++;
            }
        }
    }
}

// Bubble Sort Function over the first arrange_number entries, keeping the
// auxiliary "positions" array in sync (7-slot variant).
// NOTE: this sorts in ASCENDING order (the original header comment said
// "Descending Order", but the < comparison below yields ascending).
void BubbleSort(array_1d<double,7> &distances , array_1d<int,7 > &positions, unsigned int & arrange_number)
{
    int i, j;
    bool flag = true; // set flag to 1 to start first pass
    double temp; // holding variable
    int temp_position; // holding variable for the matching position entry
    int numLength = arrange_number;
for(i = 1; (i <= numLength) && flag; i++)
{
    flag = false;
    for (j=0; j < (numLength -1); j++)
    {
        if (distances[j+1] < distances[j]) // for descending order simply change < to >
        {
            temp = distances[j]; // swap elements
            distances[j] = distances[j+1];
            distances[j+1] = temp;
            temp_position = positions[j]; //swap positions
            positions[j] = positions[j+1];
            positions[j+1] = temp_position;
            flag = true; // indicates that a swap occurred.
        }
    }
}
return; //arrays are passed to functions by address; nothing is returned
}

// Bubble sort (ascending) over the first arrange_number entries, keeping the
// auxiliary "positions" array in sync (9-slot variant; same algorithm as above).
void BubbleSort(array_1d<double,9> &distances , array_1d<int,9 > &positions, unsigned int & arrange_number)
{
    int i, j;
    bool flag = true; // set flag to 1 to start first pass
    double temp; // holding variable
    int temp_position; // holding variable for the matching position entry
    int numLength = arrange_number;
    for(i = 1; (i <= numLength) && flag; i++)
    {
        flag = false;
        for (j=0; j < (numLength -1); j++)
        {
            if (distances[j+1] < distances[j]) // for descending order simply change < to >
            {
                temp = distances[j]; // swap elements
                distances[j] = distances[j+1];
                distances[j+1] = temp;
                temp_position = positions[j]; //swap positions
                positions[j] = positions[j+1];
                positions[j+1] = temp_position;
                flag = true; // indicates that a swap occurred.
            }
        }
    }
    return; //arrays are passed to functions by address; nothing is returned
}

// Generic matrix inversion through boost::numeric::ublas LU factorization.
// Returns false when the factorization fails (singular input); "inverse" is
// only valid when true is returned.
template<class T>
bool InvertMatrix(const T& input, T& inverse)
{
    typedef permutation_matrix<std::size_t> pmatrix;

    // create a working copy of the input
    T A(input);

    // create a permutation matrix for the LU-factorization
    pmatrix pm(A.size1());

    // perform LU-factorization
    int res = lu_factorize(A, pm);
    if (res != 0)
        return false;

    // create identity matrix of "inverse"
    inverse.assign(identity_matrix<double> (A.size1()));

    // backsubstitute to get the inverse
    lu_substitute(A, pm, inverse);

    return true;
}

// Closed-form 3x3 inverse via cofactor expansion.
// NOTE(review): the determinant is not checked against zero before dividing,
// so a singular matrix silently produces inf/nan entries — confirm callers
// only pass invertible matrices.
bool InvertMatrix3x3(const BoundedMatrix<double, TDim+1 , TDim+1 >& A, BoundedMatrix<double, TDim+1 , TDim+1 >& result)
{
    double determinant = +A(0,0)*(A(1,1)*A(2,2)-A(2,1)*A(1,2))
                         -A(0,1)*(A(1,0)*A(2,2)-A(1,2)*A(2,0))
                         +A(0,2)*(A(1,0)*A(2,1)-A(1,1)*A(2,0));
    double invdet = 1/determinant;
    result(0,0) = (A(1,1)*A(2,2)-A(2,1)*A(1,2))*invdet;
    result(1,0) = -(A(0,1)*A(2,2)-A(0,2)*A(2,1))*invdet;
    result(2,0) = (A(0,1)*A(1,2)-A(0,2)*A(1,1))*invdet;
    result(0,1) = -(A(1,0)*A(2,2)-A(1,2)*A(2,0))*invdet;
    result(1,1) = (A(0,0)*A(2,2)-A(0,2)*A(2,0))*invdet;
    result(2,1) = -(A(0,0)*A(1,2)-A(1,0)*A(0,2))*invdet;
    result(0,2) = (A(1,0)*A(2,1)-A(2,0)*A(1,1))*invdet;
    result(1,2) = -(A(0,0)*A(2,1)-A(2,0)*A(0,1))*invdet;
    result(2,2) = (A(0,0)*A(1,1)-A(1,0)*A(0,1))*invdet;
    return true;
}

//member data
ModelPart& mr_model_part;                  //fluid model part this utility operates on
ModelPart* mtopographic_model_part_pointer; //optional auxiliary domain (may be unset)
array_1d<double, 3 > mcalculation_domain_complete_displacement;
array_1d<double, 3 > mcalculation_domain_added_displacement;
bool mintialized_transfer_tool;
bool muse_mesh_velocity_to_convect;
int m_nparticles;
int mnelems;
double mDENSITY_WATER;
double mDENSITY_AIR;
//vector<double> mareas_vector; UNUSED SO COMMENTED

int max_nsubsteps;
double max_substep_dt;
int mmaximum_number_of_particles;
std::vector< PFEM_Particle_Fluid > mparticles_vector; //Point<3>
int mlast_elem_id;
bool modd_timestep;
bool mparticle_printing_tool_initialized;
unsigned int
mfilter_factor;
unsigned int mlast_node_id;
//ModelPart& mr_particle_model_part;

vector<int> mnumber_of_particles_in_elems;
vector<int> mnumber_of_particles_in_elems_aux;
vector<ParticlePointerVector*> mpointers_to_particle_pointers_vectors;

//spatial search structures for the fluid mesh and the optional topographic mesh
typename BinsObjectDynamic<Configure>::Pointer mpBinsObjectDynamic;
typename BinsObjectDynamic<Configure>::Pointer mpTopographicBinsObjectDynamic;

//computes a condition normal, orientation-checked against the nodal NORMAL field;
//specialized below for 2D and 3D
void CalculateNormal(Geometry<Node<3> >& pGeometry, array_1d<double,3>& An );

};

//2D specialization: normal of an edge (90-degree rotation of the edge vector),
//flipped if it disagrees with the averaged nodal NORMAL of the geometry.
template<>
void MoveParticleUtilityPFEM2<2>::CalculateNormal(Geometry<Node<3> >& pGeometry, array_1d<double,3>& An )
{
    array_1d<double,2> v1;
    v1[0] = pGeometry[1].X() - pGeometry[0].X();
    v1[1] = pGeometry[1].Y() - pGeometry[0].Y();

    //rotate the edge vector by 90 degrees to obtain a normal
    An[0] = -v1[1];
    An[1] = v1[0];
    An[2] = 0.0;

    //now checking orientation using the normal:
    const unsigned int NumNodes = 2;
    array_1d<double,3> nodal_normal = ZeroVector(3);
    for (unsigned int iNode = 0; iNode < NumNodes; ++iNode)
        nodal_normal += pGeometry[iNode].FastGetSolutionStepValue(NORMAL);

    double dot_prod = nodal_normal[0]*An[0] + nodal_normal[1]*An[1];
    if (dot_prod<0.0)
    {
        //std::cout << "inverting the normal" << std::endl;
        An *= -1.0; // inverting the direction of the normal!!!
    }
}

//3D specialization: area normal of a triangle (half the cross product of two
//edge vectors), flipped if it disagrees with the averaged nodal NORMAL field.
template<>
void MoveParticleUtilityPFEM2<3>::CalculateNormal(Geometry<Node<3> >& pGeometry, array_1d<double,3>& An )
{
    array_1d<double,3> v1,v2;
    v1[0] = pGeometry[1].X() - pGeometry[0].X();
    v1[1] = pGeometry[1].Y() - pGeometry[0].Y();
    v1[2] = pGeometry[1].Z() - pGeometry[0].Z();

    v2[0] = pGeometry[2].X() - pGeometry[0].X();
    v2[1] = pGeometry[2].Y() - pGeometry[0].Y();
    v2[2] = pGeometry[2].Z() - pGeometry[0].Z();

    MathUtils<double>::CrossProduct(An,v1,v2);
    An *= 0.5;

    //now checking orientation using the normal:
    const unsigned int NumNodes = 3;
    array_1d<double,3> nodal_normal = ZeroVector(3);
    for (unsigned int iNode = 0; iNode < NumNodes; ++iNode)
        nodal_normal += pGeometry[iNode].FastGetSolutionStepValue(NORMAL);

    double dot_prod = nodal_normal[0]*An[0] + nodal_normal[1]*An[1] + nodal_normal[2]*An[2];
    if (dot_prod<0.0)
    {
        //std::cout << "inverting the normal!!" << std::endl;
        An *= -1.0; // inverting the direction of the normal!!!
    }
}

} // namespace Kratos.

#endif // KRATOS_MOVE_PART_UTILITY_DIFF2_INCLUDED defined
gemm.c
#include "gemm.h"
#include "utils.h"
#include "cuda.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>   /* clock(), clock_t, CLOCKS_PER_SEC — previously only available transitively */

/* C += (A ? +B : -B): GEMM against a binary (sign) matrix A stored one char
 * per element.  A non-zero entry adds the corresponding row of B to C,
 * a zero entry subtracts it.  lda/ldb/ldc are the leading dimensions. */
void gemm_bin(int M, int N, int K, float ALPHA,
        char  *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i,j,k;
    for(i = 0; i < M; ++i){
        for(k = 0; k < K; ++k){
            char A_PART = A[i*lda+k];
            if(A_PART){
                for(j = 0; j < N; ++j){
                    C[i*ldc+j] += B[k*ldb+j];
                }
            } else {
                for(j = 0; j < N; ++j){
                    C[i*ldc+j] -= B[k*ldb+j];
                }
            }
        }
    }
}

/* Allocate a rows*cols matrix filled with uniform random values in [0,1].
 * Caller owns (and must free) the returned buffer.  Fails fast on OOM
 * instead of returning NULL (callers never checked the result). */
float *random_matrix(int rows, int cols)
{
    int i;
    float *m = calloc(rows*cols, sizeof(float));
    if(!m){
        fprintf(stderr, "random_matrix: allocation of %dx%d floats failed\n", rows, cols);
        exit(EXIT_FAILURE);
    }
    for(i = 0; i < rows*cols; ++i){
        m[i] = (float)rand()/RAND_MAX;
    }
    return m;
}

/* Benchmark 10 CPU GEMM calls on random matrices of the given shape.
 * TA/TB select whether A/B are used transposed (affects allocation shape). */
void time_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);
    int i;
    clock_t start = clock(), end;
    for(i = 0; i<10; ++i){
        gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    }
    end = clock();
    /* (end-start)/CLOCKS_PER_SEC is seconds; the old label said "ms"
     * while printing seconds. */
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}

/* Public entry point: C = ALPHA * op(A)*op(B) + BETA*C on the CPU.
 * op() is transpose when TA/TB are non-zero. */
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    gemm_cpu( TA,  TB,  M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
}

/* C += ALPHA * A*B, neither operand transposed. */
void gemm_nn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i;
    #pragma omp parallel for
    for(i = 0; i < M; ++i){
        /* j and k are declared inside the parallel loop; at function scope
         * they were shared between threads under "#pragma omp parallel for",
         * which is a data race. */
        int j, k;
        for(k = 0; k < K; ++k){
            float A_PART = ALPHA*A[i*lda+k];
            for(j = 0; j < N; ++j){
                C[i*ldc+j] += A_PART*B[k*ldb+j];
            }
        }
    }
}

/* C += ALPHA * A*B^T. */
void gemm_nt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i;
    #pragma omp parallel for
    for(i = 0; i < M; ++i){
        int j, k;   /* per-thread; see gemm_nn for the shared-index race this fixes */
        for(j = 0; j < N; ++j){
            float sum = 0;
            for(k = 0; k < K; ++k){
                sum += ALPHA*A[i*lda+k]*B[j*ldb + k];
            }
            C[i*ldc+j] += sum;
        }
    }
}

/* C += ALPHA * A^T*B. */
void gemm_tn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i;
    #pragma omp parallel for
    for(i = 0; i < M; ++i){
        int j, k;   /* per-thread; see gemm_nn for the shared-index race this fixes */
        for(k = 0; k < K; ++k){
            float A_PART = ALPHA*A[k*lda+i];
            for(j = 0; j < N; ++j){
                C[i*ldc+j] += A_PART*B[k*ldb+j];
            }
        }
    }
}

/* C += ALPHA * A^T*B^T. */
void gemm_tt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i;
    #pragma omp parallel for
    for(i = 0; i < M; ++i){
        int j, k;   /* per-thread; see gemm_nn for the shared-index race this fixes */
        for(j = 0; j < N; ++j){
            float sum = 0;
            for(k = 0; k < K; ++k){
                sum += ALPHA*A[i+k*lda]*B[k+j*ldb];
            }
            C[i*ldc+j] += sum;
        }
    }
}

/* CPU GEMM dispatcher: scales C by BETA, then dispatches on the two
 * transpose flags to one of the four specialised kernels above. */
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    int i, j;
    for(i = 0; i < M; ++i){
        for(j = 0; j < N; ++j){
            C[i*ldc + j] *= BETA;
        }
    }
    if(!TA && !TB)
        gemm_nn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
    else if(TA && !TB)
        gemm_tn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
    else if(!TA && TB)
        gemm_nt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
    else
        gemm_tt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
}

#ifdef GPU

#include <math.h>

/* GPU GEMM via cuBLAS.  cuBLAS is column-major, so we compute
 * C^T = op(B)^T * op(A)^T by swapping the operand order and flags. */
void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A_gpu, int lda,
        float *B_gpu, int ldb,
        float BETA,
        float *C_gpu, int ldc)
{
    cublasHandle_t handle = blas_handle();
    /* NOTE(review): cublasSgemm returns cublasStatus_t, not cudaError_t;
     * this compiles because both are enums, but the status type and the
     * check_error() signature should be confirmed against cuda.h. */
    cudaError_t status = cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N),
            (TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc);
    check_error(status);
}

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

/* Benchmark 32 GPU GEMM calls; note the pointers passed are HOST buffers
 * (kept as in the original harness — timing only, results unused). */
void time_gpu_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);
    int i;
    clock_t start = clock(), end;
    for(i = 0; i<32; ++i){
        gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    }
    end = clock();
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}

/* Time `iter` device GEMMs on device-resident buffers and report GFLOPS. */
void time_gpu(int TA, int TB, int m, int k, int n)
{
    int iter = 10;
    float *a = random_matrix(m,k);
    float *b = random_matrix(k,n);

    int lda = (!TA)?k:m;
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);

    float *a_cl = cuda_make_array(a, m*k);
    float *b_cl = cuda_make_array(b, k*n);
    float *c_cl = cuda_make_array(c, m*n);

    int i;
    clock_t start = clock(), end;
    for(i = 0; i<iter; ++i){
        gemm_gpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n);
        /* NOTE(review): cudaThreadSynchronize is deprecated in favour of
         * cudaDeviceSynchronize — confirm the CUDA version in use. */
        cudaThreadSynchronize();
    }
    double flop = ((double)m)*n*(2.*k + 2.)*iter;
    double gflop = flop/pow(10., 9);
    end = clock();
    double seconds = sec(end-start);
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds);
    cuda_free(a_cl);
    cuda_free(b_cl);
    cuda_free(c_cl);
    free(a);
    free(b);
    free(c);
}

/* Compare GPU vs CPU GEMM on identical random inputs and print the
 * mean squared difference (SSE normalised by m*n). */
void test_gpu_accuracy(int TA, int TB, int m, int k, int n)
{
    srand(0);
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);
    float *c_gpu = random_matrix(m,n);
    memset(c, 0, m*n*sizeof(float));
    memset(c_gpu, 0, m*n*sizeof(float));
    int i;
    gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n);
    gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    double sse = 0;
    for(i = 0; i < m*n; ++i) {
        sse += pow(c[i]-c_gpu[i], 2);
    }
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n));
    free(a);
    free(b);
    free(c);
    free(c_gpu);
}

/* Fixed benchmark suite used for regression timing; always returns 0. */
int test_gpu_blas()
{
    /*
       test_gpu_accuracy(0,0,10,576,75);

       test_gpu_accuracy(0,0,17,10,10);
       test_gpu_accuracy(1,0,17,10,10);
       test_gpu_accuracy(0,1,17,10,10);
       test_gpu_accuracy(1,1,17,10,10);

       test_gpu_accuracy(0,0,1000,10,100);
       test_gpu_accuracy(1,0,1000,10,100);
       test_gpu_accuracy(0,1,1000,10,100);
       test_gpu_accuracy(1,1,1000,10,100);

       test_gpu_accuracy(0,0,10,10,10);

       time_gpu(0,0,64,2916,363);
       time_gpu(0,0,64,2916,363);
       time_gpu(0,0,64,2916,363);
       time_gpu(0,0,192,729,1600);
       time_gpu(0,0,384,196,1728);
       time_gpu(0,0,256,196,3456);
       time_gpu(0,0,256,196,2304);
       time_gpu(0,0,128,4096,12544);
       time_gpu(0,0,128,4096,4096);
     */
    time_gpu(0,0,64,75,12544);
    time_gpu(0,0,64,75,12544);
    time_gpu(0,0,64,75,12544);
    time_gpu(0,0,64,576,12544);
    time_gpu(0,0,256,2304,784);
    time_gpu(1,1,2304,256,784);
    time_gpu(0,0,512,4608,196);
    time_gpu(1,1,4608,512,196);

    return 0;
}
#endif
pooling_plus_add_layer.c
#include "cuda.h"
#include "pooling_plus_add_layer.h"
#include <assert.h>      /* assert() was used below without this include */
#include <float.h>
#include <pmmintrin.h>

/* Fused max-pool (size x size window, given stride and zero-pad) over src1,
 * followed by an element-wise add of src2, written to dst.
 * OpenMP-parallel over the batch dimension.  src2 indices are wrapped with
 * modulo so a smaller src2 tensor is tiled over the pooled output. */
void forward_maxpool_plus_add_fusion_layer_with_openmp(int batch, int src1_in_h, int src1_in_w, int src1_in_c,
        int src2_in_h, int src2_in_w, int src2_in_c,
        int stride, int size, int pad,
        float *src1_pointer, float *src2_pointer, float *dst_pointer)
{
    int b;
    int h = (src1_in_h + 2*pad - size)/stride + 1;
    int w = (src1_in_w + 2*pad - size)/stride + 1;
    int w_offset = -pad;
    int h_offset = -pad;

    #pragma omp parallel for
    for(b = 0; b < batch; ++b){
        /* Inner indices are per-thread; previously they were declared at
         * function scope and therefore shared across threads (data race). */
        int i, j, k, m, n;
        for(k = 0; k < src1_in_c; ++k){
            for(i = 0; i < h; ++i){
                for(j = 0; j < w; ++j){
                    int src1_index = j + w*(i + h*(k + src1_in_c*b));
                    int src2_index = (j + src2_in_w)%src2_in_w
                        + src2_in_w*( (i + src2_in_h)%src2_in_h
                        + src2_in_h*( (k + src2_in_c)%src2_in_c + src2_in_c*b));
                    float max = -FLT_MAX;
                    /* NOTE(review): skipping the first (size - stride) taps for
                     * interior windows assumes overlap with the previous window
                     * — confirm this matches the intended pooling semantics. */
                    int s_start = (i == 0 || j == 0) ? 0 : (size - stride);
                    #pragma unroll
                    for(n = s_start; n < size; ++n){
                        for(m = s_start; m < size; ++m){
                            int cur_h = h_offset + i*stride + n;
                            int cur_w = w_offset + j*stride + m;
                            int index = cur_w + src1_in_w*(cur_h + src1_in_h*(k + b*src1_in_c));
                            int valid = (cur_h >= 0 && cur_h < src1_in_h &&
                                         cur_w >= 0 && cur_w < src1_in_w);
                            float val = (valid != 0) ? src1_pointer[index] : -FLT_MAX;
                            max = (val > max) ? val : max;
                        }
                    }
                    dst_pointer[src1_index] = max + src2_pointer[src2_index];
                }
            }
        }
    }
}

/* SSE variant of the fused max-pool + add.  Processes the pooling window in
 * 2x2 steps, comparing 4-wide lanes loaded from three neighbouring offsets.
 * NOTE(review): only one lane of the final vector is stored per output
 * element (_mm_store_ss), so the vector math acts as a wide max candidate
 * generator — verify numeric equivalence against the scalar version. */
void forward_maxpool_plus_add_fusion_layer_with_openmp_and_sse(int batch, int src1_in_h, int src1_in_w, int src1_in_c,
        int src2_in_h, int src2_in_w, int src2_in_c,
        int stride, int size, int pad,
        float *src1_pointer, float *src2_pointer, float *dst_pointer)
{
    /* Was "assert(NULL = src1_pointer)" etc.: assignment to NULL does not
     * compile, and the intent is clearly a non-NULL precondition. */
    assert(src1_pointer != NULL);
    assert(src2_pointer != NULL);
    assert(dst_pointer != NULL);
    assert(batch>0);
    assert(src1_in_h > src2_in_h);
    assert(src1_in_w > src2_in_w);
    assert(src1_in_c > src2_in_c);
    assert(stride > 0);
    assert(size >= 2);
    /* NOTE(review): pad == 0 is a legal pooling configuration; confirm this
     * kernel really requires positive padding. */
    assert(pad > 0);

    int b;
    int h = (src1_in_h + 2*pad - size)/stride + 1;
    int w = (src1_in_w + 2*pad - size)/stride + 1;
    int w_offset = -pad;
    int h_offset = -pad;

    #pragma omp parallel for
    for(b = 0; b < batch; ++b){
        /* per-thread loop indices — see the openmp variant above */
        int i, j, k, m, n;
        for(k = 0; k < src1_in_c; ++k){
            for(i = 0; i < h; ++i){
                for(j = 0; j < w; ++j){
                    int src1_index = j + w*(i + h*(k + src1_in_c*b));
                    int src2_index = (j + src2_in_w)%src2_in_w
                        + src2_in_w*( (i + src2_in_h)%src2_in_h
                        + src2_in_h*( (k + src2_in_c)%src2_in_c + src2_in_c*b));
                    __m128 max = _mm_set1_ps(-FLT_MAX);
                    int s_start = (i == 0 || j == 0) ? 0 : (size - stride);
                    #pragma unroll
                    for(n = s_start; n < size; n+=2){
                        for(m = s_start; m < size; m+=2){
                            int cur_h   = h_offset + i*stride + n;
                            int cur_w   = w_offset + j*stride + m;
                            int cur_w_r = w_offset + j*stride + m + 1;
                            int cur_h_d = h_offset + i*stride + n + 1;
                            int index   = cur_w   + src1_in_w*(cur_h   + src1_in_h*(k + b*src1_in_c));
                            int index_r = cur_w_r + src1_in_w*(cur_h   + src1_in_h*(k + b*src1_in_c));
                            /* Was "cur_h + src1_in_w*(cur_h_d + ...)": a row
                             * index where the column index is meant, by the
                             * pattern of index and index_r above. */
                            int index_d = cur_w + src1_in_w*(cur_h_d + src1_in_h*(k + b*src1_in_c));
                            int valid = (cur_h >= 0 && cur_h < src1_in_h &&
                                         cur_w >= 0 && cur_w < src1_in_w &&
                                         cur_w_r>=0 && cur_w_r<src1_in_w &&
                                         cur_h_d>=0 && cur_h_d<src1_in_h);
                            __m128 vdata_1 = _mm_loadu_ps((float*) &src1_pointer[index]);
                            __m128 vdata_2 = _mm_loadu_ps((float*) &src1_pointer[index_r]);
                            __m128 vdata_3 = _mm_loadu_ps((float*) &src1_pointer[index_d]);
                            __m128 max_temp = _mm_max_ps(vdata_1, vdata_2);
                            max_temp = _mm_max_ps(max_temp, vdata_3);
                            __m128 val = (valid != 0) ? max_temp : _mm_set1_ps(-FLT_MAX);
                            max = _mm_max_ps(val, max);
                        }
                    }
                    __m128 vdata_4 = _mm_loadu_ps((float*) &src2_pointer[src2_index]);
                    __m128 vres = _mm_add_ps(max, vdata_4);
                    _mm_store_ss((float*) &dst_pointer[src1_index], vres);
                }
            }
        }
    }
}

/* Reference (sequential) implementation: max-pool src1 into dst in a first
 * pass, then add the tiled src2 in a second pass.  Kept simple for use as a
 * correctness baseline for the fused variants above. */
void forward_maxpool_plus_add_layer(int batch, int src1_in_h, int src1_in_w, int src1_in_c,
        int src2_in_h, int src2_in_w, int src2_in_c,
        int stride, int size, int pad,
        float *src1_pointer, float *src2_pointer, float *dst_pointer)
{
    int b,i,j,k,m,n;
    int h = (src1_in_h + 2*pad - size)/stride + 1;
    int w = (src1_in_w + 2*pad - size)/stride + 1;
    int w_offset = -pad;
    int h_offset = -pad;

    /* pass 1: max-pool */
    for(b = 0; b < batch; ++b){
        for(k = 0; k < src1_in_c; ++k){
            for(i = 0; i < h; ++i){
                for(j = 0; j < w; ++j){
                    int src1_index = j + w*(i + h*(k + src1_in_c*b));
                    float max = -FLT_MAX;
                    int s_start = (i == 0 || j == 0) ? 0 : (size - stride);
                    for(n = s_start; n < size; ++n){
                        for(m = s_start; m < size; ++m){
                            int cur_h = h_offset + i*stride + n;
                            int cur_w = w_offset + j*stride + m;
                            int index = cur_w + src1_in_w*(cur_h + src1_in_h*(k + b*src1_in_c));
                            int valid = (cur_h >= 0 && cur_h < src1_in_h &&
                                         cur_w >= 0 && cur_w < src1_in_w);
                            float val = (valid != 0) ? src1_pointer[index] : -FLT_MAX;
                            max = (val > max) ? val : max;
                        }
                    }
                    dst_pointer[src1_index] = max;
                }
            }
        }
    }

    /* pass 2: element-wise add of the (tiled) second source */
    for(b = 0; b < batch; ++b){
        for(k = 0; k < src1_in_c; ++k){
            for(i = 0; i < h; ++i){
                for(j = 0; j < w; ++j){
                    int src1_index = j + w*(i + h*(k + src1_in_c*b));
                    int src2_index = (j + src2_in_w)%src2_in_w
                        + src2_in_w*( (i + src2_in_h)%src2_in_h
                        + src2_in_h*( (k + src2_in_c)%src2_in_c + src2_in_c*b));
                    dst_pointer[src1_index] += src2_pointer[src2_index];
                }
            }
        }
    }
}
9335.c
/*
 * Compile using the command:
 * `cc 27Stencil.c -o oa -fopenmp -lm`
 */
#include <math.h>
#include <omp.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>

#ifdef _OPENACC
#include <openacc.h>
#endif

#define DEFAULT_DATASIZE 1048576 /* Default datasize. */
#define DEFAULT_REPS 10          /* Default repetitions. */
#define CONF95 1.96
#define ITERATIONS 10
#define FAC (1./26)
#define TOLERANCE 1.0e-15

extern int reps;               /* Repetitions. */
extern double *times;          /* Array to store results in. */
extern int flag;               /* Flag to set CPU or GPU invocation. */
extern unsigned int datasize;  /* Datasize passed to benchmark functions. */

unsigned int datasize = -1;    /* Datasize for tests in bytes; -1 == "use default". */
int reps = -1;                 /* Repetitions; -1 == "use default". */
double *times;                 /* Benchmark times in microseconds, one per rep. */
double testtime;               /* Average test time over reps runs. */
double testsd;                 /* Standard deviation of the test time. */
int flag = 0;                  /* 0 indicates CPU (currently unused). */

/* Function prototypes for common functions. */
void init(int argc, char **argv);
void finalisetest(char *);
void finalise(void);
void benchmark(char *, double (*test)(void));
void print_results(char *, double, double);

/* Forward declarations of utility functions. */
double max_diff(double *, double *, int);
void wul();

/* Print command-line usage and defaults. */
void usage(char *argv[])
{
    printf("Usage: %s \n"
           "\t--reps <repetitions> (default %d)\n"
           "\t--datasize <datasize> (default %d bytes)\n",
           argv[0],
           DEFAULT_REPS, DEFAULT_DATASIZE);
}

/*
 * Parse --reps / --datasize / -h from the command line.
 * Exits with a usage message on any invalid argument (atoi's 0 return is
 * treated as invalid, which also rejects a literal "0").
 */
void parse_args(int argc, char *argv[])
{
    int arg;
    for (arg = 1; arg < argc; arg++) {
        if (strcmp(argv[arg], "--reps") == 0) {
            reps = atoi(argv[++arg]);
            if (reps == 0) {
                printf("Invalid integer:--reps: %s\n", argv[arg]);
                usage(argv);
                exit(EXIT_FAILURE);
            }
        }
        else if (strcmp(argv[arg], "--datasize") == 0) {
            datasize = atoi(argv[++arg]);
            if (datasize == 0) {
                printf("Invalid integer:--datasize: %s\n", argv[arg]);
                usage(argv);
                exit(EXIT_FAILURE);
            }
        }
        else if (strcmp(argv[arg], "-h") == 0) {
            usage(argv);
            exit(EXIT_SUCCESS);
        }
        else {
            printf("Invalid parameters: %s\n", argv[arg]);
            usage(argv);
            exit(EXIT_FAILURE);
        }
    }
}

/*
 * Compute mean (*mtp) and standard deviation (*sdp) over the recorded times,
 * skipping entries equal to 0 (which mark failed repetitions).
 */
void stats(double *mtp, double *sdp)
{
    double meantime, totaltime, sumsq, mintime, maxtime, sd;
    int i, good_reps;

    mintime = 1.0e10;
    maxtime = 0.;
    totaltime = 0.;
    good_reps = 0;

    for (i = 0; i < reps; i++) {
        /* Skip entries where times is 0; this indicates an error occurred. */
        if (times[i] != 0) {
            mintime = (mintime < times[i]) ? mintime : times[i];
            maxtime = (maxtime > times[i]) ? maxtime : times[i];
            totaltime += times[i];
            good_reps++;
        }
    }

    if (good_reps == 0) {
        /* Every repetition failed: avoid dividing by zero below. */
        *mtp = 0.0;
        *sdp = 0.0;
        return;
    }

    meantime = totaltime / good_reps;
    sumsq = 0;

    for (i = 0; i < reps; i++) {
        if (times[i] != 0) {
            sumsq += (times[i] - meantime) * (times[i] - meantime);
        }
    }
    sd = sqrt(sumsq / good_reps);

    *mtp = meantime;
    *sdp = sd;
}

/*
 * Print the results of the tests (currently just the mean time in
 * microseconds; name/testsd are kept for the fuller report format).
 */
void print_results(char *name, double testtime, double testsd)
{
    char compiler[20];
    /* Set default compiler identifier. */
    sprintf(compiler, "COMPILER");

    /* Set compiler identifier based on known preprocessor flags. */
#ifdef __PGI
    sprintf(compiler, "PGI");
#endif
#ifdef __HMPP
    sprintf(compiler, "CAPS");
#endif

    printf("%f\n", testtime*1e6);
}

/*
 * Initialise the storage for the test results and set the defaults.
 */
void init(int argc, char **argv)
{
    parse_args(argc, argv);

    if (reps == -1) {
        reps = DEFAULT_REPS;
    }
    if (datasize == (unsigned int)-1) {
        datasize = DEFAULT_DATASIZE;
    }

    times = (double *)malloc((reps) * sizeof(double));
    if (times == NULL) {
        /* Previously unchecked; fail fast rather than crash in benchmark(). */
        printf("Failed to allocate timing array.\n");
        exit(EXIT_FAILURE);
    }
}

/* Release the timing storage allocated in init(). */
void finalise(void)
{
    free(times);
}

/*
 * Run the given benchmark `reps` times, record per-rep times, and print
 * aggregate statistics.  The sentinel return values -10000 (allocation
 * failure) and -11000 (CPU/GPU mismatch) are exact literals returned by
 * the benchmark, so the float equality comparisons below are reliable.
 */
void benchmark(char *name, double (*test)(void))
{
    int i = 0;
    double tmp = 0;
    for (i = 0; i < reps; i++) {
        tmp = test();
        if (tmp == -10000) {
            printf("Memory allocation failure in %s\n", name);
            times[i] = 0;
        }
        else if (tmp == -11000) {
            printf("CPU/GPU mismatch in %s\n", name);
            times[i] = 0;
        }
        else {
            times[i] = tmp;
        }
    }
    stats(&testtime, &testsd);
    print_results(name, testtime, testsd);
}

/*
 * 27-point stencil benchmark.  Runs ITERATIONS of the stencil on the host,
 * then again through the accelerator pragmas, and compares the results.
 * Returns the accelerated wall time, -10000 on allocation failure, or
 * -11000 if host and device disagree beyond TOLERANCE.
 */
double stencil()
{
    extern unsigned int datasize;
    int sz = cbrt((datasize/sizeof(double))/2);  /* cube edge, truncated */
    int i, j, k, iter;
    int n = sz-2;                                /* interior size (halo of 1) */
    double fac = FAC;
    double t1, t2;
    double md;

    /* Work buffers, with halos. */
    double *a0 = (double*)malloc(sizeof(double)*sz*sz*sz);
    double *device_result = (double*)malloc(sizeof(double)*sz*sz*sz);
    double *a1 = (double*)malloc(sizeof(double)*sz*sz*sz);
    double *host_result = (double*)malloc(sizeof(double)*sz*sz*sz);
    double *a0_init = (double*)malloc(sizeof(double)*sz*sz*sz);

    if (a0==NULL||device_result==NULL||a1==NULL||host_result==NULL||a0_init==NULL) {
        /* Something went wrong in the memory allocation here, fail gracefully. */
        return(-10000);
    }

    /* Zero all of input array a0 (including halos). */
    for (i = 0; i < sz; i++) {
        for (j = 0; j < sz; j++) {
            for (k = 0; k < sz; k++) {
                a0[i*sz*sz+j*sz+k] = 0.0;
            }
        }
    }

    /* Use random numbers to fill the interior. */
    for (i = 1; i < n+1; i++) {
        for (j = 1; j < n+1; j++) {
            for (k = 1; k < n+1; k++) {
                a0[i*sz*sz+j*sz+k] = (double) rand()/ (double)(1.0 + RAND_MAX);
            }
        }
    }

    /* Save initial input array for the later GPU run. */
    for (i = 0; i < sz; i++) {
        for (j = 0; j < sz; j++) {
            for (k = 0; k < sz; k++) {
                a0_init[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k];
            }
        }
    }

    /* Run main computation on host (reference result). */
    for (iter = 0; iter < ITERATIONS; iter++) {
        for (i = 1; i < n+1; i++) {
            for (j = 1; j < n+1; j++) {
                for (k = 1; k < n+1; k++) {
                    /* average of the 26 neighbours (FAC = 1/26) */
                    a1[i*sz*sz+j*sz+k] = (
                        a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] +
                        a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] +
                        a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] +
                        a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] +
                        a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] +
                        a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] +
                        a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] +
                        a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] +
                        a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] +
                        a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] +
                        a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] +
                        a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] +
                        a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)]
                    ) * fac;
                }
            }
        }
        for (i = 1; i < n+1; i++) {
            for (j = 1; j < n+1; j++) {
                for (k = 1; k < n+1; k++) {
                    a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k];
                }
            }
        }
    } /* end iteration loop */

    /* Save host result. */
    for (i = 0; i < sz; i++) {
        for (j = 0; j < sz; j++) {
            for (k = 0; k < sz; k++) {
                host_result[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k];
            }
        }
    }

    /* Copy initial array back to a0. */
    for (i = 0; i < sz; i++) {
        for (j = 0; j < sz; j++) {
            for (k = 0; k < sz; k++) {
                a0[i*sz*sz+j*sz+k] = a0_init[i*sz*sz+j*sz+k];
            }
        }
    }

    /* Accelerated run.  NOTE(review): the data region mixes OpenACC data
     * movement with OpenMP worksharing for the main sweep and an OpenACC
     * parallel loop only for the copy-back — confirm this hybrid is the
     * intended configuration for the target compiler. */
    t1 = omp_get_wtime();
    #pragma acc data copy(a0[0:sz*sz*sz]), create(a1[0:sz*sz*sz], i,j,k,iter), copyin(sz,fac,n)
    {
        for (iter = 0; iter < ITERATIONS; iter++) {
            #pragma omp parallel for
            for (i = 1; i < n+1; i++) {
                #pragma omp parallel for num_threads(2)
                for (j = 1; j < n+1; j++) {
                    #pragma omp simd
                    for (k = 1; k < n+1; k++) {
                        a1[i*sz*sz+j*sz+k] = (
                            a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] +
                            a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] +
                            a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] +
                            a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] +
                            a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] +
                            a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] +
                            a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] +
                            a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] +
                            a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] +
                            a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] +
                            a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] +
                            a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] +
                            a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)]
                        ) * fac;
                    }
                }
            }
            #pragma acc parallel loop
            for (i = 1; i < n+1; i++) {
                #pragma acc loop
                for (j = 1; j < n+1; j++) {
                    #pragma acc loop
                    for (k = 1; k < n+1; k++) {
                        a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k];
                    }
                }
            }
        } /* end iteration loop */
    } /* end data region */
    #pragma acc wait
    t2 = omp_get_wtime();

    memcpy(&device_result[0], &a0[0], sizeof(double)*sz*sz*sz);
    md = max_diff(&host_result[0], &device_result[0], sz);

    /* Free malloc'd memory to prevent leaks. */
    free(a0);
    free(a0_init);
    free(a1);
    free(host_result);
    free(device_result);

    if (md < TOLERANCE) {
        return(t2 - t1);
    }
    else {
        return(-11000); /* sentinel: results differ beyond TOLERANCE */
    }
}

/* Utility: maximum absolute difference over the interior of two sz^3 grids. */
double max_diff(double *array1, double *array2, int sz)
{
    double tmpdiff, diff;
    int i, j, k;
    int n = sz-2;
    diff = 0.0;

    for (i = 1; i < n+1; i++) {
        for (j = 1; j < n+1; j++) {
            for (k = 1; k < n+1; k++) {
                tmpdiff = fabs(array1[i*sz*sz+j*sz+k] - array2[i*sz*sz+j*sz+k]);
                if (tmpdiff > diff) diff = tmpdiff;
            }
        }
    }
    return diff;
}

/*
 * Wake the device up before timing starts.
 * It is more portable than acc_init().
 */
void wul()
{
    int data = 8192;
    double *arr_a = (double *)malloc(sizeof(double) * data);
    double *arr_b = (double *)malloc(sizeof(double) * data);
    int i = 0;

    if (arr_a==NULL||arr_b==NULL) {
        printf("Unable to allocate memory in wul.\n");
    }

    for (i = 0; i < data; i++) {
        arr_a[i] = (double) (rand()/(1.0+RAND_MAX));
    }

    #pragma acc data copy(arr_b[0:data]), copyin(arr_a[0:data])
    {
        #pragma acc parallel loop
        for (i = 0; i < data; i++) {
            arr_b[i] = arr_a[i] * 2;
        }
    }

    if (arr_a[0] < 0) {
        printf("Error in WUL\n");
        /*
         * This should never be called as rands should be in the range (0,1].
         * This stops clever optimizers.
         */
    }
    free(arr_a);
    free(arr_b);
}

int main(int argc, char **argv)
{
    char testName[32];

    /* Initialise storage for test results & parse input arguments. */
    init(argc, argv);

    /* Ensure device is awake. */
    wul();

    sprintf(testName, "27S");
    benchmark(testName, &stencil);

    /* Print results & free results storage. */
    finalise();

    return EXIT_SUCCESS;
}
ex2.c
#include <stdio.h>
#include <omp.h>

/* Print a greeting from every OpenMP thread. */
int main(void)
{
    #pragma omp parallel
    {
        /* Declared inside the parallel region so each thread gets its own
         * copy; the original declared it before the region, making a single
         * shared variable that every thread wrote concurrently (data race,
         * so threads could print each other's ids). */
        int threadId = omp_get_thread_num();
        printf("\nOi %d\n", threadId);
    }

    return 0;
}
MMultiple3.c
/* Hi, everybody! * ===================================================================================== * * Filename: MMmultiple.c * * Description: Do Matrix Multiplication C = A x B with A and B blocks generated by * mkmatrices.c. * * Version: 1.0 * Created: 09/21/2016 22:53:31 * Revision: none * Compiler: gcc * * Author: Xiukun Hu * Organization: University of Wyoming, Department of Mathematics * * ===================================================================================== */ #ifdef _OPENMP #include <omp.h> #else #define omp_get_num_threads() 1; #define omp_get_thread_num() 0; #define omp_get_max_threads() 1; #endif #include <stdio.h> #include <stdlib.h> #include "matrices.h" #ifndef WIDTH #define WIDTH 30 #endif void ClearMatrix( double** matrix, int nrows, int ncols ) { int i, j; for ( i = 0 ; i < nrows ; i++ ) for ( j = 0 ; j < ncols ; j++ ) matrix[i][j] = 0; } int main(){ /* Local declarations */ const int NTH = omp_get_max_threads(); double tsc[NTH]; double tsc1; double t1; /* Time keeper */ double t2; /* Time keeper */ double tt1; double tt; double tio1; /* Private I/O time keeper */ double tio = 0; /* Private I/O time keeper */ double tc1; /* Compute time */ double tc = 0; /* Compute time */ double tw1; /* Wate time */ double tw = 0; /* Wate time */ double temp; /* Private pointer for saving results */ double mrun(); /* Get timing information */ double **ablock[2]; /* Pointer to one block of A */ double **bblock[2]; /* Pointer to one block of B */ double **cblock[2]; /* Pointer to one block of C */ int acols = 0; /* Block columns in A */ int arows = 0; /* Block rows in A */ int bcols = 0; /* Block columns in B */ int brows = 0; /* Block rows in B */ int ccols = 0; /* Block columns in C */ int crows = 0; /* Block rows in C */ int blk_cols = 0; /* Columns in a block */ int blk_rows = 0; /* Rows in a block */ int mopt_a = 0; /* How to allocate space in A blocks */ int mopt_b = 1; /* How to allocate space in B blocks */ int mopt_c = 1; /* How to 
allocate space in C blocks */ int colleft; /* Block columns residue by WIDTH */ int i = 0; /* Loop index */ int j = 0; /* Loop index */ int k = 0; /* Loop index */ int I,J,K; /* Loop index */ int iplus; /* Loop index */ int jplus; /* Loop index */ int kplus; /* Loop index */ int tog = 0; /* Toggle for a&bblock */ int ctog = 0; /* Toggle for cblock */ int TID; /* Thread ID */ int ar; /* ablock row index */ int ac; /* ablock col index */ int rc; int nI; int nThreads; char c = ' '; /* Input character */ tt1 = mrun(); /* Get matrix information from disk */ matrix_info_read( &blk_rows, &blk_cols, &arows, &acols, &brows, &bcols, &crows, &ccols ); /* Preprocess message */ colleft = blk_cols % WIDTH; /* Colunms left for each block over WIDTH */ nI = blk_rows * (blk_cols / WIDTH); /* Number of iterations for each block */ rc = blk_cols - colleft; /* The starting index of the residue column */ /* Allocate 6 block matrices (two each for A, B and C) */ ablock[0] = block_allocate( blk_rows, blk_cols, mopt_a ); bblock[0] = block_allocate( blk_rows, blk_cols, mopt_b ); cblock[0] = block_allocate( blk_rows, blk_cols, mopt_c ); ablock[1] = block_allocate( blk_rows, blk_cols, mopt_a ); bblock[1] = block_allocate( blk_rows, blk_cols, mopt_b ); cblock[1] = block_allocate( blk_rows, blk_cols, mopt_c ); ClearMatrix( cblock[0], blk_rows, blk_cols ); ClearMatrix( cblock[1], blk_rows, blk_cols ); /* Enter parallel region */ #pragma omp parallel default(none) \ shared(blk_cols, blk_rows, \ ablock, bblock, cblock, \ mopt_a, mopt_b, mopt_c, \ acols, crows, ccols, \ colleft, nI, nThreads, \ rc, t1, t2, tsc, tsc1) \ firstprivate( tog, ctog, i, j, k, tio, tc, tw ) \ private( TID, I, J, K, iplus, jplus, kplus, temp, ar, ac, tio1, tc1, tw1 ) { #pragma omp single { nThreads = omp_get_num_threads(); t1 = mrun(); } tc1 = t1; TID = omp_get_thread_num(); /* Single thread reading the A00 B00 for calculating */ #pragma omp single { tio1 = mrun(); tc += tio1 - tc1; block_readdisk( blk_rows, blk_cols, "A", 
0, 0, ablock[0], mopt_a, 0 ); block_readdisk( blk_rows, blk_cols, "B", 0, 0, bblock[0], mopt_a, 0 ); tc1 = mrun(); tio += tc1 - tio1; //printf("Thread %d reading A00 and B00 in %les\n", TID, tio); } // single thread reading A00 B00 /* Reading and calculating at the same time */ while ( i < crows ){ /* Get next loop's index i+, j+ and k+ */ kplus = (k+1) % acols; jplus = (kplus==0)? ((j+1)%ccols) : j; iplus = (jplus==0 && kplus==0)? i+1 : i; /* Single thread reading A_i+k+ & B_k+j+ */ #pragma omp single nowait { if ( iplus < crows ) { tio1 = mrun(); tc += tio1 - tc1; block_readdisk( blk_rows, blk_cols, "A", iplus, kplus, ablock[1-tog], mopt_a, 0 ); block_readdisk( blk_rows, blk_cols, "B", kplus, jplus, bblock[1-tog], mopt_b, 0 ); tc1 = mrun(); tio += tc1 - tio1; } } #pragma omp single nowait if ( i == 0 && j == 0 && k == 0 ) tsc1 = mrun(); /* Multithreads calculating A_ik x B_kj */ #pragma omp for nowait schedule(dynamic) for ( I = 0 ; I < nI; I++ ) { ar = I % blk_rows, ac = (I / blk_rows) * WIDTH; for ( K = 0 ; K < blk_cols ; K++ ) { temp = 0; for ( J = 0 ; J < WIDTH ; J++ ) temp += ablock[tog][ar][ac+J] * bblock[tog][ac+J][K]; #pragma omp atomic update cblock[ctog][ar][K] += temp; } } /* Multithreads taking care of the residue */ if ( colleft ) { #pragma omp for nowait schedule(dynamic) for ( ar = 0 ; ar < blk_rows ; ar++ ) { ac = rc; for ( K = 0 ; K < blk_cols ; K++ ) { temp = 0; for ( J = 0 ; J < colleft ; J++ ) temp += ablock[tog][ar][ac+J] * bblock[tog][ac+J][K]; #pragma omp atomic update cblock[ctog][ar][K] += temp; } } } tw1 = mrun(); tc += tw1 - tc1; if ( i == 0 && j == 0 && k == 0 ) tsc[TID] = mrun(); /* Barrier for reading A_i+k+ B_k+j+ and calculating A_ik x B_kj */ #pragma omp barrier tc1 = mrun(); tw += tc1 - tw1; /* Every thread check but single thread write to disk */ if ( kplus==0 ) { #pragma omp single nowait { tio1 = mrun(); tc += tio1 - tc1; block_write2disk( blk_rows, blk_cols, "C", i, j, cblock[ctog][0] ); ClearMatrix( cblock[ctog], blk_rows, 
blk_cols ); tc1 = mrun(); tio += tc1 - tio1; } // Write cblock: OMP single nowait ctog = 1-ctog; // Every thread change ctog if k+ = 0. } /* Every thread change to another ablock and bblock and update index */ tog = 1 - tog; i = iplus; j = jplus; k = kplus; } /* While loop for blocks */ //printf("Thread %d, compute for %les, io for %les, wait for %le\n", TID, tc, tio, tw); #pragma omp master { t2 = mrun() - t1; } }// End of parallel region //printf("Time in parallel region: %les\n", t2); for ( i = 1 ; i < nThreads ; i++ ) tsc[0] = (tsc[0] < tsc[i])? tsc[i] : tsc[0]; tt = mrun() - tt1; /* Print matrix info */ printf("\n|step3 code|\tblk:%5dx%5d, matrix:%2dx%2d, %2dThreads\n",blk_rows,blk_cols,arows,acols,nThreads); /* Print time */ printf("Total time: %les\n", tt); printf("--------------------------------------------------------\n"); //printf("Time for multiplying A00 x B00 in parallel: %le\n", tsc[0]-tsc1); /* End */ return 0; }
omp_hello.c
/******************************************************************************
 * OpenMP Example - Hello World - C/C++ Version
 * FILE: omp_hello.c
 * DESCRIPTION:
 *   In this simple example, the master thread forks a parallel region.
 *   All threads in the team obtain their unique thread number and print it.
 *   The master thread only prints the total number of threads.  Two OpenMP
 *   library routines are used to obtain the number of threads and each
 *   thread's number.
 * SOURCE: Blaise Barney  5/99
 * LAST REVISED:
 ******************************************************************************/
#include <omp.h>
#include <stdio.h>   /* printf — was missing, leaving it implicitly declared */

int main ()          /* implicit-int "main()" is invalid since C99 */
{
    int nthreads, tid;

    /* Fork a team of threads giving them their own copies of variables */
    #pragma omp parallel private(nthreads, tid)
    {
        /* Obtain thread number */
        tid = omp_get_thread_num();
        printf("Hello World from thread = %d\n", tid);

        /* Only master thread does this */
        if (tid == 0)
        {
            nthreads = omp_get_num_threads();
            printf("Number of threads = %d\n", nthreads);
        }
    }  /* All threads join master thread and disband */

    return 0;
}
conv_dw_kernel_x86.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2021, OPEN AI LAB * Author: qtang@openailab.com */ #include "conv_dw_kernel_x86.h" #include "graph/tensor.h" #include "graph/node.h" #include "graph/graph.h" #include "utility/sys_port.h" #include "utility/float.h" #include "utility/log.h" #include "device/cpu/cpu_node.h" #include "device/cpu/cpu_graph.h" #include "device/cpu/cpu_module.h" #include <stdint.h> #include <stdlib.h> #include <string.h> #include <math.h> #if __SSE2__ #include <emmintrin.h> #endif #if __AVX__ #include <immintrin.h> #endif #define max(a, b) ((a) > (b) ? (a) : (b)) #define min(a, b) ((a) < (b) ? 
(a) : (b)) static void relu(float* data, int size, int activation) { for (int i = 0; i < size; i++) { data[i] = max(data[i], ( float )0); if (activation > 0) { data[i] = min(data[i], ( float )activation); } } } static void pad(float* input, float* output, int in_h, int in_w, int out_h, int out_w, int top, int left, float v) { float* ptr = input; float* outptr = output; int y = 0; // fill top for (; y < top; y++) { int x = 0; for (; x < out_w; x++) { outptr[x] = v; } outptr += out_w; } // fill center for (; y < (top + in_h); y++) { int x = 0; for (; x < left; x++) { outptr[x] = v; } if (in_w < 12) { for (; x < (left + in_w); x++) { outptr[x] = ptr[x - left]; } } else { memcpy(outptr + left, ptr, in_w * sizeof(float)); x += in_w; } for (; x < out_w; x++) { outptr[x] = v; } ptr += in_w; outptr += out_w; } // fill bottom for (; y < out_h; y++) { int x = 0; for (; x < out_w; x++) { outptr[x] = v; } outptr += out_w; } } #if __AVX__ static void convdw3x3s1(float* output, float* img_data, float* kernel_data, float* bias_data, int inc, int inh, int inw, int outh, int outw, int num_thread) { int inwh = inw * inh; int outwh = outw * outh; int channel_count = inc >> 3; int channel_remain = inc - (channel_count << 3); // generate the image tmp float* img_tmp = ( float* )sys_malloc(8 * (unsigned long)inwh * (channel_count + 1) * sizeof(float)); float* kernel_tmp = ( float* )sys_malloc(8 * 9 * (channel_count + 1) * sizeof(float)); float* bias_tmp = ( float* )sys_malloc(8 * (channel_count + 1) * sizeof(float)); { for (int i = 0; i < channel_count; i++) { int ii = i * 8; const float* k0 = img_data + (ii + 0) * inwh; const float* k1 = img_data + (ii + 1) * inwh; const float* k2 = img_data + (ii + 2) * inwh; const float* k3 = img_data + (ii + 3) * inwh; const float* k4 = img_data + (ii + 4) * inwh; const float* k5 = img_data + (ii + 5) * inwh; const float* k6 = img_data + (ii + 6) * inwh; const float* k7 = img_data + (ii + 7) * inwh; const float* f0 = kernel_data + (ii + 0) * 9; 
const float* f1 = kernel_data + (ii + 1) * 9; const float* f2 = kernel_data + (ii + 2) * 9; const float* f3 = kernel_data + (ii + 3) * 9; const float* f4 = kernel_data + (ii + 4) * 9; const float* f5 = kernel_data + (ii + 5) * 9; const float* f6 = kernel_data + (ii + 6) * 9; const float* f7 = kernel_data + (ii + 7) * 9; const float* b0 = bias_data + (ii + 0); const float* b1 = bias_data + (ii + 1); const float* b2 = bias_data + (ii + 2); const float* b3 = bias_data + (ii + 3); const float* b4 = bias_data + (ii + 4); const float* b5 = bias_data + (ii + 5); const float* b6 = bias_data + (ii + 6); const float* b7 = bias_data + (ii + 7); float* tmp0 = img_tmp + ii * inwh; float* tmp1 = kernel_tmp + ii * 9; float* tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0[4] = k4[0]; tmp0[5] = k5[0]; tmp0[6] = k6[0]; tmp0[7] = k7[0]; tmp0 += 8; k0++; k1++; k2++; k3++; k4++; k5++; k6++; k7++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1[4] = f4[0]; tmp1[5] = f5[0]; tmp1[6] = f6[0]; tmp1[7] = f7[0]; tmp1 += 8; f0++; f1++; f2++; f3++; f4++; f5++; f6++; f7++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; tmp2[4] = b4[0]; tmp2[5] = b5[0]; tmp2[6] = b6[0]; tmp2[7] = b7[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; tmp2[4] = 0; tmp2[5] = 0; tmp2[6] = 0; tmp2[7] = 0; } } int i = 0; for (; i + 3 < channel_remain; i += 4) { int ii = channel_count * 8 + i; float* k0 = img_data + (ii + 0) * inwh; float* k1 = img_data + (ii + 1) * inwh; float* k2 = img_data + (ii + 2) * inwh; float* k3 = img_data + (ii + 3) * inwh; float* f0 = kernel_data + (ii + 0) * 9; float* f1 = kernel_data + (ii + 1) * 9; float* f2 = kernel_data + (ii + 2) * 9; float* f3 = kernel_data + (ii + 3) * 9; float* b0 = bias_data + (ii + 0); float* b1 = bias_data + (ii + 1); float* b2 = bias_data + (ii + 2); float* b3 = bias_data + 
(ii + 3); float* tmp0 = img_tmp + channel_count * 8 * inwh; float* tmp1 = kernel_tmp + channel_count * 8 * 9; float* tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0 += 8; k0++; k1++; k2++; k3++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1 += 8; f0++; f1++; f2++; f3++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; } } for (; i < channel_remain; i++) { int ii = channel_count * 8 + i; float* k0 = img_data + ii * inwh; float* f0 = kernel_data + ii * 9; float* b0 = bias_data + ii; float* tmp0 = img_tmp + channel_count * 8 * inwh; float* tmp1 = kernel_tmp + channel_count * 8 * 9; float* tmp2 = bias_tmp + channel_count * 8; for (int j = 0; j < inwh; j++) { tmp0[i] = k0[0]; tmp0 += 8; k0++; } for (int j = 0; j < 9; j++) { tmp1[i] = f0[0]; tmp1 += 8; f0++; } if (bias_data) { tmp2[i] = b0[0]; } else { tmp2[i] = 0; } } } float* output_tmp = ( float* )sys_malloc((unsigned long)outwh * (channel_count + 1) * 8 * sizeof(float)); for (int c = 0; c < channel_count + 1; c++) { float* ktmp = kernel_tmp + c * 8 * 9; float* btmp = bias_tmp + c * 8; for (int i = 0; i < outh; i++) { int j = 0; float* itmp0 = img_tmp + c * 8 * inwh + 8 * i * inw; float* itmp1 = img_tmp + c * 8 * inwh + 8 * (i + 1) * inw; float* itmp2 = img_tmp + c * 8 * inwh + 8 * (i + 2) * inw; float* otmp = output_tmp + c * 8 * outwh + 8 * i * outw; for (; j + 7 < outw; j += 8) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _sum2 = _mm256_loadu_ps(btmp); __m256 _sum3 = _mm256_loadu_ps(btmp); __m256 _sum4 = _mm256_loadu_ps(btmp); __m256 _sum5 = _mm256_loadu_ps(btmp); __m256 _sum6 = _mm256_loadu_ps(btmp); __m256 _sum7 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = 
_mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _va4 = _mm256_loadu_ps(itmp0 + 32); __m256 _va5 = _mm256_loadu_ps(itmp0 + 40); __m256 _va6 = _mm256_loadu_ps(itmp0 + 48); __m256 _va7 = _mm256_loadu_ps(itmp0 + 56); __m256 _va8 = _mm256_loadu_ps(itmp0 + 64); __m256 _va9 = _mm256_loadu_ps(itmp0 + 72); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum4 = _mm256_fmadd_ps(_va4, _vb0, _sum4); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _sum5 = _mm256_fmadd_ps(_va5, _vb0, _sum5); _sum4 = _mm256_fmadd_ps(_va5, _vb1, _sum4); _sum5 = _mm256_fmadd_ps(_va6, _vb1, _sum5); _sum4 = _mm256_fmadd_ps(_va6, _vb2, _sum4); _sum6 = _mm256_fmadd_ps(_va6, _vb0, _sum6); _sum7 = _mm256_fmadd_ps(_va7, _vb0, _sum7); _sum5 = _mm256_fmadd_ps(_va7, _vb2, _sum5); _sum6 = _mm256_fmadd_ps(_va7, _vb1, _sum6); _sum7 = _mm256_fmadd_ps(_va8, _vb1, _sum7); _sum6 = _mm256_fmadd_ps(_va8, _vb2, _sum6); _sum7 = _mm256_fmadd_ps(_va9, _vb2, _sum7); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _va4 = _mm256_loadu_ps(itmp1 + 32); _va5 = _mm256_loadu_ps(itmp1 + 40); _va6 = _mm256_loadu_ps(itmp1 + 48); _va7 = _mm256_loadu_ps(itmp1 + 56); _va8 = _mm256_loadu_ps(itmp1 + 64); _va9 = _mm256_loadu_ps(itmp1 + 72); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, 
_vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum4 = _mm256_fmadd_ps(_va4, _vb0, _sum4); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _sum5 = _mm256_fmadd_ps(_va5, _vb0, _sum5); _sum4 = _mm256_fmadd_ps(_va5, _vb1, _sum4); _sum5 = _mm256_fmadd_ps(_va6, _vb1, _sum5); _sum4 = _mm256_fmadd_ps(_va6, _vb2, _sum4); _sum6 = _mm256_fmadd_ps(_va6, _vb0, _sum6); _sum7 = _mm256_fmadd_ps(_va7, _vb0, _sum7); _sum5 = _mm256_fmadd_ps(_va7, _vb2, _sum5); _sum6 = _mm256_fmadd_ps(_va7, _vb1, _sum6); _sum7 = _mm256_fmadd_ps(_va8, _vb1, _sum7); _sum6 = _mm256_fmadd_ps(_va8, _vb2, _sum6); _sum7 = _mm256_fmadd_ps(_va9, _vb2, _sum7); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _va4 = _mm256_loadu_ps(itmp2 + 32); _va5 = _mm256_loadu_ps(itmp2 + 40); _va6 = _mm256_loadu_ps(itmp2 + 48); _va7 = _mm256_loadu_ps(itmp2 + 56); _va8 = _mm256_loadu_ps(itmp2 + 64); _va9 = _mm256_loadu_ps(itmp2 + 72); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum4 = _mm256_fmadd_ps(_va4, _vb0, _sum4); _sum2 = _mm256_fmadd_ps(_va4, 
_vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _sum5 = _mm256_fmadd_ps(_va5, _vb0, _sum5); _sum4 = _mm256_fmadd_ps(_va5, _vb1, _sum4); _sum5 = _mm256_fmadd_ps(_va6, _vb1, _sum5); _sum4 = _mm256_fmadd_ps(_va6, _vb2, _sum4); _sum6 = _mm256_fmadd_ps(_va6, _vb0, _sum6); _sum7 = _mm256_fmadd_ps(_va7, _vb0, _sum7); _sum5 = _mm256_fmadd_ps(_va7, _vb2, _sum5); _sum6 = _mm256_fmadd_ps(_va7, _vb1, _sum6); _sum7 = _mm256_fmadd_ps(_va8, _vb1, _sum7); _sum6 = _mm256_fmadd_ps(_va8, _vb2, _sum6); _sum7 = _mm256_fmadd_ps(_va9, _vb2, _sum7); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); _mm256_storeu_ps(otmp + 16, _sum2); _mm256_storeu_ps(otmp + 24, _sum3); _mm256_storeu_ps(otmp + 32, _sum4); _mm256_storeu_ps(otmp + 40, _sum5); _mm256_storeu_ps(otmp + 48, _sum6); _mm256_storeu_ps(otmp + 56, _sum7); itmp0 += 64; itmp1 += 64; itmp2 += 64; otmp += 64; } for (; j + 3 < outw; j += 4) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _sum2 = _mm256_loadu_ps(btmp); __m256 _sum3 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _va4 = _mm256_loadu_ps(itmp0 + 32); __m256 _va5 = _mm256_loadu_ps(itmp0 + 40); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _va0 = _mm256_loadu_ps(itmp1); _va1 = 
_mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _va4 = _mm256_loadu_ps(itmp1 + 32); _va5 = _mm256_loadu_ps(itmp1 + 40); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _va4 = _mm256_loadu_ps(itmp2 + 32); _va5 = _mm256_loadu_ps(itmp2 + 40); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); _mm256_storeu_ps(otmp + 16, _sum2); _mm256_storeu_ps(otmp + 24, _sum3); itmp0 += 32; itmp1 += 32; itmp2 += 32; otmp += 32; } for (; j + 1 < outw; j += 2) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = 
_mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); itmp0 += 16; itmp1 += 16; itmp2 += 16; otmp += 16; } for (; j < outw; j++) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = 
_mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _mm256_storeu_ps(otmp, _sum0); itmp0 += 8; itmp1 += 8; itmp2 += 8; otmp += 8; } } } // load_data { for (int i = 0; i < channel_count; i++) { float* otmp = output_tmp + i * 8 * outwh; float* tmp0 = output + i * 8 * outwh; float* tmp1 = output + i * 8 * outwh + 1 * outwh; float* tmp2 = output + i * 8 * outwh + 2 * outwh; float* tmp3 = output + i * 8 * outwh + 3 * outwh; float* tmp4 = output + i * 8 * outwh + 4 * outwh; float* tmp5 = output + i * 8 * outwh + 5 * outwh; float* tmp6 = output + i * 8 * outwh + 6 * outwh; float* tmp7 = output + i * 8 * outwh + 7 * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; tmp4[0] = otmp[4]; tmp5[0] = otmp[5]; tmp6[0] = otmp[6]; tmp7[0] = otmp[7]; otmp += 8; tmp0++; tmp1++; tmp2++; tmp3++; tmp4++; tmp5++; tmp6++; tmp7++; } } int i = 0; for (; i + 3 < channel_remain; i += 4) { int ii = channel_count * 8 + i; float* otmp = output_tmp + ii * outwh; float* tmp0 = output + ii * outwh; float* tmp1 = output + ii * outwh + 1 * outwh; float* tmp2 = output + ii * outwh + 2 * outwh; float* tmp3 = output + ii * outwh + 3 * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = 
otmp[3]; otmp += 8; tmp0++; tmp1++; tmp2++; tmp3++; } } for (; i < channel_remain; i++) { int ii = channel_count * 8 + i; float* otmp = output_tmp + channel_count * 8 * outwh; float* tmp0 = output + ii * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[i]; otmp += 8; tmp0++; } } } sys_free(output_tmp); sys_free(img_tmp); sys_free(kernel_tmp); sys_free(bias_tmp); } static void convdw3x3s2(float* output, float* img_data, float* kernel_data, float* bias_data, int inc, int inh, int inw, int outh, int outw, int num_thread) { int inwh = inw * inh; int outwh = outw * outh; int channel_count = inc >> 3; int channel_remain = inc - (channel_count << 3); // generate the image tmp float* img_tmp = ( float* )sys_malloc(8 * (unsigned long)inwh * (channel_count + 1) * sizeof(float)); float* kernel_tmp = ( float* )sys_malloc(8 * 9 * (channel_count + 1) * sizeof(float)); float* bias_tmp = ( float* )sys_malloc(8 * (channel_count + 1) * sizeof(float)); { for (int i = 0; i < channel_count; i++) { int ii = i * 8; const float* k0 = img_data + (ii + 0) * inwh; const float* k1 = img_data + (ii + 1) * inwh; const float* k2 = img_data + (ii + 2) * inwh; const float* k3 = img_data + (ii + 3) * inwh; const float* k4 = img_data + (ii + 4) * inwh; const float* k5 = img_data + (ii + 5) * inwh; const float* k6 = img_data + (ii + 6) * inwh; const float* k7 = img_data + (ii + 7) * inwh; const float* f0 = kernel_data + (ii + 0) * 9; const float* f1 = kernel_data + (ii + 1) * 9; const float* f2 = kernel_data + (ii + 2) * 9; const float* f3 = kernel_data + (ii + 3) * 9; const float* f4 = kernel_data + (ii + 4) * 9; const float* f5 = kernel_data + (ii + 5) * 9; const float* f6 = kernel_data + (ii + 6) * 9; const float* f7 = kernel_data + (ii + 7) * 9; const float* b0 = bias_data + (ii + 0); const float* b1 = bias_data + (ii + 1); const float* b2 = bias_data + (ii + 2); const float* b3 = bias_data + (ii + 3); const float* b4 = bias_data + (ii + 4); const float* b5 = bias_data + (ii + 5); const 
float* b6 = bias_data + (ii + 6); const float* b7 = bias_data + (ii + 7); float* tmp0 = img_tmp + ii * inwh; float* tmp1 = kernel_tmp + ii * 9; float* tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0[4] = k4[0]; tmp0[5] = k5[0]; tmp0[6] = k6[0]; tmp0[7] = k7[0]; tmp0 += 8; k0++; k1++; k2++; k3++; k4++; k5++; k6++; k7++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1[4] = f4[0]; tmp1[5] = f5[0]; tmp1[6] = f6[0]; tmp1[7] = f7[0]; tmp1 += 8; f0++; f1++; f2++; f3++; f4++; f5++; f6++; f7++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; tmp2[4] = b4[0]; tmp2[5] = b5[0]; tmp2[6] = b6[0]; tmp2[7] = b7[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; tmp2[4] = 0; tmp2[5] = 0; tmp2[6] = 0; tmp2[7] = 0; } } int i = 0; for (; i + 3 < channel_remain; i += 4) { int ii = channel_count * 8 + i; float* k0 = img_data + (ii + 0) * inwh; float* k1 = img_data + (ii + 1) * inwh; float* k2 = img_data + (ii + 2) * inwh; float* k3 = img_data + (ii + 3) * inwh; float* f0 = kernel_data + (ii + 0) * 9; float* f1 = kernel_data + (ii + 1) * 9; float* f2 = kernel_data + (ii + 2) * 9; float* f3 = kernel_data + (ii + 3) * 9; float* b0 = bias_data + (ii + 0); float* b1 = bias_data + (ii + 1); float* b2 = bias_data + (ii + 2); float* b3 = bias_data + (ii + 3); float* tmp0 = img_tmp + channel_count * 8 * inwh; float* tmp1 = kernel_tmp + channel_count * 8 * 9; float* tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0 += 8; k0++; k1++; k2++; k3++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1 += 8; f0++; f1++; f2++; f3++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; } } for (; 
i < channel_remain; i++) { int ii = channel_count * 8 + i; float* k0 = img_data + ii * inwh; float* f0 = kernel_data + ii * 9; float* b0 = bias_data + ii; float* tmp0 = img_tmp + channel_count * 8 * inwh; float* tmp1 = kernel_tmp + channel_count * 8 * 9; float* tmp2 = bias_tmp + channel_count * 8; for (int j = 0; j < inwh; j++) { tmp0[i] = k0[0]; tmp0 += 8; k0++; } for (int j = 0; j < 9; j++) { tmp1[i] = f0[0]; tmp1 += 8; f0++; } if (bias_data) { tmp2[i] = b0[0]; } else { tmp2[i] = 0; } } } float* output_tmp = ( float* )sys_malloc((unsigned long)outwh * (channel_count + 1) * 8 * sizeof(float)); for (int c = 0; c < channel_count + 1; c++) { float* ktmp = kernel_tmp + c * 8 * 9; float* btmp = bias_tmp + c * 8; for (int i = 0; i < outh; i++) { int j = 0; float* itmp0 = img_tmp + c * 8 * inwh + 8 * i * 2 * inw; float* itmp1 = img_tmp + c * 8 * inwh + 8 * (i * 2 + 1) * inw; float* itmp2 = img_tmp + c * 8 * inwh + 8 * (i * 2 + 2) * inw; float* otmp = output_tmp + c * 8 * outwh + 8 * i * outw; for (; j + 3 < outw; j += 4) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _sum2 = _mm256_loadu_ps(btmp); __m256 _sum3 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _va4 = _mm256_loadu_ps(itmp0 + 32); __m256 _va5 = _mm256_loadu_ps(itmp0 + 40); __m256 _va6 = _mm256_loadu_ps(itmp0 + 48); __m256 _va7 = _mm256_loadu_ps(itmp0 + 56); __m256 _va8 = _mm256_loadu_ps(itmp0 + 64); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va4, _vb0, _sum2); _sum2 
= _mm256_fmadd_ps(_va5, _vb1, _sum2); _sum2 = _mm256_fmadd_ps(_va6, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va6, _vb0, _sum3); _sum3 = _mm256_fmadd_ps(_va7, _vb1, _sum3); _sum3 = _mm256_fmadd_ps(_va8, _vb2, _sum3); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _va4 = _mm256_loadu_ps(itmp1 + 32); _va5 = _mm256_loadu_ps(itmp1 + 40); _va6 = _mm256_loadu_ps(itmp1 + 48); _va7 = _mm256_loadu_ps(itmp1 + 56); _va8 = _mm256_loadu_ps(itmp1 + 64); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va4, _vb0, _sum2); _sum2 = _mm256_fmadd_ps(_va5, _vb1, _sum2); _sum2 = _mm256_fmadd_ps(_va6, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va6, _vb0, _sum3); _sum3 = _mm256_fmadd_ps(_va7, _vb1, _sum3); _sum3 = _mm256_fmadd_ps(_va8, _vb2, _sum3); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _va4 = _mm256_loadu_ps(itmp2 + 32); _va5 = _mm256_loadu_ps(itmp2 + 40); _va6 = _mm256_loadu_ps(itmp2 + 48); _va7 = _mm256_loadu_ps(itmp2 + 56); _va8 = _mm256_loadu_ps(itmp2 + 64); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va4, _vb0, _sum2); _sum2 = _mm256_fmadd_ps(_va5, _vb1, _sum2); _sum2 = _mm256_fmadd_ps(_va6, _vb2, _sum2); _sum3 = 
_mm256_fmadd_ps(_va6, _vb0, _sum3); _sum3 = _mm256_fmadd_ps(_va7, _vb1, _sum3); _sum3 = _mm256_fmadd_ps(_va8, _vb2, _sum3); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); _mm256_storeu_ps(otmp + 16, _sum2); _mm256_storeu_ps(otmp + 24, _sum3); itmp0 += 64; itmp1 += 64; itmp2 += 64; otmp += 32; } for (; j + 1 < outw; j += 2) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _va4 = _mm256_loadu_ps(itmp0 + 32); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _va4 = _mm256_loadu_ps(itmp1 + 32); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _va4 = _mm256_loadu_ps(itmp2 + 32); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, 
_vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); itmp0 += 32; itmp1 += 32; itmp2 += 32; otmp += 16; } for (; j < outw; j++) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _mm256_storeu_ps(otmp, _sum0); itmp0 += 16; itmp1 += 16; itmp2 += 16; otmp += 8; } } } // load_data { for (int i = 0; i < channel_count; i++) { float* otmp = output_tmp + i * 8 * outwh; float* tmp0 = output + i * 8 * outwh; float* tmp1 = output + i * 8 * outwh + 1 * outwh; float* tmp2 = output + i * 8 * outwh + 2 * outwh; float* tmp3 = output + i * 8 * outwh + 3 * outwh; float* tmp4 = output + i * 8 * outwh + 4 * outwh; float* tmp5 = output + i * 8 * outwh + 5 * outwh; float* tmp6 = output + i * 8 * outwh + 6 * outwh; float* tmp7 = output + i * 8 * outwh + 7 * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = 
otmp[2]; tmp3[0] = otmp[3]; tmp4[0] = otmp[4]; tmp5[0] = otmp[5]; tmp6[0] = otmp[6]; tmp7[0] = otmp[7]; otmp += 8; tmp0++; tmp1++; tmp2++; tmp3++; tmp4++; tmp5++; tmp6++; tmp7++; } } int i = 0; for (; i + 3 < channel_remain; i += 4) { int ii = channel_count * 8 + i; float* otmp = output_tmp + ii * outwh; float* tmp0 = output + ii * outwh; float* tmp1 = output + ii * outwh + 1 * outwh; float* tmp2 = output + ii * outwh + 2 * outwh; float* tmp3 = output + ii * outwh + 3 * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; otmp += 8; tmp0++; tmp1++; tmp2++; tmp3++; } } for (; i < channel_remain; i++) { int ii = channel_count * 8 + i; float* otmp = output_tmp + channel_count * 8 * outwh; float* tmp0 = output + ii * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[i]; otmp += 8; tmp0++; } } } sys_free(output_tmp); sys_free(img_tmp); sys_free(kernel_tmp); sys_free(bias_tmp); } #elif __SSE2__ static void convdw3x3s1(float* output, float* img_data, float* kernel_data, float* bias_data, int inc, int inh, int inw, int outh, int outw, int num_thread) { int inwh = inw * inh; int outwh = outw * outh; int channel_count = inc >> 2; int channel_remain = inc - (channel_count << 2); // generate the image tmp float* img_tmp = ( float* )sys_malloc(4 * inwh * (channel_count + 1) * sizeof(float)); float* kernel_tmp = ( float* )sys_malloc(4 * 9 * (channel_count + 1) * sizeof(float)); float* bias_tmp = ( float* )sys_malloc(4 * (channel_count + 1) * sizeof(float)); { for (int i = 0; i < channel_count; i++) { int ii = i * 4; float* k0 = img_data + (ii + 0) * inwh; float* k1 = img_data + (ii + 1) * inwh; float* k2 = img_data + (ii + 2) * inwh; float* k3 = img_data + (ii + 3) * inwh; float* f0 = kernel_data + (ii + 0) * 9; float* f1 = kernel_data + (ii + 1) * 9; float* f2 = kernel_data + (ii + 2) * 9; float* f3 = kernel_data + (ii + 3) * 9; float* b0 = bias_data + (ii + 0); float* b1 = bias_data + (ii + 1); float* b2 = 
bias_data + (ii + 2); float* b3 = bias_data + (ii + 3); float* tmp0 = img_tmp + ii * inwh; float* tmp1 = kernel_tmp + ii * 9; float* tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0 += 4; k0++; k1++; k2++; k3++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1 += 4; f0++; f1++; f2++; f3++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; } } for (int i = 0; i < channel_remain; i++) { int ii = channel_count * 4 + i; float* k0 = img_data + ii * inwh; float* f0 = kernel_data + ii * 9; float* b0 = bias_data + ii; float* tmp0 = img_tmp + channel_count * 4 * inwh; float* tmp1 = kernel_tmp + channel_count * 4 * 9; float* tmp2 = bias_tmp + channel_count * 4; for (int j = 0; j < inwh; j++) { tmp0[i] = k0[0]; tmp0 += 4; k0++; } for (int j = 0; j < 9; j++) { tmp1[i] = f0[0]; tmp1 += 4; f0++; } if (bias_data) { tmp2[i] = b0[0]; } else { tmp2[i] = 0; } } } float* output_tmp = ( float* )sys_malloc(outwh * 4 * (channel_count + 1) * sizeof(float)); for (int c = 0; c < channel_count + 1; c++) { float* ktmp = kernel_tmp + c * 4 * 9; float* btmp = bias_tmp + c * 4; for (int i = 0; i < outh; i++) { int j = 0; float* itmp0 = img_tmp + c * 4 * inwh + 4 * i * inw; float* itmp1 = img_tmp + c * 4 * inwh + 4 * (i + 1) * inw; float* itmp2 = img_tmp + c * 4 * inwh + 4 * (i + 2) * inw; float* otmp = output_tmp + c * 4 * outwh + 4 * i * outw; for (; j + 7 < outw; j += 8) { #if __SSE__ __m128 _sum0 = _mm_loadu_ps(btmp); __m128 _sum1 = _mm_loadu_ps(btmp); __m128 _sum2 = _mm_loadu_ps(btmp); __m128 _sum3 = _mm_loadu_ps(btmp); __m128 _sum4 = _mm_loadu_ps(btmp); __m128 _sum5 = _mm_loadu_ps(btmp); __m128 _sum6 = _mm_loadu_ps(btmp); __m128 _sum7 = _mm_loadu_ps(btmp); __m128 _va0 = _mm_loadu_ps(itmp0); __m128 _va1 = _mm_loadu_ps(itmp0 + 4); __m128 _va2 = _mm_loadu_ps(itmp0 
+ 8); __m128 _va3 = _mm_loadu_ps(itmp0 + 12); __m128 _va4 = _mm_loadu_ps(itmp0 + 16); __m128 _va5 = _mm_loadu_ps(itmp0 + 20); __m128 _va6 = _mm_loadu_ps(itmp0 + 24); __m128 _va7 = _mm_loadu_ps(itmp0 + 28); __m128 _va8 = _mm_loadu_ps(itmp0 + 32); __m128 _va9 = _mm_loadu_ps(itmp0 + 36); __m128 _vb0 = _mm_loadu_ps(ktmp); __m128 _vb1 = _mm_loadu_ps(ktmp + 4); __m128 _vb2 = _mm_loadu_ps(ktmp + 8); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum4 = _mm_add_ps(_mm_mul_ps(_va4, _vb0), _sum4); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _sum5 = _mm_add_ps(_mm_mul_ps(_va5, _vb0), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va5, _vb1), _sum4); _sum5 = _mm_add_ps(_mm_mul_ps(_va6, _vb1), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va6, _vb2), _sum4); _sum6 = _mm_add_ps(_mm_mul_ps(_va6, _vb0), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va7, _vb0), _sum7); _sum5 = _mm_add_ps(_mm_mul_ps(_va7, _vb2), _sum5); _sum6 = _mm_add_ps(_mm_mul_ps(_va7, _vb1), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va8, _vb1), _sum7); _sum6 = _mm_add_ps(_mm_mul_ps(_va8, _vb2), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va9, _vb2), _sum7); _va0 = _mm_loadu_ps(itmp1); _va1 = _mm_loadu_ps(itmp1 + 4); _va2 = _mm_loadu_ps(itmp1 + 8); _va3 = _mm_loadu_ps(itmp1 + 12); _va4 = _mm_loadu_ps(itmp1 + 16); _va5 = _mm_loadu_ps(itmp1 + 20); _va6 = _mm_loadu_ps(itmp1 + 24); _va7 = _mm_loadu_ps(itmp1 + 28); _va8 = _mm_loadu_ps(itmp1 + 32); _va9 = _mm_loadu_ps(itmp1 + 36); _vb0 = _mm_loadu_ps(ktmp + 12); _vb1 = _mm_loadu_ps(ktmp 
+ 16); _vb2 = _mm_loadu_ps(ktmp + 20); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum4 = _mm_add_ps(_mm_mul_ps(_va4, _vb0), _sum4); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _sum5 = _mm_add_ps(_mm_mul_ps(_va5, _vb0), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va5, _vb1), _sum4); _sum5 = _mm_add_ps(_mm_mul_ps(_va6, _vb1), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va6, _vb2), _sum4); _sum6 = _mm_add_ps(_mm_mul_ps(_va6, _vb0), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va7, _vb0), _sum7); _sum5 = _mm_add_ps(_mm_mul_ps(_va7, _vb2), _sum5); _sum6 = _mm_add_ps(_mm_mul_ps(_va7, _vb1), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va8, _vb1), _sum7); _sum6 = _mm_add_ps(_mm_mul_ps(_va8, _vb2), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va9, _vb2), _sum7); _va0 = _mm_loadu_ps(itmp2); _va1 = _mm_loadu_ps(itmp2 + 4); _va2 = _mm_loadu_ps(itmp2 + 8); _va3 = _mm_loadu_ps(itmp2 + 12); _va4 = _mm_loadu_ps(itmp2 + 16); _va5 = _mm_loadu_ps(itmp2 + 20); _va6 = _mm_loadu_ps(itmp2 + 24); _va7 = _mm_loadu_ps(itmp2 + 28); _va8 = _mm_loadu_ps(itmp2 + 32); _va9 = _mm_loadu_ps(itmp2 + 36); _vb0 = _mm_loadu_ps(ktmp + 24); _vb1 = _mm_loadu_ps(ktmp + 28); _vb2 = _mm_loadu_ps(ktmp + 32); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = 
_mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum4 = _mm_add_ps(_mm_mul_ps(_va4, _vb0), _sum4); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _sum5 = _mm_add_ps(_mm_mul_ps(_va5, _vb0), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va5, _vb1), _sum4); _sum5 = _mm_add_ps(_mm_mul_ps(_va6, _vb1), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va6, _vb2), _sum4); _sum6 = _mm_add_ps(_mm_mul_ps(_va6, _vb0), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va7, _vb0), _sum7); _sum5 = _mm_add_ps(_mm_mul_ps(_va7, _vb2), _sum5); _sum6 = _mm_add_ps(_mm_mul_ps(_va7, _vb1), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va8, _vb1), _sum7); _sum6 = _mm_add_ps(_mm_mul_ps(_va8, _vb2), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va9, _vb2), _sum7); _mm_storeu_ps(otmp, _sum0); _mm_storeu_ps(otmp + 4, _sum1); _mm_storeu_ps(otmp + 8, _sum2); _mm_storeu_ps(otmp + 12, _sum3); _mm_storeu_ps(otmp + 16, _sum4); _mm_storeu_ps(otmp + 20, _sum5); _mm_storeu_ps(otmp + 24, _sum6); _mm_storeu_ps(otmp + 28, _sum7); #else float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum1[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum2[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum3[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum4[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum5[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum6[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum7[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; for (int k = 0; k < 4; k++) { sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24]; sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28]; sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32]; sum1[k] += itmp0[k + 4] 
* ktmp[k]; sum1[k] += itmp1[k + 4] * ktmp[k + 12]; sum1[k] += itmp2[k + 4] * ktmp[k + 24]; sum1[k] += itmp0[k + 8] * ktmp[k + 4]; sum1[k] += itmp1[k + 8] * ktmp[k + 16]; sum1[k] += itmp2[k + 8] * ktmp[k + 28]; sum1[k] += itmp0[k + 12] * ktmp[k + 8]; sum1[k] += itmp1[k + 12] * ktmp[k + 20]; sum1[k] += itmp2[k + 12] * ktmp[k + 32]; sum2[k] += itmp0[k + 8] * ktmp[k]; sum2[k] += itmp1[k + 8] * ktmp[k + 12]; sum2[k] += itmp2[k + 8] * ktmp[k + 24]; sum2[k] += itmp0[k + 12] * ktmp[k + 4]; sum2[k] += itmp1[k + 12] * ktmp[k + 16]; sum2[k] += itmp2[k + 12] * ktmp[k + 28]; sum2[k] += itmp0[k + 16] * ktmp[k + 8]; sum2[k] += itmp1[k + 16] * ktmp[k + 20]; sum2[k] += itmp2[k + 16] * ktmp[k + 32]; sum3[k] += itmp0[k + 12] * ktmp[k]; sum3[k] += itmp1[k + 12] * ktmp[k + 12]; sum3[k] += itmp2[k + 12] * ktmp[k + 24]; sum3[k] += itmp0[k + 16] * ktmp[k + 4]; sum3[k] += itmp1[k + 16] * ktmp[k + 16]; sum3[k] += itmp2[k + 16] * ktmp[k + 28]; sum3[k] += itmp0[k + 20] * ktmp[k + 8]; sum3[k] += itmp1[k + 20] * ktmp[k + 20]; sum3[k] += itmp2[k + 20] * ktmp[k + 32]; sum4[k] += itmp0[k + 16] * ktmp[k]; sum4[k] += itmp1[k + 16] * ktmp[k + 12]; sum4[k] += itmp2[k + 16] * ktmp[k + 24]; sum4[k] += itmp0[k + 20] * ktmp[k + 4]; sum4[k] += itmp1[k + 20] * ktmp[k + 16]; sum4[k] += itmp2[k + 20] * ktmp[k + 28]; sum4[k] += itmp0[k + 24] * ktmp[k + 8]; sum4[k] += itmp1[k + 24] * ktmp[k + 20]; sum4[k] += itmp2[k + 24] * ktmp[k + 32]; sum5[k] += itmp0[k + 20] * ktmp[k]; sum5[k] += itmp1[k + 20] * ktmp[k + 12]; sum5[k] += itmp2[k + 20] * ktmp[k + 24]; sum5[k] += itmp0[k + 24] * ktmp[k + 4]; sum5[k] += itmp1[k + 24] * ktmp[k + 16]; sum5[k] += itmp2[k + 24] * ktmp[k + 28]; sum5[k] += itmp0[k + 28] * ktmp[k + 8]; sum5[k] += itmp1[k + 28] * ktmp[k + 20]; sum5[k] += itmp2[k + 28] * ktmp[k + 32]; sum6[k] += itmp0[k + 24] * ktmp[k]; sum6[k] += itmp1[k + 24] * ktmp[k + 12]; sum6[k] += itmp2[k + 24] * ktmp[k + 24]; sum6[k] += itmp0[k + 28] * ktmp[k + 4]; sum6[k] += itmp1[k + 28] * ktmp[k + 16]; sum6[k] += itmp2[k + 
28] * ktmp[k + 28]; sum6[k] += itmp0[k + 32] * ktmp[k + 8]; sum6[k] += itmp1[k + 32] * ktmp[k + 20]; sum6[k] += itmp2[k + 32] * ktmp[k + 32]; sum7[k] += itmp0[k + 28] * ktmp[k]; sum7[k] += itmp1[k + 28] * ktmp[k + 12]; sum7[k] += itmp2[k + 28] * ktmp[k + 24]; sum7[k] += itmp0[k + 32] * ktmp[k + 4]; sum7[k] += itmp1[k + 32] * ktmp[k + 16]; sum7[k] += itmp2[k + 32] * ktmp[k + 28]; sum7[k] += itmp0[k + 36] * ktmp[k + 8]; sum7[k] += itmp1[k + 36] * ktmp[k + 20]; sum7[k] += itmp2[k + 36] * ktmp[k + 32]; } for (int k = 0; k < 4; k++) { otmp[k] = sum0[k]; otmp[k + 4] = sum1[k]; otmp[k + 8] = sum2[k]; otmp[k + 12] = sum3[k]; otmp[k + 16] = sum4[k]; otmp[k + 20] = sum5[k]; otmp[k + 24] = sum6[k]; otmp[k + 28] = sum7[k]; } #endif itmp0 += 32; itmp1 += 32; itmp2 += 32; otmp += 32; } for (; j + 3 < outw; j += 4) { #if __SSE__ __m128 _sum0 = _mm_loadu_ps(btmp); __m128 _sum1 = _mm_loadu_ps(btmp); __m128 _sum2 = _mm_loadu_ps(btmp); __m128 _sum3 = _mm_loadu_ps(btmp); __m128 _va0 = _mm_loadu_ps(itmp0); __m128 _va1 = _mm_loadu_ps(itmp0 + 4); __m128 _va2 = _mm_loadu_ps(itmp0 + 8); __m128 _va3 = _mm_loadu_ps(itmp0 + 12); __m128 _va4 = _mm_loadu_ps(itmp0 + 16); __m128 _va5 = _mm_loadu_ps(itmp0 + 20); __m128 _vb0 = _mm_loadu_ps(ktmp); __m128 _vb1 = _mm_loadu_ps(ktmp + 4); __m128 _vb2 = _mm_loadu_ps(ktmp + 8); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _va0 = _mm_loadu_ps(itmp1); _va1 = _mm_loadu_ps(itmp1 + 4); _va2 = 
_mm_loadu_ps(itmp1 + 8); _va3 = _mm_loadu_ps(itmp1 + 12); _va4 = _mm_loadu_ps(itmp1 + 16); _va5 = _mm_loadu_ps(itmp1 + 20); _vb0 = _mm_loadu_ps(ktmp + 12); _vb1 = _mm_loadu_ps(ktmp + 16); _vb2 = _mm_loadu_ps(ktmp + 20); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _va0 = _mm_loadu_ps(itmp2); _va1 = _mm_loadu_ps(itmp2 + 4); _va2 = _mm_loadu_ps(itmp2 + 8); _va3 = _mm_loadu_ps(itmp2 + 12); _va4 = _mm_loadu_ps(itmp2 + 16); _va5 = _mm_loadu_ps(itmp2 + 20); _vb0 = _mm_loadu_ps(ktmp + 24); _vb1 = _mm_loadu_ps(ktmp + 28); _vb2 = _mm_loadu_ps(ktmp + 32); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _mm_storeu_ps(otmp, _sum0); _mm_storeu_ps(otmp + 4, _sum1); _mm_storeu_ps(otmp + 8, _sum2); _mm_storeu_ps(otmp + 12, _sum3); #else float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum1[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum2[4] = {btmp[0], 
btmp[1], btmp[2], btmp[3]}; float sum3[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; for (int k = 0; k < 4; k++) { sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24]; sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28]; sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32]; sum1[k] += itmp0[k + 4] * ktmp[k]; sum1[k] += itmp1[k + 4] * ktmp[k + 12]; sum1[k] += itmp2[k + 4] * ktmp[k + 24]; sum1[k] += itmp0[k + 8] * ktmp[k + 4]; sum1[k] += itmp1[k + 8] * ktmp[k + 16]; sum1[k] += itmp2[k + 8] * ktmp[k + 28]; sum1[k] += itmp0[k + 12] * ktmp[k + 8]; sum1[k] += itmp1[k + 12] * ktmp[k + 20]; sum1[k] += itmp2[k + 12] * ktmp[k + 32]; sum2[k] += itmp0[k + 8] * ktmp[k]; sum2[k] += itmp1[k + 8] * ktmp[k + 12]; sum2[k] += itmp2[k + 8] * ktmp[k + 24]; sum2[k] += itmp0[k + 12] * ktmp[k + 4]; sum2[k] += itmp1[k + 12] * ktmp[k + 16]; sum2[k] += itmp2[k + 12] * ktmp[k + 28]; sum2[k] += itmp0[k + 16] * ktmp[k + 8]; sum2[k] += itmp1[k + 16] * ktmp[k + 20]; sum2[k] += itmp2[k + 16] * ktmp[k + 32]; sum3[k] += itmp0[k + 12] * ktmp[k]; sum3[k] += itmp1[k + 12] * ktmp[k + 12]; sum3[k] += itmp2[k + 12] * ktmp[k + 24]; sum3[k] += itmp0[k + 16] * ktmp[k + 4]; sum3[k] += itmp1[k + 16] * ktmp[k + 16]; sum3[k] += itmp2[k + 16] * ktmp[k + 28]; sum3[k] += itmp0[k + 20] * ktmp[k + 8]; sum3[k] += itmp1[k + 20] * ktmp[k + 20]; sum3[k] += itmp2[k + 20] * ktmp[k + 32]; } for (int k = 0; k < 4; k++) { otmp[k] = sum0[k]; otmp[k + 4] = sum1[k]; otmp[k + 8] = sum2[k]; otmp[k + 12] = sum3[k]; } #endif itmp0 += 16; itmp1 += 16; itmp2 += 16; otmp += 16; } for (; j < outw; j++) { #if __SSE__ __m128 _sum0 = _mm_loadu_ps(btmp); __m128 _va0 = _mm_loadu_ps(itmp0); __m128 _va1 = _mm_loadu_ps(itmp0 + 4); __m128 _va2 = _mm_loadu_ps(itmp0 + 8); __m128 _vb0 = _mm_loadu_ps(ktmp); __m128 _vb1 = _mm_loadu_ps(ktmp + 4); __m128 _vb2 = 
_mm_loadu_ps(ktmp + 8); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _va0 = _mm_loadu_ps(itmp1); _va1 = _mm_loadu_ps(itmp1 + 4); _va2 = _mm_loadu_ps(itmp1 + 8); _vb0 = _mm_loadu_ps(ktmp + 12); _vb1 = _mm_loadu_ps(ktmp + 16); _vb2 = _mm_loadu_ps(ktmp + 20); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _va0 = _mm_loadu_ps(itmp2); _va1 = _mm_loadu_ps(itmp2 + 4); _va2 = _mm_loadu_ps(itmp2 + 8); _vb0 = _mm_loadu_ps(ktmp + 24); _vb1 = _mm_loadu_ps(ktmp + 28); _vb2 = _mm_loadu_ps(ktmp + 32); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _mm_storeu_ps(otmp, _sum0); #else float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; for (int k = 0; k < 4; k++) { sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24]; sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28]; sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32]; } for (int k = 0; k < 4; k++) { otmp[k] = sum0[k]; } #endif itmp0 += 4; itmp1 += 4; itmp2 += 4; otmp += 4; } } } { for (int i = 0; i < channel_count; i++) { float* otmp = output_tmp + i * 4 * outwh; float* tmp0 = output + i * 4 * outwh; float* tmp1 = output + i * 4 * outwh + 1 * outwh; float* tmp2 = output + i * 4 * outwh + 2 * outwh; float* tmp3 = output + i * 4 * outwh + 3 * outwh; for (int i = 0; i < outwh; i++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; otmp += 4; tmp0++; tmp1++; tmp2++; tmp3++; } } for (int i = 0; i < channel_remain; i++) { int ii = channel_count * 4 + i; float* otmp = output_tmp + channel_count * 4 * outwh; float* 
tmp0 = output + ii * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[i]; otmp += 4; tmp0++; } } } sys_free(output_tmp); sys_free(img_tmp); sys_free(kernel_tmp); sys_free(bias_tmp); } static void convdw3x3s2(float* output, float* img_data, float* kernel_data, float* bias_data, int inc, int inh, int inw, int outh, int outw, int num_thread) { int inwh = inw * inh; int outwh = outw * outh; int channel_count = inc >> 2; int channel_remain = inc - (channel_count << 2); // generate the image tmp float* img_tmp = ( float* )sys_malloc(4 * inwh * (channel_count + 1) * sizeof(float)); float* kernel_tmp = ( float* )sys_malloc(4 * 9 * (channel_count + 1) * sizeof(float)); float* bias_tmp = ( float* )sys_malloc(4 * (channel_count + 1) * sizeof(float)); { for (int i = 0; i < channel_count; i++) { int ii = i * 4; float* k0 = img_data + (ii + 0) * inwh; float* k1 = img_data + (ii + 1) * inwh; float* k2 = img_data + (ii + 2) * inwh; float* k3 = img_data + (ii + 3) * inwh; float* f0 = kernel_data + (ii + 0) * 9; float* f1 = kernel_data + (ii + 1) * 9; float* f2 = kernel_data + (ii + 2) * 9; float* f3 = kernel_data + (ii + 3) * 9; float* b0 = bias_data + (ii + 0); float* b1 = bias_data + (ii + 1); float* b2 = bias_data + (ii + 2); float* b3 = bias_data + (ii + 3); float* tmp0 = img_tmp + ii * inwh; float* tmp1 = kernel_tmp + ii * 9; float* tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0 += 4; k0++; k1++; k2++; k3++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1 += 4; f0++; f1++; f2++; f3++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; } } for (int i = 0; i < channel_remain; i++) { int ii = channel_count * 4 + i; float* k0 = img_data + ii * inwh; float* f0 = kernel_data + ii * 9; float* b0 = bias_data + ii; float* tmp0 = img_tmp + 
/* [doc] Interior of convdw3x3s2 (SSE path), stride-2 3x3 depthwise conv.
 * Continues the channel-packing prologue: the leftover (channel_remain < 4)
 * channels are scattered into lane i of the 4-wide interleaved temp buffers
 * (img_tmp / kernel_tmp / bias_tmp), so each __m128 lane below processes one
 * channel.  output_tmp holds results in the same 4-interleaved layout. */
channel_count * 4 * inwh; float* tmp1 = kernel_tmp + channel_count * 4 * 9; float* tmp2 = bias_tmp + channel_count * 4; for (int j = 0; j < inwh; j++) { tmp0[i] = k0[0]; tmp0 += 4; k0++; } for (int j = 0; j < 9; j++) { tmp1[i] = f0[0]; tmp1 += 4; f0++; } if (bias_data) { tmp2[i] = b0[0]; } else { tmp2[i] = 0; } } } float* output_tmp = ( float* )sys_malloc(outwh * 4 * (channel_count + 1) * sizeof(float)); for (int c = 0; c < channel_count + 1; c++) { float* ktmp = kernel_tmp + c * 4 * 9; float* btmp = bias_tmp + c * 4; for (int i = 0; i < outh; i++) { int j = 0; float* itmp0 = img_tmp + c * 4 * inwh + 4 * i * 2 * inw; float* itmp1 = img_tmp + c * 4 * inwh + 4 * (i * 2 + 1) * inw; float* itmp2 = img_tmp + c * 4 * inwh + 4 * (i * 2 + 2) * inw; float* otmp = output_tmp + c * 4 * outwh + 4 * i * outw; for (; j + 3 < outw; j += 4) { #if __SSE__ __m128 _sum0 = _mm_loadu_ps(btmp); __m128 _sum1 = _mm_loadu_ps(btmp); __m128 _sum2 = _mm_loadu_ps(btmp); __m128 _sum3 = _mm_loadu_ps(btmp); __m128 _va0 = _mm_loadu_ps(itmp0); __m128 _va1 = _mm_loadu_ps(itmp0 + 4); __m128 _va2 = _mm_loadu_ps(itmp0 + 8); __m128 _va3 = _mm_loadu_ps(itmp0 + 12); __m128 _va4 = _mm_loadu_ps(itmp0 + 16); __m128 _va5 = _mm_loadu_ps(itmp0 + 20); __m128 _va6 = _mm_loadu_ps(itmp0 + 24); __m128 _va7 = _mm_loadu_ps(itmp0 + 28); __m128 _va8 = _mm_loadu_ps(itmp0 + 32); __m128 _vb0 = _mm_loadu_ps(ktmp); __m128 _vb1 = _mm_loadu_ps(ktmp + 4); __m128 _vb2 = _mm_loadu_ps(ktmp + 8); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va2, _vb0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va3, _vb1)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va4, _vb2)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va4, _vb0)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va5, _vb1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va6, _vb2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va6, _vb0)); _sum3 = _mm_add_ps(_sum3,
/* [doc] First kernel row (ktmp+0..8) against input row itmp0 done above.
 * _sumN accumulates output column N of the 4-wide unroll; because data is
 * 4-channel interleaved and the conv stride is 2, column N starts at float
 * offset 8*N (_va2N).  Below: row itmp1 x kernel row 2, then itmp2 x row 3. */
_mm_mul_ps(_va7, _vb1)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va8, _vb2)); _va0 = _mm_loadu_ps(itmp1); _va1 = _mm_loadu_ps(itmp1 + 4); _va2 = _mm_loadu_ps(itmp1 + 8); _va3 = _mm_loadu_ps(itmp1 + 12); _va4 = _mm_loadu_ps(itmp1 + 16); _va5 = _mm_loadu_ps(itmp1 + 20); _va6 = _mm_loadu_ps(itmp1 + 24); _va7 = _mm_loadu_ps(itmp1 + 28); _va8 = _mm_loadu_ps(itmp1 + 32); _vb0 = _mm_loadu_ps(ktmp + 12); _vb1 = _mm_loadu_ps(ktmp + 16); _vb2 = _mm_loadu_ps(ktmp + 20); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va2, _vb0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va3, _vb1)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va4, _vb2)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va4, _vb0)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va5, _vb1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va6, _vb2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va6, _vb0)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va7, _vb1)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va8, _vb2)); _va0 = _mm_loadu_ps(itmp2); _va1 = _mm_loadu_ps(itmp2 + 4); _va2 = _mm_loadu_ps(itmp2 + 8); _va3 = _mm_loadu_ps(itmp2 + 12); _va4 = _mm_loadu_ps(itmp2 + 16); _va5 = _mm_loadu_ps(itmp2 + 20); _va6 = _mm_loadu_ps(itmp2 + 24); _va7 = _mm_loadu_ps(itmp2 + 28); _va8 = _mm_loadu_ps(itmp2 + 32); _vb0 = _mm_loadu_ps(ktmp + 24); _vb1 = _mm_loadu_ps(ktmp + 28); _vb2 = _mm_loadu_ps(ktmp + 32); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va2, _vb0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va3, _vb1)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va4, _vb2)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va4, _vb0)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va5, _vb1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va6, _vb2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va6, _vb0)); _sum3 = _mm_add_ps(_sum3,
/* [doc] Third kernel row accumulated; the four packed output columns are
 * stored.  The #else branch is the scalar mirror of the same arithmetic,
 * with loop index k selecting the channel lane (0..3). */
_mm_mul_ps(_va7, _vb1)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va8, _vb2)); _mm_storeu_ps(otmp, _sum0); _mm_storeu_ps(otmp + 4, _sum1); _mm_storeu_ps(otmp + 8, _sum2); _mm_storeu_ps(otmp + 12, _sum3); #else float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum1[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum2[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum3[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; for (int k = 0; k < 4; k++) { sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24]; sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28]; sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32]; sum1[k] += itmp0[k + 8] * ktmp[k]; sum1[k] += itmp1[k + 8] * ktmp[k + 12]; sum1[k] += itmp2[k + 8] * ktmp[k + 24]; sum1[k] += itmp0[k + 12] * ktmp[k + 4]; sum1[k] += itmp1[k + 12] * ktmp[k + 16]; sum1[k] += itmp2[k + 12] * ktmp[k + 28]; sum1[k] += itmp0[k + 16] * ktmp[k + 8]; sum1[k] += itmp1[k + 16] * ktmp[k + 20]; sum1[k] += itmp2[k + 16] * ktmp[k + 32]; sum2[k] += itmp0[k + 16] * ktmp[k]; sum2[k] += itmp1[k + 16] * ktmp[k + 12]; sum2[k] += itmp2[k + 16] * ktmp[k + 24]; sum2[k] += itmp0[k + 20] * ktmp[k + 4]; sum2[k] += itmp1[k + 20] * ktmp[k + 16]; sum2[k] += itmp2[k + 20] * ktmp[k + 28]; sum2[k] += itmp0[k + 24] * ktmp[k + 8]; sum2[k] += itmp1[k + 24] * ktmp[k + 20]; sum2[k] += itmp2[k + 24] * ktmp[k + 32]; sum3[k] += itmp0[k + 24] * ktmp[k]; sum3[k] += itmp1[k + 24] * ktmp[k + 12]; sum3[k] += itmp2[k + 24] * ktmp[k + 24]; sum3[k] += itmp0[k + 28] * ktmp[k + 4]; sum3[k] += itmp1[k + 28] * ktmp[k + 16]; sum3[k] += itmp2[k + 28] * ktmp[k + 28]; sum3[k] += itmp0[k + 32] * ktmp[k + 8]; sum3[k] += itmp1[k + 32] * ktmp[k + 20]; sum3[k] += itmp2[k + 32] * ktmp[k + 32]; } for (int k = 0; k < 4; k++) { otmp[k] = sum0[k]; otmp[k + 4] = sum1[k]; otmp[k + 8] = sum2[k]; otmp[k + 12] = sum3[k]; }
/* [doc] Remainder loop: one output column per iteration; stride 2 over
 * 4-interleaved data means the input pointers advance by 8 floats while the
 * output advances by 4.  Afterwards the 4-interleaved output_tmp is
 * un-packed back to planar layout (one plane per channel) in `output`. */
#endif itmp0 += 32; itmp1 += 32; itmp2 += 32; otmp += 16; } for (; j < outw; j++) { #if __SSE__ __m128 _sum0 = _mm_loadu_ps(btmp); __m128 _va0 = _mm_loadu_ps(itmp0); __m128 _va1 = _mm_loadu_ps(itmp0 + 4); __m128 _va2 = _mm_loadu_ps(itmp0 + 8); __m128 _vb0 = _mm_loadu_ps(ktmp); __m128 _vb1 = _mm_loadu_ps(ktmp + 4); __m128 _vb2 = _mm_loadu_ps(ktmp + 8); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _va0 = _mm_loadu_ps(itmp1); _va1 = _mm_loadu_ps(itmp1 + 4); _va2 = _mm_loadu_ps(itmp1 + 8); _vb0 = _mm_loadu_ps(ktmp + 12); _vb1 = _mm_loadu_ps(ktmp + 16); _vb2 = _mm_loadu_ps(ktmp + 20); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _va0 = _mm_loadu_ps(itmp2); _va1 = _mm_loadu_ps(itmp2 + 4); _va2 = _mm_loadu_ps(itmp2 + 8); _vb0 = _mm_loadu_ps(ktmp + 24); _vb1 = _mm_loadu_ps(ktmp + 28); _vb2 = _mm_loadu_ps(ktmp + 32); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _mm_storeu_ps(otmp, _sum0); #else float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; for (int k = 0; k < 4; k++) { sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24]; sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28]; sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32]; } for (int k = 0; k < 4; k++) { otmp[k] = sum0[k]; } #endif itmp0 += 8; itmp1 += 8; itmp2 += 8; otmp += 4; } } } { for (int i = 0; i < channel_count; i++) { float* otmp = output_tmp + i * 4 * outwh; float* tmp0 = output + i * 4 * outwh; float* tmp1 = output + i * 4 * outwh + 1 * outwh; float* tmp2 = output + i * 4 * outwh + 2 * outwh; float* tmp3 =
/* [doc] Epilogue of convdw3x3s2 (SSE path): scatter the 4-interleaved
 * output_tmp back to planar `output` (4 full channels at a time, then lane i
 * for the <4 leftover channels) and free all temp buffers.  After the #else:
 * plain-C fallback kernels used when SSE is not available. */
output + i * 4 * outwh + 3 * outwh; for (int i = 0; i < outwh; i++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; otmp += 4; tmp0++; tmp1++; tmp2++; tmp3++; } } for (int i = 0; i < channel_remain; i++) { int ii = channel_count * 4 + i; float* otmp = output_tmp + channel_count * 4 * outwh; float* tmp0 = output + ii * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[i]; otmp += 4; tmp0++; } } } sys_free(output_tmp); sys_free(img_tmp); sys_free(kernel_tmp); sys_free(bias_tmp); } #else static void convdw3x3s1(float* output, float* input, float* _kernel, float* _bias, int channel, int in_h, int in_w, int out_h, int out_w, int num_thread) { int w = in_w; int h = in_h; int c_step_in = w * h; int outw = out_w; int outh = out_h; int c_step_out = outw * outh; const int group = channel; const float* kernel = _kernel; #pragma omp parallel for num_threads(num_thread) for (int g = 0; g < group; g++) { float* out = output + g * c_step_out; float* outptr = out; float* outptr2 = outptr + outw; const float bias0 = _bias ?
/* [doc] Scalar convdw3x3s1: per channel group g, computes two output rows
 * per outer pass (sum uses input rows r0..r2, sum2 uses r1..r3), falling
 * back to a single-row loop for an odd last row.  The `+= 2 + w` advance
 * skips the 2-column border remainder plus the already-consumed second row.
 * Scalar convdw3x3s2 (declared below) walks one row at a time with the
 * input pointers stepped by 2 per output pixel. */
_bias[g] : 0.f; const float* kernel0 = kernel + g * 9; const float* img0 = input + g * c_step_in; const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w * 2; const float* r3 = img0 + w * 3; const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; int i = 0; for (; i + 1 < outh; i += 2) { int remain = outw; for (; remain > 0; remain--) { float sum = bias0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; float sum2 = bias0; sum2 += r1[0] * k0[0]; sum2 += r1[1] * k0[1]; sum2 += r1[2] * k0[2]; sum2 += r2[0] * k1[0]; sum2 += r2[1] * k1[1]; sum2 += r2[2] * k1[2]; sum2 += r3[0] * k2[0]; sum2 += r3[1] * k2[1]; sum2 += r3[2] * k2[2]; *outptr = sum; *outptr2 = sum2; r0++; r1++; r2++; r3++; outptr++; outptr2++; } r0 += 2 + w; r1 += 2 + w; r2 += 2 + w; r3 += 2 + w; outptr += outw; outptr2 += outw; } for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { float sum = bias0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; *outptr = sum; r0++; r1++; r2++; outptr++; } r0 += 2; r1 += 2; r2 += 2; } } } static void convdw3x3s2(float* output, float* input, float* _kernel, float* _bias, int channel, int in_h, int in_w, int out_h, int out_w, int num_thread) { int w = in_w; int h = in_h; int c_step_in = w * h; int outw = out_w; int outh = out_h; int c_step_out = outw * outh; const int group = channel; const int tailstep = w - 2 * outw + w; const float* kernel = _kernel; #pragma omp parallel for num_threads(num_thread) for (int g = 0; g < group; g++) { float* out = output + g * c_step_out; float* outptr = out; const float* kernel0 = kernel + g * 9; const float bias0 = _bias ?
_bias[g] : 0.f; const float* img0 = input + g * c_step_in; const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w * 2; const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; int i = 0; for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { float sum = bias0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; *outptr = sum; r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } } #endif int conv_dw_run(struct tensor* input_tensor, struct tensor* weight_tensor, struct tensor* bias_tensor, struct tensor* output_tensor, struct conv_priv_info* conv_info, struct conv_param* param, int num_thread, int cpu_affinity) { float* input = ( float* )input_tensor->data; float* output = ( float* )output_tensor->data; float* kernel = ( float* )weight_tensor->data; float* biases = NULL; if (bias_tensor) biases = ( float* )bias_tensor->data; int batch_number = input_tensor->dims[0]; int inc = input_tensor->dims[1]; int inh = input_tensor->dims[2]; int inw = input_tensor->dims[3]; int in_chw = inc * inh * inw; int outc = output_tensor->dims[1]; int outh = output_tensor->dims[2]; int outw = output_tensor->dims[3]; int out_hw = outh * outw; int out_chw = out_hw * outc; int ksize_h = param->kernel_h; int ksize_w = param->kernel_w; int pad_w = param->pad_w0; int pad_h = param->pad_h0; int stride_w = param->stride_w; int stride_h = param->stride_h; int dilation_w = param->dilation_w; int dilation_h = param->dilation_h; int group = param->group; int activation = param->activation; /* pading */ int inh_tmp = inh + pad_h + pad_h; int inw_tmp = inw + pad_w + pad_w; float* input_tmp = NULL; if (inh_tmp == inh && inw_tmp == inw) input_tmp = input; else { input_tmp = ( float* )sys_malloc((size_t)inh_tmp * inw_tmp * group * sizeof(float)); 
#pragma omp parallel for num_threads(num_thread) for (int g = 0; g < group; g++) { float* pad_in = input + g * inh * inw; float* pad_out = input_tmp + g * inh_tmp * inw_tmp; pad(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0.f); } } /* process */ for (int i = 0; i < batch_number; i++) { if (stride_h == 1) convdw3x3s1(output, input_tmp, kernel, biases, group, inh_tmp, inw_tmp, outh, outw, num_thread); else convdw3x3s2(output, input_tmp, kernel, biases, group, inh_tmp, inw_tmp, outh, outw, num_thread); } /* relu */ if (activation >= 0) relu(output, batch_number * out_chw, activation); if (!(inh_tmp == inh && inw_tmp == inw)) sys_free(input_tmp); return 0; }
l7_setup.c
/* * Copyright (c) 2011-2019, Triad National Security, LLC. * All rights Reserved. * * CLAMR -- LA-CC-11-094 * * Copyright 2011-2019. Triad National Security, LLC. This software was produced * under U.S. Government contract 89233218CNA000001 for Los Alamos National * Laboratory (LANL), which is operated by Triad National Security, LLC * for the U.S. Department of Energy. The U.S. Government has rights to use, * reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR * TRIAD NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR * ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is modified * to produce derivative works, such modified software should be clearly marked, * so as not to confuse it with the version available from LANL. * * Additionally, redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the Triad National Security, LLC, Los Alamos * National Laboratory, LANL, the U.S. Government, nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE TRIAD NATIONAL SECURITY, LLC AND * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT * NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL TRIAD NATIONAL * SECURITY, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "l7.h" #include "l7p.h" #include <stdlib.h> #define L7_LOCATION "L7_SETUP" int L7_Setup( const int num_base, const int my_start_index, const int num_indices_owned, int *indices_needed, const int num_indices_needed, int *l7_id ) { /* Purpose * ======= * L7_Setup is used to setup the update/scatter database as * defined by the global indexing scheme. Each process passes * in parameters which define the indices it owns (i.e. as * defined by 'my_start_index' and 'num_indices_owned') and * lists the indices it needs ('indices_needed'). From this, * a database is defined that allows subsequent calls to * L7_Update. * * Notes: * ====== * 1) Assumes a global indexing set, linearly decomposed across * all processes. * * Arguments * ========= * num_base (input) const L7_INT * global indexing set starts with 1 (Fortran) * or with 0 (C) * * my_start_index (input) const L7_INT * Starting index number of calling process * in global indexing set. * * num_indices_owned (input) const L7_INT * Number of indices owned by calling process. * * indices_needed (input) const L7_INT* * Array containing indices needed by * calling process. * * num_indices_needed (input) const L7_INT * Number of indices of interest listed * in array 'num_indices_needed'. * * l7_id (input/output) int* * Handle to database to be setup. * * 0: L7 sets up a new database, and * assigns it a value. * > 0: L7 resets existing database with * input information. 
That is, it reuses * the allocated memory. * < 0: An error is returned. * * Notes: * ===== * 1) The handling of 0-based arrays for C and 1-based arrays for Fortran * is handled in L7_Setup. This is done by taking the input global * indices stored in 'indices_global_to_send' and converting them to * 1-based and storing them in 'indices_local_to_send'. * * 2) The indices are handled as 4-byte integers. * * 3) Serial compilation creates a no-op. * * Program Flow * ============ * 0) Check input for basic validity. * 1) Set communication parameters within database. * 2) Deternine processes this pe receives from. * 3) Determine the number of processes this pe sends to. * 4) Send number of as well as the indices needed from each sending process. * 5) Set up array containing the pes this pe sends indices to. * 6) Set up array containing the indices this pe sends to others. */ /* * Local variables. */ int ierr; /* Error code for return */ #ifdef HAVE_MPI int base_adj, /* 0 or 1 based arrays adjustment */ count_total, i, j, /* Counters */ max_sizeof_type, num_msgs, /* Number of sends and recvs needed */ numpes, /* Alias for l7_id_db.numpes. */ num_indices_acctd_for, num_outstanding_requests = 0, num_sends, offset, penum, /* Alias for l7_id_db.penum. */ *pi4_in, /* (int *)l7.receive_buffer */ *pi4_out, /* (int *)l7.send_buffer */ send_buffer_bytes_needed, /* Buffer space requirement. */ start_indices_needed, this_index; /* Offset into indexing set. */ l7_id_database *l7_id_db; MPI_Request *mpi_request; /* Local alias for l7_id_db->mpi_request. */ MPI_Status *mpi_status; /* Local alias for l7_id_db->mpi_status. */ #if defined (_L7_DEBUG) int k; /* Counter */ #endif /* * Executable Statements */ if (! 
l7.mpi_initialized){ return(0); } if (l7.initialized != 1){ ierr = -1; L7_ASSERT( l7.initialized == 1, "L7 not initialized", ierr); } /* * Check input */ if (num_base){ base_adj = 1; } else { base_adj = 0; } if (my_start_index < 0){ ierr = -1; L7_ASSERT( my_start_index >= 0, "my_start_index < 0", ierr); } if (num_indices_owned < 0){ ierr = -1; L7_ASSERT( num_indices_owned >= 0, "num_indices_owned < 0", ierr); } if (num_indices_needed > 0){ if (indices_needed == NULL){ ierr = -1; L7_ASSERT( (int *)indices_needed != NULL, "indices_needed == NULL", ierr); } } if (*l7_id < 0){ ierr = *l7_id; L7_ASSERT( *l7_id >=0, "L7 Id must be either 0 (new id) or > 0 (existing id)", ierr); } /* * Setup database structure. */ if (*l7_id != 0){ /* * Find it in the database and update based on new input. */ if (l7.first_db == NULL){ L7_ASSERT(l7.first_db != NULL, "Uninitialized l7_id input, but no ids in database", ierr); } l7_id_db = l7.first_db; while (l7_id_db){ if (l7_id_db->l7_id == *l7_id) break; l7_id_db = l7_id_db->next_db; } if (l7.first_db == NULL){ ierr = -1; L7_ASSERT( l7.first_db != NULL, "Uninitialized l7_id input, but not found in this list", ierr); } } else{ /* * Allocate new database, insert into linked list. */ if (l7.num_dbs >= L7_MAX_NUM_DBS){ ierr = -1; L7_ASSERT(l7.num_dbs < L7_MAX_NUM_DBS, "Too many L7 databases allocataed", ierr); } l7_id_db = (l7_id_database*)calloc(1L, sizeof(l7_id_database) ); if (l7_id_db == NULL){ ierr = -1; L7_ASSERT( l7_id_db != NULL, "Failed to allocate new database", ierr); } if ( !(l7.first_db) ){ l7.first_db = l7_id_db; l7.last_db = l7_id_db; l7_id_db->next_db = NULL; /* Paranoia */ l7_id_db->l7_id = 1; l7.num_dbs = 1; } else{ /* * Assign a l7_id. */ l7_id_db->l7_id = l7.last_db->l7_id + 1; /* * Reset links. */ l7.last_db->next_db = l7_id_db; l7.last_db = l7_id_db; l7.num_dbs++; } *l7_id = l7_id_db->l7_id; /* * Initialize some parameters. 
*/ l7_id_db->recv_counts_len = 0; l7_id_db->recv_from_len = 0; l7_id_db->send_to_len = 0; l7_id_db->send_counts_len = 0; l7_id_db->indices_to_send_len = 0; l7_id_db->mpi_request_len = 0; l7_id_db->mpi_status_len = 0; } /* * Store input in database. */ l7_id_db->my_start_index = my_start_index; l7_id_db->num_indices_owned = num_indices_owned; if ( (l7_id_db->indices_needed_len < num_indices_needed ) && (num_indices_needed > 0) ){ if (l7_id_db->indices_needed) free(l7_id_db->indices_needed); l7_id_db->indices_needed = (int *)calloc((unsigned long long)num_indices_needed, sizeof(int) ); if (l7_id_db->indices_needed == NULL){ ierr = -1; L7_ASSERT( (int*)(l7_id_db->indices_needed) != NULL, "Memory failure for indices_needed", ierr); } l7_id_db->indices_needed_len = num_indices_needed; } #ifdef _OPENMP #pragma omp parallel for #else #pragma omp simd #endif for (i=0; i<num_indices_needed; i++){ l7_id_db->indices_needed[i] = indices_needed[i]; } l7_id_db->num_indices_needed = num_indices_needed; ierr = MPI_Comm_rank (MPI_COMM_WORLD, &l7_id_db->penum ); L7_ASSERT( ierr == MPI_SUCCESS, "MPI_Comm_rank", ierr); ierr = MPI_Comm_size (MPI_COMM_WORLD, &l7_id_db->numpes ); L7_ASSERT( ierr == MPI_SUCCESS, "MPI_Comm_size", ierr); l7.penum = l7_id_db->penum; /* Local shorthand */ numpes = l7_id_db->numpes; penum = l7_id_db->penum; if (numpes == 1){ return(0); } /* * Create array containing starting (global) index numbers * for all processes. * * 1) Allgather num_indices_owned. * 2) Scan to create starting_index. * 3) Shift all array elements up 1 position. * 4) Set starting_indices[0] = 0. * * The latter two steps allows arrays to be used as below. 
*/ l7_id_db->starting_indices = (int *)calloc((unsigned long long)(numpes+1), sizeof(int)); if(l7_id_db->starting_indices == NULL){ ierr = -1; L7_ASSERT(l7_id_db->starting_indices != NULL, "No memory for l7_id_db->starting_indices", ierr); } ierr = MPI_Allgather( &(l7_id_db->num_indices_owned), 1, MPI_INT, &(l7_id_db->starting_indices[1]), 1, MPI_INT, MPI_COMM_WORLD); L7_ASSERT( ierr == MPI_SUCCESS, "MPI_Allgather (num_indices_owned)", ierr); l7_id_db->starting_indices[0] = 0; // l7_id_db->starting_indices[0] = 1; for (i=0; i<numpes; i++) l7_id_db->starting_indices[i+1] += l7_id_db->starting_indices[i]; /* * Determine the number of processes this pe receives from. */ l7_id_db->num_recvs = 0; start_indices_needed = -1; this_index = 0; if (num_indices_needed > 0){ for (j=0; j<numpes; j++){ if ( indices_needed[this_index] >= l7_id_db->starting_indices[j]){ if (indices_needed[this_index] < l7_id_db->starting_indices[j+1]){ l7_id_db->num_recvs++; #if defined _L7_DEBUG printf("[pe %d] Found first one on pe %d. \n", penum, j); #endif /* Skip through all the rest on pe j. */ /* SKG - Update order to silence valgrind. Don't know if * this is okay... */ while ( ( this_index < num_indices_needed) && ( indices_needed[this_index] < l7_id_db->starting_indices[j+1] ) ) this_index++; /* Remember where we found the first one. */ if ( start_indices_needed == -1) start_indices_needed = j; if (this_index == num_indices_needed) break; } } } if (l7_id_db->num_recvs == 0){ ierr = -1; L7_ASSERT(l7_id_db->num_recvs != 0, "No indices found", ierr); } } if (this_index != num_indices_needed){ printf("[pe %d] ERROR -- can't find all the indices I need. I have %d, need %d\n", penum, this_index, num_indices_needed); } #if defined _L7_DEBUG printf("[pe %d] l7_id_dp->num_recvs = %d\n", penum, l7_id_db->num_recvs); #endif /* * Allocate space for counts for each pe sending to this one. 
*/ if (l7_id_db->num_recvs > l7_id_db->recv_counts_len){ if (l7_id_db->recv_counts) free(l7_id_db->recv_counts); l7_id_db->recv_counts = (int *)calloc((unsigned long long)l7_id_db->num_recvs, sizeof(int) ); if (l7_id_db->recv_counts == NULL){ ierr = -1; L7_ASSERT(l7_id_db->recv_counts != NULL, "No space for l7_id_db->recv_counts", ierr); } l7_id_db->recv_counts_len = l7_id_db->num_recvs; int num_recvs = l7_id_db->num_recvs; // for vectorization #ifdef _OPENMP #pragma omp parallel for #endif for (i=0; i<num_recvs; i++) l7_id_db->recv_counts[i] = 0; /* calloc does not guarantee = 0. */ } if (l7_id_db->num_recvs > l7_id_db->recv_from_len){ if (l7_id_db->recv_from) free(l7_id_db->recv_from); l7_id_db->recv_from = (int *)calloc((unsigned long long)l7_id_db->num_recvs, sizeof(int) ); if (l7_id_db->recv_from == NULL){ ierr = -1; L7_ASSERT(l7_id_db->recv_from != NULL, "No space for l7_id_db->recv_from", ierr); } l7_id_db->recv_from_len = l7_id_db->num_recvs; int num_recvs = l7_id_db->num_recvs; // for vectorization #ifdef _OPENMP #pragma omp parallel for #endif for (i=0; i<num_recvs; i++) l7_id_db->recv_from[i] = -999; } /* * Determine process and the number of indices this pe recvs from it. */ if (num_indices_needed > 0){ this_index = 0; num_indices_acctd_for = 0; i=0; for (j=start_indices_needed; j<numpes; j++){ if (indices_needed[this_index] >= l7_id_db->starting_indices[j] ){ if (indices_needed[this_index] < l7_id_db->starting_indices[j+1]){ /* Found the first one on pe j. */ l7_id_db->recv_from[i] = j; l7_id_db->recv_counts[i] = 1; num_indices_acctd_for++; if (num_indices_acctd_for == num_indices_needed) break; this_index++; /* SKG - Update order to silence valgrind. Don't know if * this is okay... */ while ( ( num_indices_acctd_for < num_indices_needed ) && ( indices_needed[this_index] < l7_id_db->starting_indices[j+1] )) { /* Find the rest on pe j. 
*/ l7_id_db->recv_counts[i]++; this_index++; num_indices_acctd_for++; } if (num_indices_acctd_for == num_indices_needed) break; i++; } } } if (num_indices_needed != num_indices_acctd_for){ ierr = -1; L7_ASSERT(num_indices_needed == num_indices_acctd_for, "Failed to find all the needed indices", ierr); } } /* * Determine number of processes for which this pe owns indices * those pes need. This is done use a reduction (MPI_Allreduce). */ if (l7.sizeof_send_buffer < numpes * (int)sizeof(int)){ if (l7.send_buffer) free(l7.send_buffer); l7.send_buffer = calloc ((unsigned long long)(2*numpes), sizeof(int)); if (l7.send_buffer == NULL){ ierr = -1; L7_ASSERT(l7.send_buffer != NULL, "No memory for send buffer", ierr); } l7.sizeof_send_buffer = 2 * numpes * (int)sizeof(int); } pi4_in = (int*)l7.send_buffer; pi4_out = &pi4_in[numpes]; for (i=0; i<numpes; i++) pi4_in[i] = 0; for (i=0; i<l7_id_db->num_recvs; i++) pi4_in[l7_id_db->recv_from[i]] = 1; ierr = MPI_Allreduce(pi4_in, pi4_out, numpes, MPI_INT, MPI_SUM, MPI_COMM_WORLD); L7_ASSERT(ierr == MPI_SUCCESS, "MPI_Allreduce ( l7_id_db->recv_from )", ierr); l7_id_db->num_sends = pi4_out[penum]; #if defined _L7_DEBUG printf("[pe %d] l7_id_db->num_sends = %d \n", penum, l7_id_db->num_sends); #endif /* * Allocate request and status arrays. */ num_msgs = ( 2 * l7_id_db->num_recvs ) + l7_id_db->num_sends; /* Ensure enough outstanding messages for L7_Update_pack model. 
*/ if (num_msgs < (L7_MIN_MPI_REQS * l7_id_db->num_recvs ) ) num_msgs = L7_MIN_MPI_REQS * l7_id_db->num_recvs; if (num_msgs > l7_id_db->mpi_request_len) { if (l7_id_db->mpi_request) free(l7_id_db->mpi_request); l7_id_db->mpi_request = (MPI_Request *) calloc ((unsigned long long)num_msgs, sizeof(MPI_Request)); if (l7_id_db->mpi_request == NULL){ ierr = -1; L7_ASSERT(l7_id_db->mpi_request != NULL, "Allocation of l7_id_db->mpi_request failed", ierr); } l7_id_db->mpi_request_len = num_msgs; } if (num_msgs > l7_id_db->mpi_status_len){ if (l7_id_db->mpi_status) free(l7_id_db->mpi_status); l7_id_db->mpi_status = (MPI_Status *) calloc((unsigned long long)num_msgs, sizeof(MPI_Status) ); if (l7_id_db->mpi_status == NULL){ ierr = -1; L7_ASSERT(l7_id_db->mpi_status != NULL, "Allocation of l7_id_db->mpi_status failed", ierr); } l7_id_db->mpi_status_len = num_msgs; } /* Local shorthand */ mpi_request = l7_id_db->mpi_request; mpi_status = l7_id_db->mpi_status; /* * Send number of indices needed from each sending process. */ num_outstanding_requests = 0; for (i=0; i<l7_id_db->num_recvs; i++){ #if defined _L7_DEBUG printf("[pe %d] recv_counts[%d] = %d to pe %d \n", penum, i, l7_id_db->recv_counts[i], l7_id_db->recv_from[i] ); #endif ierr = MPI_Isend(&l7_id_db->recv_counts[i], 1, MPI_INT, l7_id_db->recv_from[i], L7_SETUP_SEND_COUNT_TAG, MPI_COMM_WORLD, &mpi_request[num_outstanding_requests++] ); L7_ASSERT(ierr == MPI_SUCCESS, "MPI_Isend (recv_counts[i] )", ierr); } /* * Receive counts for the processes to which this pe sends. * This pe doesn't know who needs what it has, so we must * use wildcard receives. 
*/ if (l7_id_db->num_sends > l7_id_db->send_counts_len){ if (l7_id_db->send_counts) free(l7_id_db->send_counts); l7_id_db->send_counts = (int *) calloc((unsigned long long)l7_id_db->num_sends, sizeof(int) ); if (l7_id_db->send_counts == NULL){ ierr = -1; L7_ASSERT(l7_id_db->send_counts != NULL, "Failed to allocate l7_id_db->send_counts", ierr); } l7_id_db->send_counts_len = l7_id_db->num_sends; } if (l7_id_db->num_sends > l7_id_db->send_to_len){ if (l7_id_db->send_to) free(l7_id_db->send_to); l7_id_db->send_to = (int *) calloc((unsigned long long)l7_id_db->num_sends, sizeof(int) ); if (l7_id_db->send_to == NULL){ ierr = -1; L7_ASSERT(l7_id_db->send_to != NULL, "Failed to allocate l7_id_db->send_to", ierr); } l7_id_db->send_to_len = l7_id_db->num_sends; } for (i=0; i<l7_id_db->num_sends; i++){ ierr = MPI_Irecv(&l7_id_db->send_counts[i], 1, MPI_INT, MPI_ANY_SOURCE, L7_SETUP_SEND_COUNT_TAG, MPI_COMM_WORLD, &mpi_request[num_outstanding_requests++] ); L7_ASSERT(ierr == MPI_SUCCESS, "MPI_Irecv ( indices_needed[i] )", ierr); } if (num_outstanding_requests > 0){ ierr = MPI_Waitall(num_outstanding_requests, mpi_request, mpi_status); L7_ASSERT(ierr == MPI_SUCCESS, "MPI_Waitall ( counts )", ierr); } num_outstanding_requests = 0; /* * Determine which processes sent the above messages. * These are the 'send_to' processes. */ offset = l7_id_db->num_recvs; for (i=0; i<l7_id_db->num_sends; i++){ l7_id_db->send_to[i] = mpi_status[offset+i].MPI_SOURCE; } /* * Allocate space for 'indices_global_to_send' and * 'indices_local_to_send'. 
*/ count_total = 0; for (i=0; i<l7_id_db->num_sends; i++){ count_total += l7_id_db->send_counts[i]; } if (count_total > l7_id_db->indices_to_send_len){ if (l7_id_db->indices_global_to_send) free(l7_id_db->indices_global_to_send); l7_id_db->indices_global_to_send = (int *) calloc((unsigned long long)count_total, sizeof(int) ); if (l7_id_db->indices_global_to_send == NULL){ ierr = -1; L7_ASSERT(l7_id_db->indices_global_to_send != NULL, "No memory for l7_id_db->indices_global_to_send.", ierr); } if (l7_id_db->indices_local_to_send) free(l7_id_db->indices_local_to_send); l7_id_db->indices_local_to_send = (int *) calloc((unsigned long long)count_total, sizeof(int) ); if (l7_id_db->indices_local_to_send == NULL){ ierr = -1; L7_ASSERT(l7_id_db->indices_local_to_send != NULL, "No memory for l7_id_db->indices_local_to_send.", ierr); } l7_id_db->indices_to_send_len = count_total; } /* * Send (global) indices needed from each sending process. */ offset = 0; for (i=0; i<l7_id_db->num_recvs; i++){ #if defined _L7_DEBUG printf("[pe %d] Sending %d indices to pe %d. \n", penum, l7_id_db->recv_counts[i], l7_id_db->recv_from[i] ); for (k=offset; k<offset+l7_id_db->recv_counts[i]; k++){ printf(" index[%d] = %d \n", k, l7_id_db->indices_needed[k] ); } #endif ierr = MPI_Isend(&l7_id_db->indices_needed[offset], l7_id_db->recv_counts[i], MPI_INT, l7_id_db->recv_from[i], L7_SETUP_INDICES_NEEDED_TAG, MPI_COMM_WORLD, &mpi_request[num_outstanding_requests++] ); L7_ASSERT(ierr == MPI_SUCCESS, "MPI_Isend ( indices_needed[i] )", ierr); offset+=l7_id_db->recv_counts[i]; } /* * Receive (global) indices needed by the pes to which this pe sends. * Note that these receives are from expected sources. 
*/ offset = 0; for (i=0; i<l7_id_db->num_sends; i++){ ierr = MPI_Irecv(&l7_id_db->indices_global_to_send[offset], l7_id_db->send_counts[i], MPI_INT, l7_id_db->send_to[i], L7_SETUP_INDICES_NEEDED_TAG, MPI_COMM_WORLD, &mpi_request[num_outstanding_requests++] ); L7_ASSERT(ierr == MPI_SUCCESS, "MPI_Irecv ( indices_global_to_send )", ierr); offset += l7_id_db->send_counts[i]; } /* * Complete indices communication. */ if (num_outstanding_requests > 0){ ierr = MPI_Waitall(num_outstanding_requests, mpi_request, mpi_status ); L7_ASSERT(ierr == MPI_SUCCESS, "MPI_Waitall ( indices )", ierr); } #if defined _L7_DEBUG ierr = MPI_Barrier(MPI_COMM_WORLD); offset = 0; for (j=0; j<numpes; j++){ if (penum == j){ for (i=0; i<l7_id_db->num_sends; i++){ printf("[pe %d] Recvd %d indices from pe %d. \n", penum, l7_id_db->send_counts[i], l7_id_db->send_to[i] ); for (k=offset; k<offset+l7_id_db->send_counts[i]; k++){ printf(" index[%d] = %d \n",k l7_id_db->indices_global_to_send[k] ); } offset += l7_id_db->send_counts[i]; } } sleep(1); } #endif /* Create array of local indices corresponding to * array of global indices requested. Note the * conversion from 1-based indices to 0-based is * accomplished here. (See note in header). 
*/ offset = 0; for (i=0; i<l7_id_db->num_sends; i++){ int counts = l7_id_db->send_counts[i]; // for vectorization int adj = (int)(my_start_index) - base_adj; // for vectorization #ifdef _OPENMP #pragma omp parallel for #else #pragma omp simd #endif for (j=0; j<counts; j++){ l7_id_db->indices_local_to_send[offset+j] = l7_id_db->indices_global_to_send[offset+j] - adj; } offset += counts; } #if defined _L7_DEBUG ierr = MPI_Barrier(MPI_COMM_WORLD); for (i=0; i<numpes; i++){ if (penum == i){ for (j=0; j<l7_id_db->num_sends; j++){ printf("[pe %d] send %d indices to pe %d \n", penum, l7_id_db->send_counts[j], l7_id_db->send_to[] ); ierr = MPI_Barrier(MPI_COMM_WORLD); } } } flush(stdout); ierr = MPI_Barrier(MPI_COMM_WORLD); L7_ASSERT(ierr == MPI_SUCCESS, "MPI_Barrier failure", ierr); for (i=0; i<numpes; i++){ if (penum == i){ printf("----------------------------------------------------\n") for (j=0; j<l7_id_db->num_sends; j++){ printf("[pe %d] Send (index %d) to pe %d. \n",penum, l7_id_db->indices_global_to_send[j], l7_id_db->send_to[j] ); } for (j=0; j<l7_id_db->num_recvs; j++){ printf("[pe %d] Recving (index %d) from pe %d. \n",penum, l7_id_db->indices_needed[j], l7_id_db->recv_from[j] ); } printf("----------------------------------------------------\n") fflush(stdout); } sleep(2); } #endif /* _L7_DEBUG */ /* * Ensure buffer available for data to be sent. 
*/ send_buffer_bytes_needed = 0; num_sends = l7_id_db->num_sends; max_sizeof_type = sizeof(double); for (i=0; i<num_sends; i++) send_buffer_bytes_needed += l7_id_db->send_counts[i] * max_sizeof_type; if (send_buffer_bytes_needed > l7.sizeof_send_buffer ){ if (l7.send_buffer) free(l7.send_buffer); l7.send_buffer = (char *)calloc((unsigned long long)send_buffer_bytes_needed, sizeof (char) ); if (l7.send_buffer == NULL){ ierr = -1; L7_ASSERT(l7.send_buffer != NULL, "No memory for send buffer", ierr); } l7.sizeof_send_buffer = send_buffer_bytes_needed; } /* * Message tag management */ l7_id_db->this_tag_update = L7_UPDATE_TAGS_MIN; /* * Database is setup for this l7_id -- return. */ #endif /* HAVE_MPI */ ierr = L7_OK; return(ierr); } /* End L7_Setup */ void L7_SETUP( const int *my_start_index, const int *num_indices_owned, int *indices_needed, const int *num_indices_needed, int *l7_id ) { L7_Setup(0, *my_start_index, *num_indices_owned, indices_needed, *num_indices_needed, l7_id); }
example2.c
// calculation example for electromagnetic field intensity distribution #include "multi_fbeam.h" int main() { Bobj bm; FILE *fp1,*fp2; double complex e[3],h[3]; double x[3],rang,dr,*ie,*ih; int max,i,j; init_mfb(&bm); read_data_mfb(&bm); print_data_mfb(&bm); setup_mfb(&bm); max=200; rang=4.0*bm.lambda_0; dr=rang*2/(double)(max-1); ie=(double *)m_alloc2(max,sizeof(double),"example2.c,ie"); ih=(double *)m_alloc2(max,sizeof(double),"example2.c,ih"); // x=0 plane if((fp1=fopen("Ie_yz.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp1,"%s\n","# y z electric_field_intensity"); if((fp2=fopen("Ih_yz.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp2,"%s\n","# y z magnetic_field_intensity"); x[0]=0.0; for(i=0;i<max;i++){ x[1]=-rang+(double)i*dr; #pragma omp parallel for schedule(dynamic) firstprivate(x) private(e,h) // omp parallel for(j=0;j<max;j++){ x[2]=-rang+(double)j*dr; calc_mfb_EH(e,h,x,&bm); ie[j]=creal(e[0]*conj(e[0]))+creal(e[1]*conj(e[1]))+creal(e[2]*conj(e[2])); ih[j]=creal(h[0]*conj(h[0]))+creal(h[1]*conj(h[1]))+creal(h[2]*conj(h[2])); } for(j=0;j<max;j++){ x[2]=-rang+(double)j*dr; fprintf(fp1,"%g %g %15.14e\n",x[1],x[2],ie[j]); fprintf(fp2,"%g %g %15.14e\n",x[1],x[2],ih[j]); } fprintf(fp1,"\n"); fprintf(fp2,"\n"); } fclose(fp1); fclose(fp2); // y=0 plane if((fp1=fopen("Ie_xz.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp1,"%s\n","# x z electric_field_intensity"); if((fp2=fopen("Ih_xz.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp2,"%s\n","# x z magnetic_field_intensity"); x[1]=0.0; for(i=0;i<max;i++){ x[0]=-rang+(double)i*dr; #pragma omp parallel for schedule(dynamic) firstprivate(x) private(e,h) // omp parallel for(j=0;j<max;j++){ x[2]=-rang+(double)j*dr; calc_mfb_EH(e,h,x,&bm); ie[j]=creal(e[0]*conj(e[0]))+creal(e[1]*conj(e[1]))+creal(e[2]*conj(e[2])); ih[j]=creal(h[0]*conj(h[0]))+creal(h[1]*conj(h[1]))+creal(h[2]*conj(h[2])); }// end 
parallel for(j=0;j<max;j++){ x[2]=-rang+(double)j*dr; fprintf(fp1,"%g %g %15.14e\n",x[0],x[2],ie[j]); fprintf(fp2,"%g %g %15.14e\n",x[0],x[2],ih[j]); } fprintf(fp1,"\n"); fprintf(fp2,"\n"); } fclose(fp1); fclose(fp2); // z=0 plane if((fp1=fopen("Ie_xy.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp1,"%s\n","# x y electric_field_intensity"); if((fp2=fopen("Ih_xy.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp2,"%s\n","# x y electric_field_intensity"); x[2]=0.0; for(i=0;i<max;i++){ x[0]=-rang+(double)i*dr; #pragma omp parallel for schedule(dynamic) firstprivate(x) private(e,h) // omp parallel for(j=0;j<max;j++){ x[1]=-rang+(double)j*dr; calc_mfb_EH(e,h,x,&bm); ie[j]=creal(e[0]*conj(e[0]))+creal(e[1]*conj(e[1]))+creal(e[2]*conj(e[2])); ih[j]=creal(h[0]*conj(h[0]))+creal(h[1]*conj(h[1]))+creal(h[2]*conj(h[2])); } for(j=0;j<max;j++){ x[1]=-rang+(double)j*dr; fprintf(fp1,"%g %g %15.14e\n",x[0],x[1],ie[j]); fprintf(fp2,"%g %g %15.14e\n",x[0],x[1],ih[j]); } fprintf(fp1,"\n"); fprintf(fp2,"\n"); } fclose(fp1); fclose(fp2); printf("intensity plot is finished\n"); free(ie); free(ih); free_mfb(&bm); return 0; }
RefTraceTools.h
/////////////////////////////////////////////////////////////////////////////// // SOFTWARE COPYRIGHT NOTICE AGREEMENT // // This software and its documentation are copyright (2013) by the // // Broad Institute. All rights are reserved. This software is supplied // // without any warranty or guaranteed support whatsoever. The Broad // // Institute is not responsible for its use, misuse, or functionality. // /////////////////////////////////////////////////////////////////////////////// #ifndef REFTRACE_TOOLS_H #define REFTRACE_TOOLS_H // MakeDepend: library OMP // MakeDepend: cflags OMP_FLAGS #include "Basevector.h" #include "CoreTools.h" #include "paths/HyperBasevector.h" #include "pairwise_aligners/SmithWatAffine.h" #include "pairwise_aligners/SmithWatBandedA.h" #include "paths/long/MakeKmerStuff.h" #include "PrintAlignment.h" #include "paths/long/RefTrace.h" // Create a HyperBasevector hbp that equals hb plus its reverse complement. // However only do this for components that need it. void CreateHBPlus(const HyperBasevector& hb, const vec<int>& inv, HyperBasevector& hbp, vec<pair<int,Bool>>& hbp_to_hb); // Linearized reference sequences. Expand the paths from the source to the sink // of the reference graph, built a vecbasevector of expended sequence, and // record the origion of of the chromosome id. class LinearRef { public: LinearRef(const vec<HyperBasevector>& GH,const vec<bool>& c=vec<bool>()); int N() const { return G.size(); } int Source(int g) const {return G_source[g]; } bool IsDoubled(int g) const {return isDoubled[g]; } const basevector& Seq(int g) const { return G[g]; } const vecbasevector& Seqs() const { return G; } private: vec<int> G_source; vecbasevector G; vec<bool> isDoubled; }; // Some data structures. 
// The structure vedata has the following structure: // { (genome_tig_id, start_pos_on_genome_tig, left_vertex_of_edge_in_hbp ), // (genome_tig_id, stop_pos_on_genome_tig-K+1, right_vertex_of_edge_in_hbp ), // (hbp_edge_id, error_count), // (start_pos_on_hbp_edge, stop_pos_on_hbp_edge) }. class EdgePlacements { public: EdgePlacements(const HyperBasevector& hbp, const vec<pair<int,Bool>>& hbp_to_hb, const vecbasevector& G) : hbp(hbp), hbp_to_hb(hbp_to_hb), G(G) {} // Align edges of hbp to reference. template<int L> void AlignEdgesToRef( // heuristics: const double min_cov_frac, const double max_error_rate, const int max_offset_diff, const double min_group_frac, const int offset_add, const int min_group_save, const Bool fix_bug, // logging: bool REFTRACE_VARIANTS, const int verbosity, ostream& out ); template <int L> void AlignEdgesToRefExp(const int verbosity, ostream& out); void RemoveBadPlacements(); void Twiddle(const int max_twiddle); void TwiddleSmart(); // Generate the matching sequences from the best path. basevector BestSeq(const vec<int>& best_path, const vec<int>& eids , const vec<std::pair<int,int>>& limits , vec<std::tuple<int64_t,int64_t,int,int64_t,int64_t,int64_t,int64_t>>& coors_edge); public: const HyperBasevector& hbp; const vec<pair<int,Bool>>& hbp_to_hb; const vecbasevector& G; vec< quad< triple<int,int,int>, triple<int,int,int>, pair<int,int>, pair<int,int> > > vedata; vec<align> aligns; vec<int> xto_left, xto_right; private: int CorrelatePositionsAlways(const align& a, const int x1)const; }; class GraphZ { public: typedef int (*PenaltyFuncT)(int, int, int); GraphZ(const EdgePlacements& ep, PenaltyFuncT pf) : edge_placements(ep), hbp(ep.hbp), hbp_to_hb(ep.hbp_to_hb), G(ep.G) { Penalty = pf; } void FindShortestPath(const int min_dist, const int max_dist, vec< vec<int> >& spaths, vec< triple<int,int,int> >& spaths_egd, vec< pair<int,int> >& spaths_gg_pen, ostream& out, int verbosity = 0); // Find the corresponding best path in hbp edges. 
void FindBestEdgePath( const vec< triple<int,int,int> >& spaths_egd, const vec< vec<int> >& spaths, vec<vec<int>>& best_path, vec<vec<int>>& eids, int& best_g) ; public: const EdgePlacements& edge_placements; const HyperBasevector& hbp; const vec<pair<int,Bool>>& hbp_to_hb; const vecbasevector& G; PenaltyFuncT Penalty; vec< triple<int,int,int> > verts; vec< triple< int, int, pair<int,int> > > edges; vec< triple<int,int,int> > egd; digraphE<int> Z; private: void BuildGraph(const int verbosity, ostream& out); void AddGapEdges(const int min_dist, const int max_dist, const int verbosity, ostream& out ,const bool bPreserveDisconnectedComponents=false); void AddConnectedGapEdges(const int min_dist, const int max_dist, const int verbosity, ostream& out ,const bool bPreserveDisconnectedComponents=false); void AddSpecialVerts( const int K, const vec<int>& sources, const vec<int>& sinks, const bool bPreserveDisconnectedComponents=false); void AnnouncePaths( const vec< vec<int> >& spaths, const int K, const vec< triple<int,int,int> >& spaths_egd, const int verbosity, ostream& out ) const; void FindShortestPathBetween( const int this_source, const int this_sink, const digraphE<int>& ZS, const vec<int>& suc, vec< vec<int> >& spaths, vec< triple<int,int,int> >& spaths_egd, vec< pair<int,int> >& spaths_gg_pen, const int verbosity, ostream& out ) const; void MakeZDot(ostream& os); }; template<int L> void EdgePlacements::AlignEdgesToRef( const double min_cov_frac, const double max_error_rate, const int max_offset_diff, const double min_group_frac, const int offset_add, const int min_group_save, const Bool fix_bug, // logging: bool REFTRACE_VARIANTS, const int verbosity, ostream& out ) { // Setup for alignment. 
vecbasevector all(G); vec< triple<kmer<L>,int,int> > kmers_plus; MakeKmerLookup0( all, kmers_plus ); vec< kmer<L> > kmers( kmers_plus.size( ) ); for ( int64_t i = 0; i < kmers_plus.jsize( ); i++ ) kmers[i] = kmers_plus[i].first; hbp.ToLeft(xto_left), hbp.ToRight(xto_right); // Go through the edges of the (doubled) assembly. #pragma omp parallel for schedule(dynamic,1) for ( int i = 0; i < hbp.EdgeObjectCount( ); i++ ) { const basevector& e = hbp.EdgeObject(i); // For each kmer in the edge, find its hits to the reference and find // the kmers having the most hits. int nkmers = e.isize( ) - L + 1; vec< triple<int64_t,int64_t,int64_t> > locs(nkmers); vec<int> pos( nkmers, vec<int>::IDENTITY ); kmer<L> x; for ( int j = 0; j < nkmers; j++ ) { x.SetToSubOf( e, j ); int64_t low = LowerBound(kmers, x), high = UpperBound(kmers, x); locs[j].first = high - low; locs[j].second = low, locs[j].third = high; } if (fix_bug) ReverseSortSync( locs, pos ); else SortSync( locs, pos ); // Determine cutoff 'top'. double mcf = min_cov_frac; if ( REFTRACE_VARIANTS ) mcf = 0.6; int t = int( floor( nkmers * mcf ) ), top; for ( top = t + 1; top < nkmers; top++ ) if ( locs[top].first > locs[t].first ) break; // Find the associated offsets. vec< pair<int,int> > offset; for ( int j = 0; j < top; j++ ) { for ( int64_t m = locs[j].second; m < locs[j].third; m++ ) { int g = kmers_plus[m].second, o = kmers_plus[m].third - pos[j]; offset.push( g, o ); } } Sort(offset); // Form offsets into groups. 
// NOTE(review): this span begins mid-function.  Everything down to the first
// 'SortSync(vedata, aligns); }' is the tail of a definition whose head lies
// before this excerpt; it is reproduced unchanged, with comments only.

// Form offsets into groups: maximal runs of (genome id, offset) pairs on the
// same genome whose successive offsets differ by at most 'mod'.
vec< triple< int, int, pair<int,int> > > og;
int mod = max_offset_diff;
if ( REFTRACE_VARIANTS ) mod = 500;
for ( int j = 0; j < offset.isize( ); j++ )
{    int k;
     for ( k = j + 1; k < offset.isize( ); k++ )
     {    if ( offset[k].first != offset[j].first ) break;
          if ( offset[k].second - offset[k-1].second > mod ) break;    }
     // group = (member count, genome id, (lowest offset, highest offset))
     og.push( k - j, offset[j].first,
          make_pair( offset[j].second, offset[k-1].second ) );
     j = k - 1;    }
ReverseSort(og);   // biggest groups first
if ( verbosity >= 4 )
{
     #pragma omp critical
     {    out << "\noriginal edge " << hbp_to_hb[i].first << ": ";
          PRINT4_TO( out, nkmers, top, offset.size( ), og.size( ) );
          for ( int j = 0; j < og.isize( ); j++ )
               PRINT2_TO( out, j, og[j].first );    }    }

// Filter offset groups.  Keep the leading groups that are either large in
// absolute terms (>= min_group_save) or a sizable fraction of the biggest one.
double mgf = min_group_frac;
if ( REFTRACE_VARIANTS ) mgf = 0.65;
int gj;
for ( gj = 0; gj < og.isize( ); gj++ )
{    if ( og[gj].first < min_group_save
          && og[gj].first < mgf * og[0].first )
     {    break;    }    }
og.resize(gj);
if ( verbosity >= 3 && og.nonempty( ) )
{
     #pragma omp critical
     {    out << "\noffsets for edge " << i << " (hb_edge="
               << hbp_to_hb[i].first << ", nkmers=" << nkmers << ")" << endl;
          for ( int j = 0; j < og.isize( ); j++ )
          {    out << "[" << j << "] " << og[j].second << "."
                    << og[j].third.first << "-" << og[j].third.second
                    << " (" << og[j].first << ")" << endl;    }    }    }

// Align. The reason for adding to the offset is that there could be in
// indel in the first or last L bases.
for ( int j = 0; j < og.isize( ); j++ )
{    int g = og[j].second;
     int off_low = og[j].third.first, off_high = og[j].third.second;
     int mid_offset = ( off_low + off_high ) / 2;
     // bandwidth covers the whole offset spread of the group, plus slack
     int bandwidth = Max(mid_offset - off_low, off_high - mid_offset)
          + offset_add;

     // Do the alignment. This is kludgy. If the alignment has too
     // many errors and the edge is long, we suspect that the problem
     // might be with a big indel, so we align using a larger bandwidth.
     // Note the unfortunate us of hardcoded constants.

     align a;
     int errors;
     if ( !REFTRACE_VARIANTS )
     {    const int SMA_method = 1;
          if (SMA_method == 1)
          {    SmithWatBandedA( hbp.EdgeObject(i), G[g], -mid_offset,
                    bandwidth, a, errors, 0, 1, 1 );    }
          else if(SMA_method == 2)
          {    SmithWatAffineBanded( hbp.EdgeObject(i), G[g], -mid_offset,
                    bandwidth, a, errors );    }
          else
          {    cout << "unrecognized SMA_method" << endl;    }
          if ( double(errors) / double( a.extent2( ) ) > max_error_rate )
          {
               // So the following code (after the continue;
               // bandwidth=5000) was taking a ton of time
               // (0.5-1 sec per alignment). Also in my tests
               // it had a very low success rate <0.5% AND its
               // removal does not seem to impact the result.
               // We should do something clever with the
               // alignments (super aligner?) if we end up
               // needing it. -- neilw
               continue;
#if 0
               const int long_edge = 5000;
               const int max_indel = 5000;
               if ( hbp.EdgeLengthBases(i) < long_edge ) continue;
               SmithWatBandedA( hbp.EdgeObject(i), G[g], -mid_offset,
                    max_indel, a, errors, 0, 1, 1 );
               if ( double(errors) / double( a.extent2( ) ) > max_error_rate )
                    continue;
#endif
          }    }
     else
     {    // Variant-tracing mode: affine banded alignment; raw score is
          // divided by 3.0 to get an error-rate-like quantity.
          double score = SmithWatAffineBanded( hbp.EdgeObject(i), G[g],
               -mid_offset, bandwidth, a, errors ) / 3.0;
          if ( verbosity >= 3 )
          {
               #pragma omp critical
               {    double err_rate = score / double( a.extent2( ) );
                    int hb_edge = hbp_to_hb[i].first;
                    int offset = -mid_offset;
                    PRINT5( hb_edge, offset, bandwidth, score, err_rate );    }    }
          double var_max_error_rate = 0.3;
          if ( score / double( a.extent2( ) ) > var_max_error_rate )
               continue;    }
     if ( verbosity >= 3 )
     {
          #pragma omp critical
          {    out << "\nalignment " << j << " of edge " << i << " ("
                    << xto_left[i] << " --> " << xto_right[i]
                    << ", hb_edge=" << hbp_to_hb[i].first << ")" << endl;
               vec<int> errs = a.MutationsGap1Gap2( hbp.EdgeObject(i), G[g] );
               int mismatches = errs[0];
               int indels = errs[1] + errs[2];
               PRINT5_TO( out, g, a.pos2( ), a.Pos2( ), mismatches, indels );
               if ( verbosity == 4 )
               {    PrintVisualAlignment( True, out, hbp.EdgeObject(i),
                         G[g], a );    }
               if ( verbosity >= 5 )
               {    PrintVisualAlignment( False, out, hbp.EdgeObject(i),
                         G[g], a );    }    }    }

     // Figure out where the position e.isize( ) - K + 1 should map to
     // under the alignment. Note that because there could be an indel
     // there, this is not necessarily a meaningful answer.

     int x1 = e.isize( ) - hbp.K( ) + 1;
     int x2 = CorrelatePositionsAlways( a, x1 );

     // Save results.

     #pragma omp critical
     {    vedata.push( make_triple( g, a.pos2( ), xto_left[i] ),
               make_triple( g, x2, xto_right[i] ),
               make_pair( i, errors ),
               make_pair( a.pos1( ), a.Pos1( ) ) );
          aligns.push_back(a);    }    }    }

// Sort the output to avoid the stochastic downstream behavior of BuildGraph
// that seems depend on the input order of the alignment data.
SortSync(vedata, aligns);    }

// An experimental version of function to align edges to reference that
// automatically adjust heuristics for best results.
// Phase 1 (parallel for): per edge, collect kmer hits, group offsets, and
// build work units per pass of progressively looser heuristics.
// Phase 2 (master): flatten the per-edge work lists.
// Phase 3 (parallel for): run the banded aligner on each work unit and save
// the results under a critical section.

template<int L> void EdgePlacements::AlignEdgesToRefExp(const int verbosity,
     ostream& out)
{
     // Setup for alignment.

     vecbasevector all(G);
     vec< triple<kmer<L>,int,int> > kmers_plus;
     MakeKmerLookup0( all, kmers_plus );
     vec< kmer<L> > kmers( kmers_plus.size( ) );
     for ( int64_t i = 0; i < kmers_plus.jsize( ); i++ )
          kmers[i] = kmers_plus[i].first;
     hbp.ToLeft(xto_left), hbp.ToRight(xto_right);

     // Longest reference sequence; used to size the aligner engine below.
     unsigned int max_g_len = G.front().size();
     for(size_t gg=1;gg<G.size();++gg){max_g_len=max(max_g_len,G[gg].size());}

     // Process edges longest-first.
     //very dirty way of load balance, should be coded with a worklist.h instead.
     vec<std::pair<int,int>> permutation(hbp.EdgeObjectCount());
     for(int ii=0;ii<hbp.EdgeObjectCount();++ii){
         permutation[ii]=std::make_pair(hbp.EdgeObject(ii).isize(),ii);}
     std::sort(permutation.rbegin(),permutation.rend());

     typedef triple< int, int, pair<int,int> > og_type; // the og specification from old code
     typedef std::tuple<og_type,double,int> work_type; // og_type, max_error_rate, offset_add
     vec< vec<work_type> > ee_vec_work( hbp.EdgeObjectCount() ); // edge_idx -> a list of work_type
     vec< std::pair<size_t,size_t> > unit_idx_tt_vec_work; // flattened indices of ee_vec_work
     const int np=3;//number of passes
#pragma omp parallel
     {
          // per-thread aligner scratch space
          SmithWatBandedAEngine swbae(sqrt(max_g_len)*2,sqrt(max_g_len));
#pragma omp for schedule(dynamic,1)
          for ( int ee = 0; ee < hbp.EdgeObjectCount( ); ee++ )
          {    int i=permutation[ee].second;
               const basevector& e = hbp.EdgeObject(i);

               // For each kmer in the edge, find its hits to the reference and find
               // the kmers having the most hits.

               int nkmers = e.isize( ) - L + 1;
               vec< triple<int64_t,int64_t,int64_t> > locs(nkmers);
               vec<int> pos( nkmers, vec<int>::IDENTITY );
               kmer<L> x;
               for ( int j = 0; j < nkmers; j++ )
               {    x.SetToSubOf( e, j );
                    int64_t low = LowerBound(kmers, x), high = UpperBound(kmers, x);
                    locs[j].first = high - low;
                    locs[j].second = low, locs[j].third = high;    }
               ReverseSortSync( locs, pos );

               // Determine cutoff 'top'.

               double min_cov_frac = 0.5;
               int t = int( floor( nkmers * min_cov_frac ) ), top;
               for ( top = t + 1; top < nkmers; top++ )
                    if ( locs[top].first > locs[t].first ) break;

               // Find the associated offsets.

               vec< pair<int,int> > offset;
               for ( int j = 0; j < top; j++ )
               {    for ( int64_t m = locs[j].second; m < locs[j].third; m++ )
                    {    int g = kmers_plus[m].second, o = kmers_plus[m].third - pos[j];
                         offset.push( g, o );    }    }
               Sort(offset);

               // np passes with progressively looser heuristics; each pass
               // contributes its surviving offset groups as work units.
               for(int pass = 0; pass < np; pass++)
               {
                    // auto pt = getenv("PASS");
                    // if (pt) {
                    //     pass = atoi(pt);
                    //     np = 1;
                    //     cout << "pass= " << pass << endl;
                    // }
                    RefTraceHeuristics rth;
                    switch (pass) {
                    case 0:
                        //rth.max_offset_diff = 10; // default
                        //rth.max_error_rate = 0.05;
                        //rth.offset_add = 1; // default
                        //rth.max_twiddle = 3; // default
                        rth.min_group_frac = 0.1;
                        rth.min_group_save = 200;
                        break;
                    case 1:
                        rth.max_offset_diff = 30;
                        rth.max_error_rate = 0.31;
                        rth.offset_add = 5;
                        rth.min_group_frac = 0.1;
                        rth.max_twiddle = 5;
                        break;
                    case 2:
                        rth.max_offset_diff = 350;
                        rth.max_error_rate = 0.31;
                        rth.offset_add = 5;
                        rth.min_group_frac = 0.75;
                        rth.max_twiddle = 120;
                        break;
                    }

                    // Form offsets into groups.

                    vec< triple< int, int, pair<int,int> > > og;
                    for ( int j = 0; j < offset.isize( ); j++ )
                    {    int k;
                         for ( k = j + 1; k < offset.isize( ); k++ )
                         {    if ( offset[k].first != offset[j].first ) break;
                              if ( offset[k].second - offset[k-1].second
                                   > rth.max_offset_diff ) break;    }
                         og.push( k - j, offset[j].first,
                              make_pair( offset[j].second, offset[k-1].second ) );
                         j = k - 1;    }
                    ReverseSort(og);

                    // Filter offset groups.

                    int gj;
                    for ( gj = 0; gj < og.isize( ); gj++ )
                    {    if ( og[gj].first < rth.min_group_save
                              && og[gj].first < rth.min_group_frac * og[0].first )
                         {    break;    }    }
                    og.resize(gj);
                    // queue surviving groups, tagged with this pass's limits
                    for( const auto& entry: og ){
                        ee_vec_work[i].emplace_back( entry ,
                            rth.max_error_rate, rth.offset_add);
                    }
               }
          }
          {
#pragma omp barrier
          }
#pragma omp master
          {    // flatten per-edge work lists into (edge index, work index)
               // pairs, preserving the longest-first edge order
               const size_t n=std::accumulate(ee_vec_work.begin(),ee_vec_work.end(),
                   size_t(0),[](size_t a,vec<work_type>const&b){return a+b.size();});
               unit_idx_tt_vec_work.reserve(n);
               for(const auto& entry: permutation){
                   for(size_t ff=0;ff<ee_vec_work[entry.second].size();++ff){
                       unit_idx_tt_vec_work.emplace_back(entry.second,ff);
                   }
               }
          }
          {
#pragma omp barrier
          }
#pragma omp for schedule(dynamic,1) nowait
          for ( size_t og_idx = 0 ; og_idx < unit_idx_tt_vec_work.size() ; ++og_idx)
          {
               {
                    // Align. The reason for adding to the offset is that there could be in
                    // indel in the first or last L bases.
                    // for ( int j = 0; j < og.isize( ); j++ ) {
                    const auto& indices = unit_idx_tt_vec_work[og_idx];
                    const auto& entry = ee_vec_work[indices.first][indices.second];
                    // int g = og[j].second;
                    // int off_low = og[j].third.first, off_high = og[j].third.second;
                    int g = std::get<0>(entry).second;
                    int off_low = std::get<0>(entry).third.first,
                        off_high = std::get<0>(entry).third.second;
                    int mid_offset = ( off_low + off_high ) / 2;
                    int bandwidth = Max(mid_offset - off_low, off_high - mid_offset)
                         + std::get<2>(entry);// rth.offset_add;

                    // Do the alignment. This is kludgy. If the alignment has too
                    // many errors and the edge is long, we suspect that the problem
                    // might be with a big indel, so we align using a larger bandwidth.
                    // Note the unfortunate us of hardcoded constants.

                    align a;
                    int errors;
                    swbae.run( hbp.EdgeObject(indices.first), G[g], -mid_offset,
                         bandwidth, a, errors, 0, 1, 1 );
                    if ( double(errors) / double( a.extent2( ) )
                         > std::get<1>(entry) /*rth.max_error_rate*/ )
                    {    const int long_edge = 5000;
                         const int max_indel = 5000;
                         if ( hbp.EdgeLengthBases(indices.first) < long_edge ) continue;
                         // retry with a huge band to catch a possible big indel
                         swbae.run( hbp.EdgeObject(indices.first), G[g], -mid_offset,
                              max_indel, a, errors, 0, 1, 1 );
                         if ( double(errors) / double( a.extent2( ) )
                              > std::get<1>(entry)/*rth.max_error_rate*/ ) continue;    }
                    // errors += a.pos1();
                    // errors += hbp.EdgeObject(i).size()-a.Pos1();

                    // Figure out where the position e.isize( ) - K + 1 should map to
                    // under the alignment. Note that because there could be an indel
                    // there, this is not necessarily a meaningful answer.

                    int x1 = hbp.EdgeObject(indices.first).isize( ) - hbp.K( ) + 1;
                    int x2 = CorrelatePositionsAlways( a, x1 );
#pragma omp critical
                    {    vedata.push( make_triple( g, a.pos2( ), xto_left[indices.first] ),
                              make_triple( g, x2, xto_right[indices.first] ),
                              make_pair( indices.first, errors ),
                              make_pair( a.pos1( ), a.Pos1( ) ) );
                         aligns.push_back(a);    }
               }
          }
     }//omp parallel

     // Sort the output to avoid the stochastic downstream behavior of BuildGraph
     // that seems depend on the input order of the alignment data.

     UniqueSortSync(vedata, aligns);
}
#endif
convolutiondepthwise_3x3.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON

// Depthwise 3x3 convolution, stride 1.  One input channel maps to one output
// channel (group count == bottom_blob.c); each group uses its own 9 kernel
// taps and optional per-channel bias.  Two output rows are computed per outer
// iteration where possible, four output columns per inner NEON iteration.
// NOTE(review): no padding is applied here -- caller presumably sizes
// top_blob as (w-2)x(h-2); confirm at the call site.
static void convdw3x3s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for
    for (int g=0; g<group; g++)
    {
        Mat out = top_blob.channel(g);

        const float bias0 = bias ? bias[g] : 0.f;

        const float* kernel0 = kernel + g*9;

        float* outptr = out;
        float* outptr2 = outptr + outw;   // second output row

        const float* img0 = bottom_blob.channel(g);

        // four consecutive input rows feed two output rows
        const float* r0 = img0;
        const float* r1 = img0 + w;
        const float* r2 = img0 + w*2;
        const float* r3 = img0 + w*3;

#if __ARM_NEON
        // kernel rows in lanes 0-2; lane 3 zeroed so 4-wide ops only add the
        // three real taps
        float32x4_t _k012x = vld1q_f32(kernel0);
        float32x4_t _k345x = vld1q_f32(kernel0+3);
        float32x4_t _k678x = vld1q_f32(kernel0+6);

        _k012x = vsetq_lane_f32(0.f, _k012x, 3);
        _k345x = vsetq_lane_f32(0.f, _k345x, 3);
        _k678x = vsetq_lane_f32(0.f, _k678x, 3);

        float32x4_t _bias0 = vdupq_n_f32(bias0);
#else
        const float* k0 = kernel0;
        const float* k1 = kernel0 + 3;
        const float* k2 = kernel0 + 6;
#endif // __ARM_NEON

        int i = 0;

        // main body: two output rows per pass
        for (; i+1 < outh; i+=2)
        {

#if __ARM_NEON
            int nn = outw >> 2;     // 4-wide vector iterations
            int remain = outw & 3;  // leftover columns
#else
            int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
            for (; nn>0; nn--)
            {
                // load 8 pixels per row; _rx1/_rx2 are the +1/+2 shifted views
                float32x4_t _r00 = vld1q_f32(r0);
                float32x4_t _r00n = vld1q_f32(r0 + 4);
                float32x4_t _r01 = vextq_f32(_r00, _r00n, 1);
                float32x4_t _r02 = vextq_f32(_r00, _r00n, 2);

                float32x4_t _r10 = vld1q_f32(r1);
                float32x4_t _r10n = vld1q_f32(r1 + 4);
                float32x4_t _r11 = vextq_f32(_r10, _r10n, 1);
                float32x4_t _r12 = vextq_f32(_r10, _r10n, 2);

                float32x4_t _r20 = vld1q_f32(r2);
                float32x4_t _r20n = vld1q_f32(r2 + 4);
                float32x4_t _r21 = vextq_f32(_r20, _r20n, 1);
                float32x4_t _r22 = vextq_f32(_r20, _r20n, 2);

                float32x4_t _r30 = vld1q_f32(r3);
                float32x4_t _r30n = vld1q_f32(r3 + 4);
                float32x4_t _r31 = vextq_f32(_r30, _r30n, 1);
                float32x4_t _r32 = vextq_f32(_r30, _r30n, 2);

                // output row i: rows r0/r1/r2, accumulated in two chains
                // (_sum1/_sum2) to expose instruction-level parallelism;
                // bias is seeded into _sum2
                float32x4_t _sum1 = vmulq_laneq_f32(_r00, _k012x, 0);
                float32x4_t _sum2 = vfmaq_laneq_f32(_bias0, _r01, _k012x, 1);
                _sum1 = vfmaq_laneq_f32(_sum1, _r02, _k012x, 2);
                _sum2 = vfmaq_laneq_f32(_sum2, _r10, _k345x, 0);
                _sum1 = vfmaq_laneq_f32(_sum1, _r11, _k345x, 1);
                _sum2 = vfmaq_laneq_f32(_sum2, _r12, _k345x, 2);
                _sum1 = vfmaq_laneq_f32(_sum1, _r20, _k678x, 0);
                _sum2 = vfmaq_laneq_f32(_sum2, _r21, _k678x, 1);
                _sum1 = vfmaq_laneq_f32(_sum1, _r22, _k678x, 2);

                // output row i+1: rows r1/r2/r3
                float32x4_t _sum3 = vmulq_laneq_f32(_r10, _k012x, 0);
                float32x4_t _sum4 = vfmaq_laneq_f32(_bias0, _r11, _k012x, 1);
                _sum3 = vfmaq_laneq_f32(_sum3, _r12, _k012x, 2);
                _sum4 = vfmaq_laneq_f32(_sum4, _r20, _k345x, 0);
                _sum3 = vfmaq_laneq_f32(_sum3, _r21, _k345x, 1);
                _sum4 = vfmaq_laneq_f32(_sum4, _r22, _k345x, 2);
                _sum3 = vfmaq_laneq_f32(_sum3, _r30, _k678x, 0);
                _sum4 = vfmaq_laneq_f32(_sum4, _r31, _k678x, 1);
                _sum3 = vfmaq_laneq_f32(_sum3, _r32, _k678x, 2);

                _sum1 = vaddq_f32(_sum1, _sum2);
                _sum3 = vaddq_f32(_sum3, _sum4);

                vst1q_f32(outptr, _sum1);
                vst1q_f32(outptr2, _sum3);

                r0 += 4;
                r1 += 4;
                r2 += 4;
                r3 += 4;
                outptr += 4;
                outptr2 += 4;
            }
#else
            // armv7: same two-row computation, hand-scheduled inline asm.
            // Operands: %0 nn, %1 outptr, %2 outptr2, %3-%6 r0-r3,
            // %14-%16 kernel rows, %17 bias.  'vand' copies the bias
            // register into the accumulator.
            if (nn > 0)
            {
                asm volatile(
                    "pld [%3, #192] \n"
                    "vld1.f32 {d18-d20}, [%3 :64] \n"// r0
                    "add %3, #16 \n"

                    "vext.32 q11, q9, q10, #1 \n"
                    "vext.32 q12, q9, q10, #2 \n"

                    "0: \n"

                    "vmul.f32 q7, q9, %e14[0] \n"

                    "vand q13, %q17, %q17 \n"// q13 = _bias0
                    "vmul.f32 q6, q11, %e14[1] \n"
                    "vmla.f32 q13, q12, %f14[0] \n"

                    "pld [%4, #192] \n"
                    "vld1.f32 {d18-d20}, [%4] \n"// r1
                    "add %4, #16 \n"

                    "vmla.f32 q7, q9, %e15[0] \n"

                    "vext.32 q11, q9, q10, #1 \n"
                    "vext.32 q12, q9, q10, #2 \n"

                    "vmla.f32 q6, q11, %e15[1] \n"
                    "vmla.f32 q13, q12, %f15[0] \n"

                    "vmul.f32 q8, q9, %e14[0] \n"

                    "vand q15, %q17, %q17 \n"// q15 = _bias0
                    "vmul.f32 q14, q11, %e14[1] \n"
                    "vmla.f32 q15, q12, %f14[0] \n"

                    "pld [%5, #192] \n"
                    "vld1.f32 {d18-d20}, [%5 :64] \n"// r2
                    "add %5, #16 \n"

                    "vmla.f32 q7, q9, %e16[0] \n"

                    "vext.32 q11, q9, q10, #1 \n"
                    "vext.32 q12, q9, q10, #2 \n"

                    "vmla.f32 q6, q11, %e16[1] \n"
                    "vmla.f32 q13, q12, %f16[0] \n"

                    "vmla.f32 q8, q9, %e15[0] \n"
                    "vmla.f32 q14, q11, %e15[1] \n"
                    "vmla.f32 q15, q12, %f15[0] \n"

                    "pld [%6, #192] \n"
                    "vld1.f32 {d18-d20}, [%6] \n"// r3
                    "add %6, #16 \n"

                    "vmla.f32 q8, q9, %e16[0] \n"

                    "vext.32 q11, q9, q10, #1 \n"
                    "vext.32 q12, q9, q10, #2 \n"

                    "vmla.f32 q14, q11, %e16[1] \n"
                    "vmla.f32 q15, q12, %f16[0] \n"

                    "vadd.f32 q7, q7, q6 \n"

                    "pld [%3, #192] \n"
                    "vld1.f32 {d18-d20}, [%3 :64] \n"// r0

                    "vadd.f32 q8, q8, q14 \n"
                    "vadd.f32 q7, q7, q13 \n"
                    "vadd.f32 q8, q8, q15 \n"

                    "vext.32 q11, q9, q10, #1 \n"
                    "vext.32 q12, q9, q10, #2 \n"

                    "add %3, #16 \n"

                    "vst1.f32 {d14-d15}, [%1]! \n"
                    "vst1.f32 {d16-d17}, [%2]! \n"

                    "subs %0, #1 \n"
                    "bne 0b \n"

                    "sub %3, #16 \n"
                    : "=r"(nn), // %0
                      "=r"(outptr), // %1
                      "=r"(outptr2), // %2
                      "=r"(r0), // %3
                      "=r"(r1), // %4
                      "=r"(r2), // %5
                      "=r"(r3) // %6
                    : "0"(nn),
                      "1"(outptr),
                      "2"(outptr2),
                      "3"(r0),
                      "4"(r1),
                      "5"(r2),
                      "6"(r3),
                      "w"(_k012x), // %14
                      "w"(_k345x), // %15
                      "w"(_k678x), // %16
                      "w"(_bias0) // %17
                    : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
            }
#endif // __aarch64__
#endif // __ARM_NEON
            // leftover columns, one output pixel (per row) at a time
            for (; remain>0; remain--)
            {
#if __ARM_NEON
                // horizontal-sum trick: zeroed lane 3 of the kernel vectors
                // lets us overwrite lane 3 of the sum with the bias, then
                // add across all four lanes
                float32x4_t _r00 = vld1q_f32(r0);
                float32x4_t _r10 = vld1q_f32(r1);
                float32x4_t _r20 = vld1q_f32(r2);
                float32x4_t _r30 = vld1q_f32(r3);

                float32x4_t _sum = vmulq_f32(_r00, _k012x);
                _sum = vmlaq_f32(_sum, _r10, _k345x);
                _sum = vmlaq_f32(_sum, _r20, _k678x);

                float32x4_t _sum2 = vmulq_f32(_r10, _k012x);
                _sum2 = vmlaq_f32(_sum2, _r20, _k345x);
                _sum2 = vmlaq_f32(_sum2, _r30, _k678x);

                _sum = vsetq_lane_f32(bias0, _sum, 3);
                _sum2 = vsetq_lane_f32(bias0, _sum2, 3);
#if __aarch64__
                *outptr = vaddvq_f32(_sum);
                *outptr2 = vaddvq_f32(_sum2);
#else
                float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2));
                float32x2_t _sss2 = vpadd_f32(_ss, _ss2);

                *outptr = vget_lane_f32(_sss2, 0);
                *outptr2 = vget_lane_f32(_sss2, 1);
#endif // __aarch64__
#else
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];

                float sum2 = bias0;
                sum2 += r1[0] * k0[0];
                sum2 += r1[1] * k0[1];
                sum2 += r1[2] * k0[2];
                sum2 += r2[0] * k1[0];
                sum2 += r2[1] * k1[1];
                sum2 += r2[2] * k1[2];
                sum2 += r3[0] * k2[0];
                sum2 += r3[1] * k2[1];
                sum2 += r3[2] * k2[2];

                *outptr = sum;
                *outptr2 = sum2;
#endif
                r0++;
                r1++;
                r2++;
                r3++;
                outptr++;
                outptr2++;
            }

            // advance two rows: skip the 2-pixel border plus one full row
            r0 += 2 + w;
            r1 += 2 + w;
            r2 += 2 + w;
            r3 += 2 + w;

            outptr += outw;
            outptr2 += outw;
        }

        // trailing single output row (odd outh)
        for (; i < outh; i++)
        {

#if __ARM_NEON
            int nn = outw >> 2;
            int remain = outw & 3;
#else
            int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
            for (; nn>0; nn--)
            {
                float32x4_t _r00 = vld1q_f32(r0);
                float32x4_t _r00n = vld1q_f32(r0 + 4);
                float32x4_t _r01 = vextq_f32(_r00, _r00n, 1);
                float32x4_t _r02 = vextq_f32(_r00, _r00n, 2);

                float32x4_t _r10 = vld1q_f32(r1);
                float32x4_t _r10n = vld1q_f32(r1 + 4);
                float32x4_t _r11 = vextq_f32(_r10, _r10n, 1);
                float32x4_t _r12 = vextq_f32(_r10, _r10n, 2);

                float32x4_t _r20 = vld1q_f32(r2);
                float32x4_t _r20n = vld1q_f32(r2 + 4);
                float32x4_t _r21 = vextq_f32(_r20, _r20n, 1);
                float32x4_t _r22 = vextq_f32(_r20, _r20n, 2);

                float32x4_t _sum1 = vmulq_laneq_f32(_r00, _k012x, 0);
                float32x4_t _sum2 = vfmaq_laneq_f32(_bias0, _r01, _k012x, 1);
                _sum1 = vfmaq_laneq_f32(_sum1, _r02, _k012x, 2);
                _sum2 = vfmaq_laneq_f32(_sum2, _r10, _k345x, 0);
                _sum1 = vfmaq_laneq_f32(_sum1, _r11, _k345x, 1);
                _sum2 = vfmaq_laneq_f32(_sum2, _r12, _k345x, 2);
                _sum1 = vfmaq_laneq_f32(_sum1, _r20, _k678x, 0);
                _sum2 = vfmaq_laneq_f32(_sum2, _r21, _k678x, 1);
                _sum1 = vfmaq_laneq_f32(_sum1, _r22, _k678x, 2);

                _sum1 = vaddq_f32(_sum1, _sum2);

                vst1q_f32(outptr, _sum1);

                r0 += 4;
                r1 += 4;
                r2 += 4;
                outptr += 4;
            }
#else
            // armv7 single-row variant; operands: %0 nn, %1 outptr,
            // %2-%4 r0-r2, %10-%12 kernel rows, %13 bias
            if (nn > 0)
            {
                asm volatile(
                    "pld [%2, #192] \n"
                    "vld1.f32 {d16-d18}, [%2] \n"// r0
                    "add %2, #16 \n"

                    "vext.32 q10, q8, q9, #1 \n"
                    "vext.32 q11, q8, q9, #2 \n"

                    "0: \n"

                    "vmul.f32 q7, q8, %e10[0] \n"

                    "vand q14, %q13, %q13 \n"// q14 = _bias0
                    "vmul.f32 q13, q10, %e10[1] \n"
                    "vmla.f32 q14, q11, %f10[0] \n"

                    "pld [%3, #192] \n"
                    "vld1.f32 {d16-d18}, [%3] \n"// r1
                    "add %3, #16 \n"

                    "vmla.f32 q7, q8, %e11[0] \n"

                    "vext.32 q10, q8, q9, #1 \n"
                    "vext.32 q11, q8, q9, #2 \n"

                    "vmla.f32 q13, q10, %e11[1] \n"
                    "vmla.f32 q14, q11, %f11[0] \n"

                    "pld [%4, #192] \n"
                    "vld1.f32 {d16-d18}, [%4] \n"// r2
                    "add %4, #16 \n"

                    "vmla.f32 q7, q8, %e12[0] \n"

                    "vext.32 q10, q8, q9, #1 \n"
                    "vext.32 q11, q8, q9, #2 \n"

                    "vmla.f32 q13, q10, %e12[1] \n"
                    "vmla.f32 q14, q11, %f12[0] \n"

                    "pld [%2, #192] \n"
                    "vld1.f32 {d16-d18}, [%2] \n"// r0
                    "add %2, #16 \n"

                    "vadd.f32 q7, q7, q13 \n"
                    "vadd.f32 q7, q7, q14 \n"

                    "vext.32 q10, q8, q9, #1 \n"
                    "vext.32 q11, q8, q9, #2 \n"

                    "vst1.f32 {d14-d15}, [%1]! \n"

                    "subs %0, #1 \n"
                    "bne 0b \n"

                    "sub %2, #16 \n"
                    : "=r"(nn), // %0
                      "=r"(outptr), // %1
                      "=r"(r0), // %2
                      "=r"(r1), // %3
                      "=r"(r2) // %4
                    : "0"(nn),
                      "1"(outptr),
                      "2"(r0),
                      "3"(r1),
                      "4"(r2),
                      "w"(_k012x), // %10
                      "w"(_k345x), // %11
                      "w"(_k678x), // %12
                      "w"(_bias0) // %13
                    : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
            }
#endif // __aarch64__
#endif // __ARM_NEON
            for (; remain>0; remain--)
            {
#if __ARM_NEON
                float32x4_t _r00 = vld1q_f32(r0);
                float32x4_t _r10 = vld1q_f32(r1);
                float32x4_t _r20 = vld1q_f32(r2);

                float32x4_t _sum = vmulq_f32(_r00, _k012x);
                _sum = vmlaq_f32(_sum, _r10, _k345x);
                _sum = vmlaq_f32(_sum, _r20, _k678x);

                _sum = vsetq_lane_f32(bias0, _sum, 3);
#if __aarch64__
                *outptr = vaddvq_f32(_sum);
#else
                float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                _ss = vpadd_f32(_ss, _ss);

                *outptr = vget_lane_f32(_ss, 0);
#endif // __aarch64__
#else
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];

                *outptr = sum;
#endif
                r0++;
                r1++;
                r2++;
                outptr++;
            }

            // skip the 2-pixel row border
            r0 += 2;
            r1 += 2;
            r2 += 2;
        }
    }
}

// Depthwise 3x3 convolution, stride 2.  Input rows are consumed two at a
// time horizontally (vld2 deinterleaves even/odd columns); one output row is
// produced per iteration.  'tailstep' advances past the consumed pixels plus
// one skipped input row at the end of each output row.
static void convdw3x3s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    const int tailstep = w - 2*outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for
    for (int g=0; g<group; g++)
    {
        Mat out = top_blob.channel(g);

        const float bias0 = bias ? bias[g] : 0.f;

        const float* kernel0 = kernel + g*9;

        float* outptr = out;

        const float* img0 = bottom_blob.channel(g);

        const float* r0 = img0;
        const float* r1 = img0 + w;
        const float* r2 = img0 + w*2;

#if __ARM_NEON
        // kernel rows in lanes 0-2, lane 3 zeroed (see convdw3x3s1_neon)
        float32x4_t _k012x = vld1q_f32(kernel0);
        float32x4_t _k345x = vld1q_f32(kernel0+3);
        float32x4_t _k678x = vld1q_f32(kernel0+6);

        _k012x = vsetq_lane_f32(0.f, _k012x, 3);
        _k345x = vsetq_lane_f32(0.f, _k345x, 3);
        _k678x = vsetq_lane_f32(0.f, _k678x, 3);

        float32x4_t _bias0 = vdupq_n_f32(bias0);
#else
        const float* k0 = kernel0;
        const float* k1 = kernel0 + 3;
        const float* k2 = kernel0 + 6;
#endif // __ARM_NEON

        int i = 0;

        for (; i < outh; i++)
        {

#if __ARM_NEON
            int nn = outw >> 2;
            int remain = outw & 3;
#else
            int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
            for (; nn>0; nn--)
            {
                // vld2 splits a row into even columns (.val[0]) and odd
                // columns (.val[1]); ext gives the columns at +2
                float32x4x2_t _r0 = vld2q_f32(r0);
                float32x4x2_t _r0n = vld2q_f32(r0+8);
                float32x4_t _r00 = _r0.val[0];// 0 2 4 6
                float32x4_t _r01 = _r0.val[1];// 1 3 5 7
                float32x4_t _r02 = vextq_f32(_r00, _r0n.val[0], 1);// 2 4 6 8

                float32x4_t _outp = vfmaq_laneq_f32(_bias0, _r00, _k012x, 0);
                _outp = vfmaq_laneq_f32(_outp, _r01, _k012x, 1);
                _outp = vfmaq_laneq_f32(_outp, _r02, _k012x, 2);

                float32x4x2_t _r1 = vld2q_f32(r1);
                float32x4x2_t _r1n = vld2q_f32(r1+8);
                float32x4_t _r10 = _r1.val[0];
                float32x4_t _r11 = _r1.val[1];
                float32x4_t _r12 = vextq_f32(_r10, _r1n.val[0], 1);

                _outp = vfmaq_laneq_f32(_outp, _r10, _k345x, 0);
                _outp = vfmaq_laneq_f32(_outp, _r11, _k345x, 1);
                _outp = vfmaq_laneq_f32(_outp, _r12, _k345x, 2);

                float32x4x2_t _r2 = vld2q_f32(r2);
                float32x4x2_t _r2n = vld2q_f32(r2+8);
                float32x4_t _r20 = _r2.val[0];
                float32x4_t _r21 = _r2.val[1];
                float32x4_t _r22 = vextq_f32(_r20, _r2n.val[0], 1);

                _outp = vfmaq_laneq_f32(_outp, _r20, _k678x, 0);
                _outp = vfmaq_laneq_f32(_outp, _r21, _k678x, 1);
                _outp = vfmaq_laneq_f32(_outp, _r22, _k678x, 2);

                vst1q_f32(outptr, _outp);

                r0 += 8;   // stride 2: 8 input pixels -> 4 outputs
                r1 += 8;
                r2 += 8;
                outptr += 4;
            }
#else
            // armv7 stride-2 variant; operands: %0 nn, %1 outptr,
            // %2-%4 r0-r2, %10-%12 kernel rows, %13 bias
            if (nn > 0)
            {
                asm volatile(
                    "pld [%2, #256] \n"
                    "vld2.f32 {d4-d7}, [%2]! \n"
                    "vand q11, %q13, %q13 \n"

                    "0: \n"
                    "vmul.f32 q0, q2, %e10[0] \n"
                    "vmul.f32 q10, q3, %e10[1] \n"

                    "pld [%2, #128] \n"
                    "vld2.f32 {d16-d17}, [%2] \n"
                    "vext.32 q1, q2, q8, #1 \n"

                    "vmla.f32 q11, q1, %f10[0] \n"

                    "pld [%3, #256] \n"
                    "vld2.f32 {d4-d7}, [%3]! \n"

                    "vmla.f32 q0, q2, %e11[0] \n"
                    "vmla.f32 q10, q3, %e11[1] \n"

                    "pld [%3, #128] \n"
                    "vld2.f32 {d16-d17}, [%3] \n"
                    "vext.32 q1, q2, q8, #1 \n"

                    "vmla.f32 q11, q1, %f11[0] \n"

                    "pld [%4, #256] \n"
                    "vld2.f32 {d4-d7}, [%4]! \n"

                    "vmla.f32 q0, q2, %e12[0] \n"
                    "vmla.f32 q10, q3, %e12[1] \n"

                    "pld [%4, #128] \n"
                    "vld2.f32 {d16-d17}, [%4] \n"
                    "vext.32 q1, q2, q8, #1 \n"

                    "vmla.f32 q11, q1, %f12[0] \n"

                    "pld [%2, #256] \n"
                    "vld2.f32 {d4-d7}, [%2]! \n"

                    "vadd.f32 q0, q0, q10 \n"
                    "vadd.f32 q0, q0, q11 \n"

                    "vand q11, %q13, %q13 \n"

                    "subs %0, #1 \n"
                    "vst1.f32 {d0-d1}, [%1]! \n"
                    "bne 0b \n"
                    "sub %2, #32 \n"
                    : "=r"(nn), // %0
                      "=r"(outptr), // %1
                      "=r"(r0), // %2
                      "=r"(r1), // %3
                      "=r"(r2) // %4
                    : "0"(nn),
                      "1"(outptr),
                      "2"(r0),
                      "3"(r1),
                      "4"(r2),
                      "w"(_k012x), // %10
                      "w"(_k345x), // %11
                      "w"(_k678x), // %12
                      "w"(_bias0) // %13
                    : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
            }
#endif // __aarch64__
#endif // __ARM_NEON
            for (; remain>0; remain--)
            {
#if __ARM_NEON
                float32x4_t _r00 = vld1q_f32(r0);
                float32x4_t _r10 = vld1q_f32(r1);
                float32x4_t _r20 = vld1q_f32(r2);

                float32x4_t _sum = vmulq_f32(_r00, _k012x);
                _sum = vmlaq_f32(_sum, _r10, _k345x);
                _sum = vmlaq_f32(_sum, _r20, _k678x);

                _sum = vsetq_lane_f32(bias0, _sum, 3);
#if __aarch64__
                *outptr = vaddvq_f32(_sum);
#else
                float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                _ss = vpadd_f32(_ss, _ss);

                *outptr = vget_lane_f32(_ss, 0);
#endif // __aarch64__
#else
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];

                *outptr = sum;
#endif // __ARM_NEON

                r0 += 2;   // stride 2
                r1 += 2;
                r2 += 2;
                outptr++;
            }

            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}
nanopore_hdp.c
//
//  nanopore_hdp.c
//
//
//  Created by Jordan Eizenga on 1/8/16.
//
//

// in 0-based index
#define ALIGNMENT_KMER_COL 9
#define ALIGNMENT_STRAND_COL 4
#define ALIGNMENT_SIGNAL_COL 13
#define ASSIGNMENT_KMER_COL 0
#define ASSIGNMENT_STRAND_COL 1
#define ASSIGNMENT_SIGNAL_COL 2
// number of expected column in the two kinds of input tables
#define NUM_ALIGNMENT_COLS 15
#define NUM_ASSIGNMENT_COLS 4

#define MODEL_ROW_HEADER_LENGTH 0
#define MODEL_MEAN_ENTRY 0
#define MODEL_NOISE_ENTRY 1
#define MODEL_ENTRY_LENGTH 5

#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include "pairwiseAligner.h"
#include "hdp_math_utils.h"

// Bundle an existing HDP, a sorted copy of the alphabet, and the kmer length
// into a NanoporeHDP.  Takes ownership of 'hdp'; 'alphabet' is copied.
// Aborts if the alphabet contains duplicate characters.
NanoporeHDP* package_nanopore_hdp(HierarchicalDirichletProcess* hdp, const char* alphabet,
                                  int64_t alphabet_size, int64_t kmer_length) {

    NanoporeHDP* nhdp = (NanoporeHDP*) malloc(sizeof(NanoporeHDP));

    // copy and sort alphabet (selection sort)
    char* internal_alphabet = (char*) malloc(sizeof(char) * (alphabet_size + 1));
    for (int64_t i = 0; i < alphabet_size; i++) {
        internal_alphabet[i] = alphabet[i];
    }

    int64_t min_idx;
    char temp;
    for (int64_t i = 0; i < alphabet_size; i++) {
        min_idx = i;
        for (int64_t j = i + 1; j < alphabet_size; j++) {
            if (internal_alphabet[j] < internal_alphabet[min_idx]) {
                min_idx = j;
            }
        }
        temp = internal_alphabet[i];
        internal_alphabet[i] = internal_alphabet[min_idx];
        internal_alphabet[min_idx] = temp;
    }

    // NOTE(review): this duplicate check scans the caller's *unsorted*
    // 'alphabet', not the sorted 'internal_alphabet', so duplicates that are
    // not adjacent in the input slip through -- confirm intent.
    for (int64_t i = 1; i < alphabet_size; i++) {
        if (alphabet[i - 1] == alphabet[i]) {
            fprintf(stderr, "Characters of alphabet must be distinct.\n");
            exit(EXIT_FAILURE);
        }
    }

    internal_alphabet[alphabet_size] = '\0';

    nhdp->hdp = hdp;
    nhdp->alphabet = internal_alphabet;
    nhdp->alphabet_size = alphabet_size;
    nhdp->kmer_length = kmer_length;
    // note: destroying the HDP housed in the NHDP will destroy the DistributionMetricMemo
    nhdp->distr_metric_memos = stSet_construct2(&free);

    return nhdp;
}

// Free the NHDP, its owned HDP, alphabet copy, and metric-memo set.
void destroy_nanopore_hdp(NanoporeHDP* nhdp) {
    destroy_hier_dir_proc(nhdp->hdp);
    stSet_destruct(nhdp->distr_metric_memos);
    free(nhdp->alphabet);
    free(nhdp);
}

// Simple accessors.
int64_t get_nanopore_hdp_kmer_length(NanoporeHDP* nhdp) {
    return nhdp->kmer_length;
}

int64_t get_nanopore_hdp_alphabet_size(NanoporeHDP* nhdp) {
    return nhdp->alphabet_size;
}

// Returns a freshly malloc'd NUL-terminated copy of the (sorted) alphabet;
// caller frees.
char* get_nanopore_hdp_alphabet(NanoporeHDP* nhdp) {
    char* alphabet = nhdp->alphabet;
    int64_t alphabet_size = nhdp->alphabet_size;
    char* copy = (char*) malloc(sizeof(char) * (alphabet_size + 1));
    for (int64_t i = 0; i < alphabet_size; i++) {
        copy[i] = alphabet[i];
    }
    copy[alphabet_size] = '\0';
    return copy;
}

// wrappers: forward straight to the underlying HDP
void execute_nhdp_gibbs_sampling(NanoporeHDP* nhdp, int64_t num_samples, int64_t burn_in,
                                 int64_t thinning, bool verbose) {
    execute_gibbs_sampling(nhdp->hdp, num_samples, burn_in, thinning, verbose);
}

void execute_nhdp_gibbs_sampling_with_snapshots(NanoporeHDP* nhdp, int64_t num_samples, int64_t burn_in,
                                                int64_t thinning,
                                                void (*snapshot_func)(HierarchicalDirichletProcess*, void*),
                                                void* snapshot_func_args, bool verbose) {
    execute_gibbs_sampling_with_snapshots(nhdp->hdp, num_samples, burn_in, thinning,
                                          snapshot_func, snapshot_func_args, verbose);
}

void finalize_nhdp_distributions(NanoporeHDP* nhdp) {
    finalize_distributions(nhdp->hdp);
}

// Read a minION pore model file and fit normal-inverse-gamma hyperparameters
// (mu, nu, alpha, beta) to the per-kmer level means and noise precisions.
// NOTE(review): fopen result is not checked for NULL, and the 'line'/'tokens'
// from the transitions row are overwritten without being freed (leak) --
// confirm and fix upstream.
void normal_inverse_gamma_params_from_minION(const char* model_filepath, double* mu_out, double* nu_out,
                                             double* alpha_out, double* beta_out) {
    // model format:
    // stateNumber \t alphabetSize \t alphabet \t kmerSize
    // [level_mean, level_stdv, noise_mean, noise_stdv, noise_lambda]
    FILE* model_file = fopen(model_filepath, "r");

    // header line: must have exactly 4 fields
    char* line = stFile_getLineFromFile(model_file);
    stList* tokens = stString_split(line);
    if (stList_length(tokens) != 4) {
        st_errAbort("normal_inverse_gamma_params_from_minION: Model format has changed invalid model"
                    "found here %s\n", model_filepath);
    }
    free(line);
    stList_destruct(tokens);

    // ignore transitions line
    line = stFile_getLineFromFile(model_file);
    tokens = stString_split(line);
    if (stList_length(tokens) != 10) {
        st_errnoAbort("More than 3-state hmm transitions parameters found\n");
    }

    // table line: MODEL_ENTRY_LENGTH fields per kmer
    line = stFile_getLineFromFile(model_file);
    tokens = stString_split(line);
    int64_t table_length = (stList_length(tokens) - MODEL_ROW_HEADER_LENGTH) / MODEL_ENTRY_LENGTH;

    double* means = (double*) malloc(sizeof(double) * table_length);
    double* precisions = (double*) malloc(sizeof(double) * table_length);
    // NOTE(review): with MODEL_ROW_HEADER_LENGTH == 0 these offsets are 0 and
    // 1, not the 1 and 2 the original trailing comments claimed.
    int64_t mean_offset = MODEL_ROW_HEADER_LENGTH + MODEL_MEAN_ENTRY; // 1
    int64_t noise_offset = MODEL_ROW_HEADER_LENGTH + MODEL_NOISE_ENTRY; // 2
    char* mean_str;
    char* noise_str;
    double noise;
    for (int i = 0; i < table_length; i++) {
        mean_str = (char*) stList_get(tokens, mean_offset + i * MODEL_ENTRY_LENGTH);
        sscanf(mean_str, "%lf", &(means[i]));

        // precision = 1 / stdv^2
        noise_str = (char*) stList_get(tokens, noise_offset + i * MODEL_ENTRY_LENGTH);
        sscanf(noise_str, "%lf", &noise);
        precisions[i] = 1.0 / (noise * noise);
    }

    free(line);
    stList_destruct(tokens);

    mle_normal_inverse_gamma_params(means, precisions, table_length, mu_out, nu_out, alpha_out, beta_out);

    free(means);
    free(precisions);

    fclose(model_file);
}

// fixed concentration parameters 'gamma' for each depth
HierarchicalDirichletProcess* minION_hdp(int64_t num_dps, int64_t depth, double* gamma,
                                         double sampling_grid_start, double sampling_grid_stop,
                                         int64_t sampling_grid_length, const char* model_filepath) {
    double mu, nu, alpha, beta;
    normal_inverse_gamma_params_from_minION(model_filepath, &mu, &nu, &alpha, &beta);
    return new_hier_dir_proc(num_dps, depth, gamma, sampling_grid_start, sampling_grid_stop,
                             sampling_grid_length, mu, nu, alpha, beta);
}

// Gamma distribution prior on the concentration parameters 'gamma'
// must designate vector of 'alpha' and 'beta' parameters of distribution for each depth
HierarchicalDirichletProcess* minION_hdp_2(int64_t num_dps, int64_t depth, double* gamma_alpha,
                                           double* gamma_beta, double sampling_grid_start,
                                           double sampling_grid_stop, int64_t sampling_grid_length,
                                           const char* model_filepath) {
    double mu, nu, alpha, beta;
    normal_inverse_gamma_params_from_minION(model_filepath, &mu, &nu, &alpha, &beta);
    return new_hier_dir_proc_2(num_dps, depth, gamma_alpha, gamma_beta, sampling_grid_start,
                               sampling_grid_stop, sampling_grid_length, mu, nu, alpha, beta);
}

// Convenience wrapper: no strand filter.
void update_nhdp_from_alignment(NanoporeHDP* nhdp, const char* alignment_filepath, bool has_header) {
    update_nhdp_from_alignment_with_filter(nhdp, alignment_filepath, has_header, NULL);
}

// Load (kmer, signal) observations from an alignment or assignment table and
// replace the HDP's data with them.  Rows are kept only when 'strand_filter'
// is NULL or matches the row's strand column.  The two table formats are
// distinguished by their column counts.
void update_nhdp_from_alignment_with_filter(NanoporeHDP* nhdp, const char* alignment_filepath,
                                            bool has_header, const char* strand_filter) {
    stList* signal_list = stList_construct3(0, &free);
    stList* dp_id_list = stList_construct3(0, &free);

    FILE* align_file = fopen(alignment_filepath, "r");
    if (align_file == NULL) {
        fprintf(stderr, "Alignment %s file does not exist.\n", alignment_filepath);
        exit(EXIT_FAILURE);
    }

    stList* tokens;
    int64_t line_length;
    char* kmer;
    char* strand;
    char* signal_str;
    int64_t* dp_id_ptr;
    double* signal_ptr;
    bool warned = false;
    int proceed = 0;
    char* line = stFile_getLineFromFile(align_file);
    if (has_header) {
        // NOTE(review): the skipped header line is never freed (small leak).
        line = stFile_getLineFromFile(align_file);
    }
    while (line != NULL) {
        tokens = stString_split(line);
        line_length = stList_length(tokens);
        if (!warned) {
            if ((line_length != NUM_ALIGNMENT_COLS) && (line_length != NUM_ASSIGNMENT_COLS)) {
                fprintf(stderr, "Input format has changed from design period, HDP may receive incorrect data.\n");
                warned = true;
                // NOTE(review): this 'continue' does not advance 'line' or
                // free 'tokens' -- the same line is re-split and then
                // processed on the next pass (with warned == true).  Confirm
                // whether skipping the row was intended.
                continue;
            }
        }
        // pick column layout by table kind
        bool using_alignment;
        if (line_length == NUM_ALIGNMENT_COLS) {
            using_alignment = true;
        } else {
            using_alignment = false;
        }
        int strand_col = using_alignment ? ALIGNMENT_STRAND_COL : ASSIGNMENT_STRAND_COL;
        int signal_col = using_alignment ? ALIGNMENT_SIGNAL_COL : ASSIGNMENT_SIGNAL_COL;
        int kmer_col = using_alignment ? ALIGNMENT_KMER_COL : ASSIGNMENT_KMER_COL;

        strand = (char*) stList_get(tokens, strand_col);
        if (strand_filter != NULL) {
            proceed = strcmp(strand, strand_filter);
        }
        if (proceed == 0) {
            // heap-allocate each observation; ownership passes to the lists
            signal_str = (char*) stList_get(tokens, signal_col);
            kmer = (char*) stList_get(tokens, kmer_col);
            signal_ptr = (double*) malloc(sizeof(double));
            dp_id_ptr = (int64_t*) malloc(sizeof(int64_t));
            sscanf(signal_str, "%lf", signal_ptr);
            *dp_id_ptr = kmer_id(kmer, nhdp->alphabet, nhdp->alphabet_size, nhdp->kmer_length);
            stList_append(signal_list, signal_ptr);
            stList_append(dp_id_list, dp_id_ptr);
        }
        stList_destruct(tokens);
        free(line);
        line = stFile_getLineFromFile(align_file);
    }
    fclose(align_file);

    int64_t data_length;
    double* signal = stList_toDoublePtr(signal_list, &data_length);
    int64_t* dp_ids = stList_toIntPtr(dp_id_list, &data_length);
    stList_destruct(signal_list);
    stList_destruct(dp_id_list);

    reset_hdp_data(nhdp->hdp);
    pass_data_to_hdp(nhdp->hdp, signal, dp_ids, data_length);
}

// n^k
int64_t power(int64_t n, int64_t k) {
    int64_t num = 1;
    for (int64_t i = 0; i < k; i++) {
        num *= n;
    }
    return num;
}

// ((n k)): multiset coefficient = C(n + k - 1, k).  Numerator is formed
// first, then divided down from k; each intermediate quotient stays integral.
int64_t multiset_number(int64_t n, int64_t k) {
    int64_t num = 1;
    for (int64_t m = n + k - 1; m >= n; m--) {
        num *= m;
    }
    for (int64_t m = k; m >= 2; m--) {
        num /= m;
    }
    return num;
}

// Decode 'word_id' into base-'alphabet_size' digits, most significant first.
// Returns a malloc'd array of length 'word_length'; caller frees.
int64_t* get_word(int64_t word_id, int64_t alphabet_size, int64_t word_length) {
    int64_t* word = (int64_t*) malloc(sizeof(int64_t) * word_length);
    int64_t id_remainder = word_id;
    for (int64_t i = 0; i < word_length; i++) {
        word[word_length - i - 1] = id_remainder % alphabet_size;
        id_remainder /= alphabet_size;
    }
    return word;
}

// Decode a word id and sort its digits ascending, giving its multiset form.
int64_t* get_word_multiset(int64_t word_id, int64_t alphabet_size, int64_t word_length) {
    int64_t* multiset = get_word(word_id, alphabet_size, word_length);

    // selection sort 'cause whatever
    int64_t min_idx;
    int64_t temp;
    for (int64_t i = 0; i < word_length; i++) {
        min_idx = i;
        for (int64_t j = i + 1; j < word_length; j++) {
            if (multiset[j] < multiset[min_idx]) {
                min_idx = j;
            }
        }
        temp = multiset[i];
        multiset[i] = multiset[min_idx];
        multiset[min_idx] = temp;
    }

    return multiset;
}

// Recursive rank of a sorted multiset among all multisets drawn from
// [alphabet_min, alphabet_size).  Aborts on an out-of-range character.
int64_t multiset_id_internal(int64_t* tail, int64_t tail_length, int64_t alphabet_min,
                             int64_t alphabet_size) {
    int64_t head = tail[0];
    if (tail_length == 1) {
        return head - alphabet_min;
    }
    int64_t step = 0;
    for (int64_t i = alphabet_min; i < alphabet_size; i++) {
        if (head > i) {
            // skip all multisets whose leading element is i
            step += multiset_number(alphabet_size - i, tail_length - 1);
        } else {
            return step + multiset_id_internal(&(tail[1]), tail_length - 1, i, alphabet_size);
        }
    }
    fprintf(stderr, "Character outside alphabet included in multiset\n");
    exit(EXIT_FAILURE);
}

int64_t multiset_id(int64_t* multiset, int64_t length, int64_t alphabet_size) {
    return multiset_id_internal(multiset, length, 0, alphabet_size);
}

// Map a word id to the id of its digit multiset.
int64_t word_id_to_multiset_id(int64_t word_id, int64_t alphabet_size, int64_t word_length) {
    int64_t* multiset = get_word_multiset(word_id, alphabet_size, word_length);
    int64_t id = multiset_id(multiset, word_length, alphabet_size);
    free(multiset);
    return id;
}

// Encode digit array back into a base-'alphabet_size' integer id.
int64_t word_id(int64_t* word, int64_t alphabet_size, int64_t word_length) {
    int64_t id = 0;
    int64_t step = 1;
    for (int64_t i = word_length - 1; i >= 0; i--) {
        id += step * word[i];
        step *= alphabet_size;
    }
    return id;
}

// Translate a kmer string into digit indices within 'alphabet'.  Aborts if a
// character is not in the alphabet.  Returns a malloc'd array; caller frees.
int64_t* kmer_to_word(char* kmer, char* alphabet, int64_t alphabet_size, int64_t kmer_length) {
    int64_t* word = (int64_t*) malloc(sizeof(int64_t) * kmer_length);
    for (int64_t i = 0; i < kmer_length; i++) {
        int64_t j = 0;
        while (kmer[i] != alphabet[j]) {
            j++;
            if (j == alphabet_size) {
                fprintf(stderr, "[signalAlign] - ERROR: K-mer contains character outside alphabet. "
                        "Got offending kmer is: %s. alphabet is %s kmer length %"PRId64"\n",
                        kmer, alphabet, kmer_length);
                exit(EXIT_FAILURE);
            }
        }
        word[i] = j;
    }
    return word;
}

// kmer string -> Dirichlet process id
int64_t kmer_id(char* kmer, char* alphabet, int64_t alphabet_size, int64_t kmer_length) {
    int64_t* word = kmer_to_word(kmer, alphabet, alphabet_size, kmer_length);
    int64_t id = word_id(word, alphabet_size, kmer_length);
    free(word);
    return id;
}

// id within the canonical ACGT alphabet
int64_t standard_kmer_id(char* kmer, int64_t kmer_length) {
    return kmer_id(kmer, "ACGT", 4, kmer_length);
}

int64_t nhdp_kmer_id(NanoporeHDP* nhdp, char* kmer) {
    return kmer_id(kmer, nhdp->alphabet, nhdp->alphabet_size, nhdp->kmer_length);
}

// Density of signal value *x under the kmer's Dirichlet process; LOG_ZERO for
// a NULL kmer.
double get_nanopore_kmer_density(NanoporeHDP* nhdp, void *kmer, void *x) {
    if (kmer == NULL) {
        return LOG_ZERO;
    } else {
        double u = *(double *)x;
        //return dir_proc_density(nhdp->hdp, *(double *) x, nhdp_kmer_id(nhdp, (char *)kmer));
        return dir_proc_density(nhdp->hdp, u, nhdp_kmer_id(nhdp, (char *)kmer));
    }
}

// Memoized distance between the distributions of two kmers.
double get_kmer_distr_distance(NanoporeDistributionMetricMemo* memo, char* kmer_1, char* kmer_2) {
    NanoporeHDP* nhdp = memo->nhdp;
    return get_dir_proc_distance(memo->memo, nhdp_kmer_id(nhdp, kmer_1),
                                 nhdp_kmer_id(nhdp, kmer_2));
}

// Wrap a DistributionMetricMemo with its owning NHDP.
NanoporeDistributionMetricMemo* package_nanopore_metric_memo(NanoporeHDP* nhdp, DistributionMetricMemo* memo) {
    NanoporeDistributionMetricMemo* nanopore_memo =
            (NanoporeDistributionMetricMemo*) malloc(sizeof(NanoporeDistributionMetricMemo));
    nanopore_memo->nhdp = nhdp;
    nanopore_memo->memo = memo;
    return nanopore_memo;
}

// Memo constructors for the supported distribution metrics.
NanoporeDistributionMetricMemo* new_nhdp_kl_divergence_memo(NanoporeHDP* nhdp) {
    return package_nanopore_metric_memo(nhdp, new_kl_divergence_memo(nhdp->hdp));
}

NanoporeDistributionMetricMemo* new_nhdp_hellinger_distance_memo(NanoporeHDP* nhdp) {
    return package_nanopore_metric_memo(nhdp, new_hellinger_distance_memo(nhdp->hdp));
}

NanoporeDistributionMetricMemo* new_nhdp_l2_distance_memo(NanoporeHDP* nhdp) {
    return package_nanopore_metric_memo(nhdp, new_l2_distance_memo(nhdp->hdp));
}

// NOTE(review): the following declaration continues past the end of this
// excerpt; its remainder is outside this view.
NanoporeDistributionMetricMemo*
new_nhdp_shannon_jensen_distance_memo(NanoporeHDP* nhdp) { return package_nanopore_metric_memo(nhdp, new_shannon_jensen_distance_memo(nhdp->hdp)); } double compare_nhdp_distrs_kl_divergence(NanoporeHDP* nhdp_1, char* kmer_1, NanoporeHDP* nhdp_2, char* kmer_2) { return compare_hdp_distrs_kl_divergence(nhdp_1->hdp, nhdp_kmer_id(nhdp_1, kmer_1), nhdp_2->hdp, nhdp_kmer_id(nhdp_2, kmer_2)); } double compare_nhdp_distrs_l2_distance(NanoporeHDP* nhdp_1, char* kmer_1, NanoporeHDP* nhdp_2, char* kmer_2) { return compare_hdp_distrs_l2_distance(nhdp_1->hdp, nhdp_kmer_id(nhdp_1, kmer_1), nhdp_2->hdp, nhdp_kmer_id(nhdp_2, kmer_2)); } double compare_nhdp_distrs_shannon_jensen_distance(NanoporeHDP* nhdp_1, char* kmer_1, NanoporeHDP* nhdp_2, char* kmer_2) { return compare_hdp_distrs_shannon_jensen_distance(nhdp_1->hdp, nhdp_kmer_id(nhdp_1, kmer_1), nhdp_2->hdp, nhdp_kmer_id(nhdp_2, kmer_2)); } double compare_nhdp_distrs_hellinger_distance(NanoporeHDP* nhdp_1, char* kmer_1, NanoporeHDP* nhdp_2, char* kmer_2) { return compare_hdp_distrs_hellinger_distance(nhdp_1->hdp, nhdp_kmer_id(nhdp_1, kmer_1), nhdp_2->hdp, nhdp_kmer_id(nhdp_2, kmer_2)); } double kmer_distr_expected_val(NanoporeHDP* nhdp, char* kmer) { return dir_proc_expected_val(nhdp->hdp, nhdp_kmer_id(nhdp, kmer)); } double kmer_distr_variance(NanoporeHDP* nhdp, char* kmer) { return dir_proc_variance(nhdp->hdp, nhdp_kmer_id(nhdp, kmer)); } int64_t flat_hdp_num_dps(int64_t alphabet_size, int64_t kmer_length) { int64_t num_leaves = power(alphabet_size, kmer_length); return num_leaves + 1; } void flat_hdp_model_internal(HierarchicalDirichletProcess* hdp, int64_t alphabet_size, int64_t kmer_length) { int64_t last_dp_id = power(alphabet_size, kmer_length); for (int64_t id = 0; id < last_dp_id; id++) { set_dir_proc_parent(hdp, id, last_dp_id); } } NanoporeHDP* flat_hdp_model(const char* alphabet, int64_t alphabet_size, int64_t kmer_length, double base_gamma, double leaf_gamma, double sampling_grid_start, double sampling_grid_stop, 
int64_t sampling_grid_length, const char* model_filepath) { double* gamma_params = (double*) malloc(sizeof(double) * 2); gamma_params[0] = base_gamma; gamma_params[1] = leaf_gamma; int64_t num_dps = flat_hdp_num_dps(alphabet_size, kmer_length); HierarchicalDirichletProcess* hdp = minION_hdp(num_dps, 2, gamma_params, sampling_grid_start, sampling_grid_stop, sampling_grid_length, model_filepath); flat_hdp_model_internal(hdp, alphabet_size, kmer_length); finalize_hdp_structure(hdp); NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length); return nhdp; } NanoporeHDP* flat_hdp_model_2(const char* alphabet, int64_t alphabet_size, int64_t kmer_length, double base_gamma_alpha, double base_gamma_beta, double leaf_gamma_alpha, double leaf_gamma_beta, double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length, const char* model_filepath) { double* gamma_alpha = (double*) malloc(sizeof(double) * 2); gamma_alpha[0] = base_gamma_alpha; gamma_alpha[1] = leaf_gamma_alpha; double* gamma_beta = (double*) malloc(sizeof(double) * 2); gamma_beta[0] = base_gamma_beta; gamma_beta[1] = leaf_gamma_beta; int64_t num_dps = flat_hdp_num_dps(alphabet_size, kmer_length); HierarchicalDirichletProcess* hdp = minION_hdp_2(num_dps, 2, gamma_alpha, gamma_beta, sampling_grid_start, sampling_grid_stop, sampling_grid_length, model_filepath); flat_hdp_model_internal(hdp, alphabet_size, kmer_length); finalize_hdp_structure(hdp); NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length); return nhdp; } int64_t multiset_hdp_num_dps(int64_t alphabet_size, int64_t kmer_length) { int64_t num_leaves = power(alphabet_size, kmer_length); int64_t num_middle_dps = multiset_number(alphabet_size, kmer_length); return num_leaves + num_middle_dps + 1; } void multiset_hdp_model_internal(HierarchicalDirichletProcess* hdp, int64_t alphabet_size, int64_t kmer_length) { int64_t num_leaves = power(alphabet_size, kmer_length); int64_t 
num_middle_dps = multiset_number(alphabet_size, kmer_length); // set kmer parents to multisets int64_t multiset_id; for (int64_t kmer_id = 0; kmer_id < num_leaves; kmer_id++) { multiset_id = word_id_to_multiset_id(kmer_id, alphabet_size, kmer_length); set_dir_proc_parent(hdp, kmer_id, num_leaves + multiset_id); } // set multiset parents to base dp int64_t last_dp_id = num_leaves + num_middle_dps; for (int64_t middle_dp_id = num_leaves; middle_dp_id < last_dp_id; middle_dp_id++) { set_dir_proc_parent(hdp, middle_dp_id, last_dp_id); } } NanoporeHDP* multiset_hdp_model(const char* alphabet, int64_t alphabet_size, int64_t kmer_length, double base_gamma, double middle_gamma, double leaf_gamma, double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length, const char* model_filepath) { double* gamma_params = (double*) malloc(sizeof(double) * 3); gamma_params[0] = base_gamma; gamma_params[1] = middle_gamma; gamma_params[2] = leaf_gamma; int64_t num_dps = multiset_hdp_num_dps(alphabet_size, kmer_length); HierarchicalDirichletProcess* hdp = minION_hdp(num_dps, 3, gamma_params, sampling_grid_start, sampling_grid_stop, sampling_grid_length, model_filepath); multiset_hdp_model_internal(hdp, alphabet_size, kmer_length); finalize_hdp_structure(hdp); NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length); return nhdp; } NanoporeHDP* multiset_hdp_model_2(const char* alphabet, int64_t alphabet_size, int64_t kmer_length, double base_gamma_alpha, double base_gamma_beta, double middle_gamma_alpha, double middle_gamma_beta, double leaf_gamma_alpha, double leaf_gamma_beta, double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length, const char* model_filepath) { double* gamma_alpha = (double*) malloc(sizeof(double) * 3); gamma_alpha[0] = base_gamma_alpha; gamma_alpha[1] = middle_gamma_alpha; gamma_alpha[2] = leaf_gamma_alpha; double* gamma_beta = (double*) malloc(sizeof(double) * 3); gamma_beta[0] = 
base_gamma_beta; gamma_beta[1] = middle_gamma_beta; gamma_beta[2] = leaf_gamma_beta; int64_t num_dps = multiset_hdp_num_dps(alphabet_size, kmer_length); HierarchicalDirichletProcess* hdp = minION_hdp_2(num_dps, 3, gamma_alpha, gamma_beta, sampling_grid_start, sampling_grid_stop, sampling_grid_length, model_filepath); multiset_hdp_model_internal(hdp, alphabet_size, kmer_length); finalize_hdp_structure(hdp); NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length); return nhdp; } int64_t middle_2_nts_hdp_num_dps(int64_t alphabet_size, int64_t kmer_length) { if (kmer_length <= 2) { fprintf(stderr, "k-mer is not long enough for middle 2 nucleotides HDP\n"); exit(EXIT_FAILURE); } return power(alphabet_size, kmer_length) + power(alphabet_size, 2) + 1; } int64_t kmer_id_to_middle_nts_id(int64_t kmer_id, int64_t alphabet_size, int64_t kmer_length) { int64_t* kmer = get_word(kmer_id, alphabet_size, kmer_length); int64_t id = alphabet_size * kmer[kmer_length / 2 - 1] + kmer[kmer_length / 2]; free(kmer); return id; } void middle_2_nts_hdp_model_internal(HierarchicalDirichletProcess* hdp, int64_t alphabet_size, int64_t kmer_length) { int64_t num_leaves = power(alphabet_size, kmer_length); int64_t num_middle_dps = power(alphabet_size, 2); int64_t middle_dp_id; for (int64_t kmer_id = 0; kmer_id < num_leaves; kmer_id++) { middle_dp_id = kmer_id_to_middle_nts_id(kmer_id, alphabet_size, kmer_length); set_dir_proc_parent(hdp, kmer_id, middle_dp_id + num_leaves); } int64_t last_dp_id = num_leaves + num_middle_dps; for (int64_t id = num_leaves; id < last_dp_id; id++) { set_dir_proc_parent(hdp, id, last_dp_id); } } NanoporeHDP* middle_2_nts_hdp_model(const char* alphabet, int64_t alphabet_size, int64_t kmer_length, double base_gamma, double middle_gamma, double leaf_gamma, double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length, const char* model_filepath) { if (kmer_length % 2 != 0) { fprintf(stderr, "Warning: middle two 
nucleotides of odd length kmer is ambiguous. Resolving arbitrarily.\n"); } double* gamma_params = (double*) malloc(sizeof(double) * 3); gamma_params[0] = base_gamma; gamma_params[1] = middle_gamma; gamma_params[2] = leaf_gamma; int64_t num_dps = middle_2_nts_hdp_num_dps(alphabet_size, kmer_length); HierarchicalDirichletProcess* hdp = minION_hdp(num_dps, 3, gamma_params, sampling_grid_start, sampling_grid_stop, sampling_grid_length, model_filepath); middle_2_nts_hdp_model_internal(hdp, alphabet_size, kmer_length); finalize_hdp_structure(hdp); NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length); return nhdp; } int64_t word_id_to_group_multiset_id(int64_t word_id, int64_t* char_groups, int64_t alphabet_size, int64_t word_length, int64_t num_groups) { int64_t* word = get_word(word_id, alphabet_size, word_length); for (int64_t i = 0; i < word_length; i++) { word[i] = char_groups[word[i]]; } int64_t min_idx; int64_t temp; for (int64_t i = 0; i < word_length; i++) { min_idx = i; for (int64_t j = i + 1; j < word_length; j++) { if (word[j] < word[min_idx]) { min_idx = j; } } temp = word[i]; word[i] = word[min_idx]; word[min_idx] = temp; } int64_t id = multiset_id(word, word_length, num_groups); free(word); return id; } int64_t group_multiset_hdp_num_dps(int64_t alphabet_size, int64_t* char_groups, int64_t kmer_length) { int64_t num_groups = 0; for (int64_t i = 0; i < alphabet_size; i++) { if (char_groups[i] + 1 > num_groups) { num_groups = char_groups[i] + 1; } } int64_t num_leaves = power(alphabet_size, kmer_length); int64_t num_middle_dps = multiset_number(num_groups, kmer_length); return num_leaves + num_middle_dps + 1; } void group_multiset_hdp_model_internal(HierarchicalDirichletProcess* hdp, int64_t* char_groups, int64_t alphabet_size, int64_t kmer_length) { int64_t num_groups = 0; for (int64_t i = 0; i < alphabet_size; i++) { if (char_groups[i] + 1 > num_groups) { num_groups = char_groups[i] + 1; } } int64_t num_leaves = 
power(alphabet_size, kmer_length); int64_t num_middle_dps = multiset_number(num_groups, kmer_length); // set kmer parents to multisets int64_t multiset_id; for (int64_t kmer_id = 0; kmer_id < num_leaves; kmer_id++) { multiset_id = word_id_to_group_multiset_id(kmer_id, char_groups, alphabet_size, kmer_length, num_groups); set_dir_proc_parent(hdp, kmer_id, num_leaves + multiset_id); } // set multiset parents to base dp int64_t last_dp_id = num_leaves + num_middle_dps; for (int64_t middle_dp_id = num_leaves; middle_dp_id < last_dp_id; middle_dp_id++) { set_dir_proc_parent(hdp, middle_dp_id, last_dp_id); } } void confirm_valid_groupings(int64_t* char_groups, int64_t alphabet_size) { for (int64_t i = 0; i < alphabet_size; i++) { if (char_groups[i] < 0) { fprintf(stderr, "Group numbers must be non-negative.\n"); exit(EXIT_FAILURE); } } int64_t num_groups = 0; for (int64_t i = 0; i < alphabet_size; i++) { if (char_groups[i] + 1 > num_groups) { num_groups = char_groups[i] + 1; } } for (int64_t i = 0; i < num_groups; i++) { bool found_group = false; for (int64_t j = 0; j < alphabet_size; j++) { if (char_groups[j] == i) { found_group = true; break; } } if (!found_group) { fprintf(stderr, "Groups must be consecutively numbered starting with 0.\n"); exit(EXIT_FAILURE); } } } int64_t* alphabet_sort_groups(const char* alphabet, int64_t* char_groups, int64_t alphabet_size) { char* aux_alphabet = (char*) malloc(sizeof(char) * alphabet_size); int64_t* sorted_char_groups = (int64_t*) malloc(sizeof(int64_t) * alphabet_size); for (int64_t i = 0; i < alphabet_size; i++) { aux_alphabet[i] = alphabet[i]; sorted_char_groups[i] = char_groups[i]; } int64_t temp_group; char temp_char; int64_t min_idx; for (int64_t i = 0; i < alphabet_size; i++) { min_idx = i; for (int64_t j = i + 1; j < alphabet_size; j++) { if (aux_alphabet[j] < aux_alphabet[min_idx]) { min_idx = j; } } temp_char = aux_alphabet[i]; aux_alphabet[i] = aux_alphabet[min_idx]; aux_alphabet[min_idx] = temp_char; temp_group = 
sorted_char_groups[i]; sorted_char_groups[i] = sorted_char_groups[min_idx]; sorted_char_groups[min_idx] = temp_group; } free(aux_alphabet); return sorted_char_groups; } // assumes char_groups are 0-based and consecutively numbered NanoporeHDP* group_multiset_hdp_model(const char* alphabet, int64_t* char_groups, int64_t alphabet_size, int64_t kmer_length, double base_gamma, double middle_gamma, double leaf_gamma, double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length, const char* model_filepath) { confirm_valid_groupings(char_groups, alphabet_size); double* gamma_params = (double*) malloc(sizeof(double) * 3); gamma_params[0] = base_gamma; gamma_params[1] = middle_gamma; gamma_params[2] = leaf_gamma; int64_t num_dps = group_multiset_hdp_num_dps(alphabet_size, char_groups, kmer_length); HierarchicalDirichletProcess* hdp = minION_hdp(num_dps, 3, gamma_params, sampling_grid_start, sampling_grid_stop, sampling_grid_length, model_filepath); int64_t* sorted_char_groups = alphabet_sort_groups(alphabet, char_groups, alphabet_size); group_multiset_hdp_model_internal(hdp, sorted_char_groups, alphabet_size, kmer_length); free(sorted_char_groups); finalize_hdp_structure(hdp); NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length); return nhdp; } // assumes char_groups are 0-based and consecutively numbered NanoporeHDP* group_multiset_hdp_model_2(const char* alphabet, int64_t* char_groups, int64_t alphabet_size, int64_t kmer_length, double base_gamma_alpha, double base_gamma_beta, double middle_gamma_alpha, double middle_gamma_beta, double leaf_gamma_alpha, double leaf_gamma_beta, double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length, const char* model_filepath) { confirm_valid_groupings(char_groups, alphabet_size); double *gamma_alpha = (double *) malloc(sizeof(double) * 3); gamma_alpha[0] = base_gamma_alpha; gamma_alpha[1] = middle_gamma_alpha; gamma_alpha[2] = leaf_gamma_alpha; double 
*gamma_beta = (double *) malloc(sizeof(double) * 3); gamma_beta[0] = base_gamma_beta; gamma_beta[1] = middle_gamma_beta; gamma_beta[2] = leaf_gamma_beta; int64_t num_dps = group_multiset_hdp_num_dps(alphabet_size, char_groups, kmer_length); HierarchicalDirichletProcess *hdp = minION_hdp_2(num_dps, 3, gamma_alpha, gamma_beta, sampling_grid_start, sampling_grid_stop, sampling_grid_length, model_filepath); int64_t *sorted_char_groups = alphabet_sort_groups(alphabet, char_groups, alphabet_size); group_multiset_hdp_model_internal(hdp, sorted_char_groups, alphabet_size, kmer_length); free(sorted_char_groups); finalize_hdp_structure(hdp); NanoporeHDP *nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length); return nhdp; } NanoporeHDP* middle_2_nts_hdp_model_2(const char* alphabet, int64_t alphabet_size, int64_t kmer_length, double base_gamma_alpha, double base_gamma_beta, double middle_gamma_alpha, double middle_gamma_beta, double leaf_gamma_alpha, double leaf_gamma_beta, double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length, const char* model_filepath) { if (kmer_length % 2 != 0) { fprintf(stderr, "Warning: middle 2 nucleotides of odd length kmer is ambiguous. 
Resolving arbitrarily.\n"); } double* gamma_alpha = (double*) malloc(sizeof(double) * 3); gamma_alpha[0] = base_gamma_alpha; gamma_alpha[1] = middle_gamma_alpha; gamma_alpha[2] = leaf_gamma_alpha; double* gamma_beta = (double*) malloc(sizeof(double) * 3); gamma_beta[0] = base_gamma_beta; gamma_beta[1] = middle_gamma_beta; gamma_beta[2] = leaf_gamma_beta; int64_t num_dps = middle_2_nts_hdp_num_dps(alphabet_size, kmer_length); HierarchicalDirichletProcess* hdp = minION_hdp_2(num_dps, 3, gamma_alpha, gamma_beta, sampling_grid_start, sampling_grid_stop, sampling_grid_length, model_filepath); middle_2_nts_hdp_model_internal(hdp, alphabet_size, kmer_length); finalize_hdp_structure(hdp); NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length); return nhdp; } int64_t purine_composition_hdp_num_dps(int64_t num_purines, int64_t num_pyrimidines, int64_t kmer_length) { int64_t num_leaves = power(num_purines + num_pyrimidines, kmer_length); int64_t num_middle_dps = kmer_length + 1; return num_leaves + num_middle_dps + 1; } void purine_composition_hdp_model_internal(HierarchicalDirichletProcess* hdp, bool* purine_alphabet, int64_t alphabet_size, int64_t kmer_length) { int64_t num_leaves = power(alphabet_size, kmer_length); int64_t num_middle_dps = kmer_length + 1; // set kmer parents to purine multisets int64_t num_purines; int64_t* word; for (int64_t kmer_id = 0; kmer_id < num_leaves; kmer_id++) { word = get_word(kmer_id, alphabet_size, kmer_length); num_purines = 0; for (int64_t i = 0; i < kmer_length; i++) { if (purine_alphabet[word[i]]) { num_purines++; } } free(word); set_dir_proc_parent(hdp, kmer_id, num_leaves + num_purines); } // set purine set parents to base dp int64_t last_dp_id = num_leaves + num_middle_dps; for (int64_t middle_dp_id = num_leaves; middle_dp_id < last_dp_id; middle_dp_id++) { set_dir_proc_parent(hdp, middle_dp_id, last_dp_id); } } NanoporeHDP* purine_composition_hdp_model(char* purine_alphabet, int64_t num_purines, char* 
pyrimidine_alphabet, int64_t num_pyrimidines, int64_t kmer_length, double base_gamma, double middle_gamma, double leaf_gamma, double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length, const char* model_filepath) { double* gamma_params = (double*) malloc(sizeof(double) * 3); gamma_params[0] = base_gamma; gamma_params[1] = middle_gamma; gamma_params[2] = leaf_gamma; int64_t num_dps = purine_composition_hdp_num_dps(num_purines, num_pyrimidines, kmer_length); HierarchicalDirichletProcess* hdp = minION_hdp(num_dps, 3, gamma_params, sampling_grid_start, sampling_grid_stop, sampling_grid_length, model_filepath); int64_t alphabet_size = num_purines + num_pyrimidines; char* alphabet = (char*) malloc(sizeof(char) * alphabet_size); for (int64_t i = 0; i < num_purines; i++) { alphabet[i] = purine_alphabet[i]; } for (int64_t i = 0; i < num_pyrimidines; i++) { alphabet[i + num_purines] = pyrimidine_alphabet[i]; } NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length); // get back the alphabet in the internal ordering free(alphabet); alphabet = get_nanopore_hdp_alphabet(nhdp); bool* purines = (bool*) malloc(sizeof(bool) * alphabet_size); for (int64_t i = 0; i < num_purines; i++) { purines[i] = false; for (int64_t j = 0; j < num_purines; j++) { if (alphabet[i] == purine_alphabet[j]) { purines[i] = true; break; } } } free(alphabet); purine_composition_hdp_model_internal(hdp, purines, alphabet_size, kmer_length); free(purines); finalize_hdp_structure(hdp); return nhdp; } NanoporeHDP* purine_composition_hdp_model_2(char* purine_alphabet, int64_t num_purines, char* pyrimidine_alphabet, int64_t num_pyrimidines, int64_t kmer_length, double base_gamma_alpha, double base_gamma_beta, double middle_gamma_alpha, double middle_gamma_beta, double leaf_gamma_alpha, double leaf_gamma_beta, double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length, const char* model_filepath) { double* gamma_alpha = (double*) 
malloc(sizeof(double) * 3); gamma_alpha[0] = base_gamma_alpha; gamma_alpha[1] = middle_gamma_alpha; gamma_alpha[2] = leaf_gamma_alpha; double* gamma_beta = (double*) malloc(sizeof(double) * 3); gamma_beta[0] = base_gamma_beta; gamma_beta[1] = middle_gamma_beta; gamma_beta[2] = leaf_gamma_beta; int64_t num_dps = purine_composition_hdp_num_dps(num_purines, num_pyrimidines, kmer_length); HierarchicalDirichletProcess* hdp = minION_hdp_2(num_dps, 3, gamma_alpha, gamma_beta, sampling_grid_start, sampling_grid_stop, sampling_grid_length, model_filepath); int64_t alphabet_size = num_purines + num_pyrimidines; char* alphabet = (char*) malloc(sizeof(char) * alphabet_size); for (int64_t i = 0; i < num_purines; i++) { alphabet[i] = purine_alphabet[i]; } for (int64_t i = 0; i < num_pyrimidines; i++) { alphabet[i + num_purines] = pyrimidine_alphabet[i]; } NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length); // get back the alphabet in the internal ordering free(alphabet); alphabet = get_nanopore_hdp_alphabet(nhdp); bool* purines = (bool*) malloc(sizeof(bool) * alphabet_size); for (int64_t i = 0; i < alphabet_size; i++) { purines[i] = false; for (int64_t j = 0; j < num_purines; j++) { if (alphabet[i] == purine_alphabet[j]) { purines[i] = true; break; } } } free(alphabet); purine_composition_hdp_model_internal(hdp, purines, alphabet_size, kmer_length); free(purines); finalize_hdp_structure(hdp); return nhdp; } void serialize_nhdp(NanoporeHDP* nhdp, const char* filepath) { FILE* out = fopen(filepath, "w"); fprintf(out, "%"PRId64"\n", nhdp->alphabet_size); fprintf(out, "%s\n", nhdp->alphabet); fprintf(out, "%"PRId64"\n", nhdp->kmer_length); serialize_hdp(nhdp->hdp, out); fclose(out); } NanoporeHDP* deserialize_nhdp(const char* filepath) { FILE* in = fopen(filepath, "r"); char* line = stFile_getLineFromFile(in); int64_t alphabet_size; sscanf(line, "%"SCNd64, &alphabet_size); free(line); line = stFile_getLineFromFile(in); char* alphabet = (char*) 
malloc(sizeof(char) * alphabet_size+1); sscanf(line, "%s", alphabet); free(line); line = stFile_getLineFromFile(in); int64_t kmer_length; sscanf(line, "%"SCNd64, &kmer_length); free(line); HierarchicalDirichletProcess* hdp = deserialize_hdp(in); fclose(in); NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length); free(alphabet); return nhdp; } static void nanoporeHdp_checkThreeLevelPriorParameters(double baseGammaAlpha, double baseGammaBeta, double middleGammaAlpha, double middleGammaBeta, double leafGammaAlpha, double leafGammaBeta) { if ((baseGammaAlpha == NULL_HYPERPARAMETER) || (baseGammaBeta == NULL_HYPERPARAMETER) || (middleGammaAlpha == NULL_HYPERPARAMETER) || (middleGammaBeta == NULL_HYPERPARAMETER) || (leafGammaAlpha == NULL_HYPERPARAMETER) || (leafGammaBeta == NULL_HYPERPARAMETER)) { st_errAbort("loadNanoporeHdpFromScratch: You need to provide a alphas and betas for the base, middle, " "and the leaf distributions for the prior for this NanoporeHdp"); } } static void nanoporeHdp_checkThreeLevelFixedParameters(double baseGamma, double middleGamma, double leafGamma) { if ((baseGamma == NULL_HYPERPARAMETER) || (leafGamma == NULL_HYPERPARAMETER) || (middleGamma == NULL_HYPERPARAMETER)) { st_errAbort("loadNanoporeHdpFromScratch: You need to provide a base gamma, middle gamma, and leaf gamma " "for this NanoporeHdpType\n"); } } static void nanoporeHdp_checkTwoLevelPriorParameters(double baseGammaAlpha, double baseGammaBeta, double leafGammaAlpha, double leafGammaBeta) { if ((baseGammaAlpha == NULL_HYPERPARAMETER) || (baseGammaBeta == NULL_HYPERPARAMETER) || (leafGammaAlpha == NULL_HYPERPARAMETER) || (leafGammaBeta == NULL_HYPERPARAMETER)) { st_errAbort("loadNanoporeHdpFromScratch: You need to provide a alphas and betas for the base and the leaf" "distributions for the prior for this NanoporeHdp"); } } static NanoporeHDP *loadNanoporeHdpFromScratch(NanoporeHdpType nHdpType, const char *modelFile, int64_t kmerLength, double baseGamma, 
double middleGamma, double leafGamma, double baseGammaAlpha, double baseGammaBeta, double middleGammaAlpha, double middleGammaBeta, double leafGammaAlpha, double leafGammaBeta, double samplingGridStart, double samplingGridEnd, int64_t samplingGridLength, char *alphabet) { if (nHdpType == singleLevelFixedCanonical) { if ((baseGamma == NULL_HYPERPARAMETER) || (leafGamma == NULL_HYPERPARAMETER)) { st_errAbort("loadNanoporeHdpFromScratch: You need to provide a base gamma and leaf gamma " "for this NanoporeHdpType\n"); } NanoporeHDP *nHdp = flat_hdp_model(CANONICAL_ALPHA, CANONICAL_NUBMER, kmerLength, baseGamma, leafGamma, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == singleLevelYeastAltC) { if ((baseGamma == NULL_HYPERPARAMETER) || (leafGamma == NULL_HYPERPARAMETER)) { st_errAbort("loadNanoporeHdpFromScratch: You need to provide a base gamma and leaf gamma " "for this NanoporeHdpType\n"); } NanoporeHDP *nHdp = flat_hdp_model(ALL_YEAST_ALTC, 21, kmerLength, baseGamma, leafGamma, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == singleLevelYeast) { if ((baseGamma == NULL_HYPERPARAMETER) || (leafGamma == NULL_HYPERPARAMETER)) { st_errAbort("loadNanoporeHdpFromScratch: You need to provide a base gamma and leaf gamma " "for this NanoporeHdpType\n"); } NanoporeHDP *nHdp = flat_hdp_model(ALL_YEAST, 17, kmerLength, baseGamma, leafGamma, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == singleLevelYeastSmall5mer) { if ((baseGamma == NULL_HYPERPARAMETER) || (leafGamma == NULL_HYPERPARAMETER)) { st_errAbort("loadNanoporeHdpFromScratch: You need to provide a base gamma and leaf gamma " "for this NanoporeHdpType\n"); } NanoporeHDP *nHdp = flat_hdp_model(ALL_YEAST_SMALL_5MER, 7, kmerLength, baseGamma, leafGamma, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == singleLevelAll16SrRNA) { 
if ((baseGamma == NULL_HYPERPARAMETER) || (leafGamma == NULL_HYPERPARAMETER)) { st_errAbort("loadNanoporeHdpFromScratch: You need to provide a base gamma and leaf gamma " "for this NanoporeHdpType\n"); } NanoporeHDP *nHdp = flat_hdp_model(ALL_16SRRNA, 11, kmerLength, baseGamma, leafGamma, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == singleLevelFixedM6A) { if ((baseGamma == NULL_HYPERPARAMETER) || (leafGamma == NULL_HYPERPARAMETER)) { st_errAbort("loadNanoporeHdpFromScratch: You need to provide a base gamma and leaf gamma " "for this NanoporeHdpType\n"); } NanoporeHDP *nHdp = flat_hdp_model(METHYL_ADENOSINE_RNA, SYMBOL_NUMBER, kmerLength, baseGamma, leafGamma, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == singleLevelFixedrRNA) { if ((baseGamma == NULL_HYPERPARAMETER) || (leafGamma == NULL_HYPERPARAMETER)) { st_errAbort("loadNanoporeHdpFromScratch: You need to provide a base gamma and leaf gamma " "for this NanoporeHdpType\n"); } NanoporeHDP *nHdp = flat_hdp_model(M7G_PSI_RRNA, SYMBOL_NUMBER_NO_N, kmerLength, baseGamma, leafGamma, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == singleLevelFixed) { if ((baseGamma == NULL_HYPERPARAMETER) || (leafGamma == NULL_HYPERPARAMETER)) { st_errAbort("loadNanoporeHdpFromScratch: You need to provide a base gamma and leaf gamma " "for this NanoporeHdpType\n"); } NanoporeHDP *nHdp = flat_hdp_model(METHYL_HYDROXY_CYTOSINE_ALPHA, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength, baseGamma, leafGamma, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == singleLevelPrior) { nanoporeHdp_checkTwoLevelPriorParameters(baseGammaAlpha, baseGammaBeta, leafGammaAlpha, leafGammaBeta); NanoporeHDP *nHdp = flat_hdp_model_2(METHYL_HYDROXY_CYTOSINE_ALPHA, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength, baseGammaAlpha, baseGammaBeta, leafGammaAlpha, leafGammaBeta, 
samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == singleLevelPrior2) { nanoporeHdp_checkTwoLevelPriorParameters(baseGammaAlpha, baseGammaBeta, leafGammaAlpha, leafGammaBeta); NanoporeHDP *nHdp = flat_hdp_model_2(METHYL_CYTOSINE_ALPHA, SYMBOL_NUMBER, kmerLength, baseGammaAlpha, baseGammaBeta, leafGammaAlpha, leafGammaBeta, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == singleLevelPriorEcoli) { nanoporeHdp_checkTwoLevelPriorParameters(baseGammaAlpha, baseGammaBeta, leafGammaAlpha, leafGammaBeta); NanoporeHDP *nHdp = flat_hdp_model_2(METHYL_CYTOSINE_ADENOSINE_ALPHA, SYMBOL_NUMBER_METHYL_CA, kmerLength, baseGammaAlpha, baseGammaBeta, leafGammaAlpha, leafGammaBeta, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == multisetFixed) { nanoporeHdp_checkThreeLevelFixedParameters(baseGamma, middleGamma, leafGamma); NanoporeHDP *nHdp = multiset_hdp_model(METHYL_HYDROXY_CYTOSINE_ALPHA, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength, baseGamma, middleGamma, leafGamma, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == multisetPrior) { nanoporeHdp_checkThreeLevelPriorParameters(baseGammaAlpha, baseGammaBeta, middleGammaAlpha, middleGammaBeta, leafGammaAlpha, leafGammaBeta); NanoporeHDP *nHdp = multiset_hdp_model_2(METHYL_HYDROXY_CYTOSINE_ALPHA, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength, baseGammaAlpha, baseGammaBeta, middleGammaAlpha, middleGammaBeta, leafGammaAlpha, leafGammaBeta, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == multisetPrior2) { nanoporeHdp_checkThreeLevelPriorParameters(baseGammaAlpha, baseGammaBeta, middleGammaAlpha, middleGammaBeta, leafGammaAlpha, leafGammaBeta); NanoporeHDP *nHdp = multiset_hdp_model_2(METHYL_CYTOSINE_ALPHA, SYMBOL_NUMBER, kmerLength, baseGammaAlpha, baseGammaBeta, middleGammaAlpha, middleGammaBeta, 
leafGammaAlpha, leafGammaBeta, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == multisetPriorEcoli) { nanoporeHdp_checkThreeLevelPriorParameters(baseGammaAlpha, baseGammaBeta, middleGammaAlpha, middleGammaBeta, leafGammaAlpha, leafGammaBeta); NanoporeHDP *nHdp = multiset_hdp_model_2(METHYL_CYTOSINE_ADENOSINE_ALPHA, SYMBOL_NUMBER_METHYL_CA, kmerLength, baseGammaAlpha, baseGammaBeta, middleGammaAlpha, middleGammaBeta, leafGammaAlpha, leafGammaBeta, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == compFixed) { nanoporeHdp_checkThreeLevelFixedParameters(baseGamma, middleGamma, leafGamma); NanoporeHDP *nHdp = purine_composition_hdp_model(PURINES, 2, PYRIMIDINES, 4, kmerLength, baseGamma, middleGamma, leafGamma, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == compPrior) { nanoporeHdp_checkThreeLevelPriorParameters(baseGammaAlpha, baseGammaBeta, middleGammaAlpha, middleGammaBeta, leafGammaAlpha, leafGammaBeta); NanoporeHDP *nHdp = purine_composition_hdp_model_2(PURINES, 2, PYRIMIDINES, 4, kmerLength, baseGammaAlpha, baseGammaBeta, middleGammaAlpha, middleGammaBeta, leafGammaAlpha, leafGammaBeta, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == middleNtsFixed) { nanoporeHdp_checkThreeLevelFixedParameters(baseGamma, middleGamma, leafGamma); NanoporeHDP *nHdp = middle_2_nts_hdp_model(METHYL_HYDROXY_CYTOSINE_ALPHA, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength, baseGamma, middleGamma, leafGamma, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == middleNtsPrior) { nanoporeHdp_checkThreeLevelPriorParameters(baseGammaAlpha, baseGammaBeta, middleGammaAlpha, middleGammaBeta, leafGammaAlpha, leafGammaBeta); NanoporeHDP *nHdp = middle_2_nts_hdp_model_2(METHYL_HYDROXY_CYTOSINE_ALPHA, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength, baseGammaAlpha, 
baseGammaBeta, middleGammaAlpha, middleGammaBeta, leafGammaAlpha, leafGammaBeta, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == groupMultisetFixed) { nanoporeHdp_checkThreeLevelFixedParameters(baseGamma, middleGamma, leafGamma); // ACEGOT // {0, 1, 1, 2, 1, 3} int64_t groups[6] = {0, 1, 1, 2, 1, 3}; NanoporeHDP *nHdp = group_multiset_hdp_model(METHYL_HYDROXY_CYTOSINE_ALPHA, groups, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength, baseGamma, middleGamma, leafGamma, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == groupMultisetPrior) { nanoporeHdp_checkThreeLevelPriorParameters(baseGammaAlpha, baseGammaBeta, middleGammaAlpha, middleGammaBeta, leafGammaAlpha, leafGammaBeta); // ACEGOT // {0, 1, 1, 2, 1, 3} int64_t groups[6] = {0, 1, 1, 2, 1, 3}; NanoporeHDP *nHdp = group_multiset_hdp_model_2(METHYL_HYDROXY_CYTOSINE_ALPHA, groups, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength, baseGammaAlpha, baseGammaBeta, middleGammaAlpha, middleGammaBeta, leafGammaAlpha, leafGammaBeta, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } else { if ((baseGamma == NULL_HYPERPARAMETER) || (leafGamma == NULL_HYPERPARAMETER)) { st_errAbort("loadNanoporeHdpFromScratch: You need to provide a base gamma and leaf gamma " "for unspecified NanoporeHdpType\n"); } NanoporeHDP *nHdp = flat_hdp_model(alphabet, strlen(alphabet), kmerLength, baseGamma, leafGamma, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } } void nanoporeHdp_buildNanoporeHdpFromAlignment(NanoporeHdpType type, int64_t kmerLength, const char *templateModelFile, const char *complementModelFile, const char *alignments, const char *templateHDP, const char *complementHDP, int64_t nbSamples, int64_t burnIn, int64_t thinning, bool verbose, double baseGamma, double middleGamma, double leafGamma, double baseGammaAlpha, double baseGammaBeta, double middleGammaAlpha, double middleGammaBeta, 
double leafGammaAlpha, double leafGammaBeta, double samplingGridStart, double samplingGridEnd, int64_t samplingGridLength, char *alphabet) { fprintf(stderr, "Building Nanopore HDP\n"); #pragma omp parallel sections { { fprintf(stderr, "Updating Template HDP from alignments...\n"); NanoporeHDP *nHdpT = loadNanoporeHdpFromScratch(type, templateModelFile, kmerLength, baseGamma, middleGamma, leafGamma, baseGammaAlpha, baseGammaBeta, middleGammaAlpha, middleGammaBeta, leafGammaAlpha, leafGammaBeta, samplingGridStart, samplingGridEnd, samplingGridLength, alphabet); update_nhdp_from_alignment_with_filter(nHdpT, alignments, FALSE, "t"); fprintf(stderr, "Running Gibbs for template doing %"PRId64"samples, %"PRId64"burn in, %"PRId64"thinning.\n", nbSamples, burnIn, thinning); execute_nhdp_gibbs_sampling(nHdpT, nbSamples, burnIn, thinning, verbose); finalize_nhdp_distributions(nHdpT); fprintf(stderr, "Serializing template to %s...\n", templateHDP); serialize_nhdp(nHdpT, templateHDP); destroy_nanopore_hdp(nHdpT); } #pragma omp section { fprintf(stderr, "Updating Complement HDP from alignments...\n"); NanoporeHDP *nHdpC = loadNanoporeHdpFromScratch(type, complementModelFile, kmerLength, baseGamma, middleGamma, leafGamma, baseGammaAlpha, baseGammaBeta, middleGammaAlpha, middleGammaBeta, leafGammaAlpha, leafGammaBeta, samplingGridStart, samplingGridEnd, samplingGridLength, alphabet); update_nhdp_from_alignment_with_filter(nHdpC, alignments, FALSE, "c"); fprintf(stderr, "Running Gibbs for complement doing %"PRId64"samples, %"PRId64"burn in, %"PRId64"thinning.\n", nbSamples, burnIn, thinning); execute_nhdp_gibbs_sampling(nHdpC, nbSamples, burnIn, thinning, verbose); finalize_nhdp_distributions(nHdpC); fprintf(stderr, "Serializing complement to %s...\n", complementHDP); serialize_nhdp(nHdpC, complementHDP); destroy_nanopore_hdp(nHdpC); } } } void nanoporeHdp_buildOneDHdpFromAlignment(NanoporeHdpType type, int64_t kmerLength, const char *templateModelFile, const char *alignments, 
const char *templateHDP, int64_t nbSamples, int64_t burnIn,
                                           int64_t thinning, bool verbose,
                                           double baseGamma, double middleGamma, double leafGamma,
                                           double baseGammaAlpha, double baseGammaBeta,
                                           double middleGammaAlpha, double middleGammaBeta,
                                           double leafGammaAlpha, double leafGammaBeta,
                                           double samplingGridStart, double samplingGridEnd,
                                           int64_t samplingGridLength, char *alphabet) {
    // One-directional (template-strand only) variant of
    // nanoporeHdp_buildNanoporeHdpFromAlignment: build the HDP from the base
    // model plus "t"-labeled alignment rows, Gibbs-sample, finalize, and
    // serialize to templateHDP.
    fprintf(stderr, "Updating Template HDP from alignments...\n");
    NanoporeHDP *nHdpT = loadNanoporeHdpFromScratch(type, templateModelFile, kmerLength,
                                                    baseGamma, middleGamma, leafGamma,
                                                    baseGammaAlpha, baseGammaBeta,
                                                    middleGammaAlpha, middleGammaBeta,
                                                    leafGammaAlpha, leafGammaBeta,
                                                    samplingGridStart, samplingGridEnd,
                                                    samplingGridLength, alphabet);
    update_nhdp_from_alignment_with_filter(nHdpT, alignments, FALSE, "t");
    // BUG FIX: the format string was missing spaces after the PRId64 fields,
    // printing e.g. "100samples, 50burn in, 10thinning".
    fprintf(stderr, "Running Gibbs for template doing %"PRId64" samples, %"PRId64" burn in, "
                    "%"PRId64" thinning.\n", nbSamples, burnIn, thinning);
    execute_nhdp_gibbs_sampling(nHdpT, nbSamples, burnIn, thinning, verbose);
    finalize_nhdp_distributions(nHdpT);
    fprintf(stderr, "Serializing template to %s...\n", templateHDP);
    serialize_nhdp(nHdpT, templateHDP);
    destroy_nanopore_hdp(nHdpT);
}
simde-diagnostic.h
/* SPDX-License-Identifier: MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, * modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Copyright: * 2017-2020 Evan Nemerson <evan@nemerson.com> */ /* SIMDe targets a very wide range of standards and compilers, and our * goal is to compile cleanly even with extremely aggressive warnings * (i.e., -Weverything in clang, -Wextra in GCC, /W4 for MSVC, etc.) * treated as errors. * * While our preference is to resolve the underlying issue a given * diagnostic is warning us about, sometimes that's not possible. * Fixing a warning in one compiler may cause problems in another. * Sometimes a warning doesn't really apply to us (false positives), * and sometimes adhering to a warning would mean dropping a feature * we *know* the compiler supports since we have tested specifically * for the compiler or feature. * * When practical, warnings are only disabled for specific code. 
For * a list of warnings which are enabled by default in all SIMDe code, * see SIMDE_DISABLE_UNWANTED_DIAGNOSTICS. Note that we restore the * warning stack when SIMDe is done parsing, so code which includes * SIMDe is not deprived of these warnings. */ #if !defined(SIMDE_DIAGNOSTIC_H) #define SIMDE_DIAGNOSTIC_H #include "hedley.h" #include "simde-detect-clang.h" /* This is only to help us implement functions like _mm_undefined_ps. */ #if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_) #undef SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ #endif #if HEDLEY_HAS_WARNING("-Wuninitialized") #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("clang diagnostic ignored \"-Wuninitialized\"") #elif HEDLEY_GCC_VERSION_CHECK(4,2,0) #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("GCC diagnostic ignored \"-Wuninitialized\"") #elif HEDLEY_PGI_VERSION_CHECK(19,10,0) #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("diag_suppress 549") #elif HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) && defined(__cplusplus) #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("error_messages(off,SEC_UNINITIALIZED_MEM_READ,SEC_UNDEFINED_RETURN_VALUE,unassigned)") #elif HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("error_messages(off,SEC_UNINITIALIZED_MEM_READ,SEC_UNDEFINED_RETURN_VALUE)") #elif HEDLEY_SUNPRO_VERSION_CHECK(5,12,0) && defined(__cplusplus) #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("error_messages(off,unassigned)") #elif \ HEDLEY_TI_VERSION_CHECK(16,9,9) || \ HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \ HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,2) #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("diag_suppress 551") #elif HEDLEY_INTEL_VERSION_CHECK(13,0,0) #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("warning(disable:592)") #elif HEDLEY_MSVC_VERSION_CHECK(19,0,0) && !defined(__MSVC_RUNTIME_CHECKS) #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ 
__pragma(warning(disable:4700)) #endif /* GCC emits a lot of "notes" about the ABI being different for things * in newer versions of GCC. We don't really care because all our * functions are inlined and don't generate ABI. */ #if HEDLEY_GCC_VERSION_CHECK(7,0,0) #define SIMDE_DIAGNOSTIC_DISABLE_PSABI_ _Pragma("GCC diagnostic ignored \"-Wpsabi\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_PSABI_ #endif /* Since MMX uses x87 FP registers, you're supposed to call _mm_empty() * after each MMX function before any floating point instructions. * Some compilers warn about functions which use MMX functions but * don't call _mm_empty(). However, since SIMDe is implementyng the * MMX API we shouldn't be calling _mm_empty(); we leave it to the * caller to invoke simde_mm_empty(). */ #if HEDLEY_INTEL_VERSION_CHECK(19,0,0) #define SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_ _Pragma("warning(disable:13200 13203)") #elif defined(HEDLEY_MSVC_VERSION) #define SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_ __pragma(warning(disable:4799)) #else #define SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_ #endif /* Intel is pushing people to use OpenMP SIMD instead of Cilk+, so they * emit a diagnostic if you use #pragma simd instead of * #pragma omp simd. SIMDe supports OpenMP SIMD, you just need to * compile with -qopenmp or -qopenmp-simd and define * SIMDE_ENABLE_OPENMP. Cilk+ is just a fallback. */ #if HEDLEY_INTEL_VERSION_CHECK(18,0,0) #define SIMDE_DIAGNOSTIC_DISABLE_SIMD_PRAGMA_DEPRECATED_ _Pragma("warning(disable:3948)") #else #define SIMDE_DIAGNOSTIC_DISABLE_SIMD_PRAGMA_DEPRECATED_ #endif /* MSVC emits a diagnostic when we call a function (like * simde_mm_set_epi32) while initializing a struct. We currently do * this a *lot* in the tests. 
*/ #if \ defined(HEDLEY_MSVC_VERSION) #define SIMDE_DIAGNOSTIC_DISABLE_NON_CONSTANT_AGGREGATE_INITIALIZER_ __pragma(warning(disable:4204)) #else #define SIMDE_DIAGNOSTIC_DISABLE_NON_CONSTANT_AGGREGATE_INITIALIZER_ #endif /* This warning needs a lot of work. It is triggered if all you do is * pass the value to memcpy/__builtin_memcpy, or if you initialize a * member of the union, even if that member takes up the entire union. * Last tested with clang-10, hopefully things will improve in the * future; if clang fixes this I'd love to enable it. */ #if \ HEDLEY_HAS_WARNING("-Wconditional-uninitialized") #define SIMDE_DIAGNOSTIC_DISABLE_CONDITIONAL_UNINITIALIZED_ _Pragma("clang diagnostic ignored \"-Wconditional-uninitialized\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_CONDITIONAL_UNINITIALIZED_ #endif /* This warning is meant to catch things like `0.3 + 0.4 == 0.7`, which * will is false. However, SIMDe uses these operations exclusively * for things like _mm_cmpeq_ps, for which we really do want to check * for equality (or inequality). * * If someone wants to put together a SIMDE_FLOAT_EQUAL(a, op, b) macro * which just wraps a check in some code do disable this diagnostic I'd * be happy to accept it. */ #if \ HEDLEY_HAS_WARNING("-Wfloat-equal") || \ HEDLEY_GCC_VERSION_CHECK(3,0,0) #define SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL_ _Pragma("GCC diagnostic ignored \"-Wfloat-equal\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL_ #endif /* This is because we use HEDLEY_STATIC_ASSERT for static assertions. * If Hedley can't find an implementation it will preprocess to * nothing, which means there will be a trailing semi-colon. 
*/ #if HEDLEY_HAS_WARNING("-Wextra-semi") #define SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_ _Pragma("clang diagnostic ignored \"-Wextra-semi\"") #elif HEDLEY_GCC_VERSION_CHECK(8,1,0) && defined(__cplusplus) #define SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_ _Pragma("GCC diagnostic ignored \"-Wextra-semi\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_ #endif /* We do use a few variadic macros, which technically aren't available * until C99 and C++11, but every compiler I'm aware of has supported * them for much longer. That said, usage is isolated to the test * suite and compilers known to support them. */ #if HEDLEY_HAS_WARNING("-Wvariadic-macros") || HEDLEY_GCC_VERSION_CHECK(4,0,0) #if HEDLEY_HAS_WARNING("-Wc++98-compat-pedantic") #define SIMDE_DIAGNOSTIC_DISABLE_VARIADIC_MACROS_ \ _Pragma("clang diagnostic ignored \"-Wvariadic-macros\"") \ _Pragma("clang diagnostic ignored \"-Wc++98-compat-pedantic\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_VARIADIC_MACROS_ _Pragma("GCC diagnostic ignored \"-Wvariadic-macros\"") #endif #else #define SIMDE_DIAGNOSTIC_DISABLE_VARIADIC_MACROS_ #endif /* emscripten requires us to use a __wasm_unimplemented_simd128__ macro * before we can access certain SIMD intrinsics, but this diagnostic * warns about it being a reserved name. It is a reserved name, but * it's reserved for the compiler and we are using it to convey * information to the compiler. * * This is also used when enabling native aliases since we don't get to * choose the macro names. */ #if HEDLEY_HAS_WARNING("-Wdouble-promotion") #define SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_MACRO_ _Pragma("clang diagnostic ignored \"-Wreserved-id-macro\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_MACRO_ #endif /* clang 3.8 warns about the packed attribute being unnecessary when * used in the _mm_loadu_* functions. That *may* be true for version * 3.8, but for later versions it is crucial in order to make unaligned * access safe. 
*/ #if HEDLEY_HAS_WARNING("-Wpacked") #define SIMDE_DIAGNOSTIC_DISABLE_PACKED_ _Pragma("clang diagnostic ignored \"-Wpacked\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_PACKED_ #endif /* Triggered when assigning a float to a double implicitly. We use * explicit casts in SIMDe, this is only used in the test suite. */ #if HEDLEY_HAS_WARNING("-Wdouble-promotion") #define SIMDE_DIAGNOSTIC_DISABLE_DOUBLE_PROMOTION_ _Pragma("clang diagnostic ignored \"-Wdouble-promotion\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_DOUBLE_PROMOTION_ #endif /* Several compilers treat conformant array parameters as VLAs. We * test to make sure we're in C mode (C++ doesn't support CAPs), and * that the version of the standard supports CAPs. We also reject * some buggy compilers like MSVC (the logic is in Hedley if you want * to take a look), but with certain warnings enabled some compilers * still like to emit a diagnostic. */ #if HEDLEY_HAS_WARNING("-Wvla") #define SIMDE_DIAGNOSTIC_DISABLE_VLA_ _Pragma("clang diagnostic ignored \"-Wvla\"") #elif HEDLEY_GCC_VERSION_CHECK(4,3,0) #define SIMDE_DIAGNOSTIC_DISABLE_VLA_ _Pragma("GCC diagnostic ignored \"-Wvla\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_VLA_ #endif #if HEDLEY_HAS_WARNING("-Wused-but-marked-unused") #define SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_ _Pragma("clang diagnostic ignored \"-Wused-but-marked-unused\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_ #endif #if HEDLEY_HAS_WARNING("-Wunused-function") #define SIMDE_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION_ _Pragma("clang diagnostic ignored \"-Wunused-function\"") #elif HEDLEY_GCC_VERSION_CHECK(3,4,0) #define SIMDE_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION_ _Pragma("GCC diagnostic ignored \"-Wunused-function\"") #elif HEDLEY_MSVC_VERSION_CHECK(19,0,0) /* Likely goes back further */ #define SIMDE_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION_ __pragma(warning(disable:4505)) #else #define SIMDE_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION_ #endif #if HEDLEY_HAS_WARNING("-Wpass-failed") #define 
SIMDE_DIAGNOSTIC_DISABLE_PASS_FAILED_ _Pragma("clang diagnostic ignored \"-Wpass-failed\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_PASS_FAILED_ #endif #if HEDLEY_HAS_WARNING("-Wpadded") #define SIMDE_DIAGNOSTIC_DISABLE_PADDED_ _Pragma("clang diagnostic ignored \"-Wpadded\"") #elif HEDLEY_MSVC_VERSION_CHECK(19,0,0) /* Likely goes back further */ #define SIMDE_DIAGNOSTIC_DISABLE_PADDED_ __pragma(warning(disable:4324)) #else #define SIMDE_DIAGNOSTIC_DISABLE_PADDED_ #endif #if HEDLEY_HAS_WARNING("-Wzero-as-null-pointer-constant") #define SIMDE_DIAGNOSTIC_DISABLE_ZERO_AS_NULL_POINTER_CONSTANT_ _Pragma("clang diagnostic ignored \"-Wzero-as-null-pointer-constant\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_ZERO_AS_NULL_POINTER_CONSTANT_ #endif #if HEDLEY_HAS_WARNING("-Wold-style-cast") #define SIMDE_DIAGNOSTIC_DISABLE_OLD_STYLE_CAST_ _Pragma("clang diagnostic ignored \"-Wold-style-cast\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_OLD_STYLE_CAST_ #endif #if HEDLEY_HAS_WARNING("-Wcast-function-type") || HEDLEY_GCC_VERSION_CHECK(8,0,0) #define SIMDE_DIAGNOSTIC_DISABLE_CAST_FUNCTION_TYPE_ _Pragma("GCC diagnostic ignored \"-Wcast-function-type\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_CAST_FUNCTION_TYPE_ #endif /* clang will emit this warning when we use C99 extensions whan not in * C99 mode, even though it does support this. In such cases we check * the compiler and version first, so we know it's not a problem. 
*/ #if HEDLEY_HAS_WARNING("-Wc99-extensions") #define SIMDE_DIAGNOSTIC_DISABLE_C99_EXTENSIONS_ _Pragma("clang diagnostic ignored \"-Wc99-extensions\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_C99_EXTENSIONS_ #endif /* https://github.com/simd-everywhere/simde/issues/277 */ #if defined(HEDLEY_GCC_VERSION) && HEDLEY_GCC_VERSION_CHECK(4,6,0) && !HEDLEY_GCC_VERSION_CHECK(6,4,0) && defined(__cplusplus) #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_UNUSED_BUT_SET_VARIBALE_ _Pragma("GCC diagnostic ignored \"-Wunused-but-set-variable\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_UNUSED_BUT_SET_VARIBALE_ #endif /* This is the warning that you normally define _CRT_SECURE_NO_WARNINGS * to silence, but you have to do that before including anything and * that would require reordering includes. */ #if defined(_MSC_VER) #define SIMDE_DIAGNOSTIC_DISABLE_ANNEX_K_ __pragma(warning(disable:4996)) #else #define SIMDE_DIAGNOSTIC_DISABLE_ANNEX_K_ #endif /* Some compilers, such as clang, may use `long long` for 64-bit * integers, but `long long` triggers a diagnostic with * -Wc++98-compat-pedantic which says 'long long' is incompatible with * C++98. */ #if HEDLEY_HAS_WARNING("-Wc++98-compat-pedantic") #define SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_ _Pragma("clang diagnostic ignored \"-Wc++98-compat-pedantic\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_ #endif /* Some problem as above */ #if HEDLEY_HAS_WARNING("-Wc++11-long-long") #define SIMDE_DIAGNOSTIC_DISABLE_CPP11_LONG_LONG_ _Pragma("clang diagnostic ignored \"-Wc++11-long-long\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_CPP11_LONG_LONG_ #endif /* emscripten emits this whenever stdin/stdout/stderr is used in a * macro. 
*/ #if HEDLEY_HAS_WARNING("-Wdisabled-macro-expansion") #define SIMDE_DIAGNOSTIC_DISABLE_DISABLED_MACRO_EXPANSION_ _Pragma("clang diagnostic ignored \"-Wdisabled-macro-expansion\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_DISABLED_MACRO_EXPANSION_ #endif /* Clang uses C11 generic selections to implement some AltiVec * functions, which triggers this diagnostic when not compiling * in C11 mode */ #if HEDLEY_HAS_WARNING("-Wc11-extensions") #define SIMDE_DIAGNOSTIC_DISABLE_C11_EXTENSIONS_ _Pragma("clang diagnostic ignored \"-Wc11-extensions\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_C11_EXTENSIONS_ #endif /* Clang sometimes triggers this warning in macros in the AltiVec and * NEON headers, or due to missing functions. */ #if HEDLEY_HAS_WARNING("-Wvector-conversion") #define SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_ _Pragma("clang diagnostic ignored \"-Wvector-conversion\"") /* For NEON, the situation with -Wvector-conversion in clang < 10 is * bad enough that we just disable the warning altogether. 
*/ #if defined(SIMDE_ARCH_ARM) && SIMDE_DETECT_CLANG_VERSION_NOT(10,0,0) #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_ SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_ #endif #else #define SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_ #endif #if !defined(SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_) #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_ #endif /* SLEEF triggers this a *lot* in their headers */ #if HEDLEY_HAS_WARNING("-Wignored-qualifiers") #define SIMDE_DIAGNOSTIC_DISABLE_IGNORED_QUALIFIERS_ _Pragma("clang diagnostic ignored \"-Wignored-qualifiers\"") #elif HEDLEY_GCC_VERSION_CHECK(4,3,0) #define SIMDE_DIAGNOSTIC_DISABLE_IGNORED_QUALIFIERS_ _Pragma("GCC diagnostic ignored \"-Wignored-qualifiers\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_IGNORED_QUALIFIERS_ #endif /* GCC emits this under some circumstances when using __int128 */ #if HEDLEY_GCC_VERSION_CHECK(4,8,0) #define SIMDE_DIAGNOSTIC_DISABLE_PEDANTIC_ _Pragma("GCC diagnostic ignored \"-Wpedantic\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_PEDANTIC_ #endif /* MSVC doesn't like (__assume(0), code) and will warn about code being * unreachable, but we want it there because not all compilers * understand the unreachable macro and will complain if it is missing. * I'm planning on adding a new macro to Hedley to handle this a bit * more elegantly, but until then... */ #if defined(HEDLEY_MSVC_VERSION) #define SIMDE_DIAGNOSTIC_DISABLE_UNREACHABLE_ __pragma(warning(disable:4702)) #else #define SIMDE_DIAGNOSTIC_DISABLE_UNREACHABLE_ #endif /* This is a false positive from GCC in a few places. 
*/ #if HEDLEY_GCC_VERSION_CHECK(4,7,0) #define SIMDE_DIAGNOSTIC_DISABLE_MAYBE_UNINITIAZILED_ _Pragma("GCC diagnostic ignored \"-Wmaybe-uninitialized\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_MAYBE_UNINITIAZILED_ #endif #if defined(SIMDE_ENABLE_NATIVE_ALIASES) #define SIMDE_DISABLE_UNWANTED_DIAGNOSTICS_NATIVE_ALIASES_ \ SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_MACRO_ #else #define SIMDE_DISABLE_UNWANTED_DIAGNOSTICS_NATIVE_ALIASES_ #endif #define SIMDE_DISABLE_UNWANTED_DIAGNOSTICS \ SIMDE_DISABLE_UNWANTED_DIAGNOSTICS_NATIVE_ALIASES_ \ SIMDE_DIAGNOSTIC_DISABLE_PSABI_ \ SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_ \ SIMDE_DIAGNOSTIC_DISABLE_SIMD_PRAGMA_DEPRECATED_ \ SIMDE_DIAGNOSTIC_DISABLE_CONDITIONAL_UNINITIALIZED_ \ SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL_ \ SIMDE_DIAGNOSTIC_DISABLE_NON_CONSTANT_AGGREGATE_INITIALIZER_ \ SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_ \ SIMDE_DIAGNOSTIC_DISABLE_VLA_ \ SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_ \ SIMDE_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION_ \ SIMDE_DIAGNOSTIC_DISABLE_PASS_FAILED_ \ SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_ \ SIMDE_DIAGNOSTIC_DISABLE_CPP11_LONG_LONG_ \ SIMDE_DIAGNOSTIC_DISABLE_BUGGY_UNUSED_BUT_SET_VARIBALE_ \ SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_ #endif /* !defined(SIMDE_DIAGNOSTIC_H) */
LAGraph_grread.c
//------------------------------------------------------------------------------ // LAGraph_grread: read a matrix from a binary file //------------------------------------------------------------------------------ /* LAGraph: graph algorithms based on GraphBLAS Copyright 2019 LAGraph Contributors. (see Contributors.txt for a full list of Contributors; see ContributionInstructions.txt for information on how you can Contribute to this project). All Rights Reserved. NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT. Released under a BSD license, please see the LICENSE file distributed with this Software or contact permission@sei.cmu.edu for full terms. Created, in part, with funding and support from the United States Government. (see Acknowledgments.txt file). This program includes and/or can make use of certain third party source code, object code, documentation and other files ("Third Party Software"). See LICENSE file for more details. */ //------------------------------------------------------------------------------ // LAGraph_grread: read a matrix from a binary file. // Contributed by Tim Davis, Texas A&M, based on the Galois graph reader // file format. // The file format consists of a header, with the following content: // uint64_t version : either 1 or 2. 1: nodes are 2^32, 2: nodes are // 64 bit. This value is returned to the caller, but is otherwise // unused. // uint64_t esize : the size of the edge weight, as sizeof (edgetype). // For example, if the file contains edge weights of type int32_t, // esize is sizeof (int32_t) == 4. 
The caller must specify the // corresponding GrB_Type, and its size must match esize. // uint64_t n : the number of node in the graph. The GrB_Matrix is // n-by-n. Rectangular matrices are not supported by this format. // uint64_t e : the number of edges in the graph // This header is followed by a matrix in CSR format: // Gp : an array of size ((n+1) * sizeof (uint64_t)) bytes, but Gp [0] = 0 // does not appear in the file. This section of the file is thus // (n * sizeof (uint64_t)) bytes in length. // Gj : an array of size (e * sizeof (int32_t)), containing the adjaceny // lists. Note that the indices are 32 bit, not 64 bit, and thus // this format is limited to graphs with n < 2^32. // Gx : an array of size (e * esize), containing the edge weights. // LAgraph_grread returns its status: GrB_SUCCESS if succesful, // GrB_OUT_OF_MEMORY if out of memory, GrB_INVALID_VALUE if a file I/O error // occurs or the edge size is not what was expected. #include "LAGraph_internal.h" //------------------------------------------------------------------------------ // gr_header //------------------------------------------------------------------------------ // The gr_header specifies the first 4 * sizeof(uint64_t) bytes of the file. typedef struct { uint64_t version ; // either 1 or 2. // 1: node id's are in the range 0 to 2^32 // 2: node id's are in the range 0 to 2^64 uint64_t esize ; // sizeof (edgetype) uint64_t n ; // # of nodes in the graph uint64_t e ; // # of edges in the graph } gr_header ; //------------------------------------------------------------------------------ // LAGraph_binary_read //------------------------------------------------------------------------------ // Read a block of binary data from a file. Returns GrB_SUCCESS if successful, // GrB_INVALID_VALUE otherwise. 
static GrB_Info LAGraph_binary_read ( char *name, // name of array being read in FILE *fp, // file to read from void *buffer, // buffer of size nbytes to read into size_t n, // # of elements to read size_t size // size of each element ) { if (fp == NULL) { fprintf (stderr, "LAGraph_grread: file I/O error\n") ; return (GrB_INVALID_VALUE) ; } size_t n_read = fread (buffer, size, n, fp) ; if (n_read != n) { fprintf (stderr, "LAGraph_grread: file I/O error; expected %g items" ", got %g, object %s, size %g\n", (double) n_read, (double) n, name, (double) size) ; return (GrB_INVALID_VALUE) ; } return (GrB_SUCCESS) ; } //------------------------------------------------------------------------------ // LAGRAPH_FREE_ALL //------------------------------------------------------------------------------ // Free all allocated space; used only for error return. #define LAGRAPH_FREE_ALL \ { \ GrB_free (G) ; \ LAGRAPH_FREE (Gp) ; \ LAGRAPH_FREE (Gj) ; \ LAGRAPH_FREE (Gj_32) ; \ LAGRAPH_FREE (Gx) ; \ if (fp != NULL) fclose (fp) ; \ fp = NULL ; \ } //------------------------------------------------------------------------------ // LAGraph_grread //------------------------------------------------------------------------------ GrB_Info LAGraph_grread // read a matrix from a binary file ( GrB_Matrix *G, // handle of matrix to create uint64_t *G_version, // the version in the file const char *filename, // name of file to open GrB_Type gtype // type of matrix to read, NULL if no edge weights // (in that case, G has type GrB_BOOL with all // edge weights equal to 1). 
) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GrB_Info info ; GrB_Index *Gp = NULL ; int32_t *Gj_32 = NULL ; GrB_Index *Gj = NULL ; void *Gx = NULL ; FILE *fp = NULL ; if (G == NULL || G_version == NULL || filename == NULL) { LAGRAPH_ERROR ("invalid input arguments", GrB_NULL_POINTER) ; } (*G) = NULL ; (*G_version) = 0 ; //-------------------------------------------------------------------------- // open the file //-------------------------------------------------------------------------- fp = fopen (filename, "r") ; if (fp == NULL) { fprintf (stderr, "LAGraph_grread: file not found: %s\n", filename) ; LAGRAPH_ERROR ("input file not found", GrB_INVALID_VALUE) ; } //-------------------------------------------------------------------------- // open the file and read the gr_header //-------------------------------------------------------------------------- gr_header header ; LAGRAPH_OK (LAGraph_binary_read ("header", fp, &header, 1, sizeof (gr_header))) ; uint64_t version = header.version ; // version, 1 or 2 uint64_t esize = header.esize ; // sizeof (edge type) uint64_t n = header.n ; // # of nodes uint64_t e = header.e ; // # of edges (*G_version) = version ; size_t esize_expected = 0 ; if (gtype != NULL) { LAGRAPH_OK (GxB_Type_size (&esize_expected, gtype)) ; } if (esize != esize_expected) { fprintf (stderr, "LAGraph_grread: esize in file (%g) does not match" " gtype size (%g)\n", (double) esize, (double) esize_expected) ; LAGRAPH_ERROR ("unexpected edge size", GrB_INVALID_VALUE) ; } if (! 
(version == 1 || version == 2)) { LAGRAPH_ERROR ("invalid version, must be 1 or 2", GrB_INVALID_VALUE) ; } if (version == 1 && n > UINT32_MAX) { LAGRAPH_ERROR ("problem too large", GrB_INVALID_VALUE) ; } //-------------------------------------------------------------------------- // allocate and read in the pointers //-------------------------------------------------------------------------- Gp = LAGraph_malloc (n+1, sizeof (GrB_Index)) ; if (Gp == NULL) { LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ; } Gp [0] = 0 ; LAGRAPH_OK (LAGraph_binary_read ("pointers", fp, Gp+1, n, sizeof (GrB_Index))) ; //-------------------------------------------------------------------------- // allocate and read in the indices //-------------------------------------------------------------------------- Gj = LAGraph_malloc (e, sizeof (GrB_Index)) ; if (Gj == NULL) { LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ; } if (version == 1) { //---------------------------------------------------------------------- // indices are in 32-bit format in the file //---------------------------------------------------------------------- // allocate workspace for a single chunk #define CHUNK (10 * 1024 * 1024) int64_t chunk = LAGRAPH_MIN (CHUNK, e) ; Gj_32 = LAGraph_malloc (chunk, sizeof (int32_t)) ; if (Gj_32 == NULL) { LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ; } // read in the indices one chunk at a time for (int64_t k = 0 ; k < e ; k += CHUNK) { // read in the next chunk int64_t chunk = LAGRAPH_MIN (CHUNK, e-k) ; LAGRAPH_OK (LAGraph_binary_read ("indices", fp, Gj_32, chunk, sizeof (int32_t))) ; // convert the chunk to 64-bit #pragma omp parallel for schedule(static) for (GrB_Index p = 0 ; p < chunk ; p++) { Gj [k + p] = (GrB_Index) Gj_32 [p] ; } } LAGRAPH_FREE (Gj_32) ; } else { //---------------------------------------------------------------------- // indices are in 64-bit format in the file //---------------------------------------------------------------------- LAGRAPH_OK 
(LAGraph_binary_read ("indices", fp, Gj, e, sizeof (GrB_Index))) ; } //-------------------------------------------------------------------------- // read in the values //-------------------------------------------------------------------------- bool no_edge_weights = (gtype == NULL) ; if (no_edge_weights) { // the input file has no edge weights gtype = GrB_BOOL ; esize = sizeof (bool) ; } Gx = LAGraph_malloc (e, esize) ; if (Gx == NULL) LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ; if (no_edge_weights) { // set all edge weights to boolean true bool *Gbool = (bool *) Gx ; #pragma omp parallel for schedule(static) for (GrB_Index p = 0 ; p < e ; p++) { Gbool [p] = true ; } } else { // read in the edge weights LAGRAPH_OK (LAGraph_binary_read ("edgeweights", fp, Gx, e, esize)) ; } //-------------------------------------------------------------------------- // import the data into the GrB_Matrix //-------------------------------------------------------------------------- LAGRAPH_OK (GxB_Matrix_import_CSR (G, gtype, n, n, e, -1, &Gp, &Gj, &Gx, NULL)) ; //-------------------------------------------------------------------------- // close the file and return result //-------------------------------------------------------------------------- fclose (fp) ; return (GrB_SUCCESS) ; }
Parallelizer.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H

namespace Eigen {

namespace internal {

/** \internal
  * Getter/setter for the process-wide max-thread setting.
  * SetAction stores *v; GetAction writes the stored value into *v
  * (falling back to omp_get_max_threads() when unset, or 1 without OpenMP).
  * NOTE(review): the static is not synchronized — concurrent Set/Get is
  * presumably expected to happen only during initialization. */
inline void manage_multi_threading(Action action, int* v)
{
  // -1 means "not set yet"; then GetAction defers to OpenMP's default.
  static int m_maxThreads = -1;
  EIGEN_UNUSED_VARIABLE(m_maxThreads);

  if(action==SetAction)
  {
    eigen_internal_assert(v!=0);
    m_maxThreads = *v;
  }
  else if(action==GetAction)
  {
    eigen_internal_assert(v!=0);
    #ifdef EIGEN_HAS_OPENMP
    if(m_maxThreads>0)
      *v = m_maxThreads;
    else
      *v = omp_get_max_threads();
    #else
    *v = 1;
    #endif
  }
  else
  {
    eigen_internal_assert(false);
  }
}

}

/** Must be called first when calling Eigen from multiple threads.
  * Touches the lazily-initialized statics (thread settings, cache sizes)
  * so that later concurrent reads do not race on their first init. */
inline void initParallel()
{
  int nbt;
  internal::manage_multi_threading(GetAction, &nbt);
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
}

/** \returns the max number of threads reserved for Eigen
  * \sa setNbThreads */
inline int nbThreads()
{
  int ret;
  internal::manage_multi_threading(GetAction, &ret);
  return ret;
}

/** Sets the max number of threads reserved for Eigen
  * \sa nbThreads */
inline void setNbThreads(int v)
{
  internal::manage_multi_threading(SetAction, &v);
}

namespace internal {

// Per-thread bookkeeping shared between the worker threads of one
// parallel GEMM session.  sync/users are written by multiple threads,
// hence volatile (pre-C++11 style synchronization used by the kernels).
template<typename Index> struct GemmParallelInfo
{
  GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}

  Index volatile sync;
  int volatile users;

  // row range of the LHS owned by this thread
  Index lhs_start;
  Index lhs_length;
};

// Runs func over the rows x cols product space, either sequentially or
// split across OpenMP threads.  Condition is a compile-time opt-out;
// transpose indicates a row-major destination (split along the other axis).
template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose)
{
  // TODO when EIGEN_USE_BLAS is defined,
  // we should still enable OMP for other scalar types
#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
  // FIXME the transpose variable is only needed to properly split
  // the matrix product when multithreading is enabled. This is a temporary
  // fix to support row-major destination matrices. This whole
  // parallelizer mechanism has to be redesigned anyway.
  EIGEN_UNUSED_VARIABLE(depth);
  EIGEN_UNUSED_VARIABLE(transpose);
  func(0,rows, 0,cols);
#else
  // Dynamically check whether we should enable or disable OpenMP.
  // The conditions are:
  //  - the max number of threads we can create is greater than 1
  //  - we are not already in a parallel code
  //  - the sizes are large enough

  // compute the maximal number of threads from the size of the product:
  // This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once.
  Index size = transpose ? rows : cols;
  Index pb_max_threads = std::max<Index>(1,size / Functor::Traits::nr);

  // compute the maximal number of threads from the total amount of work:
  double work = static_cast<double>(rows) * static_cast<double>(cols) *
      static_cast<double>(depth);
  double kMinTaskSize = 50000;  // FIXME improve this heuristic.
  pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, work / kMinTaskSize));

  // compute the number of threads we are going to use
  Index threads = std::min<Index>(nbThreads(), pb_max_threads);

  // if multi-threading is explicitly disabled, not useful, or if we already are in a parallel session,
  // then abort multi-threading
  // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
  if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
    return func(0,rows, 0,cols);

  Eigen::initParallel();
  func.initParallelSession(threads);

  if(transpose)
    std::swap(rows,cols);

  // one GemmParallelInfo slot per worker, on an aligned stack buffer
  ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0);

  int errorCount = 0;
  #pragma omp parallel num_threads(threads) reduction(+: errorCount)
  {
    Index i = omp_get_thread_num();
    // Note that the actual number of threads might be lower than the number of requested ones.
    Index actual_threads = omp_get_num_threads();

    // column blocks rounded down to a multiple of 4; row blocks rounded
    // down to a multiple of the kernel's mr — the last thread takes the
    // remainder in both directions.
    Index blockCols = (cols / actual_threads) & ~Index(0x3);
    Index blockRows = (rows / actual_threads);
    blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;

    Index r0 = i*blockRows;
    Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;

    Index c0 = i*blockCols;
    Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;

    info[i].lhs_start = r0;
    info[i].lhs_length = actualBlockRows;

    EIGEN_TRY {
      if(transpose) func(c0, actualBlockCols, 0, rows, info);
      else          func(0, rows, c0, actualBlockCols, info);
    } EIGEN_CATCH(...) {
      // exceptions must not escape an omp region; count and report after
      ++errorCount;
    }
  }
  if (errorCount) printf("assert exception\n"); //EIGEN_THROW_X(Eigen::eigen_assert_exception());
#endif
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PARALLELIZER_H
critical2.c
// PASS: *
// RUN: ${CATO_ROOT}/src/scripts/cexecute_pass.py %s -o %t
// RUN: diff <(mpirun -np 4 %t) %s.reference_output
#include <omp.h>
#include <stdio.h>
#include <unistd.h>

// Staggers four threads with sleeps, then funnels them through an OpenMP
// critical section.  The printed ordering verifies that the critical
// section really serializes the threads.
int main()
{
    int i;
#pragma omp parallel shared(i)
    {
        const int tid = omp_get_thread_num();

        // Stagger arrival times so the threads line up as 0, 1, 2, 3.
        const unsigned pre_delay[4] = {0u, 1u, 4u, 5u};
        if (tid >= 0 && tid <= 3) {
            sleep(pre_delay[tid]);
        }

        // expected ordering here: 0, 1, 2, 3
        printf("Thread %i before critical\n", tid);

#pragma omp critical
        {
            // Threads 0 and 2 hold the section for 2 s; threads 1 and 3
            // only for 10 us.  If mutual exclusion failed, the short
            // holders would overtake the long ones.
            if (tid == 0 || tid == 2) {
                sleep(2);
            } else if (tid == 1 || tid == 3) {
                usleep(10);
            }
        }

        // expected ordering here: 0, 1, 2, 3 — if the critical section
        // does not work, 1 "overtakes" 0 (and 3 overtakes 2)
        printf("Thread %i after critical\n", tid);
    }
}
quantized_conv2d.h
/* Copyright 2018 The Blueoil Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================*/ #ifndef DLK_FUNC_QUANTIZED_CONV2D_H_INCLUDED #define DLK_FUNC_QUANTIZED_CONV2D_H_INCLUDED #include <vector> #include <memory> #include <stdexcept> #include "global.h" #include "tensor_view.h" #include "tensor_convert.h" #include "operators.h" #include "time_measurement.h" #include "func/impl/quantized_conv2d_tiling.h" #include "func/impl/quantized_conv2d_kn2row.h" #include "func/impl/quantized_conv2d_accelerator.h" #ifdef _OPENMP #include <omp.h> #endif template <typename T_input, MemoryLayout layout_input, typename T_kernel, MemoryLayout layout_kernel> void QuantizedConv2D( const TensorView<QuantizedPacked<T_input>, layout_input>& input, const TensorView<QuantizedPacked<T_kernel>, layout_kernel>& kernel, binary_convolution_parameters p) { Measurement::Start("QuantizedConv2D"); constexpr T_UINT TilingInTypeBitWidth = dlk::impl::tiling_input_elem_t::BitCount; T_UINT kh = p.normal_conv_params.kernel_height; T_UINT kw = p.normal_conv_params.kernel_width; T_UINT padding = p.normal_conv_params.padding; T_UINT ih = p.normal_conv_params.input_height; T_UINT iw = p.normal_conv_params.input_width; T_UINT ic = p.normal_conv_params.input_channels; T_UINT oc = p.normal_conv_params.output_channels; T_UINT maxa = (1 << p.n_bit) - 1; auto size = oc * ih * iw; if (p.device_output_buf == nullptr) p.device_output_buf = new 
BIN_CONV_OUTPUT[size](); assert(kh == kw); // kernel rectangle must be square assert(kh % 2 == 1); // kernel size must be odd assert(1 <= kh && kh <= 5); // Only 1x1, 3x3, 5x5 are supported assert(ic * kh * kw * maxa <= std::numeric_limits<BIN_CONV_OUTPUT>::max()); // overflow check #ifdef RUN_ON_FPGA dlk::impl::tca_input_t::tensor_info_t<std::size_t> shape = { (ic + QUANTIZED_PACKED::BitCount - 1) / QUANTIZED_PACKED::BitCount, ih, iw, p.bin_input_bitwidth, QUANTIZED_PACKED::BitCount }; dlk::impl::tca_input_t tmp((QUANTIZED_PACKED*)p.device_input_buf, shape); convert_tensor(input, tmp); dlk::impl::TCAConv2d(tmp, kernel, p); #elif defined USE_NEON || defined USE_AVX dlk::impl::tiling_input_t::tensor_info_t<std::size_t> shape = { ic / TilingInTypeBitWidth, ih, iw, p.bin_input_bitwidth, TilingInTypeBitWidth }; dlk::impl::tiling_input_t tmp(reinterpret_cast<dlk::impl::tiling_input_elem_t*>(p.device_input_buf), shape); convert_tensor(input, tmp); dlk::impl::QuantizedConv2DTiling(tmp, kernel, p); #else dlk::impl::kn2row_input_t::tensor_info_t<std::size_t> shape = { ih, iw, ic / QUANTIZED_PACKED::BitCount, p.bin_input_bitwidth, QUANTIZED_PACKED::BitCount }; dlk::impl::kn2row_input_t tmp(reinterpret_cast<QUANTIZED_PACKED*>(p.device_input_buf), shape); convert_tensor(input, tmp); dlk::impl::QuantizedConv2DKn2Row(tmp, kernel, p); #endif Measurement::Stop(); } template <typename T_input, MemoryLayout layout_input, typename T_kernel, MemoryLayout layout_kernel> void func_QuantizedConv2D( const TensorView<QuantizedPacked<T_input>, layout_input>& input, const TensorView<QuantizedPacked<T_kernel>, layout_kernel>& kernel, const TensorView<T_FLOAT, MemoryLayout::NHWC>& output, const T_FLOAT scaling_factor, const binary_convolution_parameters& p) { QuantizedConv2D(input, kernel, p); Measurement::Start("QuantizedConv2D_ApplyScalingFactor"); unsigned out_elems = p.normal_conv_params.output_height * p.normal_conv_params.output_width * p.normal_conv_params.output_channels; // temporary: 
(2^n - 1) * (max - min) const T_FLOAT post_qtz_factor = 2.0f / 3.0f; const T_FLOAT coeff = scaling_factor * post_qtz_factor; size_t b = 32; auto &ncp(p.normal_conv_params); auto true_out_channels = output.get_shape()[3]; auto channel_blocks = true_out_channels / b; size_t area = ncp.output_height * ncp.output_width; auto out_buf = reinterpret_cast<VOLATILE_IF_FPGA BIN_CONV_OUTPUT*>(p.device_output_buf); #pragma omp parallel for for (size_t hw = 0; hw < area; ++hw) { size_t out_index = hw * true_out_channels; for (size_t s = 0; s < channel_blocks; ++s) for (size_t d = 0; d < b; ++d) output.data()[out_index++] = coeff * out_buf[hw * b + s * (area * b) + d]; for (size_t d = 0; d < true_out_channels - channel_blocks*b; ++d) output.data()[out_index++] = coeff * out_buf[hw * b + channel_blocks * (area * b) + d]; } Measurement::Stop(); } template <typename T_input, MemoryLayout layout_input, typename T_kernel, MemoryLayout layout_kernel> void func_QuantizedConv2D( const TensorView<QuantizedPacked<T_input>, layout_input>& input, const TensorView<QuantizedPacked<T_kernel>, layout_kernel>& kernel, const TensorView<T_FLOAT, MemoryLayout::NHWC>& output, T_FLOAT scaling_factor[], binary_convolution_parameters p) { QuantizedConv2D(input, kernel, p); unsigned out_elems = p.normal_conv_params.output_height * p.normal_conv_params.output_width; unsigned out_channels = p.normal_conv_params.output_channels; size_t b = 32; auto& ncp(p.normal_conv_params); auto true_out_channels = output.get_shape()[3]; auto channel_blocks = true_out_channels / b; // temporary: (2^n - 1) * (max - min) T_FLOAT post_qtz_factor = 2.0 / 3.0; Measurement::Start("QuantizedConv2D_ApplyScalingFactor"); size_t area = ncp.output_height * ncp.output_width; auto out_buf = reinterpret_cast<VOLATILE_IF_FPGA BIN_CONV_OUTPUT*>(p.device_output_buf); #pragma omp parallel for for (size_t hw = 0; hw < area; ++hw) { size_t out_index = hw * true_out_channels; for (size_t s = 0; s < channel_blocks; ++s) for (size_t d = 0; d < 
b; ++d) output.data()[out_index++] = (scaling_factor[s*b + d] * post_qtz_factor) * out_buf[hw * b + s * (area * b) + d]; for (size_t d = 0; d < true_out_channels - channel_blocks*b; ++d) output.data()[out_index++] = (scaling_factor[channel_blocks*b + d] * post_qtz_factor) * out_buf[hw * b + channel_blocks * (area * b) + d]; } Measurement::Stop(); } template <typename T_input, MemoryLayout layout_input, typename T_kernel, MemoryLayout layout_kernel> void func_QuantizedConv2DWithThreshold( const TensorView<QuantizedPacked<T_input>, layout_input>& input, const TensorView<QuantizedPacked<T_kernel>, layout_kernel>& kernel, const TensorView<QUANTIZED_PACKED, MemoryLayout::ChHWBCl>& output, const T_FLOAT scaling_factor, const binary_convolution_parameters& p) { QuantizedConv2D(input, kernel, p); unsigned out_elems = p.normal_conv_params.output_height * p.normal_conv_params.output_width * p.normal_conv_params.output_channels; const auto bytes = out_elems / 8 * p.n_bit; Measurement::Start("Memcpy"); #ifdef _OPENMP const int num_blocks = bytes / sizeof(QUANTIZED_PACKED); const int num_threads = omp_get_max_threads(); const int chunk_size = (num_blocks + num_threads - 1) / num_threads; #pragma omp parallel for for (int i = 0; i < num_blocks; i += chunk_size) { memcpy(output.data() + i, (QUANTIZED_PACKED*)(p.device_output_buf) + i, std::min(chunk_size, num_blocks - i) * sizeof(QUANTIZED_PACKED)); } #else memcpy(output.data(), (void*)p.device_output_buf, bytes); #endif Measurement::Stop(); } template <typename T_input, MemoryLayout layout_input, typename T_kernel, MemoryLayout layout_kernel> void func_QuantizedConv2DWithThreshold( const TensorView<QuantizedPacked<T_input>, layout_input>& input, const TensorView<QuantizedPacked<T_kernel>, layout_kernel>& kernel, const TensorView<T_FLOAT, MemoryLayout::NHWC>& output, const T_FLOAT scaling_factor, const binary_convolution_parameters& p) { QuantizedConv2D(input, kernel, p); Measurement::Start("linear_to_float"); T_FLOAT n = (1 << 
p.n_bit) - 1; const auto& np = p.normal_conv_params; const auto out_height = np.output_height; const auto out_width = np.output_width; const auto out_channels = np.output_channels; const auto true_out_channels = output.get_shape()[3]; auto out_buf = reinterpret_cast<VOLATILE_IF_FPGA QUANTIZED_PACKED::base_t*>(p.device_output_buf); for (unsigned r = 0; r < out_height; ++r) { for (unsigned c = 0; c < out_width; ++c) { for (unsigned d = 0; d < true_out_channels; ++d) { const auto i = r * out_width * p.n_bit + c * p.n_bit; QUANTIZED_PACKED::base_t bits = 0; for (unsigned digit = 0; digit < p.n_bit; ++digit) { bits |= ((out_buf[i + digit] >> d) & 1) << digit; } T_FLOAT tmp = (T_FLOAT)bits; tmp = tmp / n; output(0, r, c, d) = tmp * p.max_value; } } } Measurement::Stop(); } template <typename T_input, MemoryLayout layout_input, typename T_kernel, MemoryLayout layout_kernel> void func_QuantizedConv2DWithThreshold( const TensorView<QuantizedPacked<T_input>, layout_input>& input, const TensorView<QuantizedPacked<T_kernel>, layout_kernel>& kernel, const TensorView<QUANTIZED_PACKED, MemoryLayout::ChHWBCl>& output, const T_FLOAT scaling_factor[], const binary_convolution_parameters& p) { func_QuantizedConv2DWithThreshold(input, kernel, output, scaling_factor[0], p); } template <typename T_input, MemoryLayout layout_input, typename T_kernel, MemoryLayout layout_kernel> void func_QuantizedConv2DWithThreshold( const TensorView<T_input, layout_input>& input, const TensorView<T_kernel, layout_kernel>& kernel, const TensorView<T_FLOAT, MemoryLayout::NHWC>& output, T_FLOAT scaling_factor[], binary_convolution_parameters p) { func_QuantizedConv2DWithThreshold(input, kernel, output, scaling_factor[0], p); } #endif // DLK_FUNC_QUANTIZED_CONV2D_H_INCLUDED
app.c
/** * Christina Giannoula * cgiannoula: christina.giann@gmail.com */ #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <string.h> #include <dpu.h> #include <dpu_log.h> #include <unistd.h> #include <getopt.h> #include <assert.h> #include <math.h> #include <omp.h> #include "../support/common.h" #include "../support/matrix.h" #include "../support/params.h" #include "../support/partition.h" #include "../support/timer.h" #include "../support/utils.h" // Define the DPU Binary path as DPU_BINARY here. #ifndef DPU_BINARY #define DPU_BINARY "./bin/spmv_dpu" #endif #define DPU_CAPACITY (64 << 20) // A DPU's capacity is 64 MB /* * Main Structures: * 1. Matrices * 2. Input vector * 3. Output vector * 4. Help structures for data partitioning */ static struct BDBCSRMatrix* A; static struct BDCSRMatrix* B; static struct COOMatrix* C; static val_dt* x; static val_dt* y; static val_dt* z; static struct partition_info_t *part_info; /** * @brief Specific information for each DPU */ struct dpu_info_t { uint32_t block_rows_per_dpu; uint32_t prev_block_rows_dpu; uint32_t cols_per_dpu; uint32_t block_start; uint32_t blocks; uint32_t blocks_pad; uint32_t prev_blocks_dpu; uint32_t ptr_offset; uint32_t merge; }; struct dpu_info_t *dpu_info; /** * @brief find the dpus_per_row_partition * @param factor n to create partitions * @param column_partitions to create vert_partitions * @param horz_partitions to return the 2D partitioning */ void find_partitions(uint32_t n, uint32_t *horz_partitions, uint32_t vert_partitions) { uint32_t dpus_per_vert_partition = n / vert_partitions; *horz_partitions = dpus_per_vert_partition; } /** * @brief initialize input vector * @param pointer to input vector and vector size */ void init_vector(val_dt* vec, uint32_t size) { for(unsigned int i = 0; i < size; ++i) { vec[i] = (val_dt) (i%4+1); } } /** * @brief compute output in the host CPU */ static void spmv_host(val_dt* y, struct BDBCSRMatrix *A, val_dt* x) { uint64_t total_blocks = 0; for 
(uint32_t c = 0; c < A->vert_partitions; c++) { uint32_t ptr_offset = c * (A->num_block_rows + 1); for(uint64_t n=0; n < A->num_block_rows; n++) { for(uint64_t i=A->browptr[ptr_offset + n]; i<A->browptr[ptr_offset + n+1]; i++){ uint64_t j = A->bcolind[total_blocks + i]; for(uint64_t blr=0; blr < A->row_block_size; blr++){ val_dt acc = 0; for(uint64_t blc=0; blc < A->col_block_size; blc++) { acc += A->bval[(total_blocks + i) * A->col_block_size * A->row_block_size + blr * A->col_block_size + blc] * x[A->vert_tile_widths[c] + j * A->col_block_size + blc]; } y[n * A->row_block_size + blr] += acc; } } } total_blocks += A->blocks_per_vert_partition[c]; } } /** * @brief main of the host application */ int main(int argc, char **argv) { struct Params p = input_params(argc, argv); struct dpu_set_t dpu_set, dpu; uint32_t nr_of_dpus; uint32_t nr_of_ranks; // Allocate DPUs and load binary DPU_ASSERT(dpu_alloc(NR_DPUS, NULL, &dpu_set)); DPU_ASSERT(dpu_load(dpu_set, DPU_BINARY, NULL)); DPU_ASSERT(dpu_get_nr_dpus(dpu_set, &nr_of_dpus)); DPU_ASSERT(dpu_get_nr_ranks(dpu_set, &nr_of_ranks)); printf("[INFO] Allocated %d DPU(s)\n", nr_of_dpus); printf("[INFO] Allocated %d Rank(s)\n", nr_of_ranks); printf("[INFO] Allocated %d TASKLET(s) per DPU\n", NR_TASKLETS); unsigned int i; // Initialize input data C = readCOOMatrix(p.fileName); sortCOOMatrix(C); uint32_t horz_partitions = 0; uint32_t vert_partitions = p.vert_partitions; find_partitions(nr_of_dpus, &horz_partitions, p.vert_partitions); printf("[INFO] %dx%d Matrix Partitioning\n\n", horz_partitions, vert_partitions); B = coo2bdcsr(C, horz_partitions, vert_partitions); freeCOOMatrix(C); A = bdcsr2bdbcsr(B, p.row_blsize, p.col_blsize); countNNZperBlockBDBCSRMatrix(A); freeBDCSRMatrix(B); // Initialize partition data part_info = partition_init(A, nr_of_dpus, p.max_nranks, NR_TASKLETS); #if FG_TRANS struct dpu_set_t rank; uint32_t each_rank; DPU_RANK_FOREACH(dpu_set, rank, each_rank){ uint32_t nr_dpus_in_rank; 
DPU_ASSERT(dpu_get_nr_dpus(rank, &nr_dpus_in_rank)); part_info->active_dpus_per_rank[each_rank+1] = nr_dpus_in_rank; } int sum = 0; for(int i=0; i < p.max_nranks+1; i++) { part_info->accum_dpus_ranks[i] = part_info->active_dpus_per_rank[i] + sum; sum += part_info->active_dpus_per_rank[i]; } #endif // Initialize help data - Padding needed uint32_t ncols_pad = A->ncols + A->max_tile_width + A->col_block_size; uint32_t tile_width_pad = A->num_block_cols * A->col_block_size; uint32_t nrows_pad = A->nrows + A->row_block_size; if (ncols_pad % (8 / byte_dt) != 0) ncols_pad = ncols_pad + ((8 / byte_dt) - (ncols_pad % (8 / byte_dt))); if (tile_width_pad % (8 / byte_dt) != 0) tile_width_pad = tile_width_pad + ((8 / byte_dt) - (tile_width_pad % (8 / byte_dt))); #if INT8 if (tile_width_pad % 2 != 0) tile_width_pad++; #endif if (nrows_pad % (8 / byte_dt) != 0) nrows_pad = nrows_pad + ((8 / byte_dt) - (nrows_pad % (8 / byte_dt))); // Allocate input vector x = (val_dt *) malloc(ncols_pad * sizeof(val_dt)); // Allocate output vector z = (val_dt *) calloc(nrows_pad, sizeof(val_dt)); // Initialize input vector with arbitrary data init_vector(x, ncols_pad); // Load-balance blocks (block-row granularity) across DPUs of the same vertical partition partition_by_block(A, part_info); // Initialize help data dpu_info = (struct dpu_info_t *) malloc(nr_of_dpus * sizeof(struct dpu_info_t)); dpu_arguments_t *input_args = (dpu_arguments_t *) malloc(nr_of_dpus * sizeof(dpu_arguments_t)); // Max limits for parallel transfers uint64_t max_block_rows_per_dpu = 0; uint64_t max_blocks_per_dpu = 0; // Timer for measurements Timer timer; i = 0; uint32_t acc_blocks = 0; uint32_t total_blocks = 0; DPU_FOREACH(dpu_set, dpu, i) { // Find padding for block rows and non-zero elements needed for CPU-DPU transfers uint32_t tile_horz_indx = i % A->horz_partitions; uint32_t tile_vert_indx = i / A->horz_partitions; uint32_t block_rows_per_dpu = part_info->brow_split[tile_vert_indx * (A->horz_partitions + 1) + 
tile_horz_indx + 1] - part_info->brow_split[tile_vert_indx * (A->horz_partitions + 1) + tile_horz_indx]; uint32_t block_rows_per_dpu_pad = part_info->brow_split[tile_vert_indx * (A->horz_partitions + 1) + tile_horz_indx + 1] - part_info->brow_split[tile_vert_indx * (A->horz_partitions + 1) + tile_horz_indx] + 1; uint32_t prev_block_rows_dpu = part_info->brow_split[tile_vert_indx * (A->horz_partitions + 1) + tile_horz_indx]; if (block_rows_per_dpu_pad > max_block_rows_per_dpu) max_block_rows_per_dpu = block_rows_per_dpu_pad; unsigned int blocks, blocks_pad; blocks = A->browptr[tile_vert_indx * (A->num_block_rows + 1) + prev_block_rows_dpu + block_rows_per_dpu] - A->browptr[tile_vert_indx * (A->num_block_rows + 1) + prev_block_rows_dpu]; assert(blocks == part_info->blocks_dpu[i]); if (blocks % 2 != 0) // bcolind blocks_pad = blocks + 1; else blocks_pad = blocks; if (blocks_pad > max_blocks_per_dpu) max_blocks_per_dpu = blocks_pad; // Keep information per DPU dpu_info[i].block_rows_per_dpu = block_rows_per_dpu; dpu_info[i].prev_block_rows_dpu = prev_block_rows_dpu; dpu_info[i].cols_per_dpu = A->vert_tile_widths[tile_vert_indx+1] - A->vert_tile_widths[tile_vert_indx]; dpu_info[i].blocks = blocks; dpu_info[i].blocks_pad = blocks_pad; dpu_info[i].prev_blocks_dpu = total_blocks; dpu_info[i].ptr_offset = tile_vert_indx * (A->num_block_rows + 1) + prev_block_rows_dpu; // Find input arguments per DPU input_args[i].block_rows = block_rows_per_dpu; input_args[i].tcols = tile_width_pad; input_args[i].row_block_size = A->row_block_size; input_args[i].col_block_size = A->col_block_size; //input_args[i].blocks = blocks; #if BLNC_TSKLT_BLOCK // Load-balance blocks across tasklets partition_tsklt_by_block(A, part_info, i, NR_TASKLETS, nr_of_dpus, acc_blocks, prev_block_rows_dpu, block_rows_per_dpu, tile_vert_indx); #else // Load-balance nnzs across tasklets partition_tsklt_by_nnz(A, part_info, i, NR_TASKLETS, nr_of_dpus, acc_blocks, prev_block_rows_dpu, block_rows_per_dpu, 
tile_vert_indx); #endif uint32_t t; for (t = 0; t < NR_TASKLETS; t++) { // Find input arguments per tasklet input_args[i].start_block_row[t] = part_info->brow_split_tasklet[i * (NR_TASKLETS+2) + t]; input_args[i].end_block_row[t] = part_info->brow_split_tasklet[i * (NR_TASKLETS+2) + (t+1)]; } if (tile_horz_indx == (A->horz_partitions - 1)) acc_blocks += A->blocks_per_vert_partition[tile_vert_indx]; total_blocks += part_info->blocks_dpu[i]; } #if FG_TRANS // Find max number of block rows (subset of elements of the output vector) among DPUs of each rank DPU_RANK_FOREACH(dpu_set, rank, each_rank){ uint32_t max_block_rows_cur_rank = 0; uint32_t max_cols_cur_rank = 0; uint32_t nr_dpus_in_rank; DPU_ASSERT(dpu_get_nr_dpus(rank, &nr_dpus_in_rank)); uint32_t start_dpu = part_info->accum_dpus_ranks[each_rank]; for (int k = 0; k < nr_dpus_in_rank; k++) { if (start_dpu + k >= nr_of_dpus) break; if (dpu_info[start_dpu + k].block_rows_per_dpu > max_block_rows_cur_rank) max_block_rows_cur_rank = dpu_info[start_dpu + k].block_rows_per_dpu; if (dpu_info[start_dpu + k].cols_per_dpu > max_cols_cur_rank) max_cols_cur_rank = dpu_info[start_dpu + k].cols_per_dpu; } // Padding max_cols_cur_rank = ((max_cols_cur_rank + A->col_block_size - 1) / A->col_block_size) * A->col_block_size; if (max_block_rows_cur_rank % 2 != 0) max_block_rows_cur_rank++; if (max_cols_cur_rank % (8 / byte_dt) != 0) max_cols_cur_rank = max_cols_cur_rank + ((8 / byte_dt) - (max_cols_cur_rank % (8 / byte_dt))); part_info->max_block_rows_per_rank[each_rank] = (uint32_t) max_block_rows_cur_rank; part_info->max_cols_per_rank[each_rank] = (uint32_t) max_cols_cur_rank; } #endif // Initializations for parallel transfers with padding needed if (max_block_rows_per_dpu % 2 != 0) max_block_rows_per_dpu++; if (max_blocks_per_dpu % 2 != 0) max_blocks_per_dpu++; // Re-allocations for padding needed A->browptr = (uint32_t *) realloc(A->browptr, (max_block_rows_per_dpu * nr_of_dpus * sizeof(uint32_t))); A->bcolind = (uint32_t *) 
realloc(A->bcolind, (max_blocks_per_dpu * nr_of_dpus * sizeof(uint32_t))); A->bval = (val_dt *) realloc(A->bval, (max_blocks_per_dpu * A->row_block_size * A->col_block_size * nr_of_dpus * sizeof(val_dt))); y = (val_dt *) calloc((uint64_t) ((uint64_t) nr_of_dpus * (uint64_t) max_block_rows_per_dpu * A->row_block_size), sizeof(val_dt)); // Count total number of bytes to be transfered in MRAM of DPU unsigned long int total_bytes; total_bytes = ((max_block_rows_per_dpu) * sizeof(uint32_t)) + (max_blocks_per_dpu * sizeof(uint32_t)) + (max_blocks_per_dpu * A->row_block_size * A->col_block_size * sizeof(val_dt)) + (tile_width_pad * sizeof(val_dt)) + (max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt)); assert(total_bytes <= DPU_CAPACITY && "Bytes needed exceeded MRAM size"); // Copy input arguments to DPUs i = 0; DPU_FOREACH(dpu_set, dpu, i) { input_args[i].max_block_rows = max_block_rows_per_dpu; input_args[i].max_blocks = max_blocks_per_dpu; DPU_ASSERT(dpu_prepare_xfer(dpu, input_args + i)); } DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, "DPU_INPUT_ARGUMENTS", 0, sizeof(dpu_arguments_t), DPU_XFER_DEFAULT)); // Copy input matrix to DPUs startTimer(&timer, 0); // Copy Browptr i = 0; DPU_FOREACH(dpu_set, dpu, i) { DPU_ASSERT(dpu_prepare_xfer(dpu, A->browptr + dpu_info[i].ptr_offset)); } DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, (max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt) + tile_width_pad * sizeof(val_dt)), max_block_rows_per_dpu * sizeof(uint32_t), DPU_XFER_DEFAULT)); // Copy Bcolind i = 0; total_blocks = 0; DPU_FOREACH(dpu_set, dpu, i) { DPU_ASSERT(dpu_prepare_xfer(dpu, A->bcolind + total_blocks)); total_blocks += part_info->blocks_dpu[i]; } DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt) + tile_width_pad * sizeof(val_dt) + max_block_rows_per_dpu * sizeof(uint32_t), max_blocks_per_dpu * sizeof(uint32_t), 
DPU_XFER_DEFAULT)); // Copy Bvalues i = 0; total_blocks = 0; DPU_FOREACH(dpu_set, dpu, i) { DPU_ASSERT(dpu_prepare_xfer(dpu, A->bval + ((uint64_t) total_blocks * A->row_block_size * A->col_block_size))); total_blocks += part_info->blocks_dpu[i]; } DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt) + tile_width_pad * sizeof(val_dt) + max_block_rows_per_dpu * sizeof(uint32_t) + max_blocks_per_dpu * sizeof(uint32_t), max_blocks_per_dpu * A->row_block_size * A->col_block_size * sizeof(val_dt), DPU_XFER_DEFAULT)); stopTimer(&timer, 0); // Copy input vector to DPUs startTimer(&timer, 1); #if CG_TRANS // Coarse-grained data transfers in the input vector i = 0; DPU_FOREACH(dpu_set, dpu, i) { uint32_t tile_vert_indx = i / A->horz_partitions; DPU_ASSERT(dpu_prepare_xfer(dpu, x + A->vert_tile_widths[tile_vert_indx])); } DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt), tile_width_pad * sizeof(val_dt), DPU_XFER_DEFAULT)); #endif #if FG_TRANS // Fine-grained data transfers in the input vector at rank granularity i = 0; DPU_FOREACH(dpu_set, dpu, i) { uint32_t tile_vert_indx = i / A->horz_partitions; DPU_ASSERT(dpu_prepare_xfer(dpu, x + A->vert_tile_widths[tile_vert_indx])); } i = 0; //struct dpu_set_t rank; DPU_RANK_FOREACH(dpu_set, rank) { DPU_ASSERT(dpu_push_xfer(rank, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt), part_info->max_cols_per_rank[i] * sizeof(val_dt), DPU_XFER_ASYNC)); i++; } DPU_ASSERT(dpu_sync(dpu_set)); #endif stopTimer(&timer, 1); // Run kernel on DPUs startTimer(&timer, 2); DPU_ASSERT(dpu_launch(dpu_set, DPU_SYNCHRONOUS)); stopTimer(&timer, 2); #if LOG // Display DPU Log (default: disabled) DPU_FOREACH(dpu_set, dpu) { DPU_ASSERT(dpulog_read_for_dpu(dpu.dpu, stdout)); } #endif // Retrieve results for output vector from DPUs 
startTimer(&timer, 3); #if CG_TRANS // Coarse-grained data transfers in the output vector i = 0; DPU_FOREACH(dpu_set, dpu, i) { DPU_ASSERT(dpu_prepare_xfer(dpu, y + (i * max_block_rows_per_dpu * A->row_block_size))); } DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_FROM_DPU, DPU_MRAM_HEAP_POINTER_NAME, 0, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt), DPU_XFER_DEFAULT)); #endif #if FG_TRANS // Fine-grained data transfers in the output vector at rank granularity i = 0; DPU_FOREACH(dpu_set, dpu, i) { DPU_ASSERT(dpu_prepare_xfer(dpu, y + (i * max_block_rows_per_dpu * A->row_block_size))); } i = 0; DPU_RANK_FOREACH(dpu_set, rank) { DPU_ASSERT(dpu_push_xfer(rank, DPU_XFER_FROM_DPU, DPU_MRAM_HEAP_POINTER_NAME, 0, part_info->max_block_rows_per_rank[i] * A->row_block_size * sizeof(val_dt), DPU_XFER_ASYNC)); i++; } DPU_ASSERT(dpu_sync(dpu_set)); #endif stopTimer(&timer, 3); // Merge partial results to the host CPU startTimer(&timer, 4); uint32_t r, c, t, b; for (c = 0; c < A->vert_partitions; c++) { for (r = 0; r < A->horz_partitions; r++) { #pragma omp parallel for num_threads(p.nthreads) shared(A, z, y, max_block_rows_per_dpu, r, c) private(t, b) for (t = 0; t < part_info->brow_split[c * (A->horz_partitions + 1) + r+1] - part_info->brow_split[c * (A->horz_partitions + 1) + r]; t++) { for (b = 0; b < A->row_block_size; b++) { z[(part_info->brow_split[c * (A->horz_partitions + 1) + r] + t) * A->row_block_size + b] += y[(c * A->horz_partitions + r) * max_block_rows_per_dpu * A->row_block_size + t * A->row_block_size + b]; } } } } stopTimer(&timer, 4); // Print timing results printf("\n"); printf("Load Matrix "); printTimer(&timer, 0); printf("Load Input Vector "); printTimer(&timer, 1); printf("Kernel "); printTimer(&timer, 2); printf("Retrieve Output Vector "); printTimer(&timer, 3); printf("Merge Partial Results "); printTimer(&timer, 4); printf("\n\n"); #if CHECK_CORR // Check output startTimer(&timer, 4); val_dt *y_host = (val_dt *) calloc(nrows_pad, 
sizeof(val_dt)); spmv_host(y_host, A, x); bool status = true; i = 0; for (i = 0; i < A->nrows; i++) { if(y_host[i] != z[i]) { status = false; } } if (status) { printf("[" ANSI_COLOR_GREEN "OK" ANSI_COLOR_RESET "] Outputs are equal\n"); } else { printf("[" ANSI_COLOR_RED "ERROR" ANSI_COLOR_RESET "] Outputs differ!\n"); } free(y_host); #endif // Deallocation freeBDBCSRMatrix(A); free(x); free(z); free(y); partition_free(part_info); DPU_ASSERT(dpu_free(dpu_set)); return 0; }
GB_unop__identity_int16_bool.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): this file is machine-generated from a template; fixes belong
// in the generator template, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function:  GB_unop_apply__identity_int16_bool
// op(A') function: GB_unop_tran__identity_int16_bool

// C type:   int16_t
// A type:   bool
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = aij

// type of the A matrix entries
#define GB_ATYPE \
    bool

// type of the C matrix entries
#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

// access the pth entry of C
#define GB_CX(p) Cx [p]

// unary operator (identity: z is a plain copy of x)
#define GB_OP(z, x) \
    z = x ;

// casting (bool -> int16_t)
#define GB_CAST(z, aij) \
    int16_t z = (int16_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    bool aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int16_t z = (int16_t) aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (0 here because bool -> int16_t requires a cast, so no raw memcpy is used)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply the unary op entrywise: Cx [p] = (int16_t) Ax [p] for all p < anz.
// Ab is A->b when A is bitmap (entries with Ab [p] == 0 are skipped);
// Ab == NULL means A is full/sparse/hypersparse and every slot is valid.
// Parallelized with OpenMP over nthreads threads.
GrB_Info GB_unop_apply__identity_int16_bool
(
    int16_t *Cx,        // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a bulk memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (bool), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            bool aij = Ax [p] ;
            int16_t z = (int16_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            bool aij = Ax [p] ;
            int16_t z = (int16_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unop_transpose.c and is specialized
// by the GB_* macros defined above (textual #include, not a function call).
GrB_Info GB_unop_tran__identity_int16_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
gbdt.h
#ifndef LIGHTGBM_BOOSTING_GBDT_H_
#define LIGHTGBM_BOOSTING_GBDT_H_

#include <LightGBM/boosting.h>
#include <LightGBM/objective_function.h>
#include <LightGBM/prediction_early_stop.h>
#include <LightGBM/json11.hpp>

#include "score_updater.hpp"

#include <cstdio>
#include <vector>
#include <string>
#include <fstream>
#include <memory>
#include <mutex>
#include <map>

using namespace json11;

namespace LightGBM {

/*!
* \brief GBDT algorithm implementation. including Training, prediction, bagging.
*/
class GBDT : public GBDTBase {
 public:
  /*!
  * \brief Constructor
  */
  GBDT();

  /*!
  * \brief Destructor
  */
  ~GBDT();

  /*!
  * \brief Initialization logic
  * \param gbdt_config Config for boosting
  * \param train_data Training data
  * \param objective_function Training objective function
  * \param training_metrics Training metrics
  */
  void Init(const Config* gbdt_config, const Dataset* train_data,
            const ObjectiveFunction* objective_function,
            const std::vector<const Metric*>& training_metrics) override;

  /*!
  * \brief Merge model from other boosting object. Will insert to the front of current boosting object
  * \param other The boosting object whose trees are prepended to this one
  */
  void MergeFrom(const Boosting* other) override {
    // NOTE(review): assumes `other` is actually a GBDT; reinterpret_cast
    // performs no runtime check — confirm callers only pass GBDT instances.
    auto other_gbdt = reinterpret_cast<const GBDT*>(other);
    // tmp move to other vector
    auto original_models = std::move(models_);
    models_ = std::vector<std::unique_ptr<Tree>>();
    // push model from other first (deep-copy each tree)
    for (const auto& tree : other_gbdt->models_) {
      auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
      models_.push_back(std::move(new_tree));
    }
    num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    // push model in current object
    for (const auto& tree : original_models) {
      auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
      models_.push_back(std::move(new_tree));
    }
    num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
  }

  /*!
  * \brief Shuffle the order of iterations in [start_iter, end_iter) using a
  *        Fisher-Yates pass with a fixed seed (deterministic across runs).
  *        Trees belonging to the same iteration stay together.
  * \param start_iter First iteration to shuffle (clamped to >= 0)
  * \param end_iter One past the last iteration to shuffle (<= 0 means all)
  */
  void ShuffleModels(int start_iter, int end_iter) override {
    int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    start_iter = std::max(0, start_iter);
    if (end_iter <= 0) {
      end_iter = total_iter;
    }
    end_iter = std::min(total_iter, end_iter);
    auto original_models = std::move(models_);
    std::vector<int> indices(total_iter);
    for (int i = 0; i < total_iter; ++i) {
      indices[i] = i;
    }
    // fixed seed so the shuffle is reproducible
    Random tmp_rand(17);
    for (int i = start_iter; i < end_iter - 1; ++i) {
      int j = tmp_rand.NextShort(i + 1, end_iter);
      std::swap(indices[i], indices[j]);
    }
    models_ = std::vector<std::unique_ptr<Tree>>();
    for (int i = 0; i < total_iter; ++i) {
      for (int j = 0; j < num_tree_per_iteration_; ++j) {
        int tree_idx = indices[i] * num_tree_per_iteration_ + j;
        auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get())));
        models_.push_back(std::move(new_tree));
      }
    }
  }

  /*!
  * \brief Reset the training data
  * \param train_data New Training data
  * \param objective_function Training objective function
  * \param training_metrics Training metrics
  */
  void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function,
                         const std::vector<const Metric*>& training_metrics) override;

  /*!
  * \brief Reset Boosting Config
  * \param gbdt_config Config for boosting
  */
  void ResetConfig(const Config* gbdt_config) override;

  /*!
  * \brief Adding a validation dataset
  * \param valid_data Validation dataset
  * \param valid_metrics Metrics for validation dataset
  */
  void AddValidDataset(const Dataset* valid_data,
                       const std::vector<const Metric*>& valid_metrics) override;

  /*!
  * \brief Perform a full training procedure
  * \param snapshot_freq frequency of snapshot
  * \param model_output_path path of model file
  */
  void Train(int snapshot_freq, const std::string& model_output_path) override;

  void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override;

  /*!
  * \brief Training logic
  * \param gradients nullptr for using default objective, otherwise use self-defined boosting
  * \param hessians nullptr for using default objective, otherwise use self-defined boosting
  * \return True if cannot train any more
  */
  virtual bool TrainOneIter(const score_t* gradients, const score_t* hessians) override;

  /*!
  * \brief Rollback one iteration
  */
  void RollbackOneIter() override;

  /*!
  * \brief Get current iteration
  */
  int GetCurrentIteration() const override {
    return static_cast<int>(models_.size()) / num_tree_per_iteration_;
  }

  /*!
  * \brief Can use early stopping for prediction or not
  * \return True if cannot use early stopping for prediction
  */
  bool NeedAccuratePrediction() const override {
    if (objective_function_ == nullptr) {
      return true;
    } else {
      return objective_function_->NeedAccuratePrediction();
    }
  }

  /*!
  * \brief Get evaluation result at data_idx data
  * \param data_idx 0: training data, 1: 1st validation data
  * \return evaluation result
  */
  std::vector<double> GetEvalAt(int data_idx) const override;

  /*!
  * \brief Get current training score
  * \param out_len length of returned score
  * \return training score
  */
  virtual const double* GetTrainingScore(int64_t* out_len) override;

  /*!
  * \brief Get size of prediction at data_idx data
  * \param data_idx 0: training data, 1: 1st validation data
  * \return The size of prediction
  */
  virtual int64_t GetNumPredictAt(int data_idx) const override {
    CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size()));
    data_size_t num_data = train_data_->num_data();
    if (data_idx > 0) {
      // data_idx is 1-based for validation sets
      num_data = valid_score_updater_[data_idx - 1]->num_data();
    }
    return num_data * num_class_;
  }

  /*!
  * \brief Get prediction result at data_idx data
  * \param data_idx 0: training data, 1: 1st validation data
  * \param result used to store prediction result, should allocate memory before call this function
  * \param out_len length of returned score
  */
  void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override;

  /*!
  * \brief Get number of prediction for one data
  * \param num_iteration number of used iterations
  * \param is_pred_leaf True if predicting leaf index
  * \param is_pred_contrib True if predicting feature contribution
  * \return number of prediction
  */
  inline int NumPredictOneRow(int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override {
    int num_preb_in_one_row = num_class_;
    if (is_pred_leaf) {
      // one leaf index per tree per class, capped at num_iteration when positive
      int max_iteration = GetCurrentIteration();
      if (num_iteration > 0) {
        num_preb_in_one_row *= static_cast<int>(std::min(max_iteration, num_iteration));
      } else {
        num_preb_in_one_row *= max_iteration;
      }
    } else if (is_pred_contrib) {
      num_preb_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2);  // +1 for 0-based indexing, +1 for baseline
    }
    return num_preb_in_one_row;
  }

  void PredictRaw(const double* features, double* output,
                  const PredictionEarlyStopInstance* earlyStop) const override;

  void PredictRawByMap(const std::unordered_map<int, double>& features, double* output,
                       const PredictionEarlyStopInstance* early_stop) const override;

  void Predict(const double* features, double* output,
               const PredictionEarlyStopInstance* earlyStop) const override;

  void PredictByMap(const std::unordered_map<int, double>& features, double* output,
                    const PredictionEarlyStopInstance* early_stop) const override;

  void PredictLeafIndex(const double* features, double* output) const override;

  void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override;

  void PredictContrib(const double* features, double* output,
                      const PredictionEarlyStopInstance* earlyStop) const override;

  /*!
  * \brief Dump model to json format string
  * \param start_iteration The model will be saved start from
  * \param num_iteration Number of iterations that want to dump, -1 means dump all
  * \return Json format string of model
  */
  std::string DumpModel(int start_iteration, int num_iteration) const override;

  /*!
  * \brief Translate model to if-else statement
  * \param num_iteration Number of iterations that want to translate, -1 means translate all
  * \return if-else format codes of model
  */
  std::string ModelToIfElse(int num_iteration) const override;

  /*!
  * \brief Translate model to if-else statement
  * \param num_iteration Number of iterations that want to translate, -1 means translate all
  * \param filename Filename that want to save to
  * \return is_finish Is training finished or not
  */
  bool SaveModelToIfElse(int num_iteration, const char* filename) const override;

  /*!
  * \brief Save model to file
  * \param start_iteration The model will be saved start from
  * \param num_iterations Number of model that want to save, -1 means save all
  * \param filename Filename that want to save to
  * \return is_finish Is training finished or not
  */
  virtual bool SaveModelToFile(int start_iteration, int num_iterations, const char* filename) const override;

  /*!
  * \brief Save model to string
  * \param start_iteration The model will be saved start from
  * \param num_iterations Number of model that want to save, -1 means save all
  * \return Non-empty string if succeeded
  */
  virtual std::string SaveModelToString(int start_iteration, int num_iterations) const override;

  /*!
  * \brief Restore from a serialized buffer
  */
  bool LoadModelFromString(const char* buffer, size_t len) override;

  /*!
  * \brief Calculate feature importances
  * \param num_iteration Number of model that want to use for feature importance, -1 means use all
  * \param importance_type: 0 for split, 1 for gain
  * \return vector of feature_importance
  */
  std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override;

  /*!
  * \brief Get max feature index of this model
  * \return Max feature index of this model
  */
  inline int MaxFeatureIdx() const override { return max_feature_idx_; }

  /*!
  * \brief Get feature names of this model
  * \return Feature names of this model
  */
  inline std::vector<std::string> FeatureNames() const override { return feature_names_; }

  /*!
  * \brief Get index of label column
  * \return index of label column
  */
  inline int LabelIdx() const override { return label_idx_; }

  /*!
  * \brief Get number of weak sub-models
  * \return Number of weak sub-models
  */
  inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); }

  /*!
  * \brief Get number of tree per iteration
  * \return number of tree per iteration
  */
  inline int NumModelPerIteration() const override { return num_tree_per_iteration_; }

  /*!
  * \brief Get number of classes
  * \return Number of classes
  */
  inline int NumberOfClasses() const override { return num_class_; }

  /*!
  * \brief Set up prediction state: clamp the number of iterations used and,
  *        for contribution prediction, refresh each tree's max depth.
  */
  inline void InitPredict(int num_iteration, bool is_pred_contrib) override {
    num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    if (num_iteration > 0) {
      num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_);
    }
    if (is_pred_contrib) {
      #pragma omp parallel for schedule(static)
      for (int i = 0; i < static_cast<int>(models_.size()); ++i) {
        models_[i]->RecomputeMaxDepth();
      }
    }
  }

  /*!
  * \brief Get the output value of one leaf (bounds-checked)
  */
  inline double GetLeafValue(int tree_idx, int leaf_idx) const override {
    CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
    CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
    return models_[tree_idx]->LeafOutput(leaf_idx);
  }

  /*!
  * \brief Set the output value of one leaf (bounds-checked)
  */
  inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override {
    CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
    CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
    models_[tree_idx]->SetLeafOutput(leaf_idx, val);
  }

  /*!
  * \brief Get Type name of this boosting object
  */
  virtual const char* SubModelName() const override { return "tree"; }

 protected:
  /*!
  * \brief Print eval result and check early stopping
  */
  virtual bool EvalAndCheckEarlyStopping();

  /*!
  * \brief reset config for bagging
  */
  void ResetBaggingConfig(const Config* config, bool is_change_dataset);

  /*!
  * \brief Implement bagging logic
  * \param iter Current iteration
  */
  virtual void Bagging(int iter);

  /*!
  * \brief Helper function for bagging, used for multi-threading optimization
  * \param start start indice of bagging
  * \param cnt count
  * \param buffer output buffer
  * \return count of left size
  */
  data_size_t BaggingHelper(Random& cur_rand, data_size_t start, data_size_t cnt, data_size_t* buffer);

  /*!
  * \brief calculate the object function
  */
  virtual void Boosting();

  /*!
  * \brief updating score after tree was trained
  * \param tree Trained tree of this iteration
  * \param cur_tree_id Current tree for multiclass training
  */
  virtual void UpdateScore(const Tree* tree, const int cur_tree_id);

  /*!
  * \brief eval results for one metric
  */
  virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const;

  /*!
  * \brief Print metric result of current iteration
  * \param iter Current iteration
  * \return best_msg if met early_stopping
  */
  std::string OutputMetric(int iter);

  double BoostFromAverage(int class_id, bool update_scorer);

  /*! \brief current iteration */
  int iter_;
  /*! \brief Pointer to training data */
  const Dataset* train_data_;
  /*! \brief Config of gbdt */
  std::unique_ptr<Config> config_;
  /*! \brief Tree learner, will use this class to learn trees */
  std::unique_ptr<TreeLearner> tree_learner_;
  /*! \brief Objective function */
  const ObjectiveFunction* objective_function_;
  /*! \brief Store and update training data's score */
  std::unique_ptr<ScoreUpdater> train_score_updater_;
  /*! \brief Metrics for training data */
  std::vector<const Metric*> training_metrics_;
  /*! \brief Store and update validation data's scores */
  std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_;
  /*! \brief Metric for validation data */
  std::vector<std::vector<const Metric*>> valid_metrics_;
  /*! \brief Number of rounds for early stopping */
  int early_stopping_round_;
  /*! \brief Best iteration(s) for early stopping */
  std::vector<std::vector<int>> best_iter_;
  /*! \brief Best score(s) for early stopping */
  std::vector<std::vector<double>> best_score_;
  /*! \brief output message of best iteration */
  std::vector<std::vector<std::string>> best_msg_;
  /*! \brief Trained models(trees) */
  std::vector<std::unique_ptr<Tree>> models_;
  /*! \brief Max feature index of training data*/
  int max_feature_idx_;
  /*! \brief First order derivative of training data */
  std::vector<score_t> gradients_;
  /*! \brief Second order derivative of training data */
  std::vector<score_t> hessians_;
  /*! \brief Store the indices of in-bag data */
  std::vector<data_size_t> bag_data_indices_;
  /*! \brief Number of in-bag data */
  data_size_t bag_data_cnt_;
  /*! \brief Store the indices of in-bag data */
  std::vector<data_size_t> tmp_indices_;
  /*! \brief Number of training data */
  data_size_t num_data_;
  /*! \brief Number of trees per iterations */
  int num_tree_per_iteration_;
  /*! \brief Number of class */
  int num_class_;
  /*! \brief Index of label column */
  data_size_t label_idx_;
  /*! \brief number of used model */
  int num_iteration_for_pred_;
  /*! \brief Shrinkage rate for one iteration */
  double shrinkage_rate_;
  /*! \brief Number of loaded initial models */
  int num_init_iteration_;
  /*! \brief Feature names */
  std::vector<std::string> feature_names_;
  std::vector<std::string> feature_infos_;
  /*! \brief number of threads */
  int num_threads_;
  /*! \brief Buffer for multi-threading bagging */
  std::vector<data_size_t> offsets_buf_;
  /*! \brief Buffer for multi-threading bagging */
  std::vector<data_size_t> left_cnts_buf_;
  /*! \brief Buffer for multi-threading bagging */
  std::vector<data_size_t> right_cnts_buf_;
  /*! \brief Buffer for multi-threading bagging */
  std::vector<data_size_t> left_write_pos_buf_;
  /*! \brief Buffer for multi-threading bagging */
  std::vector<data_size_t> right_write_pos_buf_;
  std::unique_ptr<Dataset> tmp_subset_;
  bool is_use_subset_;
  std::vector<bool> class_need_train_;
  bool is_constant_hessian_;
  std::unique_ptr<ObjectiveFunction> loaded_objective_;
  bool average_output_;
  bool need_re_bagging_;
  std::string loaded_parameter_;
  Json forced_splits_json_;
};

}  // namespace LightGBM
#endif  // LIGHTGBM_BOOSTING_GBDT_H_
SpatialAdaptiveMaxPooling.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialAdaptiveMaxPooling.c"
#else

// Adaptive max pooling: each output cell (oh, ow) pools over the input window
// [START_IND, END_IND), computed so the osize windows tile the isize range.
// NOTE(review): the float-based rounding below can misplace window boundaries
// for very large sizes where (a * c) exceeds float precision — confirm sizes
// stay within float-exact integer range.
#define START_IND(a,b,c) (int)floor((float)(a * c) / b)
#define END_IND(a,b,c) (int)ceil((float)((a + 1) * c) / b)
// #define START_IND(a,b,c) a * c / b
// #define END_IND(a,b,c) (a + 1) * c / b + ((a + 1) * c % b > 0)?1:0

// 4d tensor B x D x H x W

// Forward pass over a single frame (one sample): for every plane d and output
// location, find the max over the corresponding adaptive input window, write
// the max value to output_p and its flattened (h*isizeW + w) index to ind_p.
// Planes are processed in parallel with OpenMP.
static void THNN_(SpatialAdaptiveMaxPooling_updateOutput_frame)(
          real *input_p,
          real *output_p,
          THIndex_t *ind_p,
          int64_t sizeD,
          int64_t isizeH,
          int64_t isizeW,
          int64_t osizeH,
          int64_t osizeW,
          int64_t istrideD,
          int64_t istrideH,
          int64_t istrideW)
{
  int64_t d;
#pragma omp parallel for private(d)
  for (d = 0; d < sizeD; d++)
  {
    /* loop over output */
    int64_t oh, ow;
    for(oh = 0; oh < osizeH; oh++)
    {
      int istartH = START_IND(oh, osizeH, isizeH);
      int iendH   = END_IND(oh, osizeH, isizeH);
      int kH = iendH - istartH;

      for(ow = 0; ow < osizeW; ow++)
      {
        int istartW = START_IND(ow, osizeW, isizeW);
        int iendW   = END_IND(ow, osizeW, isizeW);
        int kW = iendW - istartW;

        /* local pointers */
        real *ip = input_p   + d*istrideD + istartH*istrideH + istartW*istrideW;
        real *op = output_p  + d*osizeH*osizeW + oh*osizeW + ow;
        THIndex_t *indp = ind_p + d*osizeH*osizeW + oh*osizeW + ow;

        /* compute local max: */
        int64_t maxindex = -1;
        real maxval = -FLT_MAX;
        int ih, iw;
        for(ih = 0; ih < kH; ih++)
        {
          for(iw = 0; iw < kW; iw++)
          {
            real val = *(ip + ih*istrideH + iw*istrideW);
            /* NaN propagates: any NaN in the window becomes the max */
            if ((val > maxval) || std::isnan(val))
            {
              maxval = val;
              maxindex = (ih+istartH)*isizeW + (iw+istartW);
            }
          }
        }

        /* set output to local max */
        *op = maxval;

        /* store location of max (shifted by TH_INDEX_BASE for Lua/0-based interop) */
        *indp = maxindex + TH_INDEX_BASE;
      }
    }
  }
}

// Forward entry point: validates input (3D single frame or 4D batch),
// resizes output/indices, and dispatches per-frame work (batched frames run
// in parallel). `indices` records argmax locations for the backward pass.
void THNN_(SpatialAdaptiveMaxPooling_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          THIndexTensor *indices,
          int osizeW,
          int osizeH)
{
  int dimW = 2;
  int dimH = 1;
  int64_t sizeB = 1;
  int64_t sizeD = 0;
  int64_t isizeH = 0;
  int64_t isizeW = 0;

  int64_t istrideD = 0;
  int64_t istrideH = 0;
  int64_t istrideW = 0;
  int64_t istrideB = 0;

  real *input_data = nullptr;
  real *output_data = nullptr;
  THIndex_t *indices_data = nullptr;

  THNN_ARGCHECK(!input->is_empty() && (input->dim() == 3 || input->dim() == 4), 2, input,
                "non-empty 3D or 4D (batch mode) tensor expected for input, but got: %s");

  if (input->dim() == 4)
  {
    /* batch mode: shift the H/W dims by one and remember the batch stride */
    istrideB = input->stride(0);
    sizeB = input->size(0);
    dimW++;
    dimH++;
  }

  /* sizes */
  sizeD  = input->size(dimH-1);
  isizeH = input->size(dimH);
  isizeW = input->size(dimW);
  /* strides */
  istrideD = input->stride(dimH-1);
  istrideH = input->stride(dimH);
  istrideW = input->stride(dimW);

  /* resize output */
  if (input->dim() == 3)
  {
    THTensor_(resize3d)(output, sizeD, osizeH, osizeW);
    /* indices will contain i,j locations for each output point */
    THIndexTensor_(resize3d)(indices, sizeD, osizeH, osizeW);

    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);
    indices_data = THIndexTensor_(data)(indices);

    THNN_(SpatialAdaptiveMaxPooling_updateOutput_frame)(input_data, output_data,
                                                        indices_data,
                                                        sizeD,
                                                        isizeH, isizeW,
                                                        osizeH, osizeW,
                                                        istrideD,
                                                        istrideH, istrideW);
  }
  else
  {
    int64_t b;

    THTensor_(resize4d)(output, sizeB, sizeD, osizeH, osizeW);
    /* indices will contain i,j locations for each output point */
    THIndexTensor_(resize4d)(indices, sizeB, sizeD, osizeH, osizeW);

    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);
    indices_data = THIndexTensor_(data)(indices);

#pragma omp parallel for private(b)
    for (b = 0; b < sizeB; b++)
    {
      /* input advances by its own batch stride; output/indices are contiguous */
      THNN_(SpatialAdaptiveMaxPooling_updateOutput_frame)(input_data+b*istrideB,
                                                          output_data+b*sizeD*osizeH*osizeW,
                                                          indices_data+b*sizeD*osizeH*osizeW,
                                                          sizeD,
                                                          isizeH, isizeW,
                                                          osizeH, osizeW,
                                                          istrideD,
                                                          istrideH, istrideW);
    }
  }
}

// Backward pass over a single frame: route each output gradient back to the
// input position recorded in ind_p (accumulating, since windows may overlap).
static void THNN_(SpatialAdaptiveMaxPooling_updateGradInput_frame)(
          real *gradInput_p,
          real *gradOutput_p,
          THIndex_t *ind_p,
          int64_t sizeD,
          int64_t isizeH,
          int64_t isizeW,
          int64_t osizeH,
          int64_t osizeW)
{
  int64_t d;
#pragma omp parallel for private(d)
  for (d = 0; d < sizeD; d++)
  {
    real *gradInput_p_d = gradInput_p + d*isizeH*isizeW;
    real *gradOutput_p_d = gradOutput_p + d*osizeH*osizeW;
    THIndex_t *ind_p_d = ind_p + d*osizeH*osizeW;

    /* calculate max points */
    int64_t oh, ow;
    for(oh = 0; oh < osizeH; oh++)
    {
      for(ow = 0; ow < osizeW; ow++)
      {
        /* retrieve position of max (undo the TH_INDEX_BASE shift) */
        int64_t maxp = ind_p_d[oh*osizeW + ow] - TH_INDEX_BASE;

        /* update gradient */
        gradInput_p_d[maxp] += gradOutput_p_d[oh*osizeW + ow];
      }
    }
  }
}

// Backward entry point: zeros gradInput, then scatters gradOutput back to the
// argmax positions saved by the forward pass. Handles 3D and 4D (batch) input.
void THNN_(SpatialAdaptiveMaxPooling_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          THIndexTensor *indices)
{
  int dimW = 2;
  int dimH = 1;
  int64_t sizeB = 1;
  int sizeD;
  int isizeH;
  int isizeW;
  int osizeH;
  int osizeW;
  real *gradInput_data;
  real *gradOutput_data;
  THIndex_t *indices_data;

  /* get contiguous gradOutput (frame helper assumes dense layout) */
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* resize */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  if (input->dim() == 4) {
    sizeB = input->size(0);
    dimW++;
    dimH++;
  }

  /* sizes */
  sizeD  = input->size(dimH-1);
  isizeH = input->size(dimH);
  isizeW = input->size(dimW);
  osizeH = gradOutput->size(dimH);
  osizeW = gradOutput->size(dimW);

  /* get raw pointers */
  gradInput_data = THTensor_(data)(gradInput);
  gradOutput_data = THTensor_(data)(gradOutput);
  indices_data = THIndexTensor_(data)(indices);

  /* backprop */
  if (input->dim() == 3)
  {
    THNN_(SpatialAdaptiveMaxPooling_updateGradInput_frame)(gradInput_data, gradOutput_data,
                                                           indices_data,
                                                           sizeD,
                                                           isizeH, isizeW,
                                                           osizeH, osizeW);
  }
  else
  {
    int64_t b;
#pragma omp parallel for private(b)
    for (b = 0; b < sizeB; b++)
    {
      THNN_(SpatialAdaptiveMaxPooling_updateGradInput_frame)(gradInput_data+b*sizeD*isizeH*isizeW,
                                                             gradOutput_data+b*sizeD*osizeH*osizeW,
                                                             indices_data+b*sizeD*osizeH*osizeW,
                                                             sizeD,
                                                             isizeH, isizeW,
                                                             osizeH, osizeW);
    }
  }

  /* cleanup (release the newContiguous copy) */
  THTensor_(free)(gradOutput);
}
#endif
Partition.h
/* * Partition.h * * Created on: 03.10.2013 * Author: cls */ #ifndef PARTITION_H_ #define PARTITION_H_ #include <cinttypes> #include <set> #include <vector> #include <map> #include <cassert> #include <limits> #include "../Globals.h" namespace NetworKit { /** * @ingroup structures * Implements a partition of a set, i.e. a subdivision of the * set into disjoint subsets. */ class Partition { public: Partition(); /** * Create a new partition data structure for @a z elements. * * @param[in] z maximum index */ Partition(index z); /** * Create a new partition data structure for @a z elements. * Initialize each entry to the default value. * WARNING: this circumvents the standard interface and may leave the object * in an inconsistent state. Use only in exceptional cases. * * @param[in] z maximum index * @param[in] defaultValue */ Partition(index z, index defaultValue); Partition(const std::vector<index>& data); /** * Index operator. * * @param[in] e an element */ inline index& operator [](const index& e) { return this->data[e]; } /** * Index operator for const instances of this class. * * @param[in] e an element */ inline const index& operator [](const index& e) const { return this->data[e]; } /** * Get the set (id) in which the element @a e is contained. * * @param e Index of element. * @return The index of the set in which @a e is contained. */ inline index subsetOf(index e) const { assert (e < this->numberOfElements()); return this->data[e]; } /** * Extend the data structure and create a slot * for one more element. Initializes the entry to none * and returns the index of the entry. */ inline index extend() { data.push_back(none); z++; assert (z == data.size()); //(data.size() - 1) return z-1; } /** * Removes the entry for the given element * by setting it to none. */ inline void remove(index e) { assert (e < z); data[e] = none; } /** * Add a (previously unassigned) element @a e to the set @a s. * * @param s The index of the subset. * @param e The element to add. 
*/ inline void addToSubset(index s, index e) { assert (data[e] == none); // guarantee that element was unassigned assert (s <= omega); // do not create new subset ids data[e] = s; } /** * Move the (previously assigned) element @a e to the set @a s. * * @param s The index of the subset. * @param e The element to move. */ inline void moveToSubset(index s, index e) { assert (this->contains(e)); assert (s <= omega); // do not create new subset ids data[e] = s; } /** * Creates a singleton set containing the element @a e. * * @param e The index of the element. */ inline void toSingleton(index e) { data[e] = newSubsetId(); } /** * Assigns every element to a singleton set. * Set id is equal to element id. */ void allToSingletons(); /** * Assigns every element to the same subset. * Set id is equal to zero. */ void allToOnePartition(); /** * Assigns the elements from both sets to a new set and returns the id of it. * * @param s Set to merge. * @param t Set to merge. * @return Id of newly created set. */ index mergeSubsets(index s, index t); /** * Sets an upper bound for the subset ids that CAN be assigned. * * @param[in] upper highest assigned subset ID + 1 */ inline void setUpperBound(index upper) { this->omega = upper-1; } /** * Return an upper bound for the subset ids that have been assigned. * (This is the maximum id + 1.) * * @return The upper bound. */ inline index upperBound() const { return omega+1; } /** * Get a lower bound for the subset ids that have been assigned. * * @return The lower bound. */ inline index lowerBound() const { return 0; } /** * Change subset IDs to be consecutive, starting at 0. * @param useTurbo Default: false. If set to true, a vector instead of a map to assign new ids * which results in a shorter running time but possibly a large space overhead. 
*/ void compact(bool useTurbo = false); bool isCompact() const { std::vector<bool> id_contained(upperBound(), false); for (index e = 0; e < z; ++e) { id_contained[data[e]] = true; } for (index e = 0; e < upperBound(); ++e) { if (!id_contained[e]) { return false; } } return true; } /** * Check if partition assigns a valid subset to the element @a e. * * @param e The element. * @return @c true if the assigned subset is valid, @c false otherwise. */ inline bool contains(index e) const { return (e < z) && (data[e] != none); // e is in the element index range and the entry is not empty } /** * Check if two elements @a e1 and @a e2 belong to the same subset. * * @param e1 Element. * @param e2 Element. * @return @c true if @a e1 and @a e2 belong to same subset, @c false otherwise. */ inline bool inSameSubset(index e1, index e2) const { assert (data[e1] != none); assert (data[e2] != none); return (data[e1] == data[e2]); } /** * Get a list of subset sizes. Indices do not necessarily correspond to subset ids. * * @return A vector of subset sizes. */ std::vector<count> subsetSizes() const; /** * Get a map from subset id to size of the subset. * * @return A map from subset id to size of the subset. */ std::map<index, count> subsetSizeMap() const; /** * Get the members of the subset @a s. * * @param s The subset. * @return A set containing the members of @a s. */ std::set<index> getMembers(const index s) const; /** * @return number of elements in the partition. */ inline count numberOfElements() const { return z; // z is the maximum element id } /** * Get the current number of sets in this partition. * * @return The current number of sets. */ count numberOfSubsets() const; /** * Get the actual vector representing the partition data structure. * @return vector containing information about partitions. 
*/ std::vector<index> getVector() const; /** * Move the vector representing the partition data structure and leave behind an invalid vector * @return vector containing partition */ std::vector<index> moveVector(); /** * @return the subsets of the partition as a set of sets. */ std::set<std::set<index> > getSubsets() const; /** * Get the ids of nonempty subsets. * * @return A set of ids of nonempty subsets. */ std::set<index> getSubsetIds() const; /** * Set a human-readable identifier @a name for the instance. * * @param name The name. */ inline void setName(std::string name) { this->name = name; } /** * Get the human-readable identifier. * * @return The name of this partition. */ inline std::string getName() const { return this->name; } /** * Iterate over all entries (node, cluster id) and execute callback function @a func (lambda closure). * * @param func Takes parameters <code>(node, index)</code> */ template<typename Callback> void forEntries(Callback func) const; /** * Iterate over all entries (node, cluster id) in parallel and execute callback function @a handle (lambda closure). * * @param handle Takes parameters <code>(node, index)</code> */ template<typename Callback> void parallelForEntries(Callback handle, bool parallel = true) const; private: index z; //!< maximum element index that can be mapped index omega; //!< maximum subset index ever assigned std::vector<index> data; //!< data container, indexed by element index, containing subset index std::string name; /** * Allocates and returns a new subset id. 
*/ inline index newSubsetId() { index s = ++omega; return s; } }; template<typename Callback> inline void Partition::forEntries(Callback handle) const { for (index e = 0; e < this->z; e++) { handle(e, data[e]); } } template<typename Callback> inline void Partition::parallelForEntries(Callback handle, bool parallel) const { #pragma omp parallel for if (parallel) for (omp_index e = 0; e < static_cast<omp_index>(this->z); e++) { handle(e, this->data[e]); } } } /* namespace NetworKit */ #endif /* PARTITION_H_ */
EigenBaseSparseMatrix_MT.h
/// try to add openmp instructions to parallelize the eigen sparse matrix multiplication
/// inspired by eigen3.2.0 ConservativeSparseSparseProduct.h & SparseProduct.h
/// @warning this will not work with pruning (cf Eigen doc)
/// @warning this is based on the implementation of eigen3.2.0, it may be wrong with other versions

#ifndef EIGENBASESPARSEMATRIX_MT_H
#define EIGENBASESPARSEMATRIX_MT_H

namespace sofa {
namespace component {
namespace linearsolver {

/// Parallel sparse*sparse product: each result column is accumulated into a
/// DENSE scratch matrix (values) by one OpenMP task, the per-column non-zero
/// counts are gathered, and finally the compressed arrays of res are written
/// directly (the "eigen black magic" below).
/// @warning memory use is O(rows*cols) because of the dense scratch matrix.
template<typename Lhs, typename Rhs, typename ResultType>
static void conservative_sparse_sparse_product_MT(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
    typedef typename Eigen::internal::remove_all<Lhs>::type::Scalar Scalar;
    typedef typename Eigen::internal::remove_all<Lhs>::type::Index Index;

    // make sure to call innerSize/outerSize since we fake the storage order.
    const Index rows = lhs.innerSize();
    const Index cols = rhs.outerSize();
    eigen_assert(lhs.outerSize() == rhs.innerSize());

//    Eigen::Matrix<bool,Eigen::Dynamic,Eigen::Dynamic> mask(cols,rows);
    // dense accumulator: row j holds result column j (transposed layout)
    Eigen::Matrix<Scalar,Eigen::Dynamic,Eigen::Dynamic> values(cols,rows);
//    Eigen::Matrix<Index,Eigen::Dynamic,Eigen::Dynamic> indices(cols,rows);
//    std::vector< std::vector<Index> > indices(cols);

    values.setZero();

    std::vector<Index> nnz(cols,0);  // non-zeros per result column
    Index total = 0;                 // total non-zeros of the product

    // we compute each column of the result, one after the other
#pragma omp parallel for /*num_threads(8)*/ shared(values,nnz/*,indices*/) reduction(+:total)
    for (Index j=0; j<cols; ++j)
    {
//        printf("Element %d processed by thread %d \n",j,omp_get_thread_num());

//        indices[j].reserve(rows);

        // col_j(res) = sum_k rhs(k,j) * col_k(lhs)
        for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
        {
            Scalar y = rhsIt.value();
            Index k = rhsIt.index();
            for (typename Lhs::InnerIterator lhsIt(lhs, k); lhsIt; ++lhsIt)
            {
                Index i = lhsIt.index();
                Scalar x = lhsIt.value();
//                if(!values(j,i))
//                {
////                    mask(j,i) = true;
//                    values(j,i) = x * y;
////                    indices(j,nnz[j])=i;
//                    indices[j].push_back(i);
////                    ++nnz[j];
//                }
//                else
                    values(j,i) += x * y;
            }
        }

//        std::sort( indices[j].begin(), indices[j].end() );

        // count the non-zeros of this column (exact-zero cancellations are pruned)
        for(Index i=0; i<rows; ++i)
        {
            if( values(j,i)!=0 ) nnz[j]++;
        }

        total += nnz[j];
    }

    // estimate the number of non zero entries
    // given a rhs column containing Y non zeros, we assume that the respective Y columns
    // of the lhs differs in average of one non zeros, thus the number of non zeros for
    // the product of a rhs column with the lhs is X+Y where X is the average number of non zero
    // per column of the lhs.
    // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
//    Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros();

//    res.setZero();
//    res.reserve(Index(estimated_nnz_prod));

    // TODO eigen black magic to fill the matrix in parallel
    // start index for each colum (not parallel)
    // fill array structure in parallel

//    for (Index j=0; j<cols; ++j)
//    {
//        res.startVec(j);

        // unordered insertion
////        for(int k=0; k</*nnz[j]*/indices[j].size(); ++k)
////        {
////            int i = indices[j][k];
////            res.insertBackByOuterInnerUnordered(j,i) = values(j,i);
////        }

        // ordered insertion
////        for(unsigned int k=0; k<indices[j].size(); ++k)
////        {
////            int i = indices[j][k];
////            res.insertBack(j,i) = values(j,i);
////        }

//        for(Index i=0; i<rows; ++i)
//        {
//            if( values(j,i)!=0 )
//                res.insertBack(j,i) = values(j,i);
//        }
//    }
//    res.finalize();

    // this is where eigen black magic occurs: write straight into the
    // compressed-storage arrays of res (valid for eigen 3.2.0 layout only).
    res.makeCompressed();
    res.reserve(total);
    Index* innerIndex = res.innerIndexPtr();
    Scalar* value = res.valuePtr();
    Index* outerIndex = res.outerIndexPtr();
    outerIndex[cols]=total;
#pragma omp parallel for shared(values,nnz)
    for (Index j=0; j<cols; ++j)
    {
        // NOTE(review): each thread recomputes the prefix sum of nnz from
        // scratch, so this loop is O(cols^2) overall; a sequential exclusive
        // scan beforehand would be O(cols) — confirm before changing.
        outerIndex[j] = 0;
        for (Index k=0; k<j; ++k)
            outerIndex[j] += nnz[k];
        unsigned localIndex = 0;
        for(Index i=0; i<rows; ++i)
        {
            if( values(j,i)!=0 )
            {
                innerIndex[outerIndex[j]+localIndex] = i;
                value[outerIndex[j]+localIndex] = values(j,i);
                ++localIndex;
            }
        }
    }
}

// local aliases so the selector template parameters below stay readable
static const unsigned int RowMajor = Eigen::RowMajor;
static const unsigned int ColMajor = Eigen::ColMajor;

// Storage-order dispatcher, mirroring Eigen's own
// conservative_sparse_sparse_product_selector: each specialization converts
// operands/result so the kernel above sees the layout it expects.
template<typename Lhs, typename Rhs, typename ResultType,
         int LhsStorageOrder = (Eigen::internal::traits<Lhs>::Flags&Eigen::RowMajorBit) ? RowMajor : ColMajor,
         int RhsStorageOrder = (Eigen::internal::traits<Rhs>::Flags&Eigen::RowMajorBit) ? RowMajor : ColMajor,
         int ResStorageOrder = (Eigen::internal::traits<ResultType>::Flags&Eigen::RowMajorBit) ? RowMajor : ColMajor>
struct conservative_sparse_sparse_product_selector_MT;

template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector_MT<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
{
    typedef typename Eigen::internal::remove_all<Lhs>::type LhsCleaned;
    typedef typename LhsCleaned::Scalar Scalar;

    static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
    {
        typedef Eigen::SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
        typedef Eigen::SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
        ColMajorMatrix resCol(lhs.rows(),rhs.cols());
        conservative_sparse_sparse_product_MT<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
        // sort the non zeros:
        RowMajorMatrix resRow(resCol);
        res = resRow;
    }
};

template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector_MT<Lhs,Rhs,ResultType,RowMajor,ColMajor,ColMajor>
{
    static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
    {
        typedef Eigen::SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
        // compute (rhs^T * lhs^T)^T by swapping the operands
        RowMajorMatrix rhsRow = rhs;
        RowMajorMatrix resRow(lhs.rows(), rhs.cols());
        conservative_sparse_sparse_product_MT<RowMajorMatrix,Lhs,RowMajorMatrix>(rhsRow, lhs, resRow);
        res = resRow;
    }
};

template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector_MT<Lhs,Rhs,ResultType,ColMajor,RowMajor,ColMajor>
{
    static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
    {
        typedef Eigen::SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
        RowMajorMatrix lhsRow = lhs;
        RowMajorMatrix resRow(lhs.rows(), rhs.cols());
        conservative_sparse_sparse_product_MT<Rhs,RowMajorMatrix,RowMajorMatrix>(rhs, lhsRow, resRow);
        res = resRow;
    }
};

template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector_MT<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
{
    static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
    {
        typedef Eigen::SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
        RowMajorMatrix resRow(lhs.rows(), rhs.cols());
        conservative_sparse_sparse_product_MT<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
        res = resRow;
    }
};

template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector_MT<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
{
    typedef typename Eigen::internal::traits<typename Eigen::internal::remove_all<Lhs>::type>::Scalar Scalar;

    static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
    {
        typedef Eigen::SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
        ColMajorMatrix resCol(lhs.rows(), rhs.cols());
        conservative_sparse_sparse_product_MT<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
        res = resCol;
    }
};

template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector_MT<Lhs,Rhs,ResultType,RowMajor,ColMajor,RowMajor>
{
    static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
    {
        typedef Eigen::SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
        ColMajorMatrix lhsCol = lhs;
        ColMajorMatrix resCol(lhs.rows(), rhs.cols());
        conservative_sparse_sparse_product_MT<ColMajorMatrix,Rhs,ColMajorMatrix>(lhsCol, rhs, resCol);
        res = resCol;
    }
};

template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector_MT<Lhs,Rhs,ResultType,ColMajor,RowMajor,RowMajor>
{
    static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
    {
        typedef Eigen::SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
        ColMajorMatrix rhsCol = rhs;
        ColMajorMatrix resCol(lhs.rows(), rhs.cols());
        conservative_sparse_sparse_product_MT<Lhs,ColMajorMatrix,ColMajorMatrix>(lhs, rhsCol, resCol);
        res = resCol;
    }
};

template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector_MT<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
{
    static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
    {
        typedef Eigen::SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
        typedef Eigen::SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
        RowMajorMatrix resRow(lhs.rows(),rhs.cols());
        conservative_sparse_sparse_product_MT<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
        // sort the non zeros:
        ColMajorMatrix resCol(resRow);
        res = resCol;
    }
};

/// Eigen::SparseMatrix multiplication (openmp multithreaded version)
/// @warning res MUST NOT be the same variable as lhs or rhs
template<typename Lhs, typename Rhs, typename ResultType>
void mul_EigenSparseMatrix_MT( ResultType& res, const Lhs& lhs, const Rhs& rhs )
{
#ifdef _OPENMP
    assert( &res != &lhs );
    assert( &res != &rhs );
    conservative_sparse_sparse_product_selector_MT< Lhs, Rhs, ResultType >::run(lhs, rhs, res);
#else
    // without OpenMP, fall back to Eigen's regular sequential product
    res = lhs * rhs;
#endif
}

} } }

/////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////
// SPARSE * DENSE
/////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////

#include <Eigen/Sparse>

namespace Eigen {

template<typename Lhs, typename Rhs> class SparseTimeDenseProduct_MT;
//template<typename Lhs, typename Rhs> class DenseTimeSparseProduct_MT;
//template<typename Lhs, typename Rhs, bool Transpose> class SparseDenseOuterProduct_MT;
//template<typename Lhs, typename Rhs, int InnerSize = internal::traits<Lhs>::ColsAtCompileTime> struct DenseSparseProductReturnType;
template<typename Lhs, typename Rhs, int InnerSize = internal::traits<Lhs>::ColsAtCompileTime> struct SparseDenseProductReturnType_MT;

template<typename Lhs, typename Rhs, int InnerSize> struct SparseDenseProductReturnType_MT
{
    typedef SparseTimeDenseProduct_MT<Lhs,Rhs> Type;
};

// inner size 1 (outer product): delegate to Eigen's stock expression,
// there is nothing to multithread in that case.
template<typename Lhs, typename Rhs> struct SparseDenseProductReturnType_MT<Lhs,Rhs,1>
{
//    typedef SparseDenseOuterProduct_MT<Lhs,Rhs,false> Type;
    typedef SparseDenseOuterProduct<Lhs,Rhs,false> Type;
};

//template<typename Lhs, typename Rhs, int InnerSize> struct DenseSparseProductReturnType_MT
//{
//    typedef DenseTimeSparseProduct_MT<Lhs,Rhs> Type;
//};

//template<typename Lhs, typename Rhs> struct DenseSparseProductReturnType_MT<Lhs,Rhs,1>
//{
//    typedef SparseDenseOuterProduct_MT<Rhs,Lhs,true> Type;
//};

//namespace internal {

//using Eigen::internal::traits;

//template<typename Lhs, typename Rhs, bool Tr>
//struct traits<SparseDenseOuterProduct_MT<Lhs,Rhs,Tr> >
//{
//    typedef Sparse StorageKind;
//    typedef typename scalar_product_traits<typename traits<Lhs>::Scalar,
//                                           typename traits<Rhs>::Scalar>::ReturnType Scalar;
//    typedef typename Lhs::Index Index;
//    typedef typename Lhs::Nested LhsNested;
//    typedef typename Rhs::Nested RhsNested;
//    typedef typename remove_all<LhsNested>::type _LhsNested;
//    typedef typename remove_all<RhsNested>::type _RhsNested;
//    enum {
//        LhsCoeffReadCost = traits<_LhsNested>::CoeffReadCost,
//        RhsCoeffReadCost = traits<_RhsNested>::CoeffReadCost,
//        RowsAtCompileTime    = Tr ? int(traits<Rhs>::RowsAtCompileTime)    : int(traits<Lhs>::RowsAtCompileTime),
//        ColsAtCompileTime    = Tr ? int(traits<Lhs>::ColsAtCompileTime)    : int(traits<Rhs>::ColsAtCompileTime),
//        MaxRowsAtCompileTime = Tr ? int(traits<Rhs>::MaxRowsAtCompileTime) : int(traits<Lhs>::MaxRowsAtCompileTime),
//        MaxColsAtCompileTime = Tr ? int(traits<Lhs>::MaxColsAtCompileTime) : int(traits<Rhs>::MaxColsAtCompileTime),
//        Flags = Tr ? RowMajorBit : 0,
//        CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + NumTraits<Scalar>::MulCost
//    };
//};

//} // end namespace internal

//template<typename Lhs, typename Rhs, bool Tr>
//class SparseDenseOuterProduct_MT
// : public SparseMatrixBase<SparseDenseOuterProduct_MT<Lhs,Rhs,Tr> >
//{
//    public:
//        typedef SparseMatrixBase<SparseDenseOuterProduct_MT> Base;
//        EIGEN_DENSE_PUBLIC_INTERFACE(SparseDenseOuterProduct_MT)
//        typedef internal::traits<SparseDenseOuterProduct_MT> Traits;

//    private:
//        typedef typename Traits::LhsNested LhsNested;
//        typedef typename Traits::RhsNested RhsNested;
//        typedef typename Traits::_LhsNested _LhsNested;
//        typedef typename Traits::_RhsNested _RhsNested;

//    public:
//        class InnerIterator;

//        EIGEN_STRONG_INLINE SparseDenseOuterProduct_MT(const Lhs& lhs, const Rhs& rhs)
//            : m_lhs(lhs), m_rhs(rhs)
//        {
//            EIGEN_STATIC_ASSERT(!Tr,YOU_MADE_A_PROGRAMMING_MISTAKE);
//        }

//        EIGEN_STRONG_INLINE SparseDenseOuterProduct_MT(const Rhs& rhs, const Lhs& lhs)
//            : m_lhs(lhs), m_rhs(rhs)
//        {
//            EIGEN_STATIC_ASSERT(Tr,YOU_MADE_A_PROGRAMMING_MISTAKE);
//        }

//        EIGEN_STRONG_INLINE Index rows() const { return Tr ? m_rhs.rows() : m_lhs.rows(); }
//        EIGEN_STRONG_INLINE Index cols() const { return Tr ? m_lhs.cols() : m_rhs.cols(); }

//        EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
//        EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }

//    protected:
//        LhsNested m_lhs;
//        RhsNested m_rhs;
//};

//template<typename Lhs, typename Rhs, bool Transpose>
//class SparseDenseOuterProduct_MT<Lhs,Rhs,Transpose>::InnerIterator : public _LhsNested::InnerIterator
//{
//        typedef typename _LhsNested::InnerIterator Base;
//        typedef typename SparseDenseOuterProduct_MT::Index Index;
//    public:
//        EIGEN_STRONG_INLINE InnerIterator(const SparseDenseOuterProduct_MT& prod, Index outer)
//            : Base(prod.lhs(), 0), m_outer(outer), m_factor(prod.rhs().coeff(outer))
//        {
//        }

//        inline Index outer() const { return m_outer; }
//        inline Index row() const { return Transpose ? Base::row() : m_outer; }
//        inline Index col() const { return Transpose ? m_outer : Base::row(); }

//        inline Scalar value() const { return Base::value() * m_factor; }

//    protected:
//        int m_outer;
//        Scalar m_factor;
//};

namespace internal {

template<typename Lhs, typename Rhs>
struct traits<SparseTimeDenseProduct_MT<Lhs,Rhs> >
    : traits<ProductBase<SparseTimeDenseProduct_MT<Lhs,Rhs>, Lhs, Rhs> >
{
    typedef Dense StorageKind;
    typedef MatrixXpr XprKind;
};

// Dispatch on the sparse lhs storage order and on whether the dense rhs is
// best traversed column-by-column.
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,
         int LhsStorageOrder = ((SparseLhsType::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor,
         bool ColPerCol = ((DenseRhsType::Flags&RowMajorBit)==0) || DenseRhsType::ColsAtCompileTime==1>
struct sparse_time_dense_product_impl_MT;

template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl_MT<SparseLhsType,DenseRhsType,DenseResType, RowMajor, true>
{
    typedef typename internal::remove_all<SparseLhsType>::type Lhs;
    typedef typename internal::remove_all<DenseRhsType>::type Rhs;
    typedef typename internal::remove_all<DenseResType>::type Res;
    typedef typename Lhs::Index Index;
    typedef typename Lhs::InnerIterator LhsInnerIterator;

    static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha, unsigned nbThreads)
    {
        // NOTE(review): this OVERWRITES res (r = 0 then r *= alpha) whereas
        // Eigen's scaleAndAddTo convention is to ACCUMULATE (+=), and the
        // ColMajor variant below does accumulate — confirm this asymmetry is
        // intended before relying on beta != 0 semantics.
#pragma omp parallel for num_threads(nbThreads) //schedule(static,3000)
        for(Index j=0; j<lhs.outerSize(); ++j)
        {
            for(Index c=0; c<rhs.cols(); ++c)
            {
                typename Res::Scalar& r = res.coeffRef(j,c);
                r = 0;
                for(LhsInnerIterator it(lhs,j); it ;++it)
                    r += it.value() * rhs.coeff(it.index(),c);
                r *= alpha;
            }
        }
    }
};

template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl_MT<SparseLhsType,DenseRhsType,DenseResType, ColMajor, true>
{
    typedef typename internal::remove_all<SparseLhsType>::type Lhs;
    typedef typename internal::remove_all<DenseRhsType>::type Rhs;
    typedef typename internal::remove_all<DenseResType>::type Res;
    typedef typename Lhs::InnerIterator LhsInnerIterator;
    typedef typename Lhs::Index Index;

    static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha, unsigned nbThreads)
    {
        // NOTE(review): threads scatter into res.coeffRef(it.index(),c); with a
        // column-major sparse lhs different columns j can hit the same result
        // row, which is a potential data race — confirm usage guarantees.
#pragma omp parallel for num_threads(nbThreads)
        for(Index j=0; j<lhs.outerSize(); ++j)
        {
            for(Index c=0; c<rhs.cols(); ++c)
            {
                typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c);
                for(LhsInnerIterator it(lhs,j); it ;++it)
                    res.coeffRef(it.index(),c) += it.value() * rhs_j;
            }
        }
    }
};

template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl_MT<SparseLhsType,DenseRhsType,DenseResType, RowMajor, false>
{
    typedef typename internal::remove_all<SparseLhsType>::type Lhs;
    typedef typename internal::remove_all<DenseRhsType>::type Rhs;
    typedef typename internal::remove_all<DenseResType>::type Res;
    typedef typename Lhs::InnerIterator LhsInnerIterator;
    typedef typename Lhs::Index Index;

    static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha, unsigned nbThreads)
    {
        // row-major lhs, row-major rhs: each thread owns one result row
#pragma omp parallel for num_threads(nbThreads)
        for(Index j=0; j<lhs.outerSize(); ++j)
        {
            typename Res::RowXpr res_j(res.row(j));
            for(LhsInnerIterator it(lhs,j); it ;++it)
                res_j += (alpha*it.value()) * rhs.row(it.index());
        }
    }
};

template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl_MT<SparseLhsType,DenseRhsType,DenseResType, ColMajor, false>
{
    typedef typename internal::remove_all<SparseLhsType>::type Lhs;
    typedef typename internal::remove_all<DenseRhsType>::type Rhs;
    typedef typename internal::remove_all<DenseResType>::type Res;
    typedef typename Lhs::InnerIterator LhsInnerIterator;
    typedef typename Lhs::Index Index;

    static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha, unsigned nbThreads)
    {
        // NOTE(review): res.row(it.index()) updates can collide across threads
        // for a column-major lhs — same potential race as the ColMajor/true
        // variant; confirm usage guarantees.
#pragma omp parallel for num_threads(nbThreads)
        for(Index j=0; j<lhs.outerSize(); ++j)
        {
            typename Rhs::ConstRowXpr rhs_j(rhs.row(j));
            for(LhsInnerIterator it(lhs,j); it ;++it)
                res.row(it.index()) += (alpha*it.value()) * rhs_j;
        }
    }
};

/// entry point: forward to the storage-order-specific implementation
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,typename AlphaType>
inline void sparse_time_dense_product_MT(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha, unsigned nbThreads)
{
    sparse_time_dense_product_impl_MT<SparseLhsType,DenseRhsType,DenseResType>::run(lhs, rhs, res, alpha, nbThreads);
}

} // end namespace internal

/// Product expression for sparse * dense with a user-chosen thread count.
/// Falls back to Eigen's sequential kernel for thin/small operands or when
/// OpenMP is unavailable.
template<typename Lhs, typename Rhs>
class SparseTimeDenseProduct_MT
    : public ProductBase<SparseTimeDenseProduct_MT<Lhs,Rhs>, Lhs, Rhs>
{
    unsigned m_nbThreads;

  public:
    EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseTimeDenseProduct_MT)

    SparseTimeDenseProduct_MT(const Lhs& lhs, const Rhs& rhs, unsigned nbThreads) : Base(lhs,rhs), m_nbThreads(nbThreads)
    {}

    template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
    {
#ifdef _OPENMP
        // no multithreading for too small vectors
        if( ( m_rhs.cols() == 1 && m_rhs.rows()<3000 ) || m_nbThreads==1 )
            internal::sparse_time_dense_product(m_lhs, m_rhs, dest, alpha);
        else
        {
//            std::cerr<<"SparseTimeDenseProduct_MT: "<<m_nbThreads<<std::endl;
            internal::sparse_time_dense_product_MT<Lhs,Rhs,Dest,Scalar>(m_lhs, m_rhs, dest, alpha, m_nbThreads);
        }
#else
        internal::sparse_time_dense_product(m_lhs, m_rhs, dest, alpha);
#endif
    }

  private:
    SparseTimeDenseProduct_MT& operator=(const SparseTimeDenseProduct_MT&);
};

} // namespace Eigen

namespace sofa {
namespace component {
namespace linearsolver {

#ifndef OMP_DEFAULT_NUM_THREADS_EIGEN_SPARSE_DENSE_PRODUCT
#define OMP_DEFAULT_NUM_THREADS_EIGEN_SPARSE_DENSE_PRODUCT 1
#endif

/// Eigen::Sparse * Dense Matrices multiplication (openmp multi-threaded version)
template<typename Derived, typename OtherDerived >
inline const typename Eigen::SparseDenseProductReturnType_MT<Derived,OtherDerived>::Type
mul_EigenSparseDenseMatrix_MT( const Eigen::SparseMatrixBase<Derived>& lhs, const Eigen::MatrixBase<OtherDerived>& rhs, unsigned nbThreads=OMP_DEFAULT_NUM_THREADS_EIGEN_SPARSE_DENSE_PRODUCT )
{
    return typename Eigen::SparseDenseProductReturnType_MT<Derived,OtherDerived>::Type( lhs.derived(), rhs.derived(), nbThreads );
}

} } }

#endif // EIGENBASESPARSEMATRIX_MT_H
mkl_util.h
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef TENSORFLOW_CORE_UTIL_MKL_UTIL_H_ #define TENSORFLOW_CORE_UTIL_MKL_UTIL_H_ #ifdef INTEL_MKL #include <list> #include <memory> #include <string> #include <unordered_map> #include <utility> #include <vector> #if defined(INTEL_MKL_ML_ONLY) || defined(INTEL_MKL_DNN_ONLY) #ifndef INTEL_MKL #error "INTEL_MKL_{ML,DNN}_ONLY require INTEL_MKL" #endif #endif #if defined(INTEL_MKL_ML_ONLY) && defined(INTEL_MKL_DNN_ONLY) #error "at most one of INTEL_MKL_ML_ONLY and INTEL_MKL_DNN_ONLY may be defined" #endif #ifdef INTEL_MKL_ML_ONLY #error "Please use INTEL MKL DNN (the default option for --config=mkl)." 
#endif #ifdef INTEL_MKL_ML_ONLY #include "mkl_dnn.h" #include "mkl_dnn_types.h" #include "mkl_service.h" #include "mkl_trans.h" #endif #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/graph/mkl_graph_util.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/array_slice.h" #include "tensorflow/core/platform/cpu_info.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/util/env_var.h" #include "tensorflow/core/util/padding.h" #include "tensorflow/core/util/tensor_format.h" #ifndef INTEL_MKL_ML_ONLY #include "mkldnn.hpp" #include "tensorflow/core/lib/core/stringpiece.h" using mkldnn::engine; using mkldnn::memory; using mkldnn::padding_kind; using mkldnn::primitive; using mkldnn::reorder; #endif #ifdef _WIN32 typedef unsigned int uint; #endif namespace tensorflow { // The file contains a number of utility classes and functions used by MKL // enabled kernels // This class encapsulates all the meta data that is associated with an MKL // tensor. A tensor is an MKL tensor if it was created as the result of an // MKL operation, and did not go through a conversion to a standard // Tensorflow tensor. // For use with MKL ML, has been deprecated typedef enum { W = 0, H = 1, C = 2, N = 3 } MklDims; // The dimensions order that MKL-DNN internally uses for 2D activations // [Batch, Channel, Height, Width] and // for 2D filters [Out_Channel, In_Channel, Height, Width]. typedef enum { Dim_N = 0, Dim_C = 1, Dim_H = 2, Dim_W = 3, Dim_O = 0, Dim_I = 1 } MklDnnDims; // The dimensions order that MKL-DNN internally uses for 3D activations // [Batch, Channel, Depth, Height, Width] and // for 3D filters [Out_Channel, In_Channel, Depth, Height, Width]. 
typedef enum { Dim3d_N = 0, Dim3d_C = 1, Dim3d_D = 2, Dim3d_H = 3, Dim3d_W = 4, Dim3d_O = 0, Dim3d_I = 1 } MklDnnDims3D; // Enum for the order of dimensions of a TF 2D filter with shape [filter_height, // filter_width, in_channels, out_channels] typedef enum { TF_2DFILTER_DIM_H = 0, TF_2DFILTER_DIM_W = 1, TF_2DFILTER_DIM_I = 2, TF_2DFILTER_DIM_O = 3 } TFFilterDims2d; // Enum for the order of dimensions of a TF 3D filter with shape [filter_depth, // filter_height, filter_width, in_channels, out_channels] typedef enum { TF_3DFILTER_DIM_P = 0, TF_3DFILTER_DIM_H = 1, TF_3DFILTER_DIM_W = 2, TF_3DFILTER_DIM_I = 3, TF_3DFILTER_DIM_O = 4 } TFFilterDims3d; // The dimensions order that MKL-DNN requires for the filter in a grouped // convolution (2D only) typedef enum { MKL_GROUP_FILTER_DIM_G = 0, MKL_GROUP_FILTER_DIM_O = 1, MKL_GROUP_FILTER_DIM_I = 2, MKL_GROUP_FILTER_DIM_H = 3, MKL_GROUP_FILTER_DIM_W = 4 } MklDnnFilterGroupDims; // Enum used to templatize MklOp kernel implementations // that support both fp32 and int8 versions. 
// Distinguishes quantized vs. floating-point kernel variants.
enum class MklQuantization {
  QUANTIZED_VERSION,
  FP_VERSION,
};

static const int kSmallBatchSize = 32;

#ifdef INTEL_MKL_ML_ONLY
// Shape metadata for an MKL-ML tensor (legacy MKL-ML code path; only compiled
// when INTEL_MKL_ML_ONLY is defined). Tracks both the MKL layout and the
// corresponding TensorFlow layout plus the TF->MKL dimension-order map.
class MklShape {
 public:
  MklShape() {}
  TF_DISALLOW_COPY_AND_ASSIGN(MklShape);  // Cannot copy

  // Releases the owned size/stride/dim-map arrays and deletes any MKL
  // layouts that were created for this shape.
  ~MklShape() {
    if (sizes_) delete[] sizes_;
    if (strides_) delete[] strides_;
    if (mklLayout_) CHECK_EQ(dnnLayoutDelete_F32(mklLayout_), E_SUCCESS);
    if (tfLayout_) CHECK_EQ(dnnLayoutDelete_F32(tfLayout_), E_SUCCESS);
    if (tf_to_mkl_dim_map_) delete[] tf_to_mkl_dim_map_;
  }

  const bool IsMklTensor() const { return isMklTensor_; }

  void SetMklTensor(const bool isMklTensor) { isMklTensor_ = isMklTensor; }

  void SetDimensions(const size_t dimension) { dimension_ = dimension; }

  // Takes ownership of an already-created MKL layout.
  void SetMklLayout(dnnLayout_t mklLayout) { mklLayout_ = mklLayout; }

  // Creates the MKL layout from a primitive + resource type pair.
  void SetMklLayout(const void* primitive, size_t resourceType) {
    CHECK_EQ(
        dnnLayoutCreateFromPrimitive_F32(&mklLayout_, (dnnPrimitive_t)primitive,
                                         (dnnResourceType_t)resourceType),
        E_SUCCESS);
  }

  // Records the TF-side sizes/strides and builds the matching MKL layout
  // describing the plain TensorFlow memory ordering.
  void SetTfLayout(const size_t dimension, const size_t* sizes,
                   const size_t* strides) {
    dimension_ = dimension;
    if (dimension > 0) {  // MKl doesn't support zero dimension tensors
      sizes_ = new size_t[dimension];
      strides_ = new size_t[dimension];
      for (int ii = 0; ii < dimension; ii++) {
        sizes_[ii] = sizes[ii];
        strides_[ii] = strides[ii];
      }
      CHECK_EQ(dnnLayoutCreate_F32(&tfLayout_, dimension, sizes, strides),
               E_SUCCESS);
    }
  }

  // Default case - MKL dim ordering is opposite of TF dim ordering
  // MKL -> (DIMS-1)...0 where (DIMS-1) is outermost dim and 0 is innermost dim
  // TF -> 0...(DIMS-1) where 0 is outermost dim and (DIMS-1) is innermost dim
  // For layers that rely on data_format semantics (conv, pooling etc.)
  // or operate only on certain dimensions (relu, concat, split etc.),
  // Mkl APIs might require us to reorder these dimensions. In such cases,
  // kernels should explicitly set this map
  void SetTfDimOrder(const size_t dimension) {
    CHECK(dimension == dimension_);
    if (tf_to_mkl_dim_map_ == nullptr) {
      tf_to_mkl_dim_map_ = new size_t[dimension];
    }
    for (size_t ii = 0; ii < dimension; ii++) {
      // Reversed order: TF dim ii maps to MKL dim (dimension - 1 - ii).
      tf_to_mkl_dim_map_[ii] = dimension - (ii + 1);
    }
  }

  // Copies an explicit caller-provided TF->MKL dimension map.
  void SetTfDimOrder(const size_t dimension, const size_t* tf_to_mkl_dim_map) {
    CHECK(dimension == dimension_);
    if (tf_to_mkl_dim_map_ == nullptr) {
      tf_to_mkl_dim_map_ = new size_t[dimension];
    }
    for (size_t ii = 0; ii < dimension; ii++) {
      tf_to_mkl_dim_map_[ii] = tf_to_mkl_dim_map[ii];
    }
  }

  // Builds the TF->MKL map from a 4-D TensorFlow data_format (NCHW/NHWC).
  void SetTfDimOrder(const size_t dimension, TensorFormat data_format) {
    CHECK_EQ(dimension, 4);
    CHECK(dimension == dimension_);
    if (tf_to_mkl_dim_map_ == nullptr) {
      tf_to_mkl_dim_map_ = new size_t[dimension];
    }
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDims::W;
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDims::H;
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDims::C;
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDims::N;
  }

  const dnnLayout_t GetMklLayout() const { return mklLayout_; }
  const dnnLayout_t GetTfLayout() const { return tfLayout_; }
  // Layout in effect for the tensor's current representation.
  const dnnLayout_t GetCurLayout() const {
    return isMklTensor_ ? mklLayout_ : tfLayout_;
  }
  size_t GetDimension() const { return dimension_; }
  const size_t* GetSizes() const { return sizes_; }
  // Size of dimension 'index' in MKL dimension order.
  int64 dim_size(int index) const { return sizes_[index]; }
  // Size of dimension 'index' in TF dimension order (via the TF->MKL map).
  int64 tf_dim_size(int index) const {
    return sizes_[tf_to_mkl_dim_map_[index]];
  }
  const size_t* GetStrides() const { return strides_; }
  const size_t* GetTfToMklDimMap() const { return tf_to_mkl_dim_map_; }
  size_t tf_dim_idx(int index) const { return tf_to_mkl_dim_map_[index]; }

  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Channel dimension.
  bool IsMklChannelDim(int d) const { return tf_dim_idx(d) == MklDims::C; }
  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Batch dimension.
  bool IsMklBatchDim(int d) const { return tf_dim_idx(d) == MklDims::N; }
  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Width dimension.
  bool IsMklWidthDim(int d) const { return tf_dim_idx(d) == MklDims::W; }
  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Height dimension.
  bool IsMklHeightDim(int d) const { return tf_dim_idx(d) == MklDims::H; }

  // Check if the TF-Mkl dimension ordering map specifies if the input
  // tensor is in NCHW format.
  bool IsTensorInNCHWFormat() const {
    TensorFormat data_format = FORMAT_NCHW;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  // Check if the TF-Mkl dimension ordering map specifies if the input
  // tensor is in NHWC format.
  bool IsTensorInNHWCFormat() const {
    TensorFormat data_format = FORMAT_NHWC;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  // Converts 'input' (in the tensor's current layout) into 'output' in
  // 'targetLayout' using an MKL conversion primitive created on the fly.
  void GetConvertedFlatData(dnnLayout_t targetLayout, void* input,
                            void* output) const {
    dnnLayout_t curLayout;
    if (isMklTensor_)
      curLayout = mklLayout_;
    else
      curLayout = tfLayout_;
    dnnPrimitive_t convert;
    CHECK_EQ(dnnConversionCreate_F32(&convert, curLayout, targetLayout),
             E_SUCCESS);
    CHECK_EQ(dnnConversionExecute_F32(convert, input, output), E_SUCCESS);
    CHECK_EQ(dnnDelete_F32(convert), E_SUCCESS);
  }

  // The following methods are used for serializing and de-serializing the
  // contents of the mklshape object.
// The data is serialized in this order
// isMklTensor_
// dimension_
// sizes_
// strides_
// mklLayout_
// tfLayout_
// tf_to_mkl_dim_map_

#define SIZE_OF_MKL_DNN_BUF \
  (dnnLayoutSerializationBufferSize_F32())  // Size of buffer needed to
                                            // serialize dnn_layout pointer

// Size of buffer to hold the serialized object, the size is computed as
// follows sizeof(isMklTensor_) + sizeof(dimension_) + sizeof(sizes_) +
// sizeof(strides_)
// + sizeof(mklLayout_ buffer) + sizeof(tfLayout_ buffer)
// + sizeof(tf_to_mkl_dim_map_)
#define SIZE_OF_MKL_SERIAL_DATA(dims) \
  (2 * sizeof(size_t) + 3 * dims * sizeof(size_t) + 2 * SIZE_OF_MKL_DNN_BUF)

// First we need to define some macro for offsets into the serial buffer where
// different elements of Mklshape is written/read from
#define IS_MKL_TENSOR_OFFSET 0
// Location from start of buffer where isMklTensor_ is serialized
#define DIMS_OFFSET \
  (IS_MKL_TENSOR_OFFSET + sizeof(size_t))  // Location of dimension_
// Location of sizes. Note dim is not used here, left here
// to make macros consistent.
#define SIZES_OFFSET(dims) (DIMS_OFFSET + sizeof(size_t))
#define STRIDES_OFFSET(dims) \
  (SIZES_OFFSET(dims) + dims * sizeof(size_t))  // Location of strides
#define MKL_LAYOUT_OFFSET(dims) \
  (STRIDES_OFFSET(dims) + dims * sizeof(size_t))  // Location of mklLayout_
#define TF_LAYOUT_OFFSET(dims) \
  (MKL_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF)  // Location of tfLayout_
// Location of tf_to_mkl_dim_map_
#define TF_TO_MKL_DIM_MAP_OFFSET(dims) \
  (TF_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF)

  // TODO(agramesh1) make sure to create a const to share with rewrite pass
  // for min size of MKL metadata tensor.

  // Reconstructs this MklShape from the serialized byte buffer produced by
  // SerializeMklShape(). Allocates fresh sizes_/strides_/dim-map arrays and
  // deserializes both MKL layouts when the tensor is marked as an MKL tensor.
  // NOTE(review): the buffer is read via reinterpret_cast<size_t*> at the
  // macro offsets above — the reader and writer must agree on size_t width
  // and endianness (serialization is in-process only here).
  void DeSerializeMklShape(const unsigned char* buf, size_t buf_size) {
    CHECK(buf_size >= sizeof(size_t)) << "Bufsize too small in DeSerialize";
    // Make sure buffer holds at least isMklTensor_
    isMklTensor_ =
        *reinterpret_cast<const size_t*>(buf + IS_MKL_TENSOR_OFFSET) != 0;

    if (isMklTensor_) {  // If it is an MKL Tensor then read the rest
      dimension_ = *(reinterpret_cast<const size_t*>(buf + DIMS_OFFSET));
      CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
          << "Bufsize too small in DeSerialize";
      sizes_ = new size_t[dimension_];
      strides_ = new size_t[dimension_];
      tf_to_mkl_dim_map_ = new size_t[dimension_];
      for (int i = 0; i < dimension_; i++) {
        sizes_[i] =
            reinterpret_cast<const size_t*>(buf + SIZES_OFFSET(dimension_))[i];
        strides_[i] = reinterpret_cast<const size_t*>(
            buf + STRIDES_OFFSET(dimension_))[i];
        tf_to_mkl_dim_map_[i] = reinterpret_cast<const size_t*>(
            buf + TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i];
      }
      CHECK_EQ(dnnLayoutDeserialize_F32(&mklLayout_,
                                        buf + MKL_LAYOUT_OFFSET(dimension_)),
               E_SUCCESS);
      CHECK_EQ(dnnLayoutDeserialize_F32(&tfLayout_,
                                        buf + TF_LAYOUT_OFFSET(dimension_)),
               E_SUCCESS);
    }
  }

  // Writes this MklShape into 'buf' in the fixed order documented above.
  // Inverse of DeSerializeMklShape().
  void SerializeMklShape(unsigned char* buf, size_t buf_size) const {
    CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
        << "Bufsize too small to Serialize";
    *reinterpret_cast<size_t*>(buf + IS_MKL_TENSOR_OFFSET) =
        isMklTensor_ ? 1 : 0;
    if (isMklTensor_) {
      *(reinterpret_cast<size_t*>(buf + DIMS_OFFSET)) = dimension_;
      for (int i = 0; i < dimension_; i++) {
        reinterpret_cast<size_t*>(buf + SIZES_OFFSET(dimension_))[i] =
            sizes_[i];
        reinterpret_cast<size_t*>(buf + STRIDES_OFFSET(dimension_))[i] =
            strides_[i];
        reinterpret_cast<size_t*>(buf +
                                  TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i] =
            tf_to_mkl_dim_map_[i];
      }
      CHECK_EQ(dnnLayoutSerialize_F32(mklLayout_,
                                      buf + MKL_LAYOUT_OFFSET(dimension_)),
               E_SUCCESS);
      CHECK_EQ(
          dnnLayoutSerialize_F32(tfLayout_, buf + TF_LAYOUT_OFFSET(dimension_)),
          E_SUCCESS);
    }
  }

 private:
  bool isMklTensor_ = false;  // Flag to indicate if the tensor is an MKL tensor
                              // or not
  dnnLayout_t mklLayout_ = nullptr;  // Pointer to the MKL layout
  dnnLayout_t tfLayout_ = nullptr;   // Pointer to layout of corresponding
  // Tensorflow tensor, used when conversion from MKL to standard tensor
  size_t dimension_ = 0;
  size_t* sizes_ = nullptr;    // Required by MKL for conversions
  size_t* strides_ = nullptr;  // Required by MKL for conversions
  size_t* tf_to_mkl_dim_map_ =
      nullptr;  // TF dimension corresponding to this MKL dimension
};

#else

// Forward decl
TensorFormat MklDnn3DDataFormatToTFDataFormat(memory::format format);
TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format);
memory::dims CalculateTFStrides(const memory::dims& dims_tf_order);
memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
                                        const memory::dims& strides,
                                        memory::data_type dtype);

// Shape metadata for an MKL-DNN tensor (modern code path). All state lives in
// the POD MklShapeData struct so the whole object can be (de)serialized with
// a single struct copy.
class MklDnnShape {
 private:
  typedef struct {
    /// Flag to indicate if the tensor is an MKL tensor or not
    bool is_mkl_tensor_ = false;
    /// Number of dimensions in Tensorflow format
    size_t dimension_ = 0;
    /// Required by MKLDNN for conversions
    mkldnn_dims_t sizes_;  // Required by MKL for conversions
    memory::format tf_data_format_ = memory::format::format_undef;
    memory::data_type T_ = memory::data_type::data_undef;
    // MKL layout
    mkldnn_memory_desc_t mkl_md_;
    /// TF dimension corresponding to this MKL dimension
    mkldnn_dims_t map_;
  }
MklShapeData;
  MklShapeData data_;

  typedef std::remove_extent<mkldnn_dims_t>::type mkldnn_dim_t;

#define INVALID_DIM_SIZE -1

 public:
  // Initializes all size and dimension-map slots to the INVALID_DIM_SIZE
  // sentinel (-1) so unused slots are recognizable.
  MklDnnShape() {
    for (size_t i = 0; i < sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
         ++i) {
      data_.sizes_[i] = -1;
    }
    for (size_t i = 0; i < sizeof(data_.map_) / sizeof(data_.map_[0]); ++i) {
      data_.map_[i] = -1;
    }
  }

  ~MklDnnShape() {}
  TF_DISALLOW_COPY_AND_ASSIGN(MklDnnShape);  // Cannot copy

  /// Helper function to compare memory::desc objects for MklDnn.
  /// May be this should go into MklDnn directly.
  /// Compares the two underlying C descriptor structs byte-by-byte.
  /// NOTE(review): a bytewise compare of a struct can also compare padding
  /// bytes, which may be uninitialized — confirm mkldnn_memory_desc_t has no
  /// padding (or is always zero-initialized) before relying on this.
  inline bool CompareMklDnnLayouts(const memory::desc& md1,
                                   const memory::desc& md2) const {
    mkldnn_memory_desc_t mdd1 = md1.data;
    mkldnn_memory_desc_t mdd2 = md2.data;
    const char* d1 = reinterpret_cast<const char*>(&mdd1);
    const char* d2 = reinterpret_cast<const char*>(&mdd2);

    size_t md_size = sizeof(mdd1);
    for (size_t i = 0; i < md_size; i++) {
      if (*d1++ != *d2++) {
        return false;
      }
    }
    return true;
  }

  /// Equality function for MklDnnShape objects
  /// @return true if both are equal; false otherwise.
  inline bool operator==(const MklDnnShape& input_shape) const {
    if (this->IsMklTensor() != input_shape.IsMklTensor()) {
      return false;
    }

    // If input tensors are in Mkl layout, then we check for dimensions and
    // sizes.
    if (this->IsMklTensor()) {
      return this->GetTfShape() == input_shape.GetTfShape() &&
             CompareMklDnnLayouts(this->GetMklLayout(),
                                  input_shape.GetMklLayout());
    }

    return true;
  }

  /// Equality operator for MklDnnShape and TFShape.
  /// Returns: true if TF shapes for both are the same, false otherwise
  inline bool operator==(const TensorShape& input_shape) const {
    if (!this->IsMklTensor()) {
      return false;
    }

    return this->GetTfShape() == input_shape;
  }

  inline const bool IsMklTensor() const { return data_.is_mkl_tensor_; }
  inline void SetMklTensor(bool is_mkl_tensor) {
    data_.is_mkl_tensor_ = is_mkl_tensor;
  }

  inline void SetDimensions(const size_t dimension) {
    data_.dimension_ = dimension;
  }

  // Size of the dimension named by 'N'/'C'/'H'/'W' (4-D tensors).
  inline size_t GetDimension(char dimension) const {
    int index = GetMklDnnTensorDimIndex(dimension);
    CHECK(index >= 0 && index < this->GetDimension())
        << "Invalid index from the dimension: " << index << ", " << dimension;
    return this->DimSize(index);
  }

  // Size of the dimension named by 'N'/'C'/'D'/'H'/'W' (5-D tensors).
  inline size_t GetDimension3D(char dimension) const {
    int index = GetMklDnnTensor3DDimIndex(dimension);
    CHECK(index >= 0 && index < this->GetDimension())
        << "Invalid index from the dimension: " << index << ", " << dimension;
    return this->DimSize(index);
  }

  // Maps a 4-D dimension letter to its MklDnnDims index; LOG(FATAL) on any
  // other letter.
  inline int32 GetMklDnnTensorDimIndex(char dimension) const {
    switch (dimension) {
      case 'N':
        return MklDnnDims::Dim_N;
      case 'C':
        return MklDnnDims::Dim_C;
      case 'H':
        return MklDnnDims::Dim_H;
      case 'W':
        return MklDnnDims::Dim_W;
      default:
        LOG(FATAL) << "Invalid dimension: " << dimension;
        return -1;  // Avoid compiler warning about missing return value
    }
  }

  // Maps a 5-D dimension letter to its MklDnnDims3D index; LOG(FATAL) on any
  // other letter.
  inline int32 GetMklDnnTensor3DDimIndex(char dimension) const {
    switch (dimension) {
      case 'N':
        return MklDnnDims3D::Dim3d_N;
      case 'C':
        return MklDnnDims3D::Dim3d_C;
      case 'D':
        return MklDnnDims3D::Dim3d_D;
      case 'H':
        return MklDnnDims3D::Dim3d_H;
      case 'W':
        return MklDnnDims3D::Dim3d_W;
      default:
        LOG(FATAL) << "Invalid dimension: " << dimension;
        return -1;  // Avoid compiler warning about missing return value
    }
  }

  inline size_t GetDimension() const { return data_.dimension_; }
  inline const int* GetSizes() const {
    return reinterpret_cast<const int*>(&data_.sizes_[0]);
  }

  // Returns an mkldnn::memory::dims object that contains the sizes of this
  // MklDnnShape object.
  // Only valid sizes (slots not equal to INVALID_DIM_SIZE) are included;
  // CHECK-fails when called on a non-MKL tensor.
  inline memory::dims GetSizesAsMklDnnDims() const {
    memory::dims retVal;
    if (data_.is_mkl_tensor_) {
      size_t dimensions = sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
      for (size_t i = 0; i < dimensions; i++) {
        if (data_.sizes_[i] != INVALID_DIM_SIZE)
          retVal.push_back(data_.sizes_[i]);
      }
    } else {
      CHECK_EQ(data_.is_mkl_tensor_, true);
    }
    return retVal;
  }

  inline int64 DimSize(int index) const {
    CHECK_LT(index, sizeof(data_.sizes_) / sizeof(data_.sizes_[0]));
    return data_.sizes_[index];
  }

  /// Return TensorShape that describes the Tensorflow shape of the tensor
  /// represented by this MklShape.
  inline TensorShape GetTfShape() const {
    CHECK_EQ(data_.is_mkl_tensor_, true);

    std::vector<int32> shape(data_.dimension_, -1);
    if (data_.tf_data_format_ != memory::format::blocked) {
      for (size_t idx = 0; idx < data_.dimension_; ++idx) {
        shape[idx] = data_.sizes_[TfDimIdx(idx)];
      }
    } else {
      // If Tensorflow shape is in Blocked format, then we don't have dimension
      // map for it. So we just create Tensorflow shape from sizes in the
      // specified order.
      for (size_t idx = 0; idx < data_.dimension_; ++idx) {
        shape[idx] = data_.sizes_[idx];
      }
    }

    TensorShape ts;
    bool ret = TensorShapeUtils::MakeShape(shape, &ts).ok();
    CHECK_EQ(ret, true);
    return ts;
  }

  inline void SetElemType(memory::data_type dt) { data_.T_ = dt; }
  inline const memory::data_type GetElemType() { return data_.T_; }

  // Records the MKL layout by copying the descriptor out of the given
  // primitive_desc.
  inline void SetMklLayout(memory::primitive_desc* pd) {
    CHECK_NOTNULL(pd);
    data_.mkl_md_ = pd->desc().data;
  }

  // Records the MKL layout directly from a memory::desc.
  inline void SetMklLayout(memory::desc* md) {
    CHECK_NOTNULL(md);
    data_.mkl_md_ = md->data;
  }

  inline const memory::desc GetMklLayout() const {
    return memory::desc(data_.mkl_md_);
  }

  inline memory::format GetTfDataFormat() const {
    return data_.tf_data_format_;
  }

  /// We don't create primitive_descriptor for TensorFlow layout now.
  /// We use lazy evaluation and create it only when needed. Input format can
  /// also be Blocked format.
inline void SetTfLayout(size_t dims, const memory::dims& sizes,
                          memory::format format) {
    CHECK_EQ(dims, sizes.size());
    data_.dimension_ = dims;
    for (size_t ii = 0; ii < dims; ii++) {
      data_.sizes_[ii] = sizes[ii];
    }
    data_.tf_data_format_ = format;
    // Blocked format carries no named dimension order, so no map is built.
    if (format != memory::format::blocked) {
      SetTfDimOrder(dims, format);
    }
  }

  // Builds the TF-side memory::desc lazily from the stored sizes and format.
  inline const memory::desc GetTfLayout() const {
    memory::dims dims;
    for (size_t ii = 0; ii < data_.dimension_; ii++) {
      dims.push_back(data_.sizes_[ii]);
    }

    // Create Blocked memory desc if input TF format was set like that.
    if (data_.tf_data_format_ == memory::format::blocked) {
      auto strides = CalculateTFStrides(dims);
      return CreateBlockedMemDescHelper(dims, strides, data_.T_);
    } else {
      return memory::desc(dims, data_.T_, data_.tf_data_format_);
    }
  }

  // Layout describing the tensor's current representation (MKL or TF).
  inline const memory::desc GetCurLayout() const {
    return IsMklTensor() ? GetMklLayout() : GetTfLayout();
  }

  // nhasabni - I've removed SetTfDimOrder that was setting default order in
  // case of MKL-ML. We don't need a case of default dimension order because
  // when an operator that does not get data_format attribute gets all inputs
  // in Tensorflow format, it will produce output in Tensorflow format.
  // Copies an explicit caller-provided TF->MKL dimension map.
  inline void SetTfDimOrder(const size_t dimension, const mkldnn_dims_t map) {
    CHECK(dimension == data_.dimension_);
    for (size_t ii = 0; ii < dimension; ii++) {
      data_.map_[ii] = map[ii];
    }
  }

  // Builds the TF->MKL map from a TensorFlow data_format; supports 5-D
  // (NDHWC/NCDHW-style) and 4-D (NHWC/NCHW) tensors only.
  inline void SetTfDimOrder(const size_t dimension, TensorFormat data_format) {
    if (dimension == 5) {
      CHECK(dimension == data_.dimension_);
      data_.map_[GetTensorDimIndex<3>(data_format, '0')] =
          MklDnnDims3D::Dim3d_D;
      data_.map_[GetTensorDimIndex<3>(data_format, '1')] =
          MklDnnDims3D::Dim3d_H;
      data_.map_[GetTensorDimIndex<3>(data_format, '2')] =
          MklDnnDims3D::Dim3d_W;
      data_.map_[GetTensorDimIndex<3>(data_format, 'C')] =
          MklDnnDims3D::Dim3d_C;
      data_.map_[GetTensorDimIndex<3>(data_format, 'N')] =
          MklDnnDims3D::Dim3d_N;
    } else {
      CHECK_EQ(dimension, 4);
      CHECK(dimension == data_.dimension_);
      data_.map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDnnDims::Dim_W;
      data_.map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDnnDims::Dim_H;
      data_.map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDnnDims::Dim_C;
      data_.map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDnnDims::Dim_N;
    }
  }

  // Convenience overload: translates an MKL-DNN memory format to a TF
  // data_format first.
  inline void SetTfDimOrder(const size_t dimension, memory::format format) {
    TensorFormat data_format = MklDnnDataFormatToTFDataFormat(format);
    SetTfDimOrder(dimension, data_format);
  }

  inline const mkldnn_dim_t* GetTfToMklDimMap() const {
    return &data_.map_[0];
  }
  inline size_t TfDimIdx(int index) const { return data_.map_[index]; }
  inline int64 TfDimSize(int index) const {
    return data_.sizes_[TfDimIdx(index)];
  }

  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Channel dimension.
  inline bool IsMklChannelDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_C;
  }

  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Batch dimension.
  inline bool IsMklBatchDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_N;
  }

  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Width dimension.
  inline bool IsMklWidthDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_W;
  }
  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Height dimension.
  inline bool IsMklHeightDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_H;
  }

  /// Check if the TF-Mkl dimension ordering map specifies if the input
  /// tensor is in NCHW format.
  inline bool IsTensorInNCHWFormat() const {
    TensorFormat data_format = FORMAT_NCHW;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  /// Check if the TF-Mkl dimension ordering map specifies if the input
  /// tensor is in NHWC format.
  inline bool IsTensorInNHWCFormat() const {
    TensorFormat data_format = FORMAT_NHWC;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  /// The following methods are used for serializing and de-serializing the
  /// contents of the mklshape object.
/// The data is serialized in this order
  /// is_mkl_tensor_ : dimension_ : sizes_ : map_: format_ : T_ : mkl_pd_;

  /// Size of buffer to hold the serialized object, the size is computed by
  /// following above mentioned order
  inline size_t GetSerializeBufferSize() const { return sizeof(MklShapeData); }

  // Serializes by copying the whole POD MklShapeData struct into 'buf'.
  void SerializeMklDnnShape(unsigned char* buf, size_t buf_size) const {
    CHECK(buf_size >= GetSerializeBufferSize())
        << "Buffer size is too small to SerializeMklDnnShape";
    *reinterpret_cast<MklShapeData*>(buf) = data_;
  }

  // Deserializes by struct copy; only the leading is_mkl_tensor_ flag is read
  // when the tensor is not an MKL tensor.
  void DeSerializeMklDnnShape(const unsigned char* buf, size_t buf_size) {
    // Make sure buffer holds at least is_mkl_tensor_.
    CHECK(buf_size >= sizeof(data_.is_mkl_tensor_))
        << "Buffer size is too small in DeSerializeMklDnnShape";

    const bool is_mkl_tensor = *reinterpret_cast<const bool*>(buf);
    if (is_mkl_tensor) {  // If it is an MKL Tensor then read the rest
      CHECK(buf_size >= GetSerializeBufferSize())
          << "Buffer size is too small in DeSerializeMklDnnShape";
      data_ = *reinterpret_cast<const MklShapeData*>(buf);
    }
  }
};

#endif

// List of MklShape objects. Used in Concat/Split layers.
#ifndef INTEL_MKL_ML_ONLY
typedef std::vector<MklDnnShape> MklDnnShapeList;
#else
typedef std::vector<MklShape> MklShapeList;
#endif

#ifdef INTEL_MKL_ML_ONLY
// Check if all tensors specified by MklShapes are MKL tensors.
inline bool AreAllMklTensors(const MklShapeList& shapes) {
  for (auto& s : shapes) {
    if (!s.IsMklTensor()) {
      return false;
    }
  }
  return true;
}

// Converts an MKL-laid-out tensor into a freshly allocated tensor in plain
// TensorFlow layout (legacy MKL-ML variant).
template <typename T>
inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor,
                             const MklShape& mkl_shape) {
  Tensor output_tensor;
  TensorShape output_shape;

  for (size_t j = 0; j < mkl_shape.GetDimension(); j++) {
    // Outermost to innermost dimension
    output_shape.AddDim(mkl_shape.GetSizes()[mkl_shape.tf_dim_idx(j)]);
  }

  // Allocate output tensor.
  // NOTE(review): the Status returned by allocate_temp is ignored here; an
  // allocation failure would surface as a crash below instead of a clean
  // error — consider TF_CHECK_OK/OP_REQUIRES_OK.
  context->allocate_temp(DataTypeToEnum<T>::v(), output_shape, &output_tensor);

  dnnLayout_t output_layout =
      static_cast<dnnLayout_t>(mkl_shape.GetTfLayout());
  void* input_buffer = const_cast<T*>(mkl_tensor.flat<T>().data());
  void* output_buffer = const_cast<T*>(output_tensor.flat<T>().data());

  if (mkl_tensor.NumElements() != 0) {
    mkl_shape.GetConvertedFlatData(output_layout, input_buffer, output_buffer);
  }
  return output_tensor;
}
#else
using mkldnn::stream;
template <typename T>
class MklDnnData;

// Converts an MKL-DNN-laid-out tensor into plain TensorFlow layout via an
// MKL-DNN reorder primitive; passes the tensor through untouched if it is
// already in TF layout.
template <typename T>
inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor,
                             const MklDnnShape& mkl_shape) {
  Tensor output_tensor;
  try {
    if (!mkl_shape.IsMklTensor())
      return mkl_tensor;  // return input since it is already TF tensor

    TensorShape output_shape = mkl_shape.GetTfShape();

    // Allocate output tensor.
    // NOTE(review): allocate_temp Status ignored here as well — see the
    // MKL-ML variant above.
    context->allocate_temp(DataTypeToEnum<T>::v(), output_shape,
                           &output_tensor);

    auto cpu_engine = engine(engine::cpu, 0);
    MklDnnData<T> input(&cpu_engine);

    // Get Mkl layout of input tensor.
    auto input_mkl_md = mkl_shape.GetMklLayout();
    auto output_tf_md = mkl_shape.GetTfLayout();
    auto output_tf_pd = memory::primitive_desc(output_tf_md, cpu_engine);
    input.SetUsrMem(input_mkl_md, &mkl_tensor);

    // reorder
    if (input.IsReorderNeeded(output_tf_pd)) {
      std::vector<primitive> net;
      CHECK_EQ(input.CheckReorderToOpMem(output_tf_pd, &output_tensor, &net),
               true);
      stream(stream::kind::eager).submit(net).wait();
    } else {
      // If not, just forward input tensor to output tensor.
      CHECK(output_tensor.CopyFrom(mkl_tensor, output_shape));
    }
  } catch (mkldnn::error& e) {
    string error_msg = "Status: " + std::to_string(e.status) +
                       ", message: " + string(e.message) + ", in file " +
                       string(__FILE__) + ":" + std::to_string(__LINE__);
    LOG(FATAL) << "Operation received an exception: " << error_msg;
  }
  return output_tensor;
}
#endif

// Get the MKL shape from the second string tensor
#ifdef INTEL_MKL_ML_ONLY
inline void GetMklShape(OpKernelContext* ctext, int n, MklShape* mklshape) {
  mklshape->DeSerializeMklShape(
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
          .flat<uint8>()
          .data(),
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
              .flat<uint8>()
              .size() *
          sizeof(uint8));
}
#else
// Deserializes the MklDnnShape of input 'n' from its companion metadata
// tensor.
inline void GetMklShape(OpKernelContext* ctext, int n, MklDnnShape* mklshape) {
  mklshape->DeSerializeMklDnnShape(
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
          .flat<uint8>()
          .data(),
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
              .flat<uint8>()
              .size() *
          sizeof(uint8));
}
#endif

// Gets the actual input
inline const Tensor& MklGetInput(OpKernelContext* ctext, int n) {
  return ctext->input(GetTensorDataIndex(n, ctext->num_inputs()));
}

inline void GetMklInputList(OpKernelContext* ctext, StringPiece name,
                            OpInputList* input_tensors) {
  CHECK_NOTNULL(input_tensors);
  ctext->input_list(name, input_tensors);
}

#ifdef INTEL_MKL_ML_ONLY
// Deserializes every shape in the "mkl_<name>" metadata input list into
// the pre-sized 'mkl_shapes' vector.
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
                            MklShapeList* mkl_shapes) {
  OpInputList input_mkl_tensors;
  GetMklInputList(ctext, strings::StrCat("mkl_", name), &input_mkl_tensors);

  for (int i = 0; i < input_mkl_tensors.size(); i++) {
    (*mkl_shapes)[i].DeSerializeMklShape(
        input_mkl_tensors[i].flat<uint8>().data(),
        input_mkl_tensors[i].flat<uint8>().size() * sizeof(uint8));
  }
}
#else
// MKL-DNN counterpart of the above.
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
                            MklDnnShapeList* mkl_shapes) {
  OpInputList input_mkl_tensors;
  GetMklInputList(ctext, strings::StrCat("mkl_", name),
&input_mkl_tensors);

  for (int i = 0; i < input_mkl_tensors.size(); i++) {
    (*mkl_shapes)[i].DeSerializeMklDnnShape(
        input_mkl_tensors[i].flat<uint8>().data(),
        input_mkl_tensors[i].flat<uint8>().size() * sizeof(uint8));
  }
}
#endif

#ifndef INTEL_MKL_ML_ONLY
/// Get shape of input tensor pointed by 'input_idx' in TensorShape format.
/// If the input tensor is in MKL layout, then obtains TensorShape from
/// MklShape.
inline TensorShape GetTfShape(OpKernelContext* context, size_t input_idx) {
  // Sanity check.
  CHECK_NOTNULL(context);
  CHECK_LT(input_idx, context->num_inputs());

  MklDnnShape input_mkl_shape;
  GetMklShape(context, input_idx, &input_mkl_shape);
  if (input_mkl_shape.IsMklTensor()) {
    return input_mkl_shape.GetTfShape();
  } else {
    const Tensor& t = MklGetInput(context, input_idx);
    return t.shape();
  }
}
#endif

#ifdef INTEL_MKL_ML_ONLY
// Allocate the second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      const MklShape& mkl_shape) {
  Tensor* second_tensor = nullptr;
  TensorShape second_shape;
  second_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension()));
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            second_shape, &second_tensor));
  mkl_shape.SerializeMklShape(
      second_tensor->flat<uint8>().data(),
      second_tensor->flat<uint8>().size() * sizeof(uint8));
}
#else
// Allocate the second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      const MklDnnShape& mkl_shape) {
  Tensor* second_tensor = nullptr;
  TensorShape second_shape;
  second_shape.AddDim(mkl_shape.GetSerializeBufferSize());
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            second_shape, &second_tensor));
  mkl_shape.SerializeMklDnnShape(
      second_tensor->flat<uint8>().data(),
      second_tensor->flat<uint8>().size() * sizeof(uint8));
}
#endif

#ifdef INTEL_MKL_ML_ONLY
// Allocate the output tensor, create a second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      Tensor** output,
                                      const TensorShape& tf_shape,
                                      const MklShape& mkl_shape) {
  Tensor* second_tensor = nullptr;
  TensorShape second_shape;
  second_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension()));
  OP_REQUIRES_OK(
      ctext, ctext->allocate_output(GetTensorDataIndex(n, ctext->num_outputs()),
                                    tf_shape, output));
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            second_shape, &second_tensor));
  mkl_shape.SerializeMklShape(
      second_tensor->flat<uint8>().data(),
      second_tensor->flat<uint8>().size() * sizeof(uint8));
}
#else
// Allocate the output tensor, create a second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      Tensor** output,
                                      const TensorShape& tf_shape,
                                      const MklDnnShape& mkl_shape) {
  Tensor* second_tensor = nullptr;
  TensorShape second_shape;
  second_shape.AddDim(mkl_shape.GetSerializeBufferSize());
  OP_REQUIRES_OK(
      ctext, ctext->allocate_output(GetTensorDataIndex(n, ctext->num_outputs()),
                                    tf_shape, output));
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            second_shape, &second_tensor));
  mkl_shape.SerializeMklDnnShape(
      second_tensor->flat<uint8>().data(),
      second_tensor->flat<uint8>().size() * sizeof(uint8));
}
#endif

// Allocates a temp tensor and returns the data buffer for temporary storage.
// Currently
#ifndef INTEL_MKL_ML_ONLY
// Sizes the buffer from the MKL-DNN primitive_desc; the "+ 1" element adds
// slack for the integer division of get_size() by sizeof(T).
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           const memory::primitive_desc& pd, void** buf_out) {
  TensorShape tf_shape;

  tf_shape.AddDim(pd.get_size() / sizeof(T) + 1);
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
                                                 tf_shape, tensor_out));
  *buf_out = static_cast<void*>(tensor_out->flat<T>().data());
}
#else
// Legacy variant: sizes the buffer from an MKL-ML layout (always float).
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           dnnLayout_t lt_buff, void** buf_out) {
  TensorShape tf_shape;

  tf_shape.AddDim(
      dnnLayoutGetMemorySize_F32(static_cast<dnnLayout_t>(lt_buff)) /
          sizeof(float) +
      1);
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<float>::v(),
                                                 tf_shape, tensor_out));
  *buf_out = static_cast<void*>(tensor_out->flat<float>().data());
}
#endif

template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           TensorShape tf_shape) {
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
                                                 tf_shape, tensor_out));
}

// Fills 'strides' (in MKL dimension order) for a 4-D tensor given its sizes
// and the TF data format.
inline void GetStridesFromSizes(TensorFormat data_format, size_t* strides,
                                const size_t* sizes) {
  // MKL requires strides in NCHW
  if (data_format == FORMAT_NHWC) {
    strides[0] = sizes[2];
    strides[1] = sizes[0] * sizes[2];
    strides[2] = 1;
    strides[3] = sizes[0] * sizes[1] * sizes[2];
  } else {
    strides[0] = 1;
    strides[1] = sizes[0];
    strides[2] = sizes[0] * sizes[1];
    strides[3] = sizes[0] * sizes[1] * sizes[2];
  }
}

#ifdef INTEL_MKL_ML_ONLY
// Translates the MKL-ordered sizes of a 4-D MklShape into a TensorShape in
// the given TF data_format.
inline void MklSizesToTFSizes(OpKernelContext* context,
                              TensorFormat data_format_,
                              const MklShape& mkl_shape,
                              TensorShape* tf_shape) {
  size_t tf_dim = mkl_shape.GetDimension();
  const size_t* tf_sizes = mkl_shape.GetSizes();

  OP_REQUIRES(context, tf_dim == 4,
              errors::InvalidArgument("MKLSizesToTFSizes: size must be 4-dim"));
  std::vector<int32> sizes;

  sizes.push_back(tf_sizes[3]);

  if (data_format_ == FORMAT_NHWC) {
    sizes.push_back(tf_sizes[1]);
    sizes.push_back(tf_sizes[0]);
    sizes.push_back(tf_sizes[2]);
  } else {
sizes.push_back(tf_sizes[2]);
    sizes.push_back(tf_sizes[1]);
    sizes.push_back(tf_sizes[0]);
  }

  OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(sizes, tf_shape));
}
#endif

// Maps a dimension letter ('N'/'C'/'H'/'W') to its MklDims index;
// LOG(FATAL) on any other letter.
inline int32 GetMklTensorDimIndex(char dimension) {
  switch (dimension) {
    case 'N':
      return MklDims::N;
    case 'C':
      return MklDims::C;
    case 'H':
      return MklDims::H;
    case 'W':
      return MklDims::W;
    default:
      LOG(FATAL) << "Invalid dimension: " << dimension;
      return -1;  // Avoid compiler warning about missing return value
  }
}

#ifdef INTEL_MKL_ML_ONLY
// Returns the size of the named dimension of an MklShape.
inline int64 GetMklTensorDim(const MklShape& mkl_shape, char dimension) {
  int index = GetMklTensorDimIndex(dimension);
  CHECK(index >= 0 && index < mkl_shape.GetDimension())
      << "Invalid index from the dimension: " << index << ", " << dimension;
  return mkl_shape.dim_size(index);
}
#endif

// Copies both the data tensor and its MKL metadata tensor from input slot
// 'idx_in' to output slot 'idx_out'.
inline void CopyMklTensorInToOut(OpKernelContext* context, int idx_in,
                                 int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
  int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs);

  const Tensor& data = context->input(idx_data_in);
  const Tensor& meta = context->input(idx_meta_in);
  Tensor output(data.dtype());
  Tensor meta_output(meta.dtype());

  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  CHECK(output.CopyFrom(data, data.shape()));
  CHECK(meta_output.CopyFrom(meta, meta.shape()));
  context->set_output(idx_data_out, output);
  context->set_output(idx_meta_out, meta_output);
}

#ifdef INTEL_MKL_ML_ONLY
// Copies a TF-layout input to the output under a new shape, and emits a
// dummy (non-MKL) metadata tensor for the output slot.
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
                                         int idx_out,
                                         const TensorShape& shape) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  const Tensor& data = context->input(idx_data_in);
  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);
  Tensor output(data.dtype());
  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  CHECK(output.CopyFrom(data, shape));
  context->set_output(idx_data_out, output);
}
#else
// MKL-DNN counterpart of the above.
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
                                         int idx_out,
                                         const TensorShape& shape) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  const Tensor& data = context->input(idx_data_in);
  MklDnnShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);
  Tensor output(data.dtype());
  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  CHECK(output.CopyFrom(data, shape));
  context->set_output(idx_data_out, output);
}
#endif

#ifdef INTEL_MKL_ML_ONLY
// Forwards a TF-layout input to the output (ref-aware) and emits a dummy
// (non-MKL) metadata tensor for the output slot.
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
                                   int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);
  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
  }
}
#else
// MKL-DNN counterpart of the above.
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
                                   int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  MklDnnShape dnn_shape_output;
  dnn_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, dnn_shape_output);
  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
  }
}
#endif

// Forwards both the data tensor and its MKL metadata tensor from input slot
// 'idx_in' to output slot 'idx_out' (ref-aware).
inline void ForwardMklTensorInToOut(OpKernelContext* context, int idx_in,
                                    int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
  int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs);

  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
    context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
    context->set_output(idx_meta_out, context->input(idx_meta_in));
  }
}

#ifndef INTEL_MKL_ML_ONLY
// Set a dummy MKLDNN shape (called when the output is in TF format)
inline void SetDummyMklDnnShapeOutput(OpKernelContext* context,
                                      uint32 idx_data_out) {
  MklDnnShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_data_out, mkl_shape_output);
}

// Forwards the data tensor (ref-aware) while serializing the caller-supplied
// MklDnnShape into the output's metadata slot.
inline void ForwardMklTensorInToOutWithMklShape(OpKernelContext* context,
                                                int idx_in, int idx_out,
                                                const MklDnnShape& mkl_shape) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  AllocateOutputSetMklShape(context, idx_out, mkl_shape);

  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
  }
}
#endif

// Forward the MKL shape ONLY (used in elementwise and other ops where
// we call the eigen implementation and MKL shape is not used)
inline void ForwardMklMetaDataInToOut(OpKernelContext* context,
                                      uint32 idx_data_in,
                                      uint32_t idx_data_out) {
  uint32 idx_meta_in =
      GetTensorMetaDataIndex(idx_data_in, context->num_inputs());
  uint32 idx_meta_out =
      GetTensorMetaDataIndex(idx_data_out, context->num_outputs());

  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out);
  } else {
    context->set_output(idx_meta_out, context->input(idx_meta_in));
  }
}

#ifdef INTEL_MKL_ML_ONLY
// Set a dummy MKL shape (called when the output is in TF format)
inline void SetDummyMklShapeOutput(OpKernelContext* context,
                                   uint32 idx_data_out) {
  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_data_out, mkl_shape_output);
}
// We don't need these functions in
// MKLDNN. We have defined equality operator
// on MklDnnShape class directly.

// Checks if the TF shape for both MKL tensors is the same or not
// Returns: true if both TF shapes are the same, false otherwise
inline bool MklCompareShapes(const MklShape* input_shape_0,
                             const MklShape* input_shape_1) {
  // Check for number of dimensions
  if (input_shape_0->GetDimension() != input_shape_1->GetDimension()) {
    return false;
  }

  // Check size of each dimension
  size_t ndims = input_shape_0->GetDimension();
  for (size_t i = 0; i < ndims; i++) {
    if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) {
      return false;
    }
  }

  return true;
}

// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
// NOTE(review): this overload compares MklShape's TF-order sizes
// (tf_dim_size) against the TensorShape, unlike the MklShape/MklShape
// overload above which uses dim_size — presumably because the two layouts
// differ in dimension ordering; confirm against MklShape's definition.
inline bool MklCompareShapes(const MklShape* input_shape_0,
                             const TensorShape* input_shape_1) {
  // Check for number of dimensions
  if (input_shape_0->GetDimension() != input_shape_1->dims()) {
    return false;
  }

  // Check size of each dimension
  size_t ndims = input_shape_0->GetDimension();
  for (size_t i = 0; i < ndims; i++) {
    if (input_shape_0->tf_dim_size(i) != input_shape_1->dim_size(i)) {
      return false;
    }
  }

  return true;
}

// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
// Simply delegates to the (MklShape*, TensorShape*) overload above with
// the arguments swapped.
inline bool MklCompareShapes(const TensorShape* input_shape_0,
                             const MklShape* input_shape_1) {
  return MklCompareShapes(input_shape_1, input_shape_0);
}

// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const TensorShape* input_shape_0,
                             const TensorShape* input_shape_1) {
  // Check for number of dimensions
  if (input_shape_0->dims() != input_shape_1->dims()) {
    return false;
  }

  // Check size of each dimension
  size_t ndims = input_shape_0->dims();
  for (size_t i = 0; i < ndims; i++) {
    if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) {
      return false;
    }
  }

  return true;
}

// These functions do not compile with MKL-DNN since mkl.h is missing.
// We may need to remove them later.
// TODO(intel_tf): Remove this routine when faster MKL layout conversion is
// out.
// Converts a 4-D float tensor from NHWC to NCHW layout by transposing the
// (H*W) x C matrix of each batch element with MKL's out-of-place
// mkl_somatcopy ('R' = row-major, 'T' = transpose). Batches are processed
// in parallel via OpenMP.
inline void MklNHWCToNCHW(const Tensor& input, Tensor** output) {
  const float* buf_in = input.flat<float>().data();
  float* buf_out = (*output)->flat<float>().data();

  int64 N = input.dim_size(0);
  int64 H = input.dim_size(1);
  int64 W = input.dim_size(2);
  int64 C = input.dim_size(3);
  int64 stride_n = H * W * C;  // elements per batch image
#pragma omp parallel for num_threads(16)
  for (int64 n = 0; n < N; ++n) {
    mkl_somatcopy('R', 'T', H * W, C, 1, buf_in + n * stride_n, C,
                  buf_out + n * stride_n, H * W);
  }
}

// Inverse of MklNHWCToNCHW: converts a 4-D float tensor from NCHW back to
// NHWC layout. Note that N/H/W/C are read from the *output* tensor here,
// which is expected to already carry the NHWC shape.
inline void MklNCHWToNHWC(const Tensor& input, Tensor** output) {
  const float* buf_in = input.flat<float>().data();
  float* buf_out = (*output)->flat<float>().data();

  int64 N = (*output)->dim_size(0);
  int64 H = (*output)->dim_size(1);
  int64 W = (*output)->dim_size(2);
  int64 C = (*output)->dim_size(3);
  int64 stride_n = H * W * C;  // elements per batch image
#pragma omp parallel for num_threads(16)
  for (int64 n = 0; n < N; ++n) {
    mkl_somatcopy('R', 'T', C, H * W, 1, buf_in + n * stride_n, H * W,
                  buf_out + n * stride_n, C);
  }
}

#endif
// -------------------------------------------------------------------

#ifndef INTEL_MKL_ML_ONLY

/// Return MKL-DNN data type (memory::data_type) for input type T
///
/// @input None
/// @return memory::data_type corresponding to type T
template <typename T>
static memory::data_type MklDnnType();

/// Instantiation for float type. Add similar instantiations for other
/// type if needed.
/// MklDnnType<float>() -> single-precision float.
template <>
memory::data_type MklDnnType<float>() {
  return memory::data_type::f32;
}

/// MklDnnType<quint8>() -> unsigned 8-bit integer.
template <>
memory::data_type MklDnnType<quint8>() {
  return memory::data_type::u8;
}

/// MklDnnType<qint8>() -> signed 8-bit integer.
template <>
memory::data_type MklDnnType<qint8>() {
  return memory::data_type::s8;
}

/// MklDnnType<qint32>() -> signed 32-bit integer.
template <>
memory::data_type MklDnnType<qint32>() {
  return memory::data_type::s32;
}

/// Map TensorFlow's data format into MKL-DNN 3D data format
/// @input: TensorFlow data format
/// @return: memory::format corresponding to TensorFlow data format;
///          Fails with an error if invalid data format.
inline memory::format TFDataFormatToMklDnn3DDataFormat(TensorFormat format) {
  if (format == FORMAT_NHWC) return memory::format::ndhwc;
  if (format == FORMAT_NCHW) return memory::format::ncdhw;
  // Any other format is a hard error; the return below only silences
  // compiler warnings since TF_CHECK_OK aborts.
  TF_CHECK_OK(Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
  return memory::format::format_undef;
}

/// Map TensorFlow's data format into MKL-DNN data format
///
/// @input: TensorFlow data format
/// @return: memory::format corresponding to TensorFlow data format;
///          Fails with an error if invalid data format.
inline memory::format TFDataFormatToMklDnnDataFormat(TensorFormat format) {
  if (format == FORMAT_NHWC) return memory::format::nhwc;
  if (format == FORMAT_NCHW) return memory::format::nchw;
  // Unsupported format: abort via TF_CHECK_OK; unreachable return for
  // compiler warnings only.
  TF_CHECK_OK(Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
  return memory::format::format_undef;
}

/// Map MKL-DNN data format to TensorFlow's data format
///
/// @input: memory::format
/// @return: Tensorflow data format corresponding to memory::format
///          Fails with an error if invalid data format.
inline TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format) {
  // Both 4-D (nhwc/nchw) and 5-D (ndhwc/ncdhw) MKL-DNN formats map onto the
  // two TensorFlow channel orderings.
  if (format == memory::format::nhwc || format == memory::format::ndhwc)
    return FORMAT_NHWC;
  else if (format == memory::format::nchw || format == memory::format::ncdhw)
    return FORMAT_NCHW;
  TF_CHECK_OK(Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));

  // Return to prevent compiler warnings, otherwise TF_CHECK_OK will ensure
  // that we don't come here.
  return FORMAT_NHWC;
}

/// Map TensorShape object into memory::dims required by MKL-DNN
///
/// This function will simply map input TensorShape into MKL-DNN dims
/// naively. So it will preserve the order of dimensions. E.g., if
/// input tensor is in NHWC format, then dims will be in NHWC format
/// also.
///
/// @input TensorShape object in shape
/// @return memory::dims corresponding to TensorShape
inline memory::dims TFShapeToMklDnnDims(const TensorShape& shape) {
  memory::dims dims(shape.dims());
  for (int d = 0; d < shape.dims(); ++d) {
    dims[d] = shape.dim_size(d);
  }
  return dims;
}

/// Map TensorShape object into memory::dims in NCHW format required by MKL-DNN
///
/// This function is a specific one than above function. It will map input
/// TensorShape into MKL-DNN dims in NCHW format. So it may not preserve the
/// order of dimensions. E.g., if input tensor is in NHWC format, then dims
/// will be in NCHW format, and not in NHWC format.
///
/// @input TensorShape object in shape
/// @return memory::dims in MKL-DNN required NCHW format
inline memory::dims TFShapeToMklDnnDimsInNCHW(const TensorShape& shape,
                                              TensorFormat format) {
  // Check validity of format.
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);

  int n = shape.dim_size(GetTensorDimIndex(format, 'N'));
  int c = shape.dim_size(GetTensorDimIndex(format, 'C'));
  int h = shape.dim_size(GetTensorDimIndex(format, 'H'));
  int w = shape.dim_size(GetTensorDimIndex(format, 'W'));

  // MKL-DNN requires dimensions in NCHW format.
  return memory::dims({n, c, h, w});
}

/// 5-D (3 spatial dims) counterpart of TFShapeToMklDnnDimsInNCHW: maps a
/// TensorShape in the given TensorFlow format into MKL-DNN's NCDHW order.
/// The spatial dimensions are addressed as '0'/'1'/'2' (depth/height/width)
/// per GetTensorDimIndex<3> convention.
inline memory::dims TFShapeToMklDnnDimsInNCDHW(const TensorShape& shape,
                                               TensorFormat format) {
  // Check validity of format.
  CHECK_NE(TFDataFormatToMklDnn3DDataFormat(format),
           memory::format::format_undef);

  int n = shape.dim_size(GetTensorDimIndex<3>(format, 'N'));
  int c = shape.dim_size(GetTensorDimIndex<3>(format, 'C'));
  int d = shape.dim_size(GetTensorDimIndex<3>(format, '0'));
  int h = shape.dim_size(GetTensorDimIndex<3>(format, '1'));
  int w = shape.dim_size(GetTensorDimIndex<3>(format, '2'));

  // MKL-DNN requires dimensions in NCDHW format.
  return memory::dims({n, c, d, h, w});
}

/// Overloaded version of function above. Input parameters are
/// self-explanatory. Accepts dims already in memory::dims form (in the
/// given TensorFlow format) instead of a TensorShape.
inline memory::dims MklDnnDimsInNCHW(const memory::dims& in_dims,
                                     TensorFormat format) {
  // Check validity of format.
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);

  int n = in_dims[GetTensorDimIndex(format, 'N')];
  int c = in_dims[GetTensorDimIndex(format, 'C')];
  int h = in_dims[GetTensorDimIndex(format, 'H')];
  int w = in_dims[GetTensorDimIndex(format, 'W')];

  // MKL-DNN requires dimensions in NCHW format.
  return memory::dims({n, c, h, w});
}

/// Map MklDnn memory::dims object into TensorShape object.
///
/// This function will simply map input shape in MKL-DNN memory::dims format
/// in Tensorflow's TensorShape object by preserving dimension order.
///
/// @input MKL-DNN memory::dims object
/// @output TensorShape corresponding to memory::dims
inline TensorShape MklDnnDimsToTFShape(const memory::dims& dims) {
  std::vector<int32> shape(dims.size(), -1);
  for (int d = 0; d < dims.size(); d++) {
    shape[d] = dims[d];
  }

  TensorShape ret;
  // MakeShape only fails for invalid (e.g. negative) sizes; dims coming from
  // MKL-DNN are expected to be valid, so treat failure as a fatal error.
  CHECK_EQ(TensorShapeUtils::MakeShape(shape, &ret).ok(), true);
  return ret;
}

/// Function to calculate strides given tensor shape in Tensorflow order
/// E.g., if dims_tf_order is {1, 2, 3, 4}, then as per Tensorflow convention,
/// dimension with size 1 is outermost dimension; while dimension with size 4 is
/// innermost dimension. So strides for this tensor would be {4 * 3 * 2,
/// 4 * 3, 4, 1}, i.e., {24, 12, 4, 1}.
///
/// @input Tensorflow shape in memory::dims type
/// @return memory::dims containing strides for the tensor.
inline memory::dims CalculateTFStrides(const memory::dims& dims_tf_order) {
  CHECK_GT(dims_tf_order.size(), 0);
  memory::dims strides(dims_tf_order.size());
  int last_dim_idx = dims_tf_order.size() - 1;
  // Innermost dimension has unit stride; each outer stride is the product of
  // all inner dimension sizes.
  strides[last_dim_idx] = 1;
  for (int d = last_dim_idx - 1; d >= 0; d--) {
    strides[d] = strides[d + 1] * dims_tf_order[d + 1];
  }
  return strides;
}

inline padding_kind TFPaddingToMklDnnPadding(Padding pad) {
  // MKL-DNN only supports zero padding.
  return padding_kind::zero;
}

/// Helper function to create memory descriptor in Blocked format
///
/// @input: Tensor dimensions
/// @input: strides corresponding to dimensions. One can use utility
///         function such as CalculateTFStrides to compute strides
///         for given dimensions.
/// @return: memory::desc object corresponding to blocked memory format
///          for given dimensions and strides.
inline memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
                                               const memory::dims& strides,
                                               memory::data_type dtype) {
  CHECK_EQ(dim.size(), strides.size());
  // We have to construct memory descriptor in a C style. This is not at all
  // ideal but MKLDNN does not offer any API to construct descriptor in
  // blocked format except a copy constructor that accepts
  // mkldnn_memory_desc_t.
  mkldnn_memory_desc_t md;
  md.primitive_kind = mkldnn_memory;
  md.ndims = dim.size();
  md.format = mkldnn_blocked;
  md.data_type = memory::convert_to_c(dtype);

  for (size_t i = 0; i < dim.size(); i++) {
    // block_dims of 1 means no blocking within a dimension;
    // strides[0] holds the outer (between-block) strides and strides[1] the
    // within-block strides, per mkldnn_blocking_desc_t layout.
    md.layout_desc.blocking.block_dims[i] = 1;
    md.layout_desc.blocking.strides[1][i] = 1;
    md.layout_desc.blocking.strides[0][i] = strides[i];
    md.layout_desc.blocking.padding_dims[i] = dim[i];
    md.layout_desc.blocking.offset_padding_to_data[i] = 0;
    md.dims[i] = dim[i];
  }
  md.layout_desc.blocking.offset_padding = 0;

  return memory::desc(md);
}

// Forward declaration; definition appears later in this file. Returns a
// (possibly cached) reorder primitive from `from` to `to`.
template <typename T>
inline primitive FindOrCreateReorder(const memory* from, const memory* to);

/*
 * Class to represent all the resources corresponding to a tensor in TensorFlow
 * that are required to execute an operation (such as Convolution).
 */
template <typename T>
class MklDnnData {
 private:
  /// MKL-DNN memory primitive for input user memory
  memory* user_memory_;

  /// MKL-DNN memory primitive in case input or output reorder is needed.
  memory* reorder_memory_;

  /// Operations memory descriptor
  memory::desc* op_md_;
  // flag to indicate if data is 3D or not.
  bool bIs3D;
  /// Operations temp buffer
  void* allocated_buffer_;
  /// CPU engine on which operation will be executed
  const engine* cpu_engine_;

 public:
  explicit MklDnnData(const engine* e)
      : user_memory_(nullptr),
        reorder_memory_(nullptr),
        op_md_(nullptr),
        allocated_buffer_(nullptr),
        cpu_engine_(e) {}

  ~MklDnnData() {
    if (allocated_buffer_ != nullptr) {
      cpu_allocator()->DeallocateRaw(allocated_buffer_);
    }
    cpu_engine_ = nullptr;  // We don't own this.
    delete (user_memory_);
    delete (reorder_memory_);
    delete (op_md_);
  }

  // Returns a mutable pointer to the tensor's raw data buffer.
  inline void* GetTensorBuffer(const Tensor* tensor) const {
    CHECK_NOTNULL(tensor);
    return const_cast<void*>(
        static_cast<const void*>(tensor->flat<T>().data()));
  }

  void SetIs3DData(bool bIs3D_) { bIs3D = bIs3D_; }
  bool GetIs3D() { return bIs3D; }

  /// Set user memory primitive using specified dimensions, memory format and
  /// data_buffer. Function automatically uses element data type by using
  /// input type T used for creating call object.
  ///
  /// In a nutshell, function allows user to describe the input tensor to
  /// an operation. E.g., filter of Conv2D is of shape {1, 2, 3, 4}, and
  /// memory format HWIO, and the buffer that contains actual values is
  /// pointed by data_buffer.
  inline void SetUsrMem(const memory::dims& dim, memory::format fm,
                        void* data_buffer = nullptr) {
    auto md = memory::desc(dim, MklDnnType<T>(), fm);
    SetUsrMem(md, data_buffer);
  }

  inline void SetUsrMem(const memory::dims& dim, memory::format fm,
                        const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(dim, fm, GetTensorBuffer(tensor));
  }

  /// Helper function to create memory descriptor in Blocked format
  ///
  /// @input: Tensor dimensions
  /// @input: strides corresponding to dimensions. One can use utility
  ///         function such as CalculateTFStrides to compute strides
  ///         for given dimensions.
  /// @return: memory::desc object corresponding to blocked memory format
  ///          for given dimensions and strides.
  static inline memory::desc CreateBlockedMemDesc(const memory::dims& dim,
                                                  const memory::dims& strides) {
    return CreateBlockedMemDescHelper(dim, strides, MklDnnType<T>());
  }

  /// A version of SetUsrMem call that allows user to create memory in blocked
  /// format. So in addition to accepting dimensions, it also accepts strides.
  /// This allows user to create memory for tensor in a format that is not
  /// supported by MKLDNN. E.g., MKLDNN does not support tensor format for 6
  /// dimensional tensor as a native format. But by using blocked format, a
  /// user can create memory for 6D tensor.
  inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
                        void* data_buffer = nullptr) {
    CHECK_EQ(dim.size(), strides.size());
    auto blocked_md = MklDnnData<T>::CreateBlockedMemDesc(dim, strides);
    SetUsrMem(blocked_md, data_buffer);
  }

  inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
                        const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(dim, strides, GetTensorBuffer(tensor));
  }

  /// A version of function to set user memory primitive that accepts memory
  /// descriptor directly, instead of accepting dimensions and format. This
  /// function is more generic that the one above, but the function above is
  /// sufficient in most cases.
  inline void SetUsrMem(const memory::desc& md, void* data_buffer = nullptr) {
    auto pd = memory::primitive_desc(md, *cpu_engine_);
    SetUsrMem(pd, data_buffer);
  }

  /// A version of SetUsrMem with memory descriptor and tensor
  inline void SetUsrMem(const memory::desc& md, const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(md, GetTensorBuffer(tensor));
  }

  /// A version of function to set user memory primitive that accepts primitive
  /// descriptor directly, instead of accepting dimensions and format. This
  /// function is more generic that the one above, but the function above is
  /// sufficient in most cases. All other SetUsrMem overloads funnel into this
  /// one, which owns the allocation of user_memory_.
  inline void SetUsrMem(const memory::primitive_desc& pd,
                        void* data_buffer = nullptr) {
    CHECK_NOTNULL(cpu_engine_);
    // Delete any previously-set user memory before replacing it.
    if (user_memory_) delete user_memory_;
    // TODO(nhasabni): can we remove dynamic memory allocation?
    if (data_buffer) {
      user_memory_ = new memory(pd, data_buffer);
    } else {
      user_memory_ = new memory(pd);
    }
  }

  /// A version of SetUsrMem with primitive descriptor and tensor
  inline void SetUsrMem(const memory::primitive_desc& pd,
                        const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(pd, GetTensorBuffer(tensor));
  }

  /// Get function for user memory primitive.
  inline const memory* GetUsrMem() const { return user_memory_; }

  /// Get function for primitive descriptor of user memory primitive.
  inline const memory::primitive_desc GetUsrMemPrimDesc() const {
    CHECK_NOTNULL(user_memory_);
    return user_memory_->get_primitive_desc();
  }

  /// Get function for descriptor of user memory.
  inline memory::desc GetUsrMemDesc() {
    // This is ugly. Why MKL-DNN does not provide desc() method of const type??
    const memory::primitive_desc pd = GetUsrMemPrimDesc();
    return const_cast<memory::primitive_desc*>(&pd)->desc();
  }

  /// Get function for data buffer of user memory primitive.
  inline void* GetUsrMemDataHandle() const {
    CHECK_NOTNULL(user_memory_);
    return user_memory_->get_data_handle();
  }

  /// Set function for data buffer of user memory primitive.
  inline void SetUsrMemDataHandle(void* data_buffer) {
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(data_buffer);
    user_memory_->set_data_handle(data_buffer);
  }

  /// Set function for data buffer of user memory primitive.
  inline void SetUsrMemDataHandle(const Tensor* tensor) {
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(tensor);
    user_memory_->set_data_handle(GetTensorBuffer(tensor));
  }

  /// allocate function for data buffer
  /// NOTE(review): identifier "kMemoryAlginment" is misspelled (Alignment);
  /// left unchanged here to keep this a comments-only edit.
  inline void AllocateBuffer(size_t size) {
    const int64 kMemoryAlginment = 64;  // For AVX512 memory alignment.
    allocated_buffer_ = cpu_allocator()->AllocateRaw(kMemoryAlginment, size);
  }

  inline void* GetAllocatedBuffer() { return allocated_buffer_; }

  /// Get the memory primitive for input and output of an op. If inputs
  /// to an op require reorders, then this function returns memory primitive
  /// for reorder. Otherwise, it will return memory primitive for user memory.
  ///
  /// E.g., Conv2D(I, F) is a primitive with I and F being inputs. Then to
  /// execute Conv2D, we need memory primitive for I and F. Buf if reorder is
  /// required for I and F (say I_r is reorder primitive for I; F_r is reorder
  /// primitive for F), then we need I_r and F_r to perform Conv2D.
  inline const memory& GetOpMem() const {
    return reorder_memory_ ? *reorder_memory_ : *user_memory_;
  }

  /// Set memory descriptor of an operation in terms of dimensions and memory
  /// format. E.g., For Conv2D, the dimensions would be same as user dimensions
  /// but memory::format would be mkldnn::any because we want MKL-DNN to choose
  /// best layout/format for given input dimensions.
  inline void SetOpMemDesc(const memory::dims& dim, memory::format fm) {
    // TODO(nhasabni): can we remove dynamic memory allocation?
    op_md_ = new memory::desc(dim, MklDnnType<T>(), fm);
  }

  /// Get function for memory descriptor for an operation
  inline const memory::desc& GetOpMemDesc() const { return *op_md_; }

  /// Predicate that checks if we need to reorder user's memory into memory
  /// pointed by op_pd.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///                 operation
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool IsReorderNeeded(const memory::primitive_desc& op_pd) const {
    CHECK_NOTNULL(user_memory_);
    return op_pd != user_memory_->get_primitive_desc();
  }

  /// Predicate that checks if we need to reorder user's memory into memory
  /// based on the provided format.
  ///
  /// @input: target_format - memory format of the given input of an
  ///                         operation
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool IsReorderNeeded(const memory::format& target_format) const {
    CHECK_NOTNULL(user_memory_);
    return target_format !=
           user_memory_->get_primitive_desc().desc().data.format;
  }

  /// Function to create a reorder from memory pointed by from to memory
  /// pointed by to. Returns created primitive.
  inline primitive CreateReorder(const memory* from, const memory* to) const {
    CHECK_NOTNULL(from);
    CHECK_NOTNULL(to);
    return reorder(*from, *to);
  }

  /// Function to handle input reordering
  ///
  /// Check if we need to reorder this input of an operation.
  /// Return true and allocate reorder memory primitive if reorder is needed.
  /// Otherwise, return false and do not allocate reorder memory primitive.
  ///
  /// To check if reorder is needed, this function compares memory primitive
  /// descriptor of an operation (op_pd) for the given input with the
  /// user-specified memory primitive descriptor.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///                 operation
  /// @input: net - net to which to add reorder primitive in case it is needed.
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      reorder_memory_ = new memory(op_pd);
      net->push_back(CreateReorder(user_memory_, reorder_memory_));
      return true;
    }
    return false;
  }

  /// TODO: this is a faster path with reorder primitive cache compared with
  /// CheckReorderToOpMem(..., std::vector<primitive>* net), will remove
  /// slow path in the future
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd) {
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      // primitive reuse don't allow two same reorder prim in
      // one stream, so submit it immediately
      reorder_memory_ = new memory(op_pd);
      std::vector<primitive> net;
      net.push_back(FindOrCreateReorder<T>(user_memory_, reorder_memory_));
      stream(stream::kind::eager).submit(net).wait();
      return true;
    }
    return false;
  }

  /// Overloaded version of above function that accepts memory buffer
  /// where output of reorder needs to be stored.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///                 operation
  /// @reorder_data_handle - memory buffer where output of reorder needs to be
  ///                        stored. Primitive does not check if buffer is
  ///                        enough size to write.
  /// @input: net - net to which to add reorder primitive in case it is needed.
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  void* reorder_data_handle,
                                  std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(reorder_data_handle);
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      reorder_memory_ = new memory(op_pd, reorder_data_handle);
      net->push_back(CreateReorder(user_memory_, reorder_memory_));
      return true;
    }
    return false;
  }

  /// TODO: this is a faster path with reorder primitive cache compared with
  /// CheckReorderToOpMem(..., std::vector<primitive>* net), will remove
  /// slow path in the future
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  void* reorder_data_handle) {
    CHECK_NOTNULL(reorder_data_handle);
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      // primitive reuse don't allow two same reorder prim in
      // one stream, so submit it immediately
      std::vector<primitive> net;
      reorder_memory_ = new memory(op_pd, reorder_data_handle);
      net.push_back(FindOrCreateReorder<T>(user_memory_, reorder_memory_));
      stream(stream::kind::eager).submit(net).wait();
      return true;
    }
    return false;
  }

  /// Another overloaded version of CheckReorderToOpMem that accepts Tensor
  /// where output of reorder needs to be stored.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///                 operation
  /// @reorder_tensor - Tensor whose buffer is to be used to store output of
  ///                   reorder. Primitive does not check if buffer is
  ///                   enough size to write.
  /// @input: net - net to which to add reorder primitive in case it is needed.
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  Tensor* reorder_tensor,
                                  std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(reorder_tensor);
    return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor), net);
  }

  /// TODO: this is a faster path with reorder primitive cache compared with
  /// CheckReorderToOpMem(..., std::vector<primitive>* net), will remove
  /// slow path in the future
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  Tensor* reorder_tensor) {
    CHECK_NOTNULL(reorder_tensor);
    return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor));
  }

  /// Function to handle output reorder
  ///
  /// This function performs very similar functionality as input reordering
  /// function above. The only difference is that this function does not add
  /// reorder primitive to the net. The reason for this is: the reorder
  /// primitive for output needs to be added to the list only after operation
  /// has executed. But we need to prepare a temporary buffer in case output
  /// reorder is needed. And this temporary buffer will hold the output of
  /// an operation before it is fed to reorder primitive.
  ///
  /// @input memory primitive descriptor for the given output of an operation
  /// @return: true in case reorder of output is needed; false, otherwise.
  inline bool PrepareReorderToUserMemIfReq(
      const memory::primitive_desc& op_pd) {
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      reorder_memory_ = new memory(op_pd);
      return true;
    }
    return false;
  }

  /// Function to actually insert reorder primitive in the net
  ///
  /// This function completes remaining part of output reordering. It inserts
  /// a reordering primitive from the temporary buffer that holds the output
  /// to the user-specified output buffer.
  ///
  /// @input: net - net to which to add reorder primitive
  inline void InsertReorderToUserMem(std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(reorder_memory_);
    net->push_back(CreateReorder(reorder_memory_, user_memory_));
  }

  /// TODO: this is a faster path with reorder primitive cache compared with
  /// InsertReorderToUserMem(std::vector<primitive>* net), will remove
  /// slow path in the future
  inline void InsertReorderToUserMem() {
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(reorder_memory_);
    // primitive reuse don't allow two same reorder prim in
    // one stream, so submit it immediately
    std::vector<primitive> net;
    net.push_back(FindOrCreateReorder<T>(reorder_memory_, user_memory_));
    stream(stream::kind::eager).submit(net).wait();
  }
};

/// Base class for operations with reuse of primitives
///
class MklPrimitive {
 public:
  virtual ~MklPrimitive() {}

  // Dummy data which MKL DNN never operates on
  unsigned char* DummyData = nullptr;
};

const mkldnn::memory::dims NONE_DIMS = {};

//
// LRUCache is a class which implements LRU (Least Recently Used) cache.
// The implementation is similar to that of
//    tensorflow/core/platform/cloud/expiring_lru_cache.h
// without its thread-safe part because the cache is supposed to be
// used as thread local (for instance, MklPrimitive caching).
//
// The LRU list maintains objects in chronological order based on
// creation time, with the least recently accessed object at the
// tail of LRU list, while the most recently accessed object
// at the head of LRU list.
//
// This class is used to maintain an upper bound on the total number of
// cached items. When the cache reaches its capacity, the LRU item will
// be removed and replaced by a new one from SetOp call.
//
template <typename T>
class LRUCache {
 public:
  explicit LRUCache(size_t capacity) {
    capacity_ = capacity;
    Clear();
  }

  // Looks up `key`. On a hit, moves the entry to the front of the LRU list
  // (most recently used) and returns the cached object; on a miss, returns
  // nullptr. Ownership of the returned pointer stays with the cache.
  T* GetOp(const string& key) {
    auto it = cache_.find(key);
    if (it == cache_.end()) {
      return nullptr;
    }

    // Move to the front of LRU list as the most recently accessed.
    lru_list_.erase(it->second.lru_iterator);
    lru_list_.push_front(it->first);
    it->second.lru_iterator = lru_list_.begin();
    return it->second.op;
  }

  // Inserts `op` under `key`, evicting the least recently used entry first
  // if the cache is at capacity. The cache takes ownership of `op` (deleted
  // by Entry's destructor on eviction/clear).
  // NOTE(review): if `key` already exists, emplace() is a no-op, which would
  // leak `op` and leave a stale duplicate key in lru_list_ — presumably
  // callers never insert a key twice; confirm at call sites.
  void SetOp(const string& key, T* op) {
    if (lru_list_.size() >= capacity_) {
      Delete();
    }

    // Insert an entry to the front of the LRU list
    lru_list_.push_front(key);
    Entry entry(op, lru_list_.begin());
    cache_.emplace(std::make_pair(key, std::move(entry)));
  }

  // Drops all entries; cached objects are deleted via Entry destructors.
  void Clear() {
    if (lru_list_.empty()) return;

    // Clean up the cache
    cache_.clear();
    lru_list_.clear();
  }

 private:
  struct Entry {
    // The entry's value. Owned by the entry (deleted in ~Entry).
    T* op;

    // A list iterator pointing to the entry's position in the LRU list.
    std::list<string>::iterator lru_iterator;

    // Constructor
    Entry(T* op, std::list<string>::iterator it) {
      this->op = op;
      this->lru_iterator = it;
    }

    // Move constructor: transfers ownership of `op` and nulls out the
    // source so its destructor does not double-delete.
    Entry(Entry&& source) noexcept
        : lru_iterator(std::move(source.lru_iterator)) {
      op = std::move(source.op);
      source.op = std::forward<T*>(nullptr);
    }

    // Destructor
    ~Entry() {
      if (op != nullptr) delete op;
    }
  };

  // Remove the least recently accessed entry from LRU list, which
  // is the tail of lru_list_. Update cache_ correspondingly.
  bool Delete() {
    if (lru_list_.empty()) return false;
    string key = lru_list_.back();
    lru_list_.pop_back();
    cache_.erase(key);
    return true;
  }

  // Cache capacity
  size_t capacity_;

  // The cache, a map from string key to a LRU entry.
  std::unordered_map<string, Entry> cache_;

  // The LRU list of entries.
  // The front of the list contains the key of the most recently accessed
  // entry, while the back of the list is the least recently accessed entry.
  std::list<string> lru_list_;
};

// Per-type factory wrapping a thread-local LRUCache of MklPrimitive objects.
template <typename T>
class MklPrimitiveFactory {
 public:
  MklPrimitiveFactory() {}

  ~MklPrimitiveFactory() {}

  // Returns the cached primitive for `key`, or nullptr if absent.
  MklPrimitive* GetOp(const string& key) {
    auto& lru_cache = MklPrimitiveFactory<T>::GetLRUCache();
    return lru_cache.GetOp(key);
  }

  // Caches `op` under `key`; the cache takes ownership of `op`.
  void SetOp(const string& key, MklPrimitive* op) {
    auto& lru_cache = MklPrimitiveFactory<T>::GetLRUCache();
    lru_cache.SetOp(key, op);
  }

  /// Function to decide whether HW has AVX512 or AVX2
  /// For those legacy device(w/o AVX512 and AVX2),
  /// MKL-DNN GEMM will be used.
  static inline bool IsLegacyPlatform() {
    return (!port::TestCPUFeature(port::CPUFeature::AVX512F) &&
            !port::TestCPUFeature(port::CPUFeature::AVX2));
  }

  /// Function to check whether primitive memory optimization is enabled
  /// (controlled by the TF_MKL_OPTIMIZE_PRIMITIVE_MEMUSE env var; defaults
  /// to true).
  static inline bool IsPrimitiveMemOptEnabled() {
    bool is_primitive_mem_opt_enabled = true;
    TF_CHECK_OK(ReadBoolFromEnvVar("TF_MKL_OPTIMIZE_PRIMITIVE_MEMUSE", true,
                                   &is_primitive_mem_opt_enabled));
    return is_primitive_mem_opt_enabled;
  }

 private:
  // Thread-local cache: avoids locking; each thread has its own 1024-entry
  // primitive pool.
  static inline LRUCache<MklPrimitive>& GetLRUCache() {
    static const int kCapacity = 1024;  // cache capacity
    static thread_local LRUCache<MklPrimitive> lru_cache_(kCapacity);
    return lru_cache_;
  }
};

// utility class for creating keys of MKL primitive pool.
class FactoryKeyCreator {
 public:
  FactoryKeyCreator() { key_.reserve(kMaxKeyLength); }

  ~FactoryKeyCreator() {}

  void AddAsKey(const string& str) { Append(str); }

  // Append each dimension of an mkldnn dims vector to the key.
  void AddAsKey(const mkldnn::memory::dims& dims) {
    for (unsigned int i = 0; i < dims.size(); i++) {
      AddAsKey<int>(dims[i]);
    }
  }

  // Append the raw bytes of a trivially-copyable value to the key.
  template <typename T>
  void AddAsKey(const T data) {
    auto buffer = reinterpret_cast<const char*>(&data);
    Append(StringPiece(buffer, sizeof(T)));
  }

  string GetKey() { return key_; }

 private:
  string key_;
  const char delimiter = 'x';
  const int kMaxKeyLength = 256;

  // Append `s` followed by the delimiter character.
  void Append(StringPiece s) {
    key_.append(string(s));
    key_.append(1, delimiter);
  }
};

// Pick the preferred MKL-DNN blocked memory format for `channel` channels
// based on available CPU vector extensions (AVX512F, then AVX2, else plain).
static inline memory::format get_desired_format(int channel, bool is_2d = true) {
  memory::format fmt_desired = memory::format::any;

  if (port::TestCPUFeature(port::CPUFeature::AVX512F)) {
    fmt_desired = is_2d ? memory::format::nChw16c : memory::format::nCdhw16c;
  } else if (port::TestCPUFeature(port::CPUFeature::AVX2) &&
             (channel % 8) == 0) {
    fmt_desired = is_2d
                      ? memory::format::nChw8c
                      : memory::format::ncdhw;  // no avx2 support for 3d yet.
  } else {
    fmt_desired = is_2d ? memory::format::nchw : memory::format::ncdhw;
  }
  return fmt_desired;
}

// MklPrimitive wrapping a cached MKL-DNN reorder between two memory objects.
class MklReorderPrimitive : public MklPrimitive {
 public:
  explicit MklReorderPrimitive(const memory* from, const memory* to) {
    Setup(from, to);
  }

  ~MklReorderPrimitive() {}

  std::shared_ptr<primitive> GetPrimitive() { return context_.reorder_prim; }

  // Re-point the cached reorder at new input/output data buffers.
  void SetMemory(const memory* from, const memory* to) {
    context_.src_mem->set_data_handle(from->get_data_handle());
    context_.dst_mem->set_data_handle(to->get_data_handle());
  }

 private:
  struct ReorderContext {
    std::shared_ptr<mkldnn::memory> src_mem;
    std::shared_ptr<mkldnn::memory> dst_mem;
    std::shared_ptr<primitive> reorder_prim;

    ReorderContext()
        : src_mem(nullptr), dst_mem(nullptr), reorder_prim(nullptr) {}
  } context_;

  engine cpu_engine_ = engine(engine::cpu, 0);

  // Build src/dst memory objects (with placeholder DummyData handles) and
  // the reorder primitive connecting them.
  void Setup(const memory* from, const memory* to) {
    context_.src_mem.reset(new memory(
        {from->get_primitive_desc().desc(), cpu_engine_}, DummyData));
    context_.dst_mem.reset(
        new memory({to->get_primitive_desc().desc(), cpu_engine_}, DummyData));
    context_.reorder_prim = std::make_shared<mkldnn::reorder>(
        reorder(*context_.src_mem, *context_.dst_mem));
  }
};

// Singleton factory caching reorder primitives, keyed by the full source and
// destination memory descriptors (format, data type, dims, strides).
template <typename T>
class MklReorderPrimitiveFactory : public MklPrimitiveFactory<T> {
 public:
  // Get a cached reorder for (from, to), creating and caching one on miss.
  // Always rebinds the data handles to the current buffers before returning.
  static MklReorderPrimitive* Get(const memory* from, const memory* to) {
    auto reorderPrim = static_cast<MklReorderPrimitive*>(
        MklReorderPrimitiveFactory<T>::GetInstance().GetReorder(from, to));
    if (reorderPrim == nullptr) {
      reorderPrim = new MklReorderPrimitive(from, to);
      MklReorderPrimitiveFactory<T>::GetInstance().SetReorder(from, to,
                                                             reorderPrim);
    }
    reorderPrim->SetMemory(from, to);
    return reorderPrim;
  }

  static MklReorderPrimitiveFactory& GetInstance() {
    static MklReorderPrimitiveFactory instance_;
    return instance_;
  }

 private:
  MklReorderPrimitiveFactory() {}

  ~MklReorderPrimitiveFactory() {}

  // Serialize both memory descriptors into a cache key. The stride ranges
  // index into mkldnn's internal blocking descriptor arrays
  // (layout_desc.blocking.strides[0][0 .. ndims-1]).
  static string CreateKey(const memory* from, const memory* to) {
    string prefix = "reorder";
    FactoryKeyCreator key_creator;
    auto const& from_desc = from->get_primitive_desc().desc().data;
    auto const& to_desc = to->get_primitive_desc().desc().data;
    const int KIdxFirstStride = 0;
    memory::dims from_dims(from_desc.dims, &from_desc.dims[from_desc.ndims]);
    memory::dims to_dims(to_desc.dims, &to_desc.dims[to_desc.ndims]);
    memory::dims from_strides(
        from_desc.layout_desc.blocking.strides[KIdxFirstStride],
        &from_desc.layout_desc.blocking
             .strides[KIdxFirstStride][from_desc.ndims]);
    memory::dims to_strides(
        to_desc.layout_desc.blocking.strides[KIdxFirstStride],
        &to_desc.layout_desc.blocking.strides[KIdxFirstStride][to_desc.ndims]);
    key_creator.AddAsKey(prefix);
    key_creator.AddAsKey(static_cast<int>(from_desc.format));
    key_creator.AddAsKey(static_cast<int>(from_desc.data_type));
    key_creator.AddAsKey(from_dims);
    key_creator.AddAsKey(from_strides);
    key_creator.AddAsKey(static_cast<int>(to_desc.format));
    key_creator.AddAsKey(static_cast<int>(to_desc.data_type));
    key_creator.AddAsKey(to_dims);
    key_creator.AddAsKey(to_strides);
    return key_creator.GetKey();
  }

  MklPrimitive* GetReorder(const memory* from, const memory* to) {
    string key = CreateKey(from, to);
    return this->GetOp(key);
  }

  void SetReorder(const memory* from, const memory* to, MklPrimitive* op) {
    string key = CreateKey(from, to);
    this->SetOp(key, op);
  }
};

/// Function to find(or create) a reorder from memory pointed by
/// from to memory pointed by to, it will created primitive or
/// get primitive from pool if it is cached.
/// Returns the primitive.
template <typename T> inline primitive FindOrCreateReorder(const memory* from, const memory* to) { CHECK_NOTNULL(from); CHECK_NOTNULL(to); MklReorderPrimitive* reorder_prim = MklReorderPrimitiveFactory<T>::Get(from, to); return *reorder_prim->GetPrimitive(); } // utility function to determine if it is conv 1x1 and stride != 1 // for purpose of temporarily disabling primitive reuse inline bool IsConv1x1StrideNot1(memory::dims filter_dims, memory::dims strides) { if (filter_dims.size() != 4 || strides.size() != 2) return false; return ((filter_dims[2] == 1) && (filter_dims[3] == 1) && ((strides[0] != 1) || (strides[1] != 1))); } #endif // INTEL_MKL_DNN } // namespace tensorflow #endif // INTEL_MKL #endif // TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
GB_unop__identity_fp32_uint16.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__identity_fp32_uint16)
// op(A') function: GB (_unop_tran__identity_fp32_uint16)

// C type: float
// A type: uint16_t
// cast: float cij = (float) aij
// unaryop: cij = aij

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: z is x unchanged)
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    float z = (float) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = (float) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_fp32_uint16)
(
    float *Cx,                  // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint16_t aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint16_t aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_fp32_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the loop body is generated by the shared transpose template,
    // using the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
FeatureLPPooling.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "THNN/generic/FeatureLPPooling.c"
#else

#ifndef FEATURE_LP_DEFS
#define FEATURE_LP_DEFS

// MSVC's OpenMP requires signed loop indices, hence int64_t there.
#ifdef _MSC_VER
#define FEATURE_LP_SIZE_TYPE int64_t
#define FEATURE_LP_CAST_TYPE (int64_t)
#else
#define FEATURE_LP_SIZE_TYPE size_t
#define FEATURE_LP_CAST_TYPE
#endif

// Canonical 4-d view [batch][feature][opt1][opt2] of a 1-4 d tensor.
typedef struct {
  size_t size[4];
  size_t stride[4];
} FeatureLPPoolingSizes;

// Linear element offset for the upcast 4-d view.
static inline size_t flpGetOffset(FeatureLPPoolingSizes* s,
                                  FEATURE_LP_SIZE_TYPE batch,
                                  FEATURE_LP_SIZE_TYPE feature,
                                  FEATURE_LP_SIZE_TYPE opt1,
                                  FEATURE_LP_SIZE_TYPE opt2) {
  return s->stride[0] * batch +
    s->stride[1] * feature +
    s->stride[2] * opt1 +
    s->stride[3] * opt2;
}

// Number of pooling windows of `width` at step `stride` over `inputSize`.
static inline size_t flpOutputSize(FEATURE_LP_SIZE_TYPE inputSize,
                                   FEATURE_LP_SIZE_TYPE width,
                                   FEATURE_LP_SIZE_TYPE stride) {
  return ((inputSize - width) / stride) + 1;
}

#endif // FEATURE_LP_DEFS

// Map a 1-4 d tensor onto the canonical [batch][feature][opt1][opt2] view;
// missing dimensions get size 1 / stride 1.
FeatureLPPoolingSizes
THNN_(FeatureLPPooling_upcastCPU)(THTensor* t, bool batchMode) {
  int dim = THTensor_(nDimensionLegacyAll)(t);

  // Upcast to [batch dim][feature dim][opt dim 1][opt dim 2]
  FeatureLPPoolingSizes s;
  for (int i = 0; i < 4; ++i) {
    s.size[i] = 1;
    s.stride[i] = 1;
  }

  if (dim == 1) {
    THAssert(!batchMode);
    // [feature dim]
    s.size[1] = THTensor_(size)(t, 0);
    s.stride[1] = THTensor_(stride)(t, 0);
  } else if (dim == 2) {
    if (batchMode) {
      // [batch dim][feature dim]
      for (int i = 0; i < 2; ++i) {
        s.size[i] = THTensor_(size)(t, i);
        s.stride[i] = THTensor_(stride)(t, i);
      }
    } else {
      // [feature dim][opt dim 1]
      s.size[1] = THTensor_(size)(t, 0);
      s.stride[1] = THTensor_(stride)(t, 0);
      s.size[2] = THTensor_(size)(t, 1);
      s.stride[2] = THTensor_(stride)(t, 1);
    }
  } else if (dim == 3) {
    if (batchMode) {
      // [batch dim][feature dim][opt dim 1]
      for (int i = 0; i < 3; ++i) {
        s.size[i] = THTensor_(size)(t, i);
        s.stride[i] = THTensor_(stride)(t, i);
      }
    } else {
      // [feature dim][opt dim 1][opt dim 2]
      for (int i = 1; i < 4; ++i) {
        s.size[i] = THTensor_(size)(t, i - 1);
        s.stride[i] = THTensor_(stride)(t, i - 1);
      }
    }
  } else if (dim == 4) {
    // [batch dim][feature dim][opt dim 1][opt dim 2]
    THAssert(batchMode);
    for (int i = 0; i < 4; ++i) {
      s.size[i] = THTensor_(size)(t, i);
      s.stride[i] = THTensor_(stride)(t, i);
    }
  }

  return s;
}

// Resize `toResize` to the output shape of pooling `input` along the
// feature dimension with the given width and stride.
void
THNN_(FeatureLPPooling_resizeForOutputCPU)(THTensor* toResize,
                                           THTensor* input,
                                           bool batchMode,
                                           int width,
                                           int stride) {
  int inputDim = THTensor_(nDimensionLegacyAll)(input);
  THAssert(inputDim >= 1 && inputDim <= 4);

  int64_t outSize =
    flpOutputSize(THTensor_(size)(input, 0), width, stride);
  if (batchMode) {
    THAssert(inputDim > 1);
    // feature dim is dim 1 in batch mode
    outSize = flpOutputSize(THTensor_(size)(input, 1), width, stride);
  } else {
    THAssert(inputDim < 4);
  }

  if (inputDim == 1) {
    THTensor_(resize1d)(toResize, outSize);
  } else if (inputDim == 2) {
    if (batchMode) {
      THTensor_(resize2d)(toResize, THTensor_(size)(input, 0), outSize);
    } else {
      THTensor_(resize2d)(toResize, outSize, THTensor_(size)(input, 1));
    }
  } else if (inputDim == 3) {
    if (batchMode) {
      THTensor_(resize3d)(toResize,
                          THTensor_(size)(input, 0), outSize,
                          THTensor_(size)(input, 2));
    } else {
      THTensor_(resize3d)(toResize,
                          outSize, THTensor_(size)(input, 1),
                          THTensor_(size)(input, 2));
    }
  } else if (inputDim == 4) {
    THTensor_(resize4d)(toResize,
                        THTensor_(size)(input, 0), outSize,
                        THTensor_(size)(input, 2), THTensor_(size)(input, 3));
  }
}

// Makes `toResize` the same size/dimensionality as `src`
void
THNN_(FeatureLPPooling_resizeCPU)(THTensor* toResize, THTensor* src) {
  int inputDim = THTensor_(nDimensionLegacyAll)(src);
  THAssert(inputDim >= 1 && inputDim <= 4);

  if (inputDim == 1) {
    THTensor_(resize1d)(toResize, THTensor_(size)(src, 0));
  } else if (inputDim == 2) {
    THTensor_(resize2d)(
      toResize, THTensor_(size)(src, 0), THTensor_(size)(src, 1));
  } else if (inputDim == 3) {
    THTensor_(resize3d)(
      toResize, THTensor_(size)(src, 0), THTensor_(size)(src, 1),
      THTensor_(size)(src, 2));
  } else if (inputDim == 4) {
    THTensor_(resize4d)(
      toResize, THTensor_(size)(src, 0), THTensor_(size)(src, 1),
      THTensor_(size)(src, 2), THTensor_(size)(src, 3));
  }
}

// (return type of THNN_(FeatureLPPooling_updateOutput), which follows)
void
THNN_(FeatureLPPooling_updateOutput)( THNNState *state, THTensor *input, THTensor *output, accreal power, int width, int stride, bool batchMode) { int inputDim = THTensor_(nDimensionLegacyAll)(input); if (batchMode) { THArgCheck(inputDim >= 2 && inputDim <= 4, 2, "input must be 2-4 dimensions for batch mode"); } else { THArgCheck(inputDim >= 1 && inputDim <= 3, 2, "input must be 1-3 dimensions for non-batch mode"); } FeatureLPPoolingSizes inputDesc = THNN_(FeatureLPPooling_upcastCPU)(input, batchMode); // Make sure the feature dimension is properly sized THArgCheck(inputDesc.size[1] >= (FEATURE_LP_SIZE_TYPE) width, 3, "input: feature dimension must be >= width"); // Make sure that width and stride are within range THArgCheck(width >= 2 && width <= 16, 5, "width must be between 2 - 16"); THArgCheck(stride >= 1 && stride <= 4, 6, "stride must be between 1 - 4"); // Resize output THNN_(FeatureLPPooling_resizeForOutputCPU)( output, input, batchMode, width, stride); FeatureLPPoolingSizes outputDesc = THNN_(FeatureLPPooling_upcastCPU)(output, batchMode); scalar_t* inputP = input->data<scalar_t>(); scalar_t* outputP = output->data<scalar_t>(); FEATURE_LP_SIZE_TYPE batch, opt1, opt2, outputFeature, i; #pragma omp parallel for for (batch = 0; batch < FEATURE_LP_CAST_TYPE inputDesc.size[0]; ++batch) { for (opt1 = 0; opt1 < FEATURE_LP_CAST_TYPE inputDesc.size[2]; ++opt1) { for (opt2 = 0; opt2 < FEATURE_LP_CAST_TYPE inputDesc.size[3]; ++opt2) { for (outputFeature = 0; outputFeature < FEATURE_LP_CAST_TYPE outputDesc.size[1]; ++outputFeature) { accreal v = (accreal) 0; for (i = 0; i < (FEATURE_LP_SIZE_TYPE) width; ++i) { FEATURE_LP_SIZE_TYPE inputFeature = outputFeature * stride + i; if (inputFeature >= FEATURE_LP_CAST_TYPE inputDesc.size[1]) { break; } v += pow(inputP[flpGetOffset(&inputDesc, batch, inputFeature, opt1, opt2)], power); } outputP[flpGetOffset(&outputDesc, batch, outputFeature, opt1, opt2)] = pow(v, (accreal) 1 / power); } } } } } void 
THNN_(FeatureLPPooling_updateGradInput)(
  THNNState *state,
  THTensor* gradOutput,
  THTensor* input,
  THTensor* output,
  THTensor* gradInput,
  accreal power,
  int width,
  int stride,
  bool batchMode) {
  // Backward pass: accumulate grad * (x_i / f(x_is))^(p - 1) into each
  // input position covered by a pooling window.
  int inputDim = THTensor_(nDimensionLegacyAll)(input);

  if (batchMode) {
    THArgCheck(inputDim >= 2 && inputDim <= 4, 3,
               "input must be 2-4 dimensions for batch mode");
  } else {
    THArgCheck(inputDim >= 1 && inputDim <= 3, 3,
               "input must be 1-3 dimensions for non-batch mode");
  }

  FeatureLPPoolingSizes inputDesc =
    THNN_(FeatureLPPooling_upcastCPU)(input, batchMode);
  FeatureLPPoolingSizes gradOutputDesc =
    THNN_(FeatureLPPooling_upcastCPU)(gradOutput, batchMode);
  FeatureLPPoolingSizes outputDesc =
    THNN_(FeatureLPPooling_upcastCPU)(output, batchMode);

  // Make sure the feature dimension is properly sized
  THArgCheck(inputDesc.size[1] >= (FEATURE_LP_SIZE_TYPE) width, 3,
             "input: feature dimension must be >= width");

  // Make sure that width and stride are within range
  THArgCheck(width >= 2 && width <= 16, 7,
             "width must be between 2 - 16");
  THArgCheck(stride >= 1 && stride <= 4, 8,
             "stride must be between 1 - 4");

  for (int i = 0; i < 4; ++i) {
    THAssertMsg(outputDesc.size[i] == gradOutputDesc.size[i],
                "output and gradOutput sizes do not match");
  }

  // Make sure that the input sizes produce the output sizes
  THArgCheck(flpOutputSize(FEATURE_LP_CAST_TYPE inputDesc.size[1],
                           width, stride) == outputDesc.size[1], 3,
             "input and output sizes do not match with respect to "
             "width and stride");

  // Resize `gradInput` based on `input`
  THNN_(FeatureLPPooling_resizeCPU)(gradInput, input);

  // Zero gradInput for accumulation
  THTensor_(zero)(gradInput);

  FeatureLPPoolingSizes gradInputDesc =
    THNN_(FeatureLPPooling_upcastCPU)(gradInput, batchMode);

  scalar_t* gradOutputP = gradOutput->data<scalar_t>();
  scalar_t* gradInputP = gradInput->data<scalar_t>();
  scalar_t* outputP = output->data<scalar_t>();
  scalar_t* inputP = input->data<scalar_t>();

  FEATURE_LP_SIZE_TYPE batch, opt1, opt2, outputFeature, i;

  // Parallel over batches only: windows overlap within a batch, so the
  // += accumulation below must stay single-threaded per batch element.
#pragma omp parallel for
  for (batch = 0; batch < FEATURE_LP_CAST_TYPE inputDesc.size[0]; ++batch) {
    for (opt1 = 0; opt1 < FEATURE_LP_CAST_TYPE inputDesc.size[2]; ++opt1) {
      for (opt2 = 0; opt2 < FEATURE_LP_CAST_TYPE inputDesc.size[3]; ++opt2) {
        for (outputFeature = 0;
             outputFeature < FEATURE_LP_CAST_TYPE outputDesc.size[1];
             ++outputFeature) {
          // Load output (f(x_is)). It is possible that this is zero, in
          // which case we'll ignore this point.
          scalar_t outputV =
            outputP[
              flpGetOffset(&outputDesc,
                           batch,
                           outputFeature,
                           opt1,
                           opt2)];

          if (outputV == (scalar_t) 0) {
            continue;
          }

          for (i = 0; i < (FEATURE_LP_SIZE_TYPE) width; ++i) {
            FEATURE_LP_SIZE_TYPE inputFeature = outputFeature * stride + i;
            THAssert(inputFeature < inputDesc.size[1]);

            scalar_t gradOutputV =
              gradOutputP[
                flpGetOffset(&gradOutputDesc,
                             batch,
                             outputFeature,
                             opt1,
                             opt2)];
            scalar_t inputV =
              inputP[
                flpGetOffset(&inputDesc,
                             batch,
                             inputFeature,
                             opt1,
                             opt2)];

            // Calculate grad * (x_i / f(x_is))^(p - 1)
            scalar_t v = gradOutputV * pow(inputV / outputV,
                                           power - (accreal) 1);

            gradInputP[
              flpGetOffset(&gradInputDesc,
                           batch,
                           inputFeature,
                           opt1,
                           opt2)] += v;
          }
        }
      }
    }
  }
}

#endif
GB_binop__ge_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ge_uint16) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__ge_uint16) // A.*B function (eWiseMult): GB (_AemultB_03__ge_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_uint16) // A*D function (colscale): GB (_AxD__ge_uint16) // D*A function (rowscale): GB (_DxB__ge_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__ge_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__ge_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_uint16) // C=scalar+B GB (_bind1st__ge_uint16) // C=scalar+B' GB (_bind1st_tran__ge_uint16) // C=A+scalar GB (_bind2nd__ge_uint16) // C=A'+scalar GB (_bind2nd_tran__ge_uint16) // C type: bool // A type: uint16_t // B,b type: uint16_t // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] 
#define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GE || GxB_NO_UINT16 || GxB_NO_GE_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__ge_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__ge_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // template disabled for this operator (GE is not an accum op)
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__ge_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // template disabled for this operator (GE is not an accum op)
    #if 0
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__ge_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__ge_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__ge_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__ge_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__ge_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__ge_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__ge_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__ge_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = Bx [p] ;
        Cx [p] = (x >= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__ge_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = Ax [p] ;
        Cx [p] = (aij >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint16_t aij = Ax [pA] ; \
    Cx [pC] = (x >= aij) ; \
}

GrB_Info GB (_bind1st_tran__ge_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (same definition; kept for generator symmetry)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint16_t aij = Ax [pA] ; \
    Cx [pC] = (aij >= y) ; \
}

GrB_Info GB (_bind2nd_tran__ge_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
morn_image_convert.c
/* Copyright (C) 2019-2020 JingWeiZhangHuai <jingweizhanghuai@163.com> Licensed under the Apache License, Version 2.0; you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "morn_image.h" short v_to_r[256] = {-180 ,-178 ,-177 ,-175 ,-174 ,-173 ,-171 ,-170 ,-168 ,-167 ,-166 ,-164 ,-163 ,-161 ,-160 ,-159 ,-157 ,-156 ,-154 ,-153 ,-152 ,-150 ,-149 ,-147 ,-146 ,-145 ,-143 ,-142 ,-140 ,-139 ,-137 ,-136 ,-135 ,-133 ,-132 ,-130 ,-129 ,-128 ,-126 ,-125 ,-123 ,-122 ,-121 ,-119 ,-118 ,-116 ,-115 ,-114 ,-112 ,-111 ,-109 ,-108 ,-107 ,-105 ,-104 ,-102 ,-101 ,-100 ,-98 ,-97 ,-95 ,-94 ,-93 ,-91 ,-90 ,-88 ,-87 ,-86 ,-84 ,-83 ,-81 ,-80 ,-79 ,-77 ,-76 ,-74 ,-73 ,-72 ,-70 ,-69 ,-67 ,-66 ,-65 ,-63 ,-62 ,-60 ,-59 ,-58 ,-56 ,-55 ,-53 ,-52 ,-51 ,-49 ,-48 ,-46 ,-45 ,-43 ,-42 ,-41 ,-39 ,-38 ,-36 ,-35 ,-34 ,-32 ,-31 ,-29 ,-28 ,-27 ,-25 ,-24 ,-22 ,-21 ,-20 ,-18 ,-17 ,-15 ,-14 ,-13 ,-11 ,-10 ,-8 ,-7 ,-6 ,-4 ,-3 ,-1 ,0 ,1 ,3 ,4 ,6 ,7 ,8 ,10 ,11 ,13 ,14 ,15 ,17 ,18 ,20 ,21 ,22 ,24 ,25 ,27 ,28 ,29 ,31 ,32 ,34 ,35 ,36 ,38 ,39 ,41 ,42 ,43 ,45 ,46 ,48 ,49 ,51 ,52 ,53 ,55 ,56 ,58 ,59 ,60 ,62 ,63 ,65 ,66 ,67 ,69 ,70 ,72 ,73 ,74 ,76 ,77 ,79 ,80 ,81 ,83 ,84 ,86 ,87 ,88 ,90 ,91 ,93 ,94 ,95 ,97 ,98 ,100 ,101 ,102 ,104 ,105 ,107 ,108 ,109 ,111 ,112 ,114 ,115 ,116 ,118 ,119 ,121 ,122 ,123 ,125 ,126 ,128 ,129 ,130 ,132 ,133 ,135 ,136 ,137 ,139 ,140 ,142 ,143 ,145 ,146 ,147 ,149 ,150 ,152 ,153 ,154 ,156 ,157 ,159 ,160 ,161 ,163 ,164 ,166 ,167 ,168 ,170 ,171 ,173 ,174 ,175 ,177 ,178}; short u_to_g[256] = {-44 ,-44 ,-43 ,-43 ,-43 ,-42 ,-42 ,-42 ,-41 
,-41 ,-40 ,-40 ,-40 ,-39 ,-39 ,-39 ,-38 ,-38 ,-38 ,-37 ,-37 ,-37 ,-36 ,-36 ,-36 ,-35 ,-35 ,-35 ,-34 ,-34 ,-34 ,-33 ,-33 ,-33 ,-32 ,-32 ,-32 ,-31 ,-31 ,-31 ,-30 ,-30 ,-29 ,-29 ,-29 ,-28 ,-28 ,-28 ,-27 ,-27 ,-27 ,-26 ,-26 ,-26 ,-25 ,-25 ,-25 ,-24 ,-24 ,-24 ,-23 ,-23 ,-23 ,-22 ,-22 ,-22 ,-21 ,-21 ,-21 ,-20 ,-20 ,-20 ,-19 ,-19 ,-19 ,-18 ,-18 ,-17 ,-17 ,-17 ,-16 ,-16 ,-16 ,-15 ,-15 ,-15 ,-14 ,-14 ,-14 ,-13 ,-13 ,-13 ,-12 ,-12 ,-12 ,-11 ,-11 ,-11 ,-10 ,-10 ,-10 ,-9 ,-9 ,-9 ,-8 ,-8 ,-8 ,-7 ,-7 ,-7 ,-6 ,-6 ,-5 ,-5 ,-5 ,-4 ,-4 ,-4 ,-3 ,-3 ,-3 ,-2 ,-2 ,-2 ,-1 ,-1 ,-1 ,0 ,0 ,0 ,1 ,1 ,1 ,2 ,2 ,2 ,3 ,3 ,3 ,4 ,4 ,4 ,5 ,5 ,5 ,6 ,6 ,7 ,7 ,7 ,8 ,8 ,8 ,9 ,9 ,9 ,10 ,10 ,10 ,11 ,11 ,11 ,12 ,12 ,12 ,13 ,13 ,13 ,14 ,14 ,14 ,15 ,15 ,15 ,16 ,16 ,16 ,17 ,17 ,17 ,18 ,18 ,19 ,19 ,19 ,20 ,20 ,20 ,21 ,21 ,21 ,22 ,22 ,22 ,23 ,23 ,23 ,24 ,24 ,24 ,25 ,25 ,25 ,26 ,26 ,26 ,27 ,27 ,27 ,28 ,28 ,28 ,29 ,29 ,29 ,30 ,30 ,31 ,31 ,31 ,32 ,32 ,32 ,33 ,33 ,33 ,34 ,34 ,34 ,35 ,35 ,35 ,36 ,36 ,36 ,37 ,37 ,37 ,38 ,38 ,38 ,39 ,39 ,39 ,40 ,40 ,40 ,41 ,41 ,42 ,42 ,42 ,43 ,43 ,43 ,44}; short v_to_g[256] = {-91 ,-91 ,-90 ,-89 ,-89 ,-88 ,-87 ,-86 ,-86 ,-85 ,-84 ,-84 ,-83 ,-82 ,-81 ,-81 ,-80 ,-79 ,-79 ,-78 ,-77 ,-76 ,-76 ,-75 ,-74 ,-74 ,-73 ,-72 ,-71 ,-71 ,-70 ,-69 ,-69 ,-68 ,-67 ,-66 ,-66 ,-65 ,-64 ,-64 ,-63 ,-62 ,-61 ,-61 ,-60 ,-59 ,-59 ,-58 ,-57 ,-56 ,-56 ,-55 ,-54 ,-54 ,-53 ,-52 ,-51 ,-51 ,-50 ,-49 ,-49 ,-48 ,-47 ,-46 ,-46 ,-45 ,-44 ,-44 ,-43 ,-42 ,-41 ,-41 ,-40 ,-39 ,-39 ,-38 ,-37 ,-36 ,-36 ,-35 ,-34 ,-34 ,-33 ,-32 ,-31 ,-31 ,-30 ,-29 ,-29 ,-28 ,-27 ,-26 ,-26 ,-25 ,-24 ,-24 ,-23 ,-22 ,-21 ,-21 ,-20 ,-19 ,-19 ,-18 ,-17 ,-16 ,-16 ,-15 ,-14 ,-14 ,-13 ,-12 ,-11 ,-11 ,-10 ,-9 ,-9 ,-8 ,-7 ,-6 ,-6 ,-5 ,-4 ,-4 ,-3 ,-2 ,-1 ,-1 ,0 ,1 ,1 ,2 ,3 ,4 ,4 ,5 ,6 ,6 ,7 ,8 ,9 ,9 ,10 ,11 ,11 ,12 ,13 ,14 ,14 ,15 ,16 ,16 ,17 ,18 ,19 ,19 ,20 ,21 ,21 ,22 ,23 ,24 ,24 ,25 ,26 ,26 ,27 ,28 ,29 ,29 ,30 ,31 ,31 ,32 ,33 ,34 ,34 ,35 ,36 ,36 ,37 ,38 ,39 ,39 ,40 ,41 ,41 ,42 ,43 ,44 ,44 ,45 ,46 ,46 ,47 ,48 ,49 ,49 ,50 ,51 ,51 ,52 ,53 ,54 ,54 ,55 
,56 ,56 ,57 ,58 ,59 ,59 ,60 ,61 ,61 ,62 ,63 ,64 ,64 ,65 ,66 ,66 ,67 ,68 ,69 ,69 ,70 ,71 ,71 ,72 ,73 ,74 ,74 ,75 ,76 ,76 ,77 ,78 ,79 ,79 ,80 ,81 ,81 ,82 ,83 ,84 ,84 ,85 ,86 ,86 ,87 ,88 ,89 ,89 ,90 ,91}; short u_to_b[256] = {-227 ,-225 ,-223 ,-221 ,-219 ,-218 ,-216 ,-214 ,-212 ,-211 ,-209 ,-207 ,-205 ,-204 ,-202 ,-200 ,-198 ,-196 ,-195 ,-193 ,-191 ,-189 ,-188 ,-186 ,-184 ,-182 ,-181 ,-179 ,-177 ,-175 ,-173 ,-172 ,-170 ,-168 ,-166 ,-165 ,-163 ,-161 ,-159 ,-158 ,-156 ,-154 ,-152 ,-150 ,-149 ,-147 ,-145 ,-143 ,-142 ,-140 ,-138 ,-136 ,-135 ,-133 ,-131 ,-129 ,-127 ,-126 ,-124 ,-122 ,-120 ,-119 ,-117 ,-115 ,-113 ,-112 ,-110 ,-108 ,-106 ,-104 ,-103 ,-101 ,-99 ,-97 ,-96 ,-94 ,-92 ,-90 ,-89 ,-87 ,-85 ,-83 ,-81 ,-80 ,-78 ,-76 ,-74 ,-73 ,-71 ,-69 ,-67 ,-65 ,-64 ,-62 ,-60 ,-58 ,-57 ,-55 ,-53 ,-51 ,-50 ,-48 ,-46 ,-44 ,-42 ,-41 ,-39 ,-37 ,-35 ,-34 ,-32 ,-30 ,-28 ,-27 ,-25 ,-23 ,-21 ,-19 ,-18 ,-16 ,-14 ,-12 ,-11 ,-9 ,-7 ,-5 ,-4 ,-2 ,0 ,2 ,4 ,5 ,7 ,9 ,11 ,12 ,14 ,16 ,18 ,19 ,21 ,23 ,25 ,27 ,28 ,30 ,32 ,34 ,35 ,37 ,39 ,41 ,42 ,44 ,46 ,48 ,50 ,51 ,53 ,55 ,57 ,58 ,60 ,62 ,64 ,65 ,67 ,69 ,71 ,73 ,74 ,76 ,78 ,80 ,81 ,83 ,85 ,87 ,89 ,90 ,92 ,94 ,96 ,97 ,99 ,101 ,103 ,104 ,106 ,108 ,110 ,112 ,113 ,115 ,117 ,119 ,120 ,122 ,124 ,126 ,127 ,129 ,131 ,133 ,135 ,136 ,138 ,140 ,142 ,143 ,145 ,147 ,149 ,150 ,152 ,154 ,156 ,158 ,159 ,161 ,163 ,165 ,166 ,168 ,170 ,172 ,173 ,175 ,177 ,179 ,181 ,182 ,184 ,186 ,188 ,189 ,191 ,193 ,195 ,196 ,198 ,200 ,202 ,204 ,205 ,207 ,209 ,211 ,212 ,214 ,216 ,218 ,219 ,221 ,223 ,225}; unsigned char r_to_y[256] = {0 ,0 ,1 ,1 ,1 ,1 ,2 ,2 ,2 ,3 ,3 ,3 ,4 ,4 ,4 ,4 ,5 ,5 ,5 ,6 ,6 ,6 ,7 ,7 ,7 ,7 ,8 ,8 ,8 ,9 ,9 ,9 ,10 ,10 ,10 ,10 ,11 ,11 ,11 ,12 ,12 ,12 ,13 ,13 ,13 ,13 ,14 ,14 ,14 ,15 ,15 ,15 ,16 ,16 ,16 ,16 ,17 ,17 ,17 ,18 ,18 ,18 ,19 ,19 ,19 ,19 ,20 ,20 ,20 ,21 ,21 ,21 ,22 ,22 ,22 ,22 ,23 ,23 ,23 ,24 ,24 ,24 ,25 ,25 ,25 ,25 ,26 ,26 ,26 ,27 ,27 ,27 ,28 ,28 ,28 ,28 ,29 ,29 ,29 ,30 ,30 ,30 ,30 ,31 ,31 ,31 ,32 ,32 ,32 ,33 ,33 ,33 ,33 ,34 ,34 ,34 ,35 ,35 ,35 ,36 ,36 ,36 ,36 ,37 
,37 ,37 ,38 ,38 ,38 ,39 ,39 ,39 ,39 ,40 ,40 ,40 ,41 ,41 ,41 ,42 ,42 ,42 ,42 ,43 ,43 ,43 ,44 ,44 ,44 ,45 ,45 ,45 ,45 ,46 ,46 ,46 ,47 ,47 ,47 ,48 ,48 ,48 ,48 ,49 ,49 ,49 ,50 ,50 ,50 ,51 ,51 ,51 ,51 ,52 ,52 ,52 ,53 ,53 ,53 ,54 ,54 ,54 ,54 ,55 ,55 ,55 ,56 ,56 ,56 ,57 ,57 ,57 ,57 ,58 ,58 ,58 ,59 ,59 ,59 ,60 ,60 ,60 ,60 ,61 ,61 ,61 ,62 ,62 ,62 ,62 ,63 ,63 ,63 ,64 ,64 ,64 ,65 ,65 ,65 ,65 ,66 ,66 ,66 ,67 ,67 ,67 ,68 ,68 ,68 ,68 ,69 ,69 ,69 ,70 ,70 ,70 ,71 ,71 ,71 ,71 ,72 ,72 ,72 ,73 ,73 ,73 ,74 ,74 ,74 ,74 ,75 ,75 ,75 ,76 ,76 ,76}; unsigned char g_to_y[256] = {0 ,1 ,1 ,2 ,2 ,3 ,4 ,4 ,5 ,5 ,6 ,6 ,7 ,8 ,8 ,9 ,9 ,10 ,11 ,11 ,12 ,12 ,13 ,14 ,14 ,15 ,15 ,16 ,16 ,17 ,18 ,18 ,19 ,19 ,20 ,21 ,21 ,22 ,22 ,23 ,23 ,24 ,25 ,25 ,26 ,26 ,27 ,28 ,28 ,29 ,29 ,30 ,31 ,31 ,32 ,32 ,33 ,33 ,34 ,35 ,35 ,36 ,36 ,37 ,38 ,38 ,39 ,39 ,40 ,41 ,41 ,42 ,42 ,43 ,43 ,44 ,45 ,45 ,46 ,46 ,47 ,48 ,48 ,49 ,49 ,50 ,50 ,51 ,52 ,52 ,53 ,53 ,54 ,55 ,55 ,56 ,56 ,57 ,58 ,58 ,59 ,59 ,60 ,60 ,61 ,62 ,62 ,63 ,63 ,64 ,65 ,65 ,66 ,66 ,67 ,68 ,68 ,69 ,69 ,70 ,70 ,71 ,72 ,72 ,73 ,73 ,74 ,75 ,75 ,76 ,76 ,77 ,77 ,78 ,79 ,79 ,80 ,80 ,81 ,82 ,82 ,83 ,83 ,84 ,85 ,85 ,86 ,86 ,87 ,87 ,88 ,89 ,89 ,90 ,90 ,91 ,92 ,92 ,93 ,93 ,94 ,95 ,95 ,96 ,96 ,97 ,97 ,98 ,99 ,99 ,100 ,100 ,101 ,102 ,102 ,103 ,103 ,104 ,104 ,105 ,106 ,106 ,107 ,107 ,108 ,109 ,109 ,110 ,110 ,111 ,112 ,112 ,113 ,113 ,114 ,114 ,115 ,116 ,116 ,117 ,117 ,118 ,119 ,119 ,120 ,120 ,121 ,122 ,122 ,123 ,123 ,124 ,124 ,125 ,126 ,126 ,127 ,127 ,128 ,129 ,129 ,130 ,130 ,131 ,131 ,132 ,133 ,133 ,134 ,134 ,135 ,136 ,136 ,137 ,137 ,138 ,139 ,139 ,140 ,140 ,141 ,141 ,142 ,143 ,143 ,144 ,144 ,145 ,146 ,146 ,147 ,147 ,148 ,149 ,149 ,150}; unsigned char b_to_y[256] = {0 ,0 ,0 ,0 ,0 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,4 ,4 ,4 ,4 ,4 ,4 ,4 ,4 ,4 ,5 ,5 ,5 ,5 ,5 ,5 ,5 ,5 ,5 ,6 ,6 ,6 ,6 ,6 ,6 ,6 ,6 ,6 ,7 ,7 ,7 ,7 ,7 ,7 ,7 ,7 ,8 ,8 ,8 ,8 ,8 ,8 ,8 ,8 ,8 ,9 ,9 ,9 ,9 ,9 ,9 ,9 ,9 ,9 ,10 ,10 ,10 ,10 ,10 ,10 ,10 ,10 ,10 ,11 ,11 ,11 ,11 ,11 ,11 ,11 
,11 ,12 ,12 ,12 ,12 ,12 ,12 ,12 ,12 ,12 ,13 ,13 ,13 ,13 ,13 ,13 ,13 ,13 ,13 ,14 ,14 ,14 ,14 ,14 ,14 ,14 ,14 ,14 ,15 ,15 ,15 ,15 ,15 ,15 ,15 ,15 ,16 ,16 ,16 ,16 ,16 ,16 ,16 ,16 ,16 ,17 ,17 ,17 ,17 ,17 ,17 ,17 ,17 ,17 ,18 ,18 ,18 ,18 ,18 ,18 ,18 ,18 ,18 ,19 ,19 ,19 ,19 ,19 ,19 ,19 ,19 ,19 ,20 ,20 ,20 ,20 ,20 ,20 ,20 ,20 ,21 ,21 ,21 ,21 ,21 ,21 ,21 ,21 ,21 ,22 ,22 ,22 ,22 ,22 ,22 ,22 ,22 ,22 ,23 ,23 ,23 ,23 ,23 ,23 ,23 ,23 ,23 ,24 ,24 ,24 ,24 ,24 ,24 ,24 ,24 ,25 ,25 ,25 ,25 ,25 ,25 ,25 ,25 ,25 ,26 ,26 ,26 ,26 ,26 ,26 ,26 ,26 ,26 ,27 ,27 ,27 ,27 ,27 ,27 ,27 ,27 ,27 ,28 ,28 ,28 ,28 ,28 ,28 ,28 ,28 ,29 ,29 ,29 ,29 ,29 ,29}; unsigned char r_to_v[512] = {0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,1 ,2 ,3 ,3 ,4 ,5 ,5 ,6 ,7 ,8 ,8 ,9 ,10 ,10 ,11 ,12 ,12 ,13 ,14 ,15 ,15 ,16 ,17 ,17 ,18 ,19 ,20 ,20 ,21 ,22 ,22 ,23 ,24 ,25 ,25 ,26 ,27 ,27 ,28 ,29 ,30 ,30 ,31 ,32 ,32 ,33 ,34 ,35 ,35 ,36 ,37 ,37 ,38 ,39 ,40 ,40 ,41 ,42 ,42 ,43 ,44 ,45 ,45 ,46 ,47 ,47 ,48 ,49 ,50 ,50 ,51 ,52 ,52 ,53 ,54 ,55 ,55 ,56 ,57 ,57 ,58 ,59 ,60 ,60 ,61 ,62 ,62 ,63 ,64 ,65 ,65 ,66 ,67 ,67 ,68 ,69 ,70 ,70 ,71 ,72 ,72 ,73 ,74 ,75 ,75 ,76 ,77 ,77 ,78 ,79 ,80 ,80 ,81 ,82 ,82 ,83 ,84 ,85 ,85 ,86 ,87 ,87 ,88 ,89 ,89 ,90 ,91 ,92 ,92 ,93 ,94 ,94 ,95 ,96 ,97 ,97 ,98 ,99 ,99 ,100 ,101 ,102 ,102 ,103 ,104 ,104 ,105 ,106 ,107 ,107 ,108 ,109 ,109 ,110 ,111 ,112 ,112 ,113 ,114 ,114 ,115 ,116 ,117 ,117 ,118 ,119 ,119 ,120 ,121 ,122 ,122 ,123 ,124 ,124 ,125 ,126 ,127 ,127, 128 ,129 ,129 ,130 ,131 ,132 ,132 ,133 ,134 ,134 ,135 ,136 ,137 ,137 ,138 ,139 ,139 ,140 ,141 ,142 ,142 ,143 ,144 ,144 ,145 ,146 ,147 ,147 ,148 ,149 ,149 ,150 ,151 ,152 ,152 ,153 ,154 ,154 ,155 ,156 ,157 ,157 ,158 ,159 ,159 ,160 ,161 ,162 ,162 ,163 ,164 ,164 ,165 ,166 ,167 ,167 ,168 ,169 ,169 ,170 ,171 ,171 ,172 ,173 ,174 ,174 ,175 ,176 ,176 ,177 ,178 ,179 ,179 ,180 
,181 ,181 ,182 ,183 ,184 ,184 ,185 ,186 ,186 ,187 ,188 ,189 ,189 ,190 ,191 ,191 ,192 ,193 ,194 ,194 ,195 ,196 ,196 ,197 ,198 ,199 ,199 ,200 ,201 ,201 ,202 ,203 ,204 ,204 ,205 ,206 ,206 ,207 ,208 ,209 ,209 ,210 ,211 ,211 ,212 ,213 ,214 ,214 ,215 ,216 ,216 ,217 ,218 ,219 ,219 ,220 ,221 ,221 ,222 ,223 ,224 ,224 ,225 ,226 ,226 ,227 ,228 ,229 ,229 ,230 ,231 ,231 ,232 ,233 ,234 ,234 ,235 ,236 ,236 ,237 ,238 ,239 ,239 ,240 ,241 ,241 ,242 ,243 ,244 ,244 ,245 ,246 ,246 ,247 ,248 ,248 ,249 ,250 ,251 ,251 ,252 ,253 ,253 ,254 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255}; unsigned char b_to_u[512] = {0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,1 ,1 ,2 ,2 ,3 ,3 ,4 ,4 ,5 ,6 ,6 ,7 ,7 ,8 ,8 ,9 ,10 ,10 ,11 ,11 ,12 ,12 ,13 ,14 ,14 ,15 ,15 ,16 ,16 ,17 ,17 ,18 ,19 ,19 ,20 ,20 ,21 ,21 ,22 ,23 ,23 ,24 ,24 ,25 ,25 ,26 ,26 ,27 ,28 ,28 ,29 ,29 ,30 ,30 ,31 ,32 ,32 ,33 ,33 ,34 ,34 ,35 ,36 ,36 ,37 ,37 ,38 ,38 ,39 ,39 ,40 ,41 ,41 ,42 ,42 ,43 ,43 ,44 ,45 ,45 ,46 ,46 ,47 ,47 ,48 ,48 ,49 ,50 ,50 ,51 ,51 ,52 ,52 ,53 ,54 ,54 ,55 ,55 ,56 ,56 ,57 ,58 ,58 ,59 ,59 ,60 ,60 ,61 ,61 ,62 ,63 ,63 ,64 ,64 ,65 ,65 ,66 ,67 ,67 ,68 ,68 ,69 ,69 ,70 ,70 ,71 ,72 ,72 ,73 ,73 ,74 ,74 ,75 ,76 ,76 ,77 ,77 ,78 ,78 ,79 ,79 ,80 ,81 ,81 ,82 ,82 ,83 ,83 ,84 ,85 ,85 ,86 ,86 ,87 ,87 ,88 ,89 ,89 ,90 ,90 ,91 ,91 ,92 ,92 ,93 ,94 ,94 ,95 ,95 ,96 ,96 ,97 ,98 ,98 ,99 ,99 ,100 ,100 ,101 ,101 ,102 ,103 ,103 ,104 ,104 ,105 ,105 ,106 ,107 ,107 ,108 ,108 ,109 ,109 ,110 ,111 ,111 ,112 ,112 ,113 ,113 ,114 ,114 ,115 ,116 ,116 ,117 ,117 ,118 ,118 ,119 ,120 ,120 ,121 ,121 ,122 ,122 ,123 ,123 ,124 ,125 ,125 ,126 ,126 ,127 ,127 , 128 ,129 ,129 ,130 ,130 ,131 
,131 ,132 ,133 ,133 ,134 ,134 ,135 ,135 ,136 ,136 ,137 ,138 ,138 ,139 ,139 ,140 ,140 ,141 ,142 ,142 ,143 ,143 ,144 ,144 ,145 ,145 ,146 ,147 ,147 ,148 ,148 ,149 ,149 ,150 ,151 ,151 ,152 ,152 ,153 ,153 ,154 ,155 ,155 ,156 ,156 ,157 ,157 ,158 ,158 ,159 ,160 ,160 ,161 ,161 ,162 ,162 ,163 ,164 ,164 ,165 ,165 ,166 ,166 ,167 ,167 ,168 ,169 ,169 ,170 ,170 ,171 ,171 ,172 ,173 ,173 ,174 ,174 ,175 ,175 ,176 ,177 ,177 ,178 ,178 ,179 ,179 ,180 ,180 ,181 ,182 ,182 ,183 ,183 ,184 ,184 ,185 ,186 ,186 ,187 ,187 ,188 ,188 ,189 ,189 ,190 ,191 ,191 ,192 ,192 ,193 ,193 ,194 ,195 ,195 ,196 ,196 ,197 ,197 ,198 ,199 ,199 ,200 ,200 ,201 ,201 ,202 ,202 ,203 ,204 ,204 ,205 ,205 ,206 ,206 ,207 ,208 ,208 ,209 ,209 ,210 ,210 ,211 ,211 ,212 ,213 ,213 ,214 ,214 ,215 ,215 ,216 ,217 ,217 ,218 ,218 ,219 ,219 ,220 ,220 ,221 ,222 ,222 ,223 ,223 ,224 ,224 ,225 ,226 ,226 ,227 ,227 ,228 ,228 ,229 ,230 ,230 ,231 ,231 ,232 ,232 ,233 ,233 ,234 ,235 ,235 ,236 ,236 ,237 ,237 ,238 ,239 ,239 ,240 ,240 ,241 ,241 ,242 ,242 ,243 ,244 ,244 ,245 ,245 ,246 ,246 ,247 ,248 ,248 ,249 ,249 ,250 ,250 ,251 ,252 ,252 ,253 ,253 ,254 ,254 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255 ,255}; void m_ImageYUVToRGB(MImage *src,MImage *dst) { mException(INVALID_IMAGE(src),EXIT,"invalid input"); mException((src->channel!=3),EXIT,"invalid input"); if(dst==NULL) dst = src; if(dst!=src) mImageRedefine(dst,MAX(3,dst->channel),src->height,src->width,dst->data); if(!INVALID_POINTER(src->border)) dst->border = src->border; int j; #pragma omp parallel for for(j=ImageY1(dst);j<ImageY2(dst);j++) { for(int i=ImageX1(dst,j);i<ImageX2(dst,j);i++) { unsigned char y = src->data[0][j][i]; unsigned char u = src->data[1][j][i]; unsigned char v = src->data[2][j][i]; short r = y + v_to_r[v]; short g = y - u_to_g[u] - v_to_g[v]; short b = y + u_to_b[u]; if(r<0) dst->data[2][j][i]=0; else if(r>255) dst->data[2][j][i]=255; else dst->data[2][j][i] = r; 
/* NOTE(review): this span continues m_ImageYUVToRGB() from the previous
   chunk -- below is the tail of its per-pixel loop: clamp G and B into
   [0,255] and store them (planes are ordered B=0, G=1, R=2 throughout
   this file). */
if(g<0) dst->data[1][j][i]=0; else if(g>255) dst->data[1][j][i]=255; else dst->data[1][j][i] = g;
if(b<0) dst->data[0][j][i]=0; else if(b>255) dst->data[0][j][i]=255; else dst->data[0][j][i] = b;
} }
/* exactly 3 channels -> RGB, otherwise tagged RGBA */
*ImageType(dst)=(dst->channel==3)?MORN_IMAGE_RGB:MORN_IMAGE_RGBA;
}

/* Convert a packed single-channel YUV 4:2:2 image into a planar RGB image
   of half the source width, via the v_to_r/u_to_g/v_to_g/u_to_b tables.
   NOTE(review): the byte indexing (u at [0], y at [2i+1], v at [2i+2],
   next y at [2i+3], next u at [2i+4]) looks like UYVY ordering -- confirm
   against the capture source. */
void m_ImageYUV422ToRGB(MImage *src,MImage *dst)
{
mException(INVALID_IMAGE(src),EXIT,"invalid input");
mException((src->channel!=1),EXIT,"invalid input");
/* output has half the source width and at least 3 channels */
mImageRedefine(dst,MAX(3,dst->channel),src->height,src->width/2,dst->data);
unsigned char **sdata=src->data[0];
int j;
#pragma omp parallel for
for(j=0;j<dst->height;j++)
{
unsigned char y,u,v; short r,g,b;
u = sdata[j][0];
for(int i=0;i<dst->width;i+=2)
{
/* first pixel of the pair */
y=sdata[j][i+i+1]; v=sdata[j][i+i+2];
r = y + v_to_r[v];g = y - u_to_g[u] - v_to_g[v];b = y + u_to_b[u];
if(r<0) dst->data[2][j][i]=0; else if(r>255) dst->data[2][j][i]=255; else dst->data[2][j][i] = r;
if(g<0) dst->data[1][j][i]=0; else if(g>255) dst->data[1][j][i]=255; else dst->data[1][j][i] = g;
if(b<0) dst->data[0][j][i]=0; else if(b>255) dst->data[0][j][i]=255; else dst->data[0][j][i] = b;
/* second pixel of the pair; also fetches the U byte for the next pair.
   NOTE(review): on the last iteration (i == dst->width-2) the read at
   sdata[j][i+i+4] lands one byte past src->width -- verify the row
   allocation provides slack, or guard this access. */
y=sdata[j][i+i+3];u=sdata[j][i+i+4];
r = y + v_to_r[v];g = y - u_to_g[u] - v_to_g[v];b = y + u_to_b[u];
if(r<0) dst->data[2][j][i+1]=0; else if(r>255) dst->data[2][j][i+1]=255; else dst->data[2][j][i+1] = r;
if(g<0) dst->data[1][j][i+1]=0; else if(g>255) dst->data[1][j][i+1]=255; else dst->data[1][j][i+1] = g;
if(b<0) dst->data[0][j][i+1]=0; else if(b>255) dst->data[0][j][i+1]=255; else dst->data[0][j][i+1] = b;
} }
*(ImageType(dst))=(dst->channel==3)?MORN_IMAGE_RGB:MORN_IMAGE_RGBA;
}

/* Extract the luma (Y) plane of a planar YUV image as a 1-channel gray
   image; dst==NULL (or dst==src) converts in place. */
void m_ImageYUVToGray(MImage *src,MImage *dst)
{
mException(INVALID_IMAGE(src),EXIT,"invalid input");
mException((src->channel<3),EXIT,"invalid input");
if(INVALID_POINTER(dst)) dst = src;
if(dst!=src) mImageRedefine(dst,1,src->height,src->width,dst->data);
if(!INVALID_POINTER(src->border)) dst->border = src->border;
int j;
for(j=ImageY1(dst);j<ImageY2(dst);j++)
/* NOTE(review): the source pointer is not offset by ImageX1 while the
   destination is, and the length mixes ImageX2(src,..) with
   ImageX1(dst,..) -- confirm this is intended when src and dst carry
   different borders. */
memcpy(dst->data[0][j]+ImageX1(dst,j),src->data[0][j],(ImageX2(src,j)-ImageX1(dst,j))*sizeof(unsigned char));
*(ImageType(dst))=MORN_IMAGE_GRAY;
dst->channel = 1;
}

/* Convert planar RGB (B,G,R in planes 0,1,2) to planar YUV using the
   precomputed r_to_y/g_to_y/b_to_y and b_to_u/r_to_v tables; in-place is
   supported. */
void m_ImageRGBToYUV(MImage *src,MImage *dst)
{
mException(INVALID_IMAGE(src),EXIT,"invalid input");
mException((src->channel<3),EXIT,"invalid input");
if(dst==NULL) dst = src;
if(dst!=src) mImageRedefine(dst,3,src->height,src->width,dst->data);
if(!INVALID_POINTER(src->border)) dst->border = src->border;
int j;
for(j=ImageY1(dst);j<ImageY2(dst);j++)for(int i=ImageX1(dst,j);i<ImageX2(dst,j);i++)
{
unsigned char b = src->data[0][j][i];
unsigned char g = src->data[1][j][i];
unsigned char r = src->data[2][j][i];
unsigned char y = r_to_y[r] + g_to_y[g] + b_to_y[b];
dst->data[0][j][i] = y;
/* chroma tables are 512 entries, indexed by (difference + 256) so the
   signed difference maps to a valid index */
dst->data[1][j][i] = b_to_u[256+b-y];
dst->data[2][j][i] = r_to_v[256+r-y];
}
*ImageType(dst)=MORN_IMAGE_YUV;
}

/* Convert planar RGB to a 1-channel gray image via the luma tables;
   in-place is supported. */
void m_ImageRGBToGray(MImage *src,MImage *dst)
{
mException(INVALID_IMAGE(src),EXIT,"invalid input");
mException((src->channel<3),EXIT,"invalid input");
if(dst==NULL) dst = src;
if(dst!=src) mImageRedefine(dst,1,src->height,src->width,dst->data);
if(!INVALID_POINTER(src->border)) dst->border = src->border;
int j;
for(j=ImageY1(dst);j<ImageY2(dst);j++)for(int i=ImageX1(dst,j);i<ImageX2(dst,j);i++)
{
unsigned char b = src->data[0][j][i];
unsigned char g = src->data[1][j][i];
unsigned char r = src->data[2][j][i];
dst->data[0][j][i] = r_to_y[r] + g_to_y[g] + b_to_y[b];
}
*ImageType(dst)=MORN_IMAGE_GRAY;
dst->channel = 1;
}

/* Dispatch to the matching to-gray conversion based on the image's type
   tag; unknown types abort via mException. */
void m_ImageToGray(MImage *src,MImage *dst)
{
mException(INVALID_IMAGE(src),EXIT,"invalid input");
int *image_type = ImageType(src);
if(*image_type == MORN_IMAGE_GRAY) mImageCopy(src,dst);
else if((*image_type == MORN_IMAGE_RGB)||(*image_type == MORN_IMAGE_RGBA)) m_ImageRGBToGray(src,dst);
else if(*image_type == MORN_IMAGE_YUV) m_ImageYUVToGray(src,dst);
else mException(1,EXIT,"invalid image type %d",*image_type);
}

/* Compute a per-pixel saturation map ((max-min)*240/max) from a planar
   RGB image into a 1-channel image (body continues in the next chunk). */
void m_ImageSaturation(MImage *src,MImage *dst)
{
mException(INVALID_IMAGE(src),EXIT,"invalid input");
mException((src->channel<3),EXIT,"invalid input");
if(dst==NULL) dst = src;
/* NOTE(review): unlike the other converters, mImageRedefine is called here
   without the trailing dst->data argument -- confirm the call signature
   permits this (variadic macro?) or whether the argument was dropped. */
mImageRedefine(dst,1,src->height,src->width);
if(!INVALID_POINTER(src->border)) dst->border = src->border;
int j;
for(j=ImageY1(src);j<ImageY2(src);j++)for(int i=ImageX1(src,j);i<ImageX2(src,j);i++)
{
unsigned char b = src->data[0][j][i];
unsigned char g = src->data[1][j][i];
unsigned char r = src->data[2][j][i];
int max,min;
if(r>g) {max=r; min=g;} else {max=g; min=r;}
if(b>max) {max=b;} else if(b<min) {min=b;}
/* saturation scaled to 0..240 (this file's HSV convention); 0 for black */
dst->data[0][j][i] = (max==0)?0:(((max-min)*240)/max);
}
*ImageType(dst)=MORN_IMAGE_GRAY;
}

/* Convert planar RGB to HSV with H, S and V each scaled into 0..240
   (hue: 40 units per sextant); in-place is supported. */
void m_ImageRGBToHSV(MImage *src,MImage *dst)
{
mException(INVALID_IMAGE(src),EXIT,"invalid input");
mException((src->channel<3),EXIT,"invalid input");
if(dst==NULL) dst = src;
if(dst!=src) mImageRedefine(dst,3,src->height,src->width,dst->data);
if(!INVALID_POINTER(src->border)) dst->border = src->border;
int j;
for(j=ImageY1(dst);j<ImageY2(dst);j++)for(int i=ImageX1(dst,j);i<ImageX2(dst,j);i++)
{
unsigned char b = src->data[0][j][i];
unsigned char g = src->data[1][j][i];
unsigned char r = src->data[2][j][i];
int max,min;
if(r>g) {max=r; min=g;} else {max=g; min=r;}
if(b>max) {max=b;} else if(b<min) {min=b;}
if(max==0)
{
/* pure black: hue and saturation undefined, store zeros */
dst->data[0][j][i]=0;dst->data[1][j][i]=0;dst->data[2][j][i]=0;
continue;
}
int value = max-min;
dst->data[2][j][i] = max*240/256;
dst->data[1][j][i] = (value*240)/max;
/* hue by dominant channel; the max==r case splits at min==b so that
   magenta-side hues wrap to the 240 end of the scale */
if(value==0) dst->data[0][j][i]=0;
else if(max==r)
{
if(min==b) dst->data[0][j][i]= ((g-b)*40)/value;
else dst->data[0][j][i]=240+((g-b)*40)/value;
}
else if(max==g) dst->data[0][j][i]= 80+((b-r)*40)/value;
else if(max==b) dst->data[0][j][i]=160+((r-g)*40)/value;
}
*ImageType(dst)=MORN_IMAGE_HSV;
}

/* Inverse of m_ImageRGBToHSV: rebuild B,G,R from H (0..240, 40 per
   sextant), S and V (0..240); in-place is supported. */
void m_ImageHSVToRGB(MImage *src,MImage *dst)
{
mException(INVALID_IMAGE(src),EXIT,"invalid input");
mException((src->channel<3),EXIT,"invalid input");
if(dst==NULL) dst = src;
if(dst!=src) mImageRedefine(dst,MAX(3,dst->channel),src->height,src->width,dst->data);
if(!INVALID_POINTER(src->border)) dst->border = src->border;
int j;
for(j=ImageY1(dst);j<ImageY2(dst);j++)for(int i=ImageX1(dst,j);i<ImageX2(dst,j);i++)
{
unsigned char h = src->data[0][j][i];
unsigned char s = src->data[1][j][i];
unsigned char v = src->data[2][j][i];
int max = v*256/240;
int value = (max*s/240);
int min = max-value;
unsigned char r,g,b;
/* select the hue sextant and interpolate the middle channel */
if(h< 40) {r=max;b=min;g=min+( h *value/40);}
else if(h< 80) {g=max;b=min;r=min+(( 80-h)*value/40);}
else if(h<120) {g=max;r=min;b=min+((h- 80)*value/40);}
else if(h<160) {b=max;r=min;g=min+((160-h)*value/40);}
else if(h<200) {b=max;g=min;r=min+((h-160)*value/40);}
else {r=max;g=min;b=min+((240-h)*value/40);}
dst->data[0][j][i] = b;
dst->data[1][j][i] = g;
dst->data[2][j][i] = r;
}
*ImageType(dst)=(dst->channel==3)?MORN_IMAGE_RGB:MORN_IMAGE_RGBA;
}

/* Dead code kept as-is: commented-out experimental color-difference
   routine. */
/*
void mColorDiff(MImage *src,MImage *dst,unsigned char *color)
{
mException(INVALID_IMAGE(src),EXIT,"invalid input");
int height = src->height;
int width = src->width;
unsigned char ***data = src->data;
if(src->info.image_type==MORN_IMAGE_RGB)
{
mException((src->channel!=3),EXIT,"invalid input");
float r0=color[2];float g0=color[1];float b0=color[0];
printf("r0 is %f,g0 is %f,b0 is %f\n",r0,g0,b0);
for(int j=0;j<height;j++)
for(int i=0;i<width;i++)
{
float r = data[2][j][i];float g = data[1][j][i];float b = data[0][j][i];
// float diff = (ABS(r*g0-r0*g)/(r*g)+ABS(g*b0-g0*b)/(g*b)+ABS(b*r0-b0*g))/(b*r);
// diff = diff*1024;
// float diff = sqrt((r-r0)*(r-r0)+(g-g0)*(g-g0)+(b-b0)*(b-b0));
float diff = MAX(MAX(ABS(r-r0),ABS(b-b0)),ABS(g-g0));
dst->data[0][j][i] = (diff>255)?255:diff;
}
}
else mException(1,EXIT,"invalid input");
}
*/
/* lookup tables for the RGB->LAB conversion follow on the next lines */
unsigned char
r2l[256]={0,0,0,1,1,1,1,1,2,2,2,2,3,3,3,3,3,4,4,4,4,4,5,5,5,5,6,6,6,6,6,7,7,7,7,7,8,8,8,8,9,9,9,9,9,10,10,10,10,10,11,11,11,11,11,12,12,12,12,13,13,13,13,13,14,14,14,14,14,15,15,15,15,16,16,16,16,16,17,17,17,17,17,18,18,18,18,18,19,19,19,19,20,20,20,20,20,21,21,21,21,21,22,22,22,22,23,23,23,23,23,24,24,24,24,24,25,25,25,25,26,26,26,26,26,27,27,27,27,27,28,28,28,28,28,29,29,29,29,30,30,30,30,30,31,31,31,31,31,32,32,32,32,33,33,33,33,33,34,34,34,34,34,35,35,35,35,36,36,36,36,36,37,37,37,37,37,38,38,38,38,38,39,39,39,39,40,40,40,40,40,41,41,41,41,41,42,42,42,42,43,43,43,43,43,44,44,44,44,44,45,45,45,45,45,46,46,46,46,47,47,47,47,47,48,48,48,48,48,49,49,49,49,50,50,50,50,50,51,51,51,51,51,52,52,52,52,53,53,53,53,53,54,54,54,54}; unsigned char g2l[256]={0,1,1,2,3,4,4,5,6,6,7,8,9,9,10,11,11,12,13,14,14,15,16,16,17,18,19,19,20,21,21,22,23,24,24,25,26,26,27,28,29,29,30,31,31,32,33,34,34,35,36,36,37,38,39,39,40,41,41,42,43,44,44,45,46,46,47,48,49,49,50,51,51,52,53,54,54,55,56,57,57,58,59,59,60,61,62,62,63,64,64,65,66,67,67,68,69,69,70,71,72,72,73,74,74,75,76,77,77,78,79,79,80,81,82,82,83,84,84,85,86,87,87,88,89,89,90,91,92,92,93,94,94,95,96,97,97,98,99,99,100,101,102,102,103,104,104,105,106,107,107,108,109,109,110,111,112,112,113,114,114,115,116,117,117,118,119,119,120,121,122,122,123,124,124,125,126,127,127,128,129,129,130,131,132,132,133,134,134,135,136,137,137,138,139,139,140,141,142,142,143,144,144,145,146,147,147,148,149,149,150,151,152,152,153,154,154,155,156,157,157,158,159,159,160,161,162,162,163,164,164,165,166,167,167,168,169,170,170,171,172,172,173,174,175,175,176,177,177,178,179,180,180,181,182,182}; unsigned char 
b2l[256]={0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,6,6,6,6,6,6,6,6,6,6,6,6,6,6,7,7,7,7,7,7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8,8,8,8,8,8,8,9,9,9,9,9,9,9,9,9,9,9,9,9,9,10,10,10,10,10,10,10,10,10,10,10,10,10,10,11,11,11,11,11,11,11,11,11,11,11,11,11,11,12,12,12,12,12,12,12,12,12,12,12,12,12,12,13,13,13,13,13,13,13,13,13,13,13,13,13,14,14,14,14,14,14,14,14,14,14,14,14,14,14,15,15,15,15,15,15,15,15,15,15,15,15,15,15,16,16,16,16,16,16,16,16,16,16,16,16,16,16,17,17,17,17,17,17,17,17,17,17,17,17,17,17,18,18,18,18,18,18,18,18,18,18,18,18,18}; unsigned char r2a[256]={0,0,1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,6,6,6,7,7,7,8,8,8,8,9,9,9,10,10,10,11,11,11,12,12,12,13,13,13,14,14,14,15,15,15,16,16,16,17,17,17,18,18,18,19,19,19,20,20,20,21,21,21,22,22,22,23,23,23,24,24,24,24,25,25,25,26,26,26,27,27,27,28,28,28,29,29,29,30,30,30,31,31,31,32,32,32,33,33,33,34,34,34,35,35,35,36,36,36,37,37,37,38,38,38,39,39,39,39,40,40,40,41,41,41,42,42,42,43,43,43,44,44,44,45,45,45,46,46,46,47,47,47,48,48,48,49,49,49,50,50,50,51,51,51,52,52,52,53,53,53,54,54,54,55,55,55,55,56,56,56,57,57,57,58,58,58,59,59,59,60,60,60,61,61,61,62,62,62,63,63,63,64,64,64,65,65,65,66,66,66,67,67,67,68,68,68,69,69,69,70,70,70,71,71,71,71,72,72,72,73,73,73,74,74,74,75,75,75,76,76,76,77,77,77,78,78,78,79,79,79,80,80,80,81,81,81,82,82,82,83,83,83}; unsigned char 
b2a[256]={0,0,0,1,1,1,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,4,4,4,4,4,5,5,5,5,5,5,6,6,6,6,6,6,7,7,7,7,7,7,8,8,8,8,8,9,9,9,9,9,9,10,10,10,10,10,10,11,11,11,11,11,11,12,12,12,12,12,12,13,13,13,13,13,14,14,14,14,14,14,15,15,15,15,15,15,16,16,16,16,16,16,17,17,17,17,17,18,18,18,18,18,18,19,19,19,19,19,19,20,20,20,20,20,20,21,21,21,21,21,22,22,22,22,22,22,23,23,23,23,23,23,24,24,24,24,24,24,25,25,25,25,25,26,26,26,26,26,26,27,27,27,27,27,27,28,28,28,28,28,28,29,29,29,29,29,30,30,30,30,30,30,31,31,31,31,31,31,32,32,32,32,32,32,33,33,33,33,33,34,34,34,34,34,34,35,35,35,35,35,35,36,36,36,36,36,36,37,37,37,37,37,37,38,38,38,38,38,39,39,39,39,39,39,40,40,40,40,40,40,41,41,41,41,41,41,42,42,42,42,42,43,43,43,43,43,43,44,44,44,44,44}; unsigned char r2b[256]={0,0,0,0,0,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,6,6,6,6,6,6,6,6,7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8,9,9,9,9,9,9,9,9,9,10,10,10,10,10,10,10,10,11,11,11,11,11,11,11,11,12,12,12,12,12,12,12,12,13,13,13,13,13,13,13,13,14,14,14,14,14,14,14,14,14,15,15,15,15,15,15,15,15,16,16,16,16,16,16,16,16,17,17,17,17,17,17,17,17,18,18,18,18,18,18,18,18,19,19,19,19,19,19,19,19,19,20,20,20,20,20,20,20,20,21,21,21,21,21,21,21,21,22,22,22,22,22,22,22,22,23,23,23,23,23,23,23,23,23,24,24,24,24,24,24,24,24,25,25,25,25,25,25,25,25,26,26,26,26,26,26,26,26,27,27,27,27,27,27,27,27,28,28,28,28,28,28,28,28,28,29,29,29,29,29,29,29,29,30,30,30,30,30,30,30,30,31,31,31,31,31}; unsigned char 
g2b[256]={0,0,1,1,2,2,2,3,3,3,4,4,5,5,5,6,6,6,7,7,8,8,8,9,9,9,10,10,11,11,11,12,12,12,13,13,14,14,14,15,15,16,16,16,17,17,17,18,18,19,19,19,20,20,20,21,21,22,22,22,23,23,23,24,24,25,25,25,26,26,26,27,27,28,28,28,29,29,30,30,30,31,31,31,32,32,33,33,33,34,34,34,35,35,36,36,36,37,37,37,38,38,39,39,39,40,40,40,41,41,42,42,42,43,43,44,44,44,45,45,45,46,46,47,47,47,48,48,48,49,49,50,50,50,51,51,51,52,52,53,53,53,54,54,54,55,55,56,56,56,57,57,57,58,58,59,59,59,60,60,61,61,61,62,62,62,63,63,64,64,64,65,65,65,66,66,67,67,67,68,68,68,69,69,70,70,70,71,71,71,72,72,73,73,73,74,74,75,75,75,76,76,76,77,77,78,78,78,79,79,79,80,80,81,81,81,82,82,82,83,83,84,84,84,85,85,85,86,86,87,87,87,88,88,89,89,89,90,90,90,91,91,92,92,92,93,93,93,94,94,95,95,95,96,96,96}; void mImageRGBToLAB(MImage *src,MImage *dst) { mException(INVALID_IMAGE(src),EXIT,"invalid input"); mException((src->channel<3),EXIT,"invalid input"); if(dst==NULL) dst = src; if(dst!=src) mImageRedefine(dst,3,src->height,src->width,dst->data); if(!INVALID_POINTER(src->border)) dst->border = src->border; int j; for(j=ImageY1(dst);j<ImageY2(dst);j++)for(int i=ImageX1(dst,j);i<ImageX2(dst,j);i++) { unsigned char b = src->data[0][j][i]; unsigned char g = src->data[1][j][i]; unsigned char r = src->data[2][j][i]; dst->data[0][j][i] = r2l[r] + g2l[g] + b2l[b]; int aa = (r2a[r] - (g>>2) + b2a[b])*2.55;if(aa>255)aa=255;else if(aa<0)aa=0; int bb = (r2b[r] + g2b[g] - (b>>2))*2.55;if(bb>255)bb=255;else if(bb<0)bb=0; dst->data[1][j][i]=aa; dst->data[2][j][i]=bb; } *ImageType(dst)=MORN_IMAGE_LAB; }
CMedianModified.h
///////////////////////////////////////////////////////////////////////////////
// $Id$
//
// 3DimViewer
// Lightweight 3D DICOM viewer.
//
// Copyright 2008-2016 3Dim Laboratory s.r.o.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////////////

#include <VPL/Base/Assert.h>
#include <VPL/Base/Types.h>
#include <VPL/Base/Data.h>
#include <VPL/Math/Base.h>
#include <VPL/Module/Progress.h>
#include <VPL/Image/VolumeFilter.h>

#ifndef CMedianModified_H_included
#define CMedianModified_H_included

namespace vpl
{
namespace img
{

//! Modified "median" volume filter: rather than a true rank median it
//! applies a majority vote between an "active" voxel value (m_color) and a
//! "mask" value (m_null) inside a cubic window (see myFindMedian below).
//! Reports progress via vpl::mod::CProgress.
template <class V>
class CModifiedMedianFilter : public CVolumeFilter<V>, public vpl::mod::CProgress
{
public:
    //! Volume filter base.
    typedef CVolumeFilter<V> base;
    typedef typename base::tVolume tVolume;
    typedef typename base::tVoxel tVoxel;

public:
    //! Constructor that creates a new median filter.
    //! - Parameter 'Size' is a window size and it must be an odd number.
    //! - 'active' is the voxel value treated as foreground, 'mask' the
    //!   background value written when the vote fails.
    //! - The buffer holds one Size^3 window per OpenMP thread.
    CModifiedMedianFilter(tSize Size, const tVoxel & active, const tVoxel & mask = tVoxel() )
        : m_color( active )
        , m_null( mask )
        , m_MedianSize(Size)
        , m_Data(Size * Size * Size * ompGetMaxThreads())
    {
        VPL_ASSERT((Size % 2) == 1);
    }

    //! Destructor
    ~CModifiedMedianFilter() {}

    //! Median volume filtering
    //! - Returns false on failure
    bool operator()(const tVolume& SrcVolume, tVolume& DstVolume);

    //! Returns filter response at specified volume position.
    //! - Value is not normalized!
    tVoxel getResponse(const tVolume& SrcVolume, tSize x, tSize y, tSize z);

    //! Sets the windows size
    //! - Must be an odd number; resizes the per-thread window buffer.
    void setSize(tSize Size)
    {
        VPL_ASSERT((Size % 2) == 1);
        m_MedianSize = Size;
        m_Data.resize(Size * Size * Size * ompGetMaxThreads());
    }

    //! Returns the current window size.
    virtual tSize getSize() const {return m_MedianSize;}

protected:
    //! Find median
    tVoxel myFindMedian(tVoxel * pData, tSize Size, tVoxel current);

protected:
    //! Used "color"
    tVoxel m_color;

    //! Used "no color" mask
    tVoxel m_null;

    //! Median filter size
    vpl::tSize m_MedianSize;

    //! Internal data buffer
    vpl::base::CData<tVoxel> m_Data;
};

//! Median value finding (Z Algorithm)
//! Majority vote rather than a true median: if more than half of the
//! window voxels equal m_color the result is m_color; otherwise an
//! m_color voxel is demoted to m_null and any other value is kept.
template <class V>
typename CModifiedMedianFilter<V>::tVoxel CModifiedMedianFilter<V>::myFindMedian(tVoxel * pData, tSize Size, tVoxel current)
{
    vpl::tSize count(0), threshold( Size / 2);

    // count window voxels equal to the active color
    for( vpl::tSize i = 0; i < Size; ++i )
    {
        if( pData[i] == m_color )
            ++count;
    }

    if( count > threshold )
        return m_color;

    if( current == m_color )
        return m_null;

    return current;
}

//==============================================================================
/*
 * Methods templates.
 */

// Volume filtering method
// Filters only voxels whose value is m_null or m_color; all other voxels
// are copied through unchanged. Rows are processed in parallel, each
// OpenMP thread using its own Size^3 slice of m_Data.
template <class V>
bool CModifiedMedianFilter<V>::operator()(const tVolume& SrcVolume, tVolume& DstVolume)
{
    CProgress::tProgressInitializer StartProgress(*this);

    // Volume size
    vpl::tSize XCount = vpl::math::getMin(SrcVolume.getXSize(), DstVolume.getXSize());
    vpl::tSize YCount = vpl::math::getMin(SrcVolume.getYSize(), DstVolume.getYSize());
    vpl::tSize ZCount = vpl::math::getMin(SrcVolume.getZSize(), DstVolume.getZSize());

    // Initialize the progress observer
    CProgress::setProgressMax(ZCount);

    vpl::tSize KernelSize = m_MedianSize * m_MedianSize * m_MedianSize;
    vpl::tSize kernelHalf( m_MedianSize / 2 );

    for( tSize z = 0; z < ZCount; ++z )
    {
#pragma omp parallel for schedule(static) default(shared)
        for( tSize y = 0; y < YCount; ++y )
        {
            // each thread works in its own slice of the shared buffer
            tSize Start = ompGetThreadNum() * KernelSize;
            for( tSize x = 0; x < XCount; ++x )
            {
                tVoxel current = SrcVolume( x, y, z );
                if( current == m_null || current == m_color )
                {
                    // Copy voxels from the window centered on (x,y,z).
                    // NOTE(review): near the borders the rect origin goes
                    // negative -- confirm rect()/copyTo() clamp or that the
                    // volume carries a sufficient margin.
                    SrcVolume.rect(CPoint3i(x-kernelHalf, y-kernelHalf, z-kernelHalf), CSize3i(m_MedianSize)).copyTo(m_Data.getPtr(Start));
                    // Median finding
                    tVoxel Median = myFindMedian(m_Data.getPtr(Start), KernelSize, current);
                    // Set pixel value
                    DstVolume.set(x, y, z, Median);
                }else{
                    // pass through voxels outside the active/mask pair
                    DstVolume.set(x, y, z, current );
                }
            }
        }
        // Notify progress observers...
        if( !CProgress::progress() )
        {
            return false;
        }
    }

    // O.K.
    return true;
}

// Volume filter response
// NOTE(review): inconsistent with operator() -- the window here is anchored
// at (x,y,z) instead of centered (no -kernelHalf offset), and it calls
// median::findMedian (a true rank median) rather than myFindMedian; confirm
// both differences are intentional.
template <class V>
typename CModifiedMedianFilter<V>::tVoxel CModifiedMedianFilter<V>::getResponse(const tVolume& SrcVolume, tSize x, tSize y, tSize z)
{
    // Copy voxels from the window
    SrcVolume.rect(CPoint3i(x, y, z), CSize3i(m_MedianSize)).copyTo(m_Data.getPtr());
    // Median finding
    return median::findMedian<tVoxel>(m_Data.getPtr(), m_MedianSize * m_MedianSize * m_MedianSize);
}

} // namespace img
} // namespace vpl

/*
////////////////////////////////////////////////////////////////////////////////////////////////////
//!\brief Modified median filter.
//////////////////////////////////////////////////////////////////////////////////////////////////// template <class V> struct CModifiedMedianFilter : public vpl::img::CVolumeFilter< V >, public vpl::mod::CProgress { public: //! Volume filter base. typedef vpl::img::CVolumeFilter<V> base; typedef typename base::tVolume tVolume; typedef typename base::tVoxel tVoxel; public: //! Default constructor. CModifiedMedianFilter( const tVoxel & active, const tVoxel & mask = tVoxel() ) : m_color( active ) , m_null( mask ) , m_MedianSize( 1 ) { } //! Virtual destructor. virtual ~CModifiedMedianFilter() {} //! Filtering of input/source volume. //! - Returns false on failure. virtual bool operator()(const tVolume& SrcVolume, tVolume& DstVolume); //! Sets the windows size void setSize(vpl::tSize Size) { VPL_ASSERT((Size % 2) == 1); m_MedianSize = Size / 2; } protected: //! Find median modified tVoxel findMedian( const tVolume& SrcVolume, vpl::tSize x, vpl::tSize y, vpl::tSize z, tVoxel current ); //! Median filter size vpl::tSize m_MedianSize; protected: //! Used "color" tVoxel m_color; //! Used "no color" mask tVoxel m_null; }; template <class V> //////////////////////////////////////////////////////////////////////////////////////////////////// //!\brief casting operator. //! //!\typeparam V . //! //!\return The result of the operation. 
//////////////////////////////////////////////////////////////////////////////////////////////////// bool CModifiedMedianFilter< V >::operator()(const tVolume& SrcVolume, tVolume& DstVolume) { CProgress::tProgressInitializer StartProgress(*this); // Volume size vpl::tSize XCount = vpl::math::getMin(SrcVolume.getXSize(), DstVolume.getXSize()); vpl::tSize YCount = vpl::math::getMin(SrcVolume.getYSize(), DstVolume.getYSize()); vpl::tSize ZCount = vpl::math::getMin(SrcVolume.getZSize(), DstVolume.getZSize()); // Initialize the progress observer CProgress::setProgressMax(ZCount); // Filter the image for( vpl::tSize z = 0; z < ZCount; ++z ) { for( vpl::tSize y = 0; y < YCount; ++y ) { for( vpl::tSize x = 0; x < XCount; ++x ) { tVoxel current = SrcVolume( x, y, z ); // if( current == m_null || current == m_color ) { DstVolume( x, y, z ) = findMedian( SrcVolume, x, y, z, current ); } } } // Notify progress observers... if( !CProgress::progress() ) { return false; } } // O.K. return true; } template <class V> //////////////////////////////////////////////////////////////////////////////////////////////////// //!\brief Searches for the first median. //! //!\typeparam V . //!\param x The x coordinate. //!\param y The y coordinate. //!\param z The z coordinate. //! //!\return The found median. 
//////////////////////////////////////////////////////////////////////////////////////////////////// typename CModifiedMedianFilter< V >::tVoxel CModifiedMedianFilter< V >::findMedian( const tVolume& SrcVolume, vpl::tSize x, vpl::tSize y, vpl::tSize z, tVoxel current ) { vpl::tSize count(0); vpl::tSize threshold( m_MedianSize * m_MedianSize * m_MedianSize / 2 ); for( vpl::tSize k = z - m_MedianSize; k <= z + m_MedianSize; ++k ) { for( vpl::tSize j = y - m_MedianSize; j <= y + m_MedianSize; ++j ) { for( vpl::tSize i = x - m_MedianSize; i <= x + m_MedianSize; ++i ) { if( SrcVolume( i, j, k ) == m_color ) ++count; } } } if( count > threshold ) return m_color; if( current == m_color ) return m_null; return current; } */ // CMedianModified_H_included #endif
3d25pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 16; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,2);t1++) { lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4)); ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-1,2)),ceild(16*t2-Nz-3,16));t3<=min(min(min(floord(4*Nt+Ny-9,16),floord(8*t1+Ny+7,16)),floord(16*t2+Ny+3,16)),floord(16*t1-16*t2+Nz+Ny+5,16));t3++) { for (t4=max(max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-19,32)),ceild(16*t3-Ny-19,32));t4<=min(min(min(min(floord(4*Nt+Nx-9,32),floord(8*t1+Nx+7,32)),floord(16*t2+Nx+3,32)),floord(16*t3+Nx+3,32)),floord(16*t1-16*t2+Nz+Nx+5,32));t4++) { for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(32*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),Nt-1),2*t1+3),4*t2+2),4*t3+2),8*t4+6);t5++) { for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) { lbv=max(32*t4,4*t5+4); ubv=min(32*t4+31,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ 
(-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
Example_icv.1.c
/* * @@name: icv.1c * @@type: C * @@compilable: yes * @@linkable: yes * @@expect: success */ #include <stdio.h> #include <omp.h> int main (void) { omp_set_nested(1); omp_set_max_active_levels(8); omp_set_dynamic(0); omp_set_num_threads(2); #pragma omp parallel { omp_set_num_threads(3); #pragma omp parallel { omp_set_num_threads(4); #pragma omp single { // The following should print: // Inner: max_act_lev=8, num_thds=3, max_thds=4 // Inner: max_act_lev=8, num_thds=3, max_thds=4 printf ("Inner: max_act_lev=%d, num_thds=%d, max_thds=%d\n", omp_get_max_active_levels(), omp_get_num_threads(), omp_get_max_threads()); } } #pragma omp barrier #pragma omp single { // The following should print: // Outer: max_act_lev=8, num_thds=2, max_thds=3 printf ("Outer: max_act_lev=%d, num_thds=%d, max_thds=%d\n", omp_get_max_active_levels(), omp_get_num_threads(), omp_get_max_threads()); } } return 0; }
valid.yolo7.src.h
#pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_512_34_34_256_3_3.h"
#include "gen_ukr_A4B2gemm_1_512_34_34_256_3_3.h"

/* Auto-generated ("push button") convolution driver for a
 * 1 x 512 x 34 x 34 output, 256-channel, 3x3-kernel layer.
 * A: input activations, B: repacked weights (written here from oriB),
 * C: output. Must be called from inside an omp parallel region: it uses
 * omp_get_thread_num() for work division and an omp barrier between the
 * weight-repack phase and the GEMM phase.
 * NOTE(review): uNf/uNc/uNw/uNh and the tile sizes Tc1/Txy3/Tf2 come from
 * the included generated headers — offsets below assume those exact values. */
void testrun(float* A ,float*B, float*C, float*oriB ){
    int tid = omp_get_thread_num();
    /* NOTE(review): Nx/Ny/Nh are unused in this function — presumably emitted
     * unconditionally by the generator. */
    int Nx = 34;
    int Ny = 34;
    int Nh = 3;
    /* Row strides (in cache lines?) for the 6 output rows handled by one
     * micro-kernel call; temporarily bumped for the row-wrap edge case below. */
    long long Astrides[6] = {0,1,2,3,4,5};
    int b1 = 0;

    /* Phase 1: repack oriB into B in 16-wide filter panels using 8x8
     * transposes (two per panel: lanes 0..7 and 8..15). Work is split over
     * threads by the tid-derived start offsets. */
    for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
        for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
            transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
            transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
        }
    }
#pragma omp barrier
    /* Phase 2: tiled GEMM-style convolution loop nest. begin push button
     * generated block — do not hand-edit the loop bounds/offsets. */
    for(int c5=0;c5<256+0;c5+=256) {
     for(int xy5=0;xy5<1156+0;xy5+=1156) {
      for(int f5=0;f5<512+0;f5+=512) {
       for(int xy4=xy5;xy4<min(1156, 1156+xy5);xy4+=1156) {
        for(int f4=f5;f4<min(512, 512+f5);f4+=512) {
         for(int c4=c5;c4<min(256, 256+c5);c4+=256) {
          for(int c3=c4;c3<min(256, 256+c4);c3+=Tc1) {
           for(int xy3=xy4;xy3<min(1156, 1156+xy4);xy3+=Txy3) {
            for(int f3=f4;f3<min(512, 512+f4);f3+=Tf2) {
             for(int xy2=xy3;xy2<min(1156, Txy3+xy3);xy2+=6) {
              for(int f2=f3;f2<min(512, Tf2+f3);f2+=16) {
               for(int c2=c3;c2<min(256, Tc1+c3);c2+=Tc1) {
                for(int c1=c2;c1<min(256, Tc1+c2);c1+=Tc1) {
                 for(int xy1=xy2;xy1<min(1156, 6+xy2);xy1+=6) {
                  for(int f1=f2;f1<min(512, 16+f2);f1+=16) {
                   int ctile=min(Tc1, 256-c1);
                   /* Decompose the fused xy index into output row/column. */
                   int x1=xy1/34;
                   int y1=xy1%34/1;
                   int c1_1=c1/1;
                   int c1_2=c1%1/1;
                   int kf1_1=f1/16;
                   int kf1_2=f1%16/1;
                   int of1_1=f1/1;
                   int of1_2=f1%1/1;
                   int offsetA=0+b1*331776+c1_1*1296+1*x1*36+1*y1*1+c1_2*1;
                   int offsetB=0+kf1_1*36864+c1*144+0*48+0*16+kf1_2*1;
                   int offsetC=0+b1*591872+of1_1*1156+x1*34+y1*1+of1_2*1;
                   if(34-y1>=6){
                    /* Common case: all 6 output points lie in one row. */
                    cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
                   }
                   else if(34*34-xy1>=6){
                    /* Row wrap: shift strides (+2 skips the 2-wide input halo)
                     * for the points falling into the next row, then restore. */
                    for(int sti=34-y1;sti<6;sti+=1) {
                     Astrides[sti]+=2;
                    }
                    cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
                    for(int sti=34-y1;sti<6;sti+=1) {
                     Astrides[sti]-=2;
                    }
                   }
                   else{
                    /* Tail: fewer than 6 points remain — use the 4-row kernel. */
                    cnn_ukr_float_scatter_4x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
                   }
    } } } } } } } } } } } } } } }
    /* end push button generated block */
}
static.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <unistd.h>

/* Demonstrates schedule(static): 16 iterations of increasing cost
 * (sleep(i) seconds) are mapped to 4 threads in fixed round-robin chunks.
 * Each iteration prints the thread that ran it. */
int main()
{
    int i;

    /*
     * schedule() chooses how loop iterations are distributed over threads.
     * "static": iteration blocks are mapped to the execution threads in a
     * round-robin fashion, decided before the loop runs. The nice property
     * of static scheduling is that the OpenMP runtime guarantees that two
     * separate loops with the same iteration count, executed by the same
     * number of threads under static scheduling, assign the same iterations
     * to the same threads.
     */
    /* BUGFIX(cleanup): removed the unused variable n_threads. */
#pragma omp parallel for private(i) schedule(static) num_threads(4)
    for (i = 0; i < 16; i++) {
        /* Wait i seconds so later iterations are visibly more expensive. */
        sleep(i);
        printf("The thread %d has completed the iteration %d\n", omp_get_thread_num(), i);
    }

    printf("All threads have ended!\n");

    return 0;
}
conv.h
#ifndef CONV_H #define CONV_H namespace TSnap { /// Sequentially converts the table into a graph with links from nodes in \c SrcCol to those in \c DstCol. template<class PGraph> PGraph ToGraph(PTable Table, const TStr& SrcCol, const TStr& DstCol, TAttrAggr AggrPolicy) { PGraph Graph = PGraph::TObj::New(); const TAttrType NodeType = Table->GetColType(SrcCol); Assert(NodeType == Table->GetColType(DstCol)); const TInt SrcColIdx = Table->GetColIdx(SrcCol); const TInt DstColIdx = Table->GetColIdx(DstCol); // make single pass over all rows in the table if (NodeType == atInt) { for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) { if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; } // add src and dst nodes to graph if they are not seen earlier TInt SVal = (Table->IntCols)[SrcColIdx][CurrRowIdx]; TInt DVal = (Table->IntCols)[DstColIdx][CurrRowIdx]; //Using AddNodeUnchecked ensures that no error is thrown when the same node is seen twice Graph->AddNodeUnchecked(SVal); Graph->AddNodeUnchecked(DVal); Graph->AddEdgeUnchecked(SVal, DVal); } } else if (NodeType == atFlt) { // node values - i.e. the unique values of src/dst col //THashSet<TInt> IntNodeVals; // for both int and string node attr types. 
THash<TFlt, TInt> FltNodeVals; for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) { if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; } // add src and dst nodes to graph if they are not seen earlier TInt SVal, DVal; TFlt FSVal = (Table->FltCols)[SrcColIdx][CurrRowIdx]; SVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FSVal); TFlt FDVal = (Table->FltCols)[SrcColIdx][CurrRowIdx]; DVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FDVal); Graph->AddEdge(SVal, DVal); } } else { for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) { if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; } // add src and dst nodes to graph if they are not seen earlier TInt SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx]; // if (strlen(Table->GetContextKey(SVal)) == 0) { continue; } //illegal value TInt DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx]; // if (strlen(Table->GetContextKey(DVal)) == 0) { continue; } //illegal value //Using AddNodeUnchecked ensures that no error is thrown when the same node is seen twice Graph->AddNodeUnchecked(SVal); Graph->AddNodeUnchecked(DVal); Graph->AddEdgeUnchecked(SVal, DVal); } } Graph->SortNodeAdjV(); return Graph; } /// Converts the Table into a graph with edges from \c SrcCol to \c DstCol, and attribute vector defined by the arguments. template<class PGraph> PGraph ToNetwork(PTable Table, const TStr& SrcCol, const TStr& DstCol, TStrV& SrcAttrV, TStrV& DstAttrV, TStrV& EdgeAttrV, TAttrAggr AggrPolicy) { PGraph Graph = PGraph::TObj::New(); const TAttrType NodeType = Table->GetColType(SrcCol); Assert(NodeType == Table->GetColType(DstCol)); const TInt SrcColIdx = Table->GetColIdx(SrcCol); const TInt DstColIdx = Table->GetColIdx(DstCol); //Table->AddGraphAttributeV(SrcAttrV, false, true, false); //Table->AddGraphAttributeV(DstAttrV, false, false, true); //Table->AddGraphAttributeV(EdgeAttrV, true, false, true); // node values - i.e. 
the unique values of src/dst col //THashSet<TInt> IntNodeVals; // for both int and string node attr types. THash<TFlt, TInt> FltNodeVals; // node attributes THash<TInt, TStrIntVH> NodeIntAttrs; THash<TInt, TStrFltVH> NodeFltAttrs; THash<TInt, TStrStrVH> NodeStrAttrs; // make single pass over all rows in the table for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) { if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; } // add src and dst nodes to graph if they are not seen earlier TInt SVal, DVal; if (NodeType == atFlt) { TFlt FSVal = (Table->FltCols)[SrcColIdx][CurrRowIdx]; SVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FSVal); TFlt FDVal = (Table->FltCols)[SrcColIdx][CurrRowIdx]; DVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FDVal); } else if (NodeType == atInt || NodeType == atStr) { if (NodeType == atInt) { SVal = (Table->IntCols)[SrcColIdx][CurrRowIdx]; DVal = (Table->IntCols)[DstColIdx][CurrRowIdx]; } else { SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx]; if (strlen(Table->GetContextKey(SVal)) == 0) { continue; } //illegal value DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx]; if (strlen(Table->GetContextKey(DVal)) == 0) { continue; } //illegal value } if (!Graph->IsNode(SVal)) {Graph->AddNode(SVal); } if (!Graph->IsNode(DVal)) {Graph->AddNode(DVal); } //CheckAndAddIntNode(Graph, IntNodeVals, SVal); //CheckAndAddIntNode(Graph, IntNodeVals, DVal); } // add edge and edge attributes Graph->AddEdge(SVal, DVal, CurrRowIdx); // Aggregate edge attributes and add to graph for (TInt i = 0; i < EdgeAttrV.Len(); i++) { TStr ColName = EdgeAttrV[i]; TAttrType T = Table->GetColType(ColName); TInt Index = Table->GetColIdx(ColName); switch (T) { case atInt: Graph->AddIntAttrDatE(CurrRowIdx, Table->IntCols[Index][CurrRowIdx], ColName); break; case atFlt: Graph->AddFltAttrDatE(CurrRowIdx, Table->FltCols[Index][CurrRowIdx], ColName); break; case atStr: Graph->AddStrAttrDatE(CurrRowIdx, Table->GetStrValIdx(Index, CurrRowIdx), 
ColName); break; } } // get src and dst node attributes into hashmaps if ((Table->SrcNodeAttrV).Len() > 0) { Table->AddNodeAttributes(SVal, Table->SrcNodeAttrV, CurrRowIdx, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs); } if ((Table->DstNodeAttrV).Len() > 0) { Table->AddNodeAttributes(DVal, Table->DstNodeAttrV, CurrRowIdx, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs); } } // aggregate node attributes and add to graph if ((Table->SrcNodeAttrV).Len() > 0 || (Table->DstNodeAttrV).Len() > 0) { for (TNEANet::TNodeI NodeI = Graph->BegNI(); NodeI < Graph->EndNI(); NodeI++) { TInt NId = NodeI.GetId(); if (NodeIntAttrs.IsKey(NId)) { TStrIntVH IntAttrVals = NodeIntAttrs.GetDat(NId); for (TStrIntVH::TIter it = IntAttrVals.BegI(); it < IntAttrVals.EndI(); it++) { TInt AttrVal = Table->AggregateVector<TInt>(it.GetDat(), AggrPolicy); Graph->AddIntAttrDatN(NId, AttrVal, it.GetKey()); } } if (NodeFltAttrs.IsKey(NId)) { TStrFltVH FltAttrVals = NodeFltAttrs.GetDat(NId); for (TStrFltVH::TIter it = FltAttrVals.BegI(); it < FltAttrVals.EndI(); it++) { TFlt AttrVal = Table->AggregateVector<TFlt>(it.GetDat(), AggrPolicy); Graph->AddFltAttrDatN(NId, AttrVal, it.GetKey()); } } if (NodeStrAttrs.IsKey(NId)) { TStrStrVH StrAttrVals = NodeStrAttrs.GetDat(NId); for (TStrStrVH::TIter it = StrAttrVals.BegI(); it < StrAttrVals.EndI(); it++) { TStr AttrVal = Table->AggregateVector<TStr>(it.GetDat(), AggrPolicy); Graph->AddStrAttrDatN(NId, AttrVal, it.GetKey()); } } } } return Graph; } /// Calls ToNetwork with an empty attribute vector. Convenience wrapper. template<class PGraph> PGraph ToNetwork(PTable Table, const TStr& SrcCol, const TStr& DstCol, TAttrAggr AggrPolicy) { TStrV V; return ToNetwork<PGraph>(Table, SrcCol, DstCol, V, AggrPolicy); } #ifdef GCC_ATOMIC /// Performs table to graph conversion in parallel using the sort-first algorithm. This is the recommended method to use. 
/// Parallel sort-first table-to-graph conversion.
/// Phases: (1) copy the src/dst columns into two pairs of vectors,
/// (2) sort one pair by source and one by destination, (3) compute per-thread
/// partition offsets and per-partition distinct-node counts, (4) merge the
/// sorted node id lists, (5) build adjacency via CopyUniqueFrom and
/// AddNodeWithEdges. Only atInt and atStr columns are handled here.
template<class PGraphMP>
PGraphMP ToGraphMP(PTable Table, const TStr& SrcCol, const TStr& DstCol) {
  // double start = omp_get_wtime();
  const TInt SrcColIdx = Table->GetColIdx(SrcCol);
  const TInt DstColIdx = Table->GetColIdx(DstCol);
  const TAttrType NodeType = Table->GetColType(SrcCol);
  Assert(NodeType == Table->GetColType(DstCol));
  const TInt NumRows = Table->NumValidRows;

  // Two copies of each column: (SrcCol1,DstCol1) will be sorted by source,
  // (SrcCol2,DstCol2) by destination.
  TIntV SrcCol1, DstCol1, SrcCol2, DstCol2;

  #pragma omp parallel sections num_threads(4)
  {
    #pragma omp section
    { SrcCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { SrcCol2.Reserve(NumRows, NumRows); }
    #pragma omp section
    { DstCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { DstCol2.Reserve(NumRows, NumRows); }
  }
  // double endResize = omp_get_wtime();
  // printf("Resize time = %f\n", endResize-start);

  TIntPrV Partitions;
  Table->GetPartitionRanges(Partitions, omp_get_max_threads());
  TInt PartitionSize = Partitions[0].GetVal2()-Partitions[0].GetVal1()+1;
  // double endPartition = omp_get_wtime();
  // printf("Partition time = %f\n", endPartition-endResize);

  // Copy the columns in parallel, one partition of valid rows per thread.
  omp_set_num_threads(omp_get_max_threads());
  if (NodeType == atInt) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetIntAttr(SrcColIdx);
        SrcCol2[RowId] = RowI.GetIntAttr(SrcColIdx);
        DstCol1[RowId] = RowI.GetIntAttr(DstColIdx);
        DstCol2[RowId] = RowI.GetIntAttr(DstColIdx);
        RowI++;
      }
    }
  }
  else if (NodeType == atStr) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetStrMapById(SrcColIdx);
        SrcCol2[RowId] = RowI.GetStrMapById(SrcColIdx);
        DstCol1[RowId] = RowI.GetStrMapById(DstColIdx);
        DstCol2[RowId] = RowI.GetStrMapById(DstColIdx);
        RowI++;
      }
    }
  }

  // Sort (SrcCol1,DstCol1) by source and (SrcCol2,DstCol2) by destination,
  // concurrently as two untied tasks.
  omp_set_num_threads(omp_get_max_threads());
  #pragma omp parallel
  {
    #pragma omp single nowait
    {
      #pragma omp task untied shared(SrcCol1, DstCol1)
      { TTable::QSortKeyVal(SrcCol1, DstCol1, 0, NumRows-1); }
    }
    #pragma omp single nowait
    {
      #pragma omp task untied shared(SrcCol2, DstCol2)
      { TTable::QSortKeyVal(DstCol2, SrcCol2, 0, NumRows-1); }
    }
    #pragma omp taskwait
  }
  // TTable::PSRSKeyVal(SrcCol1, DstCol1, 0, NumRows-1);
  // TTable::PSRSKeyVal(DstCol2, SrcCol2, 0, NumRows-1);
  // TInt IsS = TTable::CheckSortedKeyVal(SrcCol1, DstCol1, 0, NumRows-1);
  // TInt IsD = TTable::CheckSortedKeyVal(DstCol2, SrcCol2, 0, NumRows-1);
  // printf("IsSorted = %d %d\n", IsS.Val, IsD.Val);
  // double endSort = omp_get_wtime();
  // printf("Sort time = %f\n", endSort-endCopy);
  //return TNGraphMP::New(10, 100);

  // Partition each sorted vector into NumThreads chunks, nudging each chunk
  // boundary forward so equal keys never straddle two partitions.
  TInt NumThreads = omp_get_max_threads();
  TInt PartSize = (NumRows/NumThreads);
  TIntV SrcOffsets, DstOffsets;
  SrcOffsets.Add(0);
  for (TInt i = 1; i < NumThreads; i++) {
    TInt CurrOffset = i * PartSize;
    while (CurrOffset < (i+1) * PartSize &&
           SrcCol1[CurrOffset-1] == SrcCol1[CurrOffset]) {
      CurrOffset++;
    }
    if (CurrOffset < (i+1) * PartSize) { SrcOffsets.Add(CurrOffset); }
  }
  SrcOffsets.Add(NumRows);
  DstOffsets.Add(0);
  for (TInt i = 1; i < NumThreads; i++) {
    TInt CurrOffset = i * PartSize;
    while (CurrOffset < (i+1) * PartSize &&
           DstCol2[CurrOffset-1] == DstCol2[CurrOffset]) {
      CurrOffset++;
    }
    if (CurrOffset < (i+1) * PartSize) { DstOffsets.Add(CurrOffset); }
  }
  DstOffsets.Add(NumRows);
  TInt SrcPartCnt = SrcOffsets.Len()-1;
  TInt DstPartCnt = DstOffsets.Len()-1;
  // for (TInt i = 0; i < SrcOffsets.Len(); i++) { printf("%d ", SrcOffsets[i].Val); }
  // printf("\n");
  // for (TInt i = 0; i < DstOffsets.Len(); i++) { printf("%d ", DstOffsets[i].Val); }
  // printf("\n");

  // Count distinct node ids per partition (src and dst partitions handled by
  // one combined parallel loop indexed by t).
  TIntV SrcNodeCounts, DstNodeCounts;
  SrcNodeCounts.Reserve(SrcPartCnt, SrcPartCnt);
  DstNodeCounts.Reserve(DstPartCnt, DstPartCnt);
  #pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
    if (t < SrcPartCnt) {
      TInt i = t;
      if (SrcOffsets[i] != SrcOffsets[i+1]) {
        SrcNodeCounts[i] = 1;
        TInt CurrNode = SrcCol1[SrcOffsets[i]];
        for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
          while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
          if (j < SrcOffsets[i+1]) {
            SrcNodeCounts[i]++;
            CurrNode = SrcCol1[j];
          }
        }
      }
    } else {
      TInt i = t - SrcPartCnt;
      if (DstOffsets[i] != DstOffsets[i+1]) {
        DstNodeCounts[i] = 1;
        TInt CurrNode = DstCol2[DstOffsets[i]];
        for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
          while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
          if (j < DstOffsets[i+1]) {
            DstNodeCounts[i]++;
            CurrNode = DstCol2[j];
          }
        }
      }
    }
  }
  // for (TInt i = 0; i < SrcNodeCounts.Len(); i++) { printf("%d ", SrcNodeCounts[i].Val); }
  // printf("\n");
  // for (TInt i = 0; i < DstNodeCounts.Len(); i++) { printf("%d ", DstNodeCounts[i].Val); }
  // printf("\n");

  // Prefix-sum the per-partition counts into write offsets.
  TInt TotalSrcNodes = 0;
  TIntV SrcIdOffsets;
  for (int i = 0; i < SrcPartCnt; i++) {
    SrcIdOffsets.Add(TotalSrcNodes);
    TotalSrcNodes += SrcNodeCounts[i];
  }
  TInt TotalDstNodes = 0;
  TIntV DstIdOffsets;
  for (int i = 0; i < DstPartCnt; i++) {
    DstIdOffsets.Add(TotalDstNodes);
    TotalDstNodes += DstNodeCounts[i];
  }
  // printf("Total Src = %d, Total Dst = %d\n", TotalSrcNodes.Val, TotalDstNodes.Val);

  // Record (node id, first row index) pairs for each distinct src/dst node.
  TIntPrV SrcNodeIds, DstNodeIds;
  #pragma omp parallel sections
  {
    #pragma omp section
    { SrcNodeIds.Reserve(TotalSrcNodes, TotalSrcNodes); }
    #pragma omp section
    { DstNodeIds.Reserve(TotalDstNodes, TotalDstNodes); }
  }
  #pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
    if (t < SrcPartCnt) {
      TInt i = t;
      if (SrcOffsets[i] != SrcOffsets[i+1]) {
        TInt CurrNode = SrcCol1[SrcOffsets[i]];
        TInt ThreadOffset = SrcIdOffsets[i];
        SrcNodeIds[ThreadOffset] = TIntPr(CurrNode, SrcOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
          while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
          if (j < SrcOffsets[i+1]) {
            CurrNode = SrcCol1[j];
            SrcNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    } else {
      TInt i = t - SrcPartCnt;
      if (DstOffsets[i] != DstOffsets[i+1]) {
        TInt CurrNode = DstCol2[DstOffsets[i]];
        TInt ThreadOffset = DstIdOffsets[i];
        DstNodeIds[ThreadOffset] = TIntPr(CurrNode, DstOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
          while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
          if (j < DstOffsets[i+1]) {
            CurrNode = DstCol2[j];
            DstNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    }
  }
  // double endNode = omp_get_wtime();
  // printf("Node time = %f\n", endNode-endSort);

  // Sequential merge of the two sorted id lists into (id, srcPos, dstPos)
  // triples; -1 marks a node absent from that side.
  TIntTrV Nodes;
  Nodes.Reserve(TotalSrcNodes+TotalDstNodes);
  // double endNodeResize = omp_get_wtime();
  // printf("(NodeResize time = %f)\n", endNodeResize-endNode);
  TInt i = 0, j = 0;
  while (i < TotalSrcNodes && j < TotalDstNodes) {
    if (SrcNodeIds[i].Val1 == DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, j));
      i++;
      j++;
    } else if (SrcNodeIds[i].Val1 < DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1));
      i++;
    } else {
      Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j));
      j++;
    }
  }
  for (; i < TotalSrcNodes; i++) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); }
  for (; j < TotalDstNodes; j++) { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); }
  // double endMerge = omp_get_wtime();
  // printf("Merge time = %f\n", endMerge-endNode);

  TInt NumNodes = Nodes.Len();
  // printf("NumNodes = %d\n", NumNodes.Val);

  PGraphMP Graph = TNGraphMP::New(NumNodes, NumRows);

  // Pass 1: reserve per-node in/out adjacency capacity.
  // NOTE(review): deliberately single-threaded here (NumThreads = 1).
  NumThreads = 1;
  int Delta = (NumNodes+NumThreads-1)/NumThreads;
  TVec<TIntV> InVV(NumNodes);
  TVec<TIntV> OutVV(NumNodes);
  omp_set_num_threads(NumThreads);
  #pragma omp parallel for schedule(static,Delta)
  for (int m = 0; m < NumNodes; m++) {
    //double startTr = omp_get_wtime();
    //TIntV OutV, InV;
    TInt n, i, j;
    Nodes[m].GetVal(n, i, j);
    if (i >= 0) {
      TInt Offset = SrcNodeIds[i].GetVal2();
      TInt Sz = DstCol1.Len()-Offset;
      if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; }
      //printf("OutV: %d %d %d\n", n.Val, Offset.Val, Sz.Val);
      OutVV[m].Reserve(Sz);
    }
    if (j >= 0) {
      TInt Offset = DstNodeIds[j].GetVal2();
      TInt Sz = SrcCol2.Len()-Offset;
      if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; }
      //printf("OutV: %d %d %d\n", n.Val, Offset.Val, Sz.Val);
      InVV[m].Reserve(Sz);
    }
    //double endTr = omp_get_wtime();
    //printf("Thread=%d, i=%d, t=%f\n", omp_get_thread_num(), m, endTr-startTr);
  }
  // double endAlloc = omp_get_wtime();
  // printf("Alloc time = %f\n", endAlloc-endMerge);

  // Pass 2: fill deduplicated neighbor lists and insert nodes with edges.
  NumThreads = omp_get_max_threads();
  Delta = (NumNodes+NumThreads-1)/(10*NumThreads);
  omp_set_num_threads(NumThreads);
  #pragma omp parallel for schedule(dynamic)
  for (int m = 0; m < NumNodes; m++) {
    //double startTr = omp_get_wtime();
    //TIntV OutV, InV;
    TInt n, i, j;
    Nodes[m].GetVal(n, i, j);
    if (i >= 0) {
      TInt Offset = SrcNodeIds[i].GetVal2();
      TInt Sz = DstCol1.Len()-Offset;
      if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; }
      //printf("OutV: %d %d %d\n", n.Val, Offset.Val, Sz.Val);
      OutVV[m].CopyUniqueFrom(DstCol1, Offset, Sz);
    }
    if (j >= 0) {
      TInt Offset = DstNodeIds[j].GetVal2();
      TInt Sz = SrcCol2.Len()-Offset;
      if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; }
      //printf("OutV: %d %d %d\n", n.Val, Offset.Val, Sz.Val);
      InVV[m].CopyUniqueFrom(SrcCol2, Offset, Sz);
    }
    Graph->AddNodeWithEdges(n, InVV[m], OutVV[m]);
    //double endTr = omp_get_wtime();
    //printf("Thread=%d, i=%d, t=%f\n", omp_get_thread_num(), m, endTr-startTr);
  }
  Graph->SetNodes(NumNodes);
  // double endAdd = omp_get_wtime();
  // printf("Add time = %f\n", endAdd-endAlloc);

  return Graph;
}

/// Performs table to graph conversion in parallel. Uses the hash-first method, which is less optimal, use ToGraphMP instead.
/// Performs table-to-graph conversion in parallel using the "hash-first" method:
/// node ids are hashed into a pre-sized open table (AddOutEdge1/AddInEdge1) whose
/// capacity is estimated up front and doubled on overflow. Less optimal than the
/// sort-first ToGraphMP; kept for comparison.
/// Returns a PNGraphMP with SrcCol -> DstCol edges; node ids are the raw int
/// values (atInt) or the string-pool ids (atStr).
template<class PGraphMP>
PGraphMP ToGraphMP3(PTable Table, const TStr& SrcCol, const TStr& DstCol) {
  PNGraphMP Graph;
  int MaxThreads = omp_get_max_threads();
  int Length, Threads, Delta, Nodes, Last;
  uint64_t NumNodesEst;
  TInt SrcColIdx, DstColIdx;
  TIntV InVec, OutVec;

  SrcColIdx = Table->GetColIdx(SrcCol);
  DstColIdx = Table->GetColIdx(DstCol);
  const TAttrType NodeType = Table->GetColType(SrcCol);
  Assert(NodeType == Table->GetColType(DstCol));

  /* Estimate number of nodes in the graph via a bitmap-occupancy estimator:
   * hash the destination ids into sz buckets and count empty buckets. */
  int NumRows = Table->Next.Len();
  double Load = 10;              // target load factor for the bucket array
  int sz = NumRows / Load;
  int *buckets = (int *)malloc(sz * sizeof(int));
  // NOTE(review): malloc result is not checked; also sz == 0 when NumRows < 10
  // would make `vert % sz` divide by zero — presumably inputs are large. Verify.
  #pragma omp parallel for
  for (int i = 0; i < sz; i++) buckets[i] = 0;
  if (NodeType == atInt) {
    #pragma omp parallel for
    for (int i = 0; i < NumRows; i++) {
      int vert = Table->IntCols[DstColIdx][i];
      buckets[vert % sz] = 1;    // benign write race: all writers store 1
    }
  }
  else if (NodeType == atStr ) {
    #pragma omp parallel for
    for (int i = 0; i < NumRows; i++) {
      int vert = (Table->StrColMaps)[DstColIdx][i];
      buckets[vert % sz] = 1;
    }
  }
  int cnt = 0;                   // number of empty buckets
  #pragma omp parallel for reduction(+:cnt)
  for (int i = 0; i < sz; i++) {
    if (buckets[i] == 0) cnt += 1;
  }
  // Occupancy estimator: E[#distinct] = sz * ln(sz / #empty).
  // NOTE(review): if every bucket is hit, cnt == 0 and this divides by zero — confirm
  // inputs make that impossible at this load factor.
  NumNodesEst = sz * log ((double)sz / cnt);
  free (buckets);

  /* Retry loop: rebuild with a doubled estimate until the hash table fits. */
  while (1) {
    Graph = TNGraphMP::New(NumNodesEst, 100);
    Length = Graph->Reserved();
    Threads = MaxThreads/2;
    Delta = (Length + Threads - 1) / Threads;

    OutVec.Gen(Length);          // per-slot out-degree counters
    InVec.Gen(Length);           // per-slot in-degree counters

    /* build the node hash table, count the size of edge lists */
    Last = NumRows;
    Nodes = 0;
    omp_set_num_threads(Threads);
    #pragma omp parallel for schedule(static, Delta)
    for (int CurrRowIdx = 0; CurrRowIdx < Last; CurrRowIdx++) {
      // Early-out when the table is (nearly) full; remaining rows are skipped and
      // the whole pass is redone with a bigger table below.
      // NOTE(review): Nodes is read here without synchronization (updated inside
      // the critical sections) — an intentional approximate check, hence the
      // 1000-slot safety margin.
      if ((uint64_t) Nodes + 1000 >= NumNodesEst) {
        /* need bigger hash table */
        continue;
      }

      TInt SVal, DVal;
      if (NodeType == atInt) {
        SVal = Table->IntCols[SrcColIdx][CurrRowIdx];
        DVal = Table->IntCols[DstColIdx][CurrRowIdx];
      }
      else if (NodeType == atStr ) {
        SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx];
        DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx];
      }
      // AddOutEdge1/AddInEdge1 return false when they claim a fresh slot for the
      // node, so Nodes counts distinct nodes.
      int SrcIdx = abs((SVal.GetPrimHashCd()) % Length);
      if (!Graph->AddOutEdge1(SrcIdx, SVal, DVal)) {
        #pragma omp critical
        {
          Nodes++;
        }
      }
      __sync_fetch_and_add(&OutVec[SrcIdx].Val, 1);

      int DstIdx = abs((DVal.GetPrimHashCd()) % Length);
      if (!Graph->AddInEdge1(DstIdx, SVal, DVal)) {
        #pragma omp critical
        {
          Nodes++;
        }
      }
      __sync_fetch_and_add(&InVec[DstIdx].Val, 1);
    }

    if ((uint64_t) Nodes + 1000 >= NumNodesEst) {
      /* We need to double our num nodes estimate */
      Graph.Clr();
      InVec.Clr();
      OutVec.Clr();
      NumNodesEst *= 2;
    }
    else {
      break;
    }
  }
  Graph->SetNodes(Nodes);

  // NOTE(review): Edges is computed but never used (and never stored on the graph).
  uint Edges = 0;
  for (int i = 0; i < Length; i++) {
    Edges += OutVec[i] + InVec[i];
  }

  // Pre-size each occupied slot's in/out adjacency lists.
  for (int Idx = 0; Idx < Length; Idx++) {
    if (OutVec[Idx] > 0 || InVec[Idx] > 0) {
      Graph->ReserveNodeDegs(Idx, InVec[Idx], OutVec[Idx]);
    }
  }

  /* assign edges */
  Length = Graph->Reserved();
  Threads = MaxThreads;
  Delta = (Length + Threads - 1) / Threads;
  omp_set_num_threads(Threads);
  #pragma omp parallel for schedule(static,Delta)
  for (int CurrRowIdx = 0; CurrRowIdx < Last; CurrRowIdx++) {
    TInt SVal, DVal;
    if (NodeType == atInt) {
      SVal = Table->IntCols[SrcColIdx][CurrRowIdx];
      DVal = Table->IntCols[DstColIdx][CurrRowIdx];
    }
    else if (NodeType == atStr) {
      SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx];
      DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx];
    }
    Graph->AddOutEdge2(SVal, DVal);
    Graph->AddInEdge2(SVal, DVal);
  }

  /* sort edges */
  Length = Graph->Reserved();
  Threads = MaxThreads*2;
  Delta = (Length + Threads - 1) / Threads;
  omp_set_num_threads(Threads);
  #pragma omp parallel for schedule(dynamic)
  for (int Idx = 0; Idx < Length; Idx++) {
    if (OutVec[Idx] > 0 || InVec[Idx] > 0) {
      Graph->SortEdges(Idx, InVec[Idx], OutVec[Idx]);
    }
  }
  return Graph;
}
/// Does Table to Network conversion in parallel using the sort-first algorithm. This is the recommended method to use.
template<class PGraphMP> inline PGraphMP ToNetworkMP(PTable Table, const TStr& SrcCol, const TStr& DstCol, TStrV& SrcAttrV, TStrV& DstAttrV, TStrV& EdgeAttrV, TAttrAggr AggrPolicy) { TStopwatch* Sw = TStopwatch::GetInstance(); Sw->Start(TStopwatch::AllocateColumnCopies); const TInt SrcColIdx = Table->GetColIdx(SrcCol); const TInt DstColIdx = Table->GetColIdx(DstCol); const TInt NumRows = Table->GetNumValidRows(); const TAttrType NodeType = Table->GetColType(SrcCol); Assert(NodeType == Table->GetColType(DstCol)); TIntV SrcCol1, EdgeCol1, EdgeCol2, DstCol2; THash<TInt, TStrIntVH> NodeIntAttrs; THash<TInt, TStrFltVH> NodeFltAttrs; THash<TInt, TStrStrVH> NodeStrAttrs; #pragma omp parallel sections num_threads(4) { #pragma omp section { SrcCol1.Reserve(NumRows, NumRows); } #pragma omp section { EdgeCol1.Reserve(NumRows, NumRows); } #pragma omp section { DstCol2.Reserve(NumRows, NumRows); } #pragma omp section { EdgeCol2.Reserve(NumRows, NumRows); } } Sw->Stop(TStopwatch::AllocateColumnCopies); Sw->Start(TStopwatch::CopyColumns); TIntPrV Partitions; Table->GetPartitionRanges(Partitions, omp_get_max_threads()); TInt PartitionSize = Partitions[0].GetVal2()-Partitions[0].GetVal1()+1; // double endPartition = omp_get_wtime(); // printf("Partition time = %f\n", endPartition-endResize); omp_set_num_threads(omp_get_max_threads()); if (NodeType == atInt) { #pragma omp parallel for schedule(static) for (int i = 0; i < Partitions.Len(); i++) { TRowIterator RowI(Partitions[i].GetVal1(), Table()); TRowIterator EndI(Partitions[i].GetVal2(), Table()); while (RowI < EndI) { TInt RowId = RowI.GetRowIdx(); SrcCol1[RowId] = RowI.GetIntAttr(SrcColIdx); EdgeCol1[RowId] = RowId; DstCol2[RowId] = RowI.GetIntAttr(DstColIdx); EdgeCol2[RowId] = RowId; RowI++; } } } else if (NodeType == atStr) { #pragma omp parallel for schedule(static) for (int i = 0; i < Partitions.Len(); i++) { TRowIterator RowI(Partitions[i].GetVal1(), Table()); TRowIterator EndI(Partitions[i].GetVal2(), Table()); while (RowI 
< EndI) { TInt RowId = RowI.GetRowIdx(); SrcCol1[RowId] = RowI.GetStrMapById(SrcColIdx); EdgeCol1[RowId] = RowId; DstCol2[RowId] = RowI.GetStrMapById(DstColIdx); EdgeCol2[RowId] = RowId; RowI++; } } } Sw->Stop(TStopwatch::CopyColumns); Sw->Start(TStopwatch::Sort); omp_set_num_threads(omp_get_max_threads()); #pragma omp parallel { #pragma omp single nowait { #ifndef GLib_WIN32 #pragma omp task untied shared(SrcCol1, EdgeCol1) #endif { TTable::QSortKeyVal(SrcCol1, EdgeCol1, 0, NumRows-1); } } #pragma omp single nowait { #ifndef GLib_WIN32 #pragma omp task untied shared(EdgeCol2, DstCol2) #endif { TTable::QSortKeyVal(DstCol2, EdgeCol2, 0, NumRows-1); } } #ifndef GLib_WIN32 #pragma omp taskwait #endif } Sw->Stop(TStopwatch::Sort); Sw->Start(TStopwatch::Group); TInt NumThreads = omp_get_max_threads(); TInt PartSize = (NumRows/NumThreads); // Find the offset of all partitions, each of which contains a list of rows. // Nodes from same sources or destinations are ensured to be kept within same partition. 
TIntV SrcOffsets, DstOffsets; SrcOffsets.Add(0); for (TInt i = 1; i < NumThreads; i++) { TInt CurrOffset = i * PartSize; while (CurrOffset < (i+1) * PartSize && SrcCol1[CurrOffset-1] == SrcCol1[CurrOffset]) { // ensure that rows from the same sources are grouped together CurrOffset++; } if (CurrOffset < (i+1) * PartSize) { SrcOffsets.Add(CurrOffset); } } SrcOffsets.Add(NumRows); DstOffsets.Add(0); for (TInt i = 1; i < NumThreads; i++) { TInt CurrOffset = i * PartSize; while (CurrOffset < (i+1) * PartSize && DstCol2[CurrOffset-1] == DstCol2[CurrOffset]) { // ensure that rows to the same destinations are grouped together CurrOffset++; } if (CurrOffset < (i+1) * PartSize) { DstOffsets.Add(CurrOffset); } } DstOffsets.Add(NumRows); TInt SrcPartCnt = SrcOffsets.Len()-1; // number of partitions TInt DstPartCnt = DstOffsets.Len()-1; // number of partitions // count the number of source nodes and destination nodes in each partition TIntV SrcNodeCounts, DstNodeCounts; SrcNodeCounts.Reserve(SrcPartCnt, SrcPartCnt); DstNodeCounts.Reserve(DstPartCnt, DstPartCnt); #pragma omp parallel for schedule(dynamic) for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) { if (t < SrcPartCnt) { TInt i = t; if (SrcOffsets[i] != SrcOffsets[i+1]) { SrcNodeCounts[i] = 1; TInt CurrNode = SrcCol1[SrcOffsets[i]]; for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) { while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; } if (j < SrcOffsets[i+1]) { SrcNodeCounts[i]++; CurrNode = SrcCol1[j]; } } } } else { TInt i = t - SrcPartCnt; if (DstOffsets[i] != DstOffsets[i+1]) { DstNodeCounts[i] = 1; TInt CurrNode = DstCol2[DstOffsets[i]]; for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) { while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; } if (j < DstOffsets[i+1]) { DstNodeCounts[i]++; CurrNode = DstCol2[j]; } } } } } TInt TotalSrcNodes = 0; TIntV SrcIdOffsets; for (int i = 0; i < SrcPartCnt; i++) { SrcIdOffsets.Add(TotalSrcNodes); TotalSrcNodes += SrcNodeCounts[i]; } TInt 
TotalDstNodes = 0; TIntV DstIdOffsets; for (int i = 0; i < DstPartCnt; i++) { DstIdOffsets.Add(TotalDstNodes); TotalDstNodes += DstNodeCounts[i]; } // printf("Total Src = %d, Total Dst = %d\n", TotalSrcNodes.Val, TotalDstNodes.Val); // find vector of (node_id, start_offset) where start_offset is the index of the first row with node_id TIntPrV SrcNodeIds, DstNodeIds; #pragma omp parallel sections { #pragma omp section { SrcNodeIds.Reserve(TotalSrcNodes, TotalSrcNodes); } #pragma omp section { DstNodeIds.Reserve(TotalDstNodes, TotalDstNodes); } } // Find the starting offset of each node (in both src and dst) #pragma omp parallel for schedule(dynamic) for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) { if (t < SrcPartCnt) { TInt i = t; if (SrcOffsets[i] != SrcOffsets[i+1]) { TInt CurrNode = SrcCol1[SrcOffsets[i]]; TInt ThreadOffset = SrcIdOffsets[i]; SrcNodeIds[ThreadOffset] = TIntPr(CurrNode, SrcOffsets[i]); TInt CurrCount = 1; for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) { while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; } if (j < SrcOffsets[i+1]) { CurrNode = SrcCol1[j]; SrcNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j); CurrCount++; } } } } else { TInt i = t - SrcPartCnt; if (DstOffsets[i] != DstOffsets[i+1]) { TInt CurrNode = DstCol2[DstOffsets[i]]; TInt ThreadOffset = DstIdOffsets[i]; DstNodeIds[ThreadOffset] = TIntPr(CurrNode, DstOffsets[i]); TInt CurrCount = 1; for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) { while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; } if (j < DstOffsets[i+1]) { CurrNode = DstCol2[j]; DstNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j); CurrCount++; } } } } } Sw->Stop(TStopwatch::Group); Sw->Start(TStopwatch::MergeNeighborhoods); // Find the combined neighborhood (both out-neighbors and in-neighbors) of each node TIntTrV Nodes; Nodes.Reserve(TotalSrcNodes+TotalDstNodes); TInt i = 0, j = 0; while (i < TotalSrcNodes && j < TotalDstNodes) { if (SrcNodeIds[i].Val1 == 
DstNodeIds[j].Val1) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, j)); i++; j++; } else if (SrcNodeIds[i].Val1 < DstNodeIds[j].Val1) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); i++; } else { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); j++; } } for (; i < TotalSrcNodes; i++) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); } for (; j < TotalDstNodes; j++) { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); } Sw->Stop(TStopwatch::MergeNeighborhoods); Sw->Start(TStopwatch::AddNeighborhoods); TInt NumNodes = Nodes.Len(); PGraphMP Graph = PGraphMP::TObj::New(NumNodes, NumRows); // NumThreads = omp_get_max_threads(); // int Delta = (NumNodes+NumThreads-1)/NumThreads; TVec<TIntV> InVV(NumNodes); TVec<TIntV> OutVV(NumNodes); // omp_set_num_threads(NumThreads); #pragma omp parallel for schedule(static,100) for (int m = 0; m < NumNodes; m++) { //double startTr = omp_get_wtime(); //TIntV OutV, InV; TInt n, i, j; Nodes[m].GetVal(n, i, j); if (i >= 0) { TInt Offset = SrcNodeIds[i].GetVal2(); TInt Sz = EdgeCol1.Len()-Offset; if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; } OutVV[m].Reserve(Sz); OutVV[m].CopyUniqueFrom(EdgeCol1, Offset, Sz); } if (j >= 0) { TInt Offset = DstNodeIds[j].GetVal2(); TInt Sz = EdgeCol2.Len()-Offset; if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; } InVV[m].Reserve(Sz); InVV[m].CopyUniqueFrom(EdgeCol2, Offset, Sz); } Graph->AddNodeWithEdges(n, InVV[m], OutVV[m]); } Graph->SetNodes(NumNodes); Sw->Stop(TStopwatch::AddNeighborhoods); Sw->Start(TStopwatch::AddEdges); omp_set_num_threads(omp_get_max_threads()); if (NodeType == atInt) { #pragma omp parallel for schedule(static) for (int i = 0; i < Partitions.Len(); i++) { TRowIterator RowI(Partitions[i].GetVal1(), Table()); TRowIterator EndI(Partitions[i].GetVal2(), Table()); while (RowI < EndI) { TInt RowId = RowI.GetRowIdx(); // EdgeId TInt SrcId = RowI.GetIntAttr(SrcColIdx); TInt DstId = RowI.GetIntAttr(DstColIdx); Graph->AddEdgeUnchecked(RowId, SrcId, DstId); 
RowI++; for (TInt ea_i = 0; ea_i < EdgeAttrV.Len(); ea_i++) { TStr ColName = EdgeAttrV[ea_i]; TAttrType T = Table->GetColType(ColName); TInt Index = Table->GetColIdx(ColName); switch (T) { case atInt: Graph->AddIntAttrDatE(RowId, Table->IntCols[Index][RowId], ColName); break; case atFlt: Graph->AddFltAttrDatE(RowId, Table->FltCols[Index][RowId], ColName); break; case atStr: Graph->AddStrAttrDatE(RowId, Table->GetStrValIdx(Index, RowId), ColName); break; } } if ((Table->SrcNodeAttrV).Len() > 0) { Table->AddNodeAttributes(SrcId, Table->SrcNodeAttrV, RowId, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs); } if ((Table->DstNodeAttrV).Len() > 0) { Table->AddNodeAttributes(SrcId, Table->DstNodeAttrV, RowId, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs); } } } } else if (NodeType == atStr) { #pragma omp parallel for schedule(static) for (int i = 0; i < Partitions.Len(); i++) { TRowIterator RowI(Partitions[i].GetVal1(), Table()); TRowIterator EndI(Partitions[i].GetVal2(), Table()); while (RowI < EndI) { TInt RowId = RowI.GetRowIdx(); // EdgeId TInt SrcId = RowI.GetStrMapById(SrcColIdx); TInt DstId = RowI.GetStrMapById(DstColIdx); Graph->AddEdgeUnchecked(RowId, SrcId, DstId); RowI++; for (TInt ea_i = 0; ea_i < EdgeAttrV.Len(); ea_i++) { TStr ColName = EdgeAttrV[ea_i]; TAttrType T = Table->GetColType(ColName); TInt Index = Table->GetColIdx(ColName); switch (T) { case atInt: Graph->AddIntAttrDatE(RowId, Table->IntCols[Index][RowId], ColName); break; case atFlt: Graph->AddFltAttrDatE(RowId, Table->FltCols[Index][RowId], ColName); break; case atStr: Graph->AddStrAttrDatE(RowId, Table->GetStrValIdx(Index, RowId), ColName); break; } } if ((Table->SrcNodeAttrV).Len() > 0) { Table->AddNodeAttributes(SrcId, Table->SrcNodeAttrV, RowId, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs); } if ((Table->DstNodeAttrV).Len() > 0) { Table->AddNodeAttributes(SrcId, Table->DstNodeAttrV, RowId, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs); } } } } // aggregate node attributes and add to graph if 
((Table->SrcNodeAttrV).Len() > 0 || (Table->DstNodeAttrV).Len() > 0) { for (typename PGraphMP::TObj::TNodeI NodeI = Graph->BegNI(); NodeI < Graph->EndNI(); NodeI++) { TInt NId = NodeI.GetId(); if (NodeIntAttrs.IsKey(NId)) { TStrIntVH IntAttrVals = NodeIntAttrs.GetDat(NId); for (TStrIntVH::TIter it = IntAttrVals.BegI(); it < IntAttrVals.EndI(); it++) { TInt AttrVal = Table->AggregateVector<TInt>(it.GetDat(), AggrPolicy); Graph->AddIntAttrDatN(NId, AttrVal, it.GetKey()); } } if (NodeFltAttrs.IsKey(NId)) { TStrFltVH FltAttrVals = NodeFltAttrs.GetDat(NId); for (TStrFltVH::TIter it = FltAttrVals.BegI(); it < FltAttrVals.EndI(); it++) { TFlt AttrVal = Table->AggregateVector<TFlt>(it.GetDat(), AggrPolicy); Graph->AddFltAttrDatN(NId, AttrVal, it.GetKey()); } } if (NodeStrAttrs.IsKey(NId)) { TStrStrVH StrAttrVals = NodeStrAttrs.GetDat(NId); for (TStrStrVH::TIter it = StrAttrVals.BegI(); it < StrAttrVals.EndI(); it++) { TStr AttrVal = Table->AggregateVector<TStr>(it.GetDat(), AggrPolicy); Graph->AddStrAttrDatN(NId, AttrVal, it.GetKey()); } } } } Graph->SetEdges(NumRows); Sw->Stop(TStopwatch::AddEdges); // double endAdd = omp_get_wtime(); // printf("Add time = %f\n", endAdd-endAlloc); return Graph; } /// Calls ToNetworkMP with empty attribute vector. Convenience wrapper. template<class PGraphMP> PGraphMP ToNetworkMP(PTable Table, const TStr& SrcCol, const TStr& DstCol, TAttrAggr AggrPolicy) { TStrV V; return ToNetworkMP<PGraphMP>(Table, SrcCol, DstCol, V,AggrPolicy); } ///Implements table to network conversion in parallel. Not the recommended algorithm, using ToNetworkMP instead. 
/// Parallel table-to-network conversion using a fixed-width bucket ("collector")
/// sort: rows are locally sorted by 40 threads, then redistributed into 20
/// value-range collectors and sorted again. Not the recommended path; prefer
/// ToNetworkMP. SrcAttrV/DstAttrV/EdgeAttrV are accepted but, unlike ToNetworkMP,
/// no attributes are copied here — only the topology is built.
template<class PGraphMP>
inline PGraphMP ToNetworkMP2(PTable Table, const TStr& SrcCol, const TStr& DstCol,
 TStrV& SrcAttrV, TStrV& DstAttrV, TStrV& EdgeAttrV, TAttrAggr AggrPolicy) {
  TStopwatch* Sw = TStopwatch::GetInstance();
  Sw->Start(TStopwatch::AllocateColumnCopies);
  const TInt SrcColIdx = Table->GetColIdx(SrcCol);
  const TInt DstColIdx = Table->GetColIdx(DstCol);
  const TInt NumRows = Table->NumValidRows;
  const TAttrType NodeType = Table->GetColType(SrcCol);
  Assert(NodeType == Table->GetColType(DstCol));
  // (key, edge-id) column pairs; EdgeCol* carry the originating row id.
  TIntV SrcCol1, EdgeCol1, EdgeCol2, DstCol2;
  #pragma omp parallel sections num_threads(4)
  {
    #pragma omp section
    { SrcCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { DstCol2.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol2.Reserve(NumRows, NumRows); }
  }
  Sw->Stop(TStopwatch::AllocateColumnCopies);

  Sw->Start(TStopwatch::CopyColumns);
  TIntPrV Partitions;
  // int NThreads = omp_get_max_threads();  // hard-coded below instead
  const int NThreads = 40;
  Table->GetPartitionRanges(Partitions, NThreads);
  TInt PartitionSize = Partitions[0].GetVal2()-Partitions[0].GetVal1()+1;
  if (NodeType == atInt) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetIntAttr(SrcColIdx);
        EdgeCol1[RowId] = RowId;
        DstCol2[RowId] = RowI.GetIntAttr(DstColIdx);
        EdgeCol2[RowId] = RowId;
        RowI++;
      }
    }
  }
  else if (NodeType == atStr) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetStrMapById(SrcColIdx);
        EdgeCol1[RowId] = RowId;
        DstCol2[RowId] = RowI.GetStrMapById(DstColIdx);
        EdgeCol2[RowId] = RowId;
        RowI++;
      }
    }
  }
  // Equal-width row partitions for the local sorts (independent of Partitions above).
  int Parts[NThreads+1];
  for (int i = 0; i < NThreads; i++) { Parts[i] = NumRows.Val / NThreads * i; }
  Parts[NThreads] = NumRows;
  Sw->Stop(TStopwatch::CopyColumns);

  Sw->Start(TStopwatch::Sort);
  // ExtremePoints rows: [0]=src min, [1]=dst min, [2]=src max, [3]=dst max per partition.
  TInt ExtremePoints[4][NThreads];
  omp_set_num_threads(omp_get_max_threads());
  #pragma omp parallel
  {
    #pragma omp for schedule(static) nowait
    for (int i = 0; i < NThreads; i++) {
      TInt StartPos = Parts[i];
      TInt EndPos = Parts[i+1]-1;
      // TODO: Handle empty partition
      TTable::QSortKeyVal(SrcCol1, EdgeCol1, StartPos, EndPos);
      ExtremePoints[0][i] = SrcCol1[StartPos];
      ExtremePoints[2][i] = SrcCol1[EndPos];
    }
    #pragma omp for schedule(static) nowait
    for (int i = 0; i < NThreads; i++) {
      TInt StartPos = Parts[i];
      TInt EndPos = Parts[i+1]-1;
      // TODO: Handle empty partition
      TTable::QSortKeyVal(DstCol2, EdgeCol2, StartPos, EndPos);
      ExtremePoints[1][i] = DstCol2[StartPos];
      ExtremePoints[3][i] = DstCol2[EndPos];
    }
  }
  // Global min/max node id over both columns.
  TInt MinId(INT_MAX);
  for (int j = 0; j < 2; j++) {
    for (int i = 0; i < NThreads; i++) {
      if (MinId > ExtremePoints[j][i]) { MinId = ExtremePoints[j][i]; }
    }
  }
  TInt MaxId(-1);
  for (int j = 2; j < 4; j++) {
    for (int i = 0; i < NThreads; i++) {
      if (MaxId < ExtremePoints[j][i]) { MaxId = ExtremePoints[j][i]; }
    }
  }
  Sw->Stop(TStopwatch::Sort);

  Sw->Start(TStopwatch::Group);
  // Split [MinId, MaxId] into NumCollectors equal-width id ranges; collector k
  // owns ids in [IdRanges[k], IdRanges[k+1]).
  // const int NumCollectors = omp_get_max_threads();  // hard-coded instead
  const int NumCollectors = 20;
  int Range = MaxId.Val - MinId.Val;
  TIntV IdRanges(NumCollectors+1);
  for (int j = 0; j < NumCollectors; j++) { IdRanges[j] = MinId + Range/NumCollectors*j; }
  IdRanges[NumCollectors] = MaxId+1;

  // SrcOffsets[i][k] = first index in partition i whose key reaches collector k.
  int SrcOffsets[NThreads][NumCollectors+1];
  #pragma omp parallel for schedule(static)
  for (int i = 0; i < NThreads; i++) {
    int CollectorId = 0;
    for (int j = Parts[i]; j < Parts[i+1]; j++) {
      while (SrcCol1[j] >= IdRanges[CollectorId]) { SrcOffsets[i][CollectorId++] = j; }
    }
    while (CollectorId <= NumCollectors) { SrcOffsets[i][CollectorId++] = Parts[i+1]; }
  }
  int DstOffsets[NThreads][NumCollectors+1];
  #pragma omp parallel for schedule(static)
  for (int i = 0; i < NThreads; i++) {
    int CollectorId = 0;
    for (int j = Parts[i]; j < Parts[i+1]; j++) {
      while (DstCol2[j] >= IdRanges[CollectorId]) { DstOffsets[i][CollectorId++] = j; }
    }
    while (CollectorId <= NumCollectors) { DstOffsets[i][CollectorId++] = Parts[i+1]; }
  }

  // Prefix sums: where each collector's rows start in the redistributed columns.
  TIntV SrcCollectorOffsets(NumCollectors+1);
  SrcCollectorOffsets[0] = 0;
  for (int k = 0; k < NumCollectors; k++) {
    int SumOffset = 0;
    for (int i = 0; i < NThreads; i++) { SumOffset += SrcOffsets[i][k+1] - SrcOffsets[i][k]; }
    SrcCollectorOffsets[k+1] = SrcCollectorOffsets[k] + SumOffset;
  }
  TIntV DstCollectorOffsets(NumCollectors+1);
  DstCollectorOffsets[0] = 0;
  for (int k = 0; k < NumCollectors; k++) {
    int SumOffset = 0;
    for (int i = 0; i < NThreads; i++) { SumOffset += DstOffsets[i][k+1] - DstOffsets[i][k]; }
    DstCollectorOffsets[k+1] = DstCollectorOffsets[k] + SumOffset;
  }

  // Redistributed, globally-sorted copies of the column pairs.
  TIntV SrcCol3, EdgeCol3, EdgeCol4, DstCol4;
  #pragma omp parallel sections num_threads(4)
  {
    #pragma omp section
    { SrcCol3.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol3.Reserve(NumRows, NumRows); }
    #pragma omp section
    { DstCol4.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol4.Reserve(NumRows, NumRows); }
  }
  // Each collector gathers its id range from every partition, sorts it, and
  // counts the distinct node ids it holds.
  TIntV SrcNodeCounts(NumCollectors), DstNodeCounts(NumCollectors);
  #pragma omp parallel for schedule(static)
  for (int k = 0; k < NumCollectors; k++) {
    int ind = SrcCollectorOffsets[k];
    for (int i = 0; i < NThreads; i++) {
      for (int j = SrcOffsets[i][k]; j < SrcOffsets[i][k+1]; j++) {
        SrcCol3[ind] = SrcCol1[j];
        EdgeCol3[ind] = EdgeCol1[j];
        ind++;
      }
    }
    TTable::QSortKeyVal(SrcCol3, EdgeCol3, SrcCollectorOffsets[k], SrcCollectorOffsets[k+1]-1);
    int SrcCount = 0;
    if (SrcCollectorOffsets[k+1] > SrcCollectorOffsets[k]) {
      SrcCount = 1;
      for (int j = SrcCollectorOffsets[k]+1; j < SrcCollectorOffsets[k+1]; j++) {
        if (SrcCol3[j] != SrcCol3[j-1]) { SrcCount++; }
      }
    }
    SrcNodeCounts[k] = SrcCount;
    ind = DstCollectorOffsets[k];
    for (int i = 0; i < NThreads; i++) {
      for (int j = DstOffsets[i][k]; j < DstOffsets[i][k+1]; j++) {
        DstCol4[ind] = DstCol2[j];
        EdgeCol4[ind] = EdgeCol2[j];
        ind++;
      }
    }
    TTable::QSortKeyVal(DstCol4, EdgeCol4, DstCollectorOffsets[k], DstCollectorOffsets[k+1]-1);
    int DstCount = 0;
    if (DstCollectorOffsets[k+1] > DstCollectorOffsets[k]) {
      DstCount = 1;
      for (int j = DstCollectorOffsets[k]+1; j < DstCollectorOffsets[k+1]; j++) {
        if (DstCol4[j] != DstCol4[j-1]) { DstCount++; }
      }
    }
    DstNodeCounts[k] = DstCount;
  }

  // Prefix sums over per-collector distinct-node counts.
  TInt TotalSrcNodes = 0;
  TIntV SrcIdOffsets;
  for (int i = 0; i < NumCollectors; i++) {
    SrcIdOffsets.Add(TotalSrcNodes);
    TotalSrcNodes += SrcNodeCounts[i];
  }
  TInt TotalDstNodes = 0;
  TIntV DstIdOffsets;
  for (int i = 0; i < NumCollectors; i++) {
    DstIdOffsets.Add(TotalDstNodes);
    TotalDstNodes += DstNodeCounts[i];
  }

  // find vector of (node_id, start_offset) where start_offset is the index of the first row with node_id
  TIntPrV SrcNodeIds, DstNodeIds;
  #pragma omp parallel sections
  {
    #pragma omp section
    { SrcNodeIds.Reserve(TotalSrcNodes, TotalSrcNodes); }
    #pragma omp section
    { DstNodeIds.Reserve(TotalDstNodes, TotalDstNodes); }
  }
  // Find the starting offset of each node (in both src and dst)
  #pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < 2*NumCollectors; t++) {
    if (t < NumCollectors) {
      TInt i = t;
      if (SrcCollectorOffsets[i] < SrcCollectorOffsets[i+1]) {
        TInt CurrNode = SrcCol3[SrcCollectorOffsets[i]];
        TInt ThreadOffset = SrcIdOffsets[i];
        SrcNodeIds[ThreadOffset] = TIntPr(CurrNode, SrcCollectorOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = SrcCollectorOffsets[i]+1; j < SrcCollectorOffsets[i+1]; j++) {
          while (j < SrcCollectorOffsets[i+1] && SrcCol3[j] == CurrNode) { j++; }
          if (j < SrcCollectorOffsets[i+1]) {
            CurrNode = SrcCol3[j];
            SrcNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    } else {
      TInt i = t - NumCollectors;
      if (DstCollectorOffsets[i] < DstCollectorOffsets[i+1]) {
        TInt CurrNode = DstCol4[DstCollectorOffsets[i]];
        TInt ThreadOffset = DstIdOffsets[i];
        DstNodeIds[ThreadOffset] = TIntPr(CurrNode, DstCollectorOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = DstCollectorOffsets[i]+1; j < DstCollectorOffsets[i+1]; j++) {
          while (j < DstCollectorOffsets[i+1] && DstCol4[j] == CurrNode) { j++; }
          if (j < DstCollectorOffsets[i+1]) {
            CurrNode = DstCol4[j];
            DstNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    }
  }
  Sw->Stop(TStopwatch::Group);

  Sw->Start(TStopwatch::MergeNeighborhoods);
  // Merge sorted src/dst node-id lists into (node, src_index, dst_index); -1 = absent.
  TIntTrV Nodes;
  Nodes.Reserve(TotalSrcNodes+TotalDstNodes);
  TInt i = 0, j = 0;
  while (i < TotalSrcNodes && j < TotalDstNodes) {
    if (SrcNodeIds[i].Val1 == DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, j));
      i++;
      j++;
    } else if (SrcNodeIds[i].Val1 < DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1));
      i++;
    } else {
      Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j));
      j++;
    }
  }
  for (; i < TotalSrcNodes; i++) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); }
  for (; j < TotalDstNodes; j++) { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); }
  Sw->Stop(TStopwatch::MergeNeighborhoods);

  Sw->Start(TStopwatch::AddNeighborhoods);
  TInt NumNodes = Nodes.Len();
  PGraphMP Graph = PGraphMP::TObj::New(NumNodes, NumRows);
  TVec<TIntV> InVV(NumNodes);
  TVec<TIntV> OutVV(NumNodes);
  // Build per-node out/in edge-id lists from the collector-sorted columns.
  #pragma omp parallel for schedule(static,100)
  for (int m = 0; m < NumNodes; m++) {
    TInt n, i, j;
    Nodes[m].GetVal(n, i, j);
    if (i >= 0) {
      TInt Offset = SrcNodeIds[i].GetVal2();
      TInt Sz = EdgeCol3.Len()-Offset;
      if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; }
      OutVV[m].Reserve(Sz);
      OutVV[m].CopyUniqueFrom(EdgeCol3, Offset, Sz);
    }
    if (j >= 0) {
      TInt Offset = DstNodeIds[j].GetVal2();
      TInt Sz = EdgeCol4.Len()-Offset;
      if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; }
      InVV[m].Reserve(Sz);
      InVV[m].CopyUniqueFrom(EdgeCol4, Offset, Sz);
    }
    Graph->AddNodeWithEdges(n, InVV[m], OutVV[m]);
  }
  Graph->SetNodes(NumNodes);
  Sw->Stop(TStopwatch::AddNeighborhoods);

  Sw->Start(TStopwatch::AddEdges);
  omp_set_num_threads(omp_get_max_threads());
  if (NodeType == atInt) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx(); // EdgeId
        TInt SrcId = RowI.GetIntAttr(SrcColIdx);
        TInt DstId = RowI.GetIntAttr(DstColIdx);
        Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
        RowI++;
      }
    }
  }
  else if (NodeType == atStr) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx(); // EdgeId
        TInt SrcId = RowI.GetStrMapById(SrcColIdx);
        TInt DstId = RowI.GetStrMapById(DstColIdx);
        Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
        RowI++;
      }
    }
  }
  Graph->SetEdges(NumRows);
  Sw->Stop(TStopwatch::AddEdges);
  return Graph;
}
/// Calls ToNetworkMP2 with an empty attribute vector. Convenience wrapper.
template<class PGraphMP>
PGraphMP ToNetworkMP2(PTable Table, const TStr& SrcCol, const TStr& DstCol, TAttrAggr AggrPolicy) {
  TStrV V;
  return ToNetworkMP2<PGraphMP>(Table, SrcCol, DstCol, V, V, V, AggrPolicy);
}
#endif // GCC_ATOMIC

/// Loads a mode, with name Name, into the PMMNet from the TTable. NCol specifies the node id column and NodeAttrV the node attributes.
int LoadModeNetToNet(PMMNet Graph, const TStr& Name, PTable Table, const TStr& NCol, TStrV& NodeAttrV);
/// Loads the nodes specified in column NCol from the TTable with the attributes specified in NodeAttrV.
int LoadMode(TModeNet& Graph, PTable Table, const TStr& NCol, TStrV& NodeAttrV);
/// Loads a crossnet from Mode1 to Mode2, with name CrossName, from the provided TTable. EdgeAttrV specifies edge attributes.
int LoadCrossNetToNet(PMMNet Graph, const TStr& Mode1, const TStr& Mode2, const TStr& CrossName, PTable Table, const TStr& SrcCol, const TStr& DstCol, TStrV& EdgeAttrV); /// Loads the edges from the TTable and EdgeAttrV specifies columns containing edge attributes. int LoadCrossNet(TCrossNet& Graph, PTable Table, const TStr& SrcCol, const TStr& DstCol, TStrV& EdgeAttrV); /// Converts table to a network sequentially. Use if network has only edge attributes. template<class PGraph> PGraph ToNetwork(PTable Table, const TStr& SrcCol, const TStr& DstCol, TStrV& EdgeAttrV, TAttrAggr AggrPolicy) { PGraph Graph = PGraph::TObj::New(); const TAttrType NodeType = Table->GetColType(SrcCol); Assert(NodeType == Table->GetColType(DstCol)); const TInt SrcColIdx = Table->GetColIdx(SrcCol); const TInt DstColIdx = Table->GetColIdx(DstCol); //Table->AddGraphAttributeV(SrcAttrV, false, true, false); //Table->AddGraphAttributeV(DstAttrV, false, false, true); //Table->AddGraphAttributeV(EdgeAttrV, true, false, true); // node values - i.e. the unique values of src/dst col //THashSet<TInt> IntNodeVals; // for both int and string node attr types. 
THash<TFlt, TInt> FltNodeVals; // make single pass over all rows in the table for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) { if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; } // add src and dst nodes to graph if they are not seen earlier TInt SVal, DVal; if (NodeType == atFlt) { TFlt FSVal = (Table->FltCols)[SrcColIdx][CurrRowIdx]; SVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FSVal); TFlt FDVal = (Table->FltCols)[SrcColIdx][CurrRowIdx]; DVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FDVal); } else if (NodeType == atInt || NodeType == atStr) { if (NodeType == atInt) { SVal = (Table->IntCols)[SrcColIdx][CurrRowIdx]; DVal = (Table->IntCols)[DstColIdx][CurrRowIdx]; } else { SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx]; // if (strlen(Table->GetContextKey(SVal)) == 0) { continue; } //illegal value DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx]; // if (strlen(Table->GetContextKey(DVal)) == 0) { continue; } //illegal value } if (!Graph->IsNode(SVal)) {Graph->AddNode(SVal); } if (!Graph->IsNode(DVal)) {Graph->AddNode(DVal); } //CheckAndAddIntNode(Graph, IntNodeVals, SVal); //CheckAndAddIntNode(Graph, IntNodeVals, DVal); } // add edge and edge attributes Graph->AddEdge(SVal, DVal, CurrRowIdx); // Aggregate edge attributes and add to graph for (TInt i = 0; i < EdgeAttrV.Len(); i++) { TStr ColName = EdgeAttrV[i]; TAttrType T = Table->GetColType(ColName); TInt Index = Table->GetColIdx(ColName); switch (T) { case atInt: Graph->AddIntAttrDatE(CurrRowIdx, Table->IntCols[Index][CurrRowIdx], ColName); break; case atFlt: Graph->AddFltAttrDatE(CurrRowIdx, Table->FltCols[Index][CurrRowIdx], ColName); break; case atStr: Graph->AddStrAttrDatE(CurrRowIdx, Table->GetStrValIdx(Index, CurrRowIdx), ColName); break; } } } return Graph; } #ifdef GCC_ATOMIC /// Converts table to network in parallel. Use if network has only edge attributes. 
// Parallel bulk-load pipeline (edge attributes only). Stages, each timed via
// TStopwatch: copy endpoint columns -> sort both (key,edge-id) copies -> group
// rows by node -> merge in/out neighbor lists -> build graph -> add edges/attrs.
// NOTE(review): guarded by GCC_ATOMIC at the surrounding #ifdef.
template<class PGraphMP>
inline PGraphMP ToNetworkMP(PTable Table, const TStr& SrcCol, const TStr& DstCol,
                            TStrV& EdgeAttrV, TAttrAggr AggrPolicy) {
  TStopwatch* Sw = TStopwatch::GetInstance();
  Sw->Start(TStopwatch::AllocateColumnCopies);
  const TInt SrcColIdx = Table->GetColIdx(SrcCol);
  const TInt DstColIdx = Table->GetColIdx(DstCol);
  const TInt NumRows = Table->GetNumValidRows();
  const TAttrType NodeType = Table->GetColType(SrcCol);
  Assert(NodeType == Table->GetColType(DstCol));
  // Two sortable (node, edge-id) column copies: SrcCol1/EdgeCol1 keyed by
  // source node, DstCol2/EdgeCol2 keyed by destination node.
  TIntV SrcCol1, EdgeCol1, EdgeCol2, DstCol2;
  THash<TInt, TStrIntVH> NodeIntAttrs;
  THash<TInt, TStrFltVH> NodeFltAttrs;
  THash<TInt, TStrStrVH> NodeStrAttrs;
  #pragma omp parallel sections num_threads(4)
  {
    #pragma omp section
    { SrcCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { DstCol2.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol2.Reserve(NumRows, NumRows); }
  }
  Sw->Stop(TStopwatch::AllocateColumnCopies);
  Sw->Start(TStopwatch::CopyColumns);
  TIntPrV Partitions;
  Table->GetPartitionRanges(Partitions, omp_get_max_threads());
  TInt PartitionSize = Partitions[0].GetVal2()-Partitions[0].GetVal1()+1;
  // double endPartition = omp_get_wtime();
  // printf("Partition time = %f\n", endPartition-endResize);
  omp_set_num_threads(omp_get_max_threads());
  // Copy endpoints into the flat columns; the row index is used as the edge id.
  if (NodeType == atInt) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetIntAttr(SrcColIdx);
        EdgeCol1[RowId] = RowId;
        DstCol2[RowId] = RowI.GetIntAttr(DstColIdx);
        EdgeCol2[RowId] = RowId;
        RowI++;
      }
    }
  } else if (NodeType == atStr) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetStrMapById(SrcColIdx);
        EdgeCol1[RowId] = RowId;
        DstCol2[RowId] = RowI.GetStrMapById(DstColIdx);
        EdgeCol2[RowId] = RowId;
        RowI++;
      }
    }
  }
  Sw->Stop(TStopwatch::CopyColumns);
  Sw->Start(TStopwatch::Sort);
  omp_set_num_threads(omp_get_max_threads());
  // Sort both copies by node id, carrying the edge ids along (two concurrent tasks).
  #pragma omp parallel
  {
    #pragma omp single nowait
    {
      #ifndef GLib_WIN32
      #pragma omp task untied shared(SrcCol1, EdgeCol1)
      #endif
      { TTable::QSortKeyVal(SrcCol1, EdgeCol1, 0, NumRows-1); }
    }
    #pragma omp single nowait
    {
      #ifndef GLib_WIN32
      #pragma omp task untied shared(EdgeCol2, DstCol2)
      #endif
      { TTable::QSortKeyVal(DstCol2, EdgeCol2, 0, NumRows-1); }
    }
    #ifndef GLib_WIN32
    #pragma omp taskwait
    #endif
  }
  Sw->Stop(TStopwatch::Sort);
  Sw->Start(TStopwatch::Group);
  TInt NumThreads = omp_get_max_threads();
  TInt PartSize = (NumRows/NumThreads);
  // Find the offset of all partitions, each of which contains a list of rows.
  // Nodes from same sources or destinations are ensured to be kept within same partition.
  TIntV SrcOffsets, DstOffsets;
  SrcOffsets.Add(0);
  for (TInt i = 1; i < NumThreads; i++) {
    TInt CurrOffset = i * PartSize;
    while (CurrOffset < (i+1) * PartSize &&
           SrcCol1[CurrOffset-1] == SrcCol1[CurrOffset]) {
      // ensure that rows from the same sources are grouped together
      CurrOffset++;
    }
    if (CurrOffset < (i+1) * PartSize) { SrcOffsets.Add(CurrOffset); }
  }
  SrcOffsets.Add(NumRows);
  DstOffsets.Add(0);
  for (TInt i = 1; i < NumThreads; i++) {
    TInt CurrOffset = i * PartSize;
    while (CurrOffset < (i+1) * PartSize &&
           DstCol2[CurrOffset-1] == DstCol2[CurrOffset]) {
      // ensure that rows to the same destinations are grouped together
      CurrOffset++;
    }
    if (CurrOffset < (i+1) * PartSize) { DstOffsets.Add(CurrOffset); }
  }
  DstOffsets.Add(NumRows);
  TInt SrcPartCnt = SrcOffsets.Len()-1; // number of partitions
  TInt DstPartCnt = DstOffsets.Len()-1; // number of partitions
  // count the number of source nodes and destination nodes in each partition
  TIntV SrcNodeCounts, DstNodeCounts;
  SrcNodeCounts.Reserve(SrcPartCnt, SrcPartCnt);
  DstNodeCounts.Reserve(DstPartCnt, DstPartCnt);
  #pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
    if (t < SrcPartCnt) {
      // first SrcPartCnt tasks scan source partitions, the rest destination ones
      TInt i = t;
      if (SrcOffsets[i] != SrcOffsets[i+1]) {
        SrcNodeCounts[i] = 1;
        TInt CurrNode = SrcCol1[SrcOffsets[i]];
        for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
          while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
          if (j < SrcOffsets[i+1]) {
            SrcNodeCounts[i]++;
            CurrNode = SrcCol1[j];
          }
        }
      }
    } else {
      TInt i = t - SrcPartCnt;
      if (DstOffsets[i] != DstOffsets[i+1]) {
        DstNodeCounts[i] = 1;
        TInt CurrNode = DstCol2[DstOffsets[i]];
        for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
          while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
          if (j < DstOffsets[i+1]) {
            DstNodeCounts[i]++;
            CurrNode = DstCol2[j];
          }
        }
      }
    }
  }
  // prefix sums: starting slot of each partition in the per-node id arrays
  TInt TotalSrcNodes = 0;
  TIntV SrcIdOffsets;
  for (int i = 0; i < SrcPartCnt; i++) {
    SrcIdOffsets.Add(TotalSrcNodes);
    TotalSrcNodes += SrcNodeCounts[i];
  }
  TInt TotalDstNodes = 0;
  TIntV DstIdOffsets;
  for (int i = 0; i < DstPartCnt; i++) {
    DstIdOffsets.Add(TotalDstNodes);
    TotalDstNodes += DstNodeCounts[i];
  }
  // printf("Total Src = %d, Total Dst = %d\n", TotalSrcNodes.Val, TotalDstNodes.Val);
  // find vector of (node_id, start_offset) where start_offset is the index of the first row with node_id
  TIntPrV SrcNodeIds, DstNodeIds;
  #pragma omp parallel sections
  {
    #pragma omp section
    { SrcNodeIds.Reserve(TotalSrcNodes, TotalSrcNodes); }
    #pragma omp section
    { DstNodeIds.Reserve(TotalDstNodes, TotalDstNodes); }
  }
  // Find the starting offset of each node (in both src and dst)
  #pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
    if (t < SrcPartCnt) {
      TInt i = t;
      if (SrcOffsets[i] != SrcOffsets[i+1]) {
        TInt CurrNode = SrcCol1[SrcOffsets[i]];
        TInt ThreadOffset = SrcIdOffsets[i];
        SrcNodeIds[ThreadOffset] = TIntPr(CurrNode, SrcOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
          while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
          if (j < SrcOffsets[i+1]) {
            CurrNode = SrcCol1[j];
            SrcNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    } else {
      TInt i = t - SrcPartCnt;
      if (DstOffsets[i] != DstOffsets[i+1]) {
        TInt CurrNode = DstCol2[DstOffsets[i]];
        TInt ThreadOffset = DstIdOffsets[i];
        DstNodeIds[ThreadOffset] = TIntPr(CurrNode, DstOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
          while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
          if (j < DstOffsets[i+1]) {
            CurrNode = DstCol2[j];
            DstNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    }
  }
  Sw->Stop(TStopwatch::Group);
  Sw->Start(TStopwatch::MergeNeighborhoods);
  // Find the combined neighborhood (both out-neighbors and in-neighbors) of each node:
  // merge the two sorted id lists; a triple is (node, src-index or -1, dst-index or -1).
  TIntTrV Nodes;
  Nodes.Reserve(TotalSrcNodes+TotalDstNodes);
  TInt i = 0, j = 0;
  while (i < TotalSrcNodes && j < TotalDstNodes) {
    if (SrcNodeIds[i].Val1 == DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, j));
      i++;
      j++;
    } else if (SrcNodeIds[i].Val1 < DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1));
      i++;
    } else {
      Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j));
      j++;
    }
  }
  for (; i < TotalSrcNodes; i++) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); }
  for (; j < TotalDstNodes; j++) { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); }
  Sw->Stop(TStopwatch::MergeNeighborhoods);
  Sw->Start(TStopwatch::AddNeighborhoods);
  TInt NumNodes = Nodes.Len();
  PGraphMP Graph = PGraphMP::TObj::New(NumNodes, NumRows);
  // NumThreads = omp_get_max_threads();
  // int Delta = (NumNodes+NumThreads-1)/NumThreads;
  TVec<TIntV> InVV(NumNodes);
  TVec<TIntV> OutVV(NumNodes);
  // omp_set_num_threads(NumThreads);
  #pragma omp parallel for schedule(static,100)
  for (int m = 0; m < NumNodes; m++) {
    //double startTr = omp_get_wtime();
    //TIntV OutV, InV;
    TInt n, i, j;
    Nodes[m].GetVal(n, i, j);
    // copy this node's contiguous runs of edge ids out of the sorted columns
    if (i >= 0) {
      TInt Offset = SrcNodeIds[i].GetVal2();
      TInt Sz = EdgeCol1.Len()-Offset;
      if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; }
      OutVV[m].Reserve(Sz);
      OutVV[m].CopyUniqueFrom(EdgeCol1, Offset, Sz);
    }
    if (j >= 0) {
      TInt Offset = DstNodeIds[j].GetVal2();
      TInt Sz = EdgeCol2.Len()-Offset;
      if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; }
      InVV[m].Reserve(Sz);
      InVV[m].CopyUniqueFrom(EdgeCol2, Offset, Sz);
    }
    Graph->AddNodeWithEdges(n, InVV[m], OutVV[m]);
  }
  Graph->SetNodes(NumNodes);
  Sw->Stop(TStopwatch::AddNeighborhoods);
  Sw->Start(TStopwatch::AddEdges);
  omp_set_num_threads(omp_get_max_threads());
  if (NodeType == atInt) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx(); // EdgeId
        TInt SrcId = RowI.GetIntAttr(SrcColIdx);
        TInt DstId = RowI.GetIntAttr(DstColIdx);
        Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
        RowI++;
      }
    }
  } else if (NodeType == atStr) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx(); // EdgeId
        TInt SrcId = RowI.GetStrMapById(SrcColIdx);
        TInt DstId = RowI.GetStrMapById(DstColIdx);
        Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
        RowI++;
      }
    }
  }
  Graph->SetEdges(NumRows);
  Graph->SetMxEId(NumRows);
  Sw->Stop(TStopwatch::AddEdges);
  // make single pass over all rows in the table to add attributes
  for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) {
    if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; }
    for (TInt ea_i = 0; ea_i < EdgeAttrV.Len(); ea_i++) {
      TStr ColName = EdgeAttrV[ea_i];
      TAttrType T = Table->GetColType(ColName);
      TInt Index = Table->GetColIdx(ColName);
      switch (T) {
        case atInt:
          Graph->AddIntAttrDatE(CurrRowIdx, Table->IntCols[Index][CurrRowIdx], ColName);
          break;
        case atFlt:
Graph->AddFltAttrDatE(CurrRowIdx, Table->FltCols[Index][CurrRowIdx], ColName); break; case atStr: Graph->AddStrAttrDatE(CurrRowIdx, Table->GetStrValIdx(Index, CurrRowIdx), ColName); break; } } } // double endAdd = omp_get_wtime(); // printf("Add time = %f\n", endAdd-endAlloc); return Graph; } #endif // GCC_ATOMIC /// Converts table to network sequentially. Takes edges from \c Table and nodes explicitly from \c NodeCol in \c NodeTable, with attribute vectors passed as columns in corresponding tables. template<class PGraph> PGraph ToNetwork(PTable Table, const TStr& SrcCol, const TStr& DstCol, TStrV& EdgeAttrV, PTable NodeTable, const TStr& NodeCol, TStrV& NodeAttrV, TAttrAggr AggrPolicy) { PGraph Graph = PGraph::TObj::New(); const TAttrType NodeType = Table->GetColType(SrcCol); Assert(NodeType == Table->GetColType(DstCol)); const TInt SrcColIdx = Table->GetColIdx(SrcCol); const TInt DstColIdx = Table->GetColIdx(DstCol); const TAttrType NodeTypeN = NodeTable->GetColType(NodeCol); const TInt NodeColIdx = NodeTable->GetColIdx(NodeCol); THash<TInt, TStrIntVH> NodeIntAttrs; THash<TInt, TStrFltVH> NodeFltAttrs; THash<TInt, TStrStrVH> NodeStrAttrs; //Table->AddGraphAttributeV(SrcAttrV, false, true, false); //Table->AddGraphAttributeV(DstAttrV, false, false, true); //Table->AddGraphAttributeV(EdgeAttrV, true, false, true); // node values - i.e. the unique values of src/dst col //THashSet<TInt> IntNodeVals; // for both int and string node attr types. 
THash<TFlt, TInt> FltNodeVals; // make single pass over all rows in the table for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) { if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; } // add src and dst nodes to graph if they are not seen earlier TInt SVal, DVal; if (NodeType == atFlt) { TFlt FSVal = (Table->FltCols)[SrcColIdx][CurrRowIdx]; SVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FSVal); TFlt FDVal = (Table->FltCols)[SrcColIdx][CurrRowIdx]; DVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FDVal); } else if (NodeType == atInt || NodeType == atStr) { if (NodeType == atInt) { SVal = (Table->IntCols)[SrcColIdx][CurrRowIdx]; DVal = (Table->IntCols)[DstColIdx][CurrRowIdx]; } else { SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx]; // if (strlen(Table->GetContextKey(SVal)) == 0) { continue; } //illegal value DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx]; // if (strlen(Table->GetContextKey(DVal)) == 0) { continue; } //illegal value } if (!Graph->IsNode(SVal)) {Graph->AddNode(SVal); } if (!Graph->IsNode(DVal)) {Graph->AddNode(DVal); } //CheckAndAddIntNode(Graph, IntNodeVals, SVal); //CheckAndAddIntNode(Graph, IntNodeVals, DVal); } // add edge and edge attributes Graph->AddEdge(SVal, DVal, CurrRowIdx); // Aggregate edge attributes and add to graph for (TInt i = 0; i < EdgeAttrV.Len(); i++) { TStr ColName = EdgeAttrV[i]; TAttrType T = Table->GetColType(ColName); TInt Index = Table->GetColIdx(ColName); switch (T) { case atInt: Graph->AddIntAttrDatE(CurrRowIdx, Table->IntCols[Index][CurrRowIdx], ColName); break; case atFlt: Graph->AddFltAttrDatE(CurrRowIdx, Table->FltCols[Index][CurrRowIdx], ColName); break; case atStr: Graph->AddStrAttrDatE(CurrRowIdx, Table->GetStrValIdx(Index, CurrRowIdx), ColName); break; } } } //Add node attribtes if (NodeAttrV.Len() > 0) { for (int CurrRowIdx = 0; CurrRowIdx < (NodeTable->Next).Len(); CurrRowIdx++) { if ((NodeTable->Next)[CurrRowIdx] == NodeTable->Invalid) { continue; } TInt NId; if 
(NodeTypeN == atInt) { NId = (NodeTable->IntCols)[NodeColIdx][CurrRowIdx]; } else if (NodeTypeN == atStr){ NId = (NodeTable->StrColMaps)[NodeColIdx][CurrRowIdx]; } for (TInt i = 0; i < NodeAttrV.Len(); i++) { TStr ColName = NodeAttrV[i]; TAttrType T = NodeTable->GetColType(ColName); TInt Index = NodeTable->GetColIdx(ColName); switch (T) { case atInt: Graph->AddIntAttrDatN(NId, NodeTable->IntCols[Index][CurrRowIdx], ColName); break; case atFlt: Graph->AddFltAttrDatN(NId, NodeTable->FltCols[Index][CurrRowIdx], ColName); break; case atStr: Graph->AddStrAttrDatN(NId, NodeTable->GetStrValIdx(Index, CurrRowIdx), ColName); break; } } } } return Graph; } #ifdef GCC_ATOMIC /// Converts table to network in parallel. Takes edges from \c Table and nodes explicitly from \c NodeCol in \c NodeTable, with attribute vectors passed as columns in corresponding tables. template<class PGraphMP> inline PGraphMP ToNetworkMP(PTable Table, const TStr& SrcCol, const TStr& DstCol, TStrV& EdgeAttrV, PTable NodeTable, const TStr& NodeCol, TStrV& NodeAttrV, TAttrAggr AggrPolicy) { TStopwatch* Sw = TStopwatch::GetInstance(); Sw->Start(TStopwatch::AllocateColumnCopies); const TInt SrcColIdx = Table->GetColIdx(SrcCol); const TInt DstColIdx = Table->GetColIdx(DstCol); const TInt NumRows = Table->GetNumValidRows(); const TAttrType NodeType = Table->GetColType(SrcCol); Assert(NodeType == Table->GetColType(DstCol)); TIntV SrcCol1, EdgeCol1, EdgeCol2, DstCol2; const TAttrType NodeTypeN = NodeTable->GetColType(NodeCol); const TInt NodeColIdx = NodeTable->GetColIdx(NodeCol); THash<TInt, TStrIntVH> NodeIntAttrs; THash<TInt, TStrFltVH> NodeFltAttrs; THash<TInt, TStrStrVH> NodeStrAttrs; #pragma omp parallel sections num_threads(4) { #pragma omp section { SrcCol1.Reserve(NumRows, NumRows); } #pragma omp section { EdgeCol1.Reserve(NumRows, NumRows); } #pragma omp section { DstCol2.Reserve(NumRows, NumRows); } #pragma omp section { EdgeCol2.Reserve(NumRows, NumRows); } } 
  Sw->Stop(TStopwatch::AllocateColumnCopies);
  Sw->Start(TStopwatch::CopyColumns);
  TIntPrV Partitions;
  Table->GetPartitionRanges(Partitions, omp_get_max_threads());
  TInt PartitionSize = Partitions[0].GetVal2()-Partitions[0].GetVal1()+1;
  // double endPartition = omp_get_wtime();
  // printf("Partition time = %f\n", endPartition-endResize);
  omp_set_num_threads(omp_get_max_threads());
  // Copy endpoints into the flat columns; the row index is used as the edge id.
  if (NodeType == atInt) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetIntAttr(SrcColIdx);
        EdgeCol1[RowId] = RowId;
        DstCol2[RowId] = RowI.GetIntAttr(DstColIdx);
        EdgeCol2[RowId] = RowId;
        RowI++;
      }
    }
  } else if (NodeType == atStr) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetStrMapById(SrcColIdx);
        EdgeCol1[RowId] = RowId;
        DstCol2[RowId] = RowI.GetStrMapById(DstColIdx);
        EdgeCol2[RowId] = RowId;
        RowI++;
      }
    }
  }
  Sw->Stop(TStopwatch::CopyColumns);
  Sw->Start(TStopwatch::Sort);
  omp_set_num_threads(omp_get_max_threads());
  // Sort both copies by node id, carrying the edge ids along (two concurrent tasks).
  #pragma omp parallel
  {
    #pragma omp single nowait
    {
      #ifndef GLib_WIN32
      #pragma omp task untied shared(SrcCol1, EdgeCol1)
      #endif
      { TTable::QSortKeyVal(SrcCol1, EdgeCol1, 0, NumRows-1); }
    }
    #pragma omp single nowait
    {
      #ifndef GLib_WIN32
      #pragma omp task untied shared(EdgeCol2, DstCol2)
      #endif
      { TTable::QSortKeyVal(DstCol2, EdgeCol2, 0, NumRows-1); }
    }
    #ifndef GLib_WIN32
    #pragma omp taskwait
    #endif
  }
  Sw->Stop(TStopwatch::Sort);
  Sw->Start(TStopwatch::Group);
  TInt NumThreads = omp_get_max_threads();
  TInt PartSize = (NumRows/NumThreads);
  // Find the offset of all partitions, each of which contains a list of rows.
  // Nodes from same sources or destinations are ensured to be kept within same partition.
  TIntV SrcOffsets, DstOffsets;
  SrcOffsets.Add(0);
  for (TInt i = 1; i < NumThreads; i++) {
    TInt CurrOffset = i * PartSize;
    while (CurrOffset < (i+1) * PartSize &&
           SrcCol1[CurrOffset-1] == SrcCol1[CurrOffset]) {
      // ensure that rows from the same sources are grouped together
      CurrOffset++;
    }
    if (CurrOffset < (i+1) * PartSize) { SrcOffsets.Add(CurrOffset); }
  }
  SrcOffsets.Add(NumRows);
  DstOffsets.Add(0);
  for (TInt i = 1; i < NumThreads; i++) {
    TInt CurrOffset = i * PartSize;
    while (CurrOffset < (i+1) * PartSize &&
           DstCol2[CurrOffset-1] == DstCol2[CurrOffset]) {
      // ensure that rows to the same destinations are grouped together
      CurrOffset++;
    }
    if (CurrOffset < (i+1) * PartSize) { DstOffsets.Add(CurrOffset); }
  }
  DstOffsets.Add(NumRows);
  TInt SrcPartCnt = SrcOffsets.Len()-1; // number of partitions
  TInt DstPartCnt = DstOffsets.Len()-1; // number of partitions
  // count the number of source nodes and destination nodes in each partition
  TIntV SrcNodeCounts, DstNodeCounts;
  SrcNodeCounts.Reserve(SrcPartCnt, SrcPartCnt);
  DstNodeCounts.Reserve(DstPartCnt, DstPartCnt);
  #pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
    if (t < SrcPartCnt) {
      // first SrcPartCnt tasks scan source partitions, the rest destination ones
      TInt i = t;
      if (SrcOffsets[i] != SrcOffsets[i+1]) {
        SrcNodeCounts[i] = 1;
        TInt CurrNode = SrcCol1[SrcOffsets[i]];
        for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
          while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
          if (j < SrcOffsets[i+1]) {
            SrcNodeCounts[i]++;
            CurrNode = SrcCol1[j];
          }
        }
      }
    } else {
      TInt i = t - SrcPartCnt;
      if (DstOffsets[i] != DstOffsets[i+1]) {
        DstNodeCounts[i] = 1;
        TInt CurrNode = DstCol2[DstOffsets[i]];
        for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
          while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
          if (j < DstOffsets[i+1]) {
            DstNodeCounts[i]++;
            CurrNode = DstCol2[j];
          }
        }
      }
    }
  }
  // prefix sums: starting slot of each partition in the per-node id arrays
  TInt TotalSrcNodes = 0;
  TIntV SrcIdOffsets;
  for (int i = 0; i < SrcPartCnt; i++) {
    SrcIdOffsets.Add(TotalSrcNodes);
    TotalSrcNodes += SrcNodeCounts[i];
  }
  TInt TotalDstNodes = 0;
  TIntV DstIdOffsets;
  for (int i = 0; i < DstPartCnt; i++) {
    DstIdOffsets.Add(TotalDstNodes);
    TotalDstNodes += DstNodeCounts[i];
  }
  // printf("Total Src = %d, Total Dst = %d\n", TotalSrcNodes.Val, TotalDstNodes.Val);
  // find vector of (node_id, start_offset) where start_offset is the index of the first row with node_id
  TIntPrV SrcNodeIds, DstNodeIds;
  #pragma omp parallel sections
  {
    #pragma omp section
    { SrcNodeIds.Reserve(TotalSrcNodes, TotalSrcNodes); }
    #pragma omp section
    { DstNodeIds.Reserve(TotalDstNodes, TotalDstNodes); }
  }
  // Find the starting offset of each node (in both src and dst)
  #pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
    if (t < SrcPartCnt) {
      TInt i = t;
      if (SrcOffsets[i] != SrcOffsets[i+1]) {
        TInt CurrNode = SrcCol1[SrcOffsets[i]];
        TInt ThreadOffset = SrcIdOffsets[i];
        SrcNodeIds[ThreadOffset] = TIntPr(CurrNode, SrcOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
          while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
          if (j < SrcOffsets[i+1]) {
            CurrNode = SrcCol1[j];
            SrcNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    } else {
      TInt i = t - SrcPartCnt;
      if (DstOffsets[i] != DstOffsets[i+1]) {
        TInt CurrNode = DstCol2[DstOffsets[i]];
        TInt ThreadOffset = DstIdOffsets[i];
        DstNodeIds[ThreadOffset] = TIntPr(CurrNode, DstOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
          while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
          if (j < DstOffsets[i+1]) {
            CurrNode = DstCol2[j];
            DstNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    }
  }
  Sw->Stop(TStopwatch::Group);
  Sw->Start(TStopwatch::MergeNeighborhoods);
  // Find the combined neighborhood (both out-neighbors and in-neighbors) of each node:
  // merge the two sorted id lists; a triple is (node, src-index or -1, dst-index or -1).
  TIntTrV Nodes;
  Nodes.Reserve(TotalSrcNodes+TotalDstNodes);
  TInt i = 0, j = 0;
  while (i < TotalSrcNodes && j < TotalDstNodes) {
    if (SrcNodeIds[i].Val1 == DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, j));
      i++;
      j++;
    } else if (SrcNodeIds[i].Val1 < DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1));
      i++;
    } else {
      Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j));
      j++;
    }
  }
  for (; i < TotalSrcNodes; i++) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); }
  for (; j < TotalDstNodes; j++) { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); }
  Sw->Stop(TStopwatch::MergeNeighborhoods);
  Sw->Start(TStopwatch::AddNeighborhoods);
  TInt NumNodes = Nodes.Len();
  PGraphMP Graph = PGraphMP::TObj::New(NumNodes, NumRows);
  // NumThreads = omp_get_max_threads();
  // int Delta = (NumNodes+NumThreads-1)/NumThreads;
  TVec<TIntV> InVV(NumNodes);
  TVec<TIntV> OutVV(NumNodes);
  // omp_set_num_threads(NumThreads);
  #pragma omp parallel for schedule(static,100)
  for (int m = 0; m < NumNodes; m++) {
    //double startTr = omp_get_wtime();
    //TIntV OutV, InV;
    TInt n, i, j;
    Nodes[m].GetVal(n, i, j);
    // copy this node's contiguous runs of edge ids out of the sorted columns
    if (i >= 0) {
      TInt Offset = SrcNodeIds[i].GetVal2();
      TInt Sz = EdgeCol1.Len()-Offset;
      if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; }
      OutVV[m].Reserve(Sz);
      OutVV[m].CopyUniqueFrom(EdgeCol1, Offset, Sz);
    }
    if (j >= 0) {
      TInt Offset = DstNodeIds[j].GetVal2();
      TInt Sz = EdgeCol2.Len()-Offset;
      if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; }
      InVV[m].Reserve(Sz);
      InVV[m].CopyUniqueFrom(EdgeCol2, Offset, Sz);
    }
    Graph->AddNodeWithEdges(n, InVV[m], OutVV[m]);
  }
  Graph->SetNodes(NumNodes);
  Sw->Stop(TStopwatch::AddNeighborhoods);
  Sw->Start(TStopwatch::AddEdges);
  omp_set_num_threads(omp_get_max_threads());
  if (NodeType == atInt) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx(); // EdgeId
        TInt SrcId = RowI.GetIntAttr(SrcColIdx);
        TInt DstId = RowI.GetIntAttr(DstColIdx);
        Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
        RowI++;
      }
    }
  } else if (NodeType == atStr) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx(); // EdgeId
        TInt SrcId = RowI.GetStrMapById(SrcColIdx);
        TInt DstId = RowI.GetStrMapById(DstColIdx);
        Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
        RowI++;
      }
    }
  }
  Graph->SetEdges(NumRows);
  Graph->SetMxEId(NumRows);
  Sw->Stop(TStopwatch::AddEdges);
  // make single pass over all rows in the table to add attributes
  for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) {
    if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; }
    for (TInt ea_i = 0; ea_i < EdgeAttrV.Len(); ea_i++) {
      TStr ColName = EdgeAttrV[ea_i];
      TAttrType T = Table->GetColType(ColName);
      TInt Index = Table->GetColIdx(ColName);
      switch (T) {
        case atInt:
          Graph->AddIntAttrDatE(CurrRowIdx, Table->IntCols[Index][CurrRowIdx], ColName);
          break;
        case atFlt:
          Graph->AddFltAttrDatE(CurrRowIdx, Table->FltCols[Index][CurrRowIdx], ColName);
          break;
        case atStr:
          Graph->AddStrAttrDatE(CurrRowIdx, Table->GetStrValIdx(Index, CurrRowIdx), ColName);
          break;
      }
    }
  }
  // Add node attributes from the node table
  if (NodeAttrV.Len() > 0) {
    for (int CurrRowIdx = 0; CurrRowIdx < (NodeTable->Next).Len(); CurrRowIdx++) {
      if ((NodeTable->Next)[CurrRowIdx] == NodeTable->Invalid) { continue; }
      TInt NId;
      if (NodeTypeN == atInt) {
        NId = (NodeTable->IntCols)[NodeColIdx][CurrRowIdx];
      } else if (NodeTypeN == atStr){
        NId = (NodeTable->StrColMaps)[NodeColIdx][CurrRowIdx];
      }
      for (TInt i = 0; i < NodeAttrV.Len(); i++) {
        TStr ColName = NodeAttrV[i];
        TAttrType T = NodeTable->GetColType(ColName);
        TInt Index = NodeTable->GetColIdx(ColName);
        switch (T) {
          case atInt:
            Graph->AddIntAttrDatN(NId, NodeTable->IntCols[Index][CurrRowIdx], ColName);
            break;
          case atFlt:
            Graph->AddFltAttrDatN(NId, NodeTable->FltCols[Index][CurrRowIdx], ColName);
            break;
          case atStr:
            Graph->AddStrAttrDatN(NId, NodeTable->GetStrValIdx(Index, CurrRowIdx), ColName);
            break;
        }
      }
    }
  }
  // double endAdd = omp_get_wtime();
  // printf("Add time = %f\n", endAdd-endAlloc);
  return Graph;
}
#endif // GCC_ATOMIC

}; // TSnap namespace

#endif // CONV_H
transform.h
/*!
 * Copyright 2018 XGBoost contributors
 */
#ifndef XGBOOST_COMMON_TRANSFORM_H_
#define XGBOOST_COMMON_TRANSFORM_H_

#include <utility>
#include <vector>
#include <type_traits>  // enable_if

#include <dmlc/omp.h>
#include <dmlc/common.h>

#include "xgboost/data.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/span.h"

#include "common.h"

#if defined (__CUDACC__)
#include "device_helpers.cuh"
#endif  // defined (__CUDACC__)

namespace xgboost {
namespace common {

// Threads per CUDA block used when launching LaunchCUDAKernel.
constexpr size_t kBlockThreads = 256;

namespace detail {

#if defined(__CUDACC__)
// Grid-stride kernel: applies _func to every index in _range, forwarding the
// unpacked device spans unchanged.
template <typename Functor, typename... SpanType>
__global__ void LaunchCUDAKernel(Functor _func, Range _range,
                                 SpanType... _spans) {
  for (auto i : dh::GridStrideRange(*_range.begin(), *_range.end())) {
    _func(i, _spans...);
  }
}
#endif  // defined(__CUDACC__)

}  // namespace detail

/*! \brief Do Transformation on HostDeviceVectors.
 *
 *  \tparam CompiledWithCuda A bool parameter used to distinguish compilation
 *         trajectories, users do not need to use it.
 *
 * Note: Using Transform is a VERY tricky thing to do. Transform uses template
 *   argument to duplicate itself into two different types, one for CPU,
 *   another for CUDA.  The trick is not without its flaw:
 *
 *     If you use it in a function that can be compiled by both nvcc and host
 *     compiler, the behaviour is un-defined!  Because your function is NOT
 *     duplicated by `CompiledWithCuda`. At link time, cuda compiler resolution
 *     will merge functions with same signature.
 */
template <bool CompiledWithCuda = WITH_CUDA()>
class Transform {
 private:
  template <typename Functor>
  struct Evaluator {
   public:
    Evaluator(Functor func, Range range, int device, bool shard)
        : func_(func), range_{std::move(range)},
          shard_{shard},
          device_{device} {}

    /*!
     * \brief Evaluate the functor with input pointers to HostDeviceVector.
     *
     * \tparam HDV...  HostDeviceVectors type.
     * \param  vectors Pointers to HostDeviceVector.
     */
    template <typename... HDV>
    void Eval(HDV... vectors) const {
      // device_ < 0 means CPU-only execution
      bool on_device = device_ >= 0;

      if (on_device) {
        LaunchCUDA(func_, vectors...);
      } else {
        LaunchCPU(func_, vectors...);
      }
    }

   private:
    // CUDA UnpackHDV: expose the vector's device memory as a span
    template <typename T>
    Span<T> UnpackHDVOnDevice(HostDeviceVector<T>* _vec) const {
      auto span = _vec->DeviceSpan();
      return span;
    }
    template <typename T>
    Span<T const> UnpackHDVOnDevice(const HostDeviceVector<T>* _vec) const {
      auto span = _vec->ConstDeviceSpan();
      return span;
    }
    // CPU UnpackHDV: expose the vector's host memory as a span
    template <typename T>
    Span<T> UnpackHDV(HostDeviceVector<T>* _vec) const {
      return Span<T> {_vec->HostPointer(),
            static_cast<typename Span<T>::index_type>(_vec->Size())};
    }
    template <typename T>
    Span<T const> UnpackHDV(const HostDeviceVector<T>* _vec) const {
      return Span<T const> {_vec->ConstHostPointer(),
            static_cast<typename Span<T>::index_type>(_vec->Size())};
    }
    // Recursive sync host: pull every vector's data to host before the CPU pass
    template <typename T>
    void SyncHost(const HostDeviceVector<T> *_vector) const {
      _vector->ConstHostPointer();
    }
    template <typename Head, typename... Rest>
    void SyncHost(const HostDeviceVector<Head> *_vector,
                  const HostDeviceVector<Rest> *... _vectors) const {
      _vector->ConstHostPointer();
      SyncHost(_vectors...);
    }
    // Recursive unpack for Shard: place every vector on the target device
    template <typename T>
    void UnpackShard(int device, const HostDeviceVector<T> *vector) const {
      vector->SetDevice(device);
    }
    template <typename Head, typename... Rest>
    void UnpackShard(int device,
                     const HostDeviceVector<Head> *_vector,
                     const HostDeviceVector<Rest> *... _vectors) const {
      _vector->SetDevice(device);
      UnpackShard(device, _vectors...);
    }

#if defined(__CUDACC__)
    template <typename std::enable_if<CompiledWithCuda>::type* = nullptr,
              typename... HDV>
    void LaunchCUDA(Functor _func, HDV*... _vectors) const {
      if (shard_) { UnpackShard(device_, _vectors...); }

      size_t range_size = *range_.end() - *range_.begin();

      // Extract index to deal with possible old OpenMP.
      // This deals with situation like multi-class setting where
      // granularity is used in data vector.
      size_t shard_size = range_size;
      Range shard_range {0, static_cast<Range::DifferenceType>(shard_size)};
      dh::safe_cuda(cudaSetDevice(device_));
      const int kGrids =
          static_cast<int>(DivRoundUp(*(range_.end()), kBlockThreads));
      if (kGrids == 0) { return; }
      detail::LaunchCUDAKernel<<<kGrids, kBlockThreads>>>(  // NOLINT
          _func, shard_range, UnpackHDVOnDevice(_vectors)...);
    }
#else
    /*! \brief Dummy function defined when compiling for CPU. */
    template <typename std::enable_if<!CompiledWithCuda>::type* = nullptr,
              typename... HDV>
    void LaunchCUDA(Functor _func, HDV*... _vectors) const {
      // unreachable unless a CUDA device id was requested in a CPU-only build
      LOG(FATAL) << "Not part of device code. WITH_CUDA: " << WITH_CUDA();
    }
#endif  // defined(__CUDACC__)

    template <typename... HDV>
    void LaunchCPU(Functor func, HDV*... vectors) const {
      omp_ulong end = static_cast<omp_ulong>(*(range_.end()));
      dmlc::OMPException omp_exc;
      SyncHost(vectors...);
#pragma omp parallel for schedule(static)
      for (omp_ulong idx = 0; idx < end; ++idx) {
        omp_exc.Run(func, idx, UnpackHDV(vectors)...);
      }
      omp_exc.Rethrow();
    }

   private:
    /*! \brief Callable object. */
    Functor func_;
    /*! \brief Range object specifying parallel threads index range. */
    Range range_;
    /*! \brief Whether sharding for vectors is required. */
    bool shard_;
    int device_;
  };

 public:
  /*!
   * \brief Initialize a Transform object.
   *
   * \tparam Functor  A callable object type.
   * \return A Evaluator having one method Eval.
   *
   * \param func    A callable object, accepting a size_t thread index,
   *                  followed by a set of Span classes.
   * \param range   Range object specifying parallel threads index range.
   * \param device  Specify GPU to use.
   * \param shard   Whether Shard for HostDeviceVector is needed.
   */
  template <typename Functor>
  static Evaluator<Functor> Init(Functor func, Range const range,
                                 int device,
                                 bool const shard = true) {
    return Evaluator<Functor> {func, std::move(range), device, shard};
  }
};

}  // namespace common
}  // namespace xgboost

#endif  // XGBOOST_COMMON_TRANSFORM_H_
kMedoidPlusPlus.c
#include <stdio.h> #include <stdlib.h> #include "../../dataStruct/data.h" #include "../../input/random.h" /** * * * * **/ extern dataIF data; void GetRandomWithDist(int ,double*, double*); void KMedoidPlusPlusInit(int k){ int i = 0; int size; double *D = malloc(sizeof(double)*GetDataSize()); double *prob = NULL; int c = (int)GetUniform(0,GetDataSize()); c = AddCentroid(c); int j; for (j = 0; j < GetDataSize(); ++j){ value vj, vc; GetIthData(j, &vj); GetIthData(c, &vc); D[j] = data.distance(&vj,&vc); } double max = D[0]; for(j=1; j< GetDataSize(); j++) if (D[j] > max) max = D[j]; for(j=0; j< GetDataSize(); j++) D[j]/=max; i++; while(i < k){ size = GetDataSize() - i; prob = malloc(sizeof(double)* size); GetRandomWithDist(size, D, prob); double r = GetUniform(0,prob[size-1]); //printf("------_+++===%f , r =%f\n", prob[size-1], r); for(j = 0;j < size; j++){ if(j == size-1){ c = j; continue; } if(prob[j]<r && r <= prob[j+1]) c = j; } c = AddCentroid(c); double tempD=0.0; // #pragma omp parallel for for (j = 0; j < GetDataSize() - i; ++j){ value vj, vc; GetIthData(j, &vj); GetIthData(c, &vc); double dist = data.distance(&vj,&vc); if (D[j] > dist) D[j] = dist; } i++; free(prob); } free(D); } void GetRandomWithDist(int size, double* D, double *prob){ int j; prob[0] = D[0]*D[0]; for(j=1; j< size; j++){ prob[j] = prob[j-1] + D[j]*D[j]; } }